linux/kernel/trace/ftrace.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/sched/task.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>
#include <linux/kprobes.h>

#include <trace/events/sched.h>

#include <asm/sections.h>
#include <asm/setup.h>

#include "ftrace_internal.h"
#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)                    \
        ({                                      \
                int ___r = cond;                \
                if (WARN_ON(___r))              \
                        ftrace_kill();          \
                ___r;                           \
        })

#define FTRACE_WARN_ON_ONCE(cond)               \
        ({                                      \
                int ___r = cond;                \
                if (WARN_ON_ONCE(___r))         \
                        ftrace_kill();          \
                ___r;                           \
        })
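
/*
 * Both macros evaluate to the tested condition, so a failed check can
 * both shut ftrace down (via ftrace_kill()) and gate an early exit.
 * A minimal illustrative use:
 *
 *        if (FTRACE_WARN_ON(!rec))
 *                return;
 */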

/* hash bits for specific function selection */
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname)  \
        .func_hash              = &opsname.local_hash,                  \
        .local_hash.regex_lock  = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
#endif

enum {
        FTRACE_MODIFY_ENABLE_FL         = (1 << 0),
        FTRACE_MODIFY_MAY_SLEEP_FL      = (1 << 1),
};

struct ftrace_ops ftrace_list_end __read_mostly = {
        .func           = ftrace_stub,
        .flags          = FTRACE_OPS_FL_STUB,
        INIT_OPS_HASH(ftrace_list_end)
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

static bool ftrace_pids_enabled(struct ftrace_ops *ops)
{
        struct trace_array *tr;

        if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
                return false;

        tr = ops->private;

        return tr->function_pids != NULL || tr->function_no_pids != NULL;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

DEFINE_MUTEX(ftrace_lock);

struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
struct ftrace_ops global_ops;

#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                                 struct ftrace_ops *op, struct ftrace_regs *fregs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
#endif

static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
        if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
                mutex_init(&ops->local_hash.regex_lock);
                ops->func_hash = &ops->local_hash;
                ops->flags |= FTRACE_OPS_FL_INITIALIZED;
        }
#endif
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
                            struct ftrace_ops *op, struct ftrace_regs *fregs)
{
        struct trace_array *tr = op->private;
        int pid;

        if (tr) {
                pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
                if (pid == FTRACE_PID_IGNORE)
                        return;
                if (pid != FTRACE_PID_TRACE &&
                    pid != current->pid)
                        return;
        }

        op->saved_func(ip, parent_ip, op, fregs);
}

static void ftrace_sync_ipi(void *data)
{
        /* Probably not needed, but do it anyway */
        smp_rmb();
}

static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
        /*
         * If this is a dynamic or RCU ops, or we force the list func,
         * then it needs to call the list anyway.
         */
        if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
            FTRACE_FORCE_LIST_FUNC)
                return ftrace_ops_list_func;

        return ftrace_ops_get_func(ops);
}

static void update_ftrace_function(void)
{
        ftrace_func_t func;

        /*
         * Prepare the ftrace_ops that the arch callback will use.
         * If there's only one ftrace_ops registered, the ftrace_ops_list
         * will point to the ops we want.
         */
        set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
                                                lockdep_is_held(&ftrace_lock));

        /* If there's no ftrace_ops registered, just call the stub function */
        if (set_function_trace_op == &ftrace_list_end) {
                func = ftrace_stub;

        /*
         * If we are at the end of the list and this ops is
         * recursion safe and not dynamic and the arch supports passing ops,
         * then have the mcount trampoline call the function directly.
         */
        } else if (rcu_dereference_protected(ftrace_ops_list->next,
                        lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
                func = ftrace_ops_get_list_func(ftrace_ops_list);

        } else {
                /* Just use the default ftrace_ops */
                set_function_trace_op = &ftrace_list_end;
                func = ftrace_ops_list_func;
        }

        update_function_graph_func();

        /* If there's no change, then do nothing more here */
        if (ftrace_trace_function == func)
                return;

        /*
         * If we are using the list function, it doesn't care
         * about the function_trace_ops.
         */
        if (func == ftrace_ops_list_func) {
                ftrace_trace_function = func;
                /*
                 * Don't even bother setting function_trace_ops,
                 * it would be racy to do so anyway.
                 */
                return;
        }

#ifndef CONFIG_DYNAMIC_FTRACE
        /*
         * For static tracing, we need to be a bit more careful.
         * The function change takes effect immediately. Thus,
         * we need to coordinate the setting of the function_trace_ops
         * with the setting of the ftrace_trace_function.
         *
         * Set the function to the list ops, which will call the
         * function we want, albeit indirectly, but it handles the
         * ftrace_ops and doesn't depend on function_trace_op.
         */
        ftrace_trace_function = ftrace_ops_list_func;
        /*
         * Make sure all CPUs see this. Yes this is slow, but static
         * tracing is slow and nasty to have enabled.
         */
        synchronize_rcu_tasks_rude();
        /* Now all cpus are using the list ops. */
        function_trace_op = set_function_trace_op;
        /* Make sure the function_trace_op is visible on all CPUs */
        smp_wmb();
        /* Nasty way to force a rmb on all cpus */
        smp_call_function(ftrace_sync_ipi, NULL, 1);
        /* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

        ftrace_trace_function = func;
}

static void add_ftrace_ops(struct ftrace_ops __rcu **list,
                           struct ftrace_ops *ops)
{
        rcu_assign_pointer(ops->next, *list);

        /*
         * We are inserting ops into the list, but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer itself in the list.
         */
        rcu_assign_pointer(*list, ops);
}
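
/*
 * The reader side (do_for_each_ftrace_op()) walks this list locklessly
 * under RCU, so the order of the two assignments above matters. A sketch
 * of the reader a newly added ops must be safe against:
 *
 *        op = rcu_dereference(list);     (may already observe the new ops)
 *        op->func(...);
 *        op = rcu_dereference(op->next); (ops->next was published first,
 *                                         so the walk cannot fall off the list)
 */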

static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
                             struct ftrace_ops *ops)
{
        struct ftrace_ops **p;

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (rcu_dereference_protected(*list,
                        lockdep_is_held(&ftrace_lock)) == ops &&
            rcu_dereference_protected(ops->next,
                        lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
                *list = &ftrace_list_end;
                return 0;
        }

        for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops)
                return -1;

        *p = (*p)->next;
        return 0;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

int __register_ftrace_function(struct ftrace_ops *ops)
{
        if (ops->flags & FTRACE_OPS_FL_DELETED)
                return -EINVAL;

        if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
                return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        /*
         * If the ftrace_ops specifies SAVE_REGS, then it can only be used
         * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
         * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
         */
        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
            !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
                return -EINVAL;

        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
                ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif
        if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT))
                return -EBUSY;

        if (!core_kernel_data((unsigned long)ops))
                ops->flags |= FTRACE_OPS_FL_DYNAMIC;

        add_ftrace_ops(&ftrace_ops_list, ops);

        /* Always save the function, and reset at unregistering */
        ops->saved_func = ops->func;

        if (ftrace_pids_enabled(ops))
                ops->func = ftrace_pid_func;

        ftrace_update_trampoline(ops);

        if (ftrace_enabled)
                update_ftrace_function();

        return 0;
}

int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
                return -EBUSY;

        ret = remove_ftrace_ops(&ftrace_ops_list, ops);

        if (ret < 0)
                return ret;

        if (ftrace_enabled)
                update_ftrace_function();

        ops->func = ops->saved_func;

        return 0;
}

static void ftrace_update_pid_func(void)
{
        struct ftrace_ops *op;

        /* Only do something if we are tracing something */
        if (ftrace_trace_function == ftrace_stub)
                return;

        do_for_each_ftrace_op(op, ftrace_ops_list) {
                if (op->flags & FTRACE_OPS_FL_PID) {
                        op->func = ftrace_pids_enabled(op) ?
                                ftrace_pid_func : op->saved_func;
                        ftrace_update_trampoline(op);
                }
        } while_for_each_ftrace_op(op);

        update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
        struct hlist_node               node;
        unsigned long                   ip;
        unsigned long                   counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        unsigned long long              time;
        unsigned long long              time_squared;
#endif
};

struct ftrace_profile_page {
        struct ftrace_profile_page      *next;
        unsigned long                   index;
        struct ftrace_profile           records[];
};

struct ftrace_profile_stat {
        atomic_t                        disabled;
        struct hlist_head               *hash;
        struct ftrace_profile_page      *pages;
        struct ftrace_profile_page      *start;
        struct tracer_stat              stat;
};

#define PROFILE_RECORDS_SIZE                                            \
        (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE                                       \
        (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

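/*
 * Rough numbers, assuming a 64-bit build with 4K pages and the graph
 * tracer enabled: sizeof(struct ftrace_profile) is 48 bytes and the
 * page header is 16, so PROFILES_PER_PAGE works out to
 * (4096 - 16) / 48 = 85 records per page.
 */
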
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)

static void *
function_stat_next(void *v, int idx)
{
        struct ftrace_profile *rec = v;
        struct ftrace_profile_page *pg;

        pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
        if (idx != 0)
                rec++;

        if ((void *)rec >= (void *)&pg->records[pg->index]) {
                pg = pg->next;
                if (!pg)
                        return NULL;
                rec = &pg->records[0];
                if (!rec->counter)
                        goto again;
        }

        return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
        struct ftrace_profile_stat *stat =
                container_of(trace, struct ftrace_profile_stat, stat);

        if (!stat || !stat->start)
                return NULL;

        return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(const void *p1, const void *p2)
{
        const struct ftrace_profile *a = p1;
        const struct ftrace_profile *b = p2;

        if (a->time < b->time)
                return -1;
        if (a->time > b->time)
                return 1;

        return 0;
}
#else
/* without function graph, compare against the hit counter */
static int function_stat_cmp(const void *p1, const void *p2)
{
        const struct ftrace_profile *a = p1;
        const struct ftrace_profile *b = p2;

        if (a->counter < b->counter)
                return -1;
        if (a->counter > b->counter)
                return 1;

        return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        seq_puts(m, "  Function                               "
                 "Hit    Time            Avg             s^2\n"
                    "  --------                               "
                 "---    ----            ---             ---\n");
#else
        seq_puts(m, "  Function                               Hit\n"
                    "  --------                               ---\n");
#endif
        return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
        struct ftrace_profile *rec = v;
        char str[KSYM_SYMBOL_LEN];
        int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        static struct trace_seq s;
        unsigned long long avg;
        unsigned long long stddev;
#endif
        mutex_lock(&ftrace_profile_lock);

        /* we raced with function_profile_reset() */
        if (unlikely(rec->counter == 0)) {
                ret = -EBUSY;
                goto out;
        }

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        avg = div64_ul(rec->time, rec->counter);
        if (tracing_thresh && (avg < tracing_thresh))
                goto out;
#endif

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
        seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        seq_puts(m, "    ");

        /* Sample variance (s^2) */
        if (rec->counter <= 1)
                stddev = 0;
        else {
                /*
                 * Use the computational formula for the sample variance:
                 * s^2 = (n * \Sum (x_i)^2 - (\Sum x_i)^2) / (n * (n-1))
                 */
                stddev = rec->counter * rec->time_squared -
                         rec->time * rec->time;

                /*
                 * Divide by 1000 only once here; trace_print_graph_duration()
                 * will divide by 1000 again, completing the ns^2 -> us^2
                 * conversion.
                 */
                stddev = div64_ul(stddev,
                                  rec->counter * (rec->counter - 1) * 1000);
        }

        trace_seq_init(&s);
        trace_print_graph_duration(rec->time, &s);
        trace_seq_puts(&s, "    ");
        trace_print_graph_duration(avg, &s);
        trace_seq_puts(&s, "    ");
        trace_print_graph_duration(stddev, &s);
        trace_print_seq(m, &s);
#endif
        seq_putc(m, '\n');
out:
        mutex_unlock(&ftrace_profile_lock);

        return ret;
}
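
/*
 * Worked example of the s^2 computed above (illustrative values): two
 * calls taking 1000ns and 3000ns give counter = 2, time = 4000 and
 * time_squared = 10,000,000. Then n*\Sum x^2 - (\Sum x)^2 =
 * 20,000,000 - 16,000,000 = 4,000,000, and dividing by n*(n-1) = 2
 * yields 2,000,000 ns^2 = 2 us^2, matching the textbook sample variance
 * ((1000-2000)^2 + (3000-2000)^2) / (2-1).
 */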

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
        struct ftrace_profile_page *pg;

        pg = stat->pages = stat->start;

        while (pg) {
                memset(pg->records, 0, PROFILE_RECORDS_SIZE);
                pg->index = 0;
                pg = pg->next;
        }

        memset(stat->hash, 0,
               FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
        struct ftrace_profile_page *pg;
        int functions;
        int pages;
        int i;

        /* If we already allocated, do nothing */
        if (stat->pages)
                return 0;

        stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
        if (!stat->pages)
                return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
        functions = ftrace_update_tot_cnt;
#else
        /*
         * We do not know the number of functions that exist because
         * dynamic tracing is what counts them. From past experience
         * we have around 20K functions. That should be more than enough.
         * It is highly unlikely we will execute every function in
         * the kernel.
         */
        functions = 20000;
#endif

        pg = stat->start = stat->pages;

        pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

        for (i = 1; i < pages; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);
                if (!pg->next)
                        goto out_free;
                pg = pg->next;
        }

        return 0;

 out_free:
        pg = stat->start;
        while (pg) {
                unsigned long tmp = (unsigned long)pg;

                pg = pg->next;
                free_page(tmp);
        }

        stat->pages = NULL;
        stat->start = NULL;

        return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
        struct ftrace_profile_stat *stat;
        int size;

        stat = &per_cpu(ftrace_profile_stats, cpu);

        if (stat->hash) {
                /* If the profile is already created, simply reset it */
                ftrace_profile_reset(stat);
                return 0;
        }

        /*
         * We are profiling all functions, but usually only a few thousand
         * functions are hit. We'll make a hash of 1024 items.
         */
        size = FTRACE_PROFILE_HASH_SIZE;

        stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);

        if (!stat->hash)
                return -ENOMEM;

        /* Preallocate the function profiling pages */
        if (ftrace_profile_pages_init(stat) < 0) {
                kfree(stat->hash);
                stat->hash = NULL;
                return -ENOMEM;
        }

        return 0;
}

static int ftrace_profile_init(void)
{
        int cpu;
        int ret = 0;

        for_each_possible_cpu(cpu) {
                ret = ftrace_profile_init_cpu(cpu);
                if (ret)
                        break;
        }

        return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
        struct ftrace_profile *rec;
        struct hlist_head *hhd;
        unsigned long key;

        key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
        hhd = &stat->hash[key];

        if (hlist_empty(hhd))
                return NULL;

        hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
                if (rec->ip == ip)
                        return rec;
        }

        return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
                               struct ftrace_profile *rec)
{
        unsigned long key;

        key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
        hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated; this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
        struct ftrace_profile *rec = NULL;

        /* prevent recursion (from NMIs) */
        if (atomic_inc_return(&stat->disabled) != 1)
                goto out;

        /*
         * Try to find the function again since an NMI
         * could have added it
         */
        rec = ftrace_find_profiled_func(stat, ip);
        if (rec)
                goto out;

        if (stat->pages->index == PROFILES_PER_PAGE) {
                if (!stat->pages->next)
                        goto out;
                stat->pages = stat->pages->next;
        }

        rec = &stat->pages->records[stat->pages->index++];
        rec->ip = ip;
        ftrace_add_profile(stat, rec);

 out:
        atomic_dec(&stat->disabled);

        return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
                      struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
        struct ftrace_profile_stat *stat;
        struct ftrace_profile *rec;
        unsigned long flags;

        if (!ftrace_profile_enabled)
                return;

        local_irq_save(flags);

        stat = this_cpu_ptr(&ftrace_profile_stats);
        if (!stat->hash || !ftrace_profile_enabled)
                goto out;

        rec = ftrace_find_profiled_func(stat, ip);
        if (!rec) {
                rec = ftrace_profile_alloc(stat, ip);
                if (!rec)
                        goto out;
        }

        rec->counter++;
 out:
        local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static bool fgraph_graph_time = true;

void ftrace_graph_graph_time_control(bool enable)
{
        fgraph_graph_time = enable;
}

static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
        struct ftrace_ret_stack *ret_stack;

        function_profile_call(trace->func, 0, NULL, NULL);

        /* If function graph is shutting down, ret_stack can be NULL */
        if (!current->ret_stack)
                return 0;

        ret_stack = ftrace_graph_get_ret_stack(current, 0);
        if (ret_stack)
                ret_stack->subtime = 0;

        return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
        struct ftrace_ret_stack *ret_stack;
        struct ftrace_profile_stat *stat;
        unsigned long long calltime;
        struct ftrace_profile *rec;
        unsigned long flags;

        local_irq_save(flags);
        stat = this_cpu_ptr(&ftrace_profile_stats);
        if (!stat->hash || !ftrace_profile_enabled)
                goto out;

        /* If the calltime was zero'd, ignore it */
        if (!trace->calltime)
                goto out;

        calltime = trace->rettime - trace->calltime;

        if (!fgraph_graph_time) {

                /* Append this call time to the parent time to subtract */
                ret_stack = ftrace_graph_get_ret_stack(current, 1);
                if (ret_stack)
                        ret_stack->subtime += calltime;

                ret_stack = ftrace_graph_get_ret_stack(current, 0);
                if (ret_stack && ret_stack->subtime < calltime)
                        calltime -= ret_stack->subtime;
                else
                        calltime = 0;
        }

        rec = ftrace_find_profiled_func(stat, trace->func);
        if (rec) {
                rec->time += calltime;
                rec->time_squared += calltime * calltime;
        }

 out:
        local_irq_restore(flags);
}

static struct fgraph_ops fprofiler_ops = {
        .entryfunc = &profile_graph_entry,
        .retfunc = &profile_graph_return,
};

static int register_ftrace_profiler(void)
{
        return register_ftrace_graph(&fprofiler_ops);
}

static void unregister_ftrace_profiler(void)
{
        unregister_ftrace_graph(&fprofiler_ops);
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
        .func           = function_profile_call,
        .flags          = FTRACE_OPS_FL_INITIALIZED,
        INIT_OPS_HASH(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
        return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
        unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        val = !!val;

        mutex_lock(&ftrace_profile_lock);
        if (ftrace_profile_enabled ^ val) {
                if (val) {
                        ret = ftrace_profile_init();
                        if (ret < 0) {
                                cnt = ret;
                                goto out;
                        }

                        ret = register_ftrace_profiler();
                        if (ret < 0) {
                                cnt = ret;
                                goto out;
                        }
                        ftrace_profile_enabled = 1;
                } else {
                        ftrace_profile_enabled = 0;
                        /*
                         * unregister_ftrace_profiler() calls stop_machine(),
                         * so this acts like a synchronize_rcu().
                         */
                        unregister_ftrace_profiler();
                }
        }
 out:
        mutex_unlock(&ftrace_profile_lock);

        *ppos += cnt;

        return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
{
        char buf[64];           /* big enough to hold a number */
        int r;

        r = sprintf(buf, "%u\n", ftrace_profile_enabled);
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
        .open           = tracing_open_generic,
        .read           = ftrace_profile_read,
        .write          = ftrace_profile_write,
        .llseek         = default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
        .name           = "functions",
        .stat_start     = function_stat_start,
        .stat_next      = function_stat_next,
        .stat_cmp       = function_stat_cmp,
        .stat_headers   = function_stat_headers,
        .stat_show      = function_stat_show
};

static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
        struct ftrace_profile_stat *stat;
        struct dentry *entry;
        char *name;
        int ret;
        int cpu;

        for_each_possible_cpu(cpu) {
                stat = &per_cpu(ftrace_profile_stats, cpu);

                name = kasprintf(GFP_KERNEL, "function%d", cpu);
                if (!name) {
                        /*
                         * The files created are permanent; if something
                         * goes wrong, we still do not free the memory.
                         */
                        WARN(1,
                             "Could not allocate stat file for cpu %d\n",
                             cpu);
                        return;
                }
                stat->stat = function_stats;
                stat->stat.name = name;
                ret = register_stat_tracer(&stat->stat);
                if (ret) {
                        WARN(1,
                             "Could not register function stat for cpu %d\n",
                             cpu);
                        kfree(name);
                        return;
                }
        }

        entry = tracefs_create_file("function_profile_enabled", 0644,
                                    d_tracer, NULL, &ftrace_profile_fops);
        if (!entry)
                pr_warn("Could not create tracefs 'function_profile_enabled' entry\n");
}
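
/*
 * The resulting tracefs interface, for example:
 *
 *        # echo 1 > /sys/kernel/tracing/function_profile_enabled
 *        # cat /sys/kernel/tracing/trace_stat/function0
 *
 * where the per-cpu "function<N>" files are the stat files registered
 * above (the legacy /sys/kernel/debug/tracing mount works as well).
 */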

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

/*
 * Set when doing a global update, like enabling all recs or disabling them.
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

struct ftrace_func_probe {
        struct ftrace_probe_ops *probe_ops;
        struct ftrace_ops       ops;
        struct trace_array      *tr;
        struct list_head        list;
        void                    *data;
        int                     ref;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read-only section such that if
 * anyone does try to modify them, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
        .buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH      ((struct ftrace_hash *)&empty_hash)

struct ftrace_ops global_ops = {
        .func                           = ftrace_stub,
        .local_hash.notrace_hash        = EMPTY_HASH,
        .local_hash.filter_hash         = EMPTY_HASH,
        INIT_OPS_HASH(global_ops)
        .flags                          = FTRACE_OPS_FL_INITIALIZED |
                                          FTRACE_OPS_FL_PID,
};

/*
 * Used by the stack unwinder to know about dynamic ftrace trampolines.
 */
struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
{
        struct ftrace_ops *op = NULL;

        /*
         * Some of the ops may be dynamically allocated,
         * they are freed after a synchronize_rcu().
         */
        preempt_disable_notrace();

        do_for_each_ftrace_op(op, ftrace_ops_list) {
                /*
                 * This is to check for dynamically allocated trampolines.
                 * Trampolines that are in kernel text will have
                 * core_kernel_text() return true.
                 */
                if (op->trampoline && op->trampoline_size)
                        if (addr >= op->trampoline &&
                            addr < op->trampoline + op->trampoline_size) {
                                preempt_enable_notrace();
                                return op;
                        }
        } while_for_each_ftrace_op(op);
        preempt_enable_notrace();

        return NULL;
}

/*
 * This is used by __kernel_text_address() to return true if the
 * address is on a dynamically allocated trampoline that would
 * not return true for either core_kernel_text() or
 * is_module_text_address().
 */
bool is_ftrace_trampoline(unsigned long addr)
{
        return ftrace_ops_trampoline(addr) != NULL;
}

struct ftrace_page {
        struct ftrace_page      *next;
        struct dyn_ftrace       *records;
        int                     index;
        int                     order;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
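
/*
 * For scale: assuming a 64-bit build where struct dyn_ftrace is just an
 * ip plus flags (16 bytes), ENTRIES_PER_PAGE is 4096 / 16 = 256, so a
 * kernel with ~40K patchable functions needs on the order of 160 pages
 * of records.
 */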

static struct ftrace_page       *ftrace_pages_start;
static struct ftrace_page       *ftrace_pages;

static __always_inline unsigned long
ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
{
        if (hash->size_bits > 0)
                return hash_long(ip, hash->size_bits);

        return 0;
}

/* Only use this function if ftrace_hash_empty() has already been tested */
static __always_inline struct ftrace_func_entry *
__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
        unsigned long key;
        struct ftrace_func_entry *entry;
        struct hlist_head *hhd;

        key = ftrace_hash_key(hash, ip);
        hhd = &hash->buckets[key];

        hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
                if (entry->ip == ip)
                        return entry;
        }
        return NULL;
}

/**
 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
 * @hash: The hash to look at
 * @ip: The instruction pointer to test
 *
 * Search a given @hash to see if a given instruction pointer (@ip)
 * exists in it.
 *
 * Returns the entry that holds the @ip if found. NULL otherwise.
 */
struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
        if (ftrace_hash_empty(hash))
                return NULL;

        return __ftrace_lookup_ip(hash, ip);
}

static void __add_hash_entry(struct ftrace_hash *hash,
                             struct ftrace_func_entry *entry)
{
        struct hlist_head *hhd;
        unsigned long key;

        key = ftrace_hash_key(hash, entry->ip);
        hhd = &hash->buckets[key];
        hlist_add_head(&entry->hlist, hhd);
        hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
        struct ftrace_func_entry *entry;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        entry->ip = ip;
        __add_hash_entry(hash, entry);

        return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
                  struct ftrace_func_entry *entry)
{
        hlist_del(&entry->hlist);
        kfree(entry);
        hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
                  struct ftrace_func_entry *entry)
{
        hlist_del_rcu(&entry->hlist);
        hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
        struct hlist_head *hhd;
        struct hlist_node *tn;
        struct ftrace_func_entry *entry;
        int size = 1 << hash->size_bits;
        int i;

        if (!hash->count)
                return;

        for (i = 0; i < size; i++) {
                hhd = &hash->buckets[i];
                hlist_for_each_entry_safe(entry, tn, hhd, hlist)
                        free_hash_entry(hash, entry);
        }
        FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
{
        list_del(&ftrace_mod->list);
        kfree(ftrace_mod->module);
        kfree(ftrace_mod->func);
        kfree(ftrace_mod);
}

static void clear_ftrace_mod_list(struct list_head *head)
{
        struct ftrace_mod_load *p, *n;

        /* stack tracer isn't supported yet */
        if (!head)
                return;

        mutex_lock(&ftrace_lock);
        list_for_each_entry_safe(p, n, head, list)
                free_ftrace_mod(p);
        mutex_unlock(&ftrace_lock);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
        if (!hash || hash == EMPTY_HASH)
                return;
        ftrace_hash_clear(hash);
        kfree(hash->buckets);
        kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
        struct ftrace_hash *hash;

        hash = container_of(rcu, struct ftrace_hash, rcu);
        free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
        if (!hash || hash == EMPTY_HASH)
                return;
        call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
}

void ftrace_free_filter(struct ftrace_ops *ops)
{
        ftrace_ops_init(ops);
        free_ftrace_hash(ops->func_hash->filter_hash);
        free_ftrace_hash(ops->func_hash->notrace_hash);
}

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
        struct ftrace_hash *hash;
        int size;

        hash = kzalloc(sizeof(*hash), GFP_KERNEL);
        if (!hash)
                return NULL;

        size = 1 << size_bits;
        hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

        if (!hash->buckets) {
                kfree(hash);
                return NULL;
        }

        hash->size_bits = size_bits;

        return hash;
}


static int ftrace_add_mod(struct trace_array *tr,
                          const char *func, const char *module,
                          int enable)
{
        struct ftrace_mod_load *ftrace_mod;
        struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;

        ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
        if (!ftrace_mod)
                return -ENOMEM;

        ftrace_mod->func = kstrdup(func, GFP_KERNEL);
        ftrace_mod->module = kstrdup(module, GFP_KERNEL);
        ftrace_mod->enable = enable;

        if (!ftrace_mod->func || !ftrace_mod->module)
                goto out_free;

        list_add(&ftrace_mod->list, mod_head);

        return 0;

 out_free:
        free_ftrace_mod(ftrace_mod);

        return -ENOMEM;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
        struct ftrace_func_entry *entry;
        struct ftrace_hash *new_hash;
        int size;
        int ret;
        int i;

        new_hash = alloc_ftrace_hash(size_bits);
        if (!new_hash)
                return NULL;

        if (hash)
                new_hash->flags = hash->flags;

        /* Empty hash? */
        if (ftrace_hash_empty(hash))
                return new_hash;

        size = 1 << hash->size_bits;
        for (i = 0; i < size; i++) {
                hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
                        ret = add_hash_entry(new_hash, entry->ip);
                        if (ret < 0)
                                goto free_hash;
                }
        }

        FTRACE_WARN_ON(new_hash->count != hash->count);

        return new_hash;

 free_hash:
        free_ftrace_hash(new_hash);
        return NULL;
}

static void
ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
                                       struct ftrace_hash *new_hash);

static struct ftrace_hash *dup_hash(struct ftrace_hash *src, int size)
{
        struct ftrace_func_entry *entry;
        struct ftrace_hash *new_hash;
        struct hlist_head *hhd;
        struct hlist_node *tn;
        int bits = 0;
        int i;

        /*
         * Use around half the size (the highest set bit of it). A size
         * of 0 or 1 gives 0 bits, which alloc_ftrace_hash() maps to a
         * single bucket, so small sources are fine.
         */
        bits = fls(size / 2);

        /* Don't allocate too much */
        if (bits > FTRACE_HASH_MAX_BITS)
                bits = FTRACE_HASH_MAX_BITS;

        new_hash = alloc_ftrace_hash(bits);
        if (!new_hash)
                return NULL;

        new_hash->flags = src->flags;

        size = 1 << src->size_bits;
        for (i = 0; i < size; i++) {
                hhd = &src->buckets[i];
                hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
                        remove_hash_entry(src, entry);
                        __add_hash_entry(new_hash, entry);
                }
        }
        return new_hash;
}
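
/*
 * Example of the sizing above: a source hash holding 100 ips gives
 * bits = fls(100 / 2) = fls(50) = 6, i.e. a 64-bucket hash, keeping the
 * average bucket depth below two entries.
 */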

static struct ftrace_hash *
__ftrace_hash_move(struct ftrace_hash *src)
{
        int size = src->count;

        /*
         * If the new source is empty, just return the empty_hash.
         */
        if (ftrace_hash_empty(src))
                return EMPTY_HASH;

        return dup_hash(src, size);
}

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
                 struct ftrace_hash **dst, struct ftrace_hash *src)
{
        struct ftrace_hash *new_hash;
        int ret;

        /* Reject setting notrace hash on IPMODIFY ftrace_ops */
        if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
                return -EINVAL;

        new_hash = __ftrace_hash_move(src);
        if (!new_hash)
                return -ENOMEM;

        /* Make sure this can be applied if it is IPMODIFY ftrace_ops */
        if (enable) {
                /* IPMODIFY should be updated only when the filter_hash is updated */
                ret = ftrace_hash_ipmodify_update(ops, new_hash);
                if (ret < 0) {
                        free_ftrace_hash(new_hash);
                        return ret;
                }
        }

        /*
         * Remove the current set, update the hash and add
         * them back.
         */
        ftrace_hash_rec_disable_modify(ops, enable);

        rcu_assign_pointer(*dst, new_hash);

        ftrace_hash_rec_enable_modify(ops, enable);

        return 0;
}

static bool hash_contains_ip(unsigned long ip,
                             struct ftrace_ops_hash *hash)
{
        /*
         * The function record is a match if it exists in the filter
         * hash and not in the notrace hash. Note, an empty hash is
         * considered a match for the filter hash, but an empty
         * notrace hash is considered not in the notrace hash.
         */
        return (ftrace_hash_empty(hash->filter_hash) ||
                __ftrace_lookup_ip(hash->filter_hash, ip)) &&
                (ftrace_hash_empty(hash->notrace_hash) ||
                 !__ftrace_lookup_ip(hash->notrace_hash, ip));
}
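
/*
 * For instance, with "kmalloc" as the only filter_hash entry and an empty
 * notrace hash, hash_contains_ip() is true only for kmalloc's record;
 * with both hashes empty, every ip matches (trace everything).
 */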

/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu().
 */
int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
        struct ftrace_ops_hash hash;
        int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        /*
         * There's a small race when adding ops in which the ftrace handler
         * that wants regs may be called without them. We cannot
         * allow that handler to be called if regs is NULL.
         */
        if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
                return 0;
#endif

        rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
        rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);

        if (hash_contains_ip(ip, &hash))
                ret = 1;
        else
                ret = 0;

        return ret;
}

/*
 * This is a double for loop. Do not use 'break' to break out of it;
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)                                 \
        for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
                int _____i;                                             \
                for (_____i = 0; _____i < pg->index; _____i++) {        \
                        rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()             \
                }                               \
        }

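/*
 * Typical (illustrative) use of the pair above, exiting with a goto as
 * required:
 *
 *        do_for_each_ftrace_rec(pg, rec) {
 *                if (rec->ip == ip)
 *                        goto found;
 *        } while_for_each_ftrace_rec();
 *        return NULL;
 * found:
 *        ...
 */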

static int ftrace_cmp_recs(const void *a, const void *b)
{
        const struct dyn_ftrace *key = a;
        const struct dyn_ftrace *rec = b;

        if (key->flags < rec->ip)
                return -1;
        if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
                return 1;
        return 0;
}

static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec = NULL;
        struct dyn_ftrace key;

        key.ip = start;
        key.flags = end;        /* overload flags, as it is unsigned long */

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                if (end < pg->records[0].ip ||
                    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
                        continue;
                rec = bsearch(&key, pg->records, pg->index,
                              sizeof(struct dyn_ftrace),
                              ftrace_cmp_recs);
                if (rec)
                        break;
        }
        return rec;
}

/**
 * ftrace_location_range - return the first address of a traced location
 *      if it touches the given ip range
 * @start: start of range to search.
 * @end: end of range to search (inclusive). @end points to the last byte
 *      to check.
 *
 * Returns rec->ip if the related ftrace location is at least partly within
 * the given address range. That is, the first address of the instruction
 * that is either a NOP or call to the function tracer. It checks the ftrace
 * internal tables to determine if the address belongs or not.
 */
unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
        struct dyn_ftrace *rec;

        rec = lookup_rec(start, end);
        if (rec)
                return rec->ip;

        return 0;
}

/**
 * ftrace_location - return the address of a traced location if @ip is one
 * @ip: the instruction pointer to check
 *
 * Returns rec->ip if the @ip given is a pointer to an ftrace location.
 * That is, the instruction that is either a NOP or a call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
unsigned long ftrace_location(unsigned long ip)
{
        return ftrace_location_range(ip, ip);
}
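
/*
 * A typical use, as e.g. the kprobes code does, is to test whether an
 * address can be hooked through ftrace before falling back to a breakpoint:
 *
 *        if (ftrace_location(addr))
 *                ...     (addr lies within an mcount/fentry patch site)
 */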

/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if the range from @start to @end contains an ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(const void *start, const void *end)
{
        unsigned long ret;

        ret = ftrace_location_range((unsigned long)start,
                                    (unsigned long)end);

        return (int)!!ret;
}

/* Test if ops registered to this rec needs regs */
static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
{
        struct ftrace_ops *ops;
        bool keep_regs = false;

        for (ops = ftrace_ops_list;
             ops != &ftrace_list_end; ops = ops->next) {
                /* pass rec in as regs to have non-NULL val */
                if (ftrace_ops_test(ops, rec->ip, rec)) {
                        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
                                keep_regs = true;
                                break;
                        }
                }
        }

        return keep_regs;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
static struct ftrace_ops *
ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude);
static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);

static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
                                     int filter_hash,
                                     bool inc)
{
        struct ftrace_hash *hash;
        struct ftrace_hash *other_hash;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        bool update = false;
        int count = 0;
        int all = false;

        /* Only update if the ops has been registered */
        if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
                return false;

        /*
         * In the filter_hash case:
         *   If the count is zero, we update all records.
         *   Otherwise we just update the items in the hash.
         *
         * In the notrace_hash case:
         *   We enable the update in the hash.
         *   As disabling notrace means enabling the tracing,
         *   and enabling notrace means disabling, the inc variable
         *   gets inverted.
1661         */
1662        if (filter_hash) {
1663                hash = ops->func_hash->filter_hash;
1664                other_hash = ops->func_hash->notrace_hash;
1665                if (ftrace_hash_empty(hash))
1666                        all = true;
1667        } else {
1668                inc = !inc;
1669                hash = ops->func_hash->notrace_hash;
1670                other_hash = ops->func_hash->filter_hash;
1671                /*
1672                 * If the notrace hash has no items,
1673                 * then there's nothing to do.
1674                 */
1675                if (ftrace_hash_empty(hash))
1676                        return false;
1677        }
1678
1679        do_for_each_ftrace_rec(pg, rec) {
1680                int in_other_hash = 0;
1681                int in_hash = 0;
1682                int match = 0;
1683
1684                if (rec->flags & FTRACE_FL_DISABLED)
1685                        continue;
1686
1687                if (all) {
1688                        /*
1689                         * Only the filter_hash affects all records.
1690                         * Update if the record is not in the notrace hash.
1691                         */
1692                        if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1693                                match = 1;
1694                } else {
1695                        in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1696                        in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1697
1698                        /*
1699                         * If filter_hash is set, we want to match all functions
1700                         * that are in the hash but not in the other hash.
1701                         *
1702                         * If filter_hash is not set, then we are decrementing.
1703         * That means we match anything that is in the hash
1704         * and also in the other_hash (or the other_hash is empty,
1705         * which matches everything). Those are the functions whose
1706         * enabled state this notrace hash actually changes.
1707                         */
1708                        if (filter_hash && in_hash && !in_other_hash)
1709                                match = 1;
1710                        else if (!filter_hash && in_hash &&
1711                                 (in_other_hash || ftrace_hash_empty(other_hash)))
1712                                match = 1;
1713                }
1714                if (!match)
1715                        continue;
1716
1717                if (inc) {
1718                        rec->flags++;
1719                        if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
1720                                return false;
1721
1722                        if (ops->flags & FTRACE_OPS_FL_DIRECT)
1723                                rec->flags |= FTRACE_FL_DIRECT;
1724
1725                        /*
1726                         * If there's only a single callback registered to a
1727                         * function, and the ops has a trampoline registered
1728                         * for it, then we can call it directly.
1729                         */
1730                        if (ftrace_rec_count(rec) == 1 && ops->trampoline)
1731                                rec->flags |= FTRACE_FL_TRAMP;
1732                        else
1733                                /*
1734                                 * If we are adding another function callback
1735                                 * to this function, and the previous had a
1736                                 * custom trampoline in use, then we need to go
1737                                 * back to the default trampoline.
1738                                 */
1739                                rec->flags &= ~FTRACE_FL_TRAMP;
1740
1741                        /*
1742                         * If any ops wants regs saved for this function
1743                         * then all ops will get saved regs.
1744                         */
1745                        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1746                                rec->flags |= FTRACE_FL_REGS;
1747                } else {
1748                        if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
1749                                return false;
1750                        rec->flags--;
1751
1752                        /*
1753                         * Only the internal direct_ops should have the
1754                         * DIRECT flag set. Thus, if it is removing a
1755                         * function, then that function should no longer
1756                         * be direct.
1757                         */
1758                        if (ops->flags & FTRACE_OPS_FL_DIRECT)
1759                                rec->flags &= ~FTRACE_FL_DIRECT;
1760
1761                        /*
1762                         * If the rec had REGS enabled and the ops that is
1763                         * being removed had REGS set, then see if there is
1764                         * still any ops for this record that wants regs.
1765                         * If not, we can stop recording them.
1766                         */
1767                        if (ftrace_rec_count(rec) > 0 &&
1768                            rec->flags & FTRACE_FL_REGS &&
1769                            ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1770                                if (!test_rec_ops_needs_regs(rec))
1771                                        rec->flags &= ~FTRACE_FL_REGS;
1772                        }
1773
1774                        /*
1775                         * The TRAMP needs to be set only if the rec
1776                         * count is decremented to one, and the ops that
1777                         * is left has a trampoline, as TRAMP can only
1778                         * be enabled if there is only a single ops
1779                         * attached to the record.
1780                         */
1781                        if (ftrace_rec_count(rec) == 1 &&
1782                            ftrace_find_tramp_ops_any_other(rec, ops))
1783                                rec->flags |= FTRACE_FL_TRAMP;
1784                        else
1785                                rec->flags &= ~FTRACE_FL_TRAMP;
1786
1787                        /*
1788                         * flags will be cleared in ftrace_check_record()
1789                         * if rec count is zero.
1790                         */
1791                }
1792                count++;
1793
1794                /* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
1795                update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE;
1796
1797                /* Shortcut, if we handled all records, we are done. */
1798                if (!all && count == hash->count)
1799                        return update;
1800        } while_for_each_ftrace_rec();
1801
1802        return update;
1803}
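
/*
 * The matching rule above, reduced to booleans for clarity (a sketch;
 * the names below are illustrative, not kernel API):
 */
#if 0
static bool rec_matches(bool filter_hash, bool all, bool in_hash,
			bool in_other_hash, bool other_empty)
{
	if (all)		/* empty filter hash: all but notrace'd recs */
		return !in_other_hash;
	if (filter_hash)	/* in the filter and not in notrace */
		return in_hash && !in_other_hash;
	/* notrace side: only recs whose enabled state this hash controls */
	return in_hash && (in_other_hash || other_empty);
}
#endif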
1804
1805static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
1806                                    int filter_hash)
1807{
1808        return __ftrace_hash_rec_update(ops, filter_hash, 0);
1809}
1810
1811static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
1812                                   int filter_hash)
1813{
1814        return __ftrace_hash_rec_update(ops, filter_hash, 1);
1815}
1816
1817static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
1818                                          int filter_hash, int inc)
1819{
1820        struct ftrace_ops *op;
1821
1822        __ftrace_hash_rec_update(ops, filter_hash, inc);
1823
1824        if (ops->func_hash != &global_ops.local_hash)
1825                return;
1826
1827        /*
1828         * If the ops shares the global_ops hash, then we need to update
1829         * all ops that are enabled and use this hash.
1830         */
1831        do_for_each_ftrace_op(op, ftrace_ops_list) {
1832                /* Already done */
1833                if (op == ops)
1834                        continue;
1835                if (op->func_hash == &global_ops.local_hash)
1836                        __ftrace_hash_rec_update(op, filter_hash, inc);
1837        } while_for_each_ftrace_op(op);
1838}
1839
1840static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
1841                                           int filter_hash)
1842{
1843        ftrace_hash_rec_update_modify(ops, filter_hash, 0);
1844}
1845
1846static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
1847                                          int filter_hash)
1848{
1849        ftrace_hash_rec_update_modify(ops, filter_hash, 1);
1850}
1851
1852/*
1853 * Try to update the IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
1854 * or no update is needed, -EBUSY if it detects a conflict of the flag
1855 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
1856 * Note that old_hash and new_hash have the following meanings:
1857 *  - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
1858 *  - If the hash is EMPTY_HASH, it hits nothing
1859 *  - Anything else hits the recs which match the hash entries.
1860 */
1861static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
1862                                         struct ftrace_hash *old_hash,
1863                                         struct ftrace_hash *new_hash)
1864{
1865        struct ftrace_page *pg;
1866        struct dyn_ftrace *rec, *end = NULL;
1867        int in_old, in_new;
1868
1869        /* Only update if the ops has been registered */
1870        if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1871                return 0;
1872
1873        if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
1874                return 0;
1875
1876        /*
1877         * Since the IPMODIFY is a very address sensitive action, we do not
1878         * allow ftrace_ops to set all functions to new hash.
1879         */
1880        if (!new_hash || !old_hash)
1881                return -EINVAL;
1882
1883        /* Update rec->flags */
1884        do_for_each_ftrace_rec(pg, rec) {
1885
1886                if (rec->flags & FTRACE_FL_DISABLED)
1887                        continue;
1888
1889                /* We only need to update the records that differ between the hashes */
1890                in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1891                in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1892                if (in_old == in_new)
1893                        continue;
1894
1895                if (in_new) {
1896                        /* New entries must ensure no others are using it */
1897                        if (rec->flags & FTRACE_FL_IPMODIFY)
1898                                goto rollback;
1899                        rec->flags |= FTRACE_FL_IPMODIFY;
1900                } else /* Removed entry */
1901                        rec->flags &= ~FTRACE_FL_IPMODIFY;
1902        } while_for_each_ftrace_rec();
1903
1904        return 0;
1905
1906rollback:
1907        end = rec;
1908
1909        /* Roll back what we did above */
1910        do_for_each_ftrace_rec(pg, rec) {
1911
1912                if (rec->flags & FTRACE_FL_DISABLED)
1913                        continue;
1914
1915                if (rec == end)
1916                        goto err_out;
1917
1918                in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1919                in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1920                if (in_old == in_new)
1921                        continue;
1922
1923                if (in_new)
1924                        rec->flags &= ~FTRACE_FL_IPMODIFY;
1925                else
1926                        rec->flags |= FTRACE_FL_IPMODIFY;
1927        } while_for_each_ftrace_rec();
1928
1929err_out:
1930        return -EBUSY;
1931}
1932
1933static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
1934{
1935        struct ftrace_hash *hash = ops->func_hash->filter_hash;
1936
1937        if (ftrace_hash_empty(hash))
1938                hash = NULL;
1939
1940        return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
1941}
1942
1943/* Disabling always succeeds */
1944static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
1945{
1946        struct ftrace_hash *hash = ops->func_hash->filter_hash;
1947
1948        if (ftrace_hash_empty(hash))
1949                hash = NULL;
1950
1951        __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
1952}
1953
1954static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1955                                       struct ftrace_hash *new_hash)
1956{
1957        struct ftrace_hash *old_hash = ops->func_hash->filter_hash;
1958
1959        if (ftrace_hash_empty(old_hash))
1960                old_hash = NULL;
1961
1962        if (ftrace_hash_empty(new_hash))
1963                new_hash = NULL;
1964
1965        return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
1966}
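
/*
 * Usage sketch: live patching is the in-tree IPMODIFY user. An ops like
 * the one below rewrites regs->ip of every function it hooks, and the
 * -EBUSY above guarantees at most one such ops per function
 * (my_klp_handler is an assumed name, not a real symbol):
 */
#if 0
static struct ftrace_ops my_klp_ops = {
	.func	= my_klp_handler,
	.flags	= FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
};
#endif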
1967
1968static void print_ip_ins(const char *fmt, const unsigned char *p)
1969{
1970        char ins[MCOUNT_INSN_SIZE];
1971        int i;
1972
1973        if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) {
1974                printk(KERN_CONT "%s[FAULT] %px\n", fmt, p);
1975                return;
1976        }
1977
1978        printk(KERN_CONT "%s", fmt);
1979
1980        for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1981                printk(KERN_CONT "%s%02x", i ? ":" : "", ins[i]);
1982}
1983
1984enum ftrace_bug_type ftrace_bug_type;
1985const void *ftrace_expected;
1986
1987static void print_bug_type(void)
1988{
1989        switch (ftrace_bug_type) {
1990        case FTRACE_BUG_UNKNOWN:
1991                break;
1992        case FTRACE_BUG_INIT:
1993                pr_info("Initializing ftrace call sites\n");
1994                break;
1995        case FTRACE_BUG_NOP:
1996                pr_info("Setting ftrace call site to NOP\n");
1997                break;
1998        case FTRACE_BUG_CALL:
1999                pr_info("Setting ftrace call site to call ftrace function\n");
2000                break;
2001        case FTRACE_BUG_UPDATE:
2002                pr_info("Updating ftrace call site to call a different ftrace function\n");
2003                break;
2004        }
2005}
2006
2007/**
2008 * ftrace_bug - report and shutdown function tracer
2009 * @failed: The failed type (EFAULT, EINVAL, EPERM)
2010 * @rec: The record that failed
2011 *
2012 * The arch code that enables or disables the function tracing
2013 * can call ftrace_bug() when it has detected a problem in
2014 * modifying the code. @failed should be one of:
2015 * EFAULT - if the problem happens on reading the @ip address
2016 * EINVAL - if what is read at @ip is not what was expected
2017 * EPERM - if the problem happens on writing to the @ip address
2018 */
2019void ftrace_bug(int failed, struct dyn_ftrace *rec)
2020{
2021        unsigned long ip = rec ? rec->ip : 0;
2022
2023        pr_info("------------[ ftrace bug ]------------\n");
2024
2025        switch (failed) {
2026        case -EFAULT:
2027                pr_info("ftrace faulted on modifying ");
2028                print_ip_sym(KERN_INFO, ip);
2029                break;
2030        case -EINVAL:
2031                pr_info("ftrace failed to modify ");
2032                print_ip_sym(KERN_INFO, ip);
2033                print_ip_ins(" actual:   ", (unsigned char *)ip);
2034                pr_cont("\n");
2035                if (ftrace_expected) {
2036                        print_ip_ins(" expected: ", ftrace_expected);
2037                        pr_cont("\n");
2038                }
2039                break;
2040        case -EPERM:
2041                pr_info("ftrace faulted on writing ");
2042                print_ip_sym(KERN_INFO, ip);
2043                break;
2044        default:
2045                pr_info("ftrace faulted on unknown error ");
2046                print_ip_sym(KERN_INFO, ip);
2047        }
2048        print_bug_type();
2049        if (rec) {
2050                struct ftrace_ops *ops = NULL;
2051
2052                pr_info("ftrace record flags: %lx\n", rec->flags);
2053                pr_cont(" (%ld)%s", ftrace_rec_count(rec),
2054                        rec->flags & FTRACE_FL_REGS ? " R" : "  ");
2055                if (rec->flags & FTRACE_FL_TRAMP_EN) {
2056                        ops = ftrace_find_tramp_ops_any(rec);
2057                        if (ops) {
2058                                do {
2059                                        pr_cont("\ttramp: %pS (%pS)",
2060                                                (void *)ops->trampoline,
2061                                                (void *)ops->func);
2062                                        ops = ftrace_find_tramp_ops_next(rec, ops);
2063                                } while (ops);
2064                        } else
2065                                pr_cont("\ttramp: ERROR!");
2066
2067                }
2068                ip = ftrace_get_addr_curr(rec);
2069                pr_cont("\n expected tramp: %lx\n", ip);
2070        }
2071
2072        FTRACE_WARN_ON_ONCE(1);
2073}
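
/*
 * Sketch of where the error codes come from (the shape of a typical
 * arch ftrace_modify_code() helper, not verbatim arch code; cur,
 * old_insn, new_insn and patch_text() are assumed names):
 */
#if 0
	if (copy_from_kernel_nofault(cur, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;		/* could not read @ip */
	if (memcmp(cur, old_insn, MCOUNT_INSN_SIZE))
		return -EINVAL;		/* unexpected bytes at @ip */
	if (patch_text((void *)ip, new_insn))
		return -EPERM;		/* could not write @ip */
	return 0;
#endif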
2074
2075static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
2076{
2077        unsigned long flag = 0UL;
2078
2079        ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2080
2081        if (rec->flags & FTRACE_FL_DISABLED)
2082                return FTRACE_UPDATE_IGNORE;
2083
2084        /*
2085         * If we are updating calls:
2086         *
2087         *   If the record has a ref count, then we need to enable it
2088         *   because someone is using it.
2089         *
2090         *   Otherwise we make sure it's disabled.
2091         *
2092         * If we are disabling calls, then disable all records that
2093         * are enabled.
2094         */
2095        if (enable && ftrace_rec_count(rec))
2096                flag = FTRACE_FL_ENABLED;
2097
2098        /*
2099         * If enabling and the REGS flag does not match the REGS_EN, or
2100         * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
2101         * this record. Set flags to fail the compare against ENABLED.
2102         * Same for direct calls.
2103         */
2104        if (flag) {
2105                if (!(rec->flags & FTRACE_FL_REGS) !=
2106                    !(rec->flags & FTRACE_FL_REGS_EN))
2107                        flag |= FTRACE_FL_REGS;
2108
2109                if (!(rec->flags & FTRACE_FL_TRAMP) !=
2110                    !(rec->flags & FTRACE_FL_TRAMP_EN))
2111                        flag |= FTRACE_FL_TRAMP;
2112
2113                /*
2114                 * Direct calls are special, as the count matters.
2115                 * We must flag the record for update if DIRECT and
2116                 * DIRECT_EN do not match, but only if the count
2117                 * is 1. That's because, if the count is anything
2118                 * other than one, we do not want the direct call
2119                 * enabled (it will be done via the direct helper).
2120                 * But if DIRECT_EN is set and the count is not
2121                 * one, we need to clear it.
2122                 */
2123                if (ftrace_rec_count(rec) == 1) {
2124                        if (!(rec->flags & FTRACE_FL_DIRECT) !=
2125                            !(rec->flags & FTRACE_FL_DIRECT_EN))
2126                                flag |= FTRACE_FL_DIRECT;
2127                } else if (rec->flags & FTRACE_FL_DIRECT_EN) {
2128                        flag |= FTRACE_FL_DIRECT;
2129                }
2130        }
2131
2132        /* If the state of this record hasn't changed, then do nothing */
2133        if ((rec->flags & FTRACE_FL_ENABLED) == flag)
2134                return FTRACE_UPDATE_IGNORE;
2135
2136        if (flag) {
2137                /* Save off if rec is being enabled (for return value) */
2138                flag ^= rec->flags & FTRACE_FL_ENABLED;
2139
2140                if (update) {
2141                        rec->flags |= FTRACE_FL_ENABLED;
2142                        if (flag & FTRACE_FL_REGS) {
2143                                if (rec->flags & FTRACE_FL_REGS)
2144                                        rec->flags |= FTRACE_FL_REGS_EN;
2145                                else
2146                                        rec->flags &= ~FTRACE_FL_REGS_EN;
2147                        }
2148                        if (flag & FTRACE_FL_TRAMP) {
2149                                if (rec->flags & FTRACE_FL_TRAMP)
2150                                        rec->flags |= FTRACE_FL_TRAMP_EN;
2151                                else
2152                                        rec->flags &= ~FTRACE_FL_TRAMP_EN;
2153                        }
2154
2155                        if (flag & FTRACE_FL_DIRECT) {
2156                                /*
2157                                 * If there's only one user (direct_ops helper)
2158                                 * then we can call the direct function
2159                                 * directly (no ftrace trampoline).
2160                                 */
2161                                if (ftrace_rec_count(rec) == 1) {
2162                                        if (rec->flags & FTRACE_FL_DIRECT)
2163                                                rec->flags |= FTRACE_FL_DIRECT_EN;
2164                                        else
2165                                                rec->flags &= ~FTRACE_FL_DIRECT_EN;
2166                                } else {
2167                                        /*
2168                                         * Can only call directly if there's
2169                                         * only one callback to the function.
2170                                         */
2171                                        rec->flags &= ~FTRACE_FL_DIRECT_EN;
2172                                }
2173                        }
2174                }
2175
2176                /*
2177                 * If this record is being updated from a nop, then
2178                 *   return UPDATE_MAKE_CALL.
2179                 * Otherwise,
2180                 *   return UPDATE_MODIFY_CALL to tell the caller to convert
2181                 *   from a save-regs function to a non-save-regs one, or
2182                 *   vice versa, or from a trampoline call.
2183                 */
2184                if (flag & FTRACE_FL_ENABLED) {
2185                        ftrace_bug_type = FTRACE_BUG_CALL;
2186                        return FTRACE_UPDATE_MAKE_CALL;
2187                }
2188
2189                ftrace_bug_type = FTRACE_BUG_UPDATE;
2190                return FTRACE_UPDATE_MODIFY_CALL;
2191        }
2192
2193        if (update) {
2194                /* If there's no more users, clear all flags */
2195                if (!ftrace_rec_count(rec))
2196                        rec->flags = 0;
2197                else
2198                        /*
2199                         * Just disable the record, but keep the ops TRAMP
2200                         * and REGS states. The _EN flags must be disabled though.
2201                         */
2202                        rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
2203                                        FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN);
2204        }
2205
2206        ftrace_bug_type = FTRACE_BUG_NOP;
2207        return FTRACE_UPDATE_MAKE_NOP;
2208}
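
/*
 * Worked example of the logic above: a rec with one ops attached gains
 * SAVE_REGS, so FTRACE_FL_REGS is set while FTRACE_FL_REGS_EN is not.
 * The mismatch folds FTRACE_FL_REGS into "flag", the compare against
 * ENABLED fails, and since the rec was already enabled the result is
 * FTRACE_UPDATE_MODIFY_CALL: the call site is rewritten from the plain
 * trampoline to the regs-saving one.
 */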
2209
2210/**
2211 * ftrace_update_record - set a record that now is tracing or not
2212 * @rec: the record to update
2213 * @enable: set to true if the record is tracing, false to force disable
2214 *
2215 * The records that represent all functions that can be traced need
2216 * to be updated when tracing has been enabled.
2217 */
2218int ftrace_update_record(struct dyn_ftrace *rec, bool enable)
2219{
2220        return ftrace_check_record(rec, enable, true);
2221}
2222
2223/**
2224 * ftrace_test_record - check if the record has been enabled or not
2225 * @rec: the record to test
2226 * @enable: set to true to check if enabled, false if it is disabled
2227 *
2228 * The arch code may need to test if a record is already set to
2229 * tracing to determine how to modify the function code that it
2230 * represents.
2231 */
2232int ftrace_test_record(struct dyn_ftrace *rec, bool enable)
2233{
2234        return ftrace_check_record(rec, enable, false);
2235}
2236
2237static struct ftrace_ops *
2238ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
2239{
2240        struct ftrace_ops *op;
2241        unsigned long ip = rec->ip;
2242
2243        do_for_each_ftrace_op(op, ftrace_ops_list) {
2244
2245                if (!op->trampoline)
2246                        continue;
2247
2248                if (hash_contains_ip(ip, op->func_hash))
2249                        return op;
2250        } while_for_each_ftrace_op(op);
2251
2252        return NULL;
2253}
2254
2255static struct ftrace_ops *
2256ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude)
2257{
2258        struct ftrace_ops *op;
2259        unsigned long ip = rec->ip;
2260
2261        do_for_each_ftrace_op(op, ftrace_ops_list) {
2262
2263                if (op == op_exclude || !op->trampoline)
2264                        continue;
2265
2266                if (hash_contains_ip(ip, op->func_hash))
2267                        return op;
2268        } while_for_each_ftrace_op(op);
2269
2270        return NULL;
2271}
2272
2273static struct ftrace_ops *
2274ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
2275                           struct ftrace_ops *op)
2276{
2277        unsigned long ip = rec->ip;
2278
2279        while_for_each_ftrace_op(op) {
2280
2281                if (!op->trampoline)
2282                        continue;
2283
2284                if (hash_contains_ip(ip, op->func_hash))
2285                        return op;
2286        }
2287
2288        return NULL;
2289}
2290
2291static struct ftrace_ops *
2292ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
2293{
2294        struct ftrace_ops *op;
2295        unsigned long ip = rec->ip;
2296
2297        /*
2298         * Need to check removed ops first.
2299         * If they are being removed, and this rec has a tramp,
2300         * and this rec is in the ops list, then it would be the
2301         * one with the tramp.
2302         */
2303        if (removed_ops) {
2304                if (hash_contains_ip(ip, &removed_ops->old_hash))
2305                        return removed_ops;
2306        }
2307
2308        /*
2309         * Need to find the current trampoline for a rec.
2310         * Now, a trampoline is only attached to a rec if there
2311         * was a single 'ops' attached to it. But this can be called
2312         * when we are adding another op to the rec or removing the
2313         * current one. Thus, if the op is being added, we can
2314         * ignore it because it hasn't attached itself to the rec
2315         * yet.
2316         *
2317         * If an ops is being modified (hooking to different functions)
2318         * then we don't care about the new functions that are being
2319         * added, just the old ones (that are probably being removed).
2320         *
2321         * If we are adding an ops to a function that already uses
2322         * a trampoline, that trampoline needs to be removed
2323         * (trampolines are only for single ops), so an ops that is
2324         * not being modified also needs to be checked.
2325         */
2326        do_for_each_ftrace_op(op, ftrace_ops_list) {
2327
2328                if (!op->trampoline)
2329                        continue;
2330
2331                /*
2332                 * If the ops is being added, it hasn't gotten to
2333                 * the point to be removed from this tree yet.
2334                 */
2335                if (op->flags & FTRACE_OPS_FL_ADDING)
2336                        continue;
2337
2338
2339                /*
2340                 * If the ops is being modified and is in the old
2341                 * hash, then it is probably being removed from this
2342                 * function.
2343                 */
2344                if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
2345                    hash_contains_ip(ip, &op->old_hash))
2346                        return op;
2347                /*
2348                 * If the ops is not being added or modified, and it's
2349                 * in its normal filter hash, then this must be the one
2350                 * we want!
2351                 */
2352                if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
2353                    hash_contains_ip(ip, op->func_hash))
2354                        return op;
2355
2356        } while_for_each_ftrace_op(op);
2357
2358        return NULL;
2359}
2360
2361static struct ftrace_ops *
2362ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
2363{
2364        struct ftrace_ops *op;
2365        unsigned long ip = rec->ip;
2366
2367        do_for_each_ftrace_op(op, ftrace_ops_list) {
2368                /* Return the first ops whose hash matches this ip */
2369                if (hash_contains_ip(ip, op->func_hash))
2370                        return op;
2371        } while_for_each_ftrace_op(op);
2372
2373        return NULL;
2374}
2375
2376#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
2377/* Protected by rcu_tasks for reading, and direct_mutex for writing */
2378static struct ftrace_hash *direct_functions = EMPTY_HASH;
2379static DEFINE_MUTEX(direct_mutex);
2380int ftrace_direct_func_count;
2381
2382/*
2383 * Search the direct_functions hash to see if the given instruction pointer
2384 * has a direct caller attached to it.
2385 */
2386unsigned long ftrace_find_rec_direct(unsigned long ip)
2387{
2388        struct ftrace_func_entry *entry;
2389
2390        entry = __ftrace_lookup_ip(direct_functions, ip);
2391        if (!entry)
2392                return 0;
2393
2394        return entry->direct;
2395}
2396
2397static void call_direct_funcs(unsigned long ip, unsigned long pip,
2398                              struct ftrace_ops *ops, struct ftrace_regs *fregs)
2399{
2400        struct pt_regs *regs = ftrace_get_regs(fregs);
2401        unsigned long addr;
2402
2403        addr = ftrace_find_rec_direct(ip);
2404        if (!addr)
2405                return;
2406
2407        arch_ftrace_set_direct_caller(regs, addr);
2408}
2409
2410struct ftrace_ops direct_ops = {
2411        .func           = call_direct_funcs,
2412        .flags          = FTRACE_OPS_FL_IPMODIFY
2413                          | FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS
2414                          | FTRACE_OPS_FL_PERMANENT,
2415        /*
2416         * By declaring the builtin trampoline as this ops' trampoline,
2417         * one will never be allocated for it. Allocated
2418         * trampolines should not call direct functions.
2419         * The direct_ops should only be called by the builtin
2420         * ftrace_regs_caller trampoline.
2421         */
2422        .trampoline     = FTRACE_REGS_ADDR,
2423};
2424#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
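
/*
 * Minimal usage sketch, modeled on samples/ftrace/ftrace-direct.c
 * (my_tramp stands in for an arch-specific assembly stub that preserves
 * the calling convention around the custom call):
 */
#if 0
	ret = register_ftrace_direct((unsigned long)wake_up_process,
				     (unsigned long)my_tramp);
#endif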
2425
2426/**
2427 * ftrace_get_addr_new - Get the call address to set to
2428 * @rec:  The ftrace record descriptor
2429 *
2430 * If the record has the FTRACE_FL_REGS set, that means that it
2431 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
2432 * is not set, then it wants to convert to the normal callback.
2433 *
2434 * Returns the address of the trampoline to set to
2435 */
2436unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
2437{
2438        struct ftrace_ops *ops;
2439        unsigned long addr;
2440
2441        if ((rec->flags & FTRACE_FL_DIRECT) &&
2442            (ftrace_rec_count(rec) == 1)) {
2443                addr = ftrace_find_rec_direct(rec->ip);
2444                if (addr)
2445                        return addr;
2446                WARN_ON_ONCE(1);
2447        }
2448
2449        /* Trampolines take precedence over regs */
2450        if (rec->flags & FTRACE_FL_TRAMP) {
2451                ops = ftrace_find_tramp_ops_new(rec);
2452                if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
2453                        pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2454                                (void *)rec->ip, (void *)rec->ip, rec->flags);
2455                        /* Ftrace is shutting down, return anything */
2456                        return (unsigned long)FTRACE_ADDR;
2457                }
2458                return ops->trampoline;
2459        }
2460
2461        if (rec->flags & FTRACE_FL_REGS)
2462                return (unsigned long)FTRACE_REGS_ADDR;
2463        else
2464                return (unsigned long)FTRACE_ADDR;
2465}
2466
2467/**
2468 * ftrace_get_addr_curr - Get the call address that is already there
2469 * @rec:  The ftrace record descriptor
2470 *
2471 * The FTRACE_FL_REGS_EN is set when the record already points to
2472 * a function that saves all the regs. Basically the '_EN' version
2473 * represents the current state of the function.
2474 *
2475 * Returns the address of the trampoline that is currently being called
2476 */
2477unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
2478{
2479        struct ftrace_ops *ops;
2480        unsigned long addr;
2481
2482        /* Direct calls take precedence over trampolines */
2483        if (rec->flags & FTRACE_FL_DIRECT_EN) {
2484                addr = ftrace_find_rec_direct(rec->ip);
2485                if (addr)
2486                        return addr;
2487                WARN_ON_ONCE(1);
2488        }
2489
2490        /* Trampolines take precedence over regs */
2491        if (rec->flags & FTRACE_FL_TRAMP_EN) {
2492                ops = ftrace_find_tramp_ops_curr(rec);
2493                if (FTRACE_WARN_ON(!ops)) {
2494                        pr_warn("Bad trampoline accounting at: %p (%pS)\n",
2495                                (void *)rec->ip, (void *)rec->ip);
2496                        /* Ftrace is shutting down, return anything */
2497                        return (unsigned long)FTRACE_ADDR;
2498                }
2499                return ops->trampoline;
2500        }
2501
2502        if (rec->flags & FTRACE_FL_REGS_EN)
2503                return (unsigned long)FTRACE_REGS_ADDR;
2504        else
2505                return (unsigned long)FTRACE_ADDR;
2506}
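
/*
 * Taken together, the two helpers above encode the same precedence:
 * DIRECT beats TRAMP, TRAMP beats REGS, and FTRACE_ADDR is the fallback.
 * ftrace_get_addr_new() reads the request flags while
 * ftrace_get_addr_curr() reads the *_EN flags, so comparing their
 * results tells the arch code which transition a call site needs.
 */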
2507
2508static int
2509__ftrace_replace_code(struct dyn_ftrace *rec, bool enable)
2510{
2511        unsigned long ftrace_old_addr;
2512        unsigned long ftrace_addr;
2513        int ret;
2514
2515        ftrace_addr = ftrace_get_addr_new(rec);
2516
2517        /* This needs to be done before we call ftrace_update_record */
2518        ftrace_old_addr = ftrace_get_addr_curr(rec);
2519
2520        ret = ftrace_update_record(rec, enable);
2521
2522        ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2523
2524        switch (ret) {
2525        case FTRACE_UPDATE_IGNORE:
2526                return 0;
2527
2528        case FTRACE_UPDATE_MAKE_CALL:
2529                ftrace_bug_type = FTRACE_BUG_CALL;
2530                return ftrace_make_call(rec, ftrace_addr);
2531
2532        case FTRACE_UPDATE_MAKE_NOP:
2533                ftrace_bug_type = FTRACE_BUG_NOP;
2534                return ftrace_make_nop(NULL, rec, ftrace_old_addr);
2535
2536        case FTRACE_UPDATE_MODIFY_CALL:
2537                ftrace_bug_type = FTRACE_BUG_UPDATE;
2538                return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
2539        }
2540
2541        return -1; /* unknown ftrace bug */
2542}
2543
2544void __weak ftrace_replace_code(int mod_flags)
2545{
2546        struct dyn_ftrace *rec;
2547        struct ftrace_page *pg;
2548        bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL;
2549        int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL;
2550        int failed;
2551
2552        if (unlikely(ftrace_disabled))
2553                return;
2554
2555        do_for_each_ftrace_rec(pg, rec) {
2556
2557                if (rec->flags & FTRACE_FL_DISABLED)
2558                        continue;
2559
2560                failed = __ftrace_replace_code(rec, enable);
2561                if (failed) {
2562                        ftrace_bug(failed, rec);
2563                        /* Stop processing */
2564                        return;
2565                }
2566                if (schedulable)
2567                        cond_resched();
2568        } while_for_each_ftrace_rec();
2569}
2570
2571struct ftrace_rec_iter {
2572        struct ftrace_page      *pg;
2573        int                     index;
2574};
2575
2576/**
2577 * ftrace_rec_iter_start - start up iterating over traced functions
2578 *
2579 * Returns an iterator handle that is used to iterate over all
2580 * the records that represent address locations where functions
2581 * are traced.
2582 *
2583 * May return NULL if no records are available.
2584 */
2585struct ftrace_rec_iter *ftrace_rec_iter_start(void)
2586{
2587        /*
2588         * We only use a single iterator.
2589         * Protected by the ftrace_lock mutex.
2590         */
2591        static struct ftrace_rec_iter ftrace_rec_iter;
2592        struct ftrace_rec_iter *iter = &ftrace_rec_iter;
2593
2594        iter->pg = ftrace_pages_start;
2595        iter->index = 0;
2596
2597        /* Could have empty pages */
2598        while (iter->pg && !iter->pg->index)
2599                iter->pg = iter->pg->next;
2600
2601        if (!iter->pg)
2602                return NULL;
2603
2604        return iter;
2605}
2606
2607/**
2608 * ftrace_rec_iter_next - get the next record to process.
2609 * @iter: The handle to the iterator.
2610 *
2611 * Returns the next iterator after the given iterator @iter.
2612 */
2613struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
2614{
2615        iter->index++;
2616
2617        if (iter->index >= iter->pg->index) {
2618                iter->pg = iter->pg->next;
2619                iter->index = 0;
2620
2621                /* Could have empty pages */
2622                while (iter->pg && !iter->pg->index)
2623                        iter->pg = iter->pg->next;
2624        }
2625
2626        if (!iter->pg)
2627                return NULL;
2628
2629        return iter;
2630}
2631
2632/**
2633 * ftrace_rec_iter_record - get the record at the iterator location
2634 * @iter: The current iterator location
2635 *
2636 * Returns the record that the current @iter is at.
2637 */
2638struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
2639{
2640        return &iter->pg->records[iter->index];
2641}
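
/*
 * Typical use of the iterator from arch code (a sketch; the
 * for_ftrace_rec_iter() helper in <linux/ftrace.h> wraps this loop):
 */
#if 0
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;

	for (iter = ftrace_rec_iter_start(); iter;
	     iter = ftrace_rec_iter_next(iter)) {
		rec = ftrace_rec_iter_record(iter);
		/* patch the call site at rec->ip here */
	}
#endif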
2642
2643static int
2644ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec)
2645{
2646        int ret;
2647
2648        if (unlikely(ftrace_disabled))
2649                return 0;
2650
2651        ret = ftrace_init_nop(mod, rec);
2652        if (ret) {
2653                ftrace_bug_type = FTRACE_BUG_INIT;
2654                ftrace_bug(ret, rec);
2655                return 0;
2656        }
2657        return 1;
2658}
2659
2660/*
2661 * archs can override this function if they must do something
2662 * before the modifying code is performed.
2663 */
2664int __weak ftrace_arch_code_modify_prepare(void)
2665{
2666        return 0;
2667}
2668
2669/*
2670 * archs can override this function if they must do something
2671 * after the modifying code is performed.
2672 */
2673int __weak ftrace_arch_code_modify_post_process(void)
2674{
2675        return 0;
2676}
2677
2678void ftrace_modify_all_code(int command)
2679{
2680        int update = command & FTRACE_UPDATE_TRACE_FUNC;
2681        int mod_flags = 0;
2682        int err = 0;
2683
2684        if (command & FTRACE_MAY_SLEEP)
2685                mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL;
2686
2687        /*
2688         * If the ftrace_caller calls a ftrace_ops func directly,
2689         * we need to make sure that it only traces functions it
2690         * expects to trace. When doing the switch of functions,
2691         * we need to update to the ftrace_ops_list_func first
2692         * before the transition between old and new calls are set,
2693         * as the ftrace_ops_list_func will check the ops hashes
2694         * to make sure each ops only traces the functions it
2695         * expects to trace.
2696         */
2697        if (update) {
2698                err = ftrace_update_ftrace_func(ftrace_ops_list_func);
2699                if (FTRACE_WARN_ON(err))
2700                        return;
2701        }
2702
2703        if (command & FTRACE_UPDATE_CALLS)
2704                ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL);
2705        else if (command & FTRACE_DISABLE_CALLS)
2706                ftrace_replace_code(mod_flags);
2707
2708        if (update && ftrace_trace_function != ftrace_ops_list_func) {
2709                function_trace_op = set_function_trace_op;
2710                smp_wmb();
2711                /* If irqs are disabled, we are in stop machine */
2712                if (!irqs_disabled())
2713                        smp_call_function(ftrace_sync_ipi, NULL, 1);
2714                err = ftrace_update_ftrace_func(ftrace_trace_function);
2715                if (FTRACE_WARN_ON(err))
2716                        return;
2717        }
2718
2719        if (command & FTRACE_START_FUNC_RET)
2720                err = ftrace_enable_ftrace_graph_caller();
2721        else if (command & FTRACE_STOP_FUNC_RET)
2722                err = ftrace_disable_ftrace_graph_caller();
2723        FTRACE_WARN_ON(err);
2724}
2725
2726static int __ftrace_modify_code(void *data)
2727{
2728        int *command = data;
2729
2730        ftrace_modify_all_code(*command);
2731
2732        return 0;
2733}
2734
2735/**
2736 * ftrace_run_stop_machine - go back to the stop machine method
2737 * @command: The command to tell ftrace what to do
2738 *
2739 * If an arch needs to fall back to the stop machine method,
2740 * it can call this function.
2741 */
2742void ftrace_run_stop_machine(int command)
2743{
2744        stop_machine(__ftrace_modify_code, &command, NULL);
2745}
2746
2747/**
2748 * arch_ftrace_update_code - modify the code to trace or not trace
2749 * @command: The command that needs to be done
2750 *
2751 * Archs can override this function if they do not need to
2752 * run stop_machine() to modify code.
2753 */
2754void __weak arch_ftrace_update_code(int command)
2755{
2756        ftrace_run_stop_machine(command);
2757}
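
/*
 * Sketch of an arch override (x86 patches code with text_poke() instead
 * of stopping the machine; this is the shape, not verbatim arch code):
 */
#if 0
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}
#endif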
2758
2759static void ftrace_run_update_code(int command)
2760{
2761        int ret;
2762
2763        ret = ftrace_arch_code_modify_prepare();
2764        FTRACE_WARN_ON(ret);
2765        if (ret)
2766                return;
2767
2768        /*
2769         * By default we use stop_machine() to modify the code.
2770         * But archs can do whatever they want as long as it
2771         * is safe. The stop_machine() is the safest, but also
2772         * produces the most overhead.
2773         */
2774        arch_ftrace_update_code(command);
2775
2776        ret = ftrace_arch_code_modify_post_process();
2777        FTRACE_WARN_ON(ret);
2778}
2779
2780static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
2781                                   struct ftrace_ops_hash *old_hash)
2782{
2783        ops->flags |= FTRACE_OPS_FL_MODIFYING;
2784        ops->old_hash.filter_hash = old_hash->filter_hash;
2785        ops->old_hash.notrace_hash = old_hash->notrace_hash;
2786        ftrace_run_update_code(command);
2787        ops->old_hash.filter_hash = NULL;
2788        ops->old_hash.notrace_hash = NULL;
2789        ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2790}
2791
2792static ftrace_func_t saved_ftrace_func;
2793static int ftrace_start_up;
2794
2795void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
2796{
2797}
2798
2799/* List of trace_ops that have allocated trampolines */
2800static LIST_HEAD(ftrace_ops_trampoline_list);
2801
2802static void ftrace_add_trampoline_to_kallsyms(struct ftrace_ops *ops)
2803{
2804        lockdep_assert_held(&ftrace_lock);
2805        list_add_rcu(&ops->list, &ftrace_ops_trampoline_list);
2806}
2807
2808static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops)
2809{
2810        lockdep_assert_held(&ftrace_lock);
2811        list_del_rcu(&ops->list);
2812        synchronize_rcu();
2813}
2814
2815/*
2816 * "__builtin__ftrace" is used as a module name in /proc/kallsyms for symbols
2817 * for pages allocated for ftrace purposes, even though "__builtin__ftrace" is
2818 * not a module.
2819 */
2820#define FTRACE_TRAMPOLINE_MOD "__builtin__ftrace"
2821#define FTRACE_TRAMPOLINE_SYM "ftrace_trampoline"
2822
2823static void ftrace_trampoline_free(struct ftrace_ops *ops)
2824{
2825        if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) &&
2826            ops->trampoline) {
2827                /*
2828                 * Record the text poke event before the ksymbol unregister
2829                 * event.
2830                 */
2831                perf_event_text_poke((void *)ops->trampoline,
2832                                     (void *)ops->trampoline,
2833                                     ops->trampoline_size, NULL, 0);
2834                perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
2835                                   ops->trampoline, ops->trampoline_size,
2836                                   true, FTRACE_TRAMPOLINE_SYM);
2837                /* Remove from kallsyms after the perf events */
2838                ftrace_remove_trampoline_from_kallsyms(ops);
2839        }
2840
2841        arch_ftrace_trampoline_free(ops);
2842}
2843
2844static void ftrace_startup_enable(int command)
2845{
2846        if (saved_ftrace_func != ftrace_trace_function) {
2847                saved_ftrace_func = ftrace_trace_function;
2848                command |= FTRACE_UPDATE_TRACE_FUNC;
2849        }
2850
2851        if (!command || !ftrace_enabled)
2852                return;
2853
2854        ftrace_run_update_code(command);
2855}
2856
2857static void ftrace_startup_all(int command)
2858{
2859        update_all_ops = true;
2860        ftrace_startup_enable(command);
2861        update_all_ops = false;
2862}
2863
2864int ftrace_startup(struct ftrace_ops *ops, int command)
2865{
2866        int ret;
2867
2868        if (unlikely(ftrace_disabled))
2869                return -ENODEV;
2870
2871        ret = __register_ftrace_function(ops);
2872        if (ret)
2873                return ret;
2874
2875        ftrace_start_up++;
2876
2877        /*
2878         * Note that ftrace probes use this to start up
2879         * and modify functions they will probe. But we still
2880         * set the ADDING flag for modification, as probes
2881         * do not have trampolines. If they add them in the
2882         * future, then the probes will need to distinguish
2883         * between adding and updating probes.
2884         */
2885        ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
2886
2887        ret = ftrace_hash_ipmodify_enable(ops);
2888        if (ret < 0) {
2889                /* Rollback registration process */
2890                __unregister_ftrace_function(ops);
2891                ftrace_start_up--;
2892                ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2893                if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
2894                        ftrace_trampoline_free(ops);
2895                return ret;
2896        }
2897
2898        if (ftrace_hash_rec_enable(ops, 1))
2899                command |= FTRACE_UPDATE_CALLS;
2900
2901        ftrace_startup_enable(command);
2902
2903        ops->flags &= ~FTRACE_OPS_FL_ADDING;
2904
2905        return 0;
2906}
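
/*
 * End-to-end usage sketch: ftrace_startup() is reached through
 * register_ftrace_function(). A minimal caller (all "my_" names are
 * hypothetical) looks like this:
 */
#if 0
static void my_callback(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* runs for every function matching the filter */
}

static struct ftrace_ops my_ops = {
	.func	= my_callback,
};

	/* trace only schedule(), then register (-> ftrace_startup()) */
	ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 0);
	register_ftrace_function(&my_ops);
	/* later: unregister_ftrace_function(&my_ops) -> ftrace_shutdown() */
#endif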
2907
2908int ftrace_shutdown(struct ftrace_ops *ops, int command)
2909{
2910        int ret;
2911
2912        if (unlikely(ftrace_disabled))
2913                return -ENODEV;
2914
2915        ret = __unregister_ftrace_function(ops);
2916        if (ret)
2917                return ret;
2918
2919        ftrace_start_up--;
2920        /*
2921         * Just warn in case of unbalance; no need to kill ftrace, it's not
2922         * critical, but the ftrace_call callers may never be nopped again after
2923         * further ftrace uses.
2924         */
2925        WARN_ON_ONCE(ftrace_start_up < 0);
2926
2927        /* Disabling ipmodify never fails */
2928        ftrace_hash_ipmodify_disable(ops);
2929
2930        if (ftrace_hash_rec_disable(ops, 1))
2931                command |= FTRACE_UPDATE_CALLS;
2932
2933        ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2934
2935        if (saved_ftrace_func != ftrace_trace_function) {
2936                saved_ftrace_func = ftrace_trace_function;
2937                command |= FTRACE_UPDATE_TRACE_FUNC;
2938        }
2939
2940        if (!command || !ftrace_enabled) {
2941                /*
2942                 * If these are dynamic or per_cpu ops, they still
2943                 * need their data freed. Since function tracing is
2944                 * not currently active, we can just free them
2945                 * without synchronizing all CPUs.
2946                 */
2947                if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
2948                        goto free_ops;
2949
2950                return 0;
2951        }
2952
2953        /*
2954         * If the ops uses a trampoline, then it needs to be
2955         * tested first on update.
2956         */
2957        ops->flags |= FTRACE_OPS_FL_REMOVING;
2958        removed_ops = ops;
2959
2960        /* The trampoline logic checks the old hashes */
2961        ops->old_hash.filter_hash = ops->func_hash->filter_hash;
2962        ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
2963
2964        ftrace_run_update_code(command);
2965
2966        /*
2967         * If there's no more ops registered with ftrace, run a
2968         * sanity check to make sure all rec flags are cleared.
2969         */
2970        if (rcu_dereference_protected(ftrace_ops_list,
2971                        lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
2972                struct ftrace_page *pg;
2973                struct dyn_ftrace *rec;
2974
2975                do_for_each_ftrace_rec(pg, rec) {
2976                        if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
2977                                pr_warn("  %pS flags:%lx\n",
2978                                        (void *)rec->ip, rec->flags);
2979                } while_for_each_ftrace_rec();
2980        }
2981
2982        ops->old_hash.filter_hash = NULL;
2983        ops->old_hash.notrace_hash = NULL;
2984
2985        removed_ops = NULL;
2986        ops->flags &= ~FTRACE_OPS_FL_REMOVING;
2987
2988        /*
2989         * Dynamic ops may be freed; we must make sure that all
2990         * callers are done before leaving this function.
2991         * The same goes for freeing the per_cpu data of the per_cpu
2992         * ops.
2993         */
2994        if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
2995                /*
2996                 * We need to do a hard force of sched synchronization.
2997                 * This is because we use preempt_disable() to do RCU, but
2998                 * the function tracers can be called where RCU is not watching
2999                 * (like before user_exit()). We cannot rely on the RCU
3000                 * infrastructure to do the synchronization, thus we must do it
3001                 * ourselves.
3002                 */
3003                synchronize_rcu_tasks_rude();
3004
3005                /*
3006                 * When the kernel is preemptible, tasks can be preempted
3007                 * while on a ftrace trampoline. Just scheduling a task on
3008                 * a CPU is not good enough to flush them. Calling
3009                 * synchronize_rcu_tasks() will wait for those tasks to
3010                 * execute and either schedule voluntarily or enter user space.
3011                 */
3012                if (IS_ENABLED(CONFIG_PREEMPTION))
3013                        synchronize_rcu_tasks();
3014
3015 free_ops:
3016                ftrace_trampoline_free(ops);
3017        }
3018
3019        return 0;
3020}
3021
3022static void ftrace_startup_sysctl(void)
3023{
3024        int command;
3025
3026        if (unlikely(ftrace_disabled))
3027                return;
3028
3029        /* Force update next time */
3030        saved_ftrace_func = NULL;
3031        /* ftrace_start_up is true if we want ftrace running */
3032        if (ftrace_start_up) {
3033                command = FTRACE_UPDATE_CALLS;
3034                if (ftrace_graph_active)
3035                        command |= FTRACE_START_FUNC_RET;
3036                ftrace_startup_enable(command);
3037        }
3038}
3039
3040static void ftrace_shutdown_sysctl(void)
3041{
3042        int command;
3043
3044        if (unlikely(ftrace_disabled))
3045                return;
3046
3047        /* ftrace_start_up is true if ftrace is running */
3048        if (ftrace_start_up) {
3049                command = FTRACE_DISABLE_CALLS;
3050                if (ftrace_graph_active)
3051                        command |= FTRACE_STOP_FUNC_RET;
3052                ftrace_run_update_code(command);
3053        }
3054}
3055
3056static u64              ftrace_update_time;
3057unsigned long           ftrace_update_tot_cnt;
3058unsigned long           ftrace_number_of_pages;
3059unsigned long           ftrace_number_of_groups;
3060
3061static inline int ops_traces_mod(struct ftrace_ops *ops)
3062{
3063        /*
3064         * An empty filter_hash will default to tracing the module.
3065         * But the notrace hash requires a test of individual module functions.
3066         */
3067        return ftrace_hash_empty(ops->func_hash->filter_hash) &&
3068                ftrace_hash_empty(ops->func_hash->notrace_hash);
3069}
3070
3071/*
3072 * Check if the current ops references the record.
3073 *
3074 * If the ops traces all functions, then it was already accounted for.
3075 * If the ops does not trace the current record function, skip it.
3076 * If the ops ignores the function via notrace filter, skip it.
3077 */
3078static inline bool
3079ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
3080{
3081        /* If ops isn't enabled, ignore it */
3082        if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
3083                return false;
3084
3085        /* If ops traces all then it includes this function */
3086        if (ops_traces_mod(ops))
3087                return true;
3088
3089        /* The function must be in the filter */
3090        if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
3091            !__ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
3092                return false;
3093
3094        /* If in notrace hash, we ignore it too */
3095        if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
3096                return false;
3097
3098        return true;
3099}
3100
3101static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
3102{
3103        struct ftrace_page *pg;
3104        struct dyn_ftrace *p;
3105        u64 start, stop;
3106        unsigned long update_cnt = 0;
3107        unsigned long rec_flags = 0;
3108        int i;
3109
3110        start = ftrace_now(raw_smp_processor_id());
3111
3112        /*
3113         * When a module is loaded, this function is called to convert
3114         * the calls to mcount in its text to nops, and also to create
3115         * an entry in the ftrace data. Now, if ftrace is activated
3116         * after this call, but before the module sets its text to
3117         * read-only, the modification of enabling ftrace can fail if
3118         * the read-only is done while ftrace is converting the calls.
3119         * To prevent this, the module's records are set as disabled
3120         * and will be enabled after the call to set the module's text
3121         * to read-only.
3122         */
3123        if (mod)
3124                rec_flags |= FTRACE_FL_DISABLED;
3125
3126        for (pg = new_pgs; pg; pg = pg->next) {
3127
3128                for (i = 0; i < pg->index; i++) {
3129
3130                        /* If something went wrong, bail without enabling anything */
3131                        if (unlikely(ftrace_disabled))
3132                                return -1;
3133
3134                        p = &pg->records[i];
3135                        p->flags = rec_flags;
3136
3137                        /*
3138                         * Do the initial record conversion from mcount jump
3139                         * to the NOP instructions.
3140                         */
3141                        if (!__is_defined(CC_USING_NOP_MCOUNT) &&
3142                            !ftrace_nop_initialize(mod, p))
3143                                break;
3144
3145                        update_cnt++;
3146                }
3147        }
3148
3149        stop = ftrace_now(raw_smp_processor_id());
3150        ftrace_update_time = stop - start;
3151        ftrace_update_tot_cnt += update_cnt;
3152
3153        return 0;
3154}
3155
3156static int ftrace_allocate_records(struct ftrace_page *pg, int count)
3157{
3158        int order;
3159        int pages;
3160        int cnt;
3161
3162        if (WARN_ON(!count))
3163                return -EINVAL;
3164
3165        /* We want to fill as much as possible, with no empty pages */
3166        pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
3167        order = fls(pages) - 1;
3168
3169 again:
3170        pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
3171
3172        if (!pg->records) {
3173                /* if we can't allocate this size, try something smaller */
3174                if (!order)
3175                        return -ENOMEM;
3176                order >>= 1;
3177                goto again;
3178        }
3179
3180        ftrace_number_of_pages += 1 << order;
3181        ftrace_number_of_groups++;
3182
3183        cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
3184        pg->order = order;
3185
3186        if (cnt > count)
3187                cnt = count;
3188
3189        return cnt;
3190}
3191
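/*
 * A worked sizing example for ftrace_allocate_records() above, using
 * hypothetical numbers (ENTRIES_PER_PAGE depends on the arch's
 * struct dyn_ftrace size): with ENTRIES_PER_PAGE = 113 and count = 20000,
 * pages = DIV_ROUND_UP(20000, 113) = 177, so order = fls(177) - 1 = 7,
 * i.e. a 128-page block. That block holds (PAGE_SIZE << 7) / ENTRY_SIZE
 * records, which may be fewer than count; ftrace_allocate_pages() below
 * then loops and allocates further ftrace_pages for the remainder. On
 * allocation failure the order is halved and retried, until order 0
 * fails with -ENOMEM.
 */
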
3192static struct ftrace_page *
3193ftrace_allocate_pages(unsigned long num_to_init)
3194{
3195        struct ftrace_page *start_pg;
3196        struct ftrace_page *pg;
3197        int cnt;
3198
3199        if (!num_to_init)
3200                return NULL;
3201
3202        start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
3203        if (!pg)
3204                return NULL;
3205
3206        /*
3207         * Try to allocate as much as possible in one contiguous
3208         * location that fills in all of the space. We want to
3209         * waste as little space as possible.
3210         */
3211        for (;;) {
3212                cnt = ftrace_allocate_records(pg, num_to_init);
3213                if (cnt < 0)
3214                        goto free_pages;
3215
3216                num_to_init -= cnt;
3217                if (!num_to_init)
3218                        break;
3219
3220                pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
3221                if (!pg->next)
3222                        goto free_pages;
3223
3224                pg = pg->next;
3225        }
3226
3227        return start_pg;
3228
3229 free_pages:
3230        pg = start_pg;
3231        while (pg) {
3232                if (pg->records) {
3233                        free_pages((unsigned long)pg->records, pg->order);
3234                        ftrace_number_of_pages -= 1 << pg->order;
3235                }
3236                start_pg = pg->next;
3237                kfree(pg);
3238                pg = start_pg;
3239                ftrace_number_of_groups--;
3240        }
3241        pr_info("ftrace: FAILED to allocate memory for functions\n");
3242        return NULL;
3243}
3244
3245#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
3246
3247struct ftrace_iterator {
3248        loff_t                          pos;
3249        loff_t                          func_pos;
3250        loff_t                          mod_pos;
3251        struct ftrace_page              *pg;
3252        struct dyn_ftrace               *func;
3253        struct ftrace_func_probe        *probe;
3254        struct ftrace_func_entry        *probe_entry;
3255        struct trace_parser             parser;
3256        struct ftrace_hash              *hash;
3257        struct ftrace_ops               *ops;
3258        struct trace_array              *tr;
3259        struct list_head                *mod_list;
3260        int                             pidx;
3261        int                             idx;
3262        unsigned                        flags;
3263};
3264
3265static void *
3266t_probe_next(struct seq_file *m, loff_t *pos)
3267{
3268        struct ftrace_iterator *iter = m->private;
3269        struct trace_array *tr = iter->ops->private;
3270        struct list_head *func_probes;
3271        struct ftrace_hash *hash;
3272        struct list_head *next;
3273        struct hlist_node *hnd = NULL;
3274        struct hlist_head *hhd;
3275        int size;
3276
3277        (*pos)++;
3278        iter->pos = *pos;
3279
3280        if (!tr)
3281                return NULL;
3282
3283        func_probes = &tr->func_probes;
3284        if (list_empty(func_probes))
3285                return NULL;
3286
3287        if (!iter->probe) {
3288                next = func_probes->next;
3289                iter->probe = list_entry(next, struct ftrace_func_probe, list);
3290        }
3291
3292        if (iter->probe_entry)
3293                hnd = &iter->probe_entry->hlist;
3294
3295        hash = iter->probe->ops.func_hash->filter_hash;
3296
3297        /*
3298         * A probe being registered may temporarily have an empty hash
3299         * and it's at the end of the func_probes list.
3300         */
3301        if (!hash || hash == EMPTY_HASH)
3302                return NULL;
3303
3304        size = 1 << hash->size_bits;
3305
3306 retry:
3307        if (iter->pidx >= size) {
3308                if (iter->probe->list.next == func_probes)
3309                        return NULL;
3310                next = iter->probe->list.next;
3311                iter->probe = list_entry(next, struct ftrace_func_probe, list);
3312                hash = iter->probe->ops.func_hash->filter_hash;
3313                size = 1 << hash->size_bits;
3314                iter->pidx = 0;
3315        }
3316
3317        hhd = &hash->buckets[iter->pidx];
3318
3319        if (hlist_empty(hhd)) {
3320                iter->pidx++;
3321                hnd = NULL;
3322                goto retry;
3323        }
3324
3325        if (!hnd)
3326                hnd = hhd->first;
3327        else {
3328                hnd = hnd->next;
3329                if (!hnd) {
3330                        iter->pidx++;
3331                        goto retry;
3332                }
3333        }
3334
3335        if (WARN_ON_ONCE(!hnd))
3336                return NULL;
3337
3338        iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);
3339
3340        return iter;
3341}
3342
3343static void *t_probe_start(struct seq_file *m, loff_t *pos)
3344{
3345        struct ftrace_iterator *iter = m->private;
3346        void *p = NULL;
3347        loff_t l;
3348
3349        if (!(iter->flags & FTRACE_ITER_DO_PROBES))
3350                return NULL;
3351
3352        if (iter->mod_pos > *pos)
3353                return NULL;
3354
3355        iter->probe = NULL;
3356        iter->probe_entry = NULL;
3357        iter->pidx = 0;
3358        for (l = 0; l <= (*pos - iter->mod_pos); ) {
3359                p = t_probe_next(m, &l);
3360                if (!p)
3361                        break;
3362        }
3363        if (!p)
3364                return NULL;
3365
3366        /* Only set this if we have an item */
3367        iter->flags |= FTRACE_ITER_PROBE;
3368
3369        return iter;
3370}
3371
3372static int
3373t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
3374{
3375        struct ftrace_func_entry *probe_entry;
3376        struct ftrace_probe_ops *probe_ops;
3377        struct ftrace_func_probe *probe;
3378
3379        probe = iter->probe;
3380        probe_entry = iter->probe_entry;
3381
3382        if (WARN_ON_ONCE(!probe || !probe_entry))
3383                return -EIO;
3384
3385        probe_ops = probe->probe_ops;
3386
3387        if (probe_ops->print)
3388                return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data);
3389
3390        seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
3391                   (void *)probe_ops->func);
3392
3393        return 0;
3394}
3395
3396static void *
3397t_mod_next(struct seq_file *m, loff_t *pos)
3398{
3399        struct ftrace_iterator *iter = m->private;
3400        struct trace_array *tr = iter->tr;
3401
3402        (*pos)++;
3403        iter->pos = *pos;
3404
3405        iter->mod_list = iter->mod_list->next;
3406
3407        if (iter->mod_list == &tr->mod_trace ||
3408            iter->mod_list == &tr->mod_notrace) {
3409                iter->flags &= ~FTRACE_ITER_MOD;
3410                return NULL;
3411        }
3412
3413        iter->mod_pos = *pos;
3414
3415        return iter;
3416}
3417
3418static void *t_mod_start(struct seq_file *m, loff_t *pos)
3419{
3420        struct ftrace_iterator *iter = m->private;
3421        void *p = NULL;
3422        loff_t l;
3423
3424        if (iter->func_pos > *pos)
3425                return NULL;
3426
3427        iter->mod_pos = iter->func_pos;
3428
3429        /* mod lists and probes are only available if tr is set */
3430        if (!iter->tr)
3431                return NULL;
3432
3433        for (l = 0; l <= (*pos - iter->func_pos); ) {
3434                p = t_mod_next(m, &l);
3435                if (!p)
3436                        break;
3437        }
3438        if (!p) {
3439                iter->flags &= ~FTRACE_ITER_MOD;
3440                return t_probe_start(m, pos);
3441        }
3442
3443        /* Only set this if we have an item */
3444        iter->flags |= FTRACE_ITER_MOD;
3445
3446        return iter;
3447}
3448
3449static int
3450t_mod_show(struct seq_file *m, struct ftrace_iterator *iter)
3451{
3452        struct ftrace_mod_load *ftrace_mod;
3453        struct trace_array *tr = iter->tr;
3454
3455        if (WARN_ON_ONCE(!iter->mod_list) ||
3456                         iter->mod_list == &tr->mod_trace ||
3457                         iter->mod_list == &tr->mod_notrace)
3458                return -EIO;
3459
3460        ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list);
3461
3462        if (ftrace_mod->func)
3463                seq_printf(m, "%s", ftrace_mod->func);
3464        else
3465                seq_putc(m, '*');
3466
3467        seq_printf(m, ":mod:%s\n", ftrace_mod->module);
3468
3469        return 0;
3470}
3471
3472static void *
3473t_func_next(struct seq_file *m, loff_t *pos)
3474{
3475        struct ftrace_iterator *iter = m->private;
3476        struct dyn_ftrace *rec = NULL;
3477
3478        (*pos)++;
3479
3480 retry:
3481        if (iter->idx >= iter->pg->index) {
3482                if (iter->pg->next) {
3483                        iter->pg = iter->pg->next;
3484                        iter->idx = 0;
3485                        goto retry;
3486                }
3487        } else {
3488                rec = &iter->pg->records[iter->idx++];
3489                if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3490                     !ftrace_lookup_ip(iter->hash, rec->ip)) ||
3491
3492                    ((iter->flags & FTRACE_ITER_ENABLED) &&
3493                     !(rec->flags & FTRACE_FL_ENABLED))) {
3494
3495                        rec = NULL;
3496                        goto retry;
3497                }
3498        }
3499
3500        if (!rec)
3501                return NULL;
3502
3503        iter->pos = iter->func_pos = *pos;
3504        iter->func = rec;
3505
3506        return iter;
3507}
3508
3509static void *
3510t_next(struct seq_file *m, void *v, loff_t *pos)
3511{
3512        struct ftrace_iterator *iter = m->private;
3513        loff_t l = *pos; /* t_probe_start() must use original pos */
3514        void *ret;
3515
3516        if (unlikely(ftrace_disabled))
3517                return NULL;
3518
3519        if (iter->flags & FTRACE_ITER_PROBE)
3520                return t_probe_next(m, pos);
3521
3522        if (iter->flags & FTRACE_ITER_MOD)
3523                return t_mod_next(m, pos);
3524
3525        if (iter->flags & FTRACE_ITER_PRINTALL) {
3526                /* next must increment pos, and t_mod_start() does not */
3527                (*pos)++;
3528                return t_mod_start(m, &l);
3529        }
3530
3531        ret = t_func_next(m, pos);
3532
3533        if (!ret)
3534                return t_mod_start(m, &l);
3535
3536        return ret;
3537}
3538
3539static void reset_iter_read(struct ftrace_iterator *iter)
3540{
3541        iter->pos = 0;
3542        iter->func_pos = 0;
3543        iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD);
3544}
3545
3546static void *t_start(struct seq_file *m, loff_t *pos)
3547{
3548        struct ftrace_iterator *iter = m->private;
3549        void *p = NULL;
3550        loff_t l;
3551
3552        mutex_lock(&ftrace_lock);
3553
3554        if (unlikely(ftrace_disabled))
3555                return NULL;
3556
3557        /*
3558         * If an lseek was done, then reset and start from beginning.
3559         */
3560        if (*pos < iter->pos)
3561                reset_iter_read(iter);
3562
3563        /*
3564         * For set_ftrace_filter reading, if we have the filter
3565         * off, we can short cut and just print out that all
3566         * functions are enabled.
3567         */
3568        if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3569            ftrace_hash_empty(iter->hash)) {
3570                iter->func_pos = 1; /* Account for the message */
3571                if (*pos > 0)
3572                        return t_mod_start(m, pos);
3573                iter->flags |= FTRACE_ITER_PRINTALL;
3574                /* reset in case of seek/pread */
3575                iter->flags &= ~FTRACE_ITER_PROBE;
3576                return iter;
3577        }
3578
3579        if (iter->flags & FTRACE_ITER_MOD)
3580                return t_mod_start(m, pos);
3581
3582        /*
3583         * Unfortunately, we need to restart at ftrace_pages_start
3584         * every time we let go of ftrace_lock. This is because
3585         * those pointers can change without the lock.
3586         */
3587        iter->pg = ftrace_pages_start;
3588        iter->idx = 0;
3589        for (l = 0; l <= *pos; ) {
3590                p = t_func_next(m, &l);
3591                if (!p)
3592                        break;
3593        }
3594
3595        if (!p)
3596                return t_mod_start(m, pos);
3597
3598        return iter;
3599}
3600
3601static void t_stop(struct seq_file *m, void *p)
3602{
3603        mutex_unlock(&ftrace_lock);
3604}
3605
3606void * __weak
3607arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
3608{
3609        return NULL;
3610}
3611
3612static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
3613                                struct dyn_ftrace *rec)
3614{
3615        void *ptr;
3616
3617        ptr = arch_ftrace_trampoline_func(ops, rec);
3618        if (ptr)
3619                seq_printf(m, " ->%pS", ptr);
3620}
3621
3622static int t_show(struct seq_file *m, void *v)
3623{
3624        struct ftrace_iterator *iter = m->private;
3625        struct dyn_ftrace *rec;
3626
3627        if (iter->flags & FTRACE_ITER_PROBE)
3628                return t_probe_show(m, iter);
3629
3630        if (iter->flags & FTRACE_ITER_MOD)
3631                return t_mod_show(m, iter);
3632
3633        if (iter->flags & FTRACE_ITER_PRINTALL) {
3634                if (iter->flags & FTRACE_ITER_NOTRACE)
3635                        seq_puts(m, "#### no functions disabled ####\n");
3636                else
3637                        seq_puts(m, "#### all functions enabled ####\n");
3638                return 0;
3639        }
3640
3641        rec = iter->func;
3642
3643        if (!rec)
3644                return 0;
3645
3646        seq_printf(m, "%ps", (void *)rec->ip);
3647        if (iter->flags & FTRACE_ITER_ENABLED) {
3648                struct ftrace_ops *ops;
3649
3650                seq_printf(m, " (%ld)%s%s%s",
3651                           ftrace_rec_count(rec),
3652                           rec->flags & FTRACE_FL_REGS ? " R" : "  ",
3653                           rec->flags & FTRACE_FL_IPMODIFY ? " I" : "  ",
3654                           rec->flags & FTRACE_FL_DIRECT ? " D" : "  ");
3655                if (rec->flags & FTRACE_FL_TRAMP_EN) {
3656                        ops = ftrace_find_tramp_ops_any(rec);
3657                        if (ops) {
3658                                do {
3659                                        seq_printf(m, "\ttramp: %pS (%pS)",
3660                                                   (void *)ops->trampoline,
3661                                                   (void *)ops->func);
3662                                        add_trampoline_func(m, ops, rec);
3663                                        ops = ftrace_find_tramp_ops_next(rec, ops);
3664                                } while (ops);
3665                        } else
3666                                seq_puts(m, "\ttramp: ERROR!");
3667                } else {
3668                        add_trampoline_func(m, NULL, rec);
3669                }
3670                if (rec->flags & FTRACE_FL_DIRECT) {
3671                        unsigned long direct;
3672
3673                        direct = ftrace_find_rec_direct(rec->ip);
3674                        if (direct)
3675                                seq_printf(m, "\n\tdirect-->%pS", (void *)direct);
3676                }
3677        }
3678
3679        seq_putc(m, '\n');
3680
3681        return 0;
3682}
3683
3684static const struct seq_operations show_ftrace_seq_ops = {
3685        .start = t_start,
3686        .next = t_next,
3687        .stop = t_stop,
3688        .show = t_show,
3689};
3690
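/*
 * These seq operations back both available_filter_functions and the
 * filter files opened below: t_start()/t_next() walk the function
 * records first, then any cached module filters, then the probe
 * entries, and t_show() prints each kind. A sketch of what reading
 * set_ftrace_filter might return (names and the probe line are
 * illustrative):
 *
 *	vfs_read
 *	vfs_write
 *	*:mod:ext4
 *	vfs_read:traceoff:unlimited
 *
 * The first two lines come from t_func_next()/t_show(), the ":mod:"
 * line from t_mod_show(), and the last from t_probe_show().
 */
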
3691static int
3692ftrace_avail_open(struct inode *inode, struct file *file)
3693{
3694        struct ftrace_iterator *iter;
3695        int ret;
3696
3697        ret = security_locked_down(LOCKDOWN_TRACEFS);
3698        if (ret)
3699                return ret;
3700
3701        if (unlikely(ftrace_disabled))
3702                return -ENODEV;
3703
3704        iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3705        if (!iter)
3706                return -ENOMEM;
3707
3708        iter->pg = ftrace_pages_start;
3709        iter->ops = &global_ops;
3710
3711        return 0;
3712}
3713
3714static int
3715ftrace_enabled_open(struct inode *inode, struct file *file)
3716{
3717        struct ftrace_iterator *iter;
3718
3719        /*
3720         * This shows us what functions are currently being
3721         * traced and by what. It is not clear that lockdown
3722         * should hide such critical information from an admin.
3723         * Although it may expose information we would rather
3724         * keep hidden, if something is tracing something, we
3725         * probably want to know about it.
3726         */
3727
3728        iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3729        if (!iter)
3730                return -ENOMEM;
3731
3732        iter->pg = ftrace_pages_start;
3733        iter->flags = FTRACE_ITER_ENABLED;
3734        iter->ops = &global_ops;
3735
3736        return 0;
3737}
3738
3739/**
3740 * ftrace_regex_open - initialize function tracer filter files
3741 * @ops: The ftrace_ops that hold the hash filters
3742 * @flag: The type of filter to process
3743 * @inode: The inode, usually passed in to your open routine
3744 * @file: The file, usually passed in to your open routine
3745 *
3746 * ftrace_regex_open() initializes the filter files for the
3747 * @ops. Depending on @flag it may process the filter hash or
3748 * the notrace hash of @ops. When this is called from the open
3749 * routine, you can use ftrace_filter_write() for the write
3750 * routine if @flag has FTRACE_ITER_FILTER set, or
3751 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
3752 * tracing_lseek() should be used as the lseek routine, and
3753 * release must call ftrace_regex_release().
3754 */
3755int
3756ftrace_regex_open(struct ftrace_ops *ops, int flag,
3757                  struct inode *inode, struct file *file)
3758{
3759        struct ftrace_iterator *iter;
3760        struct ftrace_hash *hash;
3761        struct list_head *mod_head;
3762        struct trace_array *tr = ops->private;
3763        int ret = -ENOMEM;
3764
3765        ftrace_ops_init(ops);
3766
3767        if (unlikely(ftrace_disabled))
3768                return -ENODEV;
3769
3770        if (tracing_check_open_get_tr(tr))
3771                return -ENODEV;
3772
3773        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3774        if (!iter)
3775                goto out;
3776
3777        if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
3778                goto out;
3779
3780        iter->ops = ops;
3781        iter->flags = flag;
3782        iter->tr = tr;
3783
3784        mutex_lock(&ops->func_hash->regex_lock);
3785
3786        if (flag & FTRACE_ITER_NOTRACE) {
3787                hash = ops->func_hash->notrace_hash;
3788                mod_head = tr ? &tr->mod_notrace : NULL;
3789        } else {
3790                hash = ops->func_hash->filter_hash;
3791                mod_head = tr ? &tr->mod_trace : NULL;
3792        }
3793
3794        iter->mod_list = mod_head;
3795
3796        if (file->f_mode & FMODE_WRITE) {
3797                const int size_bits = FTRACE_HASH_DEFAULT_BITS;
3798
3799                if (file->f_flags & O_TRUNC) {
3800                        iter->hash = alloc_ftrace_hash(size_bits);
3801                        clear_ftrace_mod_list(mod_head);
3802                } else {
3803                        iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
3804                }
3805
3806                if (!iter->hash) {
3807                        trace_parser_put(&iter->parser);
3808                        goto out_unlock;
3809                }
3810        } else
3811                iter->hash = hash;
3812
3813        ret = 0;
3814
3815        if (file->f_mode & FMODE_READ) {
3816                iter->pg = ftrace_pages_start;
3817
3818                ret = seq_open(file, &show_ftrace_seq_ops);
3819                if (!ret) {
3820                        struct seq_file *m = file->private_data;
3821                        m->private = iter;
3822                } else {
3823                        /* Failed */
3824                        free_ftrace_hash(iter->hash);
3825                        trace_parser_put(&iter->parser);
3826                }
3827        } else
3828                file->private_data = iter;
3829
3830 out_unlock:
3831        mutex_unlock(&ops->func_hash->regex_lock);
3832
3833 out:
3834        if (ret) {
3835                kfree(iter);
3836                if (tr)
3837                        trace_array_put(tr);
3838        }
3839
3840        return ret;
3841}
3842
3843static int
3844ftrace_filter_open(struct inode *inode, struct file *file)
3845{
3846        struct ftrace_ops *ops = inode->i_private;
3847
3848        /* Checks for tracefs lockdown */
3849        return ftrace_regex_open(ops,
3850                        FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
3851                        inode, file);
3852}
3853
3854static int
3855ftrace_notrace_open(struct inode *inode, struct file *file)
3856{
3857        struct ftrace_ops *ops = inode->i_private;
3858
3859        /* Checks for tracefs lockdown */
3860        return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
3861                                 inode, file);
3862}
3863
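/*
 * A minimal sketch of wiring the open routines above into a tracefs
 * file, following the ftrace_regex_open() kernel-doc (the struct name
 * is illustrative):
 *
 *	static const struct file_operations my_filter_fops = {
 *		.open		= ftrace_filter_open,
 *		.read		= seq_read,
 *		.write		= ftrace_filter_write,
 *		.llseek		= tracing_lseek,
 *		.release	= ftrace_regex_release,
 *	};
 */
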
3864/* Type for quickly matching ftrace basic regexes (globs) from filter_parse_regex */
3865struct ftrace_glob {
3866        char *search;
3867        unsigned len;
3868        int type;
3869};
3870
3871/*
3872 * If symbols in an architecture don't correspond exactly to the user-visible
3873 * name of what they represent, it is possible to define this function to
3874 * perform the necessary adjustments.
3875 */
3876char * __weak arch_ftrace_match_adjust(char *str, const char *search)
3877{
3878        return str;
3879}
3880
3881static int ftrace_match(char *str, struct ftrace_glob *g)
3882{
3883        int matched = 0;
3884        int slen;
3885
3886        str = arch_ftrace_match_adjust(str, g->search);
3887
3888        switch (g->type) {
3889        case MATCH_FULL:
3890                if (strcmp(str, g->search) == 0)
3891                        matched = 1;
3892                break;
3893        case MATCH_FRONT_ONLY:
3894                if (strncmp(str, g->search, g->len) == 0)
3895                        matched = 1;
3896                break;
3897        case MATCH_MIDDLE_ONLY:
3898                if (strstr(str, g->search))
3899                        matched = 1;
3900                break;
3901        case MATCH_END_ONLY:
3902                slen = strlen(str);
3903                if (slen >= g->len &&
3904                    memcmp(str + slen - g->len, g->search, g->len) == 0)
3905                        matched = 1;
3906                break;
3907        case MATCH_GLOB:
3908                if (glob_match(g->search, str))
3909                        matched = 1;
3910                break;
3911        }
3912
3913        return matched;
3914}
3915
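/*
 * For reference, the usual mapping from user input to the glob types
 * handled above, as produced by filter_parse_regex() (a sketch; the
 * search string is the pattern with the wildcards stripped, except for
 * MATCH_GLOB which keeps the full pattern):
 *
 *	"vfs_read"   -> MATCH_FULL,        search "vfs_read"
 *	"vfs_*"      -> MATCH_FRONT_ONLY,  search "vfs_"
 *	"*_read"     -> MATCH_END_ONLY,    search "_read"
 *	"*read*"     -> MATCH_MIDDLE_ONLY, search "read"
 *	"vfs_*ead"   -> MATCH_GLOB,        search "vfs_*ead"
 */
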
3916static int
3917enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
3918{
3919        struct ftrace_func_entry *entry;
3920        int ret = 0;
3921
3922        entry = ftrace_lookup_ip(hash, rec->ip);
3923        if (clear_filter) {
3924                /* Do nothing if it doesn't exist */
3925                if (!entry)
3926                        return 0;
3927
3928                free_hash_entry(hash, entry);
3929        } else {
3930                /* Do nothing if it exists */
3931                if (entry)
3932                        return 0;
3933
3934                ret = add_hash_entry(hash, rec->ip);
3935        }
3936        return ret;
3937}
3938
3939static int
3940add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g,
3941                 int clear_filter)
3942{
3943        long index = simple_strtoul(func_g->search, NULL, 0);
3944        struct ftrace_page *pg;
3945        struct dyn_ftrace *rec;
3946
3947        /* The index starts at 1 */
3948        if (--index < 0)
3949                return 0;
3950
3951        do_for_each_ftrace_rec(pg, rec) {
3952                if (pg->index <= index) {
3953                        index -= pg->index;
3954                        /* this is a double loop, break goes to the next page */
3955                        break;
3956                }
3957                rec = &pg->records[index];
3958                enter_record(hash, rec, clear_filter);
3959                return 1;
3960        } while_for_each_ftrace_rec();
3961        return 0;
3962}
3963
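/*
 * MATCH_INDEX example: writing a bare number selects a function by its
 * position, starting at 1, in the record pages -- the same order that
 * available_filter_functions prints. For instance:
 *
 *	# echo 3 > /sys/kernel/tracing/set_ftrace_filter
 *
 * adds the third listed function to the hash via enter_record().
 */
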
3964static int
3965ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
3966                struct ftrace_glob *mod_g, int exclude_mod)
3967{
3968        char str[KSYM_SYMBOL_LEN];
3969        char *modname;
3970
3971        kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
3972
3973        if (mod_g) {
3974                int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
3975
3976                /* blank module name to match all modules */
3977                if (!mod_g->len) {
3978                        /* blank module globbing: modname xor exclude_mod */
3979                        if (!exclude_mod != !modname)
3980                                goto func_match;
3981                        return 0;
3982                }
3983
3984                /*
3985                 * exclude_mod is set to trace everything but the given
3986                 * module. If it is set and the module matches, then
3987                 * return 0. If it is not set and the module doesn't match,
3988                 * also return 0. Otherwise, check the function to see if
3989                 * that matches.
3990                 */
3991                if (!mod_matches == !exclude_mod)
3992                        return 0;
3993func_match:
3994                /* blank search means to match all funcs in the mod */
3995                if (!func_g->len)
3996                        return 1;
3997        }
3998
3999        return ftrace_match(str, func_g);
4000}
4001
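/*
 * Some illustrative ":mod:" filters as their two globs reach
 * ftrace_match_record() (a sketch of the semantics implemented above):
 *
 *	"vfs_*:mod:ext4"    - funcs starting with "vfs_" in module ext4
 *	"*:mod:ext4"        - every function in module ext4
 *	"*write*:mod:!ext4" - "!" sets exclude_mod: matching funcs
 *			      everywhere except in module ext4
 *	"*:mod:*"           - blank module glob: all module functions
 *			      ("!*" instead matches only built-in code)
 */
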
4002static int
4003match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
4004{
4005        struct ftrace_page *pg;
4006        struct dyn_ftrace *rec;
4007        struct ftrace_glob func_g = { .type = MATCH_FULL };
4008        struct ftrace_glob mod_g = { .type = MATCH_FULL };
4009        struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
4010        int exclude_mod = 0;
4011        int found = 0;
4012        int ret;
4013        int clear_filter = 0;
4014
4015        if (func) {
4016                func_g.type = filter_parse_regex(func, len, &func_g.search,
4017                                                 &clear_filter);
4018                func_g.len = strlen(func_g.search);
4019        }
4020
4021        if (mod) {
4022                mod_g.type = filter_parse_regex(mod, strlen(mod),
4023                                &mod_g.search, &exclude_mod);
4024                mod_g.len = strlen(mod_g.search);
4025        }
4026
4027        mutex_lock(&ftrace_lock);
4028
4029        if (unlikely(ftrace_disabled))
4030                goto out_unlock;
4031
4032        if (func_g.type == MATCH_INDEX) {
4033                found = add_rec_by_index(hash, &func_g, clear_filter);
4034                goto out_unlock;
4035        }
4036
4037        do_for_each_ftrace_rec(pg, rec) {
4038
4039                if (rec->flags & FTRACE_FL_DISABLED)
4040                        continue;
4041
4042                if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
4043                        ret = enter_record(hash, rec, clear_filter);
4044                        if (ret < 0) {
4045                                found = ret;
4046                                goto out_unlock;
4047                        }
4048                        found = 1;
4049                }
4050        } while_for_each_ftrace_rec();
4051 out_unlock:
4052        mutex_unlock(&ftrace_lock);
4053
4054        return found;
4055}
4056
4057static int
4058ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
4059{
4060        return match_records(hash, buff, len, NULL);
4061}
4062
4063static void ftrace_ops_update_code(struct ftrace_ops *ops,
4064                                   struct ftrace_ops_hash *old_hash)
4065{
4066        struct ftrace_ops *op;
4067
4068        if (!ftrace_enabled)
4069                return;
4070
4071        if (ops->flags & FTRACE_OPS_FL_ENABLED) {
4072                ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
4073                return;
4074        }
4075
4076        /*
4077         * If this is the shared global_ops filter, then we need to
4078         * check if another ops that shares it is enabled.
4079         * If so, we still need to run the modify code.
4080         */
4081        if (ops->func_hash != &global_ops.local_hash)
4082                return;
4083
4084        do_for_each_ftrace_op(op, ftrace_ops_list) {
4085                if (op->func_hash == &global_ops.local_hash &&
4086                    op->flags & FTRACE_OPS_FL_ENABLED) {
4087                        ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
4088                        /* Only need to do this once */
4089                        return;
4090                }
4091        } while_for_each_ftrace_op(op);
4092}
4093
4094static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
4095                                           struct ftrace_hash **orig_hash,
4096                                           struct ftrace_hash *hash,
4097                                           int enable)
4098{
4099        struct ftrace_ops_hash old_hash_ops;
4100        struct ftrace_hash *old_hash;
4101        int ret;
4102
4103        old_hash = *orig_hash;
4104        old_hash_ops.filter_hash = ops->func_hash->filter_hash;
4105        old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
4106        ret = ftrace_hash_move(ops, enable, orig_hash, hash);
4107        if (!ret) {
4108                ftrace_ops_update_code(ops, &old_hash_ops);
4109                free_ftrace_hash_rcu(old_hash);
4110        }
4111        return ret;
4112}
4113
4114static bool module_exists(const char *module)
4115{
4116        /* All modules have the symbol __this_module */
4117        static const char this_mod[] = "__this_module";
4118        char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
4119        unsigned long val;
4120        int n;
4121
4122        n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod);
4123
4124        if (n > sizeof(modname) - 1)
4125                return false;
4126
4127        val = module_kallsyms_lookup_name(modname);
4128        return val != 0;
4129}
4130
4131static int cache_mod(struct trace_array *tr,
4132                     const char *func, char *module, int enable)
4133{
4134        struct ftrace_mod_load *ftrace_mod, *n;
4135        struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace;
4136        int ret;
4137
4138        mutex_lock(&ftrace_lock);
4139
4140        /* We do not cache inverse filters */
4141        if (func[0] == '!') {
4142                func++;
4143                ret = -EINVAL;
4144
4145                /* Look to remove this hash */
4146                list_for_each_entry_safe(ftrace_mod, n, head, list) {
4147                        if (strcmp(ftrace_mod->module, module) != 0)
4148                                continue;
4149
4150                        /* no func matches all */
4151                        if (strcmp(func, "*") == 0 ||
4152                            (ftrace_mod->func &&
4153                             strcmp(ftrace_mod->func, func) == 0)) {
4154                                ret = 0;
4155                                free_ftrace_mod(ftrace_mod);
4156                                continue;
4157                        }
4158                }
4159                goto out;
4160        }
4161
4162        ret = -EINVAL;
4163        /* We only care about modules that have not been loaded yet */
4164        if (module_exists(module))
4165                goto out;
4166
4167        /* Save this string off, and execute it when the module is loaded */
4168        ret = ftrace_add_mod(tr, func, module, enable);
4169 out:
4170        mutex_unlock(&ftrace_lock);
4171
4172        return ret;
4173}
4174
4175static int
4176ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4177                 int reset, int enable);
4178
4179#ifdef CONFIG_MODULES
4180static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
4181                             char *mod, bool enable)
4182{
4183        struct ftrace_mod_load *ftrace_mod, *n;
4184        struct ftrace_hash **orig_hash, *new_hash;
4185        LIST_HEAD(process_mods);
4186        char *func;
4187
4188        mutex_lock(&ops->func_hash->regex_lock);
4189
4190        if (enable)
4191                orig_hash = &ops->func_hash->filter_hash;
4192        else
4193                orig_hash = &ops->func_hash->notrace_hash;
4194
4195        new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS,
4196                                              *orig_hash);
4197        if (!new_hash)
4198                goto out; /* warn? */
4199
4200        mutex_lock(&ftrace_lock);
4201
4202        list_for_each_entry_safe(ftrace_mod, n, head, list) {
4203
4204                if (strcmp(ftrace_mod->module, mod) != 0)
4205                        continue;
4206
4207                if (ftrace_mod->func)
4208                        func = kstrdup(ftrace_mod->func, GFP_KERNEL);
4209                else
4210                        func = kstrdup("*", GFP_KERNEL);
4211
4212                if (!func) /* warn? */
4213                        continue;
4214
4215                list_move(&ftrace_mod->list, &process_mods);
4216
4217                /* Use the newly allocated func, as it may be "*" */
4218                kfree(ftrace_mod->func);
4219                ftrace_mod->func = func;
4220        }
4221
4222        mutex_unlock(&ftrace_lock);
4223
4224        list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) {
4225
4226                func = ftrace_mod->func;
4227
4228                /* Grabs ftrace_lock, which is why we have this extra step */
4229                match_records(new_hash, func, strlen(func), mod);
4230                free_ftrace_mod(ftrace_mod);
4231        }
4232
4233        if (enable && list_empty(head))
4234                new_hash->flags &= ~FTRACE_HASH_FL_MOD;
4235
4236        mutex_lock(&ftrace_lock);
4237
4238        ftrace_hash_move_and_update_ops(ops, orig_hash,
4239                                              new_hash, enable);
4240        mutex_unlock(&ftrace_lock);
4241
4242 out:
4243        mutex_unlock(&ops->func_hash->regex_lock);
4244
4245        free_ftrace_hash(new_hash);
4246}
4247
4248static void process_cached_mods(const char *mod_name)
4249{
4250        struct trace_array *tr;
4251        char *mod;
4252
4253        mod = kstrdup(mod_name, GFP_KERNEL);
4254        if (!mod)
4255                return;
4256
4257        mutex_lock(&trace_types_lock);
4258        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
4259                if (!list_empty(&tr->mod_trace))
4260                        process_mod_list(&tr->mod_trace, tr->ops, mod, true);
4261                if (!list_empty(&tr->mod_notrace))
4262                        process_mod_list(&tr->mod_notrace, tr->ops, mod, false);
4263        }
4264        mutex_unlock(&trace_types_lock);
4265
4266        kfree(mod);
4267}
4268#endif
4269
4270/*
4271 * We register the module command as a template to show others how
4272 * to register a command as well.
4273 */
4274
4275static int
4276ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
4277                    char *func_orig, char *cmd, char *module, int enable)
4278{
4279        char *func;
4280        int ret;
4281
4282        /* match_records() modifies func, and we need the original */
4283        func = kstrdup(func_orig, GFP_KERNEL);
4284        if (!func)
4285                return -ENOMEM;
4286
4287        /*
4288         * cmd == 'mod' because we only registered this func
4289         * for the 'mod' ftrace_func_command.
4290         * But if you register one func with multiple commands,
4291         * you can tell which command was used by the cmd
4292         * parameter.
4293         */
4294        ret = match_records(hash, func, strlen(func), module);
4295        kfree(func);
4296
4297        if (!ret)
4298                return cache_mod(tr, func_orig, module, enable);
4299        if (ret < 0)
4300                return ret;
4301        return 0;
4302}
4303
4304static struct ftrace_func_command ftrace_mod_cmd = {
4305        .name                   = "mod",
4306        .func                   = ftrace_mod_callback,
4307};
4308
4309static int __init ftrace_mod_cmd_init(void)
4310{
4311        return register_ftrace_command(&ftrace_mod_cmd);
4312}
4313core_initcall(ftrace_mod_cmd_init);
4314
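/*
 * Following the template note above, a minimal sketch of registering a
 * new command (all "my_*" names are hypothetical). A filter write of
 * "<func>:mycmd:<param>" would then reach the callback with cmd set to
 * "mycmd" and param holding the text after the second colon:
 *
 *	static int my_cmd_callback(struct trace_array *tr,
 *				   struct ftrace_hash *hash, char *func,
 *				   char *cmd, char *param, int enable)
 *	{
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name	= "mycmd",
 *		.func	= my_cmd_callback,
 *	};
 *
 *	static int __init my_cmd_init(void)
 *	{
 *		return register_ftrace_command(&my_cmd);
 *	}
 *	core_initcall(my_cmd_init);
 */
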
4315static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
4316                                      struct ftrace_ops *op, struct ftrace_regs *fregs)
4317{
4318        struct ftrace_probe_ops *probe_ops;
4319        struct ftrace_func_probe *probe;
4320
4321        probe = container_of(op, struct ftrace_func_probe, ops);
4322        probe_ops = probe->probe_ops;
4323
4324        /*
4325         * Disable preemption for these calls to prevent an RCU grace
4326         * period. This syncs the hash iteration and freeing of items
4327         * on the hash. rcu_read_lock is too dangerous here.
4328         */
4329        preempt_disable_notrace();
4330        probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
4331        preempt_enable_notrace();
4332}
4333
4334struct ftrace_func_map {
4335        struct ftrace_func_entry        entry;
4336        void                            *data;
4337};
4338
4339struct ftrace_func_mapper {
4340        struct ftrace_hash              hash;
4341};
4342
4343/**
4344 * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
4345 *
4346 * Returns a ftrace_func_mapper descriptor that can be used to map ips to data.
4347 */
4348struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
4349{
4350        struct ftrace_hash *hash;
4351
4352        /*
4353         * The mapper is simply a ftrace_hash, but since the entries
4354         * in the hash are not ftrace_func_entry type, we define it
4355         * as a separate structure.
4356         */
4357        hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4358        return (struct ftrace_func_mapper *)hash;
4359}
4360
4361/**
4362 * ftrace_func_mapper_find_ip - Find some data mapped to an ip
4363 * @mapper: The mapper that has the ip maps
4364 * @ip: the instruction pointer to find the data for
4365 *
4366 * Returns the data mapped to @ip if found, otherwise NULL. The return
4367 * is actually the address of the mapper data pointer. The address is
4368 * returned for use cases where the data is no bigger than a long, and
4369 * the user can use the data pointer as its data instead of having to
4370 * allocate more memory for the reference.
4371 */
4372void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
4373                                  unsigned long ip)
4374{
4375        struct ftrace_func_entry *entry;
4376        struct ftrace_func_map *map;
4377
4378        entry = ftrace_lookup_ip(&mapper->hash, ip);
4379        if (!entry)
4380                return NULL;
4381
4382        map = (struct ftrace_func_map *)entry;
4383        return &map->data;
4384}
4385
4386/**
4387 * ftrace_func_mapper_add_ip - Map some data to an ip
4388 * @mapper: The mapper that has the ip maps
4389 * @ip: The instruction pointer address to map @data to
4390 * @data: The data to map to @ip
4391 *
4392 * Returns 0 on success otherwise an error.
4393 */
4394int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
4395                              unsigned long ip, void *data)
4396{
4397        struct ftrace_func_entry *entry;
4398        struct ftrace_func_map *map;
4399
4400        entry = ftrace_lookup_ip(&mapper->hash, ip);
4401        if (entry)
4402                return -EBUSY;
4403
4404        map = kmalloc(sizeof(*map), GFP_KERNEL);
4405        if (!map)
4406                return -ENOMEM;
4407
4408        map->entry.ip = ip;
4409        map->data = data;
4410
4411        __add_hash_entry(&mapper->hash, &map->entry);
4412
4413        return 0;
4414}
4415
4416/**
4417 * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
4418 * @mapper: The mapper that has the ip maps
4419 * @ip: The instruction pointer address to remove the data from
4420 *
4421 * Returns the data if it is found, otherwise NULL.
4422 * Note, if the data pointer is used as the data itself (see
4423 * ftrace_func_mapper_find_ip()), then the return value may be
4424 * meaningless if the data pointer was set to zero.
4425 */
4426void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
4427                                   unsigned long ip)
4428{
4429        struct ftrace_func_entry *entry;
4430        struct ftrace_func_map *map;
4431        void *data;
4432
4433        entry = ftrace_lookup_ip(&mapper->hash, ip);
4434        if (!entry)
4435                return NULL;
4436
4437        map = (struct ftrace_func_map *)entry;
4438        data = map->data;
4439
4440        remove_hash_entry(&mapper->hash, entry);
4441        kfree(entry);
4442
4443        return data;
4444}
4445
4446/**
4447 * free_ftrace_func_mapper - free a mapping of ips and data
4448 * @mapper: The mapper that has the ip maps
4449 * @free_func: A function to be called on each data item.
4450 *
4451 * This is used to free the function mapper. The @free_func is optional
4452 * and can be used if the data needs to be freed as well.
4453 */
4454void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
4455                             ftrace_mapper_func free_func)
4456{
4457        struct ftrace_func_entry *entry;
4458        struct ftrace_func_map *map;
4459        struct hlist_head *hhd;
4460        int size, i;
4461
4462        if (!mapper)
4463                return;
4464
4465        if (free_func && mapper->hash.count) {
4466                size = 1 << mapper->hash.size_bits;
4467                for (i = 0; i < size; i++) {
4468                        hhd = &mapper->hash.buckets[i];
4469                        hlist_for_each_entry(entry, hhd, hlist) {
4470                                map = (struct ftrace_func_map *)entry;
4471                                free_func(map);
4472                        }
4473                }
4474        }
4475        free_ftrace_hash(&mapper->hash);
4476}
4477
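/*
 * A sketch of the mapper API above as a probe might use it, keeping a
 * count directly in the data pointer slot instead of allocating
 * (the value 5 is illustrative):
 *
 *	struct ftrace_func_mapper *mapper;
 *	void **count;
 *
 *	mapper = allocate_ftrace_func_mapper();
 *	ftrace_func_mapper_add_ip(mapper, ip, (void *)5);
 *	...
 *	count = ftrace_func_mapper_find_ip(mapper, ip);
 *	if (count)
 *		(*(long *)count)--;
 *	...
 *	free_ftrace_func_mapper(mapper, NULL);
 */
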
4478static void release_probe(struct ftrace_func_probe *probe)
4479{
4480        struct ftrace_probe_ops *probe_ops;
4481
4482        mutex_lock(&ftrace_lock);
4483
4484        WARN_ON(probe->ref <= 0);
4485
4486        /* Subtract the ref that was used to protect this instance */
4487        probe->ref--;
4488
4489        if (!probe->ref) {
4490                probe_ops = probe->probe_ops;
4491                /*
4492                 * Sending zero as ip tells probe_ops to free
4493                 * the probe->data itself
4494                 */
4495                if (probe_ops->free)
4496                        probe_ops->free(probe_ops, probe->tr, 0, probe->data);
4497                list_del(&probe->list);
4498                kfree(probe);
4499        }
4500        mutex_unlock(&ftrace_lock);
4501}
4502
4503static void acquire_probe_locked(struct ftrace_func_probe *probe)
4504{
4505        /*
4506         * Add one ref to keep it from being freed when releasing the
4507         * ftrace_lock mutex.
4508         */
4509        probe->ref++;
4510}
4511
4512int
4513register_ftrace_function_probe(char *glob, struct trace_array *tr,
4514                               struct ftrace_probe_ops *probe_ops,
4515                               void *data)
4516{
4517        struct ftrace_func_entry *entry;
4518        struct ftrace_func_probe *probe;
4519        struct ftrace_hash **orig_hash;
4520        struct ftrace_hash *old_hash;
4521        struct ftrace_hash *hash;
4522        int count = 0;
4523        int size;
4524        int ret;
4525        int i;
4526
4527        if (WARN_ON(!tr))
4528                return -EINVAL;
4529
4530        /* We do not support '!' for function probes */
4531        if (WARN_ON(glob[0] == '!'))
4532                return -EINVAL;
4533
4535        mutex_lock(&ftrace_lock);
4536        /* Check if the probe_ops is already registered */
4537        list_for_each_entry(probe, &tr->func_probes, list) {
4538                if (probe->probe_ops == probe_ops)
4539                        break;
4540        }
4541        if (&probe->list == &tr->func_probes) {
4542                probe = kzalloc(sizeof(*probe), GFP_KERNEL);
4543                if (!probe) {
4544                        mutex_unlock(&ftrace_lock);
4545                        return -ENOMEM;
4546                }
4547                probe->probe_ops = probe_ops;
4548                probe->ops.func = function_trace_probe_call;
4549                probe->tr = tr;
4550                ftrace_ops_init(&probe->ops);
4551                list_add(&probe->list, &tr->func_probes);
4552        }
4553
4554        acquire_probe_locked(probe);
4555
4556        mutex_unlock(&ftrace_lock);
4557
4558        /*
4559         * Note, there's a small window here in which func_hash->filter_hash
4560         * may be NULL or empty. Be careful when reading the loop.
4561         */
4562        mutex_lock(&probe->ops.func_hash->regex_lock);
4563
4564        orig_hash = &probe->ops.func_hash->filter_hash;
4565        old_hash = *orig_hash;
4566        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4567
4568        if (!hash) {
4569                ret = -ENOMEM;
4570                goto out;
4571        }
4572
4573        ret = ftrace_match_records(hash, glob, strlen(glob));
4574
4575        /* Nothing found? */
4576        if (!ret)
4577                ret = -EINVAL;
4578
4579        if (ret < 0)
4580                goto out;
4581
4582        size = 1 << hash->size_bits;
4583        for (i = 0; i < size; i++) {
4584                hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4585                        if (ftrace_lookup_ip(old_hash, entry->ip))
4586                                continue;
4587                        /*
4588                         * The caller might want to do something special
4589                         * for each function we find. We call the callback
4590                         * to give the caller an opportunity to do so.
4591                         */
4592                        if (probe_ops->init) {
4593                                ret = probe_ops->init(probe_ops, tr,
4594                                                      entry->ip, data,
4595                                                      &probe->data);
4596                                if (ret < 0) {
4597                                        if (probe_ops->free && count)
4598                                                probe_ops->free(probe_ops, tr,
4599                                                                0, probe->data);
4600                                        probe->data = NULL;
4601                                        goto out;
4602                                }
4603                        }
4604                        count++;
4605                }
4606        }
4607
4608        mutex_lock(&ftrace_lock);
4609
4610        if (!count) {
4611                /* Nothing was added? */
4612                ret = -EINVAL;
4613                goto out_unlock;
4614        }
4615
4616        ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4617                                              hash, 1);
4618        if (ret < 0)
4619                goto err_unlock;
4620
4621        /* One ref for each new function traced */
4622        probe->ref += count;
4623
4624        if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED))
4625                ret = ftrace_startup(&probe->ops, 0);
4626
4627 out_unlock:
4628        mutex_unlock(&ftrace_lock);
4629
4630        if (!ret)
4631                ret = count;
4632 out:
4633        mutex_unlock(&probe->ops.func_hash->regex_lock);
4634        free_ftrace_hash(hash);
4635
4636        release_probe(probe);
4637
4638        return ret;
4639
4640 err_unlock:
4641        if (!probe_ops->free || !count)
4642                goto out_unlock;
4643
4644        /* Failed to do the move, need to call the free functions */
4645        for (i = 0; i < size; i++) {
4646                hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4647                        if (ftrace_lookup_ip(old_hash, entry->ip))
4648                                continue;
4649                        probe_ops->free(probe_ops, tr, entry->ip, probe->data);
4650                }
4651        }
4652        goto out_unlock;
4653}
4654
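/*
 * A minimal sketch of a register_ftrace_function_probe() caller (the
 * "my_*" names and the "vfs_*" glob are illustrative). The func
 * callback runs with preemption disabled, as arranged by
 * function_trace_probe_call() above; init, free and print are optional:
 *
 *	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *				  struct trace_array *tr,
 *				  struct ftrace_probe_ops *ops, void *data)
 *	{
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func	= my_probe_func,
 *	};
 *
 *	ret = register_ftrace_function_probe("vfs_*", tr, &my_probe_ops, NULL);
 *
 * On success ret is the number of functions the probe was attached to;
 * a negative value reports the error.
 */
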
4655int
4656unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
4657                                      struct ftrace_probe_ops *probe_ops)
4658{
4659        struct ftrace_ops_hash old_hash_ops;
4660        struct ftrace_func_entry *entry;
4661        struct ftrace_func_probe *probe;
4662        struct ftrace_glob func_g;
4663        struct ftrace_hash **orig_hash;
4664        struct ftrace_hash *old_hash;
4665        struct ftrace_hash *hash = NULL;
4666        struct hlist_node *tmp;
4667        struct hlist_head hhd;
4668        char str[KSYM_SYMBOL_LEN];
4669        int count = 0;
4670        int i, ret = -ENODEV;
4671        int size;
4672
4673        if (!glob || !strlen(glob) || !strcmp(glob, "*"))
4674                func_g.search = NULL;
4675        else {
4676                int not;
4677
4678                func_g.type = filter_parse_regex(glob, strlen(glob),
4679                                                 &func_g.search, &not);
4680                func_g.len = strlen(func_g.search);
4681
4682                /* we do not support '!' for function probes */
4683                if (WARN_ON(not))
4684                        return -EINVAL;
4685        }
4686
4687        mutex_lock(&ftrace_lock);
4688        /* Check if the probe_ops is already registered */
4689        list_for_each_entry(probe, &tr->func_probes, list) {
4690                if (probe->probe_ops == probe_ops)
4691                        break;
4692        }
4693        if (&probe->list == &tr->func_probes)
4694                goto err_unlock_ftrace;
4695
4696        ret = -EINVAL;
4697        if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED))
4698                goto err_unlock_ftrace;
4699
4700        acquire_probe_locked(probe);
4701
4702        mutex_unlock(&ftrace_lock);
4703
4704        mutex_lock(&probe->ops.func_hash->regex_lock);
4705
4706        orig_hash = &probe->ops.func_hash->filter_hash;
4707        old_hash = *orig_hash;
4708
4709        if (ftrace_hash_empty(old_hash))
4710                goto out_unlock;
4711
4712        old_hash_ops.filter_hash = old_hash;
4713        /* Probes only have filters */
4714        old_hash_ops.notrace_hash = NULL;
4715
4716        ret = -ENOMEM;
4717        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4718        if (!hash)
4719                goto out_unlock;
4720
4721        INIT_HLIST_HEAD(&hhd);
4722
4723        size = 1 << hash->size_bits;
4724        for (i = 0; i < size; i++) {
4725                hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {
4726
4727                        if (func_g.search) {
4728                                kallsyms_lookup(entry->ip, NULL, NULL,
4729                                                NULL, str);
4730                                if (!ftrace_match(str, &func_g))
4731                                        continue;
4732                        }
4733                        count++;
4734                        remove_hash_entry(hash, entry);
4735                        hlist_add_head(&entry->hlist, &hhd);
4736                }
4737        }
4738
4739        /* Nothing found? */
4740        if (!count) {
4741                ret = -EINVAL;
4742                goto out_unlock;
4743        }
4744
4745        mutex_lock(&ftrace_lock);
4746
4747        WARN_ON(probe->ref < count);
4748
4749        probe->ref -= count;
4750
4751        if (ftrace_hash_empty(hash))
4752                ftrace_shutdown(&probe->ops, 0);
4753
4754        ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4755                                              hash, 1);
4756
4757        /* still need to update the function call sites */
4758        if (ftrace_enabled && !ftrace_hash_empty(hash))
4759                ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
4760                                       &old_hash_ops);
4761        synchronize_rcu();
4762
4763        hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
4764                hlist_del(&entry->hlist);
4765                if (probe_ops->free)
4766                        probe_ops->free(probe_ops, tr, entry->ip, probe->data);
4767                kfree(entry);
4768        }
4769        mutex_unlock(&ftrace_lock);
4770
4771 out_unlock:
4772        mutex_unlock(&probe->ops.func_hash->regex_lock);
4773        free_ftrace_hash(hash);
4774
4775        release_probe(probe);
4776
4777        return ret;
4778
4779 err_unlock_ftrace:
4780        mutex_unlock(&ftrace_lock);
4781        return ret;
4782}
4783
4784void clear_ftrace_function_probes(struct trace_array *tr)
4785{
4786        struct ftrace_func_probe *probe, *n;
4787
4788        list_for_each_entry_safe(probe, n, &tr->func_probes, list)
4789                unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops);
4790}
4791
4792static LIST_HEAD(ftrace_commands);
4793static DEFINE_MUTEX(ftrace_cmd_mutex);
4794
4795/*
4796 * Currently we only register ftrace commands from __init, so mark this
4797 * __init too.
4798 */
4799__init int register_ftrace_command(struct ftrace_func_command *cmd)
4800{
4801        struct ftrace_func_command *p;
4802        int ret = 0;
4803
4804        mutex_lock(&ftrace_cmd_mutex);
4805        list_for_each_entry(p, &ftrace_commands, list) {
4806                if (strcmp(cmd->name, p->name) == 0) {
4807                        ret = -EBUSY;
4808                        goto out_unlock;
4809                }
4810        }
4811        list_add(&cmd->list, &ftrace_commands);
4812 out_unlock:
4813        mutex_unlock(&ftrace_cmd_mutex);
4814
4815        return ret;
4816}
4817
4818/*
4819 * Currently we only unregister ftrace commands from __init, so mark
4820 * this __init too.
4821 */
4822__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
4823{
4824        struct ftrace_func_command *p, *n;
4825        int ret = -ENODEV;
4826
4827        mutex_lock(&ftrace_cmd_mutex);
4828        list_for_each_entry_safe(p, n, &ftrace_commands, list) {
4829                if (strcmp(cmd->name, p->name) == 0) {
4830                        ret = 0;
4831                        list_del_init(&p->list);
4832                        goto out_unlock;
4833                }
4834        }
4835 out_unlock:
4836        mutex_unlock(&ftrace_cmd_mutex);
4837
4838        return ret;
4839}
4840
4841static int ftrace_process_regex(struct ftrace_iterator *iter,
4842                                char *buff, int len, int enable)
4843{
4844        struct ftrace_hash *hash = iter->hash;
4845        struct trace_array *tr = iter->ops->private;
4846        char *func, *command, *next = buff;
4847        struct ftrace_func_command *p;
4848        int ret = -EINVAL;
4849
4850        func = strsep(&next, ":");
4851
4852        if (!next) {
4853                ret = ftrace_match_records(hash, func, len);
4854                if (!ret)
4855                        ret = -EINVAL;
4856                if (ret < 0)
4857                        return ret;
4858                return 0;
4859        }
4860
4861        /* command found */
4862
4863        command = strsep(&next, ":");
4864
4865        mutex_lock(&ftrace_cmd_mutex);
4866        list_for_each_entry(p, &ftrace_commands, list) {
4867                if (strcmp(p->name, command) == 0) {
4868                        ret = p->func(tr, hash, func, command, next, enable);
4869                        goto out_unlock;
4870                }
4871        }
4872 out_unlock:
4873        mutex_unlock(&ftrace_cmd_mutex);
4874
4875        return ret;
4876}
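
/*
 * For reference, the strings parsed by ftrace_process_regex() above come
 * from writes to the set_ftrace_filter and set_ftrace_notrace tracefs
 * files: a plain glob selects functions, while "<glob>:<cmd>[:<param>]"
 * dispatches to a registered command, e.g. (assuming the usual tracefs
 * mount point):
 *
 *	echo 'vfs_read*' > /sys/kernel/tracing/set_ftrace_filter
 *	echo '*:mod:ext4' > /sys/kernel/tracing/set_ftrace_filter
 */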
4877
4878static ssize_t
4879ftrace_regex_write(struct file *file, const char __user *ubuf,
4880                   size_t cnt, loff_t *ppos, int enable)
4881{
4882        struct ftrace_iterator *iter;
4883        struct trace_parser *parser;
4884        ssize_t ret, read;
4885
4886        if (!cnt)
4887                return 0;
4888
4889        if (file->f_mode & FMODE_READ) {
4890                struct seq_file *m = file->private_data;
4891                iter = m->private;
4892        } else
4893                iter = file->private_data;
4894
4895        if (unlikely(ftrace_disabled))
4896                return -ENODEV;
4897
4898        /* iter->hash is a local copy, so we don't need regex_lock */
4899
4900        parser = &iter->parser;
4901        read = trace_get_user(parser, ubuf, cnt, ppos);
4902
4903        if (read >= 0 && trace_parser_loaded(parser) &&
4904            !trace_parser_cont(parser)) {
4905                ret = ftrace_process_regex(iter, parser->buffer,
4906                                           parser->idx, enable);
4907                trace_parser_clear(parser);
4908                if (ret < 0)
4909                        goto out;
4910        }
4911
4912        ret = read;
4913 out:
4914        return ret;
4915}
4916
4917ssize_t
4918ftrace_filter_write(struct file *file, const char __user *ubuf,
4919                    size_t cnt, loff_t *ppos)
4920{
4921        return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
4922}
4923
4924ssize_t
4925ftrace_notrace_write(struct file *file, const char __user *ubuf,
4926                     size_t cnt, loff_t *ppos)
4927{
4928        return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
4929}
4930
4931static int
4932ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
4933{
4934        struct ftrace_func_entry *entry;
4935
4936        if (!ftrace_location(ip))
4937                return -EINVAL;
4938
4939        if (remove) {
4940                entry = ftrace_lookup_ip(hash, ip);
4941                if (!entry)
4942                        return -ENOENT;
4943                free_hash_entry(hash, entry);
4944                return 0;
4945        }
4946
4947        return add_hash_entry(hash, ip);
4948}
4949
4950static int
4951ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
4952                unsigned long ip, int remove, int reset, int enable)
4953{
4954        struct ftrace_hash **orig_hash;
4955        struct ftrace_hash *hash;
4956        int ret;
4957
4958        if (unlikely(ftrace_disabled))
4959                return -ENODEV;
4960
4961        mutex_lock(&ops->func_hash->regex_lock);
4962
4963        if (enable)
4964                orig_hash = &ops->func_hash->filter_hash;
4965        else
4966                orig_hash = &ops->func_hash->notrace_hash;
4967
4968        if (reset)
4969                hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4970        else
4971                hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
4972
4973        if (!hash) {
4974                ret = -ENOMEM;
4975                goto out_regex_unlock;
4976        }
4977
4978        if (buf && !ftrace_match_records(hash, buf, len)) {
4979                ret = -EINVAL;
4980                goto out_regex_unlock;
4981        }
4982        if (ip) {
4983                ret = ftrace_match_addr(hash, ip, remove);
4984                if (ret < 0)
4985                        goto out_regex_unlock;
4986        }
4987
4988        mutex_lock(&ftrace_lock);
4989        ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
4990        mutex_unlock(&ftrace_lock);
4991
4992 out_regex_unlock:
4993        mutex_unlock(&ops->func_hash->regex_lock);
4994
4995        free_ftrace_hash(hash);
4996        return ret;
4997}
4998
4999static int
5000ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
5001                int reset, int enable)
5002{
5003        return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable);
5004}
5005
5006#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
5007
5008struct ftrace_direct_func {
5009        struct list_head        next;
5010        unsigned long           addr;
5011        int                     count;
5012};
5013
5014static LIST_HEAD(ftrace_direct_funcs);
5015
5016/**
5017 * ftrace_find_direct_func - test whether an address is a registered direct caller
5018 * @addr: The address of a registered direct caller
5019 *
5020 * This searches to see if an ftrace direct caller has been registered
5021 * at a specific address, and if so, it returns a descriptor for it.
5022 *
5023 * This can be used by architecture code to see if an address is
5024 * a direct caller (trampoline) attached to a fentry/mcount location.
5025 * This is useful for the function_graph tracer, as it may need to
5026 * do adjustments if it traced a location that also has a direct
5027 * trampoline attached to it.
5028 */
5029struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
5030{
5031        struct ftrace_direct_func *entry;
5032        bool found = false;
5033
5034        /* May be called by fgraph trampoline (protected by rcu tasks) */
5035        list_for_each_entry_rcu(entry, &ftrace_direct_funcs, next) {
5036                if (entry->addr == addr) {
5037                        found = true;
5038                        break;
5039                }
5040        }
5041        if (found)
5042                return entry;
5043
5044        return NULL;
5045}
5046
5047static struct ftrace_direct_func *ftrace_alloc_direct_func(unsigned long addr)
5048{
5049        struct ftrace_direct_func *direct;
5050
5051        direct = kmalloc(sizeof(*direct), GFP_KERNEL);
5052        if (!direct)
5053                return NULL;
5054        direct->addr = addr;
5055        direct->count = 0;
5056        list_add_rcu(&direct->next, &ftrace_direct_funcs);
5057        ftrace_direct_func_count++;
5058        return direct;
5059}
5060
5061/**
5062 * register_ftrace_direct - Call a custom trampoline directly
5063 * @ip: The address of the nop at the beginning of a function
5064 * @addr: The address of the trampoline to call at @ip
5065 *
5066 * This is used to connect a direct call from the nop location (@ip)
5067 * at the start of ftrace traced functions. The location that it calls
5068 * (@addr) must be able to handle a direct call, save the parameters
5069 * of the function being traced, and restore them (or inject new ones
5070 * if needed) before returning.
5071 *
5072 * Returns:
5073 *  0 on success
5074 *  -EBUSY - Another direct function is already attached (there can be only one)
5075 *  -ENODEV - @ip does not point to a ftrace nop location (or not supported)
5076 *  -ENOMEM - There was an allocation failure.
5077 */
5078int register_ftrace_direct(unsigned long ip, unsigned long addr)
5079{
5080        struct ftrace_direct_func *direct;
5081        struct ftrace_func_entry *entry;
5082        struct ftrace_hash *free_hash = NULL;
5083        struct dyn_ftrace *rec;
5084        int ret = -EBUSY;
5085
5086        mutex_lock(&direct_mutex);
5087
5088        /* See if there's a direct function at @ip already */
5089        if (ftrace_find_rec_direct(ip))
5090                goto out_unlock;
5091
5092        ret = -ENODEV;
5093        rec = lookup_rec(ip, ip);
5094        if (!rec)
5095                goto out_unlock;
5096
5097        /*
5098         * Check whether the rec claims to have a direct call even
5099         * though we did not find one above.
5100         */
5101        if (WARN_ON(rec->flags & FTRACE_FL_DIRECT))
5102                goto out_unlock;
5103
5104        /* Make sure the ip points to the exact record */
5105        if (ip != rec->ip) {
5106                ip = rec->ip;
5107                /* Need to check this ip for a direct. */
5108                if (ftrace_find_rec_direct(ip))
5109                        goto out_unlock;
5110        }
5111
5112        ret = -ENOMEM;
5113        if (ftrace_hash_empty(direct_functions) ||
5114            direct_functions->count > 2 * (1 << direct_functions->size_bits)) {
5115                struct ftrace_hash *new_hash;
5116                int size = ftrace_hash_empty(direct_functions) ? 0 :
5117                        direct_functions->count + 1;
5118
5119                if (size < 32)
5120                        size = 32;
5121
5122                new_hash = dup_hash(direct_functions, size);
5123                if (!new_hash)
5124                        goto out_unlock;
5125
5126                free_hash = direct_functions;
5127                direct_functions = new_hash;
5128        }
5129
5130        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
5131        if (!entry)
5132                goto out_unlock;
5133
5134        direct = ftrace_find_direct_func(addr);
5135        if (!direct) {
5136                direct = ftrace_alloc_direct_func(addr);
5137                if (!direct) {
5138                        kfree(entry);
5139                        goto out_unlock;
5140                }
5141        }
5142
5143        entry->ip = ip;
5144        entry->direct = addr;
5145        __add_hash_entry(direct_functions, entry);
5146
5147        ret = ftrace_set_filter_ip(&direct_ops, ip, 0, 0);
5148        if (ret)
5149                remove_hash_entry(direct_functions, entry);
5150
5151        if (!ret && !(direct_ops.flags & FTRACE_OPS_FL_ENABLED)) {
5152                ret = register_ftrace_function(&direct_ops);
5153                if (ret)
5154                        ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
5155        }
5156
5157        if (ret) {
5158                kfree(entry);
5159                if (!direct->count) {
5160                        list_del_rcu(&direct->next);
5161                        synchronize_rcu_tasks();
5162                        kfree(direct);
5163                        if (free_hash)
5164                                free_ftrace_hash(free_hash);
5165                        free_hash = NULL;
5166                        ftrace_direct_func_count--;
5167                }
5168        } else {
5169                direct->count++;
5170        }
5171 out_unlock:
5172        mutex_unlock(&direct_mutex);
5173
5174        if (free_hash) {
5175                synchronize_rcu_tasks();
5176                free_ftrace_hash(free_hash);
5177        }
5178
5179        return ret;
5180}
5181EXPORT_SYMBOL_GPL(register_ftrace_direct);
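
/*
 * A minimal usage sketch. "my_tramp" stands for an architecture-specific
 * assembly trampoline that saves and restores the traced function's
 * arguments (see samples/ftrace/ for complete examples):
 *
 *	extern void my_tramp(void);
 *
 *	static int __init my_direct_init(void)
 *	{
 *		return register_ftrace_direct((unsigned long)wake_up_process,
 *					      (unsigned long)my_tramp);
 *	}
 *
 * Teardown is the matching unregister_ftrace_direct() call with the
 * same @ip/@addr pair.
 */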
5182
5183static struct ftrace_func_entry *find_direct_entry(unsigned long *ip,
5184                                                   struct dyn_ftrace **recp)
5185{
5186        struct ftrace_func_entry *entry;
5187        struct dyn_ftrace *rec;
5188
5189        rec = lookup_rec(*ip, *ip);
5190        if (!rec)
5191                return NULL;
5192
5193        entry = __ftrace_lookup_ip(direct_functions, rec->ip);
5194        if (!entry) {
5195                WARN_ON(rec->flags & FTRACE_FL_DIRECT);
5196                return NULL;
5197        }
5198
5199        WARN_ON(!(rec->flags & FTRACE_FL_DIRECT));
5200
5201        /* The passed-in ip just needs to be somewhere on the call site */
5202        *ip = rec->ip;
5203
5204        if (recp)
5205                *recp = rec;
5206
5207        return entry;
5208}
5209
5210int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
5211{
5212        struct ftrace_direct_func *direct;
5213        struct ftrace_func_entry *entry;
5214        int ret = -ENODEV;
5215
5216        mutex_lock(&direct_mutex);
5217
5218        entry = find_direct_entry(&ip, NULL);
5219        if (!entry)
5220                goto out_unlock;
5221
5222        if (direct_functions->count == 1)
5223                unregister_ftrace_function(&direct_ops);
5224
5225        ret = ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
5226
5227        WARN_ON(ret);
5228
5229        remove_hash_entry(direct_functions, entry);
5230
5231        direct = ftrace_find_direct_func(addr);
5232        if (!WARN_ON(!direct)) {
5233                /* This is the good path (see the ! before WARN) */
5234                direct->count--;
5235                WARN_ON(direct->count < 0);
5236                if (!direct->count) {
5237                        list_del_rcu(&direct->next);
5238                        synchronize_rcu_tasks();
5239                        kfree(direct);
5240                        kfree(entry);
5241                        ftrace_direct_func_count--;
5242                }
5243        }
5244 out_unlock:
5245        mutex_unlock(&direct_mutex);
5246
5247        return ret;
5248}
5249EXPORT_SYMBOL_GPL(unregister_ftrace_direct);
5250
5251static struct ftrace_ops stub_ops = {
5252        .func           = ftrace_stub,
5253};
5254
5255/**
5256 * ftrace_modify_direct_caller - modify ftrace nop directly
5257 * @entry: The ftrace hash entry of the direct helper for @rec
5258 * @rec: The record representing the function site to patch
5259 * @old_addr: The location that the site at @rec->ip currently calls
5260 * @new_addr: The location that the site at @rec->ip should call
5261 *
5262 * An architecture may override this weak function to optimize the
5263 * changing of the direct callback on an ftrace nop location.
5264 * This is called with the ftrace_lock mutex held, and no other
5265 * ftrace callbacks are on the associated record (@rec). Thus,
5266 * it is safe to modify the ftrace record, where it should be
5267 * currently calling @old_addr directly, to call @new_addr.
5268 *
5269 * Safety checks should be made to ensure that the code at
5270 * @rec->ip is currently calling @old_addr, and the override must
5271 * also update entry->direct to @new_addr.
5272 */
5273int __weak ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
5274                                       struct dyn_ftrace *rec,
5275                                       unsigned long old_addr,
5276                                       unsigned long new_addr)
5277{
5278        unsigned long ip = rec->ip;
5279        int ret;
5280
5281        /*
5282         * The ftrace_lock was taken to determine whether the record
5283         * has more than one registered user. If it does, we need to
5284         * prevent that from changing in order to do the quick switch.
5285         * If only a direct caller is attached, this function is called
5286         * instead. It can deal with other callers being attached to
5287         * the rec we care about, and since it uses standard ftrace
5288         * calls that themselves take the ftrace_lock mutex, we need
5289         * to release the lock first.
5290         */
5291        mutex_unlock(&ftrace_lock);
5292
5293        /*
5294         * By setting a stub function at the same address, we force
5295         * the code to call the iterator and the direct_ops helper.
5296         * This means that @ip does not call the direct call, and
5297         * we can simply modify it.
5298         */
5299        ret = ftrace_set_filter_ip(&stub_ops, ip, 0, 0);
5300        if (ret)
5301                goto out_lock;
5302
5303        ret = register_ftrace_function(&stub_ops);
5304        if (ret) {
5305                ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
5306                goto out_lock;
5307        }
5308
5309        entry->direct = new_addr;
5310
5311        /*
5312         * By removing the stub, we put back the direct call, calling
5313         * the @new_addr.
5314         */
5315        unregister_ftrace_function(&stub_ops);
5316        ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
5317
5318 out_lock:
5319        mutex_lock(&ftrace_lock);
5320
5321        return ret;
5322}
5323
5324/**
5325 * modify_ftrace_direct - Modify an existing direct call to call something else
5326 * @ip: The instruction pointer to modify
5327 * @old_addr: The address that the current @ip calls directly
5328 * @new_addr: The address that the @ip should call
5329 *
5330 * This modifies a ftrace direct caller at an instruction pointer without
5331 * having to disable it first. The direct call will switch over to the
5332 * @new_addr without missing anything.
5333 *
5334 * Returns: zero on success. Non-zero on error, which includes:
5335 *  -ENODEV : the @ip given has no direct caller attached
5336 *  -EINVAL : the @old_addr does not match the current direct caller
5337 */
5338int modify_ftrace_direct(unsigned long ip,
5339                         unsigned long old_addr, unsigned long new_addr)
5340{
5341        struct ftrace_direct_func *direct, *new_direct = NULL;
5342        struct ftrace_func_entry *entry;
5343        struct dyn_ftrace *rec;
5344        int ret = -ENODEV;
5345
5346        mutex_lock(&direct_mutex);
5347
5348        mutex_lock(&ftrace_lock);
5349        entry = find_direct_entry(&ip, &rec);
5350        if (!entry)
5351                goto out_unlock;
5352
5353        ret = -EINVAL;
5354        if (entry->direct != old_addr)
5355                goto out_unlock;
5356
5357        direct = ftrace_find_direct_func(old_addr);
5358        if (WARN_ON(!direct))
5359                goto out_unlock;
5360        if (direct->count > 1) {
5361                ret = -ENOMEM;
5362                new_direct = ftrace_alloc_direct_func(new_addr);
5363                if (!new_direct)
5364                        goto out_unlock;
5365                direct->count--;
5366                new_direct->count++;
5367        } else {
5368                direct->addr = new_addr;
5369        }
5370
5371        /*
5372         * If there's no other ftrace callback on the rec->ip location,
5373         * then it can be changed directly by the architecture.
5374         * If there is another caller, then we just need to change the
5375         * direct caller helper to point to @new_addr.
5376         */
5377        if (ftrace_rec_count(rec) == 1) {
5378                ret = ftrace_modify_direct_caller(entry, rec, old_addr, new_addr);
5379        } else {
5380                entry->direct = new_addr;
5381                ret = 0;
5382        }
5383
5384        if (unlikely(ret && new_direct)) {
5385                direct->count++;
5386                list_del_rcu(&new_direct->next);
5387                synchronize_rcu_tasks();
5388                kfree(new_direct);
5389                ftrace_direct_func_count--;
5390        }
5391
5392 out_unlock:
5393        mutex_unlock(&ftrace_lock);
5394        mutex_unlock(&direct_mutex);
5395        return ret;
5396}
5397EXPORT_SYMBOL_GPL(modify_ftrace_direct);
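
/*
 * Usage sketch with hypothetical trampolines my_tramp1/my_tramp2: once
 * register_ftrace_direct(ip, (unsigned long)my_tramp1) has succeeded,
 * the call site can be retargeted without a window where no trampoline
 * is attached:
 *
 *	ret = modify_ftrace_direct(ip, (unsigned long)my_tramp1,
 *				   (unsigned long)my_tramp2);
 */
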
5398#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
5399
5400/**
5401 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
5402 * @ops: the ops to set the filter with
5403 * @ip: the address to add to or remove from the filter.
5404 * @remove: non-zero to remove the ip from the filter
5405 * @reset: non-zero to reset all filters before applying this filter.
5406 *
5407 * Filters denote which functions should be enabled when tracing is enabled.
5408 * If @ip is NULL, it fails to update the filter.
5409 */
5410int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
5411                         int remove, int reset)
5412{
5413        ftrace_ops_init(ops);
5414        return ftrace_set_addr(ops, ip, remove, reset, 1);
5415}
5416EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
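
/*
 * Sketch, with my_ops a hypothetical initialized struct ftrace_ops:
 * filtering by address rather than by name glob:
 *
 *	unsigned long ip = (unsigned long)schedule;
 *
 *	ret = ftrace_set_filter_ip(&my_ops, ip, 0, 1);	// reset, then add ip
 *	...
 *	ret = ftrace_set_filter_ip(&my_ops, ip, 1, 0);	// remove ip again
 */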
5417
5418/**
5419 * ftrace_ops_set_global_filter - setup ops to use global filters
5420 * @ops: the ops which will use the global filters
5421 *
5422 * ftrace users who need global function trace filtering should call this.
5423 * It can set the global filter only if @ops has not been initialized before.
5424 */
5425void ftrace_ops_set_global_filter(struct ftrace_ops *ops)
5426{
5427        if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
5428                return;
5429
5430        ftrace_ops_init(ops);
5431        ops->func_hash = &global_ops.local_hash;
5432}
5433EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter);
5434
5435static int
5436ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
5437                 int reset, int enable)
5438{
5439        return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
5440}
5441
5442/**
5443 * ftrace_set_filter - set a function to filter on in ftrace
5444 * @ops: the ops to set the filter with
5445 * @buf: the string that holds the function filter text.
5446 * @len: the length of the string.
5447 * @reset: non-zero to reset all filters before applying this filter.
5448 *
5449 * Filters denote which functions should be enabled when tracing is enabled.
5450 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
5451 */
5452int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
5453                       int len, int reset)
5454{
5455        ftrace_ops_init(ops);
5456        return ftrace_set_regex(ops, buf, len, reset, 1);
5457}
5458EXPORT_SYMBOL_GPL(ftrace_set_filter);
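
/*
 * Sketch (my_ops and my_callback are hypothetical; my_callback must
 * match ftrace_func_t): restrict an ops to a glob before registering
 * it, so the callback only fires for matching functions:
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_callback,
 *	};
 *
 *	ret = ftrace_set_filter(&my_ops, "vfs_read*", strlen("vfs_read*"), 1);
 *	if (!ret)
 *		ret = register_ftrace_function(&my_ops);
 */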
5459
5460/**
5461 * ftrace_set_notrace - set a function to not trace in ftrace
5462 * @ops: the ops to set the notrace filter with
5463 * @buf: the string that holds the function notrace text.
5464 * @len: the length of the string.
5465 * @reset: non-zero to reset all filters before applying this filter.
5466 *
5467 * Notrace Filters denote which functions should not be enabled when tracing
5468 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
5469 * for tracing.
5470 */
5471int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
5472                        int len, int reset)
5473{
5474        ftrace_ops_init(ops);
5475        return ftrace_set_regex(ops, buf, len, reset, 0);
5476}
5477EXPORT_SYMBOL_GPL(ftrace_set_notrace);

5478/**
5479 * ftrace_set_global_filter - set a function to filter on with global tracers
5480 * @buf: the string that holds the function filter text.
5481 * @len: the length of the string.
5482 * @reset: non-zero to reset all filters before applying this filter.
5483 *
5484 * Filters denote which functions should be enabled when tracing is enabled.
5485 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
5486 */
5487void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
5488{
5489        ftrace_set_regex(&global_ops, buf, len, reset, 1);
5490}
5491EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
5492
5493/**
5494 * ftrace_set_global_notrace - set a function to not trace with global tracers
5495 * @buf: the string that holds the function notrace text.
5496 * @len: the length of the string.
5497 * @reset: non-zero to reset all filters before applying this filter.
5498 *
5499 * Notrace Filters denote which functions should not be enabled when tracing
5500 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
5501 * for tracing.
5502 */
5503void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
5504{
5505        ftrace_set_regex(&global_ops, buf, len, reset, 0);
5506}
5507EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
5508
5509/*
5510 * command line interface to allow users to set filters on boot up.
5511 */
5512#define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
5513static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
5514static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
5515
5516/* Used by the function selftest to skip the test when a filter is set */
5517bool ftrace_filter_param __initdata;
5518
5519static int __init set_ftrace_notrace(char *str)
5520{
5521        ftrace_filter_param = true;
5522        strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
5523        return 1;
5524}
5525__setup("ftrace_notrace=", set_ftrace_notrace);
5526
5527static int __init set_ftrace_filter(char *str)
5528{
5529        ftrace_filter_param = true;
5530        strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
5531        return 1;
5532}
5533__setup("ftrace_filter=", set_ftrace_filter);
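
/*
 * Example boot parameters (the comma-separated lists are split up by
 * ftrace_set_early_filter() below):
 *
 *	ftrace_filter=vfs_read,vfs_write ftrace_notrace=rcu_*
 */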
5534
5535#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5536static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
5537static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
5538static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
5539
5540static int __init set_graph_function(char *str)
5541{
5542        strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
5543        return 1;
5544}
5545__setup("ftrace_graph_filter=", set_graph_function);
5546
5547static int __init set_graph_notrace_function(char *str)
5548{
5549        strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
5550        return 1;
5551}
5552__setup("ftrace_graph_notrace=", set_graph_notrace_function);
5553
5554static int __init set_graph_max_depth_function(char *str)
5555{
5556        if (!str)
5557                return 0;
5558        fgraph_max_depth = simple_strtoul(str, NULL, 0);
5559        return 1;
5560}
5561__setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
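
/*
 * Example boot parameters for the function graph tracer:
 *
 *	ftrace_graph_filter=do_sys_open,kfree ftrace_graph_max_depth=5
 */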
5562
5563static void __init set_ftrace_early_graph(char *buf, int enable)
5564{
5565        int ret;
5566        char *func;
5567        struct ftrace_hash *hash;
5568
5569        hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
5570        if (MEM_FAIL(!hash, "Failed to allocate hash\n"))
5571                return;
5572
5573        while (buf) {
5574                func = strsep(&buf, ",");
5575                /* we allow only one expression at a time */
5576                ret = ftrace_graph_set_hash(hash, func);
5577                if (ret)
5578                        printk(KERN_DEBUG "ftrace: function %s not traceable\n", func);
5580        }
5581
5582        if (enable)
5583                ftrace_graph_hash = hash;
5584        else
5585                ftrace_graph_notrace_hash = hash;
5586}
5587#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
5588
5589void __init
5590ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
5591{
5592        char *func;
5593
5594        ftrace_ops_init(ops);
5595
5596        while (buf) {
5597                func = strsep(&buf, ",");
5598                ftrace_set_regex(ops, func, strlen(func), 0, enable);
5599        }
5600}
5601
5602static void __init set_ftrace_early_filters(void)
5603{
5604        if (ftrace_filter_buf[0])
5605                ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
5606        if (ftrace_notrace_buf[0])
5607                ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
5608#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5609        if (ftrace_graph_buf[0])
5610                set_ftrace_early_graph(ftrace_graph_buf, 1);
5611        if (ftrace_graph_notrace_buf[0])
5612                set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
5613#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
5614}
5615
5616int ftrace_regex_release(struct inode *inode, struct file *file)
5617{
5618        struct seq_file *m = (struct seq_file *)file->private_data;
5619        struct ftrace_iterator *iter;
5620        struct ftrace_hash **orig_hash;
5621        struct trace_parser *parser;
5622        int filter_hash;
5623
5624        if (file->f_mode & FMODE_READ) {
5625                iter = m->private;
5626                seq_release(inode, file);
5627        } else
5628                iter = file->private_data;
5629
5630        parser = &iter->parser;
5631        if (trace_parser_loaded(parser)) {
5632                int enable = !(iter->flags & FTRACE_ITER_NOTRACE);
5633
5634                ftrace_process_regex(iter, parser->buffer,
5635                                     parser->idx, enable);
5636        }
5637
5638        trace_parser_put(parser);
5639
5640        mutex_lock(&iter->ops->func_hash->regex_lock);
5641
5642        if (file->f_mode & FMODE_WRITE) {
5643                filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
5644
5645                if (filter_hash) {
5646                        orig_hash = &iter->ops->func_hash->filter_hash;
5647                        if (iter->tr && !list_empty(&iter->tr->mod_trace))
5648                                iter->hash->flags |= FTRACE_HASH_FL_MOD;
5649                } else
5650                        orig_hash = &iter->ops->func_hash->notrace_hash;
5651
5652                mutex_lock(&ftrace_lock);
5653                ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
5654                                                      iter->hash, filter_hash);
5655                mutex_unlock(&ftrace_lock);
5656        } else {
5657                /* For read only, the hash is the ops hash */
5658                iter->hash = NULL;
5659        }
5660
5661        mutex_unlock(&iter->ops->func_hash->regex_lock);
5662        free_ftrace_hash(iter->hash);
5663        if (iter->tr)
5664                trace_array_put(iter->tr);
5665        kfree(iter);
5666
5667        return 0;
5668}
5669
5670static const struct file_operations ftrace_avail_fops = {
5671        .open = ftrace_avail_open,
5672        .read = seq_read,
5673        .llseek = seq_lseek,
5674        .release = seq_release_private,
5675};
5676
5677static const struct file_operations ftrace_enabled_fops = {
5678        .open = ftrace_enabled_open,
5679        .read = seq_read,
5680        .llseek = seq_lseek,
5681        .release = seq_release_private,
5682};
5683
5684static const struct file_operations ftrace_filter_fops = {
5685        .open = ftrace_filter_open,
5686        .read = seq_read,
5687        .write = ftrace_filter_write,
5688        .llseek = tracing_lseek,
5689        .release = ftrace_regex_release,
5690};
5691
5692static const struct file_operations ftrace_notrace_fops = {
5693        .open = ftrace_notrace_open,
5694        .read = seq_read,
5695        .write = ftrace_notrace_write,
5696        .llseek = tracing_lseek,
5697        .release = ftrace_regex_release,
5698};
5699
5700#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5701
5702static DEFINE_MUTEX(graph_lock);
5703
5704struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH;
5705struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH;
5706
5707enum graph_filter_type {
5708        GRAPH_FILTER_NOTRACE    = 0,
5709        GRAPH_FILTER_FUNCTION,
5710};
5711
5712#define FTRACE_GRAPH_EMPTY      ((void *)1)
5713
5714struct ftrace_graph_data {
5715        struct ftrace_hash              *hash;
5716        struct ftrace_func_entry        *entry;
5717        int                             idx;   /* for hash table iteration */
5718        enum graph_filter_type          type;
5719        struct ftrace_hash              *new_hash;
5720        const struct seq_operations     *seq_ops;
5721        struct trace_parser             parser;
5722};
5723
5724static void *
5725__g_next(struct seq_file *m, loff_t *pos)
5726{
5727        struct ftrace_graph_data *fgd = m->private;
5728        struct ftrace_func_entry *entry = fgd->entry;
5729        struct hlist_head *head;
5730        int i, idx = fgd->idx;
5731
5732        if (*pos >= fgd->hash->count)
5733                return NULL;
5734
5735        if (entry) {
5736                hlist_for_each_entry_continue(entry, hlist) {
5737                        fgd->entry = entry;
5738                        return entry;
5739                }
5740
5741                idx++;
5742        }
5743
5744        for (i = idx; i < 1 << fgd->hash->size_bits; i++) {
5745                head = &fgd->hash->buckets[i];
5746                hlist_for_each_entry(entry, head, hlist) {
5747                        fgd->entry = entry;
5748                        fgd->idx = i;
5749                        return entry;
5750                }
5751        }
5752        return NULL;
5753}
5754
5755static void *
5756g_next(struct seq_file *m, void *v, loff_t *pos)
5757{
5758        (*pos)++;
5759        return __g_next(m, pos);
5760}
5761
5762static void *g_start(struct seq_file *m, loff_t *pos)
5763{
5764        struct ftrace_graph_data *fgd = m->private;
5765
5766        mutex_lock(&graph_lock);
5767
5768        if (fgd->type == GRAPH_FILTER_FUNCTION)
5769                fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
5770                                        lockdep_is_held(&graph_lock));
5771        else
5772                fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
5773                                        lockdep_is_held(&graph_lock));
5774
5775        /* Nothing to show; tell g_show to print that all functions are enabled */
5776        if (ftrace_hash_empty(fgd->hash) && !*pos)
5777                return FTRACE_GRAPH_EMPTY;
5778
5779        fgd->idx = 0;
5780        fgd->entry = NULL;
5781        return __g_next(m, pos);
5782}
5783
5784static void g_stop(struct seq_file *m, void *p)
5785{
5786        mutex_unlock(&graph_lock);
5787}
5788
5789static int g_show(struct seq_file *m, void *v)
5790{
5791        struct ftrace_func_entry *entry = v;
5792
5793        if (!entry)
5794                return 0;
5795
5796        if (entry == FTRACE_GRAPH_EMPTY) {
5797                struct ftrace_graph_data *fgd = m->private;
5798
5799                if (fgd->type == GRAPH_FILTER_FUNCTION)
5800                        seq_puts(m, "#### all functions enabled ####\n");
5801                else
5802                        seq_puts(m, "#### no functions disabled ####\n");
5803                return 0;
5804        }
5805
5806        seq_printf(m, "%ps\n", (void *)entry->ip);
5807
5808        return 0;
5809}
5810
5811static const struct seq_operations ftrace_graph_seq_ops = {
5812        .start = g_start,
5813        .next = g_next,
5814        .stop = g_stop,
5815        .show = g_show,
5816};
5817
5818static int
5819__ftrace_graph_open(struct inode *inode, struct file *file,
5820                    struct ftrace_graph_data *fgd)
5821{
5822        int ret;
5823        struct ftrace_hash *new_hash = NULL;
5824
5825        ret = security_locked_down(LOCKDOWN_TRACEFS);
5826        if (ret)
5827                return ret;
5828
5829        if (file->f_mode & FMODE_WRITE) {
5830                const int size_bits = FTRACE_HASH_DEFAULT_BITS;
5831
5832                if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX))
5833                        return -ENOMEM;
5834
5835                if (file->f_flags & O_TRUNC)
5836                        new_hash = alloc_ftrace_hash(size_bits);
5837                else
5838                        new_hash = alloc_and_copy_ftrace_hash(size_bits,
5839                                                              fgd->hash);
5840                if (!new_hash) {
5841                        ret = -ENOMEM;
5842                        goto out;
5843                }
5844        }
5845
5846        if (file->f_mode & FMODE_READ) {
5847                ret = seq_open(file, &ftrace_graph_seq_ops);
5848                if (!ret) {
5849                        struct seq_file *m = file->private_data;
5850                        m->private = fgd;
5851                } else {
5852                        /* Failed */
5853                        free_ftrace_hash(new_hash);
5854                        new_hash = NULL;
5855                }
5856        } else
5857                file->private_data = fgd;
5858
5859out:
5860        if (ret < 0 && file->f_mode & FMODE_WRITE)
5861                trace_parser_put(&fgd->parser);
5862
5863        fgd->new_hash = new_hash;
5864
5865        /*
5866         * All uses of fgd->hash must be taken with the graph_lock
5867         * held. The graph_lock is going to be released, so force
5868         * fgd->hash to be reinitialized when it is taken again.
5869         */
5870        fgd->hash = NULL;
5871
5872        return ret;
5873}
5874
5875static int
5876ftrace_graph_open(struct inode *inode, struct file *file)
5877{
5878        struct ftrace_graph_data *fgd;
5879        int ret;
5880
5881        if (unlikely(ftrace_disabled))
5882                return -ENODEV;
5883
5884        fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
5885        if (fgd == NULL)
5886                return -ENOMEM;
5887
5888        mutex_lock(&graph_lock);
5889
5890        fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
5891                                        lockdep_is_held(&graph_lock));
5892        fgd->type = GRAPH_FILTER_FUNCTION;
5893        fgd->seq_ops = &ftrace_graph_seq_ops;
5894
5895        ret = __ftrace_graph_open(inode, file, fgd);
5896        if (ret < 0)
5897                kfree(fgd);
5898
5899        mutex_unlock(&graph_lock);
5900        return ret;
5901}
5902
5903static int
5904ftrace_graph_notrace_open(struct inode *inode, struct file *file)
5905{
5906        struct ftrace_graph_data *fgd;
5907        int ret;
5908
5909        if (unlikely(ftrace_disabled))
5910                return -ENODEV;
5911
5912        fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
5913        if (fgd == NULL)
5914                return -ENOMEM;
5915
5916        mutex_lock(&graph_lock);
5917
5918        fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
5919                                        lockdep_is_held(&graph_lock));
5920        fgd->type = GRAPH_FILTER_NOTRACE;
5921        fgd->seq_ops = &ftrace_graph_seq_ops;
5922
5923        ret = __ftrace_graph_open(inode, file, fgd);
5924        if (ret < 0)
5925                kfree(fgd);
5926
5927        mutex_unlock(&graph_lock);
5928        return ret;
5929}
5930
5931static int
5932ftrace_graph_release(struct inode *inode, struct file *file)
5933{
5934        struct ftrace_graph_data *fgd;
5935        struct ftrace_hash *old_hash, *new_hash;
5936        struct trace_parser *parser;
5937        int ret = 0;
5938
5939        if (file->f_mode & FMODE_READ) {
5940                struct seq_file *m = file->private_data;
5941
5942                fgd = m->private;
5943                seq_release(inode, file);
5944        } else {
5945                fgd = file->private_data;
5946        }
5947
5949        if (file->f_mode & FMODE_WRITE) {
5951                parser = &fgd->parser;
5952
5953                if (trace_parser_loaded(parser)) {
5954                        ret = ftrace_graph_set_hash(fgd->new_hash,
5955                                                    parser->buffer);
5956                }
5957
5958                trace_parser_put(parser);
5959
5960                new_hash = __ftrace_hash_move(fgd->new_hash);
5961                if (!new_hash) {
5962                        ret = -ENOMEM;
5963                        goto out;
5964                }
5965
5966                mutex_lock(&graph_lock);
5967
5968                if (fgd->type == GRAPH_FILTER_FUNCTION) {
5969                        old_hash = rcu_dereference_protected(ftrace_graph_hash,
5970                                        lockdep_is_held(&graph_lock));
5971                        rcu_assign_pointer(ftrace_graph_hash, new_hash);
5972                } else {
5973                        old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
5974                                        lockdep_is_held(&graph_lock));
5975                        rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash);
5976                }
5977
5978                mutex_unlock(&graph_lock);
5979
5980                /*
5981                 * We need to do a hard force of sched synchronization.
5982                 * This is because we use preempt_disable() to do RCU, but
5983                 * the function tracers can be called where RCU is not watching
5984                 * (like before user_exit()). We cannot rely on the RCU
5985                 * infrastructure to do the synchronization, thus we must do it
5986                 * ourselves.
5987                 */
5988                if (old_hash != EMPTY_HASH)
5989                        synchronize_rcu_tasks_rude();
5990
5991                free_ftrace_hash(old_hash);
5992        }
5993
5994 out:
5995        free_ftrace_hash(fgd->new_hash);
5996        kfree(fgd);
5997
5998        return ret;
5999}
6000
6001static int
6002ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
6003{
6004        struct ftrace_glob func_g;
6005        struct dyn_ftrace *rec;
6006        struct ftrace_page *pg;
6007        struct ftrace_func_entry *entry;
6008        int fail = 1;
6009        int not;
6010
6011        /* decode regex */
6012        func_g.type = filter_parse_regex(buffer, strlen(buffer),
6013                                         &func_g.search, &not);
6014
6015        func_g.len = strlen(func_g.search);
6016
6017        mutex_lock(&ftrace_lock);
6018
6019        if (unlikely(ftrace_disabled)) {
6020                mutex_unlock(&ftrace_lock);
6021                return -ENODEV;
6022        }
6023
6024        do_for_each_ftrace_rec(pg, rec) {
6025
6026                if (rec->flags & FTRACE_FL_DISABLED)
6027                        continue;
6028
6029                if (ftrace_match_record(rec, &func_g, NULL, 0)) {
6030                        entry = ftrace_lookup_ip(hash, rec->ip);
6031
6032                        if (!not) {
6033                                fail = 0;
6034
6035                                if (entry)
6036                                        continue;
6037                                if (add_hash_entry(hash, rec->ip) < 0)
6038                                        goto out;
6039                        } else {
6040                                if (entry) {
6041                                        free_hash_entry(hash, entry);
6042                                        fail = 0;
6043                                }
6044                        }
6045                }
6046        } while_for_each_ftrace_rec();
6047out:
6048        mutex_unlock(&ftrace_lock);
6049
6050        if (fail)
6051                return -EINVAL;
6052
6053        return 0;
6054}
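
/*
 * The hash built here backs the set_graph_function and set_graph_notrace
 * tracefs files created below, e.g. (assuming the usual mount point):
 *
 *	echo do_sys_open > /sys/kernel/tracing/set_graph_function
 *	echo '*rcu*' > /sys/kernel/tracing/set_graph_notrace
 */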
6055
6056static ssize_t
6057ftrace_graph_write(struct file *file, const char __user *ubuf,
6058                   size_t cnt, loff_t *ppos)
6059{
6060        ssize_t read, ret = 0;
6061        struct ftrace_graph_data *fgd = file->private_data;
6062        struct trace_parser *parser;
6063
6064        if (!cnt)
6065                return 0;
6066
6067        /* Read mode uses seq functions */
6068        if (file->f_mode & FMODE_READ) {
6069                struct seq_file *m = file->private_data;
6070                fgd = m->private;
6071        }
6072
6073        parser = &fgd->parser;
6074
6075        read = trace_get_user(parser, ubuf, cnt, ppos);
6076
6077        if (read >= 0 && trace_parser_loaded(parser) &&
6078            !trace_parser_cont(parser)) {
6079
6080                ret = ftrace_graph_set_hash(fgd->new_hash,
6081                                            parser->buffer);
6082                trace_parser_clear(parser);
6083        }
6084
6085        if (!ret)
6086                ret = read;
6087
6088        return ret;
6089}
6090
6091static const struct file_operations ftrace_graph_fops = {
6092        .open           = ftrace_graph_open,
6093        .read           = seq_read,
6094        .write          = ftrace_graph_write,
6095        .llseek         = tracing_lseek,
6096        .release        = ftrace_graph_release,
6097};
6098
6099static const struct file_operations ftrace_graph_notrace_fops = {
6100        .open           = ftrace_graph_notrace_open,
6101        .read           = seq_read,
6102        .write          = ftrace_graph_write,
6103        .llseek         = tracing_lseek,
6104        .release        = ftrace_graph_release,
6105};
6106#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6107
6108void ftrace_create_filter_files(struct ftrace_ops *ops,
6109                                struct dentry *parent)
6110{
6112        trace_create_file("set_ftrace_filter", 0644, parent,
6113                          ops, &ftrace_filter_fops);
6114
6115        trace_create_file("set_ftrace_notrace", 0644, parent,
6116                          ops, &ftrace_notrace_fops);
6117}
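
/*
 * For a trace instance, these files show up under the instance's own
 * directory, e.g. (paths illustrative):
 *
 *	echo 'kmem_cache_*' > /sys/kernel/tracing/instances/foo/set_ftrace_filter
 *	echo kfree > /sys/kernel/tracing/instances/foo/set_ftrace_notrace
 */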
6118
6119/*
6120 * The name "destroy_filter_files" is really a misnomer. In the
6121 * future it may actually delete the files, but for now it is
6122 * really intended to make sure the ops passed in are disabled
6123 * and that when this function returns, the caller is free to
6124 * free the ops.
6125 *
6126 * The "destroy" name is only to match the "create" name that this
6127 * should be paired with.
6128 */
6129void ftrace_destroy_filter_files(struct ftrace_ops *ops)
6130{
6131        mutex_lock(&ftrace_lock);
6132        if (ops->flags & FTRACE_OPS_FL_ENABLED)
6133                ftrace_shutdown(ops, 0);
6134        ops->flags |= FTRACE_OPS_FL_DELETED;
6135        ftrace_free_filter(ops);
6136        mutex_unlock(&ftrace_lock);
6137}
6138
6139static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
6140{
6142        trace_create_file("available_filter_functions", 0444,
6143                        d_tracer, NULL, &ftrace_avail_fops);
6144
6145        trace_create_file("enabled_functions", 0444,
6146                        d_tracer, NULL, &ftrace_enabled_fops);
6147
6148        ftrace_create_filter_files(&global_ops, d_tracer);
6149
6150#ifdef CONFIG_FUNCTION_GRAPH_TRACER
6151        trace_create_file("set_graph_function", 0644, d_tracer,
6152                                    NULL,
6153                                    &ftrace_graph_fops);
6154        trace_create_file("set_graph_notrace", 0644, d_tracer,
6155                                    NULL,
6156                                    &ftrace_graph_notrace_fops);
6157#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6158
6159        return 0;
6160}
6161
6162static int ftrace_cmp_ips(const void *a, const void *b)
6163{
6164        const unsigned long *ipa = a;
6165        const unsigned long *ipb = b;
6166
6167        if (*ipa > *ipb)
6168                return 1;
6169        if (*ipa < *ipb)
6170                return -1;
6171        return 0;
6172}
6173
6174static int ftrace_process_locs(struct module *mod,
6175                               unsigned long *start,
6176                               unsigned long *end)
6177{
6178        struct ftrace_page *start_pg;
6179        struct ftrace_page *pg;
6180        struct dyn_ftrace *rec;
6181        unsigned long count;
6182        unsigned long *p;
6183        unsigned long addr;
6184        unsigned long flags = 0; /* Shut up gcc */
6185        int ret = -ENOMEM;
6186
6187        count = end - start;
6188
6189        if (!count)
6190                return 0;
6191
6192        sort(start, count, sizeof(*start),
6193             ftrace_cmp_ips, NULL);
6194
6195        start_pg = ftrace_allocate_pages(count);
6196        if (!start_pg)
6197                return -ENOMEM;
6198
6199        mutex_lock(&ftrace_lock);
6200
6201        /*
6202         * The core kernel and each module need their own pages, as
6203         * modules will free them when they are removed.
6204         * Force a new page to be allocated for each module.
6205         */
6206        if (!mod) {
6207                WARN_ON(ftrace_pages || ftrace_pages_start);
6208                /* First initialization */
6209                ftrace_pages = ftrace_pages_start = start_pg;
6210        } else {
6211                if (!ftrace_pages)
6212                        goto out;
6213
6214                if (WARN_ON(ftrace_pages->next)) {
6215                        /* Hmm, we have free pages? */
6216                        while (ftrace_pages->next)
6217                                ftrace_pages = ftrace_pages->next;
6218                }
6219
6220                ftrace_pages->next = start_pg;
6221        }
6222
6223        p = start;
6224        pg = start_pg;
6225        while (p < end) {
6226                unsigned long end_offset;
6227                addr = ftrace_call_adjust(*p++);
6228                /*
6229                 * Some architecture linkers will pad between
6230                 * the different mcount_loc sections of different
6231                 * object files to satisfy alignments.
6232                 * Skip any NULL pointers.
6233                 */
6234                if (!addr)
6235                        continue;
6236
6237                end_offset = (pg->index+1) * sizeof(pg->records[0]);
6238                if (end_offset > PAGE_SIZE << pg->order) {
6239                        /* We should have allocated enough */
6240                        if (WARN_ON(!pg->next))
6241                                break;
6242                        pg = pg->next;
6243                }
6244
6245                rec = &pg->records[pg->index++];
6246                rec->ip = addr;
6247        }
6248
6249        /* We should have used all pages */
6250        WARN_ON(pg->next);
6251
6252        /* Assign the last page to ftrace_pages */
6253        ftrace_pages = pg;
6254
6255        /*
6256         * We only need to disable interrupts on start up
6257         * because we are modifying code that an interrupt
6258         * may execute, and the modification is not atomic.
6259         * But for modules, nothing runs the code we modify
6260         * until we are finished with it, and there's no
6261         * reason to cause large interrupt latencies while we do it.
6262         */
6263        if (!mod)
6264                local_irq_save(flags);
6265        ftrace_update_code(mod, start_pg);
6266        if (!mod)
6267                local_irq_restore(flags);
6268        ret = 0;
6269 out:
6270        mutex_unlock(&ftrace_lock);
6271
6272        return ret;
6273}
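
/*
 * For reference: the core kernel calls this from ftrace_init() (later in
 * this file) with the __start_mcount_loc/__stop_mcount_loc section bounds
 * and mod == NULL; modules are handled by ftrace_module_init() below,
 * which passes the module's ftrace_callsites array.
 */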
6274
6275struct ftrace_mod_func {
6276        struct list_head        list;
6277        char                    *name;
6278        unsigned long           ip;
6279        unsigned int            size;
6280};
6281
6282struct ftrace_mod_map {
6283        struct rcu_head         rcu;
6284        struct list_head        list;
6285        struct module           *mod;
6286        unsigned long           start_addr;
6287        unsigned long           end_addr;
6288        struct list_head        funcs;
6289        unsigned int            num_funcs;
6290};
6291
6292static int ftrace_get_trampoline_kallsym(unsigned int symnum,
6293                                         unsigned long *value, char *type,
6294                                         char *name, char *module_name,
6295                                         int *exported)
6296{
6297        struct ftrace_ops *op;
6298
6299        list_for_each_entry_rcu(op, &ftrace_ops_trampoline_list, list) {
6300                if (!op->trampoline || symnum--)
6301                        continue;
6302                *value = op->trampoline;
6303                *type = 't';
6304                strlcpy(name, FTRACE_TRAMPOLINE_SYM, KSYM_NAME_LEN);
6305                strlcpy(module_name, FTRACE_TRAMPOLINE_MOD, MODULE_NAME_LEN);
6306                *exported = 0;
6307                return 0;
6308        }
6309
6310        return -ERANGE;
6311}
6312
6313#ifdef CONFIG_MODULES
6314
6315#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
6316
6317static LIST_HEAD(ftrace_mod_maps);
6318
6319static int referenced_filters(struct dyn_ftrace *rec)
6320{
6321        struct ftrace_ops *ops;
6322        int cnt = 0;
6323
6324        for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
6325                if (ops_references_rec(ops, rec)) {
6326                        if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT))
6327                                continue;
6328                        if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY))
6329                                continue;
6330                        cnt++;
6331                        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
6332                                rec->flags |= FTRACE_FL_REGS;
6333                        if (cnt == 1 && ops->trampoline)
6334                                rec->flags |= FTRACE_FL_TRAMP;
6335                        else
6336                                rec->flags &= ~FTRACE_FL_TRAMP;
6337                }
6338        }
6339
6340        return cnt;
6341}
6342
6343static void
6344clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
6345{
6346        struct ftrace_func_entry *entry;
6347        struct dyn_ftrace *rec;
6348        int i;
6349
6350        if (ftrace_hash_empty(hash))
6351                return;
6352
6353        for (i = 0; i < pg->index; i++) {
6354                rec = &pg->records[i];
6355                entry = __ftrace_lookup_ip(hash, rec->ip);
6356                /*
6357                 * Do not allow this rec to match again.
6358                 * Yeah, it may waste some memory, but it will be removed
6359                 * if/when the hash is modified again.
6360                 */
6361                if (entry)
6362                        entry->ip = 0;
6363        }
6364}
6365
6366/* Clear any records from hashes */
6367static void clear_mod_from_hashes(struct ftrace_page *pg)
6368{
6369        struct trace_array *tr;
6370
6371        mutex_lock(&trace_types_lock);
6372        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6373                if (!tr->ops || !tr->ops->func_hash)
6374                        continue;
6375                mutex_lock(&tr->ops->func_hash->regex_lock);
6376                clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
6377                clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
6378                mutex_unlock(&tr->ops->func_hash->regex_lock);
6379        }
6380        mutex_unlock(&trace_types_lock);
6381}
6382
6383static void ftrace_free_mod_map(struct rcu_head *rcu)
6384{
6385        struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu);
6386        struct ftrace_mod_func *mod_func;
6387        struct ftrace_mod_func *n;
6388
6389        /* All the contents of mod_map are now not visible to readers */
6390        list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) {
6391                kfree(mod_func->name);
6392                list_del(&mod_func->list);
6393                kfree(mod_func);
6394        }
6395
6396        kfree(mod_map);
6397}
6398
6399void ftrace_release_mod(struct module *mod)
6400{
6401        struct ftrace_mod_map *mod_map;
6402        struct ftrace_mod_map *n;
6403        struct dyn_ftrace *rec;
6404        struct ftrace_page **last_pg;
6405        struct ftrace_page *tmp_page = NULL;
6406        struct ftrace_page *pg;
6407
6408        mutex_lock(&ftrace_lock);
6409
6410        if (ftrace_disabled)
6411                goto out_unlock;
6412
6413        list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
6414                if (mod_map->mod == mod) {
6415                        list_del_rcu(&mod_map->list);
6416                        call_rcu(&mod_map->rcu, ftrace_free_mod_map);
6417                        break;
6418                }
6419        }
6420
6421        /*
6422         * Each module has its own ftrace_pages; remove
6423         * them from the list.
6424         */
6425        last_pg = &ftrace_pages_start;
6426        for (pg = ftrace_pages_start; pg; pg = *last_pg) {
6427                rec = &pg->records[0];
6428                if (within_module_core(rec->ip, mod) ||
6429                    within_module_init(rec->ip, mod)) {
6430                        /*
6431                         * As core pages are first, the first
6432                         * page should never be a module page.
6433                         */
6434                        if (WARN_ON(pg == ftrace_pages_start))
6435                                goto out_unlock;
6436
6437                        /* Check if we are deleting the last page */
6438                        if (pg == ftrace_pages)
6439                                ftrace_pages = next_to_ftrace_page(last_pg);
6440
6441                        ftrace_update_tot_cnt -= pg->index;
6442                        *last_pg = pg->next;
6443
6444                        pg->next = tmp_page;
6445                        tmp_page = pg;
6446                } else
6447                        last_pg = &pg->next;
6448        }
6449 out_unlock:
6450        mutex_unlock(&ftrace_lock);
6451
6452        for (pg = tmp_page; pg; pg = tmp_page) {
6453
6454                /* Needs to be called outside of ftrace_lock */
6455                clear_mod_from_hashes(pg);
6456
6457                if (pg->records) {
6458                        free_pages((unsigned long)pg->records, pg->order);
6459                        ftrace_number_of_pages -= 1 << pg->order;
6460                }
6461                tmp_page = pg->next;
6462                kfree(pg);
6463                ftrace_number_of_groups--;
6464        }
6465}
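/*
 * The unlink-then-free split above is a common kernel pattern: detach
 * items onto a private list while holding the lock, then do the
 * lock-ordering-sensitive cleanup outside of it (clear_mod_from_hashes()
 * takes trace_types_lock, which elsewhere is taken before ftrace_lock).
 * A minimal sketch of the pattern; struct item and expensive_cleanup()
 * are hypothetical names for illustration only:
 */
#if 0	/* illustrative sketch, not built */
struct item {
	struct item *next;
};

static void release_items(struct mutex *lock, struct item **head)
{
	struct item *tmp = NULL, *it;

	mutex_lock(lock);
	while ((it = *head)) {			/* unlink under the lock */
		*head = it->next;
		it->next = tmp;
		tmp = it;
	}
	mutex_unlock(lock);

	while ((it = tmp)) {			/* cleanup outside the lock */
		tmp = it->next;
		expensive_cleanup(it);		/* may take other locks */
		kfree(it);
	}
}
#endif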
6466
6467void ftrace_module_enable(struct module *mod)
6468{
6469        struct dyn_ftrace *rec;
6470        struct ftrace_page *pg;
6471
6472        mutex_lock(&ftrace_lock);
6473
6474        if (ftrace_disabled)
6475                goto out_unlock;
6476
6477        /*
6478         * If tracing is enabled, go ahead and enable the record.
6479         *
6480         * The reason not to enable the record immediately is the
6481         * inherent check of ftrace_make_nop/ftrace_make_call for
6482         * correct previous instructions.  Making the NOP
6483         * conversion first puts the module into the correct state,
6484         * thus passing the ftrace_make_call check.
6485         *
6486         * We also delay this until after the module code has set the
6487         * text to read-only, since we now need to set it back to
6488         * read-write so that we can modify the text.
6489         */
6490        if (ftrace_start_up)
6491                ftrace_arch_code_modify_prepare();
6492
6493        do_for_each_ftrace_rec(pg, rec) {
6494                int cnt;
6495                /*
6496                 * do_for_each_ftrace_rec() is a double loop.
6497                 * All records in a pg belong to the same module, so
6498                 * if a record is not part of this module, the rest
6499                 * of this pg can be skipped, which the "break" does.
6500                 */
6501                if (!within_module_core(rec->ip, mod) &&
6502                    !within_module_init(rec->ip, mod))
6503                        break;
6504
6505                cnt = 0;
6506
6507                /*
6508                 * When adding a module, we need to check if tracers are
6509                 * currently enabled and if they are, and can trace this record,
6510                 * we need to enable the module functions as well as update the
6511                 * reference counts for those function records.
6512                 */
6513                if (ftrace_start_up)
6514                        cnt += referenced_filters(rec);
6515
6516                rec->flags &= ~FTRACE_FL_DISABLED;
6517                rec->flags += cnt;
6518
6519                if (ftrace_start_up && cnt) {
6520                        int failed = __ftrace_replace_code(rec, 1);
6521                        if (failed) {
6522                                ftrace_bug(failed, rec);
6523                                goto out_loop;
6524                        }
6525                }
6526
6527        } while_for_each_ftrace_rec();
6528
6529 out_loop:
6530        if (ftrace_start_up)
6531                ftrace_arch_code_modify_post_process();
6532
6533 out_unlock:
6534        mutex_unlock(&ftrace_lock);
6535
6536        process_cached_mods(mod->name);
6537}
6538
6539void ftrace_module_init(struct module *mod)
6540{
6541        if (ftrace_disabled || !mod->num_ftrace_callsites)
6542                return;
6543
6544        ftrace_process_locs(mod, mod->ftrace_callsites,
6545                            mod->ftrace_callsites + mod->num_ftrace_callsites);
6546}
6547
6548static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
6549                                struct dyn_ftrace *rec)
6550{
6551        struct ftrace_mod_func *mod_func;
6552        unsigned long symsize;
6553        unsigned long offset;
6554        char str[KSYM_SYMBOL_LEN];
6555        char *modname;
6556        const char *ret;
6557
6558        ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str);
6559        if (!ret)
6560                return;
6561
6562        mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL);
6563        if (!mod_func)
6564                return;
6565
6566        mod_func->name = kstrdup(str, GFP_KERNEL);
6567        if (!mod_func->name) {
6568                kfree(mod_func);
6569                return;
6570        }
6571
6572        mod_func->ip = rec->ip - offset;
6573        mod_func->size = symsize;
6574
6575        mod_map->num_funcs++;
6576
6577        list_add_rcu(&mod_func->list, &mod_map->funcs);
6578}
6579
6580static struct ftrace_mod_map *
6581allocate_ftrace_mod_map(struct module *mod,
6582                        unsigned long start, unsigned long end)
6583{
6584        struct ftrace_mod_map *mod_map;
6585
6586        mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL);
6587        if (!mod_map)
6588                return NULL;
6589
6590        mod_map->mod = mod;
6591        mod_map->start_addr = start;
6592        mod_map->end_addr = end;
6593        mod_map->num_funcs = 0;
6594
6595        INIT_LIST_HEAD_RCU(&mod_map->funcs);
6596
6597        list_add_rcu(&mod_map->list, &ftrace_mod_maps);
6598
6599        return mod_map;
6600}
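/*
 * mod_map lives on an RCU-protected list: published with list_add_rcu(),
 * walked by readers under preempt_disable() (see
 * ftrace_mod_address_lookup() below), and torn down in
 * ftrace_release_mod() with list_del_rcu() + call_rcu().  A minimal
 * self-contained sketch of that lifecycle; struct node and its helpers
 * are hypothetical:
 */
#if 0	/* illustrative sketch, not built */
struct node {
	struct list_head list;
	struct rcu_head rcu;
	int val;
};
static LIST_HEAD(nodes);

static void publish(struct node *n)
{
	list_add_rcu(&n->list, &nodes);		/* readers may see it now */
}

static int read_first(void)
{
	struct node *n;
	int val = -1;

	preempt_disable();	/* read-side protection, as used above */
	list_for_each_entry_rcu(n, &nodes, list) {
		val = n->val;
		break;
	}
	preempt_enable();
	return val;
}

static void node_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct node, rcu));
}

static void retire(struct node *n)
{
	list_del_rcu(&n->list);		  /* no new readers will find it */
	call_rcu(&n->rcu, node_free_rcu); /* freed once readers are done */
}
#endif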
6601
6602static const char *
6603ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
6604                           unsigned long addr, unsigned long *size,
6605                           unsigned long *off, char *sym)
6606{
6607        struct ftrace_mod_func *found_func = NULL;
6608        struct ftrace_mod_func *mod_func;
6609
6610        list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
6611                if (addr >= mod_func->ip &&
6612                    addr < mod_func->ip + mod_func->size) {
6613                        found_func = mod_func;
6614                        break;
6615                }
6616        }
6617
6618        if (found_func) {
6619                if (size)
6620                        *size = found_func->size;
6621                if (off)
6622                        *off = addr - found_func->ip;
6623                if (sym)
6624                        strlcpy(sym, found_func->name, KSYM_NAME_LEN);
6625
6626                return found_func->name;
6627        }
6628
6629        return NULL;
6630}
6631
6632const char *
6633ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
6634                   unsigned long *off, char **modname, char *sym)
6635{
6636        struct ftrace_mod_map *mod_map;
6637        const char *ret = NULL;
6638
6639        /* mod_map is freed via call_rcu() */
6640        preempt_disable();
6641        list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
6642                ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
6643                if (ret) {
6644                        if (modname)
6645                                *modname = mod_map->mod->name;
6646                        break;
6647                }
6648        }
6649        preempt_enable();
6650
6651        return ret;
6652}
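/*
 * A hypothetical caller, resolving an address that falls in a freed
 * module's init text (sym must provide KSYM_NAME_LEN bytes):
 */
#if 0	/* illustrative sketch, not built */
static void print_freed_sym(unsigned long addr)
{
	char sym[KSYM_NAME_LEN];
	unsigned long size, off;
	char *modname;
	const char *name;

	name = ftrace_mod_address_lookup(addr, &size, &off, &modname, sym);
	if (name)
		pr_info("%s+0x%lx/0x%lx [%s]\n", name, off, size, modname);
}
#endif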
6653
6654int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
6655                           char *type, char *name,
6656                           char *module_name, int *exported)
6657{
6658        struct ftrace_mod_map *mod_map;
6659        struct ftrace_mod_func *mod_func;
6660        int ret;
6661
6662        preempt_disable();
6663        list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
6664
6665                if (symnum >= mod_map->num_funcs) {
6666                        symnum -= mod_map->num_funcs;
6667                        continue;
6668                }
6669
6670                list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
6671                        if (symnum > 1) {
6672                                symnum--;
6673                                continue;
6674                        }
6675
6676                        *value = mod_func->ip;
6677                        *type = 'T';
6678                        strlcpy(name, mod_func->name, KSYM_NAME_LEN);
6679                        strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
6680                        *exported = 1;
6681                        preempt_enable();
6682                        return 0;
6683                }
6684                WARN_ON(1);
6685                break;
6686        }
6687        ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
6688                                            module_name, exported);
6689        preempt_enable();
6690        return ret;
6691}
6692
6693#else
6694static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
6695                                struct dyn_ftrace *rec) { }
6696static inline struct ftrace_mod_map *
6697allocate_ftrace_mod_map(struct module *mod,
6698                        unsigned long start, unsigned long end)
6699{
6700        return NULL;
6701}
6702int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
6703                           char *type, char *name, char *module_name,
6704                           int *exported)
6705{
6706        int ret;
6707
6708        preempt_disable();
6709        ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
6710                                            module_name, exported);
6711        preempt_enable();
6712        return ret;
6713}
6714#endif /* CONFIG_MODULES */
6715
6716struct ftrace_init_func {
6717        struct list_head list;
6718        unsigned long ip;
6719};
6720
6721/* Clear any init ips from hashes */
6722static void
6723clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash)
6724{
6725        struct ftrace_func_entry *entry;
6726
6727        entry = ftrace_lookup_ip(hash, func->ip);
6728        /*
6729         * Do not allow this rec to match again.
6730         * Yeah, it may waste some memory, but will be removed
6731         * if/when the hash is modified again.
6732         */
6733        if (entry)
6734                entry->ip = 0;
6735}
6736
6737static void
6738clear_func_from_hashes(struct ftrace_init_func *func)
6739{
6740        struct trace_array *tr;
6741
6742        mutex_lock(&trace_types_lock);
6743        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6744                if (!tr->ops || !tr->ops->func_hash)
6745                        continue;
6746                mutex_lock(&tr->ops->func_hash->regex_lock);
6747                clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
6748                clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
6749                mutex_unlock(&tr->ops->func_hash->regex_lock);
6750        }
6751        mutex_unlock(&trace_types_lock);
6752}
6753
6754static void add_to_clear_hash_list(struct list_head *clear_list,
6755                                   struct dyn_ftrace *rec)
6756{
6757        struct ftrace_init_func *func;
6758
6759        func = kmalloc(sizeof(*func), GFP_KERNEL);
6760        if (!func) {
6761                MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n");
6762                return;
6763        }
6764
6765        func->ip = rec->ip;
6766        list_add(&func->list, clear_list);
6767}
6768
6769void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
6770{
6771        unsigned long start = (unsigned long)(start_ptr);
6772        unsigned long end = (unsigned long)(end_ptr);
6773        struct ftrace_page **last_pg = &ftrace_pages_start;
6774        struct ftrace_page *pg;
6775        struct dyn_ftrace *rec;
6776        struct dyn_ftrace key;
6777        struct ftrace_mod_map *mod_map = NULL;
6778        struct ftrace_init_func *func, *func_next;
6779        struct list_head clear_hash;
6780
6781        INIT_LIST_HEAD(&clear_hash);
6782
6783        key.ip = start;
6784        key.flags = end;        /* overload flags, as it is unsigned long */
6785
6786        mutex_lock(&ftrace_lock);
6787
6788        /*
6789         * If we are freeing module init memory, then check if
6790         * any tracer is active. If so, save a mapping of the
6791         * functions being freed so kallsyms can still resolve them.
6792         */
6793        if (mod && ftrace_ops_list != &ftrace_list_end)
6794                mod_map = allocate_ftrace_mod_map(mod, start, end);
6795
6796        for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
6797                if (end < pg->records[0].ip ||
6798                    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
6799                        continue;
6800 again:
6801                rec = bsearch(&key, pg->records, pg->index,
6802                              sizeof(struct dyn_ftrace),
6803                              ftrace_cmp_recs);
6804                if (!rec)
6805                        continue;
6806
6807                /* rec will be cleared from hashes after ftrace_lock unlock */
6808                add_to_clear_hash_list(&clear_hash, rec);
6809
6810                if (mod_map)
6811                        save_ftrace_mod_rec(mod_map, rec);
6812
6813                pg->index--;
6814                ftrace_update_tot_cnt--;
6815                if (!pg->index) {
6816                        *last_pg = pg->next;
6817                        if (pg->records) {
6818                                free_pages((unsigned long)pg->records, pg->order);
6819                                ftrace_number_of_pages -= 1 << pg->order;
6820                        }
6821                        ftrace_number_of_groups--;
6822                        kfree(pg);
6823                        pg = container_of(last_pg, struct ftrace_page, next);
6824                        if (!(*last_pg))
6825                                ftrace_pages = pg;
6826                        continue;
6827                }
6828                memmove(rec, rec + 1,
6829                        (pg->index - (rec - pg->records)) * sizeof(*rec));
6830                /* More than one function may be in this block */
6831                goto again;
6832        }
6833        mutex_unlock(&ftrace_lock);
6834
6835        list_for_each_entry_safe(func, func_next, &clear_hash, list) {
6836                clear_func_from_hashes(func);
6837                kfree(func);
6838        }
6839}
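/*
 * The bsearch() above overloads its key: key.ip holds the start of the
 * range and key.flags the end, so one comparator can report "record
 * falls inside the range" as equality.  ftrace_cmp_recs() is defined
 * earlier in this file; the sketch below shows only the general shape
 * of such a range comparator:
 */
#if 0	/* illustrative sketch, not built */
static int range_cmp(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;  /* key->ip = start, key->flags = end */
	const struct dyn_ftrace *rec = b;

	if (key->flags < rec->ip)		/* range ends before this record */
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE) /* range starts after it */
		return 1;
	return 0;				/* record is inside the range */
}
#endif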
6840
6841void __init ftrace_free_init_mem(void)
6842{
6843        void *start = (void *)(&__init_begin);
6844        void *end = (void *)(&__init_end);
6845
6846        ftrace_free_mem(NULL, start, end);
6847}
6848
6849void __init ftrace_init(void)
6850{
6851        extern unsigned long __start_mcount_loc[];
6852        extern unsigned long __stop_mcount_loc[];
6853        unsigned long count, flags;
6854        int ret;
6855
6856        local_irq_save(flags);
6857        ret = ftrace_dyn_arch_init();
6858        local_irq_restore(flags);
6859        if (ret)
6860                goto failed;
6861
6862        count = __stop_mcount_loc - __start_mcount_loc;
6863        if (!count) {
6864                pr_info("ftrace: No functions to be traced?\n");
6865                goto failed;
6866        }
6867
6868        pr_info("ftrace: allocating %ld entries in %ld pages\n",
6869                count, count / ENTRIES_PER_PAGE + 1);
6870
6871        last_ftrace_enabled = ftrace_enabled = 1;
6872
6873        ret = ftrace_process_locs(NULL,
6874                                  __start_mcount_loc,
6875                                  __stop_mcount_loc);
6876
6877        pr_info("ftrace: allocated %ld pages with %ld groups\n",
6878                ftrace_number_of_pages, ftrace_number_of_groups);
6879
6880        set_ftrace_early_filters();
6881
6882        return;
6883 failed:
6884        ftrace_disabled = 1;
6885}
6886
6887/* Do nothing if arch does not support this */
6888void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
6889{
6890}
6891
6892static void ftrace_update_trampoline(struct ftrace_ops *ops)
6893{
6894        unsigned long trampoline = ops->trampoline;
6895
6896        arch_ftrace_update_trampoline(ops);
6897        if (ops->trampoline && ops->trampoline != trampoline &&
6898            (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) {
6899                /* Add to kallsyms before the perf events */
6900                ftrace_add_trampoline_to_kallsyms(ops);
6901                perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
6902                                   ops->trampoline, ops->trampoline_size, false,
6903                                   FTRACE_TRAMPOLINE_SYM);
6904                /*
6905                 * Record the perf text poke event after the ksymbol register
6906                 * event.
6907                 */
6908                perf_event_text_poke((void *)ops->trampoline, NULL, 0,
6909                                     (void *)ops->trampoline,
6910                                     ops->trampoline_size);
6911        }
6912}
6913
6914void ftrace_init_trace_array(struct trace_array *tr)
6915{
6916        INIT_LIST_HEAD(&tr->func_probes);
6917        INIT_LIST_HEAD(&tr->mod_trace);
6918        INIT_LIST_HEAD(&tr->mod_notrace);
6919}
6920#else
6921
6922struct ftrace_ops global_ops = {
6923        .func                   = ftrace_stub,
6924        .flags                  = FTRACE_OPS_FL_INITIALIZED |
6925                                  FTRACE_OPS_FL_PID,
6926};
6927
6928static int __init ftrace_nodyn_init(void)
6929{
6930        ftrace_enabled = 1;
6931        return 0;
6932}
6933core_initcall(ftrace_nodyn_init);
6934
6935static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
6936static inline void ftrace_startup_enable(int command) { }
6937static inline void ftrace_startup_all(int command) { }
6938
6939# define ftrace_startup_sysctl()        do { } while (0)
6940# define ftrace_shutdown_sysctl()       do { } while (0)
6941
6942static void ftrace_update_trampoline(struct ftrace_ops *ops)
6943{
6944}
6945
6946#endif /* CONFIG_DYNAMIC_FTRACE */
6947
6948__init void ftrace_init_global_array_ops(struct trace_array *tr)
6949{
6950        tr->ops = &global_ops;
6951        tr->ops->private = tr;
6952        ftrace_init_trace_array(tr);
6953}
6954
6955void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
6956{
6957        /* If we filter on pids, update to use the pid function */
6958        if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
6959                if (WARN_ON(tr->ops->func != ftrace_stub))
6960                        printk("ftrace ops had %pS for function\n",
6961                               tr->ops->func);
6962        }
6963        tr->ops->func = func;
6964        tr->ops->private = tr;
6965}
6966
6967void ftrace_reset_array_ops(struct trace_array *tr)
6968{
6969        tr->ops->func = ftrace_stub;
6970}
6971
6972static nokprobe_inline void
6973__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
6974                       struct ftrace_ops *ignored, struct ftrace_regs *fregs)
6975{
6976        struct pt_regs *regs = ftrace_get_regs(fregs);
6977        struct ftrace_ops *op;
6978        int bit;
6979
6980        bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START, TRACE_LIST_MAX);
6981        if (bit < 0)
6982                return;
6983
6984        /*
6985         * Some of the ops may be dynamically allocated,
6986         * they must be freed after a synchronize_rcu().
6987         */
6988        preempt_disable_notrace();
6989
6990        do_for_each_ftrace_op(op, ftrace_ops_list) {
6991                /* Stub functions don't need to be called nor tested */
6992                if (op->flags & FTRACE_OPS_FL_STUB)
6993                        continue;
6994                /*
6995                 * Check the following for each ops before calling their func:
6996                 *
6997                 *  if the RCU flag is set, then rcu_is_watching() must be true
6998                 *
6999                 *  Otherwise test if the ip matches the ops filter
7000                 *
7001                 * If any of the above fails then the op->func() is not executed.
7002                 */
7003                if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
7004                    ftrace_ops_test(op, ip, regs)) {
7005                        if (FTRACE_WARN_ON(!op->func)) {
7006                                pr_warn("op=%p %pS\n", op, op);
7007                                goto out;
7008                        }
7009                        op->func(ip, parent_ip, op, fregs);
7010                }
7011        } while_for_each_ftrace_op(op);
7012out:
7013        preempt_enable_notrace();
7014        trace_clear_recursion(bit);
7015}
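/*
 * The trace_test_and_set_recursion()/trace_clear_recursion() pair above
 * is what keeps a tracing callback from recursing into itself when
 * something it calls is also being traced.  A hypothetical callback can
 * use the same guard through the ftrace_test_recursion_trylock() helper
 * (assuming the helper from <linux/trace_recursion.h>):
 */
#if 0	/* illustrative sketch, not built */
static void notrace my_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	int bit;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)			/* already inside this callback */
		return;

	/* ... do the actual tracing work here ... */

	ftrace_test_recursion_unlock(bit);
}
#endif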
7016
7017/*
7018 * Some archs only support passing ip and parent_ip. Even though
7019 * the list function ignores the op parameter, we do not want any
7020 * C side effects, where a function is called without the caller
7021 * sending a third parameter.
7022 * Archs are to support both the regs and ftrace_ops at the same time.
7023 * If they support ftrace_ops, it is assumed they support regs.
7024 * If callbacks want to use regs, they must either check for regs
7025 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
7026 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full set of regs to be saved.
7027 * An architecture can pass partial regs with ftrace_ops and still
7028 * set the ARCH_SUPPORTS_FTRACE_OPS.
7029 */
7030#if ARCH_SUPPORTS_FTRACE_OPS
7031static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
7032                                 struct ftrace_ops *op, struct ftrace_regs *fregs)
7033{
7034        __ftrace_ops_list_func(ip, parent_ip, NULL, fregs);
7035}
7036NOKPROBE_SYMBOL(ftrace_ops_list_func);
7037#else
7038static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
7039{
7040        __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
7041}
7042NOKPROBE_SYMBOL(ftrace_ops_no_ops);
7043#endif
7044
7045/*
7046 * If there's only one function registered but it does not support
7047 * recursion or needs RCU protection, then this function will be
7048 * called by the mcount trampoline.
7049 */
7050static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
7051                                   struct ftrace_ops *op, struct ftrace_regs *fregs)
7052{
7053        int bit;
7054
7055        bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START, TRACE_LIST_MAX);
7056        if (bit < 0)
7057                return;
7058
7059        preempt_disable_notrace();
7060
7061        if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching())
7062                op->func(ip, parent_ip, op, fregs);
7063
7064        preempt_enable_notrace();
7065        trace_clear_recursion(bit);
7066}
7067NOKPROBE_SYMBOL(ftrace_ops_assist_func);
7068
7069/**
7070 * ftrace_ops_get_func - get the function a trampoline should call
7071 * @ops: the ops to get the function for
7072 *
7073 * Normally the mcount trampoline will call the ops->func, but there
7074 * are times that it should not. For example, if the ops does not
7075 * have its own recursion protection, then it should call the
7076 * ftrace_ops_assist_func() instead.
7077 *
7078 * Returns the function that the trampoline should call for @ops.
7079 */
7080ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
7081{
7082        /*
7083         * If the function does not handle recursion or needs to be RCU safe,
7084         * then we need to call the assist handler.
7085         */
7086        if (ops->flags & (FTRACE_OPS_FL_RECURSION |
7087                          FTRACE_OPS_FL_RCU))
7088                return ftrace_ops_assist_func;
7089
7090        return ops->func;
7091}
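/*
 * In practice: a hypothetical ops that sets FTRACE_OPS_FL_RECURSION
 * gets its trampoline pointed at ftrace_ops_assist_func() above, which
 * runs the recursion/RCU guards and then calls ops->func(); an ops that
 * sets neither flag is called directly and must protect itself.
 */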
7092
7093static void
7094ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
7095                    struct task_struct *prev, struct task_struct *next)
7096{
7097        struct trace_array *tr = data;
7098        struct trace_pid_list *pid_list;
7099        struct trace_pid_list *no_pid_list;
7100
7101        pid_list = rcu_dereference_sched(tr->function_pids);
7102        no_pid_list = rcu_dereference_sched(tr->function_no_pids);
7103
7104        if (trace_ignore_this_task(pid_list, no_pid_list, next))
7105                this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7106                               FTRACE_PID_IGNORE);
7107        else
7108                this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7109                               next->pid);
7110}
7111
7112static void
7113ftrace_pid_follow_sched_process_fork(void *data,
7114                                     struct task_struct *self,
7115                                     struct task_struct *task)
7116{
7117        struct trace_pid_list *pid_list;
7118        struct trace_array *tr = data;
7119
7120        pid_list = rcu_dereference_sched(tr->function_pids);
7121        trace_filter_add_remove_task(pid_list, self, task);
7122
7123        pid_list = rcu_dereference_sched(tr->function_no_pids);
7124        trace_filter_add_remove_task(pid_list, self, task);
7125}
7126
7127static void
7128ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
7129{
7130        struct trace_pid_list *pid_list;
7131        struct trace_array *tr = data;
7132
7133        pid_list = rcu_dereference_sched(tr->function_pids);
7134        trace_filter_add_remove_task(pid_list, NULL, task);
7135
7136        pid_list = rcu_dereference_sched(tr->function_no_pids);
7137        trace_filter_add_remove_task(pid_list, NULL, task);
7138}
7139
7140void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
7141{
7142        if (enable) {
7143                register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
7144                                                  tr);
7145                register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
7146                                                  tr);
7147        } else {
7148                unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
7149                                                    tr);
7150                unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
7151                                                    tr);
7152        }
7153}
7154
7155static void clear_ftrace_pids(struct trace_array *tr, int type)
7156{
7157        struct trace_pid_list *pid_list;
7158        struct trace_pid_list *no_pid_list;
7159        int cpu;
7160
7161        pid_list = rcu_dereference_protected(tr->function_pids,
7162                                             lockdep_is_held(&ftrace_lock));
7163        no_pid_list = rcu_dereference_protected(tr->function_no_pids,
7164                                                lockdep_is_held(&ftrace_lock));
7165
7166        /* Make sure there's something to do */
7167        if (!pid_type_enabled(type, pid_list, no_pid_list))
7168                return;
7169
7170        /* See if the pids still need to be checked after this */
7171        if (!still_need_pid_events(type, pid_list, no_pid_list)) {
7172                unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
7173                for_each_possible_cpu(cpu)
7174                        per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE;
7175        }
7176
7177        if (type & TRACE_PIDS)
7178                rcu_assign_pointer(tr->function_pids, NULL);
7179
7180        if (type & TRACE_NO_PIDS)
7181                rcu_assign_pointer(tr->function_no_pids, NULL);
7182
7183        /* Wait till all users are no longer using pid filtering */
7184        synchronize_rcu();
7185
7186        if ((type & TRACE_PIDS) && pid_list)
7187                trace_free_pid_list(pid_list);
7188
7189        if ((type & TRACE_NO_PIDS) && no_pid_list)
7190                trace_free_pid_list(no_pid_list);
7191}
7192
7193void ftrace_clear_pids(struct trace_array *tr)
7194{
7195        mutex_lock(&ftrace_lock);
7196
7197        clear_ftrace_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
7198
7199        mutex_unlock(&ftrace_lock);
7200}
7201
7202static void ftrace_pid_reset(struct trace_array *tr, int type)
7203{
7204        mutex_lock(&ftrace_lock);
7205        clear_ftrace_pids(tr, type);
7206
7207        ftrace_update_pid_func();
7208        ftrace_startup_all(0);
7209
7210        mutex_unlock(&ftrace_lock);
7211}
7212
7213/* Greater than any max PID */
7214#define FTRACE_NO_PIDS          (void *)(PID_MAX_LIMIT + 1)
7215
7216static void *fpid_start(struct seq_file *m, loff_t *pos)
7217        __acquires(RCU)
7218{
7219        struct trace_pid_list *pid_list;
7220        struct trace_array *tr = m->private;
7221
7222        mutex_lock(&ftrace_lock);
7223        rcu_read_lock_sched();
7224
7225        pid_list = rcu_dereference_sched(tr->function_pids);
7226
7227        if (!pid_list)
7228                return !(*pos) ? FTRACE_NO_PIDS : NULL;
7229
7230        return trace_pid_start(pid_list, pos);
7231}
7232
7233static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
7234{
7235        struct trace_array *tr = m->private;
7236        struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
7237
7238        if (v == FTRACE_NO_PIDS) {
7239                (*pos)++;
7240                return NULL;
7241        }
7242        return trace_pid_next(pid_list, v, pos);
7243}
7244
7245static void fpid_stop(struct seq_file *m, void *p)
7246        __releases(RCU)
7247{
7248        rcu_read_unlock_sched();
7249        mutex_unlock(&ftrace_lock);
7250}
7251
7252static int fpid_show(struct seq_file *m, void *v)
7253{
7254        if (v == FTRACE_NO_PIDS) {
7255                seq_puts(m, "no pid\n");
7256                return 0;
7257        }
7258
7259        return trace_pid_show(m, v);
7260}
7261
7262static const struct seq_operations ftrace_pid_sops = {
7263        .start = fpid_start,
7264        .next = fpid_next,
7265        .stop = fpid_stop,
7266        .show = fpid_show,
7267};
7268
7269static void *fnpid_start(struct seq_file *m, loff_t *pos)
7270        __acquires(RCU)
7271{
7272        struct trace_pid_list *pid_list;
7273        struct trace_array *tr = m->private;
7274
7275        mutex_lock(&ftrace_lock);
7276        rcu_read_lock_sched();
7277
7278        pid_list = rcu_dereference_sched(tr->function_no_pids);
7279
7280        if (!pid_list)
7281                return !(*pos) ? FTRACE_NO_PIDS : NULL;
7282
7283        return trace_pid_start(pid_list, pos);
7284}
7285
7286static void *fnpid_next(struct seq_file *m, void *v, loff_t *pos)
7287{
7288        struct trace_array *tr = m->private;
7289        struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids);
7290
7291        if (v == FTRACE_NO_PIDS) {
7292                (*pos)++;
7293                return NULL;
7294        }
7295        return trace_pid_next(pid_list, v, pos);
7296}
7297
7298static const struct seq_operations ftrace_no_pid_sops = {
7299        .start = fnpid_start,
7300        .next = fnpid_next,
7301        .stop = fpid_stop,
7302        .show = fpid_show,
7303};
7304
7305static int pid_open(struct inode *inode, struct file *file, int type)
7306{
7307        const struct seq_operations *seq_ops;
7308        struct trace_array *tr = inode->i_private;
7309        struct seq_file *m;
7310        int ret = 0;
7311
7312        ret = tracing_check_open_get_tr(tr);
7313        if (ret)
7314                return ret;
7315
7316        if ((file->f_mode & FMODE_WRITE) &&
7317            (file->f_flags & O_TRUNC))
7318                ftrace_pid_reset(tr, type);
7319
7320        switch (type) {
7321        case TRACE_PIDS:
7322                seq_ops = &ftrace_pid_sops;
7323                break;
7324        case TRACE_NO_PIDS:
7325                seq_ops = &ftrace_no_pid_sops;
7326                break;
7327        default:
7328                trace_array_put(tr);
7329                WARN_ON_ONCE(1);
7330                return -EINVAL;
7331        }
7332
7333        ret = seq_open(file, seq_ops);
7334        if (ret < 0) {
7335                trace_array_put(tr);
7336        } else {
7337                m = file->private_data;
7338                /* copy tr over to seq ops */
7339                m->private = tr;
7340        }
7341
7342        return ret;
7343}
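/*
 * Note the standard seq_file plumbing above: seq_open() allocates the
 * struct seq_file and stores it in file->private_data, and the
 * trace_array is then stashed in m->private so that the seq_ops
 * callbacks (fpid_start(), fpid_show(), ...) can retrieve it.
 */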
7344
7345static int
7346ftrace_pid_open(struct inode *inode, struct file *file)
7347{
7348        return pid_open(inode, file, TRACE_PIDS);
7349}
7350
7351static int
7352ftrace_no_pid_open(struct inode *inode, struct file *file)
7353{
7354        return pid_open(inode, file, TRACE_NO_PIDS);
7355}
7356
7357static void ignore_task_cpu(void *data)
7358{
7359        struct trace_array *tr = data;
7360        struct trace_pid_list *pid_list;
7361        struct trace_pid_list *no_pid_list;
7362
7363        /*
7364         * This function is called by on_each_cpu() while the
7365         * ftrace_lock is held.
7366         */
7367        pid_list = rcu_dereference_protected(tr->function_pids,
7368                                             mutex_is_locked(&ftrace_lock));
7369        no_pid_list = rcu_dereference_protected(tr->function_no_pids,
7370                                                mutex_is_locked(&ftrace_lock));
7371
7372        if (trace_ignore_this_task(pid_list, no_pid_list, current))
7373                this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7374                               FTRACE_PID_IGNORE);
7375        else
7376                this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7377                               current->pid);
7378}
7379
7380static ssize_t
7381pid_write(struct file *filp, const char __user *ubuf,
7382          size_t cnt, loff_t *ppos, int type)
7383{
7384        struct seq_file *m = filp->private_data;
7385        struct trace_array *tr = m->private;
7386        struct trace_pid_list *filtered_pids;
7387        struct trace_pid_list *other_pids;
7388        struct trace_pid_list *pid_list;
7389        ssize_t ret;
7390
7391        if (!cnt)
7392                return 0;
7393
7394        mutex_lock(&ftrace_lock);
7395
7396        switch (type) {
7397        case TRACE_PIDS:
7398                filtered_pids = rcu_dereference_protected(tr->function_pids,
7399                                             lockdep_is_held(&ftrace_lock));
7400                other_pids = rcu_dereference_protected(tr->function_no_pids,
7401                                             lockdep_is_held(&ftrace_lock));
7402                break;
7403        case TRACE_NO_PIDS:
7404                filtered_pids = rcu_dereference_protected(tr->function_no_pids,
7405                                             lockdep_is_held(&ftrace_lock));
7406                other_pids = rcu_dereference_protected(tr->function_pids,
7407                                             lockdep_is_held(&ftrace_lock));
7408                break;
7409        default:
7410                ret = -EINVAL;
7411                WARN_ON_ONCE(1);
7412                goto out;
7413        }
7414
7415        ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
7416        if (ret < 0)
7417                goto out;
7418
7419        switch (type) {
7420        case TRACE_PIDS:
7421                rcu_assign_pointer(tr->function_pids, pid_list);
7422                break;
7423        case TRACE_NO_PIDS:
7424                rcu_assign_pointer(tr->function_no_pids, pid_list);
7425                break;
7426        }
7427
7429        if (filtered_pids) {
7430                synchronize_rcu();
7431                trace_free_pid_list(filtered_pids);
7432        } else if (pid_list && !other_pids) {
7433                /* Register a probe to set whether to ignore the tracing of a task */
7434                register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
7435        }
7436
7437        /*
7438         * Ignoring of pids is done at task switch. But we have to
7439         * check for those tasks that are currently running.
7440         * Always do this in case a pid was appended or removed.
7441         */
7442        on_each_cpu(ignore_task_cpu, tr, 1);
7443
7444        ftrace_update_pid_func();
7445        ftrace_startup_all(0);
7446 out:
7447        mutex_unlock(&ftrace_lock);
7448
7449        if (ret > 0)
7450                *ppos += ret;
7451
7452        return ret;
7453}
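/*
 * The update above is the classic RCU publish/retire sequence: build
 * the new list, rcu_assign_pointer() it into place, synchronize_rcu()
 * to wait out readers, then free the old copy.  The bare pattern, with
 * a hypothetical global and lock for illustration:
 */
#if 0	/* illustrative sketch, not built */
struct conf {
	int x;
};
static struct conf __rcu *cur_conf;
static DEFINE_MUTEX(conf_lock);

static int replace_conf(int x)
{
	struct conf *newc, *oldc;

	newc = kmalloc(sizeof(*newc), GFP_KERNEL);
	if (!newc)
		return -ENOMEM;
	newc->x = x;

	mutex_lock(&conf_lock);
	oldc = rcu_dereference_protected(cur_conf,
					 lockdep_is_held(&conf_lock));
	rcu_assign_pointer(cur_conf, newc);	/* publish the new version */
	mutex_unlock(&conf_lock);

	synchronize_rcu();			/* wait out existing readers */
	kfree(oldc);				/* now safe to free the old one */
	return 0;
}
#endif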
7454
7455static ssize_t
7456ftrace_pid_write(struct file *filp, const char __user *ubuf,
7457                 size_t cnt, loff_t *ppos)
7458{
7459        return pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
7460}
7461
7462static ssize_t
7463ftrace_no_pid_write(struct file *filp, const char __user *ubuf,
7464                    size_t cnt, loff_t *ppos)
7465{
7466        return pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
7467}
7468
7469static int
7470ftrace_pid_release(struct inode *inode, struct file *file)
7471{
7472        struct trace_array *tr = inode->i_private;
7473
7474        trace_array_put(tr);
7475
7476        return seq_release(inode, file);
7477}
7478
7479static const struct file_operations ftrace_pid_fops = {
7480        .open           = ftrace_pid_open,
7481        .write          = ftrace_pid_write,
7482        .read           = seq_read,
7483        .llseek         = tracing_lseek,
7484        .release        = ftrace_pid_release,
7485};
7486
7487static const struct file_operations ftrace_no_pid_fops = {
7488        .open           = ftrace_no_pid_open,
7489        .write          = ftrace_no_pid_write,
7490        .read           = seq_read,
7491        .llseek         = tracing_lseek,
7492        .release        = ftrace_pid_release,
7493};
7494
7495void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
7496{
7497        trace_create_file("set_ftrace_pid", 0644, d_tracer,
7498                            tr, &ftrace_pid_fops);
7499        trace_create_file("set_ftrace_notrace_pid", 0644, d_tracer,
7500                            tr, &ftrace_no_pid_fops);
7501}
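/*
 * From userspace these files live under tracefs (typically
 * /sys/kernel/tracing): "echo $PID > set_ftrace_pid" restricts the
 * function tracer to that task, opening with O_TRUNC ("echo >
 * set_ftrace_pid") clears the list via ftrace_pid_reset(), and
 * set_ftrace_notrace_pid excludes the listed tasks instead.
 */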
7502
7503void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
7504                                         struct dentry *d_tracer)
7505{
7506        /* Only the top level directory has the dyn_tracefs and profile */
7507        WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
7508
7509        ftrace_init_dyn_tracefs(d_tracer);
7510        ftrace_profile_tracefs(d_tracer);
7511}
7512
7513/**
7514 * ftrace_kill - kill ftrace
7515 *
7516 * This function should be used by panic code. It stops ftrace
7517 * but in a not so nice way. It only sets flags and the stub
7518 * function, taking no locks, so it is safe from any context.
7519 */
7520void ftrace_kill(void)
7521{
7522        ftrace_disabled = 1;
7523        ftrace_enabled = 0;
7524        ftrace_trace_function = ftrace_stub;
7525}
7526
7527/**
7528 * ftrace_is_dead - Test if ftrace is dead or not.
7529 */
7530int ftrace_is_dead(void)
7531{
7532        return ftrace_disabled;
7533}
7534
7535/**
7536 * register_ftrace_function - register a function for profiling
7537 * @ops: ops structure that holds the function for profiling.
7538 *
7539 * Register a function to be called by all functions in the
7540 * kernel.
7541 *
7542 * Note: @ops->func and all the functions it calls must be labeled
7543 *       with "notrace", otherwise it will go into a
7544 *       recursive loop.
7545 */
7546int register_ftrace_function(struct ftrace_ops *ops)
7547{
7548        int ret;
7549
7550        ftrace_ops_init(ops);
7551
7552        mutex_lock(&ftrace_lock);
7553
7554        ret = ftrace_startup(ops, 0);
7555
7556        mutex_unlock(&ftrace_lock);
7557
7558        return ret;
7559}
7560EXPORT_SYMBOL_GPL(register_ftrace_function);
7561
7562/**
7563 * unregister_ftrace_function - unregister a function for profiling.
7564 * @ops: ops structure that holds the function to unregister
7565 *
7566 * Unregister a function that was added to be called by ftrace profiling.
7567 */
7568int unregister_ftrace_function(struct ftrace_ops *ops)
7569{
7570        int ret;
7571
7572        mutex_lock(&ftrace_lock);
7573        ret = ftrace_shutdown(ops, 0);
7574        mutex_unlock(&ftrace_lock);
7575
7576        return ret;
7577}
7578EXPORT_SYMBOL_GPL(unregister_ftrace_function);
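/*
 * A minimal (hypothetical) user of this interface, in the style of
 * Documentation/trace/ftrace-uses.rst: the callback is marked notrace,
 * and FTRACE_OPS_FL_RECURSION asks the core to wrap it with
 * ftrace_ops_assist_func() above:
 */
#if 0	/* illustrative sketch, not built */
#include <linux/module.h>
#include <linux/ftrace.h>

static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *op,
				  struct ftrace_regs *fregs)
{
	trace_printk("called %ps from %ps\n", (void *)ip, (void *)parent_ip);
}

static struct ftrace_ops my_ops = {
	.func	= my_trace_func,
	.flags	= FTRACE_OPS_FL_RECURSION,
};

static int __init my_mod_init(void)
{
	return register_ftrace_function(&my_ops);
}

static void __exit my_mod_exit(void)
{
	unregister_ftrace_function(&my_ops);
}

module_init(my_mod_init);
module_exit(my_mod_exit);
MODULE_LICENSE("GPL");
#endif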
7579
7580static bool is_permanent_ops_registered(void)
7581{
7582        struct ftrace_ops *op;
7583
7584        do_for_each_ftrace_op(op, ftrace_ops_list) {
7585                if (op->flags & FTRACE_OPS_FL_PERMANENT)
7586                        return true;
7587        } while_for_each_ftrace_op(op);
7588
7589        return false;
7590}
7591
7592int
7593ftrace_enable_sysctl(struct ctl_table *table, int write,
7594                     void *buffer, size_t *lenp, loff_t *ppos)
7595{
7596        int ret = -ENODEV;
7597
7598        mutex_lock(&ftrace_lock);
7599
7600        if (unlikely(ftrace_disabled))
7601                goto out;
7602
7603        ret = proc_dointvec(table, write, buffer, lenp, ppos);
7604
7605        if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
7606                goto out;
7607
7608        if (ftrace_enabled) {
7609
7610                /* we are starting ftrace again */
7611                if (rcu_dereference_protected(ftrace_ops_list,
7612                        lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
7613                        update_ftrace_function();
7614
7615                ftrace_startup_sysctl();
7616
7617        } else {
7618                if (is_permanent_ops_registered()) {
7619                        ftrace_enabled = true;
7620                        ret = -EBUSY;
7621                        goto out;
7622                }
7623
7624                /* stopping ftrace calls (just send to ftrace_stub) */
7625                ftrace_trace_function = ftrace_stub;
7626
7627                ftrace_shutdown_sysctl();
7628        }
7629
7630        last_ftrace_enabled = !!ftrace_enabled;
7631 out:
7632        mutex_unlock(&ftrace_lock);
7633        return ret;
7634}
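/*
 * This handler backs the kernel.ftrace_enabled sysctl
 * (/proc/sys/kernel/ftrace_enabled).  Writing 0 stubs out all function
 * tracing, unless an FTRACE_OPS_FL_PERMANENT ops is registered, in
 * which case the write is rejected with -EBUSY as seen above.
 */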
7635