linux/kernel/trace/trace_kprobe.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 */
#define pr_fmt(fmt)     "trace_kprobe: " fmt

#include <linux/security.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/rculist.h>
#include <linux/error-injection.h>

#include <asm/setup.h>  /* for COMMAND_LINE_SIZE */

#include "trace_dynevent.h"
#include "trace_kprobe_selftest.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define KPROBE_EVENT_SYSTEM "kprobes"
#define KRETPROBE_MAXACTIVE_MAX 4096

/* Kprobe early definition from command line */
static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;

static int __init set_kprobe_boot_events(char *str)
{
        strlcpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
        disable_tracing_selftest("running kprobe events");

        return 0;
}
__setup("kprobe_event=", set_kprobe_boot_events);
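
/*
 * Boot-time usage sketch (see Documentation/admin-guide/kernel-parameters.txt):
 * the parameter takes a semicolon-delimited list of probe definitions in the
 * kprobe_events syntax, but with comma-delimited parameters, e.g.
 *
 *   kprobe_event=p,vfs_read,$arg1,$arg2
 */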

static int trace_kprobe_create(const char *raw_command);
static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_kprobe_release(struct dyn_event *ev);
static bool trace_kprobe_is_busy(struct dyn_event *ev);
static bool trace_kprobe_match(const char *system, const char *event,
                        int argc, const char **argv, struct dyn_event *ev);

static struct dyn_event_operations trace_kprobe_ops = {
        .create = trace_kprobe_create,
        .show = trace_kprobe_show,
        .is_busy = trace_kprobe_is_busy,
        .free = trace_kprobe_release,
        .match = trace_kprobe_match,
};

/*
 * Kprobe event core functions
 */
struct trace_kprobe {
        struct dyn_event        devent;
        struct kretprobe        rp;     /* Use rp.kp for kprobe use */
        unsigned long __percpu *nhit;
        const char              *symbol;        /* symbol name */
        struct trace_probe      tp;
};

static bool is_trace_kprobe(struct dyn_event *ev)
{
        return ev->ops == &trace_kprobe_ops;
}

static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
{
        return container_of(ev, struct trace_kprobe, devent);
}

/**
 * for_each_trace_kprobe - iterate over the trace_kprobe list
 * @pos:        the struct trace_kprobe * for each entry
 * @dpos:       the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_kprobe(pos, dpos)        \
        for_each_dyn_event(dpos)                \
                if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))

#define SIZEOF_TRACE_KPROBE(n)                          \
        (offsetof(struct trace_kprobe, tp.args) +       \
        (sizeof(struct probe_arg) * (n)))

static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
        return tk->rp.handler != NULL;
}

static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
{
        return tk->symbol ? tk->symbol : "unknown";
}

static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
{
        return tk->rp.kp.offset;
}

static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
        return !!(kprobe_gone(&tk->rp.kp));
}

static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
                                                 struct module *mod)
{
        int len = strlen(module_name(mod));
        const char *name = trace_kprobe_symbol(tk);

        return strncmp(module_name(mod), name, len) == 0 && name[len] == ':';
}

static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
{
        char *p;
        bool ret;

        if (!tk->symbol)
                return false;
        p = strchr(tk->symbol, ':');
        if (!p)
                return true;
        *p = '\0';
        rcu_read_lock_sched();
        ret = !!find_module(tk->symbol);
        rcu_read_unlock_sched();
        *p = ':';

        return ret;
}

static bool trace_kprobe_is_busy(struct dyn_event *ev)
{
        struct trace_kprobe *tk = to_trace_kprobe(ev);

        return trace_probe_is_enabled(&tk->tp);
}

static bool trace_kprobe_match_command_head(struct trace_kprobe *tk,
                                            int argc, const char **argv)
{
        char buf[MAX_ARGSTR_LEN + 1];

        if (!argc)
                return true;

        if (!tk->symbol)
                snprintf(buf, sizeof(buf), "0x%p", tk->rp.kp.addr);
        else if (tk->rp.kp.offset)
                snprintf(buf, sizeof(buf), "%s+%u",
                         trace_kprobe_symbol(tk), tk->rp.kp.offset);
        else
                snprintf(buf, sizeof(buf), "%s", trace_kprobe_symbol(tk));
        if (strcmp(buf, argv[0]))
                return false;
        argc--; argv++;

        return trace_probe_match_command_args(&tk->tp, argc, argv);
}

static bool trace_kprobe_match(const char *system, const char *event,
                        int argc, const char **argv, struct dyn_event *ev)
{
        struct trace_kprobe *tk = to_trace_kprobe(ev);

        return strcmp(trace_probe_name(&tk->tp), event) == 0 &&
            (!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0) &&
            trace_kprobe_match_command_head(tk, argc, argv);
}

static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
{
        unsigned long nhit = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                nhit += *per_cpu_ptr(tk->nhit, cpu);

        return nhit;
}

static nokprobe_inline bool trace_kprobe_is_registered(struct trace_kprobe *tk)
{
        return !(list_empty(&tk->rp.kp.list) &&
                 hlist_unhashed(&tk->rp.kp.hlist));
}

/* Return 0 if it fails to find the symbol address */
static nokprobe_inline
unsigned long trace_kprobe_address(struct trace_kprobe *tk)
{
        unsigned long addr;

        if (tk->symbol) {
                addr = (unsigned long)
                        kallsyms_lookup_name(trace_kprobe_symbol(tk));
                if (addr)
                        addr += tk->rp.kp.offset;
        } else {
                addr = (unsigned long)tk->rp.kp.addr;
        }
        return addr;
}

static nokprobe_inline struct trace_kprobe *
trace_kprobe_primary_from_call(struct trace_event_call *call)
{
        struct trace_probe *tp;

        tp = trace_probe_primary_from_call(call);
        if (WARN_ON_ONCE(!tp))
                return NULL;

        return container_of(tp, struct trace_kprobe, tp);
}

bool trace_kprobe_on_func_entry(struct trace_event_call *call)
{
        struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);

        return tk ? (kprobe_on_func_entry(tk->rp.kp.addr,
                        tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
                        tk->rp.kp.addr ? 0 : tk->rp.kp.offset) == 0) : false;
}

bool trace_kprobe_error_injectable(struct trace_event_call *call)
{
        struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);

        return tk ? within_error_injection_list(trace_kprobe_address(tk)) :
               false;
}

static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
                                struct pt_regs *regs);

static void free_trace_kprobe(struct trace_kprobe *tk)
{
        if (tk) {
                trace_probe_cleanup(&tk->tp);
                kfree(tk->symbol);
                free_percpu(tk->nhit);
                kfree(tk);
        }
}

/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_kprobe *alloc_trace_kprobe(const char *group,
                                             const char *event,
                                             void *addr,
                                             const char *symbol,
                                             unsigned long offs,
                                             int maxactive,
                                             int nargs, bool is_return)
{
        struct trace_kprobe *tk;
        int ret = -ENOMEM;

        tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
        if (!tk)
                return ERR_PTR(ret);

        tk->nhit = alloc_percpu(unsigned long);
        if (!tk->nhit)
                goto error;

        if (symbol) {
                tk->symbol = kstrdup(symbol, GFP_KERNEL);
                if (!tk->symbol)
                        goto error;
                tk->rp.kp.symbol_name = tk->symbol;
                tk->rp.kp.offset = offs;
        } else
                tk->rp.kp.addr = addr;

        if (is_return)
                tk->rp.handler = kretprobe_dispatcher;
        else
                tk->rp.kp.pre_handler = kprobe_dispatcher;

        tk->rp.maxactive = maxactive;
        INIT_HLIST_NODE(&tk->rp.kp.hlist);
        INIT_LIST_HEAD(&tk->rp.kp.list);

        ret = trace_probe_init(&tk->tp, event, group, false);
        if (ret < 0)
                goto error;

        dyn_event_init(&tk->devent, &trace_kprobe_ops);
        return tk;
error:
        free_trace_kprobe(tk);
        return ERR_PTR(ret);
}

static struct trace_kprobe *find_trace_kprobe(const char *event,
                                              const char *group)
{
        struct dyn_event *pos;
        struct trace_kprobe *tk;

        for_each_trace_kprobe(tk, pos)
                if (strcmp(trace_probe_name(&tk->tp), event) == 0 &&
                    strcmp(trace_probe_group_name(&tk->tp), group) == 0)
                        return tk;
        return NULL;
}

static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
{
        int ret = 0;

        if (trace_kprobe_is_registered(tk) && !trace_kprobe_has_gone(tk)) {
                if (trace_kprobe_is_return(tk))
                        ret = enable_kretprobe(&tk->rp);
                else
                        ret = enable_kprobe(&tk->rp.kp);
        }

        return ret;
}

static void __disable_trace_kprobe(struct trace_probe *tp)
{
        struct trace_probe *pos;
        struct trace_kprobe *tk;

        list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
                tk = container_of(pos, struct trace_kprobe, tp);
                if (!trace_kprobe_is_registered(tk))
                        continue;
                if (trace_kprobe_is_return(tk))
                        disable_kretprobe(&tk->rp);
                else
                        disable_kprobe(&tk->rp.kp);
        }
}

/*
 * Enable trace_probe.
 * If the file is NULL, enable the "perf" handler; otherwise enable the
 * "trace" handler.
 */
static int enable_trace_kprobe(struct trace_event_call *call,
                                struct trace_event_file *file)
{
        struct trace_probe *pos, *tp;
        struct trace_kprobe *tk;
        bool enabled;
        int ret = 0;

        tp = trace_probe_primary_from_call(call);
        if (WARN_ON_ONCE(!tp))
                return -ENODEV;
        enabled = trace_probe_is_enabled(tp);

        /* This also changes "enabled" state */
        if (file) {
                ret = trace_probe_add_file(tp, file);
                if (ret)
                        return ret;
        } else
                trace_probe_set_flag(tp, TP_FLAG_PROFILE);

        if (enabled)
                return 0;

        list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
                tk = container_of(pos, struct trace_kprobe, tp);
                if (trace_kprobe_has_gone(tk))
                        continue;
                ret = __enable_trace_kprobe(tk);
                if (ret)
                        break;
                enabled = true;
        }

        if (ret) {
                /* Failed to enable one of them. Roll back all */
                if (enabled)
                        __disable_trace_kprobe(tp);
                if (file)
                        trace_probe_remove_file(tp, file);
                else
                        trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
        }

        return ret;
}

/*
 * Disable trace_probe.
 * If the file is NULL, disable the "perf" handler; otherwise disable the
 * "trace" handler.
 */
static int disable_trace_kprobe(struct trace_event_call *call,
                                struct trace_event_file *file)
{
        struct trace_probe *tp;

        tp = trace_probe_primary_from_call(call);
        if (WARN_ON_ONCE(!tp))
                return -ENODEV;

        if (file) {
                if (!trace_probe_get_file_link(tp, file))
                        return -ENOENT;
                if (!trace_probe_has_single_file(tp))
                        goto out;
                trace_probe_clear_flag(tp, TP_FLAG_TRACE);
        } else
                trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

        if (!trace_probe_is_enabled(tp))
                __disable_trace_kprobe(tp);

 out:
        if (file)
                /*
                 * Synchronization is done in the function below. For a perf
                 * event, file == NULL and perf_trace_event_unreg() calls
                 * tracepoint_synchronize_unregister() to synchronize the
                 * event, so we don't need to care about it here.
                 */
                trace_probe_remove_file(tp, file);

        return 0;
}

#if defined(CONFIG_DYNAMIC_FTRACE) && \
        !defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
static bool __within_notrace_func(unsigned long addr)
{
        unsigned long offset, size;

        if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
                return false;

        /* Get the entry address of the target function */
        addr -= offset;

        /*
         * Since ftrace_location_range() does inclusive range check, we need
         * to subtract 1 byte from the end address.
         */
        return !ftrace_location_range(addr, addr + size - 1);
}

static bool within_notrace_func(struct trace_kprobe *tk)
{
        unsigned long addr = trace_kprobe_address(tk);
        char symname[KSYM_NAME_LEN], *p;

        if (!__within_notrace_func(addr))
                return false;

        /* Check if the address is on a suffixed-symbol */
        if (!lookup_symbol_name(addr, symname)) {
                p = strchr(symname, '.');
                if (!p)
                        return true;
                *p = '\0';
                addr = (unsigned long)kprobe_lookup_name(symname, 0);
                if (addr)
                        return __within_notrace_func(addr);
        }

        return true;
}
#else
#define within_notrace_func(tk) (false)
#endif

/* Internal register function - just handle k*probes and flags */
static int __register_trace_kprobe(struct trace_kprobe *tk)
{
        int i, ret;

        ret = security_locked_down(LOCKDOWN_KPROBES);
        if (ret)
                return ret;

        if (trace_kprobe_is_registered(tk))
                return -EINVAL;

        if (within_notrace_func(tk)) {
                pr_warn("Could not probe notrace function %s\n",
                        trace_kprobe_symbol(tk));
                return -EINVAL;
        }

        for (i = 0; i < tk->tp.nr_args; i++) {
                ret = traceprobe_update_arg(&tk->tp.args[i]);
                if (ret)
                        return ret;
        }

        /* Set/clear disabled flag according to tp->flag */
        if (trace_probe_is_enabled(&tk->tp))
                tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
        else
                tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;

        if (trace_kprobe_is_return(tk))
                ret = register_kretprobe(&tk->rp);
        else
                ret = register_kprobe(&tk->rp.kp);

        return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_kprobe(struct trace_kprobe *tk)
{
        if (trace_kprobe_is_registered(tk)) {
                if (trace_kprobe_is_return(tk))
                        unregister_kretprobe(&tk->rp);
                else
                        unregister_kprobe(&tk->rp.kp);
                /* Cleanup kprobe for reuse and mark it unregistered */
                INIT_HLIST_NODE(&tk->rp.kp.hlist);
                INIT_LIST_HEAD(&tk->rp.kp.list);
                if (tk->rp.kp.symbol_name)
                        tk->rp.kp.addr = NULL;
        }
}

/* Unregister a trace_probe and probe_event */
static int unregister_trace_kprobe(struct trace_kprobe *tk)
{
        /* If other probes are on the event, just unregister kprobe */
        if (trace_probe_has_sibling(&tk->tp))
                goto unreg;

        /* Enabled event can not be unregistered */
        if (trace_probe_is_enabled(&tk->tp))
                return -EBUSY;

        /* Will fail if probe is being used by ftrace or perf */
        if (unregister_kprobe_event(tk))
                return -EBUSY;

unreg:
        __unregister_trace_kprobe(tk);
        dyn_event_remove(&tk->devent);
        trace_probe_unlink(&tk->tp);

        return 0;
}

static bool trace_kprobe_has_same_kprobe(struct trace_kprobe *orig,
                                         struct trace_kprobe *comp)
{
        struct trace_probe_event *tpe = orig->tp.event;
        struct trace_probe *pos;
        int i;

        list_for_each_entry(pos, &tpe->probes, list) {
                orig = container_of(pos, struct trace_kprobe, tp);
                if (strcmp(trace_kprobe_symbol(orig),
                           trace_kprobe_symbol(comp)) ||
                    trace_kprobe_offset(orig) != trace_kprobe_offset(comp))
                        continue;

                /*
                 * trace_probe_compare_arg_type() ensured that nr_args and
                 * each argument name and type are same. Let's compare comm.
                 */
                for (i = 0; i < orig->tp.nr_args; i++) {
                        if (strcmp(orig->tp.args[i].comm,
                                   comp->tp.args[i].comm))
                                break;
                }

                if (i == orig->tp.nr_args)
                        return true;
        }

        return false;
}

static int append_trace_kprobe(struct trace_kprobe *tk, struct trace_kprobe *to)
{
        int ret;

        ret = trace_probe_compare_arg_type(&tk->tp, &to->tp);
        if (ret) {
                /* Note that argument starts index = 2 */
                trace_probe_log_set_index(ret + 1);
                trace_probe_log_err(0, DIFF_ARG_TYPE);
                return -EEXIST;
        }
        if (trace_kprobe_has_same_kprobe(to, tk)) {
                trace_probe_log_set_index(0);
                trace_probe_log_err(0, SAME_PROBE);
                return -EEXIST;
        }

        /* Append to existing event */
        ret = trace_probe_append(&tk->tp, &to->tp);
        if (ret)
                return ret;

        /* Register k*probe */
        ret = __register_trace_kprobe(tk);
        if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
                pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
                ret = 0;
        }

        if (ret)
                trace_probe_unlink(&tk->tp);
        else
                dyn_event_add(&tk->devent);

        return ret;
}

/* Register a trace_probe and probe_event */
static int register_trace_kprobe(struct trace_kprobe *tk)
{
        struct trace_kprobe *old_tk;
        int ret;

        mutex_lock(&event_mutex);

        old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
                                   trace_probe_group_name(&tk->tp));
        if (old_tk) {
                if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
                        trace_probe_log_set_index(0);
                        trace_probe_log_err(0, DIFF_PROBE_TYPE);
                        ret = -EEXIST;
                } else {
                        ret = append_trace_kprobe(tk, old_tk);
                }
                goto end;
        }

        /* Register new event */
        ret = register_kprobe_event(tk);
        if (ret) {
                pr_warn("Failed to register probe event(%d)\n", ret);
                goto end;
        }

        /* Register k*probe */
        ret = __register_trace_kprobe(tk);
        if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
                pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
                ret = 0;
        }

        if (ret < 0)
                unregister_kprobe_event(tk);
        else
                dyn_event_add(&tk->devent);

end:
        mutex_unlock(&event_mutex);
        return ret;
}

/* Module notifier call back, checking event on the module */
static int trace_kprobe_module_callback(struct notifier_block *nb,
                                       unsigned long val, void *data)
{
        struct module *mod = data;
        struct dyn_event *pos;
        struct trace_kprobe *tk;
        int ret;

        if (val != MODULE_STATE_COMING)
                return NOTIFY_DONE;

        /* Update probes on coming module */
        mutex_lock(&event_mutex);
        for_each_trace_kprobe(tk, pos) {
                if (trace_kprobe_within_module(tk, mod)) {
                        /*
                         * No need to check busy - the probe should already
                         * have gone away when the module was unloaded.
                         */
                        __unregister_trace_kprobe(tk);
                        ret = __register_trace_kprobe(tk);
                        if (ret)
                                pr_warn("Failed to re-register probe %s on %s: %d\n",
                                        trace_probe_name(&tk->tp),
                                        module_name(mod), ret);
                }
        }
        mutex_unlock(&event_mutex);

        return NOTIFY_DONE;
}

static struct notifier_block trace_kprobe_module_nb = {
        .notifier_call = trace_kprobe_module_callback,
        .priority = 1   /* Invoked after kprobe module callback */
};

/* Convert ':' and '.' characters in generated event names into '_' */
static inline void sanitize_event_name(char *name)
{
        while (*name++ != '\0')
                if (*name == ':' || *name == '.')
                        *name = '_';
}

static int __trace_kprobe_create(int argc, const char *argv[])
{
        /*
         * Argument syntax:
         *  - Add kprobe:
         *      p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
         *  - Add kretprobe:
         *      r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
         *    Or
         *      p[:[GRP/]EVENT] [MOD:]KSYM[+0]%return [FETCHARGS]
         *
         * Fetch args:
         *  $retval     : fetch return value
         *  $stack      : fetch stack address
         *  $stackN     : fetch Nth of stack (N:0-)
         *  $comm       : fetch current task comm
         *  @ADDR       : fetch memory at ADDR (ADDR should be in kernel)
         *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
         *  %REG        : fetch register REG
         * Dereferencing memory fetch:
         *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
         * Alias name of args:
         *  NAME=FETCHARG : set NAME as alias of FETCHARG.
         * Type of args:
         *  FETCHARG:TYPE : use TYPE instead of unsigned long.
         */
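        /*
         * For example (cf. Documentation/trace/kprobetrace.rst; register
         * names are arch-specific, x86 shown here):
         *
         *   p:myprobe do_sys_open dfd=%ax filename=%dx flags=%cx mode=+4($stack)
         *   r:myretprobe do_sys_open $retval
         */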
        struct trace_kprobe *tk = NULL;
        int i, len, ret = 0;
        bool is_return = false;
        char *symbol = NULL, *tmp = NULL;
        const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
        int maxactive = 0;
        long offset = 0;
        void *addr = NULL;
        char buf[MAX_EVENT_NAME_LEN];
        unsigned int flags = TPARG_FL_KERNEL;

        switch (argv[0][0]) {
        case 'r':
                is_return = true;
                break;
        case 'p':
                break;
        default:
                return -ECANCELED;
        }
        if (argc < 2)
                return -ECANCELED;

        trace_probe_log_init("trace_kprobe", argc, argv);

        event = strchr(&argv[0][1], ':');
        if (event)
                event++;

        if (isdigit(argv[0][1])) {
                if (!is_return) {
                        trace_probe_log_err(1, MAXACT_NO_KPROBE);
                        goto parse_error;
                }
                if (event)
                        len = event - &argv[0][1] - 1;
                else
                        len = strlen(&argv[0][1]);
                if (len > MAX_EVENT_NAME_LEN - 1) {
                        trace_probe_log_err(1, BAD_MAXACT);
                        goto parse_error;
                }
                memcpy(buf, &argv[0][1], len);
                buf[len] = '\0';
                ret = kstrtouint(buf, 0, &maxactive);
                if (ret || !maxactive) {
                        trace_probe_log_err(1, BAD_MAXACT);
                        goto parse_error;
                }
                /*
                 * kretprobe instances are iterated over via a list. The
                 * maximum should stay reasonable.
                 */
                if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
                        trace_probe_log_err(1, MAXACT_TOO_BIG);
                        goto parse_error;
                }
        }

        /*
         * Try to parse an address. If that fails, try to read the
         * input as a symbol.
         */
        if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
                trace_probe_log_set_index(1);
                /* Check whether uprobe event specified */
                if (strchr(argv[1], '/') && strchr(argv[1], ':')) {
                        ret = -ECANCELED;
                        goto error;
                }
                /* a symbol specified */
                symbol = kstrdup(argv[1], GFP_KERNEL);
                if (!symbol)
                        return -ENOMEM;

                tmp = strchr(symbol, '%');
                if (tmp) {
                        if (!strcmp(tmp, "%return")) {
                                *tmp = '\0';
                                is_return = true;
                        } else {
                                trace_probe_log_err(tmp - symbol, BAD_ADDR_SUFFIX);
                                goto parse_error;
                        }
                }

                /* TODO: support .init module functions */
                ret = traceprobe_split_symbol_offset(symbol, &offset);
                if (ret || offset < 0 || offset > UINT_MAX) {
                        trace_probe_log_err(0, BAD_PROBE_ADDR);
                        goto parse_error;
                }
                if (is_return)
                        flags |= TPARG_FL_RETURN;
                ret = kprobe_on_func_entry(NULL, symbol, offset);
                if (ret == 0)
                        flags |= TPARG_FL_FENTRY;
                /* Defer the ENOENT case until register kprobe */
                if (ret == -EINVAL && is_return) {
                        trace_probe_log_err(0, BAD_RETPROBE);
                        goto parse_error;
                }
        }

        trace_probe_log_set_index(0);
        if (event) {
                ret = traceprobe_parse_event_name(&event, &group, buf,
                                                  event - argv[0]);
                if (ret)
                        goto parse_error;
        } else {
                /* Make a new event name */
                if (symbol)
                        snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
                                 is_return ? 'r' : 'p', symbol, offset);
                else
                        snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
                                 is_return ? 'r' : 'p', addr);
                sanitize_event_name(buf);
                event = buf;
        }

        /* setup a probe */
        tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
                               argc - 2, is_return);
        if (IS_ERR(tk)) {
                ret = PTR_ERR(tk);
                /* This must return -ENOMEM, else there is a bug */
                WARN_ON_ONCE(ret != -ENOMEM);
                goto out;       /* We know tk is not allocated */
        }
        argc -= 2; argv += 2;

        /* parse arguments */
        for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
                tmp = kstrdup(argv[i], GFP_KERNEL);
                if (!tmp) {
                        ret = -ENOMEM;
                        goto error;
                }

                trace_probe_log_set_index(i + 2);
                ret = traceprobe_parse_probe_arg(&tk->tp, i, tmp, flags);
                kfree(tmp);
                if (ret)
                        goto error;     /* This can be -ENOMEM */
        }

        ret = traceprobe_set_print_fmt(&tk->tp, is_return);
        if (ret < 0)
                goto error;

        ret = register_trace_kprobe(tk);
        if (ret) {
                trace_probe_log_set_index(1);
                if (ret == -EILSEQ)
                        trace_probe_log_err(0, BAD_INSN_BNDRY);
                else if (ret == -ENOENT)
                        trace_probe_log_err(0, BAD_PROBE_ADDR);
                else if (ret != -ENOMEM && ret != -EEXIST)
                        trace_probe_log_err(0, FAIL_REG_PROBE);
                goto error;
        }

out:
        trace_probe_log_clear();
        kfree(symbol);
        return ret;

parse_error:
        ret = -EINVAL;
error:
        free_trace_kprobe(tk);
        goto out;
}

static int trace_kprobe_create(const char *raw_command)
{
        return trace_probe_create(raw_command, __trace_kprobe_create);
}

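/*
 * Dispatch one line written to the dynamic-events interface: a leading '-'
 * deletes an existing event, anything else defines a new probe. Usage sketch
 * via tracefs (see Documentation/trace/kprobetrace.rst; the event name is
 * illustrative):
 *
 *   echo 'p:myprobe do_sys_open dfd=%ax' >> /sys/kernel/tracing/kprobe_events
 *   echo '-:myprobe' >> /sys/kernel/tracing/kprobe_events
 */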
static int create_or_delete_trace_kprobe(const char *raw_command)
{
        int ret;

        if (raw_command[0] == '-')
                return dyn_event_release(raw_command, &trace_kprobe_ops);

        ret = trace_kprobe_create(raw_command);
        return ret == -ECANCELED ? -EINVAL : ret;
}

static int trace_kprobe_run_command(struct dynevent_cmd *cmd)
{
        return create_or_delete_trace_kprobe(cmd->seq.buffer);
}

/**
 * kprobe_event_cmd_init - Initialize a kprobe event command object
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @buf: A pointer to the buffer used to build the command
 * @maxlen: The length of the buffer passed in @buf
 *
 * Initialize a kprobe event command object.  Use this before
 * calling any of the other kprobe_event functions.
 */
void kprobe_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
{
        dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_KPROBE,
                          trace_kprobe_run_command);
}
EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);

/**
 * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @kretprobe: Is this a return probe?
 * @name: The name of the kprobe event
 * @loc: The location of the kprobe event
 * @args: Variable number of arg (pairs), one pair for each field
 *
 * NOTE: Users normally won't want to call this function directly, but
 * rather use the kprobe_event_gen_cmd_start() wrapper, which automatically
 * adds a NULL to the end of the arg list.  If this function is used
 * directly, make sure the last arg in the variable arg list is NULL.
 *
 * Generate a kprobe event command to be executed by
 * kprobe_event_gen_cmd_end().  This function can be used to generate the
 * complete command or only the first part of it; in the latter case,
 * kprobe_event_add_fields() can be used to add more fields following this.
 *
 * Unlike synth_event_gen_cmd_start(), @loc must be specified. This
 * function returns -EINVAL if @loc == NULL.
 *
 * Return: 0 if successful, error otherwise.
 */
int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
                                 const char *name, const char *loc, ...)
{
        char buf[MAX_EVENT_NAME_LEN];
        struct dynevent_arg arg;
        va_list args;
        int ret;

        if (cmd->type != DYNEVENT_TYPE_KPROBE)
                return -EINVAL;

        if (!loc)
                return -EINVAL;

        if (kretprobe)
                snprintf(buf, MAX_EVENT_NAME_LEN, "r:kprobes/%s", name);
        else
                snprintf(buf, MAX_EVENT_NAME_LEN, "p:kprobes/%s", name);

        ret = dynevent_str_add(cmd, buf);
        if (ret)
                return ret;

        dynevent_arg_init(&arg, 0);
        arg.str = loc;
        ret = dynevent_arg_add(cmd, &arg, NULL);
        if (ret)
                return ret;

        va_start(args, loc);
        for (;;) {
                const char *field;

                field = va_arg(args, const char *);
                if (!field)
                        break;

                if (++cmd->n_fields > MAX_TRACE_ARGS) {
                        ret = -EINVAL;
                        break;
                }

                arg.str = field;
                ret = dynevent_arg_add(cmd, &arg, NULL);
                if (ret)
                        break;
        }
        va_end(args);

        return ret;
}
EXPORT_SYMBOL_GPL(__kprobe_event_gen_cmd_start);

/**
 * __kprobe_event_add_fields - Add probe fields to a kprobe command from arg list
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @args: Variable number of arg (pairs), one pair for each field
 *
 * NOTE: Users normally won't want to call this function directly, but
 * rather use the kprobe_event_add_fields() wrapper, which
 * automatically adds a NULL to the end of the arg list.  If this
 * function is used directly, make sure the last arg in the variable
 * arg list is NULL.
 *
 * Add probe fields to an existing kprobe command using a variable
 * list of args.  Fields are added in the same order they're listed.
 *
 * Return: 0 if successful, error otherwise.
 */
int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...)
{
        struct dynevent_arg arg;
        va_list args;
        int ret = 0;

        if (cmd->type != DYNEVENT_TYPE_KPROBE)
                return -EINVAL;

        dynevent_arg_init(&arg, 0);

        va_start(args, cmd);
        for (;;) {
                const char *field;

                field = va_arg(args, const char *);
                if (!field)
                        break;

                if (++cmd->n_fields > MAX_TRACE_ARGS) {
                        ret = -EINVAL;
                        break;
                }

                arg.str = field;
                ret = dynevent_arg_add(cmd, &arg, NULL);
                if (ret)
                        break;
        }
        va_end(args);

        return ret;
}
EXPORT_SYMBOL_GPL(__kprobe_event_add_fields);

/**
 * kprobe_event_delete - Delete a kprobe event
 * @name: The name of the kprobe event to delete
 *
 * Delete a kprobe event with the given @name from kernel code rather
 * than directly from the command line.
 *
 * Return: 0 if successful, error otherwise.
 */
int kprobe_event_delete(const char *name)
{
        char buf[MAX_EVENT_NAME_LEN];

        snprintf(buf, MAX_EVENT_NAME_LEN, "-:%s", name);

        return create_or_delete_trace_kprobe(buf);
}
EXPORT_SYMBOL_GPL(kprobe_event_delete);
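
/*
 * In-kernel usage sketch for the command API above, modeled on
 * kprobe_event_gen_test.c; the event name, probe location, and fields are
 * illustrative (x86 register names shown):
 *
 *      struct dynevent_cmd cmd;
 *      char *buf;
 *      int ret;
 *
 *      buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *      if (!buf)
 *              return -ENOMEM;
 *      kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *      ret = kprobe_event_gen_cmd_start(&cmd, "myprobe", "do_sys_open",
 *                                       "dfd=%ax", "filename=%dx");
 *      if (!ret)
 *              ret = kprobe_event_add_fields(&cmd, "flags=%cx",
 *                                            "mode=+4($stack)");
 *      if (!ret)
 *              ret = kprobe_event_gen_cmd_end(&cmd);
 *
 *      and later, to remove the event again:
 *
 *      ret = kprobe_event_delete("myprobe");
 */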

static int trace_kprobe_release(struct dyn_event *ev)
{
        struct trace_kprobe *tk = to_trace_kprobe(ev);
        int ret = unregister_trace_kprobe(tk);

        if (!ret)
                free_trace_kprobe(tk);
        return ret;
}

static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev)
{
        struct trace_kprobe *tk = to_trace_kprobe(ev);
        int i;

        seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
        if (trace_kprobe_is_return(tk) && tk->rp.maxactive)
                seq_printf(m, "%d", tk->rp.maxactive);
        seq_printf(m, ":%s/%s", trace_probe_group_name(&tk->tp),
                                trace_probe_name(&tk->tp));

        if (!tk->symbol)
                seq_printf(m, " 0x%p", tk->rp.kp.addr);
        else if (tk->rp.kp.offset)
                seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
                           tk->rp.kp.offset);
        else
                seq_printf(m, " %s", trace_kprobe_symbol(tk));

        for (i = 0; i < tk->tp.nr_args; i++)
                seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
        seq_putc(m, '\n');

        return 0;
}

static int probes_seq_show(struct seq_file *m, void *v)
{
        struct dyn_event *ev = v;

        if (!is_trace_kprobe(ev))
                return 0;

        return trace_kprobe_show(m, ev);
}

static const struct seq_operations probes_seq_op = {
        .start  = dyn_event_seq_start,
        .next   = dyn_event_seq_next,
        .stop   = dyn_event_seq_stop,
        .show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
        int ret;

        ret = security_locked_down(LOCKDOWN_TRACEFS);
        if (ret)
                return ret;

        if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
                ret = dyn_events_release_all(&trace_kprobe_ops);
                if (ret < 0)
                        return ret;
        }

        return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
                            size_t count, loff_t *ppos)
{
        return trace_parse_run_command(file, buffer, count, ppos,
                                       create_or_delete_trace_kprobe);
}

static const struct file_operations kprobe_events_ops = {
        .owner          = THIS_MODULE,
        .open           = probes_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
        .write          = probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
        struct dyn_event *ev = v;
        struct trace_kprobe *tk;

        if (!is_trace_kprobe(ev))
                return 0;

        tk = to_trace_kprobe(ev);
        seq_printf(m, "  %-44s %15lu %15lu\n",
                   trace_probe_name(&tk->tp),
                   trace_kprobe_nhit(tk),
                   tk->rp.kp.nmissed);

        return 0;
}

static const struct seq_operations profile_seq_op = {
        .start  = dyn_event_seq_start,
        .next   = dyn_event_seq_next,
        .stop   = dyn_event_seq_stop,
        .show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
        int ret;

        ret = security_locked_down(LOCKDOWN_TRACEFS);
        if (ret)
                return ret;

        return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
        .owner          = THIS_MODULE,
        .open           = profile_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

/* Kprobe specific fetch functions */

/* Return the length of the string -- including the terminating NUL byte */
static nokprobe_inline int
fetch_store_strlen_user(unsigned long addr)
{
        const void __user *uaddr =  (__force const void __user *)addr;

        return strnlen_user_nofault(uaddr, MAX_STRING_SIZE);
}

/* Return the length of the string -- including the terminating NUL byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
        int ret, len = 0;
        u8 c;

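        /*
         * On architectures where the kernel and user address spaces do not
         * overlap, an address below TASK_SIZE can only be a user-space
         * address, so defer to the user-space variant.
         */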
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
        if (addr < TASK_SIZE)
                return fetch_store_strlen_user(addr);
#endif

        do {
                ret = copy_from_kernel_nofault(&c, (u8 *)addr + len, 1);
                len++;
        } while (c && ret == 0 && len < MAX_STRING_SIZE);

        return (ret < 0) ? ret : len;
}

/*
 * Fetch a null-terminated string from user. Caller MUST set *(u32 *)buf
 * with max length and relative data location.
 */
static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base)
{
        const void __user *uaddr =  (__force const void __user *)addr;
        int maxlen = get_loc_len(*(u32 *)dest);
        void *__dest;
        long ret;

        if (unlikely(!maxlen))
                return -ENOMEM;

        __dest = get_loc_data(dest, base);

        ret = strncpy_from_user_nofault(__dest, uaddr, maxlen);
        if (ret >= 0)
                *(u32 *)dest = make_data_loc(ret, __dest - base);

        return ret;
}

/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
        int maxlen = get_loc_len(*(u32 *)dest);
        void *__dest;
        long ret;

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
        if ((unsigned long)addr < TASK_SIZE)
                return fetch_store_string_user(addr, dest, base);
#endif

        if (unlikely(!maxlen))
                return -ENOMEM;

        __dest = get_loc_data(dest, base);

        /*
         * Try to get string again, since the string can be changed while
         * probing.
         */
        ret = strncpy_from_kernel_nofault(__dest, (void *)addr, maxlen);
        if (ret >= 0)
                *(u32 *)dest = make_data_loc(ret, __dest - base);

        return ret;
}

static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size)
{
        const void __user *uaddr =  (__force const void __user *)src;

        return copy_from_user_nofault(dest, uaddr, size);
}

static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
        if ((unsigned long)src < TASK_SIZE)
                return probe_mem_read_user(dest, src, size);
#endif
        return copy_from_kernel_nofault(dest, src, size);
}

/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
                   void *base)
{
        unsigned long val;

retry:
        /* 1st stage: get value from context */
        switch (code->op) {
        case FETCH_OP_REG:
                val = regs_get_register(regs, code->param);
                break;
        case FETCH_OP_STACK:
                val = regs_get_kernel_stack_nth(regs, code->param);
                break;
        case FETCH_OP_STACKP:
                val = kernel_stack_pointer(regs);
                break;
        case FETCH_OP_RETVAL:
                val = regs_return_value(regs);
                break;
        case FETCH_OP_IMM:
                val = code->immediate;
                break;
        case FETCH_OP_COMM:
                val = (unsigned long)current->comm;
                break;
        case FETCH_OP_DATA:
                val = (unsigned long)code->data;
                break;
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
        case FETCH_OP_ARG:
                val = regs_get_kernel_argument(regs, code->param);
                break;
#endif
        case FETCH_NOP_SYMBOL:  /* Ignore a placeholder */
                code++;
                goto retry;
        default:
                return -EILSEQ;
        }
        code++;

        return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn);

/* Kprobe handler */
static nokprobe_inline void
__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
                    struct trace_event_file *trace_file)
{
        struct kprobe_trace_entry_head *entry;
        struct trace_event_call *call = trace_probe_event_call(&tk->tp);
        struct trace_event_buffer fbuffer;
        int dsize;

        WARN_ON(call != trace_file->event_call);

        if (trace_trigger_soft_disabled(trace_file))
                return;

        fbuffer.trace_ctx = tracing_gen_ctx();
        fbuffer.trace_file = trace_file;

        dsize = __get_data_size(&tk->tp, regs);

        fbuffer.event =
                trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
                                        call->event.type,
                                        sizeof(*entry) + tk->tp.size + dsize,
                                        fbuffer.trace_ctx);
        if (!fbuffer.event)
                return;

        fbuffer.regs = regs;
        entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
        entry->ip = (unsigned long)tk->rp.kp.addr;
        store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);

        trace_event_buffer_commit(&fbuffer);
}

static void
kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
{
        struct event_file_link *link;

        trace_probe_for_each_link_rcu(link, &tk->tp)
                __kprobe_trace_func(tk, regs, link->file);
}
NOKPROBE_SYMBOL(kprobe_trace_func);

/* Kretprobe handler */
static nokprobe_inline void
__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
                       struct pt_regs *regs,
                       struct trace_event_file *trace_file)
{
        struct kretprobe_trace_entry_head *entry;
        struct trace_event_buffer fbuffer;
        struct trace_event_call *call = trace_probe_event_call(&tk->tp);
        int dsize;

        WARN_ON(call != trace_file->event_call);

        if (trace_trigger_soft_disabled(trace_file))
                return;

        fbuffer.trace_ctx = tracing_gen_ctx();
        fbuffer.trace_file = trace_file;

        dsize = __get_data_size(&tk->tp, regs);
        fbuffer.event =
                trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
                                        call->event.type,
                                        sizeof(*entry) + tk->tp.size + dsize,
                                        fbuffer.trace_ctx);
        if (!fbuffer.event)
                return;

        fbuffer.regs = regs;
        entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
        entry->func = (unsigned long)tk->rp.kp.addr;
        entry->ret_ip = (unsigned long)ri->ret_addr;
        store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);

        trace_event_buffer_commit(&fbuffer);
}

static void
kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
                     struct pt_regs *regs)
{
        struct event_file_link *link;

        trace_probe_for_each_link_rcu(link, &tk->tp)
                __kretprobe_trace_func(tk, ri, regs, link->file);
}
NOKPROBE_SYMBOL(kretprobe_trace_func);

/* Event entry printers */
static enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags,
                   struct trace_event *event)
{
        struct kprobe_trace_entry_head *field;
        struct trace_seq *s = &iter->seq;
        struct trace_probe *tp;

        field = (struct kprobe_trace_entry_head *)iter->ent;
        tp = trace_probe_primary_from_call(
                container_of(event, struct trace_event_call, event));
        if (WARN_ON_ONCE(!tp))
                goto out;

        trace_seq_printf(s, "%s: (", trace_probe_name(tp));

        if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
                goto out;

        trace_seq_putc(s, ')');

        if (print_probe_args(s, tp->args, tp->nr_args,
                             (u8 *)&field[1], field) < 0)
                goto out;

        trace_seq_putc(s, '\n');
 out:
        return trace_handle_return(s);
}

static enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags,
                      struct trace_event *event)
{
        struct kretprobe_trace_entry_head *field;
        struct trace_seq *s = &iter->seq;
        struct trace_probe *tp;

        field = (struct kretprobe_trace_entry_head *)iter->ent;
        tp = trace_probe_primary_from_call(
                container_of(event, struct trace_event_call, event));
        if (WARN_ON_ONCE(!tp))
                goto out;

        trace_seq_printf(s, "%s: (", trace_probe_name(tp));

        if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
                goto out;

        trace_seq_puts(s, " <- ");

        if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
                goto out;

        trace_seq_putc(s, ')');

        if (print_probe_args(s, tp->args, tp->nr_args,
                             (u8 *)&field[1], field) < 0)
                goto out;

        trace_seq_putc(s, '\n');

 out:
        return trace_handle_return(s);
}

1541static int kprobe_event_define_fields(struct trace_event_call *event_call)
1542{
1543        int ret;
1544        struct kprobe_trace_entry_head field;
1545        struct trace_probe *tp;
1546
1547        tp = trace_probe_primary_from_call(event_call);
1548        if (WARN_ON_ONCE(!tp))
1549                return -ENOENT;
1550
1551        DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1552
1553        return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1554}
1555
1556static int kretprobe_event_define_fields(struct trace_event_call *event_call)
1557{
1558        int ret;
1559        struct kretprobe_trace_entry_head field;
1560        struct trace_probe *tp;
1561
1562        tp = trace_probe_primary_from_call(event_call);
1563        if (WARN_ON_ONCE(!tp))
1564                return -ENOENT;
1565
1566        DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1567        DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1568
1569        return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1570}
1571
1572#ifdef CONFIG_PERF_EVENTS
1573
1574/* Kprobe profile handler */
1575static int
1576kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1577{
1578        struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1579        struct kprobe_trace_entry_head *entry;
1580        struct hlist_head *head;
1581        int size, __size, dsize;
1582        int rctx;
1583
1584        if (bpf_prog_array_valid(call)) {
1585                unsigned long orig_ip = instruction_pointer(regs);
1586                int ret;
1587
1588                ret = trace_call_bpf(call, regs);
1589
1590                /*
1591                 * We need to check and see if we modified the pc of the
1592                 * pt_regs, and if so return 1 so that we don't do the
1593                 * single stepping.
1594                 */
1595                if (orig_ip != instruction_pointer(regs))
1596                        return 1;
1597                if (!ret)
1598                        return 0;
1599        }
1600
1601        head = this_cpu_ptr(call->perf_events);
1602        if (hlist_empty(head))
1603                return 0;
1604
1605        dsize = __get_data_size(&tk->tp, regs);
1606        __size = sizeof(*entry) + tk->tp.size + dsize;
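        /*
         * Round the record up so that it plus perf's u32 size header stays
         * u64-aligned in the ring buffer. Worked example: for __size == 30,
         * ALIGN(30 + 4, 8) == 40, so size == 36 and header plus record
         * again total 40 bytes.
         */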
1607        size = ALIGN(__size + sizeof(u32), sizeof(u64));
1608        size -= sizeof(u32);
1609
1610        entry = perf_trace_buf_alloc(size, NULL, &rctx);
1611        if (!entry)
1612                return 0;
1613
1614        entry->ip = (unsigned long)tk->rp.kp.addr;
1615        memset(&entry[1], 0, dsize);
1616        store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1617        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1618                              head, NULL);
1619        return 0;
1620}
1621NOKPROBE_SYMBOL(kprobe_perf_func);
1622
1623/* Kretprobe profile handler */
1624static void
1625kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1626                    struct pt_regs *regs)
1627{
1628        struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1629        struct kretprobe_trace_entry_head *entry;
1630        struct hlist_head *head;
1631        int size, __size, dsize;
1632        int rctx;
1633
1634        if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
1635                return;
1636
1637        head = this_cpu_ptr(call->perf_events);
1638        if (hlist_empty(head))
1639                return;
1640
1641        dsize = __get_data_size(&tk->tp, regs);
1642        __size = sizeof(*entry) + tk->tp.size + dsize;
1643        size = ALIGN(__size + sizeof(u32), sizeof(u64));
1644        size -= sizeof(u32);
1645
1646        entry = perf_trace_buf_alloc(size, NULL, &rctx);
1647        if (!entry)
1648                return;
1649
1650        entry->func = (unsigned long)tk->rp.kp.addr;
1651        entry->ret_ip = (unsigned long)ri->ret_addr;
1652        store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1653        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1654                              head, NULL);
1655}
1656NOKPROBE_SYMBOL(kretprobe_perf_func);
1657
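/*
 * Report the probe location backing a perf event, for the bpf
 * task-fd-query interface: either a symbol+offset pair or a raw address,
 * but never both.
 */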
1658int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
1659                        const char **symbol, u64 *probe_offset,
1660                        u64 *probe_addr, bool perf_type_tracepoint)
1661{
1662        const char *pevent = trace_event_name(event->tp_event);
1663        const char *group = event->tp_event->class->system;
1664        struct trace_kprobe *tk;
1665
1666        if (perf_type_tracepoint)
1667                tk = find_trace_kprobe(pevent, group);
1668        else
1669                tk = trace_kprobe_primary_from_call(event->tp_event);
1670        if (!tk)
1671                return -EINVAL;
1672
1673        *fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
1674                                              : BPF_FD_TYPE_KPROBE;
1675        if (tk->symbol) {
1676                *symbol = tk->symbol;
1677                *probe_offset = tk->rp.kp.offset;
1678                *probe_addr = 0;
1679        } else {
1680                *symbol = NULL;
1681                *probe_offset = 0;
1682                *probe_addr = (unsigned long)tk->rp.kp.addr;
1683        }
1684        return 0;
1685}
1686#endif  /* CONFIG_PERF_EVENTS */
1687
/*
 * Called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
 *
 * kprobe_trace_self_tests_init() calls enable_trace_kprobe() and
 * disable_trace_kprobe() locklessly, but it cannot race with this __init
 * function.
 */
1694static int kprobe_register(struct trace_event_call *event,
1695                           enum trace_reg type, void *data)
1696{
1697        struct trace_event_file *file = data;
1698
1699        switch (type) {
1700        case TRACE_REG_REGISTER:
1701                return enable_trace_kprobe(event, file);
1702        case TRACE_REG_UNREGISTER:
1703                return disable_trace_kprobe(event, file);
1704
1705#ifdef CONFIG_PERF_EVENTS
1706        case TRACE_REG_PERF_REGISTER:
1707                return enable_trace_kprobe(event, NULL);
1708        case TRACE_REG_PERF_UNREGISTER:
1709                return disable_trace_kprobe(event, NULL);
1710        case TRACE_REG_PERF_OPEN:
1711        case TRACE_REG_PERF_CLOSE:
1712        case TRACE_REG_PERF_ADD:
1713        case TRACE_REG_PERF_DEL:
1714                return 0;
1715#endif
1716        }
1717        return 0;
1718}
1719
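/*
 * Dispatch a kprobe hit to the ftrace and/or perf handlers, depending on
 * which flags are set. A nonzero return (from kprobe_perf_func() when a
 * BPF program changed regs->ip) tells the kprobe core to skip
 * single-stepping the original instruction.
 */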
1720static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1721{
1722        struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
1723        int ret = 0;
1724
1725        raw_cpu_inc(*tk->nhit);
1726
1727        if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1728                kprobe_trace_func(tk, regs);
1729#ifdef CONFIG_PERF_EVENTS
1730        if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1731                ret = kprobe_perf_func(tk, regs);
1732#endif
1733        return ret;
1734}
1735NOKPROBE_SYMBOL(kprobe_dispatcher);
1736
1737static int
1738kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1739{
        struct kretprobe *rp = get_kretprobe(ri);
        struct trace_kprobe *tk;

        /*
         * There is a small chance that get_kretprobe(ri) returns NULL when
         * the kretprobe is unregistered on another CPU between the kretprobe
         * trampoline handler and this function.
         */
        if (unlikely(!rp))
                return 0;

        tk = container_of(rp, struct trace_kprobe, rp);
1742
1743        raw_cpu_inc(*tk->nhit);
1744
1745        if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1746                kretprobe_trace_func(tk, ri, regs);
1747#ifdef CONFIG_PERF_EVENTS
1748        if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1749                kretprobe_perf_func(tk, ri, regs);
1750#endif
        return 0;       /* We don't tweak the kernel, so just return 0 */
1752}
1753NOKPROBE_SYMBOL(kretprobe_dispatcher);
1754
1755static struct trace_event_functions kretprobe_funcs = {
1756        .trace          = print_kretprobe_event
1757};
1758
1759static struct trace_event_functions kprobe_funcs = {
1760        .trace          = print_kprobe_event
1761};
1762
1763static struct trace_event_fields kretprobe_fields_array[] = {
1764        { .type = TRACE_FUNCTION_TYPE,
1765          .define_fields = kretprobe_event_define_fields },
1766        {}
1767};
1768
1769static struct trace_event_fields kprobe_fields_array[] = {
1770        { .type = TRACE_FUNCTION_TYPE,
1771          .define_fields = kprobe_event_define_fields },
1772        {}
1773};
1774
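/*
 * Bind the print/define_fields callbacks matching the probe flavor and
 * mark the event call as kprobe-based.
 */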
1775static inline void init_trace_event_call(struct trace_kprobe *tk)
1776{
1777        struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1778
1779        if (trace_kprobe_is_return(tk)) {
1780                call->event.funcs = &kretprobe_funcs;
1781                call->class->fields_array = kretprobe_fields_array;
1782        } else {
1783                call->event.funcs = &kprobe_funcs;
1784                call->class->fields_array = kprobe_fields_array;
1785        }
1786
1787        call->flags = TRACE_EVENT_FL_KPROBE;
1788        call->class->reg = kprobe_register;
1789}
1790
1791static int register_kprobe_event(struct trace_kprobe *tk)
1792{
1793        init_trace_event_call(tk);
1794
1795        return trace_probe_register_event_call(&tk->tp);
1796}
1797
1798static int unregister_kprobe_event(struct trace_kprobe *tk)
1799{
1800        return trace_probe_unregister_event_call(&tk->tp);
1801}
1802
1803#ifdef CONFIG_PERF_EVENTS
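/*
 * The two helpers below back the perf "kprobe" PMU: perf_kprobe_init()
 * in trace_event_perf.c creates a local event for a perf_event_open()
 * based probe, and perf_kprobe_destroy() tears it down again.
 */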
1804/* create a trace_kprobe, but don't add it to global lists */
1805struct trace_event_call *
1806create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
1807                          bool is_return)
1808{
1809        struct trace_kprobe *tk;
1810        int ret;
1811        char *event;
1812
        /*
         * Local trace_kprobes are not added to dyn_event, so they are never
         * found by find_trace_kprobe(). Therefore, duplicate event names are
         * not a concern here.
         */
1818        event = func ? func : "DUMMY_EVENT";
1819
1820        tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
1821                                offs, 0 /* maxactive */, 0 /* nargs */,
1822                                is_return);
1823
1824        if (IS_ERR(tk)) {
                pr_info("Failed to allocate trace_probe (%d)\n",
                        (int)PTR_ERR(tk));
1827                return ERR_CAST(tk);
1828        }
1829
1830        init_trace_event_call(tk);
1831
1832        if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) {
1833                ret = -ENOMEM;
1834                goto error;
1835        }
1836
1837        ret = __register_trace_kprobe(tk);
1838        if (ret < 0)
1839                goto error;
1840
1841        return trace_probe_event_call(&tk->tp);
1842error:
1843        free_trace_kprobe(tk);
1844        return ERR_PTR(ret);
1845}
1846
1847void destroy_local_trace_kprobe(struct trace_event_call *event_call)
1848{
1849        struct trace_kprobe *tk;
1850
1851        tk = trace_kprobe_primary_from_call(event_call);
1852        if (unlikely(!tk))
1853                return;
1854
1855        if (trace_probe_is_enabled(&tk->tp)) {
1856                WARN_ON(1);
1857                return;
1858        }
1859
1860        __unregister_trace_kprobe(tk);
1861
1862        free_trace_kprobe(tk);
1863}
1864#endif /* CONFIG_PERF_EVENTS */
1865
1866static __init void enable_boot_kprobe_events(void)
1867{
1868        struct trace_array *tr = top_trace_array();
1869        struct trace_event_file *file;
1870        struct trace_kprobe *tk;
1871        struct dyn_event *pos;
1872
1873        mutex_lock(&event_mutex);
1874        for_each_trace_kprobe(tk, pos) {
1875                list_for_each_entry(file, &tr->events, list)
1876                        if (file->event_call == trace_probe_event_call(&tk->tp))
1877                                trace_event_enable_disable(file, 1, 0);
1878        }
1879        mutex_unlock(&event_mutex);
1880}
1881
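/*
 * Parse the "kprobe_event=" boot parameter. Probe definitions are
 * separated by semicolons and use commas where the tracefs interface
 * uses spaces, e.g. (illustrative):
 *
 *   kprobe_event=p,vfs_read,$arg1,$arg2;p,vfs_write
 */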
1882static __init void setup_boot_kprobe_events(void)
1883{
1884        char *p, *cmd = kprobe_boot_events_buf;
1885        int ret;
1886
1887        strreplace(kprobe_boot_events_buf, ',', ' ');
1888
1889        while (cmd && *cmd != '\0') {
1890                p = strchr(cmd, ';');
1891                if (p)
1892                        *p++ = '\0';
1893
1894                ret = create_or_delete_trace_kprobe(cmd);
1895                if (ret)
                        pr_warn("Failed to add event (%d): %s\n", ret, cmd);
1897
1898                cmd = p;
1899        }
1900
1901        enable_boot_kprobe_events();
1902}
1903
/*
 * Register dynevent at core_initcall. This allows the kernel to set up
 * kprobe events at postcore_initcall time, before tracefs is available.
 */
1908static __init int init_kprobe_trace_early(void)
1909{
1910        int ret;
1911
1912        ret = dyn_event_register(&trace_kprobe_ops);
1913        if (ret)
1914                return ret;
1915
1916        if (register_module_notifier(&trace_kprobe_module_nb))
1917                return -EINVAL;
1918
1919        return 0;
1920}
1921core_initcall(init_kprobe_trace_early);
1922
1923/* Make a tracefs interface for controlling probe points */
1924static __init int init_kprobe_trace(void)
1925{
1926        int ret;
1927        struct dentry *entry;
1928
1929        ret = tracing_init_dentry();
1930        if (ret)
1931                return 0;
1932
        /* Event list interface */
        entry = tracefs_create_file("kprobe_events", 0644, NULL,
                                    NULL, &kprobe_events_ops);

        if (!entry)
                pr_warn("Could not create tracefs 'kprobe_events' entry\n");
1939
1940        /* Profile interface */
1941        entry = tracefs_create_file("kprobe_profile", 0444, NULL,
1942                                    NULL, &kprobe_profile_ops);
1943
1944        if (!entry)
1945                pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
1946
1947        setup_boot_kprobe_events();
1948
1949        return 0;
1950}
1951fs_initcall(init_kprobe_trace);
1952
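/*
 * A minimal usage sketch of the tracefs files created above (illustrative
 * probe name and symbol):
 *
 *   echo 'p:myprobe do_sys_open' >> /sys/kernel/tracing/kprobe_events
 *   echo 1 > /sys/kernel/tracing/events/kprobes/myprobe/enable
 *   cat /sys/kernel/tracing/kprobe_profile
 *   echo 0 > /sys/kernel/tracing/events/kprobes/myprobe/enable
 *   echo '-:myprobe' >> /sys/kernel/tracing/kprobe_events
 */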
1953
1954#ifdef CONFIG_FTRACE_STARTUP_TEST
1955static __init struct trace_event_file *
1956find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
1957{
1958        struct trace_event_file *file;
1959
1960        list_for_each_entry(file, &tr->events, list)
1961                if (file->event_call == trace_probe_event_call(&tk->tp))
1962                        return file;
1963
1964        return NULL;
1965}
1966
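/*
 * Boot-time selftest: create one kprobe and one kretprobe on the selftest
 * target function, call it once, verify each probe fired exactly once,
 * then tear everything down again.
 */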
/*
 * Nobody but us can call enable_trace_kprobe()/disable_trace_kprobe() at
 * this stage, so we can do this locklessly.
 */
1971static __init int kprobe_trace_self_tests_init(void)
1972{
1973        int ret, warn = 0;
1974        int (*target)(int, int, int, int, int, int);
1975        struct trace_kprobe *tk;
1976        struct trace_event_file *file;
1977
1978        if (tracing_is_disabled())
1979                return -ENODEV;
1980
1981        if (tracing_selftest_disabled)
1982                return 0;
1983
1984        target = kprobe_trace_selftest_target;
1985
1986        pr_info("Testing kprobe tracing: ");
1987
1988        ret = create_or_delete_trace_kprobe("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)");
1989        if (WARN_ON_ONCE(ret)) {
1990                pr_warn("error on probing function entry.\n");
1991                warn++;
1992        } else {
1993                /* Enable trace point */
1994                tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1995                if (WARN_ON_ONCE(tk == NULL)) {
1996                        pr_warn("error on getting new probe.\n");
1997                        warn++;
1998                } else {
1999                        file = find_trace_probe_file(tk, top_trace_array());
2000                        if (WARN_ON_ONCE(file == NULL)) {
2001                                pr_warn("error on getting probe file.\n");
2002                                warn++;
2003                        } else
2004                                enable_trace_kprobe(
2005                                        trace_probe_event_call(&tk->tp), file);
2006                }
2007        }
2008
2009        ret = create_or_delete_trace_kprobe("r:testprobe2 kprobe_trace_selftest_target $retval");
2010        if (WARN_ON_ONCE(ret)) {
2011                pr_warn("error on probing function return.\n");
2012                warn++;
2013        } else {
2014                /* Enable trace point */
2015                tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
2016                if (WARN_ON_ONCE(tk == NULL)) {
2017                        pr_warn("error on getting 2nd new probe.\n");
2018                        warn++;
2019                } else {
2020                        file = find_trace_probe_file(tk, top_trace_array());
2021                        if (WARN_ON_ONCE(file == NULL)) {
2022                                pr_warn("error on getting probe file.\n");
2023                                warn++;
2024                        } else
2025                                enable_trace_kprobe(
2026                                        trace_probe_event_call(&tk->tp), file);
2027                }
2028        }
2029
2030        if (warn)
2031                goto end;
2032
2033        ret = target(1, 2, 3, 4, 5, 6);
2034
        /*
         * No error is expected here: the selftest target returns the sum of
         * its six arguments, so the result must be 1 + 2 + 3 + 4 + 5 + 6 = 21.
         * The check also keeps the optimizer from removing the call to
         * target(), which would otherwise have no side effects and never be
         * performed.
         */
2040        if (ret != 21)
2041                warn++;
2042
2043        /* Disable trace points before removing it */
2044        tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
2045        if (WARN_ON_ONCE(tk == NULL)) {
2046                pr_warn("error on getting test probe.\n");
2047                warn++;
2048        } else {
2049                if (trace_kprobe_nhit(tk) != 1) {
2050                        pr_warn("incorrect number of testprobe hits\n");
2051                        warn++;
2052                }
2053
2054                file = find_trace_probe_file(tk, top_trace_array());
2055                if (WARN_ON_ONCE(file == NULL)) {
2056                        pr_warn("error on getting probe file.\n");
2057                        warn++;
2058                } else
2059                        disable_trace_kprobe(
2060                                trace_probe_event_call(&tk->tp), file);
2061        }
2062
2063        tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
2064        if (WARN_ON_ONCE(tk == NULL)) {
2065                pr_warn("error on getting 2nd test probe.\n");
2066                warn++;
2067        } else {
2068                if (trace_kprobe_nhit(tk) != 1) {
2069                        pr_warn("incorrect number of testprobe2 hits\n");
2070                        warn++;
2071                }
2072
2073                file = find_trace_probe_file(tk, top_trace_array());
2074                if (WARN_ON_ONCE(file == NULL)) {
2075                        pr_warn("error on getting probe file.\n");
2076                        warn++;
2077                } else
2078                        disable_trace_kprobe(
2079                                trace_probe_event_call(&tk->tp), file);
2080        }
2081
2082        ret = create_or_delete_trace_kprobe("-:testprobe");
2083        if (WARN_ON_ONCE(ret)) {
2084                pr_warn("error on deleting a probe.\n");
2085                warn++;
2086        }
2087
2088        ret = create_or_delete_trace_kprobe("-:testprobe2");
2089        if (WARN_ON_ONCE(ret)) {
2090                pr_warn("error on deleting a probe.\n");
2091                warn++;
2092        }
2093
2094end:
2095        ret = dyn_events_release_all(&trace_kprobe_ops);
2096        if (WARN_ON_ONCE(ret)) {
2097                pr_warn("error on cleaning up probes.\n");
2098                warn++;
2099        }
2100        /*
2101         * Wait for the optimizer work to finish. Otherwise it might fiddle
2102         * with probes in already freed __init text.
2103         */
2104        wait_for_kprobe_optimizer();
2105        if (warn)
                pr_cont("NG: Some tests failed. Please check them.\n");
2107        else
2108                pr_cont("OK\n");
2109        return 0;
2110}
2111
2112late_initcall(kprobe_trace_self_tests_init);
2113
2114#endif
2115