linux/kernel/trace/trace_kprobe.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Kprobes-based tracing events
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 */
#define pr_fmt(fmt)	"trace_kprobe: " fmt

#include <linux/bpf-cgroup.h>
#include <linux/security.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/rculist.h>
#include <linux/error-injection.h>

#include <asm/setup.h>  /* for COMMAND_LINE_SIZE */

#include "trace_dynevent.h"
#include "trace_kprobe_selftest.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define KPROBE_EVENT_SYSTEM "kprobes"
#define KRETPROBE_MAXACTIVE_MAX 4096

/* Kprobe early definition from command line */
static char kprobe_boot_events_buf[COMMAND_LINE_SIZE] __initdata;

static int __init set_kprobe_boot_events(char *str)
{
	strlcpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
	disable_tracing_selftest("running kprobe events");

	return 1;
}
__setup("kprobe_event=", set_kprobe_boot_events);
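
/*
 * For example (a sketch based on the kprobe_event= description in
 * Documentation/admin-guide/kernel-parameters.txt; the probed function
 * and fetch args are illustrative only): probe definitions given on
 * the kernel command line use commas in place of spaces, so
 *
 *	kprobe_event=p,vfs_read,$arg1,$arg2
 *
 * defines the same probe as "p vfs_read $arg1 $arg2" would through the
 * tracefs kprobe_events interface implemented below.
 */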

static int trace_kprobe_create(const char *raw_command);
static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_kprobe_release(struct dyn_event *ev);
static bool trace_kprobe_is_busy(struct dyn_event *ev);
static bool trace_kprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev);

static struct dyn_event_operations trace_kprobe_ops = {
	.create = trace_kprobe_create,
	.show = trace_kprobe_show,
	.is_busy = trace_kprobe_is_busy,
	.free = trace_kprobe_release,
	.match = trace_kprobe_match,
};

/*
 * Kprobe event core functions
 */
struct trace_kprobe {
	struct dyn_event	devent;
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long __percpu *nhit;
	const char		*symbol;	/* symbol name */
	struct trace_probe	tp;
};

static bool is_trace_kprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_kprobe_ops;
}

static struct trace_kprobe *to_trace_kprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_kprobe, devent);
}

/**
 * for_each_trace_kprobe - iterate over the trace_kprobe list
 * @pos:	the struct trace_kprobe * for each entry
 * @dpos:	the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_kprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_kprobe(dpos) && (pos = to_trace_kprobe(dpos)))

static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
{
	return tk->rp.handler != NULL;
}

static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
{
	return tk->symbol ? tk->symbol : "unknown";
}

static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
{
	return tk->rp.kp.offset;
}

static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
{
	return kprobe_gone(&tk->rp.kp);
}

static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
						       struct module *mod)
{
	int len = strlen(module_name(mod));
	const char *name = trace_kprobe_symbol(tk);

	return strncmp(module_name(mod), name, len) == 0 && name[len] == ':';
}

static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
{
	char *p;
	bool ret;

	if (!tk->symbol)
		return false;
	p = strchr(tk->symbol, ':');
	if (!p)
		return true;
	*p = '\0';
	rcu_read_lock_sched();
	ret = !!find_module(tk->symbol);
	rcu_read_unlock_sched();
	*p = ':';

	return ret;
}

static bool trace_kprobe_is_busy(struct dyn_event *ev)
{
	struct trace_kprobe *tk = to_trace_kprobe(ev);

	return trace_probe_is_enabled(&tk->tp);
}

static bool trace_kprobe_match_command_head(struct trace_kprobe *tk,
					    int argc, const char **argv)
{
	char buf[MAX_ARGSTR_LEN + 1];

	if (!argc)
		return true;

	if (!tk->symbol)
		snprintf(buf, sizeof(buf), "0x%p", tk->rp.kp.addr);
	else if (tk->rp.kp.offset)
		snprintf(buf, sizeof(buf), "%s+%u",
			 trace_kprobe_symbol(tk), tk->rp.kp.offset);
	else
		snprintf(buf, sizeof(buf), "%s", trace_kprobe_symbol(tk));
	if (strcmp(buf, argv[0]))
		return false;
	argc--; argv++;

	return trace_probe_match_command_args(&tk->tp, argc, argv);
}

static bool trace_kprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev)
{
	struct trace_kprobe *tk = to_trace_kprobe(ev);

	return strcmp(trace_probe_name(&tk->tp), event) == 0 &&
	    (!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0) &&
	    trace_kprobe_match_command_head(tk, argc, argv);
}

static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
{
	unsigned long nhit = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		nhit += *per_cpu_ptr(tk->nhit, cpu);

	return nhit;
}

static nokprobe_inline bool trace_kprobe_is_registered(struct trace_kprobe *tk)
{
	return !(list_empty(&tk->rp.kp.list) &&
		 hlist_unhashed(&tk->rp.kp.hlist));
}

/* Return 0 if it fails to find the symbol address */
static nokprobe_inline
unsigned long trace_kprobe_address(struct trace_kprobe *tk)
{
	unsigned long addr;

	if (tk->symbol) {
		addr = (unsigned long)
			kallsyms_lookup_name(trace_kprobe_symbol(tk));
		if (addr)
			addr += tk->rp.kp.offset;
	} else {
		addr = (unsigned long)tk->rp.kp.addr;
	}
	return addr;
}

static nokprobe_inline struct trace_kprobe *
trace_kprobe_primary_from_call(struct trace_event_call *call)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return NULL;

	return container_of(tp, struct trace_kprobe, tp);
}

bool trace_kprobe_on_func_entry(struct trace_event_call *call)
{
	struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);

	return tk ? (kprobe_on_func_entry(tk->rp.kp.addr,
			tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
			tk->rp.kp.addr ? 0 : tk->rp.kp.offset) == 0) : false;
}

bool trace_kprobe_error_injectable(struct trace_event_call *call)
{
	struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);

	return tk ? within_error_injection_list(trace_kprobe_address(tk)) :
	       false;
}

static int register_kprobe_event(struct trace_kprobe *tk);
static int unregister_kprobe_event(struct trace_kprobe *tk);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);

static void free_trace_kprobe(struct trace_kprobe *tk)
{
	if (tk) {
		trace_probe_cleanup(&tk->tp);
		kfree(tk->symbol);
		free_percpu(tk->nhit);
		kfree(tk);
	}
}

/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_kprobe *alloc_trace_kprobe(const char *group,
					       const char *event,
					       void *addr,
					       const char *symbol,
					       unsigned long offs,
					       int maxactive,
					       int nargs, bool is_return)
{
	struct trace_kprobe *tk;
	int ret = -ENOMEM;

	tk = kzalloc(struct_size(tk, tp.args, nargs), GFP_KERNEL);
	if (!tk)
		return ERR_PTR(ret);

	tk->nhit = alloc_percpu(unsigned long);
	if (!tk->nhit)
		goto error;

	if (symbol) {
		tk->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tk->symbol)
			goto error;
		tk->rp.kp.symbol_name = tk->symbol;
		tk->rp.kp.offset = offs;
	} else
		tk->rp.kp.addr = addr;

	if (is_return)
		tk->rp.handler = kretprobe_dispatcher;
	else
		tk->rp.kp.pre_handler = kprobe_dispatcher;

	tk->rp.maxactive = maxactive;
	INIT_HLIST_NODE(&tk->rp.kp.hlist);
	INIT_LIST_HEAD(&tk->rp.kp.list);

	ret = trace_probe_init(&tk->tp, event, group, false);
	if (ret < 0)
		goto error;

	dyn_event_init(&tk->devent, &trace_kprobe_ops);
	return tk;
error:
	free_trace_kprobe(tk);
	return ERR_PTR(ret);
}

static struct trace_kprobe *find_trace_kprobe(const char *event,
					      const char *group)
{
	struct dyn_event *pos;
	struct trace_kprobe *tk;

	for_each_trace_kprobe(tk, pos)
		if (strcmp(trace_probe_name(&tk->tp), event) == 0 &&
		    strcmp(trace_probe_group_name(&tk->tp), group) == 0)
			return tk;
	return NULL;
}

static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
{
	int ret = 0;

	if (trace_kprobe_is_registered(tk) && !trace_kprobe_has_gone(tk)) {
		if (trace_kprobe_is_return(tk))
			ret = enable_kretprobe(&tk->rp);
		else
			ret = enable_kprobe(&tk->rp.kp);
	}

	return ret;
}

static void __disable_trace_kprobe(struct trace_probe *tp)
{
	struct trace_kprobe *tk;

	list_for_each_entry(tk, trace_probe_probe_list(tp), tp.list) {
		if (!trace_kprobe_is_registered(tk))
			continue;
		if (trace_kprobe_is_return(tk))
			disable_kretprobe(&tk->rp);
		else
			disable_kprobe(&tk->rp.kp);
	}
}

/*
 * Enable trace_probe.
 * If the file is NULL, enable the "perf" handler; otherwise enable
 * the "trace" handler.
 */
static int enable_trace_kprobe(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *tp;
	struct trace_kprobe *tk;
	bool enabled;
	int ret = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;
	enabled = trace_probe_is_enabled(tp);

	/* This also changes "enabled" state */
	if (file) {
		ret = trace_probe_add_file(tp, file);
		if (ret)
			return ret;
	} else
		trace_probe_set_flag(tp, TP_FLAG_PROFILE);

	if (enabled)
		return 0;

	list_for_each_entry(tk, trace_probe_probe_list(tp), tp.list) {
		if (trace_kprobe_has_gone(tk))
			continue;
		ret = __enable_trace_kprobe(tk);
		if (ret)
			break;
		enabled = true;
	}

	if (ret) {
		/* Failed to enable one of them. Roll back all */
		if (enabled)
			__disable_trace_kprobe(tp);
		if (file)
			trace_probe_remove_file(tp, file);
		else
			trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
	}

	return ret;
}

/*
 * Disable trace_probe.
 * If the file is NULL, disable the "perf" handler; otherwise disable
 * the "trace" handler.
 */
static int disable_trace_kprobe(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	if (file) {
		if (!trace_probe_get_file_link(tp, file))
			return -ENOENT;
		if (!trace_probe_has_single_file(tp))
			goto out;
		trace_probe_clear_flag(tp, TP_FLAG_TRACE);
	} else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	if (!trace_probe_is_enabled(tp))
		__disable_trace_kprobe(tp);

 out:
	if (file)
		/*
		 * Synchronization is done in the function below. For perf
		 * events, file == NULL and perf_trace_event_unreg() calls
		 * tracepoint_synchronize_unregister() to synchronize the
		 * event, so we don't need to care about it here.
		 */
		trace_probe_remove_file(tp, file);

	return 0;
}

#if defined(CONFIG_DYNAMIC_FTRACE) && \
	!defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
static bool __within_notrace_func(unsigned long addr)
{
	unsigned long offset, size;

	if (!addr || !kallsyms_lookup_size_offset(addr, &size, &offset))
		return false;

	/* Get the entry address of the target function */
	addr -= offset;

	/*
	 * Since ftrace_location_range() does inclusive range check, we need
	 * to subtract 1 byte from the end address.
	 */
	return !ftrace_location_range(addr, addr + size - 1);
}

static bool within_notrace_func(struct trace_kprobe *tk)
{
	unsigned long addr = trace_kprobe_address(tk);
	char symname[KSYM_NAME_LEN], *p;

	if (!__within_notrace_func(addr))
		return false;

	/* Check if the address is on a suffixed-symbol */
	if (!lookup_symbol_name(addr, symname)) {
		p = strchr(symname, '.');
		if (!p)
			return true;
		*p = '\0';
		addr = (unsigned long)kprobe_lookup_name(symname, 0);
		if (addr)
			return __within_notrace_func(addr);
	}

	return true;
}
#else
#define within_notrace_func(tk) (false)
#endif

/* Internal register function - just handle k*probes and flags */
static int __register_trace_kprobe(struct trace_kprobe *tk)
{
	int i, ret;

	ret = security_locked_down(LOCKDOWN_KPROBES);
	if (ret)
		return ret;

	if (trace_kprobe_is_registered(tk))
		return -EINVAL;

	if (within_notrace_func(tk)) {
		pr_warn("Could not probe notrace function %s\n",
			trace_kprobe_symbol(tk));
		return -EINVAL;
	}

	for (i = 0; i < tk->tp.nr_args; i++) {
		ret = traceprobe_update_arg(&tk->tp.args[i]);
		if (ret)
			return ret;
	}

	/* Set/clear disabled flag according to tp->flag */
	if (trace_probe_is_enabled(&tk->tp))
		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
	else
		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;

	if (trace_kprobe_is_return(tk))
		ret = register_kretprobe(&tk->rp);
	else
		ret = register_kprobe(&tk->rp.kp);

	return ret;
}

/* Internal unregister function - just handle k*probes and flags */
static void __unregister_trace_kprobe(struct trace_kprobe *tk)
{
	if (trace_kprobe_is_registered(tk)) {
		if (trace_kprobe_is_return(tk))
			unregister_kretprobe(&tk->rp);
		else
			unregister_kprobe(&tk->rp.kp);
		/* Cleanup kprobe for reuse and mark it unregistered */
		INIT_HLIST_NODE(&tk->rp.kp.hlist);
		INIT_LIST_HEAD(&tk->rp.kp.list);
		if (tk->rp.kp.symbol_name)
			tk->rp.kp.addr = NULL;
	}
}

/* Unregister a trace_probe and probe_event */
static int unregister_trace_kprobe(struct trace_kprobe *tk)
{
	/* If other probes are on the event, just unregister kprobe */
	if (trace_probe_has_sibling(&tk->tp))
		goto unreg;

	/* Enabled event can not be unregistered */
	if (trace_probe_is_enabled(&tk->tp))
		return -EBUSY;

	/* If there's a reference to the dynamic event */
	if (trace_event_dyn_busy(trace_probe_event_call(&tk->tp)))
		return -EBUSY;

	/* Will fail if probe is being used by ftrace or perf */
	if (unregister_kprobe_event(tk))
		return -EBUSY;

unreg:
	__unregister_trace_kprobe(tk);
	dyn_event_remove(&tk->devent);
	trace_probe_unlink(&tk->tp);

	return 0;
}

static bool trace_kprobe_has_same_kprobe(struct trace_kprobe *orig,
					 struct trace_kprobe *comp)
{
	struct trace_probe_event *tpe = orig->tp.event;
	int i;

	list_for_each_entry(orig, &tpe->probes, tp.list) {
		if (strcmp(trace_kprobe_symbol(orig),
			   trace_kprobe_symbol(comp)) ||
		    trace_kprobe_offset(orig) != trace_kprobe_offset(comp))
			continue;

		/*
		 * trace_probe_compare_arg_type() ensured that nr_args and
		 * each argument name and type are the same. Let's compare
		 * comm.
		 */
		for (i = 0; i < orig->tp.nr_args; i++) {
			if (strcmp(orig->tp.args[i].comm,
				   comp->tp.args[i].comm))
				break;
		}

		if (i == orig->tp.nr_args)
			return true;
	}

	return false;
}

static int append_trace_kprobe(struct trace_kprobe *tk, struct trace_kprobe *to)
{
	int ret;

	ret = trace_probe_compare_arg_type(&tk->tp, &to->tp);
	if (ret) {
		/* Note that arguments start at index 2 */
		trace_probe_log_set_index(ret + 1);
		trace_probe_log_err(0, DIFF_ARG_TYPE);
		return -EEXIST;
	}
	if (trace_kprobe_has_same_kprobe(to, tk)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, SAME_PROBE);
		return -EEXIST;
	}

	/* Append to existing event */
	ret = trace_probe_append(&tk->tp, &to->tp);
	if (ret)
		return ret;

	/* Register k*probe */
	ret = __register_trace_kprobe(tk);
	if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
		pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
		ret = 0;
	}

	if (ret)
		trace_probe_unlink(&tk->tp);
	else
		dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));

	return ret;
}

/* Register a trace_probe and probe_event */
static int register_trace_kprobe(struct trace_kprobe *tk)
{
	struct trace_kprobe *old_tk;
	int ret;

	mutex_lock(&event_mutex);

	old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
				   trace_probe_group_name(&tk->tp));
	if (old_tk) {
		if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, DIFF_PROBE_TYPE);
			ret = -EEXIST;
		} else {
			ret = append_trace_kprobe(tk, old_tk);
		}
		goto end;
	}

	/* Register new event */
	ret = register_kprobe_event(tk);
	if (ret) {
		if (ret == -EEXIST) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, EVENT_EXIST);
		} else
			pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	/* Register k*probe */
	ret = __register_trace_kprobe(tk);
	if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
		pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
		ret = 0;
	}

	if (ret < 0)
		unregister_kprobe_event(tk);
	else
		dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));

end:
	mutex_unlock(&event_mutex);
	return ret;
}

/* Module notifier call back, checking event on the module */
static int trace_kprobe_module_callback(struct notifier_block *nb,
					unsigned long val, void *data)
{
	struct module *mod = data;
	struct dyn_event *pos;
	struct trace_kprobe *tk;
	int ret;

	if (val != MODULE_STATE_COMING)
		return NOTIFY_DONE;

	/* Update probes on coming module */
	mutex_lock(&event_mutex);
	for_each_trace_kprobe(tk, pos) {
		if (trace_kprobe_within_module(tk, mod)) {
			/* No need to check busy - this probe should already be gone. */
			__unregister_trace_kprobe(tk);
			ret = __register_trace_kprobe(tk);
			if (ret)
				pr_warn("Failed to re-register probe %s on %s: %d\n",
					trace_probe_name(&tk->tp),
					module_name(mod), ret);
		}
	}
	mutex_unlock(&event_mutex);

	return NOTIFY_DONE;
}

static struct notifier_block trace_kprobe_module_nb = {
	.notifier_call = trace_kprobe_module_callback,
	.priority = 1	/* Invoked after kprobe module callback */
};

static int __trace_kprobe_create(int argc, const char *argv[])
{
	/*
	 * Argument syntax:
	 *  - Add kprobe:
	 *      p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe:
	 *      r[MAXACTIVE][:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
	 *    Or
	 *      p[:[GRP/]EVENT] [MOD:]KSYM[+0]%return [FETCHARGS]
	 *
	 * Fetch args:
	 *  $retval     : fetch return value
	 *  $stack      : fetch stack address
	 *  $stackN     : fetch Nth entry of stack (N:0-)
	 *  $comm       : fetch current task comm
	 *  @ADDR       : fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG        : fetch register REG
	 * Dereferencing memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 * Type of args:
	 *  FETCHARG:TYPE : use TYPE instead of unsigned long.
	 *
	 * (An example follows this comment.)
	 */
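	/*
	 * For example, via the tracefs interface this parser handles (an
	 * illustrative sketch: the event names and fetch args are made up,
	 * $argN requires HAVE_FUNCTION_ARG_ACCESS_API - see FETCH_OP_ARG
	 * below - and tracefs may also be mounted at
	 * /sys/kernel/debug/tracing):
	 *
	 *   echo 'p:myprobe do_sys_open dfd=$arg1 filename=$arg2' \
	 *	>> /sys/kernel/tracing/kprobe_events
	 *   echo 'r:myretprobe do_sys_open ret=$retval' \
	 *	>> /sys/kernel/tracing/kprobe_events
	 */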
	struct trace_kprobe *tk = NULL;
	int i, len, ret = 0;
	bool is_return = false;
	char *symbol = NULL, *tmp = NULL;
	const char *event = NULL, *group = KPROBE_EVENT_SYSTEM;
	enum probe_print_type ptype;
	int maxactive = 0;
	long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];
	unsigned int flags = TPARG_FL_KERNEL;

	switch (argv[0][0]) {
	case 'r':
		is_return = true;
		break;
	case 'p':
		break;
	default:
		return -ECANCELED;
	}
	if (argc < 2)
		return -ECANCELED;

	trace_probe_log_init("trace_kprobe", argc, argv);

	event = strchr(&argv[0][1], ':');
	if (event)
		event++;

	if (isdigit(argv[0][1])) {
		if (!is_return) {
			trace_probe_log_err(1, MAXACT_NO_KPROBE);
			goto parse_error;
		}
		if (event)
			len = event - &argv[0][1] - 1;
		else
			len = strlen(&argv[0][1]);
		if (len > MAX_EVENT_NAME_LEN - 1) {
			trace_probe_log_err(1, BAD_MAXACT);
			goto parse_error;
		}
		memcpy(buf, &argv[0][1], len);
		buf[len] = '\0';
		ret = kstrtouint(buf, 0, &maxactive);
		if (ret || !maxactive) {
			trace_probe_log_err(1, BAD_MAXACT);
			goto parse_error;
		}
		/* kretprobe instances are iterated over via a list. The
		 * maximum should stay reasonable.
		 */
		if (maxactive > KRETPROBE_MAXACTIVE_MAX) {
			trace_probe_log_err(1, MAXACT_TOO_BIG);
			goto parse_error;
		}
	}

	/* Try to parse an address. If that fails, try to read the
	 * input as a symbol. */
	if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
		trace_probe_log_set_index(1);
		/* Check whether uprobe event specified */
		if (strchr(argv[1], '/') && strchr(argv[1], ':')) {
			ret = -ECANCELED;
			goto error;
		}
		/* a symbol specified */
		symbol = kstrdup(argv[1], GFP_KERNEL);
		if (!symbol)
			return -ENOMEM;

		tmp = strchr(symbol, '%');
		if (tmp) {
			if (!strcmp(tmp, "%return")) {
				*tmp = '\0';
				is_return = true;
			} else {
				trace_probe_log_err(tmp - symbol, BAD_ADDR_SUFFIX);
				goto parse_error;
			}
		}

		/* TODO: support .init module functions */
		ret = traceprobe_split_symbol_offset(symbol, &offset);
		if (ret || offset < 0 || offset > UINT_MAX) {
			trace_probe_log_err(0, BAD_PROBE_ADDR);
			goto parse_error;
		}
		if (is_return)
			flags |= TPARG_FL_RETURN;
		ret = kprobe_on_func_entry(NULL, symbol, offset);
		if (ret == 0)
			flags |= TPARG_FL_FENTRY;
		/* Defer the ENOENT case until registering the kprobe */
		if (ret == -EINVAL && is_return) {
			trace_probe_log_err(0, BAD_RETPROBE);
			goto parse_error;
		}
	}

	trace_probe_log_set_index(0);
	if (event) {
		ret = traceprobe_parse_event_name(&event, &group, buf,
						  event - argv[0]);
		if (ret)
			goto parse_error;
	} else {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
				 is_return ? 'r' : 'p', addr);
		sanitize_event_name(buf);
		event = buf;
	}

	/* setup a probe */
	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
			       argc - 2, is_return);
	if (IS_ERR(tk)) {
		ret = PTR_ERR(tk);
		/* This must return -ENOMEM, else there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM);
		goto out;	/* We know tk is not allocated */
	}
	argc -= 2; argv += 2;

	/* parse arguments */
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		trace_probe_log_set_index(i + 2);
		ret = traceprobe_parse_probe_arg(&tk->tp, i, argv[i], flags);
		if (ret)
			goto error;	/* This can be -ENOMEM */
	}

	ptype = is_return ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
	ret = traceprobe_set_print_fmt(&tk->tp, ptype);
	if (ret < 0)
		goto error;

	ret = register_trace_kprobe(tk);
	if (ret) {
		trace_probe_log_set_index(1);
		if (ret == -EILSEQ)
			trace_probe_log_err(0, BAD_INSN_BNDRY);
		else if (ret == -ENOENT)
			trace_probe_log_err(0, BAD_PROBE_ADDR);
		else if (ret != -ENOMEM && ret != -EEXIST)
			trace_probe_log_err(0, FAIL_REG_PROBE);
		goto error;
	}

out:
	trace_probe_log_clear();
	kfree(symbol);
	return ret;

parse_error:
	ret = -EINVAL;
error:
	free_trace_kprobe(tk);
	goto out;
}

static int trace_kprobe_create(const char *raw_command)
{
	return trace_probe_create(raw_command, __trace_kprobe_create);
}

static int create_or_delete_trace_kprobe(const char *raw_command)
{
	int ret;

	if (raw_command[0] == '-')
		return dyn_event_release(raw_command, &trace_kprobe_ops);

	ret = trace_kprobe_create(raw_command);
	return ret == -ECANCELED ? -EINVAL : ret;
}

static int trace_kprobe_run_command(struct dynevent_cmd *cmd)
{
	return create_or_delete_trace_kprobe(cmd->seq.buffer);
}

/**
 * kprobe_event_cmd_init - Initialize a kprobe event command object
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @buf: A pointer to the buffer used to build the command
 * @maxlen: The length of the buffer passed in @buf
 *
 * Initialize a kprobe event command object.  Use this before
 * calling any of the other kprobe_event functions.
 */
void kprobe_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
{
	dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_KPROBE,
			  trace_kprobe_run_command);
}
EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);

/**
 * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @kretprobe: Is this a return probe?
 * @name: The name of the kprobe event
 * @loc: The location of the kprobe event
 * @args: Variable number of arg (pairs), one pair for each field
 *
 * NOTE: Users normally won't want to call this function directly, but
 * rather use the kprobe_event_gen_cmd_start() wrapper, which automatically
 * adds a NULL to the end of the arg list.  If this function is used
 * directly, make sure the last arg in the variable arg list is NULL.
 *
 * Generate a kprobe event command to be executed by
 * kprobe_event_gen_cmd_end().  This function can be used to generate the
 * complete command or only the first part of it; in the latter case,
 * kprobe_event_add_fields() can be used to add more fields following this.
 *
 * Unlike synth_event_gen_cmd_start(), @loc must be specified; this
 * function returns -EINVAL if @loc is NULL.
 *
 * Return: 0 if successful, error otherwise.
 */
int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
				 const char *name, const char *loc, ...)
{
	char buf[MAX_EVENT_NAME_LEN];
	struct dynevent_arg arg;
	va_list args;
	int ret;

	if (cmd->type != DYNEVENT_TYPE_KPROBE)
		return -EINVAL;

	if (!loc)
		return -EINVAL;

	if (kretprobe)
		snprintf(buf, MAX_EVENT_NAME_LEN, "r:kprobes/%s", name);
	else
		snprintf(buf, MAX_EVENT_NAME_LEN, "p:kprobes/%s", name);

	ret = dynevent_str_add(cmd, buf);
	if (ret)
		return ret;

	dynevent_arg_init(&arg, 0);
	arg.str = loc;
	ret = dynevent_arg_add(cmd, &arg, NULL);
	if (ret)
		return ret;

	va_start(args, loc);
	for (;;) {
		const char *field;

		field = va_arg(args, const char *);
		if (!field)
			break;

		if (++cmd->n_fields > MAX_TRACE_ARGS) {
			ret = -EINVAL;
			break;
		}

		arg.str = field;
		ret = dynevent_arg_add(cmd, &arg, NULL);
		if (ret)
			break;
	}
	va_end(args);

	return ret;
}
EXPORT_SYMBOL_GPL(__kprobe_event_gen_cmd_start);
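
/*
 * A minimal in-kernel usage sketch of the command API above, modeled on
 * kernel/trace/kprobe_event_gen_test.c (the event name, probe location
 * and fetch args here are hypothetical, and MAX_DYNEVENT_CMD_LEN is a
 * local define in that test, e.g. 2048):
 *
 *	struct dynevent_cmd cmd;
 *	char *buf;
 *	int ret;
 *
 *	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *
 *	ret = kprobe_event_gen_cmd_start(&cmd, "gen_kprobe_test",
 *					 "do_sys_open", "dfd=%ax");
 *	if (!ret)
 *		ret = kprobe_event_add_fields(&cmd, "filename=%dx");
 *	if (!ret)
 *		ret = kprobe_event_gen_cmd_end(&cmd);
 *
 * kprobe_event_gen_cmd_end() executes the accumulated command and
 * actually registers the event.
 */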

/**
 * __kprobe_event_add_fields - Add probe fields to a kprobe command from arg list
 * @cmd: A pointer to the dynevent_cmd struct representing the new event
 * @args: Variable number of arg (pairs), one pair for each field
 *
 * NOTE: Users normally won't want to call this function directly, but
 * rather use the kprobe_event_add_fields() wrapper, which
 * automatically adds a NULL to the end of the arg list.  If this
 * function is used directly, make sure the last arg in the variable
 * arg list is NULL.
 *
 * Add probe fields to an existing kprobe command using a variable
 * list of args.  Fields are added in the same order they're listed.
 *
 * Return: 0 if successful, error otherwise.
 */
int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...)
{
	struct dynevent_arg arg;
	va_list args;
	int ret = 0;

	if (cmd->type != DYNEVENT_TYPE_KPROBE)
		return -EINVAL;

	dynevent_arg_init(&arg, 0);

	va_start(args, cmd);
	for (;;) {
		const char *field;

		field = va_arg(args, const char *);
		if (!field)
			break;

		if (++cmd->n_fields > MAX_TRACE_ARGS) {
			ret = -EINVAL;
			break;
		}

		arg.str = field;
		ret = dynevent_arg_add(cmd, &arg, NULL);
		if (ret)
			break;
	}
	va_end(args);

	return ret;
}
EXPORT_SYMBOL_GPL(__kprobe_event_add_fields);

/**
 * kprobe_event_delete - Delete a kprobe event
 * @name: The name of the kprobe event to delete
 *
 * Delete a kprobe event with the given @name from kernel code rather
 * than directly from the command line.
 *
 * Return: 0 if successful, error otherwise.
 */
int kprobe_event_delete(const char *name)
{
	char buf[MAX_EVENT_NAME_LEN];

	snprintf(buf, MAX_EVENT_NAME_LEN, "-:%s", name);

	return create_or_delete_trace_kprobe(buf);
}
EXPORT_SYMBOL_GPL(kprobe_event_delete);
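
/*
 * For example, deleting the hypothetical event created in the sketch
 * above, once it is no longer attached to anything:
 *
 *	ret = kprobe_event_delete("gen_kprobe_test");
 */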

static int trace_kprobe_release(struct dyn_event *ev)
{
	struct trace_kprobe *tk = to_trace_kprobe(ev);
	int ret = unregister_trace_kprobe(tk);

	if (!ret)
		free_trace_kprobe(tk);
	return ret;
}

static int trace_kprobe_show(struct seq_file *m, struct dyn_event *ev)
{
	struct trace_kprobe *tk = to_trace_kprobe(ev);
	int i;

	seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
	if (trace_kprobe_is_return(tk) && tk->rp.maxactive)
		seq_printf(m, "%d", tk->rp.maxactive);
	seq_printf(m, ":%s/%s", trace_probe_group_name(&tk->tp),
				trace_probe_name(&tk->tp));

	if (!tk->symbol)
		seq_printf(m, " 0x%p", tk->rp.kp.addr);
	else if (tk->rp.kp.offset)
		seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
			   tk->rp.kp.offset);
	else
		seq_printf(m, " %s", trace_kprobe_symbol(tk));

	for (i = 0; i < tk->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
	seq_putc(m, '\n');

	return 0;
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_trace_kprobe(ev))
		return 0;

	return trace_kprobe_show(m, ev);
}

static const struct seq_operations probes_seq_op = {
	.start  = dyn_event_seq_start,
	.next   = dyn_event_seq_next,
	.stop   = dyn_event_seq_stop,
	.show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&trace_kprobe_ops);
		if (ret < 0)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       create_or_delete_trace_kprobe);
}

static const struct file_operations kprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;
	struct trace_kprobe *tk;
	unsigned long nmissed;

	if (!is_trace_kprobe(ev))
		return 0;

	tk = to_trace_kprobe(ev);
	nmissed = trace_kprobe_is_return(tk) ?
		tk->rp.kp.nmissed + tk->rp.nmissed : tk->rp.kp.nmissed;
	seq_printf(m, "  %-44s %15lu %15lu\n",
		   trace_probe_name(&tk->tp),
		   trace_kprobe_nhit(tk),
		   nmissed);

	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start  = dyn_event_seq_start,
	.next   = dyn_event_seq_next,
	.stop   = dyn_event_seq_stop,
	.show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	return seq_open(file, &profile_seq_op);
}

static const struct file_operations kprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/* Kprobe specific fetch functions */

/* Return the length of the string -- including the null terminating byte */
static nokprobe_inline int
fetch_store_strlen_user(unsigned long addr)
{
	const void __user *uaddr = (__force const void __user *)addr;

	return strnlen_user_nofault(uaddr, MAX_STRING_SIZE);
}

/* Return the length of the string -- including the null terminating byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
	int ret, len = 0;
	u8 c;

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	if (addr < TASK_SIZE)
		return fetch_store_strlen_user(addr);
#endif

	do {
		ret = copy_from_kernel_nofault(&c, (u8 *)addr + len, 1);
		len++;
	} while (c && ret == 0 && len < MAX_STRING_SIZE);

	return (ret < 0) ? ret : len;
}

/*
 * Fetch a null-terminated string from user. Caller MUST set *(u32 *)buf
 * with max length and relative data location.
 */
static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base)
{
	const void __user *uaddr = (__force const void __user *)addr;
	int maxlen = get_loc_len(*(u32 *)dest);
	void *__dest;
	long ret;

	if (unlikely(!maxlen))
		return -ENOMEM;

	__dest = get_loc_data(dest, base);

	ret = strncpy_from_user_nofault(__dest, uaddr, maxlen);
	if (ret >= 0)
		*(u32 *)dest = make_data_loc(ret, __dest - base);

	return ret;
}

/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)buf with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	int maxlen = get_loc_len(*(u32 *)dest);
	void *__dest;
	long ret;

#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	if ((unsigned long)addr < TASK_SIZE)
		return fetch_store_string_user(addr, dest, base);
#endif

	if (unlikely(!maxlen))
		return -ENOMEM;

	__dest = get_loc_data(dest, base);

	/*
	 * Try to get string again, since the string can be changed while
	 * probing.
	 */
	ret = strncpy_from_kernel_nofault(__dest, (void *)addr, maxlen);
	if (ret >= 0)
		*(u32 *)dest = make_data_loc(ret, __dest - base);

	return ret;
}

static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size)
{
	const void __user *uaddr = (__force const void __user *)src;

	return copy_from_user_nofault(dest, uaddr, size);
}

static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
	if ((unsigned long)src < TASK_SIZE)
		return probe_mem_read_user(dest, src, size);
#endif
	return copy_from_kernel_nofault(dest, src, size);
}

/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
		   void *base)
{
	struct pt_regs *regs = rec;
	unsigned long val;

retry:
	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_REG:
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:
		val = regs_get_kernel_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = kernel_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
	case FETCH_OP_IMM:
		val = code->immediate;
		break;
	case FETCH_OP_COMM:
		val = (unsigned long)current->comm;
		break;
	case FETCH_OP_DATA:
		val = (unsigned long)code->data;
		break;
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
	case FETCH_OP_ARG:
		val = regs_get_kernel_argument(regs, code->param);
		break;
#endif
	case FETCH_NOP_SYMBOL:	/* Ignore a place holder */
		code++;
		goto retry;
	default:
		return -EILSEQ;
	}
	code++;

	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn);
1369
1370/* Kprobe handler */
1371static nokprobe_inline void
1372__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
1373                    struct trace_event_file *trace_file)
1374{
1375        struct kprobe_trace_entry_head *entry;
1376        struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1377        struct trace_event_buffer fbuffer;
1378        int dsize;
1379
1380        WARN_ON(call != trace_file->event_call);
1381
1382        if (trace_trigger_soft_disabled(trace_file))
1383                return;
1384
1385        dsize = __get_data_size(&tk->tp, regs);
1386
1387        entry = trace_event_buffer_reserve(&fbuffer, trace_file,
1388                                           sizeof(*entry) + tk->tp.size + dsize);
1389        if (!entry)
1390                return;
1391
1392        fbuffer.regs = regs;
1393        entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
1394        entry->ip = (unsigned long)tk->rp.kp.addr;
1395        store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1396
1397        trace_event_buffer_commit(&fbuffer);
1398}
1399
1400static void
1401kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
1402{
1403        struct event_file_link *link;
1404
1405        trace_probe_for_each_link_rcu(link, &tk->tp)
1406                __kprobe_trace_func(tk, regs, link->file);
1407}
1408NOKPROBE_SYMBOL(kprobe_trace_func);
1409
1410/* Kretprobe handler */
1411static nokprobe_inline void
1412__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1413                       struct pt_regs *regs,
1414                       struct trace_event_file *trace_file)
1415{
1416        struct kretprobe_trace_entry_head *entry;
1417        struct trace_event_buffer fbuffer;
1418        struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1419        int dsize;
1420
1421        WARN_ON(call != trace_file->event_call);
1422
1423        if (trace_trigger_soft_disabled(trace_file))
1424                return;
1425
1426        dsize = __get_data_size(&tk->tp, regs);
1427
1428        entry = trace_event_buffer_reserve(&fbuffer, trace_file,
1429                                           sizeof(*entry) + tk->tp.size + dsize);
1430        if (!entry)
1431                return;
1432
1433        fbuffer.regs = regs;
1434        entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
1435        entry->func = (unsigned long)tk->rp.kp.addr;
1436        entry->ret_ip = get_kretprobe_retaddr(ri);
1437        store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1438
1439        trace_event_buffer_commit(&fbuffer);
1440}
1441
1442static void
1443kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1444                     struct pt_regs *regs)
1445{
1446        struct event_file_link *link;
1447
1448        trace_probe_for_each_link_rcu(link, &tk->tp)
1449                __kretprobe_trace_func(tk, ri, regs, link->file);
1450}
1451NOKPROBE_SYMBOL(kretprobe_trace_func);
1452
1453/* Event entry printers */
1454static enum print_line_t
1455print_kprobe_event(struct trace_iterator *iter, int flags,
1456                   struct trace_event *event)
1457{
1458        struct kprobe_trace_entry_head *field;
1459        struct trace_seq *s = &iter->seq;
1460        struct trace_probe *tp;
1461
1462        field = (struct kprobe_trace_entry_head *)iter->ent;
1463        tp = trace_probe_primary_from_call(
1464                container_of(event, struct trace_event_call, event));
1465        if (WARN_ON_ONCE(!tp))
1466                goto out;
1467
1468        trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1469
1470        if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
1471                goto out;
1472
1473        trace_seq_putc(s, ')');
1474
1475        if (print_probe_args(s, tp->args, tp->nr_args,
1476                             (u8 *)&field[1], field) < 0)
1477                goto out;
1478
1479        trace_seq_putc(s, '\n');
1480 out:
1481        return trace_handle_return(s);
1482}
1483
1484static enum print_line_t
1485print_kretprobe_event(struct trace_iterator *iter, int flags,
1486                      struct trace_event *event)
1487{
1488        struct kretprobe_trace_entry_head *field;
1489        struct trace_seq *s = &iter->seq;
1490        struct trace_probe *tp;
1491
1492        field = (struct kretprobe_trace_entry_head *)iter->ent;
1493        tp = trace_probe_primary_from_call(
1494                container_of(event, struct trace_event_call, event));
1495        if (WARN_ON_ONCE(!tp))
1496                goto out;
1497
1498        trace_seq_printf(s, "%s: (", trace_probe_name(tp));
1499
1500        if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1501                goto out;
1502
1503        trace_seq_puts(s, " <- ");
1504
1505        if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1506                goto out;
1507
1508        trace_seq_putc(s, ')');
1509
1510        if (print_probe_args(s, tp->args, tp->nr_args,
1511                             (u8 *)&field[1], field) < 0)
1512                goto out;
1513
1514        trace_seq_putc(s, '\n');
1515
1516 out:
1517        return trace_handle_return(s);
1518}
1519
1520
1521static int kprobe_event_define_fields(struct trace_event_call *event_call)
1522{
1523        int ret;
1524        struct kprobe_trace_entry_head field;
1525        struct trace_probe *tp;
1526
1527        tp = trace_probe_primary_from_call(event_call);
1528        if (WARN_ON_ONCE(!tp))
1529                return -ENOENT;
1530
1531        DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1532
1533        return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1534}
1535
1536static int kretprobe_event_define_fields(struct trace_event_call *event_call)
1537{
1538        int ret;
1539        struct kretprobe_trace_entry_head field;
1540        struct trace_probe *tp;
1541
1542        tp = trace_probe_primary_from_call(event_call);
1543        if (WARN_ON_ONCE(!tp))
1544                return -ENOENT;
1545
1546        DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1547        DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1548
1549        return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
1550}
1551
1552#ifdef CONFIG_PERF_EVENTS
1553
1554/* Kprobe profile handler */
1555static int
1556kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1557{
1558        struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1559        struct kprobe_trace_entry_head *entry;
1560        struct hlist_head *head;
1561        int size, __size, dsize;
1562        int rctx;
1563
1564        if (bpf_prog_array_valid(call)) {
1565                unsigned long orig_ip = instruction_pointer(regs);
1566                int ret;
1567
1568                ret = trace_call_bpf(call, regs);
1569
1570                /*
1571                 * We need to check and see if we modified the pc of the
1572                 * pt_regs, and if so return 1 so that we don't do the
1573                 * single stepping.
1574                 */
1575                if (orig_ip != instruction_pointer(regs))
1576                        return 1;
1577                if (!ret)
1578                        return 0;
1579        }
1580
1581        head = this_cpu_ptr(call->perf_events);
1582        if (hlist_empty(head))
1583                return 0;
1584
1585        dsize = __get_data_size(&tk->tp, regs);
1586        __size = sizeof(*entry) + tk->tp.size + dsize;
1587        size = ALIGN(__size + sizeof(u32), sizeof(u64));
1588        size -= sizeof(u32);
1589
1590        entry = perf_trace_buf_alloc(size, NULL, &rctx);
1591        if (!entry)
1592                return 0;
1593
1594        entry->ip = (unsigned long)tk->rp.kp.addr;
1595        memset(&entry[1], 0, dsize);
1596        store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1597        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1598                              head, NULL);
1599        return 0;
1600}
1601NOKPROBE_SYMBOL(kprobe_perf_func);
1602
1603/* Kretprobe profile handler */
1604static void
1605kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1606                    struct pt_regs *regs)
1607{
1608        struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1609        struct kretprobe_trace_entry_head *entry;
1610        struct hlist_head *head;
1611        int size, __size, dsize;
1612        int rctx;
1613
1614        if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
1615                return;
1616
1617        head = this_cpu_ptr(call->perf_events);
1618        if (hlist_empty(head))
1619                return;
1620
1621        dsize = __get_data_size(&tk->tp, regs);
1622        __size = sizeof(*entry) + tk->tp.size + dsize;
1623        size = ALIGN(__size + sizeof(u32), sizeof(u64));
1624        size -= sizeof(u32);
1625
1626        entry = perf_trace_buf_alloc(size, NULL, &rctx);
1627        if (!entry)
1628                return;
1629
1630        entry->func = (unsigned long)tk->rp.kp.addr;
1631        entry->ret_ip = get_kretprobe_retaddr(ri);
1632        store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
1633        perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
1634                              head, NULL);
1635}
1636NOKPROBE_SYMBOL(kretprobe_perf_func);
1637
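    /*
     * Report probe details to the bpf_task_fd_query() interface: exactly
     * one of symbol+offset or raw address is filled in, never both.
     */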
1638int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
1639                        const char **symbol, u64 *probe_offset,
1640                        u64 *probe_addr, bool perf_type_tracepoint)
1641{
1642        const char *pevent = trace_event_name(event->tp_event);
1643        const char *group = event->tp_event->class->system;
1644        struct trace_kprobe *tk;
1645
1646        if (perf_type_tracepoint)
1647                tk = find_trace_kprobe(pevent, group);
1648        else
1649                tk = trace_kprobe_primary_from_call(event->tp_event);
1650        if (!tk)
1651                return -EINVAL;
1652
1653        *fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
1654                                              : BPF_FD_TYPE_KPROBE;
1655        if (tk->symbol) {
1656                *symbol = tk->symbol;
1657                *probe_offset = tk->rp.kp.offset;
1658                *probe_addr = 0;
1659        } else {
1660                *symbol = NULL;
1661                *probe_offset = 0;
1662                *probe_addr = (unsigned long)tk->rp.kp.addr;
1663        }
1664        return 0;
1665}
1666#endif  /* CONFIG_PERF_EVENTS */
1667
1668/*
1669 * Called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
1670 *
1671 * kprobe_trace_self_tests_init() calls enable_trace_kprobe() and
1672 * disable_trace_kprobe() locklessly, but it cannot race with this __init function.
1673 */
1674static int kprobe_register(struct trace_event_call *event,
1675                           enum trace_reg type, void *data)
1676{
1677        struct trace_event_file *file = data;
1678
1679        switch (type) {
1680        case TRACE_REG_REGISTER:
1681                return enable_trace_kprobe(event, file);
1682        case TRACE_REG_UNREGISTER:
1683                return disable_trace_kprobe(event, file);
1684
1685#ifdef CONFIG_PERF_EVENTS
1686        case TRACE_REG_PERF_REGISTER:
1687                return enable_trace_kprobe(event, NULL);
1688        case TRACE_REG_PERF_UNREGISTER:
1689                return disable_trace_kprobe(event, NULL);
1690        case TRACE_REG_PERF_OPEN:
1691        case TRACE_REG_PERF_CLOSE:
1692        case TRACE_REG_PERF_ADD:
1693        case TRACE_REG_PERF_DEL:
1694                return 0;
1695#endif
1696        }
1697        return 0;
1698}
1699
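    /*
     * Entry handler shared by all trace kprobes: bumps the per-cpu hit
     * count and fans out to the ftrace and/or perf handlers. A nonzero
     * return (only from the perf/BPF path) tells the kprobe core that
     * regs->ip was changed, so single-stepping must be skipped.
     */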
1700static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1701{
1702        struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
1703        int ret = 0;
1704
1705        raw_cpu_inc(*tk->nhit);
1706
1707        if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1708                kprobe_trace_func(tk, regs);
1709#ifdef CONFIG_PERF_EVENTS
1710        if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1711                ret = kprobe_perf_func(tk, regs);
1712#endif
1713        return ret;
1714}
1715NOKPROBE_SYMBOL(kprobe_dispatcher);
1716
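    /*
     * Return-probe counterpart of kprobe_dispatcher(); fans out to the
     * ftrace and perf return handlers.
     */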
1717static int
1718kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1719{
1720        struct kretprobe *rp = get_kretprobe(ri);
1721        struct trace_kprobe *tk = container_of(rp, struct trace_kprobe, rp);
1722
1723        raw_cpu_inc(*tk->nhit);
1724
1725        if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
1726                kretprobe_trace_func(tk, ri, regs);
1727#ifdef CONFIG_PERF_EVENTS
1728        if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
1729                kretprobe_perf_func(tk, ri, regs);
1730#endif
1731        return 0;       /* We don't tweak the kernel, so just return 0 */
1732}
1733NOKPROBE_SYMBOL(kretprobe_dispatcher);
1734
1735static struct trace_event_functions kretprobe_funcs = {
1736        .trace          = print_kretprobe_event
1737};
1738
1739static struct trace_event_functions kprobe_funcs = {
1740        .trace          = print_kprobe_event
1741};
1742
1743static struct trace_event_fields kretprobe_fields_array[] = {
1744        { .type = TRACE_FUNCTION_TYPE,
1745          .define_fields = kretprobe_event_define_fields },
1746        {}
1747};
1748
1749static struct trace_event_fields kprobe_fields_array[] = {
1750        { .type = TRACE_FUNCTION_TYPE,
1751          .define_fields = kprobe_event_define_fields },
1752        {}
1753};
1754
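    /*
     * Bind the event call to the right output, field-definition and
     * registration callbacks, depending on whether this probe fires on
     * function entry or return.
     */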
1755static inline void init_trace_event_call(struct trace_kprobe *tk)
1756{
1757        struct trace_event_call *call = trace_probe_event_call(&tk->tp);
1758
1759        if (trace_kprobe_is_return(tk)) {
1760                call->event.funcs = &kretprobe_funcs;
1761                call->class->fields_array = kretprobe_fields_array;
1762        } else {
1763                call->event.funcs = &kprobe_funcs;
1764                call->class->fields_array = kprobe_fields_array;
1765        }
1766
1767        call->flags = TRACE_EVENT_FL_KPROBE;
1768        call->class->reg = kprobe_register;
1769}
1770
1771static int register_kprobe_event(struct trace_kprobe *tk)
1772{
1773        init_trace_event_call(tk);
1774
1775        return trace_probe_register_event_call(&tk->tp);
1776}
1777
1778static int unregister_kprobe_event(struct trace_kprobe *tk)
1779{
1780        return trace_probe_unregister_event_call(&tk->tp);
1781}
1782
1783#ifdef CONFIG_PERF_EVENTS
1784/* Create a trace_kprobe, but don't add it to global lists. */
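    /* Called from perf_kprobe_init() when perf_event_open() creates a kprobe. */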
1785struct trace_event_call *
1786create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
1787                          bool is_return)
1788{
1789        enum probe_print_type ptype;
1790        struct trace_kprobe *tk;
1791        int ret;
1792        char *event;
1793
1794        /*
1795         * Local trace_kprobes are not added to dyn_event, so they are never
1796         * found by find_trace_kprobe(). Therefore, duplicate names are not
1797         * a concern here.
1798         */
1799        event = func ? func : "DUMMY_EVENT";
1800
1801        tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
1802                                offs, 0 /* maxactive */, 0 /* nargs */,
1803                                is_return);
1804
1805        if (IS_ERR(tk)) {
1806                pr_info("Failed to allocate trace_probe (%d)\n",
1807                        (int)PTR_ERR(tk));
1808                return ERR_CAST(tk);
1809        }
1810
1811        init_trace_event_call(tk);
1812
1813        ptype = trace_kprobe_is_return(tk) ?
1814                PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
1815        if (traceprobe_set_print_fmt(&tk->tp, ptype) < 0) {
1816                ret = -ENOMEM;
1817                goto error;
1818        }
1819
1820        ret = __register_trace_kprobe(tk);
1821        if (ret < 0)
1822                goto error;
1823
1824        return trace_probe_event_call(&tk->tp);
1825error:
1826        free_trace_kprobe(tk);
1827        return ERR_PTR(ret);
1828}
1829
1830void destroy_local_trace_kprobe(struct trace_event_call *event_call)
1831{
1832        struct trace_kprobe *tk;
1833
1834        tk = trace_kprobe_primary_from_call(event_call);
1835        if (unlikely(!tk))
1836                return;
1837
1838        if (trace_probe_is_enabled(&tk->tp)) {
1839                WARN_ON(1);
1840                return;
1841        }
1842
1843        __unregister_trace_kprobe(tk);
1844
1845        free_trace_kprobe(tk);
1846}
1847#endif /* CONFIG_PERF_EVENTS */
1848
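    /*
     * Enable, in the top trace instance, every kprobe event that was
     * created from the boot command line.
     */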
1849static __init void enable_boot_kprobe_events(void)
1850{
1851        struct trace_array *tr = top_trace_array();
1852        struct trace_event_file *file;
1853        struct trace_kprobe *tk;
1854        struct dyn_event *pos;
1855
1856        mutex_lock(&event_mutex);
1857        for_each_trace_kprobe(tk, pos) {
1858                list_for_each_entry(file, &tr->events, list)
1859                        if (file->event_call == trace_probe_event_call(&tk->tp))
1860                                trace_event_enable_disable(file, 1, 0);
1861        }
1862        mutex_unlock(&event_mutex);
1863}
1864
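    /*
     * Parse the "kprobe_event=" boot parameter: probe definitions are
     * separated by semicolons, and commas stand in for spaces within a
     * definition, e.g. kprobe_event=p,vfs_read,$arg1;r,vfs_read,$retval
     */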
1865static __init void setup_boot_kprobe_events(void)
1866{
1867        char *p, *cmd = kprobe_boot_events_buf;
1868        int ret;
1869
1870        strreplace(kprobe_boot_events_buf, ',', ' ');
1871
1872        while (cmd && *cmd != '\0') {
1873                p = strchr(cmd, ';');
1874                if (p)
1875                        *p++ = '\0';
1876
1877                ret = create_or_delete_trace_kprobe(cmd);
1878                if (ret)
1879                        pr_warn("Failed to add event(%d): %s\n", ret, cmd);
1880
1881                cmd = p;
1882        }
1883
1884        enable_boot_kprobe_events();
1885}
1886
1887/*
1888 * Register dynevent at core_initcall. This allows the kernel to set up
1889 * kprobe events in postcore_initcall without tracefs.
1890 */
1891static __init int init_kprobe_trace_early(void)
1892{
1893        int ret;
1894
1895        ret = dyn_event_register(&trace_kprobe_ops);
1896        if (ret)
1897                return ret;
1898
1899        if (register_module_notifier(&trace_kprobe_module_nb))
1900                return -EINVAL;
1901
1902        return 0;
1903}
1904core_initcall(init_kprobe_trace_early);
1905
1906/* Make a tracefs interface for controlling probe points */
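    /* kprobe_events adds/removes probes; kprobe_profile reports per-probe hit and missed counts. */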
1907static __init int init_kprobe_trace(void)
1908{
1909        int ret;
1910        struct dentry *entry;
1911
1912        ret = tracing_init_dentry();
1913        if (ret)
1914                return 0;
1915
1916        /* Event list interface */
1917        entry = tracefs_create_file("kprobe_events", TRACE_MODE_WRITE,
1918                                    NULL, NULL, &kprobe_events_ops);
1919
1920        if (!entry)
1921                pr_warn("Could not create tracefs 'kprobe_events' entry\n");
1922
1923        /* Profile interface */
1924        entry = tracefs_create_file("kprobe_profile", TRACE_MODE_READ,
1925                                    NULL, NULL, &kprobe_profile_ops);
1926
1927        if (!entry)
1928                pr_warn("Could not create tracefs 'kprobe_profile' entry\n");
1929
1930        setup_boot_kprobe_events();
1931
1932        return 0;
1933}
1934fs_initcall(init_kprobe_trace);
1935
1937#ifdef CONFIG_FTRACE_STARTUP_TEST
1938static __init struct trace_event_file *
1939find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
1940{
1941        struct trace_event_file *file;
1942
1943        list_for_each_entry(file, &tr->events, list)
1944                if (file->event_call == trace_probe_event_call(&tk->tp))
1945                        return file;
1946
1947        return NULL;
1948}
1949
1950/*
1951 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
1952 * stage, so we can do this locklessly.
1953 */
1954static __init int kprobe_trace_self_tests_init(void)
1955{
1956        int ret, warn = 0;
1957        int (*target)(int, int, int, int, int, int);
1958        struct trace_kprobe *tk;
1959        struct trace_event_file *file;
1960
1961        if (tracing_is_disabled())
1962                return -ENODEV;
1963
1964        if (tracing_selftest_disabled)
1965                return 0;
1966
1967        target = kprobe_trace_selftest_target;
1968
1969        pr_info("Testing kprobe tracing: ");
1970
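            /*
             * Plan: install an entry probe and a return probe on the
             * target, trigger exactly one hit of each, verify the hit
             * counts, then remove both probes again.
             */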
1971        ret = create_or_delete_trace_kprobe("p:testprobe kprobe_trace_selftest_target $stack $stack0 +0($stack)");
1972        if (WARN_ON_ONCE(ret)) {
1973                pr_warn("error on probing function entry.\n");
1974                warn++;
1975        } else {
1976                /* Enable trace point */
1977                tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1978                if (WARN_ON_ONCE(tk == NULL)) {
1979                        pr_warn("error on getting new probe.\n");
1980                        warn++;
1981                } else {
1982                        file = find_trace_probe_file(tk, top_trace_array());
1983                        if (WARN_ON_ONCE(file == NULL)) {
1984                                pr_warn("error on getting probe file.\n");
1985                                warn++;
1986                        } else
1987                                enable_trace_kprobe(
1988                                        trace_probe_event_call(&tk->tp), file);
1989                }
1990        }
1991
1992        ret = create_or_delete_trace_kprobe("r:testprobe2 kprobe_trace_selftest_target $retval");
1993        if (WARN_ON_ONCE(ret)) {
1994                pr_warn("error on probing function return.\n");
1995                warn++;
1996        } else {
1997                /* Enable trace point */
1998                tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1999                if (WARN_ON_ONCE(tk == NULL)) {
2000                        pr_warn("error on getting 2nd new probe.\n");
2001                        warn++;
2002                } else {
2003                        file = find_trace_probe_file(tk, top_trace_array());
2004                        if (WARN_ON_ONCE(file == NULL)) {
2005                                pr_warn("error on getting probe file.\n");
2006                                warn++;
2007                        } else
2008                                enable_trace_kprobe(
2009                                        trace_probe_event_call(&tk->tp), file);
2010                }
2011        }
2012
2013        if (warn)
2014                goto end;
2015
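            /* The selftest target just sums its six arguments: 1+2+3+4+5+6 == 21. */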
2016        ret = target(1, 2, 3, 4, 5, 6);
2017
2018        /*
2019         * No error is expected here; the check only prevents the optimizer
2020         * from removing the call to target(), since otherwise there would be
2021         * no side effects and the call would never be performed.
2022         */
2023        if (ret != 21)
2024                warn++;
2025
2026        /* Disable trace points before removing it */
2027        tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
2028        if (WARN_ON_ONCE(tk == NULL)) {
2029                pr_warn("error on getting test probe.\n");
2030                warn++;
2031        } else {
2032                if (trace_kprobe_nhit(tk) != 1) {
2033                        pr_warn("incorrect number of testprobe hits\n");
2034                        warn++;
2035                }
2036
2037                file = find_trace_probe_file(tk, top_trace_array());
2038                if (WARN_ON_ONCE(file == NULL)) {
2039                        pr_warn("error on getting probe file.\n");
2040                        warn++;
2041                } else
2042                        disable_trace_kprobe(
2043                                trace_probe_event_call(&tk->tp), file);
2044        }
2045
2046        tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
2047        if (WARN_ON_ONCE(tk == NULL)) {
2048                pr_warn("error on getting 2nd test probe.\n");
2049                warn++;
2050        } else {
2051                if (trace_kprobe_nhit(tk) != 1) {
2052                        pr_warn("incorrect number of testprobe2 hits\n");
2053                        warn++;
2054                }
2055
2056                file = find_trace_probe_file(tk, top_trace_array());
2057                if (WARN_ON_ONCE(file == NULL)) {
2058                        pr_warn("error on getting probe file.\n");
2059                        warn++;
2060                } else
2061                        disable_trace_kprobe(
2062                                trace_probe_event_call(&tk->tp), file);
2063        }
2064
2065        ret = create_or_delete_trace_kprobe("-:testprobe");
2066        if (WARN_ON_ONCE(ret)) {
2067                pr_warn("error on deleting a probe.\n");
2068                warn++;
2069        }
2070
2071        ret = create_or_delete_trace_kprobe("-:testprobe2");
2072        if (WARN_ON_ONCE(ret)) {
2073                pr_warn("error on deleting a probe.\n");
2074                warn++;
2075        }
2076
2077end:
2078        ret = dyn_events_release_all(&trace_kprobe_ops);
2079        if (WARN_ON_ONCE(ret)) {
2080                pr_warn("error on cleaning up probes.\n");
2081                warn++;
2082        }
2083        /*
2084         * Wait for the optimizer work to finish. Otherwise it might fiddle
2085         * with probes in already freed __init text.
2086         */
2087        wait_for_kprobe_optimizer();
2088        if (warn)
2089                pr_cont("NG: Some tests failed. Please check them.\n");
2090        else
2091                pr_cont("OK\n");
2092        return 0;
2093}
2094
2095late_initcall(kprobe_trace_self_tests_init);
2096
2097#endif  /* CONFIG_FTRACE_STARTUP_TEST */
2098