linux/kernel/trace/trace_kprobe.c
   1/*
   2 * Kprobes-based tracing events
   3 *
   4 * Created by Masami Hiramatsu <mhiramat@redhat.com>
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 as
   8 * published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write to the Free Software
  17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  18 */
  19
  20#include <linux/module.h>
  21#include <linux/uaccess.h>
  22
  23#include "trace_probe.h"
  24
  25#define KPROBE_EVENT_SYSTEM "kprobes"
  26
  27/*
  28 * Kprobe event core functions
  29 */
  30struct trace_kprobe {
  31        struct list_head        list;
  32        struct kretprobe        rp;     /* Use rp.kp for kprobe use */
  33        unsigned long           nhit;
  34        const char              *symbol;        /* symbol name */
  35        struct trace_probe      tp;
  36};
  37
  38#define SIZEOF_TRACE_KPROBE(n)                          \
  39        (offsetof(struct trace_kprobe, tp.args) +       \
  40        (sizeof(struct probe_arg) * (n)))
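/*
 * Note: tp.args[] is a flexible array at the end of the embedded struct
 * trace_probe, so the allocation size for a probe with n arguments is the
 * offset of tp.args plus n probe_arg slots.  For example, with illustrative
 * (not exact) sizes of offsetof(struct trace_kprobe, tp.args) == 96 and
 * sizeof(struct probe_arg) == 40, SIZEOF_TRACE_KPROBE(3) == 96 + 3 * 40 == 216.
 */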
  41
  42
  43static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
  44{
  45        return tk->rp.handler != NULL;
  46}
  47
  48static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
  49{
  50        return tk->symbol ? tk->symbol : "unknown";
  51}
  52
  53static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
  54{
  55        return tk->rp.kp.offset;
  56}
  57
  58static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
  59{
  60        return !!(kprobe_gone(&tk->rp.kp));
  61}
  62
  63static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
  64                                                 struct module *mod)
  65{
  66        int len = strlen(mod->name);
  67        const char *name = trace_kprobe_symbol(tk);
  68        return strncmp(mod->name, name, len) == 0 && name[len] == ':';
  69}
  70
  71static nokprobe_inline bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
  72{
  73        return !!strchr(trace_kprobe_symbol(tk), ':');
  74}
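/*
 * Example (illustrative names): a probe defined on "btrfs:btrfs_sync_file"
 * makes trace_kprobe_is_on_module() true, and trace_kprobe_within_module()
 * matches the module whose name is "btrfs".
 */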
  75
  76static int register_kprobe_event(struct trace_kprobe *tk);
  77static int unregister_kprobe_event(struct trace_kprobe *tk);
  78
  79static DEFINE_MUTEX(probe_lock);
  80static LIST_HEAD(probe_list);
  81
  82static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
  83static int kretprobe_dispatcher(struct kretprobe_instance *ri,
  84                                struct pt_regs *regs);
  85
  86/* Memory fetching by symbol */
  87struct symbol_cache {
  88        char            *symbol;
  89        long            offset;
  90        unsigned long   addr;
  91};
  92
  93unsigned long update_symbol_cache(struct symbol_cache *sc)
  94{
  95        sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);
  96
  97        if (sc->addr)
  98                sc->addr += sc->offset;
  99
 100        return sc->addr;
 101}
 102
 103void free_symbol_cache(struct symbol_cache *sc)
 104{
 105        kfree(sc->symbol);
 106        kfree(sc);
 107}
 108
 109struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
 110{
 111        struct symbol_cache *sc;
 112
 113        if (!sym || strlen(sym) == 0)
 114                return NULL;
 115
 116        sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
 117        if (!sc)
 118                return NULL;
 119
 120        sc->symbol = kstrdup(sym, GFP_KERNEL);
 121        if (!sc->symbol) {
 122                kfree(sc);
 123                return NULL;
 124        }
 125        sc->offset = offset;
 126        update_symbol_cache(sc);
 127
 128        return sc;
 129}
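/*
 * A symbol_cache backs "@SYM[+|-offs]" fetch args (see the syntax comment in
 * create_trace_kprobe() below).  For example, an arg such as "@jiffies_64+0"
 * (illustrative) is resolved through kallsyms_lookup_name() here and can be
 * refreshed later via update_symbol_cache().
 */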
 130
 131/*
 132 * Kprobes-specific fetch functions
 133 */
 134#define DEFINE_FETCH_stack(type)                                        \
 135static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,          \
 136                                          void *offset, void *dest)     \
 137{                                                                       \
 138        *(type *)dest = (type)regs_get_kernel_stack_nth(regs,           \
 139                                (unsigned int)((unsigned long)offset)); \
 140}                                                                       \
 141NOKPROBE_SYMBOL(FETCH_FUNC_NAME(stack, type));
 142
 143DEFINE_BASIC_FETCH_FUNCS(stack)
 144/* No string on the stack entry */
 145#define fetch_stack_string      NULL
 146#define fetch_stack_string_size NULL
 147
 148#define DEFINE_FETCH_memory(type)                                       \
 149static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,         \
 150                                          void *addr, void *dest)       \
 151{                                                                       \
 152        type retval;                                                    \
 153        if (probe_kernel_address(addr, retval))                         \
 154                *(type *)dest = 0;                                      \
 155        else                                                            \
 156                *(type *)dest = retval;                                 \
 157}                                                                       \
 158NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, type));
 159
 160DEFINE_BASIC_FETCH_FUNCS(memory)
 161/*
 162 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 163 * length and relative data location.
 164 */
 165static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
 166                                            void *addr, void *dest)
 167{
 168        long ret;
 169        int maxlen = get_rloc_len(*(u32 *)dest);
 170        u8 *dst = get_rloc_data(dest);
 171        u8 *src = addr;
 172        mm_segment_t old_fs = get_fs();
 173
 174        if (!maxlen)
 175                return;
 176
 177        /*
 178         * Try to get string again, since the string can be changed while
 179         * probing.
 180         */
 181        set_fs(KERNEL_DS);
 182        pagefault_disable();
 183
 184        do
 185                ret = __copy_from_user_inatomic(dst++, src++, 1);
 186        while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
 187
 188        dst[-1] = '\0';
 189        pagefault_enable();
 190        set_fs(old_fs);
 191
 192        if (ret < 0) {  /* Failed to fetch string */
 193                ((u8 *)get_rloc_data(dest))[0] = '\0';
 194                *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
 195        } else {
 196                *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
 197                                              get_rloc_offs(*(u32 *)dest));
 198        }
 199}
 200NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string));
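/*
 * The u32 at *dest packs the dynamic-data location in the same form as
 * ftrace's __data_loc fields: the length in the upper 16 bits and the
 * relative data offset in the lower 16 bits (see make_data_rloc() and
 * get_rloc_len()/get_rloc_offs() in trace_probe.h).  For example, a 5-byte
 * string stored at relative offset 24 is encoded as (5 << 16) | 24.
 */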
 201
 202/* Return the length of a string, including the terminating null byte */
 203static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
 204                                                 void *addr, void *dest)
 205{
 206        mm_segment_t old_fs;
 207        int ret, len = 0;
 208        u8 c;
 209
 210        old_fs = get_fs();
 211        set_fs(KERNEL_DS);
 212        pagefault_disable();
 213
 214        do {
 215                ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
 216                len++;
 217        } while (c && ret == 0 && len < MAX_STRING_SIZE);
 218
 219        pagefault_enable();
 220        set_fs(old_fs);
 221
 222        if (ret < 0)    /* Failed to check the length */
 223                *(u32 *)dest = 0;
 224        else
 225                *(u32 *)dest = len;
 226}
 227NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string_size));
 228
 229#define DEFINE_FETCH_symbol(type)                                       \
 230void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, void *data, void *dest)\
 231{                                                                       \
 232        struct symbol_cache *sc = data;                                 \
 233        if (sc->addr)                                                   \
 234                fetch_memory_##type(regs, (void *)sc->addr, dest);      \
 235        else                                                            \
 236                *(type *)dest = 0;                                      \
 237}                                                                       \
 238NOKPROBE_SYMBOL(FETCH_FUNC_NAME(symbol, type));
 239
 240DEFINE_BASIC_FETCH_FUNCS(symbol)
 241DEFINE_FETCH_symbol(string)
 242DEFINE_FETCH_symbol(string_size)
 243
 244/* kprobes don't support file_offset fetch methods */
 245#define fetch_file_offset_u8            NULL
 246#define fetch_file_offset_u16           NULL
 247#define fetch_file_offset_u32           NULL
 248#define fetch_file_offset_u64           NULL
 249#define fetch_file_offset_string        NULL
 250#define fetch_file_offset_string_size   NULL
 251
 252/* Fetch type information table */
 253const struct fetch_type kprobes_fetch_type_table[] = {
 254        /* Special types */
 255        [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
 256                                        sizeof(u32), 1, "__data_loc char[]"),
 257        [FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
 258                                        string_size, sizeof(u32), 0, "u32"),
 259        /* Basic types */
 260        ASSIGN_FETCH_TYPE(u8,  u8,  0),
 261        ASSIGN_FETCH_TYPE(u16, u16, 0),
 262        ASSIGN_FETCH_TYPE(u32, u32, 0),
 263        ASSIGN_FETCH_TYPE(u64, u64, 0),
 264        ASSIGN_FETCH_TYPE(s8,  u8,  1),
 265        ASSIGN_FETCH_TYPE(s16, u16, 1),
 266        ASSIGN_FETCH_TYPE(s32, u32, 1),
 267        ASSIGN_FETCH_TYPE(s64, u64, 1),
 268
 269        ASSIGN_FETCH_TYPE_END
 270};
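/*
 * A ":TYPE" suffix on a fetch arg selects an entry of this table.  For
 * example (x86 register names, illustrative only): "flags=%cx:u32" uses the
 * u32 entry, while "path=+0(%si):string" uses the string entry together with
 * its companion string_size fetch to size the dynamic data.
 */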
 271
 272/*
 273 * Allocate new trace_probe and initialize it (including kprobes).
 274 */
 275static struct trace_kprobe *alloc_trace_kprobe(const char *group,
 276                                             const char *event,
 277                                             void *addr,
 278                                             const char *symbol,
 279                                             unsigned long offs,
 280                                             int nargs, bool is_return)
 281{
 282        struct trace_kprobe *tk;
 283        int ret = -ENOMEM;
 284
 285        tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
 286        if (!tk)
 287                return ERR_PTR(ret);
 288
 289        if (symbol) {
 290                tk->symbol = kstrdup(symbol, GFP_KERNEL);
 291                if (!tk->symbol)
 292                        goto error;
 293                tk->rp.kp.symbol_name = tk->symbol;
 294                tk->rp.kp.offset = offs;
 295        } else
 296                tk->rp.kp.addr = addr;
 297
 298        if (is_return)
 299                tk->rp.handler = kretprobe_dispatcher;
 300        else
 301                tk->rp.kp.pre_handler = kprobe_dispatcher;
 302
 303        if (!event || !is_good_name(event)) {
 304                ret = -EINVAL;
 305                goto error;
 306        }
 307
 308        tk->tp.call.class = &tk->tp.class;
 309        tk->tp.call.name = kstrdup(event, GFP_KERNEL);
 310        if (!tk->tp.call.name)
 311                goto error;
 312
 313        if (!group || !is_good_name(group)) {
 314                ret = -EINVAL;
 315                goto error;
 316        }
 317
 318        tk->tp.class.system = kstrdup(group, GFP_KERNEL);
 319        if (!tk->tp.class.system)
 320                goto error;
 321
 322        INIT_LIST_HEAD(&tk->list);
 323        INIT_LIST_HEAD(&tk->tp.files);
 324        return tk;
 325error:
 326        kfree(tk->tp.call.name);
 327        kfree(tk->symbol);
 328        kfree(tk);
 329        return ERR_PTR(ret);
 330}
 331
 332static void free_trace_kprobe(struct trace_kprobe *tk)
 333{
 334        int i;
 335
 336        for (i = 0; i < tk->tp.nr_args; i++)
 337                traceprobe_free_probe_arg(&tk->tp.args[i]);
 338
 339        kfree(tk->tp.call.class->system);
 340        kfree(tk->tp.call.name);
 341        kfree(tk->symbol);
 342        kfree(tk);
 343}
 344
 345static struct trace_kprobe *find_trace_kprobe(const char *event,
 346                                              const char *group)
 347{
 348        struct trace_kprobe *tk;
 349
 350        list_for_each_entry(tk, &probe_list, list)
 351                if (strcmp(ftrace_event_name(&tk->tp.call), event) == 0 &&
 352                    strcmp(tk->tp.call.class->system, group) == 0)
 353                        return tk;
 354        return NULL;
 355}
 356
 357/*
 358 * Enable trace_probe: if the file is NULL, enable the "perf" handler,
 359 * otherwise enable the "trace" handler for the given file.
 360 */
 361static int
 362enable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file)
 363{
 364        int ret = 0;
 365
 366        if (file) {
 367                struct event_file_link *link;
 368
 369                link = kmalloc(sizeof(*link), GFP_KERNEL);
 370                if (!link) {
 371                        ret = -ENOMEM;
 372                        goto out;
 373                }
 374
 375                link->file = file;
 376                list_add_tail_rcu(&link->list, &tk->tp.files);
 377
 378                tk->tp.flags |= TP_FLAG_TRACE;
 379        } else
 380                tk->tp.flags |= TP_FLAG_PROFILE;
 381
 382        if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
 383                if (trace_kprobe_is_return(tk))
 384                        ret = enable_kretprobe(&tk->rp);
 385                else
 386                        ret = enable_kprobe(&tk->rp.kp);
 387        }
 388 out:
 389        return ret;
 390}
 391
 392/*
 393 * Disable trace_probe: if the file is NULL, disable the "perf" handler,
 394 * otherwise disable the "trace" handler for the given file.
 395 */
 396static int
 397disable_trace_kprobe(struct trace_kprobe *tk, struct ftrace_event_file *file)
 398{
 399        struct event_file_link *link = NULL;
 400        int wait = 0;
 401        int ret = 0;
 402
 403        if (file) {
 404                link = find_event_file_link(&tk->tp, file);
 405                if (!link) {
 406                        ret = -EINVAL;
 407                        goto out;
 408                }
 409
 410                list_del_rcu(&link->list);
 411                wait = 1;
 412                if (!list_empty(&tk->tp.files))
 413                        goto out;
 414
 415                tk->tp.flags &= ~TP_FLAG_TRACE;
 416        } else
 417                tk->tp.flags &= ~TP_FLAG_PROFILE;
 418
 419        if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
 420                if (trace_kprobe_is_return(tk))
 421                        disable_kretprobe(&tk->rp);
 422                else
 423                        disable_kprobe(&tk->rp.kp);
 424                wait = 1;
 425        }
 426 out:
 427        if (wait) {
 428                /*
 429                 * Synchronize with kprobe_trace_func/kretprobe_trace_func
 430                 * to ensure disabled (all running handlers are finished).
 431                 * This is not only for kfree(), but also the caller,
 432                 * trace_remove_event_call() supposes it for releasing
 433                 * event_call related objects, which will be accessed in
 434                 * the kprobe_trace_func/kretprobe_trace_func.
 435                 */
 436                synchronize_sched();
 437                kfree(link);    /* Ignored if link == NULL */
 438        }
 439
 440        return ret;
 441}
 442
 443/* Internal register function - just handle k*probes and flags */
 444static int __register_trace_kprobe(struct trace_kprobe *tk)
 445{
 446        int i, ret;
 447
 448        if (trace_probe_is_registered(&tk->tp))
 449                return -EINVAL;
 450
 451        for (i = 0; i < tk->tp.nr_args; i++)
 452                traceprobe_update_arg(&tk->tp.args[i]);
 453
 454        /* Set/clear disabled flag according to tp->flags */
 455        if (trace_probe_is_enabled(&tk->tp))
 456                tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
 457        else
 458                tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
 459
 460        if (trace_kprobe_is_return(tk))
 461                ret = register_kretprobe(&tk->rp);
 462        else
 463                ret = register_kprobe(&tk->rp.kp);
 464
 465        if (ret == 0)
 466                tk->tp.flags |= TP_FLAG_REGISTERED;
 467        else {
 468                pr_warning("Could not insert probe at %s+%lu: %d\n",
 469                           trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret);
 470                if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
 471                        pr_warning("This probe might be able to register after "
 472                                   "the target module is loaded. Continue.\n");
 473                        ret = 0;
 474                } else if (ret == -EILSEQ) {
 475                        pr_warning("Probing address(0x%p) is not an "
 476                                   "instruction boundary.\n",
 477                                   tk->rp.kp.addr);
 478                        ret = -EINVAL;
 479                }
 480        }
 481
 482        return ret;
 483}
 484
 485/* Internal unregister function - just handle k*probes and flags */
 486static void __unregister_trace_kprobe(struct trace_kprobe *tk)
 487{
 488        if (trace_probe_is_registered(&tk->tp)) {
 489                if (trace_kprobe_is_return(tk))
 490                        unregister_kretprobe(&tk->rp);
 491                else
 492                        unregister_kprobe(&tk->rp.kp);
 493                tk->tp.flags &= ~TP_FLAG_REGISTERED;
 494                /* Cleanup kprobe for reuse */
 495                if (tk->rp.kp.symbol_name)
 496                        tk->rp.kp.addr = NULL;
 497        }
 498}
 499
 500/* Unregister a trace_probe and probe_event: call with probe_lock held */
 501static int unregister_trace_kprobe(struct trace_kprobe *tk)
 502{
 503        /* An enabled event cannot be unregistered */
 504        if (trace_probe_is_enabled(&tk->tp))
 505                return -EBUSY;
 506
 507        /* Will fail if probe is being used by ftrace or perf */
 508        if (unregister_kprobe_event(tk))
 509                return -EBUSY;
 510
 511        __unregister_trace_kprobe(tk);
 512        list_del(&tk->list);
 513
 514        return 0;
 515}
 516
 517/* Register a trace_probe and probe_event */
 518static int register_trace_kprobe(struct trace_kprobe *tk)
 519{
 520        struct trace_kprobe *old_tk;
 521        int ret;
 522
 523        mutex_lock(&probe_lock);
 524
 525        /* Delete the old (same name) event if it exists */
 526        old_tk = find_trace_kprobe(ftrace_event_name(&tk->tp.call),
 527                        tk->tp.call.class->system);
 528        if (old_tk) {
 529                ret = unregister_trace_kprobe(old_tk);
 530                if (ret < 0)
 531                        goto end;
 532                free_trace_kprobe(old_tk);
 533        }
 534
 535        /* Register new event */
 536        ret = register_kprobe_event(tk);
 537        if (ret) {
 538                pr_warning("Failed to register probe event(%d)\n", ret);
 539                goto end;
 540        }
 541
 542        /* Register k*probe */
 543        ret = __register_trace_kprobe(tk);
 544        if (ret < 0)
 545                unregister_kprobe_event(tk);
 546        else
 547                list_add_tail(&tk->list, &probe_list);
 548
 549end:
 550        mutex_unlock(&probe_lock);
 551        return ret;
 552}
 553
 554/* Module notifier callback, checking events on the module */
 555static int trace_kprobe_module_callback(struct notifier_block *nb,
 556                                       unsigned long val, void *data)
 557{
 558        struct module *mod = data;
 559        struct trace_kprobe *tk;
 560        int ret;
 561
 562        if (val != MODULE_STATE_COMING)
 563                return NOTIFY_DONE;
 564
 565        /* Update probes on coming module */
 566        mutex_lock(&probe_lock);
 567        list_for_each_entry(tk, &probe_list, list) {
 568                if (trace_kprobe_within_module(tk, mod)) {
 569                        /* No need to check busy - this probe should have gone already. */
 570                        __unregister_trace_kprobe(tk);
 571                        ret = __register_trace_kprobe(tk);
 572                        if (ret)
 573                                pr_warning("Failed to re-register probe %s on "
 574                                           "%s: %d\n",
 575                                           ftrace_event_name(&tk->tp.call),
 576                                           mod->name, ret);
 577                }
 578        }
 579        mutex_unlock(&probe_lock);
 580
 581        return NOTIFY_DONE;
 582}
 583
 584static struct notifier_block trace_kprobe_module_nb = {
 585        .notifier_call = trace_kprobe_module_callback,
 586        .priority = 1   /* Invoked after kprobe module callback */
 587};
 588
 589static int create_trace_kprobe(int argc, char **argv)
 590{
 591        /*
 592         * Argument syntax:
 593         *  - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
 594         *  - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
 595         * Fetch args:
 596         *  $retval     : fetch return value
 597         *  $stack      : fetch stack address
 598         *  $stackN     : fetch Nth entry of stack (N >= 0)
 599         *  @ADDR       : fetch memory at ADDR (ADDR should be in kernel)
 600         *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
 601         *  %REG        : fetch register REG
 602         * Dereferencing memory fetch:
 603         *  +|-offs(ARG) : fetch memory at the address ARG +|- offs.
 604         * Alias name of args:
 605         *  NAME=FETCHARG : set NAME as alias of FETCHARG.
 606         * Type of args:
 607         *  FETCHARG:TYPE : use TYPE instead of unsigned long.
 608         */
 609        struct trace_kprobe *tk;
 610        int i, ret = 0;
 611        bool is_return = false, is_delete = false;
 612        char *symbol = NULL, *event = NULL, *group = NULL;
 613        char *arg;
 614        unsigned long offset = 0;
 615        void *addr = NULL;
 616        char buf[MAX_EVENT_NAME_LEN];
 617
 618        /* argc must be >= 1 */
 619        if (argv[0][0] == 'p')
 620                is_return = false;
 621        else if (argv[0][0] == 'r')
 622                is_return = true;
 623        else if (argv[0][0] == '-')
 624                is_delete = true;
 625        else {
 626                pr_info("Probe definition must start with 'p', 'r' or"
 627                        " '-'.\n");
 628                return -EINVAL;
 629        }
 630
 631        if (argv[0][1] == ':') {
 632                event = &argv[0][2];
 633                if (strchr(event, '/')) {
 634                        group = event;
 635                        event = strchr(group, '/') + 1;
 636                        event[-1] = '\0';
 637                        if (strlen(group) == 0) {
 638                                pr_info("Group name is not specified\n");
 639                                return -EINVAL;
 640                        }
 641                }
 642                if (strlen(event) == 0) {
 643                        pr_info("Event name is not specified\n");
 644                        return -EINVAL;
 645                }
 646        }
 647        if (!group)
 648                group = KPROBE_EVENT_SYSTEM;
 649
 650        if (is_delete) {
 651                if (!event) {
 652                        pr_info("Delete command needs an event name.\n");
 653                        return -EINVAL;
 654                }
 655                mutex_lock(&probe_lock);
 656                tk = find_trace_kprobe(event, group);
 657                if (!tk) {
 658                        mutex_unlock(&probe_lock);
 659                        pr_info("Event %s/%s doesn't exist.\n", group, event);
 660                        return -ENOENT;
 661                }
 662                /* delete an event */
 663                ret = unregister_trace_kprobe(tk);
 664                if (ret == 0)
 665                        free_trace_kprobe(tk);
 666                mutex_unlock(&probe_lock);
 667                return ret;
 668        }
 669
 670        if (argc < 2) {
 671                pr_info("Probe point is not specified.\n");
 672                return -EINVAL;
 673        }
 674        if (isdigit(argv[1][0])) {
 675                if (is_return) {
 676                        pr_info("Return probe point must be a symbol.\n");
 677                        return -EINVAL;
 678                }
 679                /* an address specified */
 680                ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
 681                if (ret) {
 682                        pr_info("Failed to parse address.\n");
 683                        return ret;
 684                }
 685        } else {
 686                /* a symbol specified */
 687                symbol = argv[1];
 688                /* TODO: support .init module functions */
 689                ret = traceprobe_split_symbol_offset(symbol, &offset);
 690                if (ret) {
 691                        pr_info("Failed to parse symbol.\n");
 692                        return ret;
 693                }
 694                if (offset && is_return) {
 695                        pr_info("Return probe must be used without offset.\n");
 696                        return -EINVAL;
 697                }
 698        }
 699        argc -= 2; argv += 2;
 700
 701        /* setup a probe */
 702        if (!event) {
 703                /* Make a new event name */
 704                if (symbol)
 705                        snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
 706                                 is_return ? 'r' : 'p', symbol, offset);
 707                else
 708                        snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
 709                                 is_return ? 'r' : 'p', addr);
 710                event = buf;
 711        }
 712        tk = alloc_trace_kprobe(group, event, addr, symbol, offset, argc,
 713                               is_return);
 714        if (IS_ERR(tk)) {
 715                pr_info("Failed to allocate trace_probe.(%d)\n",
 716                        (int)PTR_ERR(tk));
 717                return PTR_ERR(tk);
 718        }
 719
 720        /* parse arguments */
 721        ret = 0;
 722        for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
 723                struct probe_arg *parg = &tk->tp.args[i];
 724
 725                /* Increment count for freeing args in error case */
 726                tk->tp.nr_args++;
 727
 728                /* Parse argument name */
 729                arg = strchr(argv[i], '=');
 730                if (arg) {
 731                        *arg++ = '\0';
 732                        parg->name = kstrdup(argv[i], GFP_KERNEL);
 733                } else {
 734                        arg = argv[i];
 735                        /* If argument name is omitted, set "argN" */
 736                        snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
 737                        parg->name = kstrdup(buf, GFP_KERNEL);
 738                }
 739
 740                if (!parg->name) {
 741                        pr_info("Failed to allocate argument[%d] name.\n", i);
 742                        ret = -ENOMEM;
 743                        goto error;
 744                }
 745
 746                if (!is_good_name(parg->name)) {
 747                        pr_info("Invalid argument[%d] name: %s\n",
 748                                i, parg->name);
 749                        ret = -EINVAL;
 750                        goto error;
 751                }
 752
 753                if (traceprobe_conflict_field_name(parg->name,
 754                                                        tk->tp.args, i)) {
 755                        pr_info("Argument[%d] name '%s' conflicts with "
 756                                "another field.\n", i, argv[i]);
 757                        ret = -EINVAL;
 758                        goto error;
 759                }
 760
 761                /* Parse fetch argument */
 762                ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg,
 763                                                is_return, true);
 764                if (ret) {
 765                        pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
 766                        goto error;
 767                }
 768        }
 769
 770        ret = register_trace_kprobe(tk);
 771        if (ret)
 772                goto error;
 773        return 0;
 774
 775error:
 776        free_trace_kprobe(tk);
 777        return ret;
 778}
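/*
 * Usage sketch for the text interface parsed above.  The mount point and the
 * probed symbol/register names below are assumptions that depend on the
 * system and architecture; they are for illustration only:
 *
 *        const char *cmd = "p:myprobe do_sys_open dfd=%ax\n";
 *        int fd = open("/sys/kernel/debug/tracing/kprobe_events",
 *                      O_WRONLY | O_APPEND);
 *
 *        if (fd >= 0) {
 *                write(fd, cmd, strlen(cmd));
 *                close(fd);
 *        }
 *
 * Writing "-:myprobe" to the same file later deletes the event again, as
 * handled by the is_delete branch above.
 */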
 779
 780static int release_all_trace_kprobes(void)
 781{
 782        struct trace_kprobe *tk;
 783        int ret = 0;
 784
 785        mutex_lock(&probe_lock);
 786        /* Ensure no probe is in use. */
 787        list_for_each_entry(tk, &probe_list, list)
 788                if (trace_probe_is_enabled(&tk->tp)) {
 789                        ret = -EBUSY;
 790                        goto end;
 791                }
 792        /* TODO: Use batch unregistration */
 793        while (!list_empty(&probe_list)) {
 794                tk = list_entry(probe_list.next, struct trace_kprobe, list);
 795                ret = unregister_trace_kprobe(tk);
 796                if (ret)
 797                        goto end;
 798                free_trace_kprobe(tk);
 799        }
 800
 801end:
 802        mutex_unlock(&probe_lock);
 803
 804        return ret;
 805}
 806
 807/* Probes listing interfaces */
 808static void *probes_seq_start(struct seq_file *m, loff_t *pos)
 809{
 810        mutex_lock(&probe_lock);
 811        return seq_list_start(&probe_list, *pos);
 812}
 813
 814static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
 815{
 816        return seq_list_next(v, &probe_list, pos);
 817}
 818
 819static void probes_seq_stop(struct seq_file *m, void *v)
 820{
 821        mutex_unlock(&probe_lock);
 822}
 823
 824static int probes_seq_show(struct seq_file *m, void *v)
 825{
 826        struct trace_kprobe *tk = v;
 827        int i;
 828
 829        seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
 830        seq_printf(m, ":%s/%s", tk->tp.call.class->system,
 831                        ftrace_event_name(&tk->tp.call));
 832
 833        if (!tk->symbol)
 834                seq_printf(m, " 0x%p", tk->rp.kp.addr);
 835        else if (tk->rp.kp.offset)
 836                seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
 837                           tk->rp.kp.offset);
 838        else
 839                seq_printf(m, " %s", trace_kprobe_symbol(tk));
 840
 841        for (i = 0; i < tk->tp.nr_args; i++)
 842                seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
 843        seq_putc(m, '\n');
 844
 845        return 0;
 846}
 847
 848static const struct seq_operations probes_seq_op = {
 849        .start  = probes_seq_start,
 850        .next   = probes_seq_next,
 851        .stop   = probes_seq_stop,
 852        .show   = probes_seq_show
 853};
 854
 855static int probes_open(struct inode *inode, struct file *file)
 856{
 857        int ret;
 858
 859        if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
 860                ret = release_all_trace_kprobes();
 861                if (ret < 0)
 862                        return ret;
 863        }
 864
 865        return seq_open(file, &probes_seq_op);
 866}
 867
 868static ssize_t probes_write(struct file *file, const char __user *buffer,
 869                            size_t count, loff_t *ppos)
 870{
 871        return traceprobe_probes_write(file, buffer, count, ppos,
 872                        create_trace_kprobe);
 873}
 874
 875static const struct file_operations kprobe_events_ops = {
 876        .owner          = THIS_MODULE,
 877        .open           = probes_open,
 878        .read           = seq_read,
 879        .llseek         = seq_lseek,
 880        .release        = seq_release,
 881        .write          = probes_write,
 882};
 883
 884/* Probes profiling interfaces */
 885static int probes_profile_seq_show(struct seq_file *m, void *v)
 886{
 887        struct trace_kprobe *tk = v;
 888
 889        seq_printf(m, "  %-44s %15lu %15lu\n",
 890                   ftrace_event_name(&tk->tp.call), tk->nhit,
 891                   tk->rp.kp.nmissed);
 892
 893        return 0;
 894}
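/*
 * Each "kprobe_profile" line produced above shows, in order, the event name
 * (left-justified in 44 columns), the hit count and the missed count, e.g.
 * (values and spacing illustrative):
 *
 *          myprobe                                          15               0
 */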
 895
 896static const struct seq_operations profile_seq_op = {
 897        .start  = probes_seq_start,
 898        .next   = probes_seq_next,
 899        .stop   = probes_seq_stop,
 900        .show   = probes_profile_seq_show
 901};
 902
 903static int profile_open(struct inode *inode, struct file *file)
 904{
 905        return seq_open(file, &profile_seq_op);
 906}
 907
 908static const struct file_operations kprobe_profile_ops = {
 909        .owner          = THIS_MODULE,
 910        .open           = profile_open,
 911        .read           = seq_read,
 912        .llseek         = seq_lseek,
 913        .release        = seq_release,
 914};
 915
 916/* Kprobe handler */
 917static nokprobe_inline void
 918__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
 919                    struct ftrace_event_file *ftrace_file)
 920{
 921        struct kprobe_trace_entry_head *entry;
 922        struct ring_buffer_event *event;
 923        struct ring_buffer *buffer;
 924        int size, dsize, pc;
 925        unsigned long irq_flags;
 926        struct ftrace_event_call *call = &tk->tp.call;
 927
 928        WARN_ON(call != ftrace_file->event_call);
 929
 930        if (ftrace_trigger_soft_disabled(ftrace_file))
 931                return;
 932
 933        local_save_flags(irq_flags);
 934        pc = preempt_count();
 935
 936        dsize = __get_data_size(&tk->tp, regs);
 937        size = sizeof(*entry) + tk->tp.size + dsize;
 938
 939        event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
 940                                                call->event.type,
 941                                                size, irq_flags, pc);
 942        if (!event)
 943                return;
 944
 945        entry = ring_buffer_event_data(event);
 946        entry->ip = (unsigned long)tk->rp.kp.addr;
 947        store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
 948
 949        event_trigger_unlock_commit_regs(ftrace_file, buffer, event,
 950                                         entry, irq_flags, pc, regs);
 951}
 952
 953static void
 954kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
 955{
 956        struct event_file_link *link;
 957
 958        list_for_each_entry_rcu(link, &tk->tp.files, list)
 959                __kprobe_trace_func(tk, regs, link->file);
 960}
 961NOKPROBE_SYMBOL(kprobe_trace_func);
 962
 963/* Kretprobe handler */
 964static nokprobe_inline void
 965__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
 966                       struct pt_regs *regs,
 967                       struct ftrace_event_file *ftrace_file)
 968{
 969        struct kretprobe_trace_entry_head *entry;
 970        struct ring_buffer_event *event;
 971        struct ring_buffer *buffer;
 972        int size, pc, dsize;
 973        unsigned long irq_flags;
 974        struct ftrace_event_call *call = &tk->tp.call;
 975
 976        WARN_ON(call != ftrace_file->event_call);
 977
 978        if (ftrace_trigger_soft_disabled(ftrace_file))
 979                return;
 980
 981        local_save_flags(irq_flags);
 982        pc = preempt_count();
 983
 984        dsize = __get_data_size(&tk->tp, regs);
 985        size = sizeof(*entry) + tk->tp.size + dsize;
 986
 987        event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
 988                                                call->event.type,
 989                                                size, irq_flags, pc);
 990        if (!event)
 991                return;
 992
 993        entry = ring_buffer_event_data(event);
 994        entry->func = (unsigned long)tk->rp.kp.addr;
 995        entry->ret_ip = (unsigned long)ri->ret_addr;
 996        store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
 997
 998        event_trigger_unlock_commit_regs(ftrace_file, buffer, event,
 999                                         entry, irq_flags, pc, regs);
1000}
1001
1002static void
1003kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1004                     struct pt_regs *regs)
1005{
1006        struct event_file_link *link;
1007
1008        list_for_each_entry_rcu(link, &tk->tp.files, list)
1009                __kretprobe_trace_func(tk, ri, regs, link->file);
1010}
1011NOKPROBE_SYMBOL(kretprobe_trace_func);
1012
1013/* Event entry printers */
1014static enum print_line_t
1015print_kprobe_event(struct trace_iterator *iter, int flags,
1016                   struct trace_event *event)
1017{
1018        struct kprobe_trace_entry_head *field;
1019        struct trace_seq *s = &iter->seq;
1020        struct trace_probe *tp;
1021        u8 *data;
1022        int i;
1023
1024        field = (struct kprobe_trace_entry_head *)iter->ent;
1025        tp = container_of(event, struct trace_probe, call.event);
1026
1027        trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call));
1028
1029        if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
1030                goto out;
1031
1032        trace_seq_putc(s, ')');
1033
1034        data = (u8 *)&field[1];
1035        for (i = 0; i < tp->nr_args; i++)
1036                if (!tp->args[i].type->print(s, tp->args[i].name,
1037                                             data + tp->args[i].offset, field))
1038                        goto out;
1039
1040        trace_seq_putc(s, '\n');
1041 out:
1042        return trace_handle_return(s);
1043}
1044
1045static enum print_line_t
1046print_kretprobe_event(struct trace_iterator *iter, int flags,
1047                      struct trace_event *event)
1048{
1049        struct kretprobe_trace_entry_head *field;
1050        struct trace_seq *s = &iter->seq;
1051        struct trace_probe *tp;
1052        u8 *data;
1053        int i;
1054
1055        field = (struct kretprobe_trace_entry_head *)iter->ent;
1056        tp = container_of(event, struct trace_probe, call.event);
1057
1058        trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call));
1059
1060        if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1061                goto out;
1062
1063        trace_seq_puts(s, " <- ");
1064
1065        if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1066                goto out;
1067
1068        trace_seq_putc(s, ')');
1069
1070        data = (u8 *)&field[1];
1071        for (i = 0; i < tp->nr_args; i++)
1072                if (!tp->args[i].type->print(s, tp->args[i].name,
1073                                             data + tp->args[i].offset, field))
1074                        goto out;
1075
1076        trace_seq_putc(s, '\n');
1077
1078 out:
1079        return trace_handle_return(s);
1080}
1081
1082
1083static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
1084{
1085        int ret, i;
1086        struct kprobe_trace_entry_head field;
1087        struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
1088
1089        DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1090        /* Set argument names as fields */
1091        for (i = 0; i < tk->tp.nr_args; i++) {
1092                struct probe_arg *parg = &tk->tp.args[i];
1093
1094                ret = trace_define_field(event_call, parg->type->fmttype,
1095                                         parg->name,
1096                                         sizeof(field) + parg->offset,
1097                                         parg->type->size,
1098                                         parg->type->is_signed,
1099                                         FILTER_OTHER);
1100                if (ret)
1101                        return ret;
1102        }
1103        return 0;
1104}
1105
1106static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
1107{
1108        int ret, i;
1109        struct kretprobe_trace_entry_head field;
1110        struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
1111
1112        DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1113        DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1114        /* Set argument names as fields */
1115        for (i = 0; i < tk->tp.nr_args; i++) {
1116                struct probe_arg *parg = &tk->tp.args[i];
1117
1118                ret = trace_define_field(event_call, parg->type->fmttype,
1119                                         parg->name,
1120                                         sizeof(field) + parg->offset,
1121                                         parg->type->size,
1122                                         parg->type->is_signed,
1123                                         FILTER_OTHER);
1124                if (ret)
1125                        return ret;
1126        }
1127        return 0;
1128}
1129
1130#ifdef CONFIG_PERF_EVENTS
1131
1132/* Kprobe profile handler */
1133static void
1134kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1135{
1136        struct ftrace_event_call *call = &tk->tp.call;
1137        struct kprobe_trace_entry_head *entry;
1138        struct hlist_head *head;
1139        int size, __size, dsize;
1140        int rctx;
1141
1142        head = this_cpu_ptr(call->perf_events);
1143        if (hlist_empty(head))
1144                return;
1145
1146        dsize = __get_data_size(&tk->tp, regs);
1147        __size = sizeof(*entry) + tk->tp.size + dsize;
1148        size = ALIGN(__size + sizeof(u32), sizeof(u64));
1149        size -= sizeof(u32);
1150
1151        entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
1152        if (!entry)
1153                return;
1154
1155        entry->ip = (unsigned long)tk->rp.kp.addr;
1156        memset(&entry[1], 0, dsize);
1157        store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
1158        perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
1159}
1160NOKPROBE_SYMBOL(kprobe_perf_func);
1161
1162/* Kretprobe profile handler */
1163static void
1164kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1165                    struct pt_regs *regs)
1166{
1167        struct ftrace_event_call *call = &tk->tp.call;
1168        struct kretprobe_trace_entry_head *entry;
1169        struct hlist_head *head;
1170        int size, __size, dsize;
1171        int rctx;
1172
1173        head = this_cpu_ptr(call->perf_events);
1174        if (hlist_empty(head))
1175                return;
1176
1177        dsize = __get_data_size(&tk->tp, regs);
1178        __size = sizeof(*entry) + tk->tp.size + dsize;
1179        size = ALIGN(__size + sizeof(u32), sizeof(u64));
1180        size -= sizeof(u32);
1181
1182        entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
1183        if (!entry)
1184                return;
1185
1186        entry->func = (unsigned long)tk->rp.kp.addr;
1187        entry->ret_ip = (unsigned long)ri->ret_addr;
1188        store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
1189        perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
1190}
1191NOKPROBE_SYMBOL(kretprobe_perf_func);
1192#endif  /* CONFIG_PERF_EVENTS */
1193
1194/*
1195 * Called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
1196 *
1197 * kprobe_trace_self_tests_init() does enable_trace_kprobe/disable_trace_kprobe
1198 * locklessly, but we can't race with this __init function.
1199 */
1200static int kprobe_register(struct ftrace_event_call *event,
1201                           enum trace_reg type, void *data)
1202{
1203        struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
1204        struct ftrace_event_file *file = data;
1205
1206        switch (type) {
1207        case TRACE_REG_REGISTER:
1208                return enable_trace_kprobe(tk, file);
1209        case TRACE_REG_UNREGISTER:
1210                return disable_trace_kprobe(tk, file);
1211
1212#ifdef CONFIG_PERF_EVENTS
1213        case TRACE_REG_PERF_REGISTER:
1214                return enable_trace_kprobe(tk, NULL);
1215        case TRACE_REG_PERF_UNREGISTER:
1216                return disable_trace_kprobe(tk, NULL);
1217        case TRACE_REG_PERF_OPEN:
1218        case TRACE_REG_PERF_CLOSE:
1219        case TRACE_REG_PERF_ADD:
1220        case TRACE_REG_PERF_DEL:
1221                return 0;
1222#endif
1223        }
1224        return 0;
1225}
1226
1227static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1228{
1229        struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
1230
1231        tk->nhit++;
1232
1233        if (tk->tp.flags & TP_FLAG_TRACE)
1234                kprobe_trace_func(tk, regs);
1235#ifdef CONFIG_PERF_EVENTS
1236        if (tk->tp.flags & TP_FLAG_PROFILE)
1237                kprobe_perf_func(tk, regs);
1238#endif
1239        return 0;       /* We don't tweak the kernel, so just return 0 */
1240}
1241NOKPROBE_SYMBOL(kprobe_dispatcher);
1242
1243static int
1244kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1245{
1246        struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);
1247
1248        tk->nhit++;
1249
1250        if (tk->tp.flags & TP_FLAG_TRACE)
1251                kretprobe_trace_func(tk, ri, regs);
1252#ifdef CONFIG_PERF_EVENTS
1253        if (tk->tp.flags & TP_FLAG_PROFILE)
1254                kretprobe_perf_func(tk, ri, regs);
1255#endif
1256        return 0;       /* We don't tweak the kernel, so just return 0 */
1257}
1258NOKPROBE_SYMBOL(kretprobe_dispatcher);
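/*
 * The dispatchers above are ordinary kprobe/kretprobe handlers attached to
 * tk->rp.kp / tk->rp in __register_trace_kprobe().  A minimal sketch of the
 * same pattern on the raw kprobes API, for comparison only (the probed
 * symbol name is an assumption):
 *
 *        static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
 *        {
 *                pr_info("hit at %p\n", p->addr);
 *                return 0;
 *        }
 *
 *        static struct kprobe my_kp = {
 *                .symbol_name    = "do_sys_open",
 *                .pre_handler    = my_pre_handler,
 *        };
 *
 *        ... register_kprobe(&my_kp) ... unregister_kprobe(&my_kp) ...
 */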
1259
1260static struct trace_event_functions kretprobe_funcs = {
1261        .trace          = print_kretprobe_event
1262};
1263
1264static struct trace_event_functions kprobe_funcs = {
1265        .trace          = print_kprobe_event
1266};
1267
1268static int register_kprobe_event(struct trace_kprobe *tk)
1269{
1270        struct ftrace_event_call *call = &tk->tp.call;
1271        int ret;
1272
1273        /* Initialize ftrace_event_call */
1274        INIT_LIST_HEAD(&call->class->fields);
1275        if (trace_kprobe_is_return(tk)) {
1276                call->event.funcs = &kretprobe_funcs;
1277                call->class->define_fields = kretprobe_event_define_fields;
1278        } else {
1279                call->event.funcs = &kprobe_funcs;
1280                call->class->define_fields = kprobe_event_define_fields;
1281        }
1282        if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
1283                return -ENOMEM;
1284        ret = register_ftrace_event(&call->event);
1285        if (!ret) {
1286                kfree(call->print_fmt);
1287                return -ENODEV;
1288        }
1289        call->flags = 0;
1290        call->class->reg = kprobe_register;
1291        call->data = tk;
1292        ret = trace_add_event_call(call);
1293        if (ret) {
1294                pr_info("Failed to register kprobe event: %s\n",
1295                        ftrace_event_name(call));
1296                kfree(call->print_fmt);
1297                unregister_ftrace_event(&call->event);
1298        }
1299        return ret;
1300}
1301
1302static int unregister_kprobe_event(struct trace_kprobe *tk)
1303{
1304        int ret;
1305
1306        /* tp->event is unregistered in trace_remove_event_call() */
1307        ret = trace_remove_event_call(&tk->tp.call);
1308        if (!ret)
1309                kfree(tk->tp.call.print_fmt);
1310        return ret;
1311}
1312
1313/* Make a debugfs interface for controlling probe points */
1314static __init int init_kprobe_trace(void)
1315{
1316        struct dentry *d_tracer;
1317        struct dentry *entry;
1318
1319        if (register_module_notifier(&trace_kprobe_module_nb))
1320                return -EINVAL;
1321
1322        d_tracer = tracing_init_dentry();
1323        if (!d_tracer)
1324                return 0;
1325
1326        entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
1327                                    NULL, &kprobe_events_ops);
1328
1329        /* Event list interface */
1330        if (!entry)
1331                pr_warning("Could not create debugfs "
1332                           "'kprobe_events' entry\n");
1333
1334        /* Profile interface */
1335        entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
1336                                    NULL, &kprobe_profile_ops);
1337
1338        if (!entry)
1339                pr_warning("Could not create debugfs "
1340                           "'kprobe_profile' entry\n");
1341        return 0;
1342}
1343fs_initcall(init_kprobe_trace);
1344
1345
1346#ifdef CONFIG_FTRACE_STARTUP_TEST
1347
1348/*
1349 * The "__used" keeps gcc from removing the function symbol
1350 * from the kallsyms table.
1351 */
1352static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
1353                                               int a4, int a5, int a6)
1354{
1355        return a1 + a2 + a3 + a4 + a5 + a6;
1356}
1357
1358static struct ftrace_event_file *
1359find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
1360{
1361        struct ftrace_event_file *file;
1362
1363        list_for_each_entry(file, &tr->events, list)
1364                if (file->event_call == &tk->tp.call)
1365                        return file;
1366
1367        return NULL;
1368}
1369
1370/*
1371 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
1372 * stage, so we can do this locklessly.
1373 */
1374static __init int kprobe_trace_self_tests_init(void)
1375{
1376        int ret, warn = 0;
1377        int (*target)(int, int, int, int, int, int);
1378        struct trace_kprobe *tk;
1379        struct ftrace_event_file *file;
1380
1381        if (tracing_is_disabled())
1382                return -ENODEV;
1383
1384        target = kprobe_trace_selftest_target;
1385
1386        pr_info("Testing kprobe tracing: ");
1387
1388        ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
1389                                  "$stack $stack0 +0($stack)",
1390                                  create_trace_kprobe);
1391        if (WARN_ON_ONCE(ret)) {
1392                pr_warn("error on probing function entry.\n");
1393                warn++;
1394        } else {
1395                /* Enable trace point */
1396                tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1397                if (WARN_ON_ONCE(tk == NULL)) {
1398                        pr_warn("error on getting new probe.\n");
1399                        warn++;
1400                } else {
1401                        file = find_trace_probe_file(tk, top_trace_array());
1402                        if (WARN_ON_ONCE(file == NULL)) {
1403                                pr_warn("error on getting probe file.\n");
1404                                warn++;
1405                        } else
1406                                enable_trace_kprobe(tk, file);
1407                }
1408        }
1409
1410        ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
1411                                  "$retval", create_trace_kprobe);
1412        if (WARN_ON_ONCE(ret)) {
1413                pr_warn("error on probing function return.\n");
1414                warn++;
1415        } else {
1416                /* Enable trace point */
1417                tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1418                if (WARN_ON_ONCE(tk == NULL)) {
1419                        pr_warn("error on getting 2nd new probe.\n");
1420                        warn++;
1421                } else {
1422                        file = find_trace_probe_file(tk, top_trace_array());
1423                        if (WARN_ON_ONCE(file == NULL)) {
1424                                pr_warn("error on getting probe file.\n");
1425                                warn++;
1426                        } else
1427                                enable_trace_kprobe(tk, file);
1428                }
1429        }
1430
1431        if (warn)
1432                goto end;
1433
1434        ret = target(1, 2, 3, 4, 5, 6);
1435
1436        /* Disable trace points before removing it */
1437        tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1438        if (WARN_ON_ONCE(tk == NULL)) {
1439                pr_warn("error on getting test probe.\n");
1440                warn++;
1441        } else {
1442                file = find_trace_probe_file(tk, top_trace_array());
1443                if (WARN_ON_ONCE(file == NULL)) {
1444                        pr_warn("error on getting probe file.\n");
1445                        warn++;
1446                } else
1447                        disable_trace_kprobe(tk, file);
1448        }
1449
1450        tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1451        if (WARN_ON_ONCE(tk == NULL)) {
1452                pr_warn("error on getting 2nd test probe.\n");
1453                warn++;
1454        } else {
1455                file = find_trace_probe_file(tk, top_trace_array());
1456                if (WARN_ON_ONCE(file == NULL)) {
1457                        pr_warn("error on getting probe file.\n");
1458                        warn++;
1459                } else
1460                        disable_trace_kprobe(tk, file);
1461        }
1462
1463        ret = traceprobe_command("-:testprobe", create_trace_kprobe);
1464        if (WARN_ON_ONCE(ret)) {
1465                pr_warn("error on deleting a probe.\n");
1466                warn++;
1467        }
1468
1469        ret = traceprobe_command("-:testprobe2", create_trace_kprobe);
1470        if (WARN_ON_ONCE(ret)) {
1471                pr_warn("error on deleting a probe.\n");
1472                warn++;
1473        }
1474
1475end:
1476        release_all_trace_kprobes();
1477        if (warn)
1478                pr_cont("NG: Some tests failed. Please check them.\n");
1479        else
1480                pr_cont("OK\n");
1481        return 0;
1482}
1483
1484late_initcall(kprobe_trace_self_tests_init);
1485
1486#endif
1487