linux/kernel/trace/trace_kprobe.c
   1/*
   2 * Kprobes-based tracing events
   3 *
   4 * Created by Masami Hiramatsu <mhiramat@redhat.com>
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 as
   8 * published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write to the Free Software
  17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  18 */
  19
  20#include <linux/module.h>
  21#include <linux/uaccess.h>
  22
  23#include "trace_probe.h"
  24
  25#define KPROBE_EVENT_SYSTEM "kprobes"
  26
   27/*
  28 * Kprobe event core functions
  29 */
  30struct trace_kprobe {
  31        struct list_head        list;
  32        struct kretprobe        rp;     /* Use rp.kp for kprobe use */
  33        unsigned long           nhit;
  34        const char              *symbol;        /* symbol name */
  35        struct trace_probe      tp;
  36};
  37
  38#define SIZEOF_TRACE_KPROBE(n)                          \
  39        (offsetof(struct trace_kprobe, tp.args) +       \
  40        (sizeof(struct probe_arg) * (n)))
  41
  42
  43static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
  44{
  45        return tk->rp.handler != NULL;
  46}
  47
  48static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
  49{
  50        return tk->symbol ? tk->symbol : "unknown";
  51}
  52
  53static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
  54{
  55        return tk->rp.kp.offset;
  56}
  57
  58static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
  59{
  60        return !!(kprobe_gone(&tk->rp.kp));
  61}
  62
  63static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
  64                                                 struct module *mod)
  65{
  66        int len = strlen(mod->name);
  67        const char *name = trace_kprobe_symbol(tk);
  68        return strncmp(mod->name, name, len) == 0 && name[len] == ':';
  69}
  70
  71static nokprobe_inline bool trace_kprobe_is_on_module(struct trace_kprobe *tk)
  72{
  73        return !!strchr(trace_kprobe_symbol(tk), ':');
  74}
  75
  76static int register_kprobe_event(struct trace_kprobe *tk);
  77static int unregister_kprobe_event(struct trace_kprobe *tk);
  78
  79static DEFINE_MUTEX(probe_lock);
  80static LIST_HEAD(probe_list);
  81
  82static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
  83static int kretprobe_dispatcher(struct kretprobe_instance *ri,
  84                                struct pt_regs *regs);
  85
  86/* Memory fetching by symbol */
  87struct symbol_cache {
  88        char            *symbol;
  89        long            offset;
  90        unsigned long   addr;
  91};
  92
  93unsigned long update_symbol_cache(struct symbol_cache *sc)
  94{
  95        sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);
  96
  97        if (sc->addr)
  98                sc->addr += sc->offset;
  99
 100        return sc->addr;
 101}
 102
 103void free_symbol_cache(struct symbol_cache *sc)
 104{
 105        kfree(sc->symbol);
 106        kfree(sc);
 107}
 108
 109struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
 110{
 111        struct symbol_cache *sc;
 112
 113        if (!sym || strlen(sym) == 0)
 114                return NULL;
 115
 116        sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
 117        if (!sc)
 118                return NULL;
 119
 120        sc->symbol = kstrdup(sym, GFP_KERNEL);
 121        if (!sc->symbol) {
 122                kfree(sc);
 123                return NULL;
 124        }
 125        sc->offset = offset;
 126        update_symbol_cache(sc);
 127
 128        return sc;
 129}
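
     /*
      * A symbol_cache like the one built above backs "@SYM[+|-offs]" fetch
      * arguments, e.g. "@jiffies" or "@my_var+8" (both illustrative). The
      * address is resolved through kallsyms_lookup_name() and refreshed via
      * update_symbol_cache() when the probe is (re)registered, for instance
      * after the target module is loaded.
      */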
 130
 131/*
 132 * Kprobes-specific fetch functions
 133 */
 134#define DEFINE_FETCH_stack(type)                                        \
 135static void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,          \
 136                                          void *offset, void *dest)     \
 137{                                                                       \
 138        *(type *)dest = (type)regs_get_kernel_stack_nth(regs,           \
 139                                (unsigned int)((unsigned long)offset)); \
 140}                                                                       \
 141NOKPROBE_SYMBOL(FETCH_FUNC_NAME(stack, type));
 142
 143DEFINE_BASIC_FETCH_FUNCS(stack)
 144/* No string on the stack entry */
 145#define fetch_stack_string      NULL
 146#define fetch_stack_string_size NULL
 147
 148#define DEFINE_FETCH_memory(type)                                       \
 149static void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,         \
 150                                          void *addr, void *dest)       \
 151{                                                                       \
 152        type retval;                                                    \
 153        if (probe_kernel_address(addr, retval))                         \
 154                *(type *)dest = 0;                                      \
 155        else                                                            \
 156                *(type *)dest = retval;                                 \
 157}                                                                       \
 158NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, type));
 159
 160DEFINE_BASIC_FETCH_FUNCS(memory)
 161/*
 162 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 163 * length and relative data location.
 164 */
 165static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
 166                                            void *addr, void *dest)
 167{
 168        int maxlen = get_rloc_len(*(u32 *)dest);
 169        u8 *dst = get_rloc_data(dest);
 170        long ret;
 171
 172        if (!maxlen)
 173                return;
 174
 175        /*
 176         * Try to get string again, since the string can be changed while
 177         * probing.
 178         */
 179        ret = strncpy_from_unsafe(dst, addr, maxlen);
 180
 181        if (ret < 0) {  /* Failed to fetch string */
 182                dst[0] = '\0';
 183                *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
 184        } else {
 185                *(u32 *)dest = make_data_rloc(ret, get_rloc_offs(*(u32 *)dest));
 186        }
 187}
 188NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string));
 189
  190/* Return the length of the string -- including the terminating null byte */
 191static void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
 192                                                 void *addr, void *dest)
 193{
 194        mm_segment_t old_fs;
 195        int ret, len = 0;
 196        u8 c;
 197
 198        old_fs = get_fs();
 199        set_fs(KERNEL_DS);
 200        pagefault_disable();
 201
 202        do {
 203                ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
 204                len++;
 205        } while (c && ret == 0 && len < MAX_STRING_SIZE);
 206
 207        pagefault_enable();
 208        set_fs(old_fs);
 209
 210        if (ret < 0)    /* Failed to check the length */
 211                *(u32 *)dest = 0;
 212        else
 213                *(u32 *)dest = len;
 214}
 215NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string_size));
 216
 217#define DEFINE_FETCH_symbol(type)                                       \
 218void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs, void *data, void *dest)\
 219{                                                                       \
 220        struct symbol_cache *sc = data;                                 \
 221        if (sc->addr)                                                   \
 222                fetch_memory_##type(regs, (void *)sc->addr, dest);      \
 223        else                                                            \
 224                *(type *)dest = 0;                                      \
 225}                                                                       \
 226NOKPROBE_SYMBOL(FETCH_FUNC_NAME(symbol, type));
 227
 228DEFINE_BASIC_FETCH_FUNCS(symbol)
 229DEFINE_FETCH_symbol(string)
 230DEFINE_FETCH_symbol(string_size)
 231
 232/* kprobes don't support file_offset fetch methods */
 233#define fetch_file_offset_u8            NULL
 234#define fetch_file_offset_u16           NULL
 235#define fetch_file_offset_u32           NULL
 236#define fetch_file_offset_u64           NULL
 237#define fetch_file_offset_string        NULL
 238#define fetch_file_offset_string_size   NULL
 239
 240/* Fetch type information table */
 241static const struct fetch_type kprobes_fetch_type_table[] = {
 242        /* Special types */
 243        [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string,
 244                                        sizeof(u32), 1, "__data_loc char[]"),
 245        [FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32,
 246                                        string_size, sizeof(u32), 0, "u32"),
 247        /* Basic types */
 248        ASSIGN_FETCH_TYPE(u8,  u8,  0),
 249        ASSIGN_FETCH_TYPE(u16, u16, 0),
 250        ASSIGN_FETCH_TYPE(u32, u32, 0),
 251        ASSIGN_FETCH_TYPE(u64, u64, 0),
 252        ASSIGN_FETCH_TYPE(s8,  u8,  1),
 253        ASSIGN_FETCH_TYPE(s16, u16, 1),
 254        ASSIGN_FETCH_TYPE(s32, u32, 1),
 255        ASSIGN_FETCH_TYPE(s64, u64, 1),
 256
 257        ASSIGN_FETCH_TYPE_END
 258};
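
     /*
      * A fetch argument may carry an explicit ":TYPE" suffix that is looked up
      * in the table above, e.g. "+0(%si):string" or "flags=%cx:u16" (register
      * names here are only x86 examples); without a suffix the argument is
      * stored as an unsigned long.
      */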
 259
 260/*
 261 * Allocate new trace_probe and initialize it (including kprobes).
 262 */
 263static struct trace_kprobe *alloc_trace_kprobe(const char *group,
 264                                             const char *event,
 265                                             void *addr,
 266                                             const char *symbol,
 267                                             unsigned long offs,
 268                                             int nargs, bool is_return)
 269{
 270        struct trace_kprobe *tk;
 271        int ret = -ENOMEM;
 272
 273        tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
 274        if (!tk)
 275                return ERR_PTR(ret);
 276
 277        if (symbol) {
 278                tk->symbol = kstrdup(symbol, GFP_KERNEL);
 279                if (!tk->symbol)
 280                        goto error;
 281                tk->rp.kp.symbol_name = tk->symbol;
 282                tk->rp.kp.offset = offs;
 283        } else
 284                tk->rp.kp.addr = addr;
 285
 286        if (is_return)
 287                tk->rp.handler = kretprobe_dispatcher;
 288        else
 289                tk->rp.kp.pre_handler = kprobe_dispatcher;
 290
 291        if (!event || !is_good_name(event)) {
 292                ret = -EINVAL;
 293                goto error;
 294        }
 295
 296        tk->tp.call.class = &tk->tp.class;
 297        tk->tp.call.name = kstrdup(event, GFP_KERNEL);
 298        if (!tk->tp.call.name)
 299                goto error;
 300
 301        if (!group || !is_good_name(group)) {
 302                ret = -EINVAL;
 303                goto error;
 304        }
 305
 306        tk->tp.class.system = kstrdup(group, GFP_KERNEL);
 307        if (!tk->tp.class.system)
 308                goto error;
 309
 310        INIT_LIST_HEAD(&tk->list);
 311        INIT_LIST_HEAD(&tk->tp.files);
 312        return tk;
 313error:
 314        kfree(tk->tp.call.name);
 315        kfree(tk->symbol);
 316        kfree(tk);
 317        return ERR_PTR(ret);
 318}
 319
 320static void free_trace_kprobe(struct trace_kprobe *tk)
 321{
 322        int i;
 323
 324        for (i = 0; i < tk->tp.nr_args; i++)
 325                traceprobe_free_probe_arg(&tk->tp.args[i]);
 326
 327        kfree(tk->tp.call.class->system);
 328        kfree(tk->tp.call.name);
 329        kfree(tk->symbol);
 330        kfree(tk);
 331}
 332
 333static struct trace_kprobe *find_trace_kprobe(const char *event,
 334                                              const char *group)
 335{
 336        struct trace_kprobe *tk;
 337
 338        list_for_each_entry(tk, &probe_list, list)
 339                if (strcmp(trace_event_name(&tk->tp.call), event) == 0 &&
 340                    strcmp(tk->tp.call.class->system, group) == 0)
 341                        return tk;
 342        return NULL;
 343}
 344
 345/*
  346 * Enable trace_probe:
  347 * if file is NULL, enable the "perf" handler, else the "trace" handler.
 348 */
 349static int
 350enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
 351{
 352        int ret = 0;
 353
 354        if (file) {
 355                struct event_file_link *link;
 356
 357                link = kmalloc(sizeof(*link), GFP_KERNEL);
 358                if (!link) {
 359                        ret = -ENOMEM;
 360                        goto out;
 361                }
 362
 363                link->file = file;
 364                list_add_tail_rcu(&link->list, &tk->tp.files);
 365
 366                tk->tp.flags |= TP_FLAG_TRACE;
 367        } else
 368                tk->tp.flags |= TP_FLAG_PROFILE;
 369
 370        if (trace_probe_is_registered(&tk->tp) && !trace_kprobe_has_gone(tk)) {
 371                if (trace_kprobe_is_return(tk))
 372                        ret = enable_kretprobe(&tk->rp);
 373                else
 374                        ret = enable_kprobe(&tk->rp.kp);
 375        }
 376 out:
 377        return ret;
 378}
 379
 380/*
  381 * Disable trace_probe:
  382 * if file is NULL, disable the "perf" handler, else the "trace" handler.
 383 */
 384static int
 385disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
 386{
 387        struct event_file_link *link = NULL;
 388        int wait = 0;
 389        int ret = 0;
 390
 391        if (file) {
 392                link = find_event_file_link(&tk->tp, file);
 393                if (!link) {
 394                        ret = -EINVAL;
 395                        goto out;
 396                }
 397
 398                list_del_rcu(&link->list);
 399                wait = 1;
 400                if (!list_empty(&tk->tp.files))
 401                        goto out;
 402
 403                tk->tp.flags &= ~TP_FLAG_TRACE;
 404        } else
 405                tk->tp.flags &= ~TP_FLAG_PROFILE;
 406
 407        if (!trace_probe_is_enabled(&tk->tp) && trace_probe_is_registered(&tk->tp)) {
 408                if (trace_kprobe_is_return(tk))
 409                        disable_kretprobe(&tk->rp);
 410                else
 411                        disable_kprobe(&tk->rp.kp);
 412                wait = 1;
 413        }
 414 out:
 415        if (wait) {
 416                /*
 417                 * Synchronize with kprobe_trace_func/kretprobe_trace_func
 418                 * to ensure disabled (all running handlers are finished).
 419                 * This is not only for kfree(), but also the caller,
 420                 * trace_remove_event_call() supposes it for releasing
 421                 * event_call related objects, which will be accessed in
 422                 * the kprobe_trace_func/kretprobe_trace_func.
 423                 */
 424                synchronize_sched();
 425                kfree(link);    /* Ignored if link == NULL */
 426        }
 427
 428        return ret;
 429}
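
     /*
      * enable_trace_kprobe()/disable_trace_kprobe() are reached through the
      * trace_event_call ->reg callback (kprobe_register() below), e.g. when a
      * user toggles the per-event enable file (illustrative path, assuming the
      * usual tracefs mount):
      *
      *   echo 1 > /sys/kernel/debug/tracing/events/kprobes/myprobe/enable
      *
      * or when a perf event is attached to or detached from the probe.
      */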
 430
 431/* Internal register function - just handle k*probes and flags */
 432static int __register_trace_kprobe(struct trace_kprobe *tk)
 433{
 434        int i, ret;
 435
 436        if (trace_probe_is_registered(&tk->tp))
 437                return -EINVAL;
 438
 439        for (i = 0; i < tk->tp.nr_args; i++)
 440                traceprobe_update_arg(&tk->tp.args[i]);
 441
  442        /* Set/clear disabled flag according to tp->flags */
 443        if (trace_probe_is_enabled(&tk->tp))
 444                tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
 445        else
 446                tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
 447
 448        if (trace_kprobe_is_return(tk))
 449                ret = register_kretprobe(&tk->rp);
 450        else
 451                ret = register_kprobe(&tk->rp.kp);
 452
 453        if (ret == 0)
 454                tk->tp.flags |= TP_FLAG_REGISTERED;
 455        else {
 456                pr_warning("Could not insert probe at %s+%lu: %d\n",
 457                           trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret);
 458                if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
  459                        pr_warning("This probe might be able to register after "
 460                                   "target module is loaded. Continue.\n");
 461                        ret = 0;
 462                } else if (ret == -EILSEQ) {
 463                        pr_warning("Probing address(0x%p) is not an "
 464                                   "instruction boundary.\n",
 465                                   tk->rp.kp.addr);
 466                        ret = -EINVAL;
 467                }
 468        }
 469
 470        return ret;
 471}
 472
 473/* Internal unregister function - just handle k*probes and flags */
 474static void __unregister_trace_kprobe(struct trace_kprobe *tk)
 475{
 476        if (trace_probe_is_registered(&tk->tp)) {
 477                if (trace_kprobe_is_return(tk))
 478                        unregister_kretprobe(&tk->rp);
 479                else
 480                        unregister_kprobe(&tk->rp.kp);
 481                tk->tp.flags &= ~TP_FLAG_REGISTERED;
 482                /* Cleanup kprobe for reuse */
 483                if (tk->rp.kp.symbol_name)
 484                        tk->rp.kp.addr = NULL;
 485        }
 486}
 487
 488/* Unregister a trace_probe and probe_event: call with locking probe_lock */
 489static int unregister_trace_kprobe(struct trace_kprobe *tk)
 490{
 491        /* Enabled event can not be unregistered */
 492        if (trace_probe_is_enabled(&tk->tp))
 493                return -EBUSY;
 494
 495        /* Will fail if probe is being used by ftrace or perf */
 496        if (unregister_kprobe_event(tk))
 497                return -EBUSY;
 498
 499        __unregister_trace_kprobe(tk);
 500        list_del(&tk->list);
 501
 502        return 0;
 503}
 504
 505/* Register a trace_probe and probe_event */
 506static int register_trace_kprobe(struct trace_kprobe *tk)
 507{
 508        struct trace_kprobe *old_tk;
 509        int ret;
 510
 511        mutex_lock(&probe_lock);
 512
  513        /* Delete old (same name) event if it exists */
 514        old_tk = find_trace_kprobe(trace_event_name(&tk->tp.call),
 515                        tk->tp.call.class->system);
 516        if (old_tk) {
 517                ret = unregister_trace_kprobe(old_tk);
 518                if (ret < 0)
 519                        goto end;
 520                free_trace_kprobe(old_tk);
 521        }
 522
 523        /* Register new event */
 524        ret = register_kprobe_event(tk);
 525        if (ret) {
 526                pr_warning("Failed to register probe event(%d)\n", ret);
 527                goto end;
 528        }
 529
 530        /* Register k*probe */
 531        ret = __register_trace_kprobe(tk);
 532        if (ret < 0)
 533                unregister_kprobe_event(tk);
 534        else
 535                list_add_tail(&tk->list, &probe_list);
 536
 537end:
 538        mutex_unlock(&probe_lock);
 539        return ret;
 540}
 541
  542/* Module notifier callback, checking events on the module */
 543static int trace_kprobe_module_callback(struct notifier_block *nb,
 544                                       unsigned long val, void *data)
 545{
 546        struct module *mod = data;
 547        struct trace_kprobe *tk;
 548        int ret;
 549
 550        if (val != MODULE_STATE_COMING)
 551                return NOTIFY_DONE;
 552
 553        /* Update probes on coming module */
 554        mutex_lock(&probe_lock);
 555        list_for_each_entry(tk, &probe_list, list) {
 556                if (trace_kprobe_within_module(tk, mod)) {
 557                        /* Don't need to check busy - this should have gone. */
 558                        __unregister_trace_kprobe(tk);
 559                        ret = __register_trace_kprobe(tk);
 560                        if (ret)
  561                                pr_warning("Failed to re-register probe %s on "
 562                                           "%s: %d\n",
 563                                           trace_event_name(&tk->tp.call),
 564                                           mod->name, ret);
 565                }
 566        }
 567        mutex_unlock(&probe_lock);
 568
 569        return NOTIFY_DONE;
 570}
 571
 572static struct notifier_block trace_kprobe_module_nb = {
 573        .notifier_call = trace_kprobe_module_callback,
 574        .priority = 1   /* Invoked after kprobe module callback */
 575};
 576
 577static int create_trace_kprobe(int argc, char **argv)
 578{
 579        /*
 580         * Argument syntax:
 581         *  - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS]
 582         *  - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS]
 583         * Fetch args:
 584         *  $retval     : fetch return value
 585         *  $stack      : fetch stack address
 586         *  $stackN     : fetch Nth of stack (N:0-)
 587         *  @ADDR       : fetch memory at ADDR (ADDR should be in kernel)
 588         *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
 589         *  %REG        : fetch register REG
 590         * Dereferencing memory fetch:
 591         *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
 592         * Alias name of args:
 593         *  NAME=FETCHARG : set NAME as alias of FETCHARG.
 594         * Type of args:
 595         *  FETCHARG:TYPE : use TYPE instead of unsigned long.
 596         */
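             /*
              * For example (illustrative only; x86 register names, tracefs at
              * its usual debugfs location):
              *
              *   echo 'p:myprobe do_sys_open dfd=%ax filename=+0(%si):string' \
              *        > /sys/kernel/debug/tracing/kprobe_events
              *   echo 'r:myretprobe do_sys_open $retval' \
              *        >> /sys/kernel/debug/tracing/kprobe_events
              *   echo '-:myprobe' >> /sys/kernel/debug/tracing/kprobe_events
              */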
 597        struct trace_kprobe *tk;
 598        int i, ret = 0;
 599        bool is_return = false, is_delete = false;
 600        char *symbol = NULL, *event = NULL, *group = NULL;
 601        char *arg;
 602        unsigned long offset = 0;
 603        void *addr = NULL;
 604        char buf[MAX_EVENT_NAME_LEN];
 605
 606        /* argc must be >= 1 */
 607        if (argv[0][0] == 'p')
 608                is_return = false;
 609        else if (argv[0][0] == 'r')
 610                is_return = true;
 611        else if (argv[0][0] == '-')
 612                is_delete = true;
 613        else {
 614                pr_info("Probe definition must be started with 'p', 'r' or"
 615                        " '-'.\n");
 616                return -EINVAL;
 617        }
 618
 619        if (argv[0][1] == ':') {
 620                event = &argv[0][2];
 621                if (strchr(event, '/')) {
 622                        group = event;
 623                        event = strchr(group, '/') + 1;
 624                        event[-1] = '\0';
 625                        if (strlen(group) == 0) {
 626                                pr_info("Group name is not specified\n");
 627                                return -EINVAL;
 628                        }
 629                }
 630                if (strlen(event) == 0) {
 631                        pr_info("Event name is not specified\n");
 632                        return -EINVAL;
 633                }
 634        }
 635        if (!group)
 636                group = KPROBE_EVENT_SYSTEM;
 637
 638        if (is_delete) {
 639                if (!event) {
 640                        pr_info("Delete command needs an event name.\n");
 641                        return -EINVAL;
 642                }
 643                mutex_lock(&probe_lock);
 644                tk = find_trace_kprobe(event, group);
 645                if (!tk) {
 646                        mutex_unlock(&probe_lock);
 647                        pr_info("Event %s/%s doesn't exist.\n", group, event);
 648                        return -ENOENT;
 649                }
 650                /* delete an event */
 651                ret = unregister_trace_kprobe(tk);
 652                if (ret == 0)
 653                        free_trace_kprobe(tk);
 654                mutex_unlock(&probe_lock);
 655                return ret;
 656        }
 657
 658        if (argc < 2) {
 659                pr_info("Probe point is not specified.\n");
 660                return -EINVAL;
 661        }
 662        if (isdigit(argv[1][0])) {
 663                if (is_return) {
 664                        pr_info("Return probe point must be a symbol.\n");
 665                        return -EINVAL;
 666                }
 667                /* an address specified */
 668                ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
 669                if (ret) {
 670                        pr_info("Failed to parse address.\n");
 671                        return ret;
 672                }
 673        } else {
 674                /* a symbol specified */
 675                symbol = argv[1];
 676                /* TODO: support .init module functions */
 677                ret = traceprobe_split_symbol_offset(symbol, &offset);
 678                if (ret) {
 679                        pr_info("Failed to parse symbol.\n");
 680                        return ret;
 681                }
 682                if (offset && is_return) {
 683                        pr_info("Return probe must be used without offset.\n");
 684                        return -EINVAL;
 685                }
 686        }
 687        argc -= 2; argv += 2;
 688
 689        /* setup a probe */
 690        if (!event) {
 691                /* Make a new event name */
 692                if (symbol)
 693                        snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_%ld",
 694                                 is_return ? 'r' : 'p', symbol, offset);
 695                else
 696                        snprintf(buf, MAX_EVENT_NAME_LEN, "%c_0x%p",
 697                                 is_return ? 'r' : 'p', addr);
 698                event = buf;
 699        }
 700        tk = alloc_trace_kprobe(group, event, addr, symbol, offset, argc,
 701                               is_return);
 702        if (IS_ERR(tk)) {
 703                pr_info("Failed to allocate trace_probe.(%d)\n",
 704                        (int)PTR_ERR(tk));
 705                return PTR_ERR(tk);
 706        }
 707
 708        /* parse arguments */
 709        ret = 0;
 710        for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
 711                struct probe_arg *parg = &tk->tp.args[i];
 712
 713                /* Increment count for freeing args in error case */
 714                tk->tp.nr_args++;
 715
 716                /* Parse argument name */
 717                arg = strchr(argv[i], '=');
 718                if (arg) {
 719                        *arg++ = '\0';
 720                        parg->name = kstrdup(argv[i], GFP_KERNEL);
 721                } else {
 722                        arg = argv[i];
 723                        /* If argument name is omitted, set "argN" */
 724                        snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
 725                        parg->name = kstrdup(buf, GFP_KERNEL);
 726                }
 727
 728                if (!parg->name) {
 729                        pr_info("Failed to allocate argument[%d] name.\n", i);
 730                        ret = -ENOMEM;
 731                        goto error;
 732                }
 733
 734                if (!is_good_name(parg->name)) {
 735                        pr_info("Invalid argument[%d] name: %s\n",
 736                                i, parg->name);
 737                        ret = -EINVAL;
 738                        goto error;
 739                }
 740
 741                if (traceprobe_conflict_field_name(parg->name,
 742                                                        tk->tp.args, i)) {
 743                        pr_info("Argument[%d] name '%s' conflicts with "
 744                                "another field.\n", i, argv[i]);
 745                        ret = -EINVAL;
 746                        goto error;
 747                }
 748
 749                /* Parse fetch argument */
 750                ret = traceprobe_parse_probe_arg(arg, &tk->tp.size, parg,
 751                                                is_return, true,
 752                                                kprobes_fetch_type_table);
 753                if (ret) {
 754                        pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
 755                        goto error;
 756                }
 757        }
 758
 759        ret = register_trace_kprobe(tk);
 760        if (ret)
 761                goto error;
 762        return 0;
 763
 764error:
 765        free_trace_kprobe(tk);
 766        return ret;
 767}
 768
 769static int release_all_trace_kprobes(void)
 770{
 771        struct trace_kprobe *tk;
 772        int ret = 0;
 773
 774        mutex_lock(&probe_lock);
 775        /* Ensure no probe is in use. */
 776        list_for_each_entry(tk, &probe_list, list)
 777                if (trace_probe_is_enabled(&tk->tp)) {
 778                        ret = -EBUSY;
 779                        goto end;
 780                }
 781        /* TODO: Use batch unregistration */
 782        while (!list_empty(&probe_list)) {
 783                tk = list_entry(probe_list.next, struct trace_kprobe, list);
 784                ret = unregister_trace_kprobe(tk);
 785                if (ret)
 786                        goto end;
 787                free_trace_kprobe(tk);
 788        }
 789
 790end:
 791        mutex_unlock(&probe_lock);
 792
 793        return ret;
 794}
 795
 796/* Probes listing interfaces */
 797static void *probes_seq_start(struct seq_file *m, loff_t *pos)
 798{
 799        mutex_lock(&probe_lock);
 800        return seq_list_start(&probe_list, *pos);
 801}
 802
 803static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
 804{
 805        return seq_list_next(v, &probe_list, pos);
 806}
 807
 808static void probes_seq_stop(struct seq_file *m, void *v)
 809{
 810        mutex_unlock(&probe_lock);
 811}
 812
 813static int probes_seq_show(struct seq_file *m, void *v)
 814{
 815        struct trace_kprobe *tk = v;
 816        int i;
 817
 818        seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
 819        seq_printf(m, ":%s/%s", tk->tp.call.class->system,
 820                        trace_event_name(&tk->tp.call));
 821
 822        if (!tk->symbol)
 823                seq_printf(m, " 0x%p", tk->rp.kp.addr);
 824        else if (tk->rp.kp.offset)
 825                seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
 826                           tk->rp.kp.offset);
 827        else
 828                seq_printf(m, " %s", trace_kprobe_symbol(tk));
 829
 830        for (i = 0; i < tk->tp.nr_args; i++)
 831                seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
 832        seq_putc(m, '\n');
 833
 834        return 0;
 835}
 836
 837static const struct seq_operations probes_seq_op = {
 838        .start  = probes_seq_start,
 839        .next   = probes_seq_next,
 840        .stop   = probes_seq_stop,
 841        .show   = probes_seq_show
 842};
 843
 844static int probes_open(struct inode *inode, struct file *file)
 845{
 846        int ret;
 847
 848        if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
 849                ret = release_all_trace_kprobes();
 850                if (ret < 0)
 851                        return ret;
 852        }
 853
 854        return seq_open(file, &probes_seq_op);
 855}
 856
 857static ssize_t probes_write(struct file *file, const char __user *buffer,
 858                            size_t count, loff_t *ppos)
 859{
 860        return traceprobe_probes_write(file, buffer, count, ppos,
 861                        create_trace_kprobe);
 862}
 863
 864static const struct file_operations kprobe_events_ops = {
 865        .owner          = THIS_MODULE,
 866        .open           = probes_open,
 867        .read           = seq_read,
 868        .llseek         = seq_lseek,
 869        .release        = seq_release,
 870        .write          = probes_write,
 871};
 872
 873/* Probes profiling interfaces */
 874static int probes_profile_seq_show(struct seq_file *m, void *v)
 875{
 876        struct trace_kprobe *tk = v;
 877
 878        seq_printf(m, "  %-44s %15lu %15lu\n",
 879                   trace_event_name(&tk->tp.call), tk->nhit,
 880                   tk->rp.kp.nmissed);
 881
 882        return 0;
 883}
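
     /*
      * Each kprobe_profile line printed above looks roughly like this
      * (columns: event name, hit count, missed count; values illustrative):
      *
      *   myprobe                                          123               0
      */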
 884
 885static const struct seq_operations profile_seq_op = {
 886        .start  = probes_seq_start,
 887        .next   = probes_seq_next,
 888        .stop   = probes_seq_stop,
 889        .show   = probes_profile_seq_show
 890};
 891
 892static int profile_open(struct inode *inode, struct file *file)
 893{
 894        return seq_open(file, &profile_seq_op);
 895}
 896
 897static const struct file_operations kprobe_profile_ops = {
 898        .owner          = THIS_MODULE,
 899        .open           = profile_open,
 900        .read           = seq_read,
 901        .llseek         = seq_lseek,
 902        .release        = seq_release,
 903};
 904
 905/* Kprobe handler */
 906static nokprobe_inline void
 907__kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
 908                    struct trace_event_file *trace_file)
 909{
 910        struct kprobe_trace_entry_head *entry;
 911        struct ring_buffer_event *event;
 912        struct ring_buffer *buffer;
 913        int size, dsize, pc;
 914        unsigned long irq_flags;
 915        struct trace_event_call *call = &tk->tp.call;
 916
 917        WARN_ON(call != trace_file->event_call);
 918
 919        if (trace_trigger_soft_disabled(trace_file))
 920                return;
 921
 922        local_save_flags(irq_flags);
 923        pc = preempt_count();
 924
 925        dsize = __get_data_size(&tk->tp, regs);
 926        size = sizeof(*entry) + tk->tp.size + dsize;
 927
 928        event = trace_event_buffer_lock_reserve(&buffer, trace_file,
 929                                                call->event.type,
 930                                                size, irq_flags, pc);
 931        if (!event)
 932                return;
 933
 934        entry = ring_buffer_event_data(event);
 935        entry->ip = (unsigned long)tk->rp.kp.addr;
 936        store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
 937
 938        event_trigger_unlock_commit_regs(trace_file, buffer, event,
 939                                         entry, irq_flags, pc, regs);
 940}
 941
 942static void
 943kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
 944{
 945        struct event_file_link *link;
 946
 947        list_for_each_entry_rcu(link, &tk->tp.files, list)
 948                __kprobe_trace_func(tk, regs, link->file);
 949}
 950NOKPROBE_SYMBOL(kprobe_trace_func);
 951
 952/* Kretprobe handler */
 953static nokprobe_inline void
 954__kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
 955                       struct pt_regs *regs,
 956                       struct trace_event_file *trace_file)
 957{
 958        struct kretprobe_trace_entry_head *entry;
 959        struct ring_buffer_event *event;
 960        struct ring_buffer *buffer;
 961        int size, pc, dsize;
 962        unsigned long irq_flags;
 963        struct trace_event_call *call = &tk->tp.call;
 964
 965        WARN_ON(call != trace_file->event_call);
 966
 967        if (trace_trigger_soft_disabled(trace_file))
 968                return;
 969
 970        local_save_flags(irq_flags);
 971        pc = preempt_count();
 972
 973        dsize = __get_data_size(&tk->tp, regs);
 974        size = sizeof(*entry) + tk->tp.size + dsize;
 975
 976        event = trace_event_buffer_lock_reserve(&buffer, trace_file,
 977                                                call->event.type,
 978                                                size, irq_flags, pc);
 979        if (!event)
 980                return;
 981
 982        entry = ring_buffer_event_data(event);
 983        entry->func = (unsigned long)tk->rp.kp.addr;
 984        entry->ret_ip = (unsigned long)ri->ret_addr;
 985        store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
 986
 987        event_trigger_unlock_commit_regs(trace_file, buffer, event,
 988                                         entry, irq_flags, pc, regs);
 989}
 990
 991static void
 992kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
 993                     struct pt_regs *regs)
 994{
 995        struct event_file_link *link;
 996
 997        list_for_each_entry_rcu(link, &tk->tp.files, list)
 998                __kretprobe_trace_func(tk, ri, regs, link->file);
 999}
1000NOKPROBE_SYMBOL(kretprobe_trace_func);
1001
1002/* Event entry printers */
1003static enum print_line_t
1004print_kprobe_event(struct trace_iterator *iter, int flags,
1005                   struct trace_event *event)
1006{
1007        struct kprobe_trace_entry_head *field;
1008        struct trace_seq *s = &iter->seq;
1009        struct trace_probe *tp;
1010        u8 *data;
1011        int i;
1012
1013        field = (struct kprobe_trace_entry_head *)iter->ent;
1014        tp = container_of(event, struct trace_probe, call.event);
1015
1016        trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));
1017
1018        if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
1019                goto out;
1020
1021        trace_seq_putc(s, ')');
1022
1023        data = (u8 *)&field[1];
1024        for (i = 0; i < tp->nr_args; i++)
1025                if (!tp->args[i].type->print(s, tp->args[i].name,
1026                                             data + tp->args[i].offset, field))
1027                        goto out;
1028
1029        trace_seq_putc(s, '\n');
1030 out:
1031        return trace_handle_return(s);
1032}
1033
1034static enum print_line_t
1035print_kretprobe_event(struct trace_iterator *iter, int flags,
1036                      struct trace_event *event)
1037{
1038        struct kretprobe_trace_entry_head *field;
1039        struct trace_seq *s = &iter->seq;
1040        struct trace_probe *tp;
1041        u8 *data;
1042        int i;
1043
1044        field = (struct kretprobe_trace_entry_head *)iter->ent;
1045        tp = container_of(event, struct trace_probe, call.event);
1046
1047        trace_seq_printf(s, "%s: (", trace_event_name(&tp->call));
1048
1049        if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1050                goto out;
1051
1052        trace_seq_puts(s, " <- ");
1053
1054        if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1055                goto out;
1056
1057        trace_seq_putc(s, ')');
1058
1059        data = (u8 *)&field[1];
1060        for (i = 0; i < tp->nr_args; i++)
1061                if (!tp->args[i].type->print(s, tp->args[i].name,
1062                                             data + tp->args[i].offset, field))
1063                        goto out;
1064
1065        trace_seq_putc(s, '\n');
1066
1067 out:
1068        return trace_handle_return(s);
1069}
1070
1071
1072static int kprobe_event_define_fields(struct trace_event_call *event_call)
1073{
1074        int ret, i;
1075        struct kprobe_trace_entry_head field;
1076        struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
1077
1078        DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1079        /* Set argument names as fields */
1080        for (i = 0; i < tk->tp.nr_args; i++) {
1081                struct probe_arg *parg = &tk->tp.args[i];
1082
1083                ret = trace_define_field(event_call, parg->type->fmttype,
1084                                         parg->name,
1085                                         sizeof(field) + parg->offset,
1086                                         parg->type->size,
1087                                         parg->type->is_signed,
1088                                         FILTER_OTHER);
1089                if (ret)
1090                        return ret;
1091        }
1092        return 0;
1093}
1094
1095static int kretprobe_event_define_fields(struct trace_event_call *event_call)
1096{
1097        int ret, i;
1098        struct kretprobe_trace_entry_head field;
1099        struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
1100
1101        DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1102        DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1103        /* Set argument names as fields */
1104        for (i = 0; i < tk->tp.nr_args; i++) {
1105                struct probe_arg *parg = &tk->tp.args[i];
1106
1107                ret = trace_define_field(event_call, parg->type->fmttype,
1108                                         parg->name,
1109                                         sizeof(field) + parg->offset,
1110                                         parg->type->size,
1111                                         parg->type->is_signed,
1112                                         FILTER_OTHER);
1113                if (ret)
1114                        return ret;
1115        }
1116        return 0;
1117}
1118
1119#ifdef CONFIG_PERF_EVENTS
1120
1121/* Kprobe profile handler */
1122static void
1123kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
1124{
1125        struct trace_event_call *call = &tk->tp.call;
1126        struct bpf_prog *prog = call->prog;
1127        struct kprobe_trace_entry_head *entry;
1128        struct hlist_head *head;
1129        int size, __size, dsize;
1130        int rctx;
1131
1132        if (prog && !trace_call_bpf(prog, regs))
1133                return;
1134
1135        head = this_cpu_ptr(call->perf_events);
1136        if (hlist_empty(head))
1137                return;
1138
1139        dsize = __get_data_size(&tk->tp, regs);
1140        __size = sizeof(*entry) + tk->tp.size + dsize;
1141        size = ALIGN(__size + sizeof(u32), sizeof(u64));
1142        size -= sizeof(u32);
1143
1144        entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
1145        if (!entry)
1146                return;
1147
1148        entry->ip = (unsigned long)tk->rp.kp.addr;
1149        memset(&entry[1], 0, dsize);
1150        store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
1151        perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
1152}
1153NOKPROBE_SYMBOL(kprobe_perf_func);
1154
1155/* Kretprobe profile handler */
1156static void
1157kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
1158                    struct pt_regs *regs)
1159{
1160        struct trace_event_call *call = &tk->tp.call;
1161        struct bpf_prog *prog = call->prog;
1162        struct kretprobe_trace_entry_head *entry;
1163        struct hlist_head *head;
1164        int size, __size, dsize;
1165        int rctx;
1166
1167        if (prog && !trace_call_bpf(prog, regs))
1168                return;
1169
1170        head = this_cpu_ptr(call->perf_events);
1171        if (hlist_empty(head))
1172                return;
1173
1174        dsize = __get_data_size(&tk->tp, regs);
1175        __size = sizeof(*entry) + tk->tp.size + dsize;
1176        size = ALIGN(__size + sizeof(u32), sizeof(u64));
1177        size -= sizeof(u32);
1178
1179        entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
1180        if (!entry)
1181                return;
1182
1183        entry->func = (unsigned long)tk->rp.kp.addr;
1184        entry->ret_ip = (unsigned long)ri->ret_addr;
1185        store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
1186        perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
1187}
1188NOKPROBE_SYMBOL(kretprobe_perf_func);
1189#endif  /* CONFIG_PERF_EVENTS */
1190
1191/*
1192 * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
1193 *
 1194 * kprobe_trace_self_tests_init() does enable_trace_kprobe/disable_trace_kprobe
1195 * lockless, but we can't race with this __init function.
1196 */
1197static int kprobe_register(struct trace_event_call *event,
1198                           enum trace_reg type, void *data)
1199{
1200        struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
1201        struct trace_event_file *file = data;
1202
1203        switch (type) {
1204        case TRACE_REG_REGISTER:
1205                return enable_trace_kprobe(tk, file);
1206        case TRACE_REG_UNREGISTER:
1207                return disable_trace_kprobe(tk, file);
1208
1209#ifdef CONFIG_PERF_EVENTS
1210        case TRACE_REG_PERF_REGISTER:
1211                return enable_trace_kprobe(tk, NULL);
1212        case TRACE_REG_PERF_UNREGISTER:
1213                return disable_trace_kprobe(tk, NULL);
1214        case TRACE_REG_PERF_OPEN:
1215        case TRACE_REG_PERF_CLOSE:
1216        case TRACE_REG_PERF_ADD:
1217        case TRACE_REG_PERF_DEL:
1218                return 0;
1219#endif
1220        }
1221        return 0;
1222}
1223
1224static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1225{
1226        struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
1227
1228        tk->nhit++;
1229
1230        if (tk->tp.flags & TP_FLAG_TRACE)
1231                kprobe_trace_func(tk, regs);
1232#ifdef CONFIG_PERF_EVENTS
1233        if (tk->tp.flags & TP_FLAG_PROFILE)
1234                kprobe_perf_func(tk, regs);
1235#endif
 1236        return 0;       /* We don't tweak the kernel, so just return 0 */
1237}
1238NOKPROBE_SYMBOL(kprobe_dispatcher);
1239
1240static int
1241kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1242{
1243        struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);
1244
1245        tk->nhit++;
1246
1247        if (tk->tp.flags & TP_FLAG_TRACE)
1248                kretprobe_trace_func(tk, ri, regs);
1249#ifdef CONFIG_PERF_EVENTS
1250        if (tk->tp.flags & TP_FLAG_PROFILE)
1251                kretprobe_perf_func(tk, ri, regs);
1252#endif
 1253        return 0;       /* We don't tweak the kernel, so just return 0 */
1254}
1255NOKPROBE_SYMBOL(kretprobe_dispatcher);
1256
1257static struct trace_event_functions kretprobe_funcs = {
1258        .trace          = print_kretprobe_event
1259};
1260
1261static struct trace_event_functions kprobe_funcs = {
1262        .trace          = print_kprobe_event
1263};
1264
1265static int register_kprobe_event(struct trace_kprobe *tk)
1266{
1267        struct trace_event_call *call = &tk->tp.call;
1268        int ret;
1269
1270        /* Initialize trace_event_call */
1271        INIT_LIST_HEAD(&call->class->fields);
1272        if (trace_kprobe_is_return(tk)) {
1273                call->event.funcs = &kretprobe_funcs;
1274                call->class->define_fields = kretprobe_event_define_fields;
1275        } else {
1276                call->event.funcs = &kprobe_funcs;
1277                call->class->define_fields = kprobe_event_define_fields;
1278        }
1279        if (set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0)
1280                return -ENOMEM;
1281        ret = register_trace_event(&call->event);
1282        if (!ret) {
1283                kfree(call->print_fmt);
1284                return -ENODEV;
1285        }
1286        call->flags = TRACE_EVENT_FL_KPROBE;
1287        call->class->reg = kprobe_register;
1288        call->data = tk;
1289        ret = trace_add_event_call(call);
1290        if (ret) {
1291                pr_info("Failed to register kprobe event: %s\n",
1292                        trace_event_name(call));
1293                kfree(call->print_fmt);
1294                unregister_trace_event(&call->event);
1295        }
1296        return ret;
1297}
1298
1299static int unregister_kprobe_event(struct trace_kprobe *tk)
1300{
1301        int ret;
1302
1303        /* tp->event is unregistered in trace_remove_event_call() */
1304        ret = trace_remove_event_call(&tk->tp.call);
1305        if (!ret)
1306                kfree(tk->tp.call.print_fmt);
1307        return ret;
1308}
1309
1310/* Make a tracefs interface for controlling probe points */
1311static __init int init_kprobe_trace(void)
1312{
1313        struct dentry *d_tracer;
1314        struct dentry *entry;
1315
1316        if (register_module_notifier(&trace_kprobe_module_nb))
1317                return -EINVAL;
1318
1319        d_tracer = tracing_init_dentry();
1320        if (IS_ERR(d_tracer))
1321                return 0;
1322
1323        entry = tracefs_create_file("kprobe_events", 0644, d_tracer,
1324                                    NULL, &kprobe_events_ops);
1325
1326        /* Event list interface */
1327        if (!entry)
1328                pr_warning("Could not create tracefs "
1329                           "'kprobe_events' entry\n");
1330
1331        /* Profile interface */
1332        entry = tracefs_create_file("kprobe_profile", 0444, d_tracer,
1333                                    NULL, &kprobe_profile_ops);
1334
1335        if (!entry)
1336                pr_warning("Could not create tracefs "
1337                           "'kprobe_profile' entry\n");
1338        return 0;
1339}
1340fs_initcall(init_kprobe_trace);
1341
1342
1343#ifdef CONFIG_FTRACE_STARTUP_TEST
1344
1345/*
1346 * The "__used" keeps gcc from removing the function symbol
1347 * from the kallsyms table.
1348 */
1349static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
1350                                               int a4, int a5, int a6)
1351{
1352        return a1 + a2 + a3 + a4 + a5 + a6;
1353}
1354
1355static struct trace_event_file *
1356find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
1357{
1358        struct trace_event_file *file;
1359
1360        list_for_each_entry(file, &tr->events, list)
1361                if (file->event_call == &tk->tp.call)
1362                        return file;
1363
1364        return NULL;
1365}
1366
1367/*
1368 * Nobody but us can call enable_trace_kprobe/disable_trace_kprobe at this
1369 * stage, we can do this lockless.
1370 */
1371static __init int kprobe_trace_self_tests_init(void)
1372{
1373        int ret, warn = 0;
1374        int (*target)(int, int, int, int, int, int);
1375        struct trace_kprobe *tk;
1376        struct trace_event_file *file;
1377
1378        if (tracing_is_disabled())
1379                return -ENODEV;
1380
1381        target = kprobe_trace_selftest_target;
1382
1383        pr_info("Testing kprobe tracing: ");
1384
1385        ret = traceprobe_command("p:testprobe kprobe_trace_selftest_target "
1386                                  "$stack $stack0 +0($stack)",
1387                                  create_trace_kprobe);
1388        if (WARN_ON_ONCE(ret)) {
1389                pr_warn("error on probing function entry.\n");
1390                warn++;
1391        } else {
1392                /* Enable trace point */
1393                tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1394                if (WARN_ON_ONCE(tk == NULL)) {
1395                        pr_warn("error on getting new probe.\n");
1396                        warn++;
1397                } else {
1398                        file = find_trace_probe_file(tk, top_trace_array());
1399                        if (WARN_ON_ONCE(file == NULL)) {
1400                                pr_warn("error on getting probe file.\n");
1401                                warn++;
1402                        } else
1403                                enable_trace_kprobe(tk, file);
1404                }
1405        }
1406
1407        ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
1408                                  "$retval", create_trace_kprobe);
1409        if (WARN_ON_ONCE(ret)) {
1410                pr_warn("error on probing function return.\n");
1411                warn++;
1412        } else {
1413                /* Enable trace point */
1414                tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1415                if (WARN_ON_ONCE(tk == NULL)) {
1416                        pr_warn("error on getting 2nd new probe.\n");
1417                        warn++;
1418                } else {
1419                        file = find_trace_probe_file(tk, top_trace_array());
1420                        if (WARN_ON_ONCE(file == NULL)) {
1421                                pr_warn("error on getting probe file.\n");
1422                                warn++;
1423                        } else
1424                                enable_trace_kprobe(tk, file);
1425                }
1426        }
1427
1428        if (warn)
1429                goto end;
1430
1431        ret = target(1, 2, 3, 4, 5, 6);
1432
1433        /* Disable trace points before removing it */
1434        tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
1435        if (WARN_ON_ONCE(tk == NULL)) {
1436                pr_warn("error on getting test probe.\n");
1437                warn++;
1438        } else {
1439                file = find_trace_probe_file(tk, top_trace_array());
1440                if (WARN_ON_ONCE(file == NULL)) {
1441                        pr_warn("error on getting probe file.\n");
1442                        warn++;
1443                } else
1444                        disable_trace_kprobe(tk, file);
1445        }
1446
1447        tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
1448        if (WARN_ON_ONCE(tk == NULL)) {
1449                pr_warn("error on getting 2nd test probe.\n");
1450                warn++;
1451        } else {
1452                file = find_trace_probe_file(tk, top_trace_array());
1453                if (WARN_ON_ONCE(file == NULL)) {
1454                        pr_warn("error on getting probe file.\n");
1455                        warn++;
1456                } else
1457                        disable_trace_kprobe(tk, file);
1458        }
1459
1460        ret = traceprobe_command("-:testprobe", create_trace_kprobe);
1461        if (WARN_ON_ONCE(ret)) {
1462                pr_warn("error on deleting a probe.\n");
1463                warn++;
1464        }
1465
1466        ret = traceprobe_command("-:testprobe2", create_trace_kprobe);
1467        if (WARN_ON_ONCE(ret)) {
1468                pr_warn("error on deleting a probe.\n");
1469                warn++;
1470        }
1471
1472end:
1473        release_all_trace_kprobes();
1474        if (warn)
 1475                pr_cont("NG: Some tests failed. Please check them.\n");
1476        else
1477                pr_cont("OK\n");
1478        return 0;
1479}
1480
1481late_initcall(kprobe_trace_self_tests_init);
1482
1483#endif
1484