linux/kernel/trace/trace_selftest.c
// SPDX-License-Identifier: GPL-2.0
/* Include in trace.c */

#include <uapi/linux/sched/types.h>
#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
        switch (entry->type) {
        case TRACE_FN:
        case TRACE_CTX:
        case TRACE_WAKE:
        case TRACE_STACK:
        case TRACE_PRINT:
        case TRACE_BRANCH:
        case TRACE_GRAPH_ENT:
        case TRACE_GRAPH_RET:
                return 1;
        }
        return 0;
}

static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu)
{
        struct ring_buffer_event *event;
        struct trace_entry *entry;
        unsigned int loops = 0;

        while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
                entry = ring_buffer_event_data(event);

                /*
                 * The ring buffer holds at most trace_buf_size entries;
                 * if we loop more than that, something is wrong
                 * with the ring buffer.
                 */
                if (loops++ > trace_buf_size) {
                        printk(KERN_CONT ".. bad ring buffer ");
                        goto failed;
                }
                if (!trace_valid_entry(entry)) {
                        printk(KERN_CONT ".. invalid entry %d ",
                                entry->type);
                        goto failed;
                }
        }
        return 0;

 failed:
        /* disable tracing */
        tracing_disabled = 1;
        printk(KERN_CONT ".. corrupted trace buffer .. ");
        return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)
{
        unsigned long flags, cnt = 0;
        int cpu, ret = 0;

        /* Don't allow flipping of max traces now */
        local_irq_save(flags);
        arch_spin_lock(&buf->tr->max_lock);

        cnt = ring_buffer_entries(buf->buffer);

        /*
         * The trace_test_buffer_cpu runs a while loop to consume all data.
         * If the calling tracer is broken, and is constantly filling
         * the buffer, this will run forever, and hard lock the box.
         * We disable the ring buffer while we do this test to prevent
         * a hard lock up.
         */
        tracing_off();
        for_each_possible_cpu(cpu) {
                ret = trace_test_buffer_cpu(buf, cpu);
                if (ret)
                        break;
        }
        tracing_on();
        arch_spin_unlock(&buf->tr->max_lock);
        local_irq_restore(flags);

        if (count)
                *count = cnt;

        return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
        printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
                trace->name, init_ret);
}

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct ftrace_regs *fregs)
{
        trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct ftrace_regs *fregs)
{
        trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct ftrace_regs *fregs)
{
        trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
                                            unsigned long pip,
                                            struct ftrace_ops *op,
                                            struct ftrace_regs *fregs)
{
        trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
                                         unsigned long pip,
                                         struct ftrace_ops *op,
                                         struct ftrace_regs *fregs)
{
        trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
        .func                   = trace_selftest_test_probe1_func,
};

static struct ftrace_ops test_probe2 = {
        .func                   = trace_selftest_test_probe2_func,
};

static struct ftrace_ops test_probe3 = {
        .func                   = trace_selftest_test_probe3_func,
};
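
/*
 * For reference, the minimal lifecycle of one of these probes looks
 * roughly like the sketch below. This is an illustrative sketch only,
 * kept out of the build with #if 0; my_probe_func, my_probe and
 * "my_func*" are placeholder names, not symbols defined in this file.
 */
#if 0
static void my_probe_func(unsigned long ip, unsigned long pip,
                          struct ftrace_ops *op, struct ftrace_regs *fregs)
{
        /* called on entry of every traced function matching the filter */
}

static struct ftrace_ops my_probe = {
        .func                   = my_probe_func,
};

static int my_probe_example(void)
{
        char *pattern = "my_func*";
        int ret;

        /* reset=1 clears any previous filter before adding the match */
        ret = ftrace_set_filter(&my_probe, pattern, strlen(pattern), 1);
        if (ret)
                return ret;

        /* start receiving callbacks */
        ret = register_ftrace_function(&my_probe);
        if (ret)
                return ret;

        /* ... functions matching "my_func*" are now traced ... */

        /* stop receiving callbacks */
        unregister_ftrace_function(&my_probe);
        return 0;
}
#endif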

static void print_counts(void)
{
        printk("(%d %d %d %d %d) ",
               trace_selftest_test_probe1_cnt,
               trace_selftest_test_probe2_cnt,
               trace_selftest_test_probe3_cnt,
               trace_selftest_test_global_cnt,
               trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
        trace_selftest_test_probe1_cnt = 0;
        trace_selftest_test_probe2_cnt = 0;
        trace_selftest_test_probe3_cnt = 0;
        trace_selftest_test_global_cnt = 0;
        trace_selftest_test_dyn_cnt = 0;
}

static int trace_selftest_ops(struct trace_array *tr, int cnt)
{
        int save_ftrace_enabled = ftrace_enabled;
        struct ftrace_ops *dyn_ops;
        char *func1_name;
        char *func2_name;
        int len1;
        int len2;
        int ret = -1;

        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace ops #%d: ", cnt);

        ftrace_enabled = 1;
        reset_counts();

        /* Handle PPC64 '.' name */
        func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
        len1 = strlen(func1_name);
        len2 = strlen(func2_name);

        /*
         * Probe 1 will trace function 1.
         * Probe 2 will trace function 2.
         * Probe 3 will trace functions 1 and 2.
         */
        ftrace_set_filter(&test_probe1, func1_name, len1, 1);
        ftrace_set_filter(&test_probe2, func2_name, len2, 1);
        ftrace_set_filter(&test_probe3, func1_name, len1, 1);
        ftrace_set_filter(&test_probe3, func2_name, len2, 0);

        register_ftrace_function(&test_probe1);
        register_ftrace_function(&test_probe2);
        register_ftrace_function(&test_probe3);
        /* First time we are running with main function */
        if (cnt > 1) {
                ftrace_init_array_ops(tr, trace_selftest_test_global_func);
                register_ftrace_function(tr->ops);
        }

        DYN_FTRACE_TEST_NAME();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 1)
                goto out;
        if (trace_selftest_test_probe2_cnt != 0)
                goto out;
        if (trace_selftest_test_probe3_cnt != 1)
                goto out;
        if (cnt > 1) {
                if (trace_selftest_test_global_cnt == 0)
                        goto out;
        }

        DYN_FTRACE_TEST_NAME2();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 1)
                goto out;
        if (trace_selftest_test_probe2_cnt != 1)
                goto out;
        if (trace_selftest_test_probe3_cnt != 2)
                goto out;

        /* Add a dynamic probe */
        dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
        if (!dyn_ops) {
                printk("MEMORY ERROR ");
                goto out;
        }

        dyn_ops->func = trace_selftest_test_dyn_func;

        register_ftrace_function(dyn_ops);

        trace_selftest_test_global_cnt = 0;

        DYN_FTRACE_TEST_NAME();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 1)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 3)
                goto out_free;
        if (cnt > 1) {
                if (trace_selftest_test_global_cnt == 0)
                        goto out_free;
        }
        if (trace_selftest_test_dyn_cnt == 0)
                goto out_free;

        DYN_FTRACE_TEST_NAME2();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 4)
                goto out_free;

        /* Remove trace function from probe 3 */
        func1_name = "!" __stringify(DYN_FTRACE_TEST_NAME);
        len1 = strlen(func1_name);

        ftrace_set_filter(&test_probe3, func1_name, len1, 0);

        DYN_FTRACE_TEST_NAME();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 3)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 2)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 4)
                goto out_free;
        if (cnt > 1) {
                if (trace_selftest_test_global_cnt == 0)
                        goto out_free;
        }
        if (trace_selftest_test_dyn_cnt == 0)
                goto out_free;

        DYN_FTRACE_TEST_NAME2();

        print_counts();

        if (trace_selftest_test_probe1_cnt != 3)
                goto out_free;
        if (trace_selftest_test_probe2_cnt != 3)
                goto out_free;
        if (trace_selftest_test_probe3_cnt != 5)
                goto out_free;

        ret = 0;
 out_free:
        unregister_ftrace_function(dyn_ops);
        kfree(dyn_ops);

 out:
        /* Purposely unregister in the same order */
        unregister_ftrace_function(&test_probe1);
        unregister_ftrace_function(&test_probe2);
        unregister_ftrace_function(&test_probe3);
        if (cnt > 1)
                unregister_ftrace_function(tr->ops);
        ftrace_reset_array_ops(tr);

        /* Make sure everything is off */
        reset_counts();
        DYN_FTRACE_TEST_NAME();
        DYN_FTRACE_TEST_NAME();

        if (trace_selftest_test_probe1_cnt ||
            trace_selftest_test_probe2_cnt ||
            trace_selftest_test_probe3_cnt ||
            trace_selftest_test_global_cnt ||
            trace_selftest_test_dyn_cnt)
                ret = -1;

        ftrace_enabled = save_ftrace_enabled;

        return ret;
}
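
/*
 * The filter strings used above rely on the ftrace glob syntax: a
 * leading '*' lets the symbol start with anything (which is how the
 * PPC64 '.' prefix is accommodated), and a leading '!' removes a
 * previously matched function from the filter instead of adding one.
 * A sketch of the combinations exercised above (illustrative only,
 * kept out of the build; ops, "func1" and "func2" are placeholders):
 */
#if 0
        /* reset=1: start a fresh filter containing only func1 */
        ftrace_set_filter(&ops, "*func1", strlen("*func1"), 1);
        /* reset=0: add func2 to the existing filter */
        ftrace_set_filter(&ops, "*func2", strlen("*func2"), 0);
        /* '!' with reset=0: drop func1 again, leaving only func2 */
        ftrace_set_filter(&ops, "!func1", strlen("!func1"), 0);
#endif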

/* Test dynamic code modification and ftrace filters */
static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
                                                  struct trace_array *tr,
                                                  int (*func)(void))
{
        int save_ftrace_enabled = ftrace_enabled;
        unsigned long count;
        char *func_name;
        int ret;

        /* The ftrace test PASSED */
        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace: ");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;

        /* passed in by parameter to keep gcc from optimizing it away */
        func();

        /*
         * Some archs *cough*PowerPC*cough* add characters to the
         * start of the function names. We simply put a '*' to
         * accommodate them.
         */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

        /* filter only on our function */
        ftrace_set_global_filter(func_name, strlen(func_name), 1);

        /* enable tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* we should have nothing in the buffer */
        ret = trace_test_buffer(&tr->array_buffer, &count);
        if (ret)
                goto out;

        if (count) {
                ret = -1;
                printk(KERN_CONT ".. filter did not filter .. ");
                goto out;
        }

        /* call our function again */
        func();

        /* sleep again */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(&tr->array_buffer, &count);

        ftrace_enabled = 1;
        tracing_start();

        /* we should only have one item */
        if (!ret && count != 1) {
                trace->reset(tr);
                printk(KERN_CONT ".. filter failed count=%ld ..", count);
                ret = -1;
                goto out;
        }

        /* Test the ops with global tracing running */
        ret = trace_selftest_ops(tr, 1);
        trace->reset(tr);

 out:
        ftrace_enabled = save_ftrace_enabled;

        /* Enable tracing on all functions again */
        ftrace_set_global_filter(NULL, 0, 1);

        /* Test the ops with global tracing off */
        if (!ret)
                ret = trace_selftest_ops(tr, 2);

        return ret;
}

static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
                                               unsigned long pip,
                                               struct ftrace_ops *op,
                                               struct ftrace_regs *fregs)
{
        /*
         * This function is registered with the FTRACE_OPS_FL_RECURSION
         * flag, so the ftrace infrastructure should provide the
         * recursion protection. If not, this will crash the kernel!
         */
        if (trace_selftest_recursion_cnt++ > 10)
                return;
        DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
                                                    unsigned long pip,
                                                    struct ftrace_ops *op,
                                                    struct ftrace_regs *fregs)
{
        /*
         * We said we would provide our own recursion protection. By
         * calling this function again, we should recurse back into this
         * function and count again. But this only happens if the arch
         * supports all of ftrace's features and nothing else is using
         * the function tracing utility.
         */
        if (trace_selftest_recursion_cnt++)
                return;
        DYN_FTRACE_TEST_NAME();
}

static struct ftrace_ops test_rec_probe = {
        .func                   = trace_selftest_test_recursion_func,
        .flags                  = FTRACE_OPS_FL_RECURSION,
};

static struct ftrace_ops test_recsafe_probe = {
        .func                   = trace_selftest_test_recursion_safe_func,
};
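
/*
 * An ops registered without FTRACE_OPS_FL_RECURSION (such as
 * test_recsafe_probe above) is expected to guard against recursion
 * itself. A sketch of the usual pattern with the
 * ftrace_test_recursion_trylock()/ftrace_test_recursion_unlock()
 * helpers follows (illustrative only, kept out of the build;
 * my_recursion_safe_func is a placeholder):
 */
#if 0
static void my_recursion_safe_func(unsigned long ip, unsigned long pip,
                                   struct ftrace_ops *op,
                                   struct ftrace_regs *fregs)
{
        int bit;

        /* returns a negative value if this context already recursed */
        bit = ftrace_test_recursion_trylock(ip, pip);
        if (bit < 0)
                return;

        /* ... do the real work here ... */

        ftrace_test_recursion_unlock(bit);
}
#endif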

static int
trace_selftest_function_recursion(void)
{
        int save_ftrace_enabled = ftrace_enabled;
        char *func_name;
        int len;
        int ret;

        /* The previous test PASSED */
        pr_cont("PASSED\n");
        pr_info("Testing ftrace recursion: ");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;

        /* Handle PPC64 '.' name */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        len = strlen(func_name);

        ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
        if (ret) {
                pr_cont("*Could not set filter* ");
                goto out;
        }

        ret = register_ftrace_function(&test_rec_probe);
        if (ret) {
                pr_cont("*could not register callback* ");
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_function(&test_rec_probe);

        ret = -1;
        /*
         * Recursion allows for transitions between contexts,
         * and may call the callback twice.
         */
        if (trace_selftest_recursion_cnt != 1 &&
            trace_selftest_recursion_cnt != 2) {
                pr_cont("*callback not called once (or twice) (%d)* ",
                        trace_selftest_recursion_cnt);
                goto out;
        }

        trace_selftest_recursion_cnt = 1;

        pr_cont("PASSED\n");
        pr_info("Testing ftrace recursion safe: ");

        ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
        if (ret) {
                pr_cont("*Could not set filter* ");
                goto out;
        }

        ret = register_ftrace_function(&test_recsafe_probe);
        if (ret) {
                pr_cont("*could not register callback* ");
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_function(&test_recsafe_probe);

        ret = -1;
        if (trace_selftest_recursion_cnt != 2) {
                pr_cont("*callback not called expected 2 times (%d)* ",
                        trace_selftest_recursion_cnt);
                goto out;
        }

        ret = 0;
out:
        ftrace_enabled = save_ftrace_enabled;

        return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

static enum {
        TRACE_SELFTEST_REGS_START,
        TRACE_SELFTEST_REGS_FOUND,
        TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
                                          unsigned long pip,
                                          struct ftrace_ops *op,
                                          struct ftrace_regs *fregs)
{
        struct pt_regs *regs = ftrace_get_regs(fregs);

        if (regs)
                trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
        else
                trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
        .func           = trace_selftest_test_regs_func,
        .flags          = FTRACE_OPS_FL_SAVE_REGS,
};
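
/*
 * With FTRACE_OPS_FL_SAVE_REGS set, a callback may inspect the saved
 * pt_regs, e.g. to read the instruction pointer. A sketch (illustrative
 * only, kept out of the build; my_regs_func is a placeholder):
 */
#if 0
static void my_regs_func(unsigned long ip, unsigned long pip,
                         struct ftrace_ops *op, struct ftrace_regs *fregs)
{
        struct pt_regs *regs = ftrace_get_regs(fregs);

        /* regs is NULL when the full register set was not saved */
        if (regs)
                pr_info("traced ip: %pS\n",
                        (void *)instruction_pointer(regs));
}
#endif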

static int
trace_selftest_function_regs(void)
{
        int save_ftrace_enabled = ftrace_enabled;
        char *func_name;
        int len;
        int ret;
        int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
        supported = 1;
#endif

        /* The previous test PASSED */
        pr_cont("PASSED\n");
        pr_info("Testing ftrace regs%s: ",
                !supported ? "(no arch support)" : "");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;

        /* Handle PPC64 '.' name */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        len = strlen(func_name);

        ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
        /*
         * If DYNAMIC_FTRACE is not set, then we just trace all functions.
         * This test really doesn't care.
         */
        if (ret && ret != -ENODEV) {
                pr_cont("*Could not set filter* ");
                goto out;
        }

        ret = register_ftrace_function(&test_regs_probe);
        /*
         * Now if the arch does not support passing regs, then this should
         * have failed.
         */
        if (!supported) {
                if (!ret) {
                        pr_cont("*registered save-regs without arch support* ");
                        goto out;
                }
                test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
                ret = register_ftrace_function(&test_regs_probe);
        }
        if (ret) {
                pr_cont("*could not register callback* ");
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        unregister_ftrace_function(&test_regs_probe);

        ret = -1;

        switch (trace_selftest_regs_stat) {
        case TRACE_SELFTEST_REGS_START:
                pr_cont("*callback never called* ");
                goto out;

        case TRACE_SELFTEST_REGS_FOUND:
                if (supported)
                        break;
                pr_cont("*callback received regs without arch support* ");
                goto out;

        case TRACE_SELFTEST_REGS_NOT_FOUND:
                if (!supported)
                        break;
                pr_cont("*callback received NULL regs* ");
                goto out;
        }

        ret = 0;
out:
        ftrace_enabled = save_ftrace_enabled;

        return ret;
}

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
        int save_ftrace_enabled = ftrace_enabled;
        unsigned long count;
        int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
        if (ftrace_filter_param) {
                printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
                return 0;
        }
#endif

        /* make sure msleep has been recorded */
        msleep(1);

        /* start the tracing */
        ftrace_enabled = 1;

        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(&tr->array_buffer, &count);

        ftrace_enabled = 1;
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        ret = trace_selftest_startup_dynamic_tracing(trace, tr,
                                                     DYN_FTRACE_TEST_NAME);
        if (ret)
                goto out;

        ret = trace_selftest_function_recursion();
        if (ret)
                goto out;

        ret = trace_selftest_function_regs();
 out:
        ftrace_enabled = save_ftrace_enabled;

        /* kill ftrace totally if we failed */
        if (ret)
                ftrace_kill();

        return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST     100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
        /* This is harmlessly racy, we want to approximately detect a hang */
        if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
                ftrace_graph_stop();
                printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
                if (ftrace_dump_on_oops) {
                        ftrace_dump(DUMP_ALL);
                        /* ftrace_dump() disables tracing */
                        tracing_on();
                }
                return 0;
        }

        return trace_graph_entry(trace);
}

static struct fgraph_ops fgraph_ops __initdata = {
        .entryfunc              = &trace_graph_entry_watchdog,
        .retfunc                = &trace_graph_return,
};

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
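/*
 * trace_direct_tramp() below is deliberately empty: the direct-call
 * test only needs some trampoline attached to DYN_FTRACE_TEST_NAME so
 * it can verify that the graph tracer still produces events while a
 * direct call is registered on the same function.
 */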
noinline __noclone static void trace_direct_tramp(void) { }
#endif

/*
 * Pretty much the same as for the function tracer, from which this
 * selftest has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
                                        struct trace_array *tr)
{
        int ret;
        unsigned long count;
        char *func_name __maybe_unused;

#ifdef CONFIG_DYNAMIC_FTRACE
        if (ftrace_filter_param) {
                printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
                return 0;
        }
#endif

        /*
         * Simulate the init() callback, but attach a watchdog callback
         * to detect and recover from possible hangs.
         */
        tracing_reset_online_cpus(&tr->array_buffer);
        set_graph_array(tr);
        ret = register_ftrace_graph(&fgraph_ops);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }
        tracing_start_cmdline_record();

        /* Sleep for 1/10 of a second */
        msleep(100);

        /* Have we just recovered from a hang? */
        if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
                disable_tracing_selftest("recovering from a hang");
                ret = -1;
                goto out;
        }

        tracing_stop();

        /* check the trace buffer */
        ret = trace_test_buffer(&tr->array_buffer, &count);

        /* Need to also simulate the tr->reset to remove this fgraph_ops */
        tracing_stop_cmdline_record();
        unregister_ftrace_graph(&fgraph_ops);

        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
        tracing_reset_online_cpus(&tr->array_buffer);
        set_graph_array(tr);

        /*
         * Some archs *cough*PowerPC*cough* add characters to the
         * start of the function names. We simply put a '*' to
         * accommodate them.
         */
        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
        ftrace_set_global_filter(func_name, strlen(func_name), 1);

        /*
         * Register direct function together with graph tracer
         * and make sure we get graph trace.
         */
        ret = register_ftrace_direct((unsigned long) DYN_FTRACE_TEST_NAME,
                                     (unsigned long) trace_direct_tramp);
        if (ret)
                goto out;

        ret = register_ftrace_graph(&fgraph_ops);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        DYN_FTRACE_TEST_NAME();

        count = 0;

        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(&tr->array_buffer, &count);

        unregister_ftrace_graph(&fgraph_ops);

        ret = unregister_ftrace_direct((unsigned long) DYN_FTRACE_TEST_NAME,
                                       (unsigned long) trace_direct_tramp);
        if (ret)
                goto out;

        tracing_start();

        if (!ret && !count) {
                ret = -1;
                goto out;
        }

        /* Enable tracing on all functions again */
        ftrace_set_global_filter(NULL, 0, 1);
#endif

        /* Don't test dynamic tracing, the function tracer already did */
out:
        /* Stop it if we failed */
        if (ret)
                ftrace_graph_stop();

        return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tr->max_latency = 0;
        /* disable interrupts for a bit */
        local_irq_disable();
        udelay(100);
        local_irq_enable();

        /*
         * Stop the tracer to avoid a warning caused by a buffer-flip
         * failure: tracing_stop() disables the tr and max buffers,
         * which makes flipping impossible if a parallel max irqs-off
         * latency comes in.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tr->max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tr->max_latency = 0;
        /* disable preemption for a bit */
        preempt_disable();
        udelay(100);
        preempt_enable();

        /*
         * Stop the tracer to avoid a warning caused by a buffer-flip
         * failure: tracing_stop() disables the tr and max buffers,
         * which makes flipping impossible if a parallel max preempt-off
         * latency comes in.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tr->max_latency = save_max;

        return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, it will always
         * fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out_no_start;
        }

        /* reset the max latency */
        tr->max_latency = 0;

        /* disable preemption and interrupts for a bit */
        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        /*
         * Stop the tracer to avoid a warning caused by a buffer-flip
         * failure: tracing_stop() disables the tr and max buffers,
         * which makes flipping impossible if a parallel max irqs/preempt
         * off latency comes in.
         */
        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&tr->max_buffer, &count);
        if (ret)
                goto out;

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* do the test by disabling interrupts first this time */
        tr->max_latency = 0;
        tracing_start();
        trace->start(tr);

        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        trace->stop(tr);
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&tr->max_buffer, &count);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

out:
        tracing_start();
out_no_start:
        trace->reset(tr);
        tr->max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
        /* What could possibly go wrong? */
        return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER

struct wakeup_test_data {
        struct completion       is_ready;
        int                     go;
};

static int trace_wakeup_test_thread(void *data)
{
        /* Make this a -deadline thread */
        static const struct sched_attr attr = {
                .sched_policy = SCHED_DEADLINE,
                .sched_runtime = 100000ULL,
                .sched_deadline = 10000000ULL,
                .sched_period = 10000000ULL
        };
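        /*
         * Times are in nanoseconds: 100us of runtime every 10ms period,
         * with the deadline equal to the period (SCHED_DEADLINE requires
         * runtime <= deadline <= period).
         */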
        struct wakeup_test_data *x = data;

        sched_setattr(current, &attr);

        /* Let the test know we have set the new policy */
        complete(&x->is_ready);

        /* now go to sleep and let the test wake us up */
        set_current_state(TASK_INTERRUPTIBLE);
        while (!x->go) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }

        complete(&x->is_ready);

        set_current_state(TASK_INTERRUPTIBLE);

        /* we are awake, now wait to disappear */
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }

        __set_current_state(TASK_RUNNING);

        return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tr->max_latency;
        struct task_struct *p;
        struct wakeup_test_data data;
        unsigned long count;
        int ret;

        memset(&data, 0, sizeof(data));

        init_completion(&data.is_ready);

        /* create a -deadline thread */
        p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
        if (IS_ERR(p)) {
                printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
                return -1;
        }

        /* make sure the thread is running at -deadline policy */
        wait_for_completion(&data.is_ready);

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tr->max_latency = 0;

        while (p->on_rq) {
                /*
                 * Sleep to make sure the -deadline thread is asleep too.
                 * On virtual machines we can't rely on timings,
                 * but we want to make sure this test still works.
                 */
                msleep(100);
        }

        init_completion(&data.is_ready);

        data.go = 1;
        /* memory barrier is in the wake_up_process() */

        wake_up_process(p);

        /* Wait for the task to wake up */
        wait_for_completion(&data.is_ready);

        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(&tr->array_buffer, NULL);
        if (!ret)
                ret = trace_test_buffer(&tr->max_buffer, &count);

        trace->reset(tr);
        tracing_start();

        tr->max_latency = save_max;

        /* kill the thread */
        kthread_stop(p);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(&tr->array_buffer, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_BRANCH_TRACER */