linux/kernel/trace/trace_selftest.c
   1// SPDX-License-Identifier: GPL-2.0
   2/* Include in trace.c */
   3
   4#include <uapi/linux/sched/types.h>
   5#include <linux/stringify.h>
   6#include <linux/kthread.h>
   7#include <linux/delay.h>
   8#include <linux/slab.h>
   9
  10static inline int trace_valid_entry(struct trace_entry *entry)
  11{
  12        switch (entry->type) {
  13        case TRACE_FN:
  14        case TRACE_CTX:
  15        case TRACE_WAKE:
  16        case TRACE_STACK:
  17        case TRACE_PRINT:
  18        case TRACE_BRANCH:
  19        case TRACE_GRAPH_ENT:
  20        case TRACE_GRAPH_RET:
  21                return 1;
  22        }
  23        return 0;
  24}
  25
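     /*
      * Consume every event queued on one CPU and verify that each entry
      * has a known type; on corruption, disable tracing and report it.
      */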
  26static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu)
  27{
  28        struct ring_buffer_event *event;
  29        struct trace_entry *entry;
  30        unsigned int loops = 0;
  31
  32        while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
  33                entry = ring_buffer_event_data(event);
  34
  35                /*
   36         * The ring buffer is of size trace_buf_size; if we
   37         * loop more times than that, something is wrong
   38         * with the ring buffer.
  39                 */
  40                if (loops++ > trace_buf_size) {
  41                        printk(KERN_CONT ".. bad ring buffer ");
  42                        goto failed;
  43                }
  44                if (!trace_valid_entry(entry)) {
  45                        printk(KERN_CONT ".. invalid entry %d ",
  46                                entry->type);
  47                        goto failed;
  48                }
  49        }
  50        return 0;
  51
  52 failed:
  53        /* disable tracing */
  54        tracing_disabled = 1;
  55        printk(KERN_CONT ".. corrupted trace buffer .. ");
  56        return -1;
  57}
  58
  59/*
  60 * Test the trace buffer to see if all the elements
  61 * are still sane.
  62 */
  63static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)
  64{
  65        unsigned long flags, cnt = 0;
  66        int cpu, ret = 0;
  67
  68        /* Don't allow flipping of max traces now */
  69        local_irq_save(flags);
  70        arch_spin_lock(&buf->tr->max_lock);
  71
  72        cnt = ring_buffer_entries(buf->buffer);
  73
  74        /*
  75         * The trace_test_buffer_cpu runs a while loop to consume all data.
  76         * If the calling tracer is broken, and is constantly filling
  77         * the buffer, this will run forever, and hard lock the box.
  78         * We disable the ring buffer while we do this test to prevent
  79         * a hard lock up.
  80         */
  81        tracing_off();
  82        for_each_possible_cpu(cpu) {
  83                ret = trace_test_buffer_cpu(buf, cpu);
  84                if (ret)
  85                        break;
  86        }
  87        tracing_on();
  88        arch_spin_unlock(&buf->tr->max_lock);
  89        local_irq_restore(flags);
  90
  91        if (count)
  92                *count = cnt;
  93
  94        return ret;
  95}
  96
  97static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
  98{
  99        printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
 100                trace->name, init_ret);
 101}
 102#ifdef CONFIG_FUNCTION_TRACER
 103
 104#ifdef CONFIG_DYNAMIC_FTRACE
 105
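     /*
      * Each probe below simply counts its own invocations;
      * trace_selftest_ops() reads the counters to verify that the
      * ftrace filters routed calls to the right callbacks.
      */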
 106static int trace_selftest_test_probe1_cnt;
 107static void trace_selftest_test_probe1_func(unsigned long ip,
 108                                            unsigned long pip,
 109                                            struct ftrace_ops *op,
 110                                            struct ftrace_regs *fregs)
 111{
 112        trace_selftest_test_probe1_cnt++;
 113}
 114
 115static int trace_selftest_test_probe2_cnt;
 116static void trace_selftest_test_probe2_func(unsigned long ip,
 117                                            unsigned long pip,
 118                                            struct ftrace_ops *op,
 119                                            struct ftrace_regs *fregs)
 120{
 121        trace_selftest_test_probe2_cnt++;
 122}
 123
 124static int trace_selftest_test_probe3_cnt;
 125static void trace_selftest_test_probe3_func(unsigned long ip,
 126                                            unsigned long pip,
 127                                            struct ftrace_ops *op,
 128                                            struct ftrace_regs *fregs)
 129{
 130        trace_selftest_test_probe3_cnt++;
 131}
 132
 133static int trace_selftest_test_global_cnt;
 134static void trace_selftest_test_global_func(unsigned long ip,
 135                                            unsigned long pip,
 136                                            struct ftrace_ops *op,
 137                                            struct ftrace_regs *fregs)
 138{
 139        trace_selftest_test_global_cnt++;
 140}
 141
 142static int trace_selftest_test_dyn_cnt;
 143static void trace_selftest_test_dyn_func(unsigned long ip,
 144                                         unsigned long pip,
 145                                         struct ftrace_ops *op,
 146                                         struct ftrace_regs *fregs)
 147{
 148        trace_selftest_test_dyn_cnt++;
 149}
 150
 151static struct ftrace_ops test_probe1 = {
 152        .func                   = trace_selftest_test_probe1_func,
 153};
 154
 155static struct ftrace_ops test_probe2 = {
 156        .func                   = trace_selftest_test_probe2_func,
 157};
 158
 159static struct ftrace_ops test_probe3 = {
 160        .func                   = trace_selftest_test_probe3_func,
 161};
 162
 163static void print_counts(void)
 164{
 165        printk("(%d %d %d %d %d) ",
 166               trace_selftest_test_probe1_cnt,
 167               trace_selftest_test_probe2_cnt,
 168               trace_selftest_test_probe3_cnt,
 169               trace_selftest_test_global_cnt,
 170               trace_selftest_test_dyn_cnt);
 171}
 172
 173static void reset_counts(void)
 174{
 175        trace_selftest_test_probe1_cnt = 0;
 176        trace_selftest_test_probe2_cnt = 0;
 177        trace_selftest_test_probe3_cnt = 0;
 178        trace_selftest_test_global_cnt = 0;
 179        trace_selftest_test_dyn_cnt = 0;
 180}
 181
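     /*
      * Register several ftrace_ops with different filters, call the two
      * test functions, and check the counters to make sure each callback
      * saw only the functions it was filtered on.
      */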
 182static int trace_selftest_ops(struct trace_array *tr, int cnt)
 183{
 184        int save_ftrace_enabled = ftrace_enabled;
 185        struct ftrace_ops *dyn_ops;
 186        char *func1_name;
 187        char *func2_name;
 188        int len1;
 189        int len2;
 190        int ret = -1;
 191
 192        printk(KERN_CONT "PASSED\n");
 193        pr_info("Testing dynamic ftrace ops #%d: ", cnt);
 194
 195        ftrace_enabled = 1;
 196        reset_counts();
 197
 198        /* Handle PPC64 '.' name */
 199        func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 200        func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
 201        len1 = strlen(func1_name);
 202        len2 = strlen(func2_name);
 203
 204        /*
 205         * Probe 1 will trace function 1.
 206         * Probe 2 will trace function 2.
 207         * Probe 3 will trace functions 1 and 2.
 208         */
 209        ftrace_set_filter(&test_probe1, func1_name, len1, 1);
 210        ftrace_set_filter(&test_probe2, func2_name, len2, 1);
 211        ftrace_set_filter(&test_probe3, func1_name, len1, 1);
 212        ftrace_set_filter(&test_probe3, func2_name, len2, 0);
 213
 214        register_ftrace_function(&test_probe1);
 215        register_ftrace_function(&test_probe2);
 216        register_ftrace_function(&test_probe3);
  217        /* On the first pass the main function tracer is already registered */
 218        if (cnt > 1) {
 219                ftrace_init_array_ops(tr, trace_selftest_test_global_func);
 220                register_ftrace_function(tr->ops);
 221        }
 222
 223        DYN_FTRACE_TEST_NAME();
 224
 225        print_counts();
 226
 227        if (trace_selftest_test_probe1_cnt != 1)
 228                goto out;
 229        if (trace_selftest_test_probe2_cnt != 0)
 230                goto out;
 231        if (trace_selftest_test_probe3_cnt != 1)
 232                goto out;
 233        if (cnt > 1) {
 234                if (trace_selftest_test_global_cnt == 0)
 235                        goto out;
 236        }
 237
 238        DYN_FTRACE_TEST_NAME2();
 239
 240        print_counts();
 241
 242        if (trace_selftest_test_probe1_cnt != 1)
 243                goto out;
 244        if (trace_selftest_test_probe2_cnt != 1)
 245                goto out;
 246        if (trace_selftest_test_probe3_cnt != 2)
 247                goto out;
 248
 249        /* Add a dynamic probe */
 250        dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
 251        if (!dyn_ops) {
 252                printk("MEMORY ERROR ");
 253                goto out;
 254        }
 255
 256        dyn_ops->func = trace_selftest_test_dyn_func;
 257
 258        register_ftrace_function(dyn_ops);
 259
 260        trace_selftest_test_global_cnt = 0;
 261
 262        DYN_FTRACE_TEST_NAME();
 263
 264        print_counts();
 265
 266        if (trace_selftest_test_probe1_cnt != 2)
 267                goto out_free;
 268        if (trace_selftest_test_probe2_cnt != 1)
 269                goto out_free;
 270        if (trace_selftest_test_probe3_cnt != 3)
 271                goto out_free;
 272        if (cnt > 1) {
 273                if (trace_selftest_test_global_cnt == 0)
 274                        goto out_free;
 275        }
 276        if (trace_selftest_test_dyn_cnt == 0)
 277                goto out_free;
 278
 279        DYN_FTRACE_TEST_NAME2();
 280
 281        print_counts();
 282
 283        if (trace_selftest_test_probe1_cnt != 2)
 284                goto out_free;
 285        if (trace_selftest_test_probe2_cnt != 2)
 286                goto out_free;
 287        if (trace_selftest_test_probe3_cnt != 4)
 288                goto out_free;
 289
 290        ret = 0;
 291 out_free:
 292        unregister_ftrace_function(dyn_ops);
 293        kfree(dyn_ops);
 294
 295 out:
 296        /* Purposely unregister in the same order */
 297        unregister_ftrace_function(&test_probe1);
 298        unregister_ftrace_function(&test_probe2);
 299        unregister_ftrace_function(&test_probe3);
 300        if (cnt > 1)
 301                unregister_ftrace_function(tr->ops);
 302        ftrace_reset_array_ops(tr);
 303
 304        /* Make sure everything is off */
 305        reset_counts();
 306        DYN_FTRACE_TEST_NAME();
 307        DYN_FTRACE_TEST_NAME();
 308
 309        if (trace_selftest_test_probe1_cnt ||
 310            trace_selftest_test_probe2_cnt ||
 311            trace_selftest_test_probe3_cnt ||
 312            trace_selftest_test_global_cnt ||
 313            trace_selftest_test_dyn_cnt)
 314                ret = -1;
 315
 316        ftrace_enabled = save_ftrace_enabled;
 317
 318        return ret;
 319}
 320
 321/* Test dynamic code modification and ftrace filters */
 322static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 323                                                  struct trace_array *tr,
 324                                                  int (*func)(void))
 325{
 326        int save_ftrace_enabled = ftrace_enabled;
 327        unsigned long count;
 328        char *func_name;
 329        int ret;
 330
 331        /* The ftrace test PASSED */
 332        printk(KERN_CONT "PASSED\n");
 333        pr_info("Testing dynamic ftrace: ");
 334
 335        /* enable tracing, and record the filter function */
 336        ftrace_enabled = 1;
 337
  338        /* func is passed in as a parameter to keep gcc from optimizing it away */
 339        func();
 340
 341        /*
 342         * Some archs *cough*PowerPC*cough* add characters to the
 343         * start of the function names. We simply put a '*' to
 344         * accommodate them.
 345         */
 346        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 347
 348        /* filter only on our function */
 349        ftrace_set_global_filter(func_name, strlen(func_name), 1);
 350
 351        /* enable tracing */
 352        ret = tracer_init(trace, tr);
 353        if (ret) {
 354                warn_failed_init_tracer(trace, ret);
 355                goto out;
 356        }
 357
  358        /* Sleep for 1/10 of a second */
 359        msleep(100);
 360
 361        /* we should have nothing in the buffer */
 362        ret = trace_test_buffer(&tr->array_buffer, &count);
 363        if (ret)
 364                goto out;
 365
 366        if (count) {
 367                ret = -1;
 368                printk(KERN_CONT ".. filter did not filter .. ");
 369                goto out;
 370        }
 371
 372        /* call our function again */
 373        func();
 374
 375        /* sleep again */
 376        msleep(100);
 377
 378        /* stop the tracing. */
 379        tracing_stop();
 380        ftrace_enabled = 0;
 381
 382        /* check the trace buffer */
 383        ret = trace_test_buffer(&tr->array_buffer, &count);
 384
 385        ftrace_enabled = 1;
 386        tracing_start();
 387
 388        /* we should only have one item */
 389        if (!ret && count != 1) {
 390                trace->reset(tr);
 391                printk(KERN_CONT ".. filter failed count=%ld ..", count);
 392                ret = -1;
 393                goto out;
 394        }
 395
 396        /* Test the ops with global tracing running */
 397        ret = trace_selftest_ops(tr, 1);
 398        trace->reset(tr);
 399
 400 out:
 401        ftrace_enabled = save_ftrace_enabled;
 402
 403        /* Enable tracing on all functions again */
 404        ftrace_set_global_filter(NULL, 0, 1);
 405
 406        /* Test the ops with global tracing off */
 407        if (!ret)
 408                ret = trace_selftest_ops(tr, 2);
 409
 410        return ret;
 411}
 412
 413static int trace_selftest_recursion_cnt;
 414static void trace_selftest_test_recursion_func(unsigned long ip,
 415                                               unsigned long pip,
 416                                               struct ftrace_ops *op,
 417                                               struct ftrace_regs *fregs)
 418{
 419        /*
 420         * This function is registered without the recursion safe flag.
 421         * The ftrace infrastructure should provide the recursion
 422         * protection. If not, this will crash the kernel!
 423         */
 424        if (trace_selftest_recursion_cnt++ > 10)
 425                return;
 426        DYN_FTRACE_TEST_NAME();
 427}
 428
 429static void trace_selftest_test_recursion_safe_func(unsigned long ip,
 430                                                    unsigned long pip,
 431                                                    struct ftrace_ops *op,
 432                                                    struct ftrace_regs *fregs)
 433{
 434        /*
  435         * We said we would provide our own recursion protection. By
  436         * calling this function again, we should recurse back into
  437         * this function and count again. But this only happens if the
  438         * arch supports all of the ftrace features and nothing else
  439         * is using the function tracing utility.
 440         */
 441        if (trace_selftest_recursion_cnt++)
 442                return;
 443        DYN_FTRACE_TEST_NAME();
 444}
 445
 446static struct ftrace_ops test_rec_probe = {
 447        .func                   = trace_selftest_test_recursion_func,
 448        .flags                  = FTRACE_OPS_FL_RECURSION,
 449};
 450
 451static struct ftrace_ops test_recsafe_probe = {
 452        .func                   = trace_selftest_test_recursion_safe_func,
 453};
 454
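     /*
      * Check that ftrace keeps a callback which re-enters the traced
      * function from recursing endlessly, and that a callback handling
      * its own recursion is called the expected number of times.
      */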
 455static int
 456trace_selftest_function_recursion(void)
 457{
 458        int save_ftrace_enabled = ftrace_enabled;
 459        char *func_name;
 460        int len;
 461        int ret;
 462
 463        /* The previous test PASSED */
 464        pr_cont("PASSED\n");
 465        pr_info("Testing ftrace recursion: ");
 466
 467
 468        /* enable tracing, and record the filter function */
 469        ftrace_enabled = 1;
 470
 471        /* Handle PPC64 '.' name */
 472        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 473        len = strlen(func_name);
 474
 475        ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
 476        if (ret) {
 477                pr_cont("*Could not set filter* ");
 478                goto out;
 479        }
 480
 481        ret = register_ftrace_function(&test_rec_probe);
 482        if (ret) {
 483                pr_cont("*could not register callback* ");
 484                goto out;
 485        }
 486
 487        DYN_FTRACE_TEST_NAME();
 488
 489        unregister_ftrace_function(&test_rec_probe);
 490
 491        ret = -1;
 492        /*
  493         * Recursion allows for transitions between contexts,
  494         * and may call the callback twice.
 495         */
 496        if (trace_selftest_recursion_cnt != 1 &&
 497            trace_selftest_recursion_cnt != 2) {
 498                pr_cont("*callback not called once (or twice) (%d)* ",
 499                        trace_selftest_recursion_cnt);
 500                goto out;
 501        }
 502
 503        trace_selftest_recursion_cnt = 1;
 504
 505        pr_cont("PASSED\n");
 506        pr_info("Testing ftrace recursion safe: ");
 507
 508        ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
 509        if (ret) {
 510                pr_cont("*Could not set filter* ");
 511                goto out;
 512        }
 513
 514        ret = register_ftrace_function(&test_recsafe_probe);
 515        if (ret) {
 516                pr_cont("*could not register callback* ");
 517                goto out;
 518        }
 519
 520        DYN_FTRACE_TEST_NAME();
 521
 522        unregister_ftrace_function(&test_recsafe_probe);
 523
 524        ret = -1;
 525        if (trace_selftest_recursion_cnt != 2) {
 526                pr_cont("*callback not called expected 2 times (%d)* ",
 527                        trace_selftest_recursion_cnt);
 528                goto out;
 529        }
 530
 531        ret = 0;
 532out:
 533        ftrace_enabled = save_ftrace_enabled;
 534
 535        return ret;
 536}
 537#else
 538# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
 539# define trace_selftest_function_recursion() ({ 0; })
 540#endif /* CONFIG_DYNAMIC_FTRACE */
 541
 542static enum {
 543        TRACE_SELFTEST_REGS_START,
 544        TRACE_SELFTEST_REGS_FOUND,
 545        TRACE_SELFTEST_REGS_NOT_FOUND,
 546} trace_selftest_regs_stat;
 547
 548static void trace_selftest_test_regs_func(unsigned long ip,
 549                                          unsigned long pip,
 550                                          struct ftrace_ops *op,
 551                                          struct ftrace_regs *fregs)
 552{
 553        struct pt_regs *regs = ftrace_get_regs(fregs);
 554
 555        if (regs)
 556                trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
 557        else
 558                trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
 559}
 560
 561static struct ftrace_ops test_regs_probe = {
 562        .func           = trace_selftest_test_regs_func,
 563        .flags          = FTRACE_OPS_FL_SAVE_REGS,
 564};
 565
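     /*
      * Check that FTRACE_OPS_FL_SAVE_REGS hands pt_regs to the callback
      * when the arch supports it, and that registration is refused when
      * it does not.
      */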
 566static int
 567trace_selftest_function_regs(void)
 568{
 569        int save_ftrace_enabled = ftrace_enabled;
 570        char *func_name;
 571        int len;
 572        int ret;
 573        int supported = 0;
 574
 575#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 576        supported = 1;
 577#endif
 578
 579        /* The previous test PASSED */
 580        pr_cont("PASSED\n");
 581        pr_info("Testing ftrace regs%s: ",
 582                !supported ? "(no arch support)" : "");
 583
 584        /* enable tracing, and record the filter function */
 585        ftrace_enabled = 1;
 586
 587        /* Handle PPC64 '.' name */
 588        func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
 589        len = strlen(func_name);
 590
 591        ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
 592        /*
 593         * If DYNAMIC_FTRACE is not set, then we just trace all functions.
 594         * This test really doesn't care.
 595         */
 596        if (ret && ret != -ENODEV) {
 597                pr_cont("*Could not set filter* ");
 598                goto out;
 599        }
 600
 601        ret = register_ftrace_function(&test_regs_probe);
 602        /*
 603         * Now if the arch does not support passing regs, then this should
 604         * have failed.
 605         */
 606        if (!supported) {
 607                if (!ret) {
 608                        pr_cont("*registered save-regs without arch support* ");
 609                        goto out;
 610                }
 611                test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
 612                ret = register_ftrace_function(&test_regs_probe);
 613        }
 614        if (ret) {
 615                pr_cont("*could not register callback* ");
 616                goto out;
 617        }
 618
 619
 620        DYN_FTRACE_TEST_NAME();
 621
 622        unregister_ftrace_function(&test_regs_probe);
 623
 624        ret = -1;
 625
 626        switch (trace_selftest_regs_stat) {
 627        case TRACE_SELFTEST_REGS_START:
 628                pr_cont("*callback never called* ");
 629                goto out;
 630
 631        case TRACE_SELFTEST_REGS_FOUND:
 632                if (supported)
 633                        break;
 634                pr_cont("*callback received regs without arch support* ");
 635                goto out;
 636
 637        case TRACE_SELFTEST_REGS_NOT_FOUND:
 638                if (!supported)
 639                        break;
 640                pr_cont("*callback received NULL regs* ");
 641                goto out;
 642        }
 643
 644        ret = 0;
 645out:
 646        ftrace_enabled = save_ftrace_enabled;
 647
 648        return ret;
 649}
 650
 651/*
  652 * Simple verification test of the ftrace function tracer.
 653 * Enable ftrace, sleep 1/10 second, and then read the trace
 654 * buffer to see if all is in order.
 655 */
 656__init int
 657trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 658{
 659        int save_ftrace_enabled = ftrace_enabled;
 660        unsigned long count;
 661        int ret;
 662
 663#ifdef CONFIG_DYNAMIC_FTRACE
 664        if (ftrace_filter_param) {
 665                printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
 666                return 0;
 667        }
 668#endif
 669
 670        /* make sure msleep has been recorded */
 671        msleep(1);
 672
 673        /* start the tracing */
 674        ftrace_enabled = 1;
 675
 676        ret = tracer_init(trace, tr);
 677        if (ret) {
 678                warn_failed_init_tracer(trace, ret);
 679                goto out;
 680        }
 681
  682        /* Sleep for 1/10 of a second */
 683        msleep(100);
 684        /* stop the tracing. */
 685        tracing_stop();
 686        ftrace_enabled = 0;
 687
 688        /* check the trace buffer */
 689        ret = trace_test_buffer(&tr->array_buffer, &count);
 690
 691        ftrace_enabled = 1;
 692        trace->reset(tr);
 693        tracing_start();
 694
 695        if (!ret && !count) {
 696                printk(KERN_CONT ".. no entries found ..");
 697                ret = -1;
 698                goto out;
 699        }
 700
 701        ret = trace_selftest_startup_dynamic_tracing(trace, tr,
 702                                                     DYN_FTRACE_TEST_NAME);
 703        if (ret)
 704                goto out;
 705
 706        ret = trace_selftest_function_recursion();
 707        if (ret)
 708                goto out;
 709
 710        ret = trace_selftest_function_regs();
 711 out:
 712        ftrace_enabled = save_ftrace_enabled;
 713
 714        /* kill ftrace totally if we failed */
 715        if (ret)
 716                ftrace_kill();
 717
 718        return ret;
 719}
 720#endif /* CONFIG_FUNCTION_TRACER */
 721
 722
 723#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 724
 725/* Maximum number of functions to trace before diagnosing a hang */
 726#define GRAPH_MAX_FUNC_TEST     100000000
 727
 728static unsigned int graph_hang_thresh;
 729
 730/* Wrap the real function entry probe to avoid possible hanging */
 731static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
 732{
  733        /* This is harmlessly racy; we only want to approximately detect a hang */
 734        if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
 735                ftrace_graph_stop();
 736                printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
 737                if (ftrace_dump_on_oops) {
 738                        ftrace_dump(DUMP_ALL);
 739                        /* ftrace_dump() disables tracing */
 740                        tracing_on();
 741                }
 742                return 0;
 743        }
 744
 745        return trace_graph_entry(trace);
 746}
 747
 748static struct fgraph_ops fgraph_ops __initdata  = {
 749        .entryfunc              = &trace_graph_entry_watchdog,
 750        .retfunc                = &trace_graph_return,
 751};
 752
 753/*
  754 * Pretty much the same as the function tracer selftest, from which
  755 * this one has been borrowed.
 756 */
 757__init int
 758trace_selftest_startup_function_graph(struct tracer *trace,
 759                                        struct trace_array *tr)
 760{
 761        int ret;
 762        unsigned long count;
 763
 764#ifdef CONFIG_DYNAMIC_FTRACE
 765        if (ftrace_filter_param) {
 766                printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
 767                return 0;
 768        }
 769#endif
 770
 771        /*
  772         * Simulate the init() callback, but attach a watchdog callback
  773         * to detect and recover from possible hangs.
 774         */
 775        tracing_reset_online_cpus(&tr->array_buffer);
 776        set_graph_array(tr);
 777        ret = register_ftrace_graph(&fgraph_ops);
 778        if (ret) {
 779                warn_failed_init_tracer(trace, ret);
 780                goto out;
 781        }
 782        tracing_start_cmdline_record();
 783
  784        /* Sleep for 1/10 of a second */
 785        msleep(100);
 786
 787        /* Have we just recovered from a hang? */
 788        if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
 789                disable_tracing_selftest("recovering from a hang");
 790                ret = -1;
 791                goto out;
 792        }
 793
 794        tracing_stop();
 795
 796        /* check the trace buffer */
 797        ret = trace_test_buffer(&tr->array_buffer, &count);
 798
 799        /* Need to also simulate the tr->reset to remove this fgraph_ops */
 800        tracing_stop_cmdline_record();
 801        unregister_ftrace_graph(&fgraph_ops);
 802
 803        tracing_start();
 804
 805        if (!ret && !count) {
 806                printk(KERN_CONT ".. no entries found ..");
 807                ret = -1;
 808                goto out;
 809        }
 810
  811        /* Don't test dynamic tracing; the function tracer selftest already did */
 812
 813out:
 814        /* Stop it if we failed */
 815        if (ret)
 816                ftrace_graph_stop();
 817
 818        return ret;
 819}
 820#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 821
 822
 823#ifdef CONFIG_IRQSOFF_TRACER
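     /*
      * Disable interrupts for a short while with the irqsoff tracer
      * active, then make sure the max buffer recorded the latency.
      */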
 824int
 825trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 826{
 827        unsigned long save_max = tr->max_latency;
 828        unsigned long count;
 829        int ret;
 830
 831        /* start the tracing */
 832        ret = tracer_init(trace, tr);
 833        if (ret) {
 834                warn_failed_init_tracer(trace, ret);
 835                return ret;
 836        }
 837
 838        /* reset the max latency */
 839        tr->max_latency = 0;
 840        /* disable interrupts for a bit */
 841        local_irq_disable();
 842        udelay(100);
 843        local_irq_enable();
 844
 845        /*
  846         * Stop the tracer to avoid the warning that would follow a
  847         * buffer-flip failure: tracing_stop() disables the tr and
  848         * max buffers, making flipping impossible in case of
  849         * parallel max irqs off latencies.
 850         */
 851        trace->stop(tr);
 852        /* stop the tracing. */
 853        tracing_stop();
 854        /* check both trace buffers */
 855        ret = trace_test_buffer(&tr->array_buffer, NULL);
 856        if (!ret)
 857                ret = trace_test_buffer(&tr->max_buffer, &count);
 858        trace->reset(tr);
 859        tracing_start();
 860
 861        if (!ret && !count) {
 862                printk(KERN_CONT ".. no entries found ..");
 863                ret = -1;
 864        }
 865
 866        tr->max_latency = save_max;
 867
 868        return ret;
 869}
 870#endif /* CONFIG_IRQSOFF_TRACER */
 871
 872#ifdef CONFIG_PREEMPT_TRACER
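     /*
      * Same idea as the irqsoff test, but with preemption disabled
      * instead of interrupts.
      */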
 873int
 874trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 875{
 876        unsigned long save_max = tr->max_latency;
 877        unsigned long count;
 878        int ret;
 879
 880        /*
 881         * Now that the big kernel lock is no longer preemptible,
 882         * and this is called with the BKL held, it will always
 883         * fail. If preemption is already disabled, simply
 884         * pass the test. When the BKL is removed, or becomes
 885         * preemptible again, we will once again test this,
 886         * so keep it in.
 887         */
 888        if (preempt_count()) {
 889                printk(KERN_CONT "can not test ... force ");
 890                return 0;
 891        }
 892
 893        /* start the tracing */
 894        ret = tracer_init(trace, tr);
 895        if (ret) {
 896                warn_failed_init_tracer(trace, ret);
 897                return ret;
 898        }
 899
 900        /* reset the max latency */
 901        tr->max_latency = 0;
 902        /* disable preemption for a bit */
 903        preempt_disable();
 904        udelay(100);
 905        preempt_enable();
 906
 907        /*
  908         * Stop the tracer to avoid the warning that would follow a
  909         * buffer-flip failure: tracing_stop() disables the tr and
  910         * max buffers, making flipping impossible in case of
  911         * parallel max preempt off latencies.
 912         */
 913        trace->stop(tr);
 914        /* stop the tracing. */
 915        tracing_stop();
 916        /* check both trace buffers */
 917        ret = trace_test_buffer(&tr->array_buffer, NULL);
 918        if (!ret)
 919                ret = trace_test_buffer(&tr->max_buffer, &count);
 920        trace->reset(tr);
 921        tracing_start();
 922
 923        if (!ret && !count) {
 924                printk(KERN_CONT ".. no entries found ..");
 925                ret = -1;
 926        }
 927
 928        tr->max_latency = save_max;
 929
 930        return ret;
 931}
 932#endif /* CONFIG_PREEMPT_TRACER */
 933
 934#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
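     /*
      * Combined test: disable both preemption and interrupts for a
      * short while, twice, and check both trace buffers after each pass.
      */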
 935int
 936trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
 937{
 938        unsigned long save_max = tr->max_latency;
 939        unsigned long count;
 940        int ret;
 941
 942        /*
 943         * Now that the big kernel lock is no longer preemptible,
 944         * and this is called with the BKL held, it will always
 945         * fail. If preemption is already disabled, simply
 946         * pass the test. When the BKL is removed, or becomes
 947         * preemptible again, we will once again test this,
 948         * so keep it in.
 949         */
 950        if (preempt_count()) {
 951                printk(KERN_CONT "can not test ... force ");
 952                return 0;
 953        }
 954
 955        /* start the tracing */
 956        ret = tracer_init(trace, tr);
 957        if (ret) {
 958                warn_failed_init_tracer(trace, ret);
 959                goto out_no_start;
 960        }
 961
 962        /* reset the max latency */
 963        tr->max_latency = 0;
 964
 965        /* disable preemption and interrupts for a bit */
 966        preempt_disable();
 967        local_irq_disable();
 968        udelay(100);
 969        preempt_enable();
 970        /* reverse the order of preempt vs irqs */
 971        local_irq_enable();
 972
 973        /*
  974         * Stop the tracer to avoid the warning that would follow a
  975         * buffer-flip failure: tracing_stop() disables the tr and
  976         * max buffers, making flipping impossible in case of
  977         * parallel max irqs/preempt off latencies.
 978         */
 979        trace->stop(tr);
 980        /* stop the tracing. */
 981        tracing_stop();
 982        /* check both trace buffers */
 983        ret = trace_test_buffer(&tr->array_buffer, NULL);
 984        if (ret)
 985                goto out;
 986
 987        ret = trace_test_buffer(&tr->max_buffer, &count);
 988        if (ret)
 989                goto out;
 990
 991        if (!ret && !count) {
 992                printk(KERN_CONT ".. no entries found ..");
 993                ret = -1;
 994                goto out;
 995        }
 996
 997        /* do the test by disabling interrupts first this time */
 998        tr->max_latency = 0;
 999        tracing_start();
1000        trace->start(tr);
1001
1002        preempt_disable();
1003        local_irq_disable();
1004        udelay(100);
1005        preempt_enable();
1006        /* reverse the order of preempt vs irqs */
1007        local_irq_enable();
1008
1009        trace->stop(tr);
1010        /* stop the tracing. */
1011        tracing_stop();
1012        /* check both trace buffers */
1013        ret = trace_test_buffer(&tr->array_buffer, NULL);
1014        if (ret)
1015                goto out;
1016
1017        ret = trace_test_buffer(&tr->max_buffer, &count);
1018
1019        if (!ret && !count) {
1020                printk(KERN_CONT ".. no entries found ..");
1021                ret = -1;
1022                goto out;
1023        }
1024
1025out:
1026        tracing_start();
1027out_no_start:
1028        trace->reset(tr);
1029        tr->max_latency = save_max;
1030
1031        return ret;
1032}
1033#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
1034
1035#ifdef CONFIG_NOP_TRACER
1036int
1037trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
1038{
1039        /* What could possibly go wrong? */
1040        return 0;
1041}
1042#endif
1043
1044#ifdef CONFIG_SCHED_TRACER
1045
1046struct wakeup_test_data {
1047        struct completion       is_ready;
1048        int                     go;
1049};
1050
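     /*
      * Kthread used by the wakeup test: it switches itself to
      * SCHED_DEADLINE, signals that it is ready, then sleeps until the
      * test wakes it up.
      */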
1051static int trace_wakeup_test_thread(void *data)
1052{
1053        /* Make this a -deadline thread */
1054        static const struct sched_attr attr = {
1055                .sched_policy = SCHED_DEADLINE,
1056                .sched_runtime = 100000ULL,
1057                .sched_deadline = 10000000ULL,
1058                .sched_period = 10000000ULL
1059        };
1060        struct wakeup_test_data *x = data;
1061
1062        sched_setattr(current, &attr);
1063
 1064        /* Let the test know we have a new prio */
1065        complete(&x->is_ready);
1066
1067        /* now go to sleep and let the test wake us up */
1068        set_current_state(TASK_INTERRUPTIBLE);
1069        while (!x->go) {
1070                schedule();
1071                set_current_state(TASK_INTERRUPTIBLE);
1072        }
1073
1074        complete(&x->is_ready);
1075
1076        set_current_state(TASK_INTERRUPTIBLE);
1077
1078        /* we are awake, now wait to disappear */
1079        while (!kthread_should_stop()) {
1080                schedule();
1081                set_current_state(TASK_INTERRUPTIBLE);
1082        }
1083
1084        __set_current_state(TASK_RUNNING);
1085
1086        return 0;
1087}
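
     /*
      * Wake the -deadline thread created above while the wakeup tracer
      * is active and verify that a wakeup latency was recorded in the
      * max buffer.
      */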
1088int
1089trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
1090{
1091        unsigned long save_max = tr->max_latency;
1092        struct task_struct *p;
1093        struct wakeup_test_data data;
1094        unsigned long count;
1095        int ret;
1096
1097        memset(&data, 0, sizeof(data));
1098
1099        init_completion(&data.is_ready);
1100
1101        /* create a -deadline thread */
1102        p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
1103        if (IS_ERR(p)) {
1104                printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
1105                return -1;
1106        }
1107
 1108        /* make sure the thread is running with the -deadline policy */
1109        wait_for_completion(&data.is_ready);
1110
1111        /* start the tracing */
1112        ret = tracer_init(trace, tr);
1113        if (ret) {
1114                warn_failed_init_tracer(trace, ret);
1115                return ret;
1116        }
1117
1118        /* reset the max latency */
1119        tr->max_latency = 0;
1120
1121        while (p->on_rq) {
1122                /*
1123                 * Sleep to make sure the -deadline thread is asleep too.
1124                 * On virtual machines we can't rely on timings,
1125                 * but we want to make sure this test still works.
1126                 */
1127                msleep(100);
1128        }
1129
1130        init_completion(&data.is_ready);
1131
1132        data.go = 1;
1133        /* memory barrier is in the wake_up_process() */
1134
1135        wake_up_process(p);
1136
1137        /* Wait for the task to wake up */
1138        wait_for_completion(&data.is_ready);
1139
1140        /* stop the tracing. */
1141        tracing_stop();
1142        /* check both trace buffers */
1143        ret = trace_test_buffer(&tr->array_buffer, NULL);
1144        if (!ret)
1145                ret = trace_test_buffer(&tr->max_buffer, &count);
1146
1147
1148        trace->reset(tr);
1149        tracing_start();
1150
1151        tr->max_latency = save_max;
1152
1153        /* kill the thread */
1154        kthread_stop(p);
1155
1156        if (!ret && !count) {
1157                printk(KERN_CONT ".. no entries found ..");
1158                ret = -1;
1159        }
1160
1161        return ret;
1162}
1163#endif /* CONFIG_SCHED_TRACER */
1164
1165#ifdef CONFIG_BRANCH_TRACER
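     /*
      * Run the branch tracer for a short while and make sure it produced
      * at least one entry.
      */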
1166int
1167trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
1168{
1169        unsigned long count;
1170        int ret;
1171
1172        /* start the tracing */
1173        ret = tracer_init(trace, tr);
1174        if (ret) {
1175                warn_failed_init_tracer(trace, ret);
1176                return ret;
1177        }
1178
 1179        /* Sleep for 1/10 of a second */
1180        msleep(100);
1181        /* stop the tracing. */
1182        tracing_stop();
1183        /* check the trace buffer */
1184        ret = trace_test_buffer(&tr->array_buffer, &count);
1185        trace->reset(tr);
1186        tracing_start();
1187
1188        if (!ret && !count) {
1189                printk(KERN_CONT ".. no entries found ..");
1190                ret = -1;
1191        }
1192
1193        return ret;
1194}
1195#endif /* CONFIG_BRANCH_TRACER */
1196
1197