linux/arch/x86/kernel/ftrace.c
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the fly.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>

#include <trace/syscall.h>

#include <asm/cacheflush.h>
#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void)
{
        set_kernel_text_rw();
        set_all_modules_text_rw();
        return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
        set_all_modules_text_ro();
        set_kernel_text_ro();
        return 0;
}

union ftrace_code_union {
        char code[MCOUNT_INSN_SIZE];
        struct {
                char e8;
                int offset;
        } __attribute__((packed));
};

static int ftrace_calc_offset(long ip, long addr)
{
        return (int)(addr - ip);
}

static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
        static union ftrace_code_union calc;

        calc.e8         = 0xe8;
        calc.offset     = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

        /*
         * No locking needed, this must be called via kstop_machine
         * which in essence is like running on a uniprocessor machine.
         */
        return calc.code;
}
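
/*
 * Worked example of the encoding above (illustrative addresses, not
 * real kernel ones): patching a call at ip = 0x1000 to addr = 0x2000
 * with MCOUNT_INSN_SIZE == 5 yields
 *
 *      offset = 0x2000 - (0x1000 + 5) = 0x0ffb
 *      code   = e8 fb 0f 00 00        (call rel32, little-endian)
 *
 * The rel32 displacement is taken relative to the end of the call
 * instruction, which is why MCOUNT_INSN_SIZE is added to ip first.
 */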

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr < end;
}

static int
do_ftrace_mod_code(unsigned long ip, const void *new_code)
{
        /*
         * On x86_64, kernel text mappings are mapped read-only with
         * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead
         * of the kernel text mapping to modify the kernel text.
         *
         * For 32bit kernels, these mappings are the same and we can use
         * the kernel identity mapping to modify code.
         */
        if (within(ip, (unsigned long)_text, (unsigned long)_etext))
                ip = (unsigned long)__va(__pa_symbol(ip));

        return probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE);
}
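
/*
 * A note on the aliasing trick above: __pa_symbol(ip) resolves the
 * physical address behind the kernel-text virtual address, and __va()
 * then gives the alias of that physical address in the direct
 * (identity) mapping, which is left writable even when
 * CONFIG_DEBUG_RODATA marks the text mapping read-only.  On x86_64
 * kernels of this vintage that typically means translating an address
 * in the __START_KERNEL_map region into one in the PAGE_OFFSET
 * region; the exact layout is configuration dependent, but both
 * addresses name the same page frame.
 */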

static const unsigned char *ftrace_nop_replace(void)
{
        return ideal_nops[NOP_ATOMIC5];
}
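
/*
 * ideal_nops[NOP_ATOMIC5], returned above, is a single 5-byte nop
 * chosen at boot for the running CPU, e.g. the P6
 * "nopl 0x0(%rax,%rax,1)" encoding 0f 1f 44 00 00 on most modern
 * processors.  A single instruction (rather than five one-byte nops)
 * matters because a CPU must never be executing in the middle of the
 * patched region.
 */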

static int
ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
                   unsigned const char *new_code)
{
        unsigned char replaced[MCOUNT_INSN_SIZE];

        /*
         * Note: Due to modules and __init, code can
         *  disappear and change; we need to protect against faulting
         *  as well as code changing. We do this by using the
         *  probe_kernel_* functions.
         *
         * No real locking needed, this code is run through
         * kstop_machine, or before SMP starts.
         */

        /* read the text we want to modify */
        if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        /* Make sure it is what we expect it to be */
        if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
                return -EINVAL;

        /* replace the text with the new text */
        if (do_ftrace_mod_code(ip, new_code))
                return -EPERM;

        sync_core();

        return 0;
}

int ftrace_make_nop(struct module *mod,
                    struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned const char *new, *old;
        unsigned long ip = rec->ip;

        old = ftrace_call_replace(ip, addr);
        new = ftrace_nop_replace();

        /*
         * On boot up, and when modules are loaded, the MCOUNT_ADDR
         * is converted to a nop, and will never become MCOUNT_ADDR
         * again. This code is either running before SMP (on boot up)
         * or before the code will ever be executed (module load).
         * We do not want to use the breakpoint version in this case,
         * just modify the code directly.
         */
        if (addr == MCOUNT_ADDR)
                return ftrace_modify_code_direct(rec->ip, old, new);

        /* Normal cases use add_brk_on_nop */
        WARN_ONCE(1, "invalid use of ftrace_make_nop");
        return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned const char *new, *old;
        unsigned long ip = rec->ip;

        old = ftrace_nop_replace();
        new = ftrace_call_replace(ip, addr);

        /* Should only be called when module is loaded */
        return ftrace_modify_code_direct(rec->ip, old, new);
}

/*
 * The modifying_ftrace_code is used to tell the breakpoint
 * handler to call ftrace_int3_handler(). If it fails to
 * call this handler for a breakpoint added by ftrace, then
 * the kernel may crash.
 *
 * As atomic writes on x86 do not need a barrier, we do not
 * need to add smp_mb()s for this to work. We also assume the
 * CPU cannot read modifying_ftrace_code before executing the
 * breakpoint; it would be quite remarkable if it could do
 * that. Here's the flow that is required:
 *
 *   CPU-0                          CPU-1
 *
 * atomic_inc(mfc);
 * write int3s
 *                              <trap-int3> // implicit (r)mb
 *                              if (atomic_read(mfc))
 *                                      call ftrace_int3_handler()
 *
 * Then when we are finished:
 *
 * atomic_dec(mfc);
 *
 * If we hit a breakpoint that was not set by ftrace, it does not
 * matter if ftrace_int3_handler() is called or not. It will
 * simply be ignored. But it is crucial that a ftrace nop/caller
 * breakpoint is handled. No other user should ever place a
 * breakpoint on an ftrace nop/caller location. It must only
 * be done by this code.
 */
atomic_t modifying_ftrace_code __read_mostly;

static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
                   unsigned const char *new_code);

/*
 * Should never be called:
 *  It is only called by __ftrace_replace_code(), which is reached via
 *  the ftrace_replace_code() that x86 overrides, and by
 *  ftrace_update_code(), which only turns mcount into nops or nops
 *  into function calls and never converts a function from not using
 *  regs to one that uses regs, which is what ftrace_modify_call()
 *  is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                                 unsigned long addr)
{
        WARN_ON(1);
        return -EINVAL;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
        unsigned long ip = (unsigned long)(&ftrace_call);
        unsigned char old[MCOUNT_INSN_SIZE], *new;
        int ret;

        memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
        new = ftrace_call_replace(ip, (unsigned long)func);

        /* See comment above by declaration of modifying_ftrace_code */
        atomic_inc(&modifying_ftrace_code);

        ret = ftrace_modify_code(ip, old, new);

        /* Also update the regs callback function */
        if (!ret) {
                ip = (unsigned long)(&ftrace_regs_call);
                memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
                new = ftrace_call_replace(ip, (unsigned long)func);
                ret = ftrace_modify_code(ip, old, new);
        }

        atomic_dec(&modifying_ftrace_code);

        return ret;
}

static int is_ftrace_caller(unsigned long ip)
{
        if (ip == (unsigned long)(&ftrace_call) ||
                ip == (unsigned long)(&ftrace_regs_call))
                return 1;

        return 0;
}

/*
 * A breakpoint was added to the code address we are about to
 * modify, and this is the handler that will just skip over it.
 * We are either changing a nop into a trace call, or a trace
 * call to a nop. While the change is taking place, we treat
 * it just like it was a nop.
 */
int ftrace_int3_handler(struct pt_regs *regs)
{
        unsigned long ip;

        if (WARN_ON_ONCE(!regs))
                return 0;

        ip = regs->ip - 1;
        if (!ftrace_location(ip) && !is_ftrace_caller(ip))
                return 0;

        regs->ip += MCOUNT_INSN_SIZE - 1;

        return 1;
}
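
/*
 * The arithmetic above: int3 is one byte, and the trap leaves
 * regs->ip pointing just past it, so regs->ip - 1 is the address of
 * the patched site.  Adding MCOUNT_INSN_SIZE - 1 then moves regs->ip
 * to the instruction following the 5-byte site.  E.g. for a
 * breakpoint on a site at 0x1000 (illustrative address), the trap
 * reports ip == 0x1001 and the handler returns with ip == 0x1005,
 * skipping the site as if it were a nop.
 */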

static int ftrace_write(unsigned long ip, const char *val, int size)
{
        /*
         * On x86_64, kernel text mappings are mapped read-only with
         * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead
         * of the kernel text mapping to modify the kernel text.
         *
         * For 32bit kernels, these mappings are the same and we can use
         * the kernel identity mapping to modify code.
         */
        if (within(ip, (unsigned long)_text, (unsigned long)_etext))
                ip = (unsigned long)__va(__pa_symbol(ip));

        return probe_kernel_write((void *)ip, val, size);
}

static int add_break(unsigned long ip, const char *old)
{
        unsigned char replaced[MCOUNT_INSN_SIZE];
        unsigned char brk = BREAKPOINT_INSTRUCTION;

        if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        /* Make sure it is what we expect it to be */
        if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
                return -EINVAL;

        if (ftrace_write(ip, &brk, 1))
                return -EPERM;

        return 0;
}

static int add_brk_on_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned const char *old;
        unsigned long ip = rec->ip;

        old = ftrace_call_replace(ip, addr);

        return add_break(rec->ip, old);
}


static int add_brk_on_nop(struct dyn_ftrace *rec)
{
        unsigned const char *old;

        old = ftrace_nop_replace();

        return add_break(rec->ip, old);
}

/*
 * If the record has the FTRACE_FL_REGS set, that means that it
 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
 * is not set, then it wants to convert to the normal callback.
 */
static unsigned long get_ftrace_addr(struct dyn_ftrace *rec)
{
        if (rec->flags & FTRACE_FL_REGS)
                return (unsigned long)FTRACE_REGS_ADDR;
        else
                return (unsigned long)FTRACE_ADDR;
}

/*
 * The FTRACE_FL_REGS_EN is set when the record already points to
 * a function that saves all the regs. Basically the '_EN' version
 * represents the current state of the function.
 */
static unsigned long get_ftrace_old_addr(struct dyn_ftrace *rec)
{
        if (rec->flags & FTRACE_FL_REGS_EN)
                return (unsigned long)FTRACE_REGS_ADDR;
        else
                return (unsigned long)FTRACE_ADDR;
}
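
/*
 * Taken together, the two flags describe the transition being made
 * (FTRACE_FL_REGS is the wanted state, FTRACE_FL_REGS_EN the current
 * one):
 *
 *   REGS  REGS_EN   transition
 *    0      0       FTRACE_ADDR, no change
 *    1      0       FTRACE_ADDR -> FTRACE_REGS_ADDR
 *    0      1       FTRACE_REGS_ADDR -> FTRACE_ADDR
 *    1      1       FTRACE_REGS_ADDR, no change
 */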

static int add_breakpoints(struct dyn_ftrace *rec, int enable)
{
        unsigned long ftrace_addr;
        int ret;

        ret = ftrace_test_record(rec, enable);

        ftrace_addr = get_ftrace_addr(rec);

        switch (ret) {
        case FTRACE_UPDATE_IGNORE:
                return 0;

        case FTRACE_UPDATE_MAKE_CALL:
                /* converting nop to call */
                return add_brk_on_nop(rec);

        case FTRACE_UPDATE_MODIFY_CALL_REGS:
        case FTRACE_UPDATE_MODIFY_CALL:
                ftrace_addr = get_ftrace_old_addr(rec);
                /* fall through */
        case FTRACE_UPDATE_MAKE_NOP:
                /* converting a call to a nop */
                return add_brk_on_call(rec, ftrace_addr);
        }
        return 0;
}

/*
 * On error, we need to remove breakpoints. This needs to
 * be done carefully. If the address does not currently have a
 * breakpoint, we know we are done. Otherwise, we look at the
 * remaining 4 bytes of the instruction. If it matches a nop
 * we replace the breakpoint with the nop. Otherwise we replace
 * it with the call instruction.
 */
static int remove_breakpoint(struct dyn_ftrace *rec)
{
        unsigned char ins[MCOUNT_INSN_SIZE];
        unsigned char brk = BREAKPOINT_INSTRUCTION;
        const unsigned char *nop;
        unsigned long ftrace_addr;
        unsigned long ip = rec->ip;

        /* If we fail the read, just give up */
        if (probe_kernel_read(ins, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        /* If this does not have a breakpoint, we are done */
        if (ins[0] != brk)
                return -1;

        nop = ftrace_nop_replace();

        /*
         * If the last 4 bytes of the instruction do not match
         * a nop, then we assume that this is a call to ftrace_addr.
         */
        if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0) {
                /*
                 * For extra paranoia, we check if the breakpoint is on
                 * a call that would actually jump to the ftrace_addr.
                 * If not, don't touch the breakpoint, we may just create
                 * a disaster.
                 */
                ftrace_addr = get_ftrace_addr(rec);
                nop = ftrace_call_replace(ip, ftrace_addr);

                if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) == 0)
                        goto update;

                /* Check both ftrace_addr and ftrace_old_addr */
                ftrace_addr = get_ftrace_old_addr(rec);
                nop = ftrace_call_replace(ip, ftrace_addr);

                if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
                        return -EINVAL;
        }

 update:
        return probe_kernel_write((void *)ip, &nop[0], 1);
}

static int add_update_code(unsigned long ip, unsigned const char *new)
{
        /* skip breakpoint */
        ip++;
        new++;
        if (ftrace_write(ip, new, MCOUNT_INSN_SIZE - 1))
                return -EPERM;
        return 0;
}
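
/*
 * Note the ip++/new++ above: byte 0 of the site still holds the int3
 * breakpoint, so only the four tail bytes of the new instruction are
 * written here.  Any CPU hitting the site during the update traps
 * into ftrace_int3_handler() and skips the whole 5-byte region, so it
 * can never execute a half-written instruction.
 */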

static int add_update_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long ip = rec->ip;
        unsigned const char *new;

        new = ftrace_call_replace(ip, addr);
        return add_update_code(ip, new);
}

static int add_update_nop(struct dyn_ftrace *rec)
{
        unsigned long ip = rec->ip;
        unsigned const char *new;

        new = ftrace_nop_replace();
        return add_update_code(ip, new);
}

static int add_update(struct dyn_ftrace *rec, int enable)
{
        unsigned long ftrace_addr;
        int ret;

        ret = ftrace_test_record(rec, enable);

        ftrace_addr = get_ftrace_addr(rec);

        switch (ret) {
        case FTRACE_UPDATE_IGNORE:
                return 0;

        case FTRACE_UPDATE_MODIFY_CALL_REGS:
        case FTRACE_UPDATE_MODIFY_CALL:
        case FTRACE_UPDATE_MAKE_CALL:
                /* converting nop to call */
                return add_update_call(rec, ftrace_addr);

        case FTRACE_UPDATE_MAKE_NOP:
                /* converting a call to a nop */
                return add_update_nop(rec);
        }

        return 0;
}

static int finish_update_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long ip = rec->ip;
        unsigned const char *new;

        new = ftrace_call_replace(ip, addr);

        if (ftrace_write(ip, new, 1))
                return -EPERM;

        return 0;
}

static int finish_update_nop(struct dyn_ftrace *rec)
{
        unsigned long ip = rec->ip;
        unsigned const char *new;

        new = ftrace_nop_replace();

        if (ftrace_write(ip, new, 1))
                return -EPERM;
        return 0;
}

static int finish_update(struct dyn_ftrace *rec, int enable)
{
        unsigned long ftrace_addr;
        int ret;

        ret = ftrace_update_record(rec, enable);

        ftrace_addr = get_ftrace_addr(rec);

        switch (ret) {
        case FTRACE_UPDATE_IGNORE:
                return 0;

        case FTRACE_UPDATE_MODIFY_CALL_REGS:
        case FTRACE_UPDATE_MODIFY_CALL:
        case FTRACE_UPDATE_MAKE_CALL:
                /* converting nop to call */
                return finish_update_call(rec, ftrace_addr);

        case FTRACE_UPDATE_MAKE_NOP:
                /* converting a call to a nop */
                return finish_update_nop(rec);
        }

        return 0;
}

static void do_sync_core(void *data)
{
        sync_core();
}

static void run_sync(void)
{
        int enable_irqs = irqs_disabled();

        /* We may be called with interrupts disabled (on bootup). */
        if (enable_irqs)
                local_irq_enable();
        on_each_cpu(do_sync_core, NULL, 1);
        if (enable_irqs)
                local_irq_disable();
}
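
/*
 * The three-phase update performed by ftrace_replace_code() below:
 *
 *   1) add_breakpoints(): replace byte 0 of every site with int3, so
 *      any CPU executing a site traps and skips it.
 *   2) add_update(): with the breakpoint still in place, rewrite the
 *      remaining four bytes to those of the new instruction.
 *   3) finish_update(): replace the int3 with byte 0 of the new
 *      instruction.
 *
 * run_sync() between the phases executes sync_core() on every CPU so
 * that no CPU can still observe the previous phase's bytes in its
 * instruction stream when the next phase begins.
 */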

void ftrace_replace_code(int enable)
{
        struct ftrace_rec_iter *iter;
        struct dyn_ftrace *rec;
        const char *report = "adding breakpoints";
        int count = 0;
        int ret;

        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);

                ret = add_breakpoints(rec, enable);
                if (ret)
                        goto remove_breakpoints;
                count++;
        }

        run_sync();

        report = "updating code";

        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);

                ret = add_update(rec, enable);
                if (ret)
                        goto remove_breakpoints;
        }

        run_sync();

        report = "removing breakpoints";

        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);

                ret = finish_update(rec, enable);
                if (ret)
                        goto remove_breakpoints;
        }

        run_sync();

        return;

 remove_breakpoints:
        ftrace_bug(ret, rec ? rec->ip : 0);
        printk(KERN_WARNING "Failed on %s (%d):\n", report, count);
        for_ftrace_rec_iter(iter) {
                rec = ftrace_rec_iter_record(iter);
                remove_breakpoint(rec);
        }
}

static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
                   unsigned const char *new_code)
{
        int ret;

        ret = add_break(ip, old_code);
        if (ret)
                goto out;

        run_sync();

        ret = add_update_code(ip, new_code);
        if (ret)
                goto fail_update;

        run_sync();

        ret = ftrace_write(ip, new_code, 1);
        if (ret) {
                ret = -EPERM;
                goto out;
        }
        run_sync();
 out:
        return ret;

 fail_update:
        probe_kernel_write((void *)ip, &old_code[0], 1);
        goto out;
}

void arch_ftrace_update_code(int command)
{
        /* See comment above by declaration of modifying_ftrace_code */
        atomic_inc(&modifying_ftrace_code);

        ftrace_modify_all_code(command);

        atomic_dec(&modifying_ftrace_code);
}

int __init ftrace_dyn_arch_init(void *data)
{
        /* The return code is returned via data */
        *(unsigned long *)data = 0;

        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

static int ftrace_mod_jmp(unsigned long ip,
                          int old_offset, int new_offset)
{
        unsigned char code[MCOUNT_INSN_SIZE];

        if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        if (code[0] != 0xe9 || old_offset != *(int *)(&code[1]))
                return -EINVAL;

        *(int *)(&code[1]) = new_offset;

        if (do_ftrace_mod_code(ip, &code))
                return -EPERM;

        return 0;
}
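
/*
 * The graph-caller site is a 5-byte "jmp rel32" (opcode 0xe9) rather
 * than a call, so ftrace_mod_jmp() only has to swap the 4-byte rel32
 * displacement: it verifies the opcode and the old offset, then
 * writes the new offset in place.  As with the call encoding, rel32
 * is taken relative to the end of the instruction, hence the
 * "ip + MCOUNT_INSN_SIZE" in the offset computations below.
 */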

int ftrace_enable_ftrace_graph_caller(void)
{
        unsigned long ip = (unsigned long)(&ftrace_graph_call);
        int old_offset, new_offset;

        old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
        new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);

        return ftrace_mod_jmp(ip, old_offset, new_offset);
}

int ftrace_disable_ftrace_graph_caller(void)
{
        unsigned long ip = (unsigned long)(&ftrace_graph_call);
        int old_offset, new_offset;

        old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
        new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);

        return ftrace_mod_jmp(ip, old_offset, new_offset);
}

#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
                           unsigned long frame_pointer)
{
        unsigned long old;
        int faulted;
        struct ftrace_graph_ent trace;
        unsigned long return_hooker = (unsigned long)
                                &return_to_handler;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;

        /*
         * Protect against fault, even if it shouldn't
         * happen. This tool is too intrusive to
         * ignore such a protection.
         */
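        /*
         * What the asm below does, step by step:
         *   1: load the original return address from the parent
         *      frame's return-address slot (*parent) into "old"
         *   2: overwrite that slot with the address of
         *      return_to_handler, so the traced function "returns"
         *      into the tracer
         *   3: normal exit path (faulted == 0)
         *   4: fixup code, reached via the exception table entries if
         *      either access faults; it sets faulted = 1 and jumps
         *      back to 3:
         */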
        asm volatile(
                "1: " _ASM_MOV " (%[parent]), %[old]\n"
                "2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
                "   movl $0, %[faulted]\n"
                "3:\n"

                ".section .fixup, \"ax\"\n"
                "4: movl $1, %[faulted]\n"
                "   jmp 3b\n"
                ".previous\n"

                _ASM_EXTABLE(1b, 4b)
                _ASM_EXTABLE(2b, 4b)

                : [old] "=&r" (old), [faulted] "=r" (faulted)
                : [parent] "r" (parent), [return_hooker] "r" (return_hooker)
                : "memory"
        );

        if (unlikely(faulted)) {
                ftrace_graph_stop();
                WARN_ON(1);
                return;
        }

        trace.func = self_addr;
        trace.depth = current->curr_ret_stack + 1;

        /* Only trace if the calling function expects to */
        if (!ftrace_graph_entry(&trace)) {
                *parent = old;
                return;
        }

        if (ftrace_push_return_trace(old, self_addr, &trace.depth,
                    frame_pointer) == -EBUSY) {
                *parent = old;
                return;
        }
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
