linux/arch/x86/kernel/ftrace_64.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  Copyright (C) 2014  Steven Rostedt, Red Hat Inc
 */

#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm/ftrace.h>
#include <asm/export.h>
#include <asm/nospec-branch.h>
#include <asm/unwind_hints.h>

        .code64
        .section .entry.text, "ax"

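/*
 * x86-64 ftrace relies on the compiler's -mfentry option, so the
 * hook inserted at each function entry is __fentry__.
 */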
# define function_hook  __fentry__
EXPORT_SYMBOL(__fentry__)

#ifdef CONFIG_FRAME_POINTER
/* Save parent and function stack frames (rip and rbp) */
# define MCOUNT_FRAME_SIZE      (8+16*2)
#else
/* No need to save a stack frame */
# define MCOUNT_FRAME_SIZE      0
#endif /* CONFIG_FRAME_POINTER */

/* Size of stack used to save mcount regs in save_mcount_regs */
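/* SS is the offset of the last pt_regs field, so SS+8 == sizeof(pt_regs) */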
#define MCOUNT_REG_SIZE         (SS+8 + MCOUNT_FRAME_SIZE)

/*
 * The gcc -pg option adds a call to 'mcount' in most functions.
 * When -mfentry is used, the call is to 'fentry' and not 'mcount'
 * and is done before the function's stack frame is set up.
 * Both require a set of regs to be saved before calling
 * any C code, and restored before returning to the function.
 *
 * On boot up, all these calls are converted into nops. When tracing
 * is enabled, the call can jump to either ftrace_caller or
 * ftrace_regs_caller. Callbacks (tracing functions) that require
 * ftrace_regs_caller (like kprobes) need to have pt_regs passed to
 * them. For this reason, a pt_regs-sized area is allocated on the
 * stack and the required mcount registers are saved in the
 * locations that pt_regs has them in.
 */
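/*
 * For example (illustrative disassembly, not from a real build), a
 * function compiled with -pg -mfentry begins:
 *
 *	<some_function>:
 *		call __fentry__		# 5 bytes, patched to a NOP at boot
 *		push %rbp		# the function's own prologue follows
 *		...
 */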

/*
 * @added: the amount of stack added before calling this
 *
 * After this macro runs, the following registers contain:
 *
 *  %rdi - holds the address that called the trampoline
 *  %rsi - holds the parent function (traced function's return address)
 *  %rdx - holds the original %rbp
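 *
 * Example: ftrace_regs_caller pushes the flags first and then invokes
 * "save_mcount_regs 8", so that \added covers the extra word.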
 */
.macro save_mcount_regs added=0

#ifdef CONFIG_FRAME_POINTER
        /* Save the original rbp */
        pushq %rbp

        /*
         * Stack traces will stop at the ftrace trampoline if the frame pointer
         * is not set up properly. If fentry is used, we need to save a frame
         * pointer for the parent as well as for the traced function, because
         * fentry is called before the stack frame is set up, whereas mcount
         * is called afterward.
         */

        /* Save the parent pointer (skip orig rbp and our return address) */
        pushq \added+8*2(%rsp)
        pushq %rbp
        movq %rsp, %rbp
        /* Save the return address (now skip orig rbp, rbp and parent) */
        pushq \added+8*3(%rsp)
        pushq %rbp
        movq %rsp, %rbp
#endif /* CONFIG_FRAME_POINTER */

        /*
         * Allocate the rest of the MCOUNT_REG_SIZE save area. When
         * CONFIG_FRAME_POINTER is set, MCOUNT_FRAME_SIZE bytes of it
         * were already pushed above as the fake stack frames.
         */
        subq $(MCOUNT_REG_SIZE - MCOUNT_FRAME_SIZE), %rsp
        movq %rax, RAX(%rsp)
        movq %rcx, RCX(%rsp)
        movq %rdx, RDX(%rsp)
        movq %rsi, RSI(%rsp)
        movq %rdi, RDI(%rsp)
        movq %r8, R8(%rsp)
        movq %r9, R9(%rsp)
        /*
         * Save the original RBP. Even though the mcount ABI does not
         * require this, it helps out callers.
         */
#ifdef CONFIG_FRAME_POINTER
        movq MCOUNT_REG_SIZE-8(%rsp), %rdx
#else
        movq %rbp, %rdx
#endif
        movq %rdx, RBP(%rsp)

        /* Copy the parent address into %rsi (second parameter) */
        movq MCOUNT_REG_SIZE+8+\added(%rsp), %rsi

        /* Move RIP to its proper location */
        movq MCOUNT_REG_SIZE+\added(%rsp), %rdi
        movq %rdi, RIP(%rsp)

        /*
         * Now %rdi (the first parameter) has the return address of
         * where ftrace_call returns. But the callbacks expect the
         * address of the call itself.
         */
        subq $MCOUNT_INSN_SIZE, %rdi
        .endm

.macro restore_mcount_regs
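        /*
         * Restore only the registers that save_mcount_regs saved;
         * ftrace_regs_caller restores the remaining pt_regs registers
         * itself before invoking this macro.
         */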
        movq R9(%rsp), %r9
        movq R8(%rsp), %r8
        movq RDI(%rsp), %rdi
        movq RSI(%rsp), %rsi
        movq RDX(%rsp), %rdx
        movq RCX(%rsp), %rcx
        movq RAX(%rsp), %rax

        /* ftrace_regs_caller can modify %rbp */
        movq RBP(%rsp), %rbp

        addq $MCOUNT_REG_SIZE, %rsp

        .endm

#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(function_hook)
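        /*
         * With DYNAMIC_FTRACE, __fentry__ is just a return: all call
         * sites are patched to nops at boot, and enabled sites are
         * later redirected to ftrace_caller or ftrace_regs_caller.
         */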
        retq
ENDPROC(function_hook)

ENTRY(ftrace_caller)
        /* save_mcount_regs fills in first two parameters */
        save_mcount_regs

GLOBAL(ftrace_caller_op_ptr)
        /* Load the ftrace_ops into the 3rd parameter */
        movq function_trace_op(%rip), %rdx

        /* regs go into 4th parameter (but make it NULL) */
        movq $0, %rcx

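        /*
         * The call patched in below invokes a C callback with the
         * ftrace_func_t prototype:
         *
         *   void cb(unsigned long ip, unsigned long parent_ip,
         *           struct ftrace_ops *op, struct pt_regs *regs);
         */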
GLOBAL(ftrace_call)
        call ftrace_stub

        restore_mcount_regs

        /*
         * The code up to this label is copied into trampolines, so
         * think twice before adding any new code or changing the
         * layout here.
         */
GLOBAL(ftrace_epilogue)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
GLOBAL(ftrace_graph_call)
        jmp ftrace_stub
#endif

/*
 * This is weak to keep gas from relaxing the jumps.
 * It is also used to copy the retq for trampolines.
 */
WEAK(ftrace_stub)
        retq
ENDPROC(ftrace_caller)

ENTRY(ftrace_regs_caller)
        /* Save the current flags before any operations that can change them */
        pushfq

        /* added 8 bytes to save flags */
        save_mcount_regs 8
        /* save_mcount_regs fills in first two parameters */

GLOBAL(ftrace_regs_caller_op_ptr)
        /* Load the ftrace_ops into the 3rd parameter */
        movq function_trace_op(%rip), %rdx

        /* Save the rest of pt_regs */
        movq %r15, R15(%rsp)
        movq %r14, R14(%rsp)
        movq %r13, R13(%rsp)
        movq %r12, R12(%rsp)
        movq %r11, R11(%rsp)
        movq %r10, R10(%rsp)
        movq %rbx, RBX(%rsp)
        /* Copy saved flags */
        movq MCOUNT_REG_SIZE(%rsp), %rcx
        movq %rcx, EFLAGS(%rsp)
        /* Kernel segments */
        movq $__KERNEL_DS, %rcx
        movq %rcx, SS(%rsp)
        movq $__KERNEL_CS, %rcx
        movq %rcx, CS(%rsp)
        /* Stack - skipping return address and flags */
        leaq MCOUNT_REG_SIZE+8*2(%rsp), %rcx
        movq %rcx, RSP(%rsp)

        /* regs go into 4th parameter */
        leaq (%rsp), %rcx

GLOBAL(ftrace_regs_call)
        call ftrace_stub

        /* Copy the (possibly changed) flags back to the slot pushfq saved them in, to restore them */
        movq EFLAGS(%rsp), %rax
        movq %rax, MCOUNT_REG_SIZE(%rsp)

        /* Handlers can change the RIP */
        movq RIP(%rsp), %rax
        movq %rax, MCOUNT_REG_SIZE+8(%rsp)
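        /*
         * The (possibly updated) RIP now sits in the return-address
         * slot, so the final retq resumes wherever the handler chose.
         */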

        /* restore the rest of pt_regs */
        movq R15(%rsp), %r15
        movq R14(%rsp), %r14
        movq R13(%rsp), %r13
        movq R12(%rsp), %r12
        movq R10(%rsp), %r10
        movq RBX(%rsp), %rbx

        restore_mcount_regs

        /* Restore flags */
        popfq

        /*
         * As this jmp to ftrace_epilogue can be a short jump, it must
         * not be copied into the trampoline. The trampoline adds its
         * own code to jump back to the return address instead.
         */
GLOBAL(ftrace_regs_caller_end)

        jmp ftrace_epilogue

ENDPROC(ftrace_regs_caller)

#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(function_hook)
        cmpq $ftrace_stub, ftrace_trace_function
        jnz trace

fgraph_trace:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        cmpq $ftrace_stub, ftrace_graph_return
        jnz ftrace_graph_caller

        cmpq $ftrace_graph_entry_stub, ftrace_graph_entry
        jnz ftrace_graph_caller
#endif

GLOBAL(ftrace_stub)
        retq

trace:
        /* save_mcount_regs fills in first two parameters */
        save_mcount_regs

        /*
         * When DYNAMIC_FTRACE is not defined, ARCH_SUPPORTS_FTRACE_OPS is not
         * set (see include/asm/ftrace.h and include/linux/ftrace.h).  Only the
         * ip and parent ip are used and the list function is called when
         * function tracing is enabled.
         */
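        /* CALL_NOSPEC is the retpoline-safe indirect call from nospec-branch.h */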
        movq ftrace_trace_function, %r8
        CALL_NOSPEC %r8
        restore_mcount_regs

        jmp fgraph_trace
ENDPROC(function_hook)
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
        /* Saves rbp into %rdx and fills first parameter */
        save_mcount_regs

        leaq MCOUNT_REG_SIZE+8(%rsp), %rsi
        movq $0, %rdx   /* No frame pointer needed */
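        /*
         * prepare_ftrace_return(ip, &parent_ret_addr, fp) replaces the
         * parent's return address on the stack with return_to_handler,
         * hooking the traced function's return.
         */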
        call    prepare_ftrace_return

        restore_mcount_regs

        retq
ENDPROC(ftrace_graph_caller)

ENTRY(return_to_handler)
        UNWIND_HINT_EMPTY
        subq  $24, %rsp
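        /* 16 bytes for the two saved return values, plus 8 to keep %rsp aligned */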

        /* Save the return values */
        movq %rax, (%rsp)
        movq %rdx, 8(%rsp)
        movq %rbp, %rdi

        call ftrace_return_to_handler
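        /* Returns the original return address of the traced function in %rax */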

        movq %rax, %rdi
        movq 8(%rsp), %rdx
        movq (%rsp), %rax
        addq $24, %rsp
        JMP_NOSPEC %rdi
END(return_to_handler)
#endif