/* linux/include/linux/trace_recursion.h */
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _LINUX_TRACE_RECURSION_H
   3#define _LINUX_TRACE_RECURSION_H
   4
   5#include <linux/interrupt.h>
   6#include <linux/sched.h>
   7
   8#ifdef CONFIG_TRACING
   9
/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are made:
 *   If arch does not support a ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 */
/*
 * Recursion-protection state bits kept in current->trace_recursion.
 *
 * The FTRACE and INTERNAL sets each hold one bit per execution context
 * plus a transition bit. NOTE(review): trace_test_and_set_recursion()
 * indexes a set as "start + TRACE_CTX_*" (NMI=0 .. NORMAL=3,
 * TRANSITION=4), so the _NMI/_IRQ/_SIRQ suffixes below are historical
 * and do not line up one-to-one with the context that sets them.
 */
enum {
	/* Function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,
	TRACE_FTRACE_TRANSITION_BIT,

	/* Internal use recursion bits */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,
	TRACE_INTERNAL_TRANSITION_BIT,

	TRACE_BRANCH_BIT,
/*
 * Abuse of the trace_recursion.
 * As we need a way to maintain state if we are tracing the function
 * graph in irq because we want to trace a particular function that
 * was called in irq context but we have irq tracing off. Since this
 * can only be modified by current, we can reuse trace_recursion.
 */
	TRACE_IRQ_BIT,

	/* Set if the function is in the set_graph_function file */
	TRACE_GRAPH_BIT,

	/*
	 * In the very unlikely case that an interrupt came in
	 * at a start of graph tracing, and we want to trace
	 * the function in that interrupt, the depth can be greater
	 * than zero, because of the preempted start of a previous
	 * trace. In an even more unlikely case, depth could be 2
	 * if a softirq interrupted the start of graph tracing,
	 * followed by an interrupt preempting a start of graph
	 * tracing in the softirq, and depth can even be 3
	 * if an NMI came in at the start of an interrupt function
	 * that preempted a softirq start of a function that
	 * preempted normal context!!!! Luckily, it can't be
	 * greater than 3, so the next two bits are a mask
	 * of what the depth is when we set TRACE_GRAPH_BIT
	 */

	TRACE_GRAPH_DEPTH_START_BIT,
	TRACE_GRAPH_DEPTH_END_BIT,

	/*
	 * To implement set_graph_notrace, if this bit is set, we ignore
	 * function graph tracing of called functions, until the return
	 * function is called to clear it.
	 */
	TRACE_GRAPH_NOTRACE_BIT,

	/* Used to prevent recursion recording from recursing. */
	TRACE_RECORD_RECURSION_BIT,
};
  79
/*
 * Set/clear/test a single state bit in current->trace_recursion.
 * Only current may touch trace_recursion (see note at top of file),
 * so plain read-modify-write is sufficient — no atomics needed.
 */
#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))
  83
/*
 * Read the function-graph start depth (0-3) stored in the two bits at
 * TRACE_GRAPH_DEPTH_START_BIT (see the comment above the
 * TRACE_GRAPH_DEPTH_*_BIT definitions for why 3 is the maximum).
 */
#define trace_recursion_depth() \
	(((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
/* Store @depth (masked to 0-3) into the two graph-depth bits. */
#define trace_recursion_set_depth(depth) \
	do {								\
		current->trace_recursion &=				\
			~(3 << TRACE_GRAPH_DEPTH_START_BIT);		\
		current->trace_recursion |=				\
			((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT;	\
	} while (0)
  93
/*
 * Number of per-context recursion bits in each set: one for each of
 * TRACE_CTX_NMI/IRQ/SOFTIRQ/NORMAL (the transition bit is extra).
 */
#define TRACE_CONTEXT_BITS	4

/* First bit of the set used by the function tracer callbacks. */
#define TRACE_FTRACE_START	TRACE_FTRACE_BIT

/* First bit of the set used for the internal (TRACE_INTERNAL_*) bits. */
#define TRACE_LIST_START	TRACE_INTERNAL_BIT

/* Mask covering every context recursion bit of both sets. */
#define TRACE_CONTEXT_MASK	((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
 101
/*
 * Used for setting context
 *  NMI     = 0
 *  IRQ     = 1
 *  SOFTIRQ = 2
 *  NORMAL  = 3
 *
 * These values are offsets added to a set's start bit (see
 * trace_test_and_set_recursion()), so their order must match both
 * trace_get_context_bit() and the bit layout within each set.
 */
enum {
	TRACE_CTX_NMI,
	TRACE_CTX_IRQ,
	TRACE_CTX_SOFTIRQ,
	TRACE_CTX_NORMAL,
	TRACE_CTX_TRANSITION,
};
 116
 117static __always_inline int trace_get_context_bit(void)
 118{
 119        unsigned long pc = preempt_count();
 120
 121        if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
 122                return TRACE_CTX_NORMAL;
 123        else
 124                return pc & NMI_MASK ? TRACE_CTX_NMI :
 125                        pc & HARDIRQ_MASK ? TRACE_CTX_IRQ : TRACE_CTX_SOFTIRQ;
 126}
 127
#ifdef CONFIG_FTRACE_RECORD_RECURSION
extern void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip);
/*
 * Record a detected recursion (ip/parent ip of the offending call),
 * using TRACE_RECORD_RECURSION_BIT to keep the recording path itself
 * from recursing. No-op when CONFIG_FTRACE_RECORD_RECURSION is off.
 */
# define do_ftrace_record_recursion(ip, pip)				\
	do {								\
		if (!trace_recursion_test(TRACE_RECORD_RECURSION_BIT)) { \
			trace_recursion_set(TRACE_RECORD_RECURSION_BIT); \
			ftrace_record_recursion(ip, pip);		\
			trace_recursion_clear(TRACE_RECORD_RECURSION_BIT); \
		}							\
	} while (0)
#else
# define do_ftrace_record_recursion(ip, pip)	do { } while (0)
#endif
 141
 142static __always_inline int trace_test_and_set_recursion(unsigned long ip, unsigned long pip,
 143                                                        int start)
 144{
 145        unsigned int val = READ_ONCE(current->trace_recursion);
 146        int bit;
 147
 148        bit = trace_get_context_bit() + start;
 149        if (unlikely(val & (1 << bit))) {
 150                /*
 151                 * It could be that preempt_count has not been updated during
 152                 * a switch between contexts. Allow for a single recursion.
 153                 */
 154                bit = TRACE_CTX_TRANSITION + start;
 155                if (val & (1 << bit)) {
 156                        do_ftrace_record_recursion(ip, pip);
 157                        return -1;
 158                }
 159        }
 160
 161        val |= 1 << bit;
 162        current->trace_recursion = val;
 163        barrier();
 164
 165        return bit;
 166}
 167
 168static __always_inline void trace_clear_recursion(int bit)
 169{
 170        barrier();
 171        trace_recursion_clear(bit);
 172}
 173
/**
 * ftrace_test_recursion_trylock - tests for recursion in same context
 * @ip: The instruction pointer of the function being traced
 * @parent_ip: The instruction pointer of the caller of the traced function
 *
 * Use this for ftrace callbacks. This will detect if the function
 * tracing recursed in the same context (normal vs interrupt).
 *
 * Returns: -1 if a recursion happened.
 *           >= 0 if no recursion (pass to ftrace_test_recursion_unlock()).
 */
static __always_inline int ftrace_test_recursion_trylock(unsigned long ip,
							 unsigned long parent_ip)
{
	return trace_test_and_set_recursion(ip, parent_ip, TRACE_FTRACE_START);
}
 188
/**
 * ftrace_test_recursion_unlock - called when function callback is complete
 * @bit: The return of a successful ftrace_test_recursion_trylock()
 *
 * This is used at the end of a ftrace callback. Only call it when the
 * trylock returned >= 0 (a -1 value is not a valid bit to clear).
 */
static __always_inline void ftrace_test_recursion_unlock(int bit)
{
	trace_clear_recursion(bit);
}
 199
 200#endif /* CONFIG_TRACING */
 201#endif /* _LINUX_TRACE_RECURSION_H */
 202