1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef _LINUX_TRACE_RECURSION_H |
3 | #define _LINUX_TRACE_RECURSION_H |
4 | |
5 | #include <linux/interrupt.h> |
6 | #include <linux/sched.h> |
7 | |
8 | #ifdef CONFIG_TRACING |
9 | |
10 | /* Only current can touch trace_recursion */ |
11 | |
12 | /* |
13 | * For function tracing recursion: |
14 | * The order of these bits are important. |
15 | * |
16 | * When function tracing occurs, the following steps are made: |
17 | * If arch does not support a ftrace feature: |
18 | * call internal function (uses INTERNAL bits) which calls... |
19 | * The function callback, which can use the FTRACE bits to |
20 | * check for recursion. |
21 | */ |
enum {
	/*
	 * Function recursion bits: one bit per context, in the fixed
	 * order NORMAL/SOFTIRQ/IRQ/NMI expected by trace_get_context_bit()
	 * (which computes an offset from the *_START bit), plus a
	 * TRANSITION bit for events that land between an interrupt entry
	 * and the preempt_count update.  Do not reorder.
	 */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,
	TRACE_FTRACE_TRANSITION_BIT,

	/* Internal use recursion bits (same per-context layout as above) */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,
	TRACE_INTERNAL_TRANSITION_BIT,

	TRACE_BRANCH_BIT,
	/*
	 * Abuse of the trace_recursion.
	 * As we need a way to maintain state if we are tracing the function
	 * graph in irq because we want to trace a particular function that
	 * was called in irq context but we have irq tracing off. Since this
	 * can only be modified by current, we can reuse trace_recursion.
	 */
	TRACE_IRQ_BIT,

	/* Set if the function is in the set_graph_function file */
	TRACE_GRAPH_BIT,

	/*
	 * In the very unlikely case that an interrupt came in
	 * at a start of graph tracing, and we want to trace
	 * the function in that interrupt, the depth can be greater
	 * than zero, because of the preempted start of a previous
	 * trace. In an even more unlikely case, depth could be 2
	 * if a softirq interrupted the start of graph tracing,
	 * followed by an interrupt preempting a start of graph
	 * tracing in the softirq, and depth can even be 3
	 * if an NMI came in at the start of an interrupt function
	 * that preempted a softirq start of a function that
	 * preempted normal context!!!! Luckily, it can't be
	 * greater than 3, so the next two bits are a mask
	 * of what the depth is when we set TRACE_GRAPH_BIT
	 */

	/* Two-bit field holding the graph-trace start depth (0-3). */
	TRACE_GRAPH_DEPTH_START_BIT,
	TRACE_GRAPH_DEPTH_END_BIT,

	/*
	 * To implement set_graph_notrace, if this bit is set, we ignore
	 * function graph tracing of called functions, until the return
	 * function is called to clear it.
	 */
	TRACE_GRAPH_NOTRACE_BIT,

	/* Used to prevent recursion recording from recursing. */
	TRACE_RECORD_RECURSION_BIT,
};
79 | |
/*
 * Bit accessors for current->trace_recursion.  Only the current task may
 * touch this word, so plain RMW (no atomics) is sufficient.
 */
#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))

/* Read the 2-bit graph-trace start depth stored at TRACE_GRAPH_DEPTH_START_BIT. */
#define trace_recursion_depth() \
	(((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
/* Store a depth of 0-3 into the 2-bit field (clears the field first). */
#define trace_recursion_set_depth(depth) \
	do {								\
		current->trace_recursion &=				\
			~(3 << TRACE_GRAPH_DEPTH_START_BIT);		\
		current->trace_recursion |=				\
			((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT;	\
	} while (0)

/* Number of per-context bits in each recursion group (NMI/IRQ/SIRQ/NORMAL). */
#define TRACE_CONTEXT_BITS	4

/* First bit of the function-callback recursion group. */
#define TRACE_FTRACE_START	TRACE_FTRACE_BIT

/* First bit of the internal (ftrace list) recursion group. */
#define TRACE_LIST_START	TRACE_INTERNAL_BIT

/* Mask covering both recursion groups' context bits. */
#define TRACE_CONTEXT_MASK	((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
101 | |
/*
 * Used for setting context
 *  NMI     = 0
 *  IRQ     = 1
 *  SOFTIRQ = 2
 *  NORMAL  = 3
 *
 * The order is the reverse of the interrupt nesting level so that
 * trace_get_context_bit() can compute TRACE_CTX_NORMAL - level.
 */
enum {
	TRACE_CTX_NMI,
	TRACE_CTX_IRQ,
	TRACE_CTX_SOFTIRQ,
	TRACE_CTX_NORMAL,
	/* Extra slot allowed once per context transition window. */
	TRACE_CTX_TRANSITION,
};
116 | |
117 | static __always_inline int trace_get_context_bit(void) |
118 | { |
119 | unsigned char bit = interrupt_context_level(); |
120 | |
121 | return TRACE_CTX_NORMAL - bit; |
122 | } |
123 | |
#ifdef CONFIG_FTRACE_RECORD_RECURSION
extern void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip);
/*
 * Record a detected recursion at @ip (called from @pip).  The
 * TRACE_RECORD_RECURSION_BIT guard keeps the recorder itself from
 * recursing if it happens to be traced.
 */
# define do_ftrace_record_recursion(ip, pip)				\
	do {								\
		if (!trace_recursion_test(TRACE_RECORD_RECURSION_BIT)) { \
			trace_recursion_set(TRACE_RECORD_RECURSION_BIT); \
			ftrace_record_recursion(ip, pip);		\
			trace_recursion_clear(TRACE_RECORD_RECURSION_BIT); \
		}							\
	} while (0)
#else
/* Recursion recording disabled: compile to nothing. */
# define do_ftrace_record_recursion(ip, pip)	do { } while (0)
#endif
137 | |
#ifdef CONFIG_ARCH_WANTS_NO_INSTR
/*
 * Evaluates to true (and warns once) when called while RCU is not
 * watching — tracing callbacks may use RCU, so this is unsafe.  Reuses
 * TRACE_RECORD_RECURSION_BIT so the WARN itself cannot recurse into
 * tracing.
 */
# define trace_warn_on_no_rcu(ip)					\
	({								\
		bool __ret = !rcu_is_watching();			\
		if (__ret && !trace_recursion_test(TRACE_RECORD_RECURSION_BIT)) { \
			trace_recursion_set(TRACE_RECORD_RECURSION_BIT); \
			WARN_ONCE(true, "RCU not on for: %pS\n", (void *)ip); \
			trace_recursion_clear(TRACE_RECORD_RECURSION_BIT); \
		}							\
		__ret;							\
	})
#else
# define trace_warn_on_no_rcu(ip)	false
#endif
152 | |
/*
 * Preemption is promised to be disabled when return bit >= 0.
 *
 * Returns the recursion bit that was claimed (to be passed to
 * trace_clear_recursion()), or -1 if recursion was detected (or RCU is
 * not watching) and the caller must not trace.
 */
static __always_inline int trace_test_and_set_recursion(unsigned long ip, unsigned long pip,
							int start)
{
	unsigned int val = READ_ONCE(current->trace_recursion);
	int bit;

	/* Tracing callbacks may use RCU; bail out if it is not watching. */
	if (trace_warn_on_no_rcu(ip))
		return -1;

	/* Bit for the current context (NMI/IRQ/softirq/normal) within @start's group. */
	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit))) {
		/*
		 * If an interrupt occurs during a trace, and another trace
		 * happens in that interrupt but before the preempt_count is
		 * updated to reflect the new interrupt context, then this
		 * will think a recursion occurred, and the event will be dropped.
		 * Let a single instance happen via the TRANSITION_BIT to
		 * not drop those events.
		 */
		bit = TRACE_CTX_TRANSITION + start;
		if (val & (1 << bit)) {
			/* Transition slot also taken: genuine recursion. */
			do_ftrace_record_recursion(ip, pip);
			return -1;
		}
	}

	/* Claim the bit; barrier keeps it set before any traced code runs. */
	val |= 1 << bit;
	current->trace_recursion = val;
	barrier();

	preempt_disable_notrace();

	return bit;
}
190 | |
/*
 * Preemption will be enabled (if it was previously enabled).
 *
 * @bit must be the value returned by a successful (>= 0)
 * trace_test_and_set_recursion().  The barrier keeps the bit set until
 * the traced region is truly finished.
 */
static __always_inline void trace_clear_recursion(int bit)
{
	preempt_enable_notrace();
	barrier();
	trace_recursion_clear(bit);
}
200 | |
201 | /** |
202 | * ftrace_test_recursion_trylock - tests for recursion in same context |
203 | * |
204 | * Use this for ftrace callbacks. This will detect if the function |
205 | * tracing recursed in the same context (normal vs interrupt), |
206 | * |
207 | * Returns: -1 if a recursion happened. |
208 | * >= 0 if no recursion. |
209 | */ |
210 | static __always_inline int ftrace_test_recursion_trylock(unsigned long ip, |
211 | unsigned long parent_ip) |
212 | { |
213 | return trace_test_and_set_recursion(ip, pip: parent_ip, TRACE_FTRACE_START); |
214 | } |
215 | |
/**
 * ftrace_test_recursion_unlock - called when function callback is complete
 * @bit: The return of a successful ftrace_test_recursion_trylock()
 *
 * This is used at the end of a ftrace callback.  It re-enables
 * preemption and releases the recursion bit claimed by the trylock.
 */
static __always_inline void ftrace_test_recursion_unlock(int bit)
{
	trace_clear_recursion(bit);
}
226 | |
227 | #endif /* CONFIG_TRACING */ |
228 | #endif /* _LINUX_TRACE_RECURSION_H */ |
229 | |