// SPDX-License-Identifier: GPL-2.0
/*
 * Infrastructure to hook into function calls and returns.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 * Highly modified by Steven Rostedt (VMware).
 */
#include <linux/bits.h>
#include <linux/jump_label.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/static_call.h>
#include <linux/slab.h>

#include <trace/events/sched.h>

#include "ftrace_internal.h"
#include "trace.h"

/*
 * FGRAPH_FRAME_SIZE:	Size in bytes of the meta data on the shadow stack
 * FGRAPH_FRAME_OFFSET:	Size in long words of the meta data frame
 */
#define FGRAPH_FRAME_SIZE	sizeof(struct ftrace_ret_stack)
#define FGRAPH_FRAME_OFFSET	DIV_ROUND_UP(FGRAPH_FRAME_SIZE, sizeof(long))

/*
 * On entry to a function (via function_graph_enter()), a new fgraph frame
 * (ftrace_ret_stack) is pushed onto the stack as well as a word that
 * holds a bitmask and a type (called "bitmap"). The bitmap is defined as:
 *
 * bits:  0 -  9	offset in words from the previous ftrace_ret_stack
 *
 * bits: 10 - 11	Type of storage
 *			  0 - reserved
 *			  1 - bitmap of fgraph_array index
 *			  2 - reserved data
 *
 * For type with "bitmap of fgraph_array index" (FGRAPH_TYPE_BITMAP):
 *  bits: 12 - 27	The bitmap of fgraph_ops fgraph_array index
 *			That is, it's a bitmask of 0-15 (16 bits)
 *			where if a corresponding ops in the fgraph_array[]
 *			expects a callback from the return of the function
 *			being traced, its corresponding bit will be set.
 *
 *
 * The top of the ret_stack (when not empty) will always have a reference
 * word that points to the last fgraph frame that was saved.
 *
 * For reserved data:
 *  bits: 12 - 17	The size in words that is stored
 *  bits: 18 - 23	The index of fgraph_array, which shows who is stored
 *
 * That is, at the end of function_graph_enter(), if the first and fourth
 * fgraph_ops on the fgraph_array[] (index 0 and 3) need their retfunc called
 * on the return of the function being traced, and the fourth fgraph_ops
 * stored two words of data, this is what will be on the task's shadow
 * ret_stack: (the stack grows upward)
 *
 *  ret_stack[SHADOW_STACK_OFFSET]
 * | SHADOW_STACK_TASK_VARS(ret_stack)[15]      |
 * ...
 * | SHADOW_STACK_TASK_VARS(ret_stack)[0]       |
 *  ret_stack[SHADOW_STACK_MAX_OFFSET]
 * ...
 * |                                            | <- task->curr_ret_stack
 * +--------------------------------------------+
 * | (3 << 12) | (3 << 10) | FGRAPH_FRAME_OFFSET|
 * |         *or put another way*               |
 * | (3 << FGRAPH_DATA_INDEX_SHIFT)| \          | This is for fgraph_ops[3].
 * | ((2 - 1) << FGRAPH_DATA_SHIFT)| \          | The data size is 2 words.
 * | (FGRAPH_TYPE_DATA << FGRAPH_TYPE_SHIFT)| \ |
 * | (offset2:FGRAPH_FRAME_OFFSET+3)            | <- the offset2 is from here
 * +--------------------------------------------+ ( It is 4 words from the ret_stack)
 * | STORED DATA WORD 2                         |
 * | STORED DATA WORD 1                         |
 * +--------------------------------------------+
 * | (9 << 12) | (1 << 10) | FGRAPH_FRAME_OFFSET|
 * |         *or put another way*               |
 * | (BIT(3)|BIT(0)) << FGRAPH_INDEX_SHIFT      | \
 * | FGRAPH_TYPE_BITMAP << FGRAPH_TYPE_SHIFT    | \
 * | (offset1:FGRAPH_FRAME_OFFSET)              | <- the offset1 is from here
 * +--------------------------------------------+
 * | struct ftrace_ret_stack                    |
 * |   (stores the saved ret pointer)           | <- the offset points here
 * +--------------------------------------------+
 * |     (X) | (N)                              | ( N words away from
 * |                                            |   previous ret_stack)
 * ...
 *  ret_stack[0]
 *
 * If a backtrace is required, and the real return pointer needs to be
 * fetched, then it looks at the task's curr_ret_stack offset, if it
 * is greater than zero (reserved, or right before popped), it would mask
 * the value by FGRAPH_FRAME_OFFSET_MASK to get the offset of the
 * ftrace_ret_stack structure stored on the shadow stack.
 */

/*
 * The following is for the top word on the stack:
 *
 *   FGRAPH_FRAME_OFFSET (0-9) holds the offset delta to the fgraph frame
 *   FGRAPH_TYPE (10-11) holds the type of word this is.
 *     (RESERVED or BITMAP)
 */
#define FGRAPH_FRAME_OFFSET_BITS	10
#define FGRAPH_FRAME_OFFSET_MASK	GENMASK(FGRAPH_FRAME_OFFSET_BITS - 1, 0)

#define FGRAPH_TYPE_BITS	2
#define FGRAPH_TYPE_MASK	GENMASK(FGRAPH_TYPE_BITS - 1, 0)
#define FGRAPH_TYPE_SHIFT	FGRAPH_FRAME_OFFSET_BITS

enum {
	FGRAPH_TYPE_RESERVED	= 0,
	FGRAPH_TYPE_BITMAP	= 1,
	FGRAPH_TYPE_DATA	= 2,
};

/*
 * For BITMAP type:
 *   FGRAPH_INDEX (12-27) bits holding the bitmap of gops indexes that want
 *     their return callback called
 */
#define FGRAPH_INDEX_BITS	16
#define FGRAPH_INDEX_MASK	GENMASK(FGRAPH_INDEX_BITS - 1, 0)
#define FGRAPH_INDEX_SHIFT	(FGRAPH_TYPE_SHIFT + FGRAPH_TYPE_BITS)

/*
 * For DATA type:
 *   FGRAPH_DATA (12-17) bits hold the size of data (in words)
 *   FGRAPH_INDEX (18-23) bits hold the index for which gops->idx the data is for
 *
 * Note:
 *   data_size == 0 means 1 word, and 31 (=2^5 - 1) means 32 words.
 */
#define FGRAPH_DATA_BITS	5
#define FGRAPH_DATA_MASK	GENMASK(FGRAPH_DATA_BITS - 1, 0)
#define FGRAPH_DATA_SHIFT	(FGRAPH_TYPE_SHIFT + FGRAPH_TYPE_BITS)
#define FGRAPH_MAX_DATA_SIZE (sizeof(long) * (1 << FGRAPH_DATA_BITS))

#define FGRAPH_DATA_INDEX_BITS	4
#define FGRAPH_DATA_INDEX_MASK	GENMASK(FGRAPH_DATA_INDEX_BITS - 1, 0)
#define FGRAPH_DATA_INDEX_SHIFT	(FGRAPH_DATA_SHIFT + FGRAPH_DATA_BITS)

#define FGRAPH_MAX_INDEX	\
	((FGRAPH_INDEX_SIZE << FGRAPH_DATA_BITS) + FGRAPH_RET_INDEX)

#define FGRAPH_ARRAY_SIZE	FGRAPH_INDEX_BITS
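
/*
 * Illustrative sketch (an editorial addition, not part of the original
 * file): decoding the fields of a shadow-stack tag word with the masks
 * above. The guard macro FGRAPH_EXAMPLE_SKETCHES is hypothetical and
 * never defined, so this compiles away.
 */
#ifdef FGRAPH_EXAMPLE_SKETCHES
static void fgraph_example_decode_word(unsigned long word)
{
	/* Words back to the ftrace_ret_stack that this tag word describes */
	int offset = word & FGRAPH_FRAME_OFFSET_MASK;
	/* FGRAPH_TYPE_RESERVED, FGRAPH_TYPE_BITMAP or FGRAPH_TYPE_DATA */
	int type = (word >> FGRAPH_TYPE_SHIFT) & FGRAPH_TYPE_MASK;

	if (type == FGRAPH_TYPE_BITMAP) {
		/* One bit per fgraph_array index wanting a return callback */
		unsigned long bitmap = (word >> FGRAPH_INDEX_SHIFT) & FGRAPH_INDEX_MASK;

		pr_info("offset=%d bitmap=%lx\n", offset, bitmap);
	} else if (type == FGRAPH_TYPE_DATA) {
		/* Stored size is biased by one: 0 encodes 1 word */
		int size = ((word >> FGRAPH_DATA_SHIFT) & FGRAPH_DATA_MASK) + 1;
		int idx = (word >> FGRAPH_DATA_INDEX_SHIFT) & FGRAPH_DATA_INDEX_MASK;

		pr_info("offset=%d data words=%d for gops idx=%d\n", offset, size, idx);
	}
}
#endif /* FGRAPH_EXAMPLE_SKETCHES */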

/*
 * SHADOW_STACK_SIZE:	The size in bytes of the entire shadow stack
 * SHADOW_STACK_OFFSET:	The size in long words of the shadow stack
 * SHADOW_STACK_MAX_OFFSET: The max offset of the stack for a new frame to be added
 */
#define SHADOW_STACK_SIZE	(4096)
#define SHADOW_STACK_OFFSET	(SHADOW_STACK_SIZE / sizeof(long))
/* Leave a buffer at the end */
#define SHADOW_STACK_MAX_OFFSET \
	(SHADOW_STACK_OFFSET - (FGRAPH_FRAME_OFFSET + 1 + FGRAPH_ARRAY_SIZE))

/* RET_STACK(): Return the frame from a given @offset from task @t */
#define RET_STACK(t, offset) ((struct ftrace_ret_stack *)(&(t)->ret_stack[offset]))

/*
 * Each fgraph_ops has a reserved unsigned long at the end (top) of the
 * ret_stack to store task specific state.
 */
#define SHADOW_STACK_TASK_VARS(ret_stack) \
	((unsigned long *)(&(ret_stack)[SHADOW_STACK_OFFSET - FGRAPH_ARRAY_SIZE]))

DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph);
int ftrace_graph_active;

static struct kmem_cache *fgraph_stack_cachep;

static struct fgraph_ops *fgraph_array[FGRAPH_ARRAY_SIZE];
static unsigned long fgraph_array_bitmask;

/* LRU index table for fgraph_array */
static int fgraph_lru_table[FGRAPH_ARRAY_SIZE];
static int fgraph_lru_next;
static int fgraph_lru_last;

/* Initialize fgraph_lru_table with unused index */
static void fgraph_lru_init(void)
{
	int i;

	for (i = 0; i < FGRAPH_ARRAY_SIZE; i++)
		fgraph_lru_table[i] = i;
}

/* Release the used index to the LRU table */
static int fgraph_lru_release_index(int idx)
{
	if (idx < 0 || idx >= FGRAPH_ARRAY_SIZE ||
	    WARN_ON_ONCE(fgraph_lru_table[fgraph_lru_last] != -1))
		return -1;

	fgraph_lru_table[fgraph_lru_last] = idx;
	fgraph_lru_last = (fgraph_lru_last + 1) % FGRAPH_ARRAY_SIZE;

	clear_bit(idx, &fgraph_array_bitmask);
	return 0;
}

/* Allocate a new index from the LRU table */
static int fgraph_lru_alloc_index(void)
{
	int idx = fgraph_lru_table[fgraph_lru_next];

	/* No id is available */
	if (idx == -1)
		return -1;

	fgraph_lru_table[fgraph_lru_next] = -1;
	fgraph_lru_next = (fgraph_lru_next + 1) % FGRAPH_ARRAY_SIZE;

	set_bit(idx, &fgraph_array_bitmask);
	return idx;
}

/* Get the offset to the fgraph frame from a ret_stack value */
static inline int __get_offset(unsigned long val)
{
	return val & FGRAPH_FRAME_OFFSET_MASK;
}

/* Get the type of word from a ret_stack value */
static inline int __get_type(unsigned long val)
{
	return (val >> FGRAPH_TYPE_SHIFT) & FGRAPH_TYPE_MASK;
}

/* Get the data_index for a DATA type ret_stack word */
static inline int __get_data_index(unsigned long val)
{
	return (val >> FGRAPH_DATA_INDEX_SHIFT) & FGRAPH_DATA_INDEX_MASK;
}

/* Get the data_size for a DATA type ret_stack word */
static inline int __get_data_size(unsigned long val)
{
	return ((val >> FGRAPH_DATA_SHIFT) & FGRAPH_DATA_MASK) + 1;
}

/* Get the word from the ret_stack at @offset */
static inline unsigned long get_fgraph_entry(struct task_struct *t, int offset)
{
	return t->ret_stack[offset];
}

/* Get the FRAME_OFFSET from the word from the @offset on ret_stack */
static inline int get_frame_offset(struct task_struct *t, int offset)
{
	return __get_offset(t->ret_stack[offset]);
}

/* For BITMAP type: get the bitmask from the @offset at ret_stack */
static inline unsigned long
get_bitmap_bits(struct task_struct *t, int offset)
{
	return (t->ret_stack[offset] >> FGRAPH_INDEX_SHIFT) & FGRAPH_INDEX_MASK;
}

/* Write the bitmap to the ret_stack at @offset (does index, offset and bitmask) */
static inline void
set_bitmap(struct task_struct *t, int offset, unsigned long bitmap)
{
	t->ret_stack[offset] = (bitmap << FGRAPH_INDEX_SHIFT) |
		(FGRAPH_TYPE_BITMAP << FGRAPH_TYPE_SHIFT) | FGRAPH_FRAME_OFFSET;
}

/* For DATA type: get the data saved under the ret_stack word at @offset */
static inline void *get_data_type_data(struct task_struct *t, int offset)
{
	unsigned long val = t->ret_stack[offset];

	if (__get_type(val) != FGRAPH_TYPE_DATA)
		return NULL;
	offset -= __get_data_size(val);
	return (void *)&t->ret_stack[offset];
}

/* Create the ret_stack word for a DATA type */
static inline unsigned long make_data_type_val(int idx, int size, int offset)
{
	return (idx << FGRAPH_DATA_INDEX_SHIFT) |
		((size - 1) << FGRAPH_DATA_SHIFT) |
		(FGRAPH_TYPE_DATA << FGRAPH_TYPE_SHIFT) | offset;
}
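
/*
 * Illustrative sketch (editorial addition, hypothetical guard): the
 * encode and decode helpers above are inverses of each other, which a
 * quick self-check makes explicit.
 */
#ifdef FGRAPH_EXAMPLE_SKETCHES
static void fgraph_example_data_word_roundtrip(void)
{
	/* A DATA word for gops index 3, 2 words of data, offset 6 */
	unsigned long val = make_data_type_val(3, 2, 6);

	WARN_ON(__get_type(val) != FGRAPH_TYPE_DATA);
	WARN_ON(__get_data_index(val) != 3);
	WARN_ON(__get_data_size(val) != 2);	/* size is stored biased by 1 */
	WARN_ON(__get_offset(val) != 6);
}
#endif /* FGRAPH_EXAMPLE_SKETCHES */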

/* ftrace_graph_entry set to this to tell some archs to run function graph */
static int entry_run(struct ftrace_graph_ent *trace, struct fgraph_ops *ops,
		     struct ftrace_regs *fregs)
{
	return 0;
}

/* ftrace_graph_return set to this to tell some archs to run function graph */
static void return_run(struct ftrace_graph_ret *trace, struct fgraph_ops *ops,
		       struct ftrace_regs *fregs)
{
}

static void ret_stack_set_task_var(struct task_struct *t, int idx, long val)
{
	unsigned long *gvals = SHADOW_STACK_TASK_VARS(t->ret_stack);

	gvals[idx] = val;
}

static unsigned long *
ret_stack_get_task_var(struct task_struct *t, int idx)
{
	unsigned long *gvals = SHADOW_STACK_TASK_VARS(t->ret_stack);

	return &gvals[idx];
}

static void ret_stack_init_task_vars(unsigned long *ret_stack)
{
	unsigned long *gvals = SHADOW_STACK_TASK_VARS(ret_stack);

	memset(gvals, 0, sizeof(*gvals) * FGRAPH_ARRAY_SIZE);
}

/**
 * fgraph_reserve_data - Reserve storage on the task's ret_stack
 * @idx:	The index of fgraph_array
 * @size_bytes: The size in bytes to reserve
 *
 * Reserves space of up to FGRAPH_MAX_DATA_SIZE bytes on the
 * task's ret_stack shadow stack, for a given fgraph_ops during
 * the entryfunc() call. If entryfunc() returns zero, the storage
 * is discarded. An entryfunc() can only call this once per iteration.
 * The fgraph_ops retfunc() can retrieve this stored data with
 * fgraph_retrieve_data().
 *
 * Returns: On success, a pointer to the data on the stack.
 *   Otherwise, NULL if there's not enough space left on the
 *   ret_stack for the data, or if fgraph_reserve_data() was called
 *   more than once for a single entryfunc() call.
 */
void *fgraph_reserve_data(int idx, int size_bytes)
{
	unsigned long val;
	void *data;
	int curr_ret_stack = current->curr_ret_stack;
	int data_size;

	if (size_bytes > FGRAPH_MAX_DATA_SIZE)
		return NULL;

	/* Convert the data size to number of longs. */
	data_size = (size_bytes + sizeof(long) - 1) >> (sizeof(long) == 4 ? 2 : 3);

	val = get_fgraph_entry(current, curr_ret_stack - 1);
	data = &current->ret_stack[curr_ret_stack];

	curr_ret_stack += data_size + 1;
	if (unlikely(curr_ret_stack >= SHADOW_STACK_MAX_OFFSET))
		return NULL;

	val = make_data_type_val(idx, data_size, __get_offset(val) + data_size + 1);

	/* Set the last word to be reserved */
	current->ret_stack[curr_ret_stack - 1] = val;

	/* Make sure interrupts see this */
	barrier();
	current->curr_ret_stack = curr_ret_stack;
	/* Again sync with interrupts, and reset reserve */
	current->ret_stack[curr_ret_stack - 1] = val;

	return data;
}
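
/*
 * Illustrative sketch (editorial addition, hypothetical guard): an
 * entryfunc() stashing a timestamp for its matching retfunc(). The
 * callback names are made up; the API calls are the ones in this file.
 */
#ifdef FGRAPH_EXAMPLE_SKETCHES
static int example_latency_entry(struct ftrace_graph_ent *trace,
				 struct fgraph_ops *gops,
				 struct ftrace_regs *fregs)
{
	u64 *ts;

	ts = fgraph_reserve_data(gops->idx, sizeof(*ts));
	if (!ts)
		return 0;	/* no room: skip tracing this function */
	*ts = trace_clock_local();
	return 1;		/* ask for the return callback */
}
#endif /* FGRAPH_EXAMPLE_SKETCHES */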

/**
 * fgraph_retrieve_data - Retrieve stored data from fgraph_reserve_data()
 * @idx:	the index of fgraph_array (fgraph_ops::idx)
 * @size_bytes: pointer to retrieved data size.
 *
 * This is to be called by a fgraph_ops retfunc(), to retrieve data that
 * was stored by the fgraph_ops entryfunc() on the function entry.
 * That is, this will retrieve the data that was reserved on the
 * entry of the function that corresponds to the exit of the function
 * that the fgraph_ops retfunc() is called on.
 *
 * Returns: The stored data from fgraph_reserve_data() called by the
 *   matching entryfunc() for the retfunc() this is called from.
 *   Or NULL if there was nothing stored.
 */
void *fgraph_retrieve_data(int idx, int *size_bytes)
{
	return fgraph_retrieve_parent_data(idx, size_bytes, 0);
}
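
/*
 * Illustrative sketch (editorial addition, hypothetical guard): the
 * retfunc() half of the example above, pulling back the timestamp that
 * the entryfunc() reserved and computing a duration.
 */
#ifdef FGRAPH_EXAMPLE_SKETCHES
static void example_latency_return(struct ftrace_graph_ret *trace,
				   struct fgraph_ops *gops,
				   struct ftrace_regs *fregs)
{
	u64 *ts;
	int size;

	ts = fgraph_retrieve_data(gops->idx, &size);
	if (!ts)
		return;	/* nothing was reserved on entry */
	pr_info("%ps took %llu ns\n", (void *)trace->func,
		trace_clock_local() - *ts);
}
#endif /* FGRAPH_EXAMPLE_SKETCHES */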

/**
 * fgraph_get_task_var - retrieve a task specific state variable
 * @gops: The ftrace_ops that owns the task specific variable
 *
 * Every registered fgraph_ops has a task state variable
 * reserved on the task's ret_stack. This function returns the
 * address to that variable.
 *
 * Returns the address of the fgraph_ops @gops task-specific
 * unsigned long variable.
 */
unsigned long *fgraph_get_task_var(struct fgraph_ops *gops)
{
	return ret_stack_get_task_var(current, gops->idx);
}
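
/*
 * Illustrative sketch (editorial addition, hypothetical guard): the
 * per-task variable is a single long that persists across entry and
 * return callbacks, e.g. to track how deeply this tracer has nested
 * in the current task.
 */
#ifdef FGRAPH_EXAMPLE_SKETCHES
static int example_depth_entry(struct ftrace_graph_ent *trace,
			       struct fgraph_ops *gops,
			       struct ftrace_regs *fregs)
{
	unsigned long *depth = fgraph_get_task_var(gops);

	(*depth)++;		/* remember how deep this task is */
	return 1;
}

static void example_depth_return(struct ftrace_graph_ret *trace,
				 struct fgraph_ops *gops,
				 struct ftrace_regs *fregs)
{
	unsigned long *depth = fgraph_get_task_var(gops);

	if (*depth)
		(*depth)--;
}
#endif /* FGRAPH_EXAMPLE_SKETCHES */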

/*
 * @offset: The offset into @t->ret_stack to find the ret_stack entry
 * @frame_offset: Where to place the offset into @t->ret_stack of that entry
 *
 * Returns a pointer to the previous ret_stack below @offset or NULL
 *   when it reaches the bottom of the stack.
 *
 * Calling this with:
 *
 *   offset = task->curr_ret_stack;
 *   do {
 *	ret_stack = get_ret_stack(task, offset, &offset);
 *   } while (ret_stack);
 *
 * Will iterate through all the ret_stack entries from curr_ret_stack
 * down to the first one.
 */
static inline struct ftrace_ret_stack *
get_ret_stack(struct task_struct *t, int offset, int *frame_offset)
{
	int offs;

	BUILD_BUG_ON(FGRAPH_FRAME_SIZE % sizeof(long));

	if (unlikely(offset <= 0))
		return NULL;

	offs = get_frame_offset(t, --offset);
	if (WARN_ON_ONCE(offs <= 0 || offs > offset))
		return NULL;

	offset -= offs;

	*frame_offset = offset;
	return RET_STACK(t, offset);
}

/**
 * fgraph_retrieve_parent_data - get data from a parent function
 * @idx:	The index into the fgraph_array (fgraph_ops::idx)
 * @size_bytes: A pointer to retrieved data size
 * @depth:	The depth to find the parent (0 is the current function)
 *
 * This is similar to fgraph_retrieve_data() but can be used to retrieve
 * data from a parent caller function.
 *
 * Return: a pointer to the specified parent data or NULL if not found
 */
void *fgraph_retrieve_parent_data(int idx, int *size_bytes, int depth)
{
	struct ftrace_ret_stack *ret_stack = NULL;
	int offset = current->curr_ret_stack;
	unsigned long val;

	if (offset <= 0)
		return NULL;

	for (;;) {
		int next_offset;

		ret_stack = get_ret_stack(current, offset, &next_offset);
		if (!ret_stack || --depth < 0)
			break;
		offset = next_offset;
	}

	if (!ret_stack)
		return NULL;

	offset--;

	val = get_fgraph_entry(current, offset);
	while (__get_type(val) == FGRAPH_TYPE_DATA) {
		if (__get_data_index(val) == idx)
			goto found;
		offset -= __get_data_size(val) + 1;
		val = get_fgraph_entry(current, offset);
	}
	return NULL;
found:
	if (size_bytes)
		*size_bytes = __get_data_size(val) * sizeof(long);
	return get_data_type_data(current, offset);
}
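
/*
 * Illustrative sketch (editorial addition, hypothetical guard): from a
 * retfunc(), depth 1 reaches the data this same fgraph_ops reserved
 * when the *caller* of the current function was entered.
 */
#ifdef FGRAPH_EXAMPLE_SKETCHES
static void example_parent_peek(struct fgraph_ops *gops)
{
	int size;
	u64 *parent_ts = fgraph_retrieve_parent_data(gops->idx, &size, 1);

	if (parent_ts)
		pr_info("caller was entered at %llu\n", *parent_ts);
}
#endif /* FGRAPH_EXAMPLE_SKETCHES */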

/* Enabled by default (can be cleared by function_graph tracer flags) */
bool fgraph_sleep_time = true;

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * archs can override this function if they must do something
 * to enable hook for graph tracer.
 */
int __weak ftrace_enable_ftrace_graph_caller(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * to disable hook for graph tracer.
 */
int __weak ftrace_disable_ftrace_graph_caller(void)
{
	return 0;
}
#endif

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace,
			    struct fgraph_ops *gops,
			    struct ftrace_regs *fregs)
{
	return 0;
}

static void ftrace_graph_ret_stub(struct ftrace_graph_ret *trace,
				  struct fgraph_ops *gops,
				  struct ftrace_regs *fregs)
{
}

static struct fgraph_ops fgraph_stub = {
	.entryfunc = ftrace_graph_entry_stub,
	.retfunc = ftrace_graph_ret_stub,
};

static struct fgraph_ops *fgraph_direct_gops = &fgraph_stub;
DEFINE_STATIC_CALL(fgraph_func, ftrace_graph_entry_stub);
DEFINE_STATIC_CALL(fgraph_retfunc, ftrace_graph_ret_stub);
static DEFINE_STATIC_KEY_TRUE(fgraph_do_direct);

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	static_branch_enable(&kill_ftrace_graph);
}

/* Add a function return address to the trace stack on thread info.*/
static int
ftrace_push_return_trace(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp,
			 int fgraph_idx)
{
	struct ftrace_ret_stack *ret_stack;
	unsigned long val;
	int offset;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	BUILD_BUG_ON(SHADOW_STACK_SIZE % sizeof(long));

	/* Set val to "reserved" with the delta to the new fgraph frame */
	val = (FGRAPH_TYPE_RESERVED << FGRAPH_TYPE_SHIFT) | FGRAPH_FRAME_OFFSET;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/*
	 * Check if there's room on the shadow stack to fit a fgraph frame
	 * and a bitmap word.
	 */
	if (current->curr_ret_stack + FGRAPH_FRAME_OFFSET + 1 >= SHADOW_STACK_MAX_OFFSET) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	offset = READ_ONCE(current->curr_ret_stack);
	ret_stack = RET_STACK(current, offset);
	offset += FGRAPH_FRAME_OFFSET;

	/* ret offset = FGRAPH_FRAME_OFFSET ; type = reserved */
	current->ret_stack[offset] = val;
	ret_stack->ret = ret;
	/*
	 * The unwinders expect curr_ret_stack to point to either zero
	 * or an offset where to find the next ret_stack. Even though the
	 * ret stack might be bogus, we want to write the ret and the
	 * offset to find the ret_stack before we increment the stack pointer.
	 * If an interrupt comes in now before we increment the curr_ret_stack
	 * it may blow away what we wrote. But that's fine, because the
	 * offset will still be correct (even though the 'ret' won't be).
	 * What we worry about is the offset being correct after we increment
	 * the curr_ret_stack and before we update that offset, as if an
	 * interrupt comes in and does an unwind stack dump, it will need
	 * at least a correct offset!
	 */
	barrier();
	WRITE_ONCE(current->curr_ret_stack, offset + 1);
	/*
	 * This next barrier is to ensure that an interrupt coming in
	 * will not corrupt what we are about to write.
	 */
	barrier();

	/* Still keep it reserved even if an interrupt came in */
	current->ret_stack[offset] = val;

	ret_stack->ret = ret;
	ret_stack->func = func;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	ret_stack->fp = frame_pointer;
#endif
	ret_stack->retp = retp;
	return offset;
}

/*
 * Not all archs define MCOUNT_INSN_SIZE which is used to look for direct
 * functions. But those archs currently don't support direct functions
 * anyway, and ftrace_find_rec_direct() is just a stub for them.
 * Define MCOUNT_INSN_SIZE to keep those archs compiling.
 */
#ifndef MCOUNT_INSN_SIZE
/* Make sure this only works without direct calls */
# ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
#  error MCOUNT_INSN_SIZE not defined with direct calls enabled
# endif
# define MCOUNT_INSN_SIZE 0
#endif

/* If the caller does not use ftrace, call this function. */
int function_graph_enter_regs(unsigned long ret, unsigned long func,
			      unsigned long frame_pointer, unsigned long *retp,
			      struct ftrace_regs *fregs)
{
	struct ftrace_graph_ent trace;
	unsigned long bitmap = 0;
	int offset;
	int bit;
	int i;

	bit = ftrace_test_recursion_trylock(func, ret);
	if (bit < 0)
		return -EBUSY;

	trace.func = func;
	trace.depth = ++current->curr_ret_depth;

	offset = ftrace_push_return_trace(ret, func, frame_pointer, retp, 0);
	if (offset < 0)
		goto out;

#ifdef CONFIG_HAVE_STATIC_CALL
	if (static_branch_likely(&fgraph_do_direct)) {
		int save_curr_ret_stack = current->curr_ret_stack;

		if (static_call(fgraph_func)(&trace, fgraph_direct_gops, fregs))
			bitmap |= BIT(fgraph_direct_gops->idx);
		else
			/* Clear out any saved storage */
			current->curr_ret_stack = save_curr_ret_stack;
	} else
#endif
	{
		for_each_set_bit(i, &fgraph_array_bitmask,
				 sizeof(fgraph_array_bitmask) * BITS_PER_BYTE) {
			struct fgraph_ops *gops = READ_ONCE(fgraph_array[i]);
			int save_curr_ret_stack;

			if (gops == &fgraph_stub)
				continue;

			save_curr_ret_stack = current->curr_ret_stack;
			if (ftrace_ops_test(&gops->ops, func, NULL) &&
			    gops->entryfunc(&trace, gops, fregs))
				bitmap |= BIT(i);
			else
				/* Clear out any saved storage */
				current->curr_ret_stack = save_curr_ret_stack;
		}
	}

	if (!bitmap)
		goto out_ret;

	/*
	 * Since this function uses fgraph_idx = 0 as a tail-call checking
	 * flag, set that bit always.
	 */
	set_bitmap(current, offset, bitmap | BIT(0));
	ftrace_test_recursion_unlock(bit);
	return 0;
out_ret:
	current->curr_ret_stack -= FGRAPH_FRAME_OFFSET + 1;
out:
	current->curr_ret_depth--;
	ftrace_test_recursion_unlock(bit);
	return -EBUSY;
}

/* Retrieve a function return address from the trace stack on thread info.*/
static struct ftrace_ret_stack *
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer, int *offset)
{
	struct ftrace_ret_stack *ret_stack;

	ret_stack = get_ret_stack(current, current->curr_ret_stack, offset);

	if (unlikely(!ret_stack)) {
		ftrace_graph_stop();
		WARN(1, "Bad function graph ret_stack pointer: %d",
		     current->curr_ret_stack);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return NULL;
	}

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 *  is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(ret_stack->fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     ret_stack->fp,
		     frame_pointer,
		     (void *)ret_stack->func,
		     ret_stack->ret);
		*ret = (unsigned long)panic;
		return NULL;
	}
#endif

	*offset += FGRAPH_FRAME_OFFSET;
	*ret = ret_stack->ret;
	trace->func = ret_stack->func;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = current->curr_ret_depth;
	/*
	 * We still want to trace interrupts coming in if
	 * max_depth is set to 1. Make sure the decrement is
	 * seen before ftrace_graph_return.
	 */
	barrier();

	return ret_stack;
}

/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
			     void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block ftrace_suspend_notifier = {
	.notifier_call = ftrace_suspend_notifier_call,
};

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
static inline unsigned long
__ftrace_return_to_handler(struct ftrace_regs *fregs, unsigned long frame_pointer)
{
	struct ftrace_ret_stack *ret_stack;
	struct ftrace_graph_ret trace;
	unsigned long bitmap;
	unsigned long ret;
	int offset;
	int i;

	ret_stack = ftrace_pop_return_trace(&trace, &ret, frame_pointer, &offset);

	if (unlikely(!ret_stack)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		return (unsigned long)panic;
	}

	if (fregs)
		ftrace_regs_set_instruction_pointer(fregs, ret);

#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
	trace.retval = ftrace_regs_get_return_value(fregs);
#endif

	bitmap = get_bitmap_bits(current, offset);

#ifdef CONFIG_HAVE_STATIC_CALL
	if (static_branch_likely(&fgraph_do_direct)) {
		if (test_bit(fgraph_direct_gops->idx, &bitmap))
			static_call(fgraph_retfunc)(&trace, fgraph_direct_gops, fregs);
	} else
#endif
	{
		for_each_set_bit(i, &bitmap, sizeof(bitmap) * BITS_PER_BYTE) {
			struct fgraph_ops *gops = READ_ONCE(fgraph_array[i]);

			if (gops == &fgraph_stub)
				continue;

			gops->retfunc(&trace, gops, fregs);
		}
	}

	/*
	 * The ftrace_graph_return() may still access the current
	 * ret_stack structure, we need to make sure the update of
	 * curr_ret_stack is after that.
	 */
	barrier();
	current->curr_ret_stack = offset - FGRAPH_FRAME_OFFSET;

	current->curr_ret_depth--;
	return ret;
}

/*
 * After all architectures have selected HAVE_FUNCTION_GRAPH_FREGS, we can
 * leave only ftrace_return_to_handler(fregs).
 */
#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FREGS
unsigned long ftrace_return_to_handler(struct ftrace_regs *fregs)
{
	return __ftrace_return_to_handler(fregs,
				ftrace_regs_get_frame_pointer(fregs));
}
#else
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	return __ftrace_return_to_handler(NULL, frame_pointer);
}
#endif

/**
 * ftrace_graph_get_ret_stack - return the entry of the shadow stack
 * @task: The task to read the shadow stack from.
 * @idx: Index down the shadow stack
 *
 * Return the ret_struct on the shadow stack of the @task at the
 * call graph at @idx starting with zero. If @idx is zero, it
 * will return the last saved ret_stack entry. If it is greater than
 * zero, it will return the corresponding ret_stack for the depth
 * of saved return addresses.
 */
struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx)
{
	struct ftrace_ret_stack *ret_stack = NULL;
	int offset = task->curr_ret_stack;

	if (offset < 0)
		return NULL;

	do {
		ret_stack = get_ret_stack(task, offset, &offset);
	} while (ret_stack && --idx >= 0);

	return ret_stack;
}
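
/*
 * Illustrative sketch (editorial addition, hypothetical guard): walking
 * the shadow stack of a task from the most recently saved frame
 * downward via the @idx parameter.
 */
#ifdef FGRAPH_EXAMPLE_SKETCHES
static void example_dump_shadow_stack(struct task_struct *task)
{
	struct ftrace_ret_stack *frame;
	int idx = 0;

	while ((frame = ftrace_graph_get_ret_stack(task, idx++)))
		pr_info("depth %d: %ps returns to %lx\n",
			idx - 1, (void *)frame->func, frame->ret);
}
#endif /* FGRAPH_EXAMPLE_SKETCHES */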

/**
 * ftrace_graph_top_ret_addr - return the top return address in the shadow stack
 * @task: The task to read the shadow stack from.
 *
 * Return the first return address on the shadow stack of the @task, which is
 * not the fgraph's return_to_handler.
 */
unsigned long ftrace_graph_top_ret_addr(struct task_struct *task)
{
	unsigned long return_handler = (unsigned long)dereference_kernel_function_descriptor(return_to_handler);
	struct ftrace_ret_stack *ret_stack = NULL;
	int offset = task->curr_ret_stack;

	if (offset < 0)
		return 0;

	do {
		ret_stack = get_ret_stack(task, offset, &offset);
	} while (ret_stack && ret_stack->ret == return_handler);

	return ret_stack ? ret_stack->ret : 0;
}

/**
 * ftrace_graph_ret_addr - return the original value of the return address
 * @task: The task the unwinder is being executed on
 * @idx: An initialized pointer to the next stack index to use
 * @ret: The current return address (likely pointing to return_handler)
 * @retp: The address on the stack of the current return location
 *
 * This function can be called by stack unwinding code to convert a found stack
 * return address (@ret) to its original value, in case the function graph
 * tracer has modified it to be 'return_to_handler'. If the address hasn't
 * been modified, the unchanged value of @ret is returned.
 *
 * @idx holds the last index used to know where to start from. It should be
 * initialized to zero for the first iteration as that will mean to start
 * at the top of the shadow stack. If the location is found, this pointer
 * will be assigned that location so that if called again, it will continue
 * where it left off.
 *
 * @retp is a pointer to the return address on the stack.
 */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	struct ftrace_ret_stack *ret_stack;
	unsigned long return_handler = (unsigned long)dereference_kernel_function_descriptor(return_to_handler);
	int i;

	if (ret != return_handler)
		return ret;

	if (!idx)
		return ret;

	i = *idx ? : task->curr_ret_stack;
	while (i > 0) {
		ret_stack = get_ret_stack(task, i, &i);
		if (!ret_stack)
			break;
		/*
		 * For a tail-call, there would be 2 or more ftrace_ret_stacks
		 * on the ret_stack, which records "return_to_handler" as the
		 * return address except for the last one.
		 * But on the real stack, there should be 1 entry because the
		 * tail-call reuses the return address on the stack and jumps
		 * to the next function.
		 * Thus we will continue to find the real return address.
		 */
		if (ret_stack->retp == retp &&
		    ret_stack->ret != return_handler) {
			*idx = i;
			return ret_stack->ret;
		}
	}

	return ret;
}
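
/*
 * Illustrative sketch (editorial addition, hypothetical guard): how an
 * arch unwinder might consult ftrace_graph_ret_addr() for each frame it
 * walks. The addr/addr_p names stand in for whatever the unwinder read
 * off the real stack.
 */
#ifdef FGRAPH_EXAMPLE_SKETCHES
static unsigned long example_unwind_fixup(struct task_struct *task,
					  int *graph_idx,
					  unsigned long addr,
					  unsigned long *addr_p)
{
	/*
	 * If @addr is return_to_handler, this returns the original
	 * return address the tracer hijacked; otherwise @addr comes
	 * back unchanged. @graph_idx must start at 0 and be kept
	 * across calls for one unwind pass.
	 */
	return ftrace_graph_ret_addr(task, graph_idx, addr, addr_p);
}
#endif /* FGRAPH_EXAMPLE_SKETCHES */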

static struct ftrace_ops graph_ops = {
	.func			= ftrace_graph_func,
	.flags			= FTRACE_OPS_GRAPH_STUB,
#ifdef FTRACE_GRAPH_TRAMP_ADDR
	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
	/* trampoline_size is only needed for dynamically allocated tramps */
#endif
};

void fgraph_init_ops(struct ftrace_ops *dst_ops,
		     struct ftrace_ops *src_ops)
{
	dst_ops->flags = FTRACE_OPS_FL_PID | FTRACE_OPS_GRAPH_STUB;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (src_ops) {
		dst_ops->func_hash = &src_ops->local_hash;
		mutex_init(&dst_ops->local_hash.regex_lock);
		INIT_LIST_HEAD(&dst_ops->subop_list);
		dst_ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}

void ftrace_graph_sleep_time_control(bool enable)
{
	fgraph_sleep_time = enable;
}

/*
 * Simply points to ftrace_stub, but with the proper protocol.
 * Defined by the linker script in linux/vmlinux.lds.h
 */
void ftrace_stub_graph(struct ftrace_graph_ret *trace, struct fgraph_ops *gops,
		       struct ftrace_regs *fregs);

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return = ftrace_stub_graph;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(unsigned long **ret_stack_list)
{
	int i;
	int ret = 0;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	if (WARN_ON_ONCE(!fgraph_stack_cachep))
		return -ENOMEM;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmem_cache_alloc(fgraph_stack_cachep, GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	rcu_read_lock();
	for_each_process_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			atomic_set(&t->trace_overrun, 0);
			ret_stack_init_task_vars(ret_stack_list[start]);
			t->curr_ret_stack = 0;
			t->curr_ret_depth = -1;
			/* Make sure the tasks see the 0 first: */
			smp_wmb();
			t->ret_stack = ret_stack_list[start++];
		}
	}

unlock:
	rcu_read_unlock();
free:
	for (i = start; i < end; i++)
		kmem_cache_free(fgraph_stack_cachep, ret_stack_list[i]);
	return ret;
}

static void
ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned int prev_state)
{
	unsigned long long timestamp;

	/*
	 * Does the user want to count the time a function was asleep.
	 * If so, do not update the time stamps.
	 */
	if (fgraph_sleep_time)
		return;

	timestamp = trace_clock_local();

	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	next->ftrace_sleeptime += timestamp - next->ftrace_timestamp;
}

static DEFINE_PER_CPU(unsigned long *, idle_ret_stack);

static void
graph_init_task(struct task_struct *t, unsigned long *ret_stack)
{
	atomic_set(&t->trace_overrun, 0);
	ret_stack_init_task_vars(ret_stack);
	t->ftrace_timestamp = 0;
	t->curr_ret_stack = 0;
	t->curr_ret_depth = -1;
	/* make curr_ret_stack visible before we add the ret_stack */
	smp_wmb();
	t->ret_stack = ret_stack;
}

/*
 * Allocate a return stack for the idle task. May be the first
 * time through, or it may be done by CPU hotplug online.
 */
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
	t->curr_ret_stack = 0;
	t->curr_ret_depth = -1;
	/*
	 * The idle task has no parent, it either has its own
	 * stack or no stack at all.
	 */
	if (t->ret_stack)
		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));

	if (ftrace_graph_active) {
		unsigned long *ret_stack;

		if (WARN_ON_ONCE(!fgraph_stack_cachep))
			return;

		ret_stack = per_cpu(idle_ret_stack, cpu);
		if (!ret_stack) {
			ret_stack = kmem_cache_alloc(fgraph_stack_cachep, GFP_KERNEL);
			if (!ret_stack)
				return;
			per_cpu(idle_ret_stack, cpu) = ret_stack;
		}
		graph_init_task(t, ret_stack);
	}
}

/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;
	t->curr_ret_stack = 0;
	t->curr_ret_depth = -1;

	if (ftrace_graph_active) {
		unsigned long *ret_stack;

		if (WARN_ON_ONCE(!fgraph_stack_cachep))
			return;

		ret_stack = kmem_cache_alloc(fgraph_stack_cachep, GFP_KERNEL);
		if (!ret_stack)
			return;
		graph_init_task(t, ret_stack);
	}
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	unsigned long *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	if (ret_stack) {
		if (WARN_ON_ONCE(!fgraph_stack_cachep))
			return;
		kmem_cache_free(fgraph_stack_cachep, ret_stack);
	}
}

#ifdef CONFIG_DYNAMIC_FTRACE
static int fgraph_pid_func(struct ftrace_graph_ent *trace,
			   struct fgraph_ops *gops,
			   struct ftrace_regs *fregs)
{
	struct trace_array *tr = gops->ops.private;
	int pid;

	if (tr) {
		pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
		if (pid == FTRACE_PID_IGNORE)
			return 0;
		if (pid != FTRACE_PID_TRACE &&
		    pid != current->pid)
			return 0;
	}

	return gops->saved_func(trace, gops, fregs);
}

void fgraph_update_pid_func(void)
{
	struct fgraph_ops *gops;
	struct ftrace_ops *op;

	if (!(graph_ops.flags & FTRACE_OPS_FL_INITIALIZED))
		return;

	list_for_each_entry(op, &graph_ops.subop_list, list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			gops = container_of(op, struct fgraph_ops, ops);
			gops->entryfunc = ftrace_pids_enabled(op) ?
				fgraph_pid_func : gops->saved_func;
			if (ftrace_graph_active == 1)
				static_call_update(fgraph_func, gops->entryfunc);
		}
	}
}
#endif

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	unsigned long **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kcalloc(FTRACE_RETSTACK_ALLOC_SIZE,
				 sizeof(*ret_stack_list), GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu) {
		if (!idle_task(cpu)->ret_stack)
			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
	}

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}

static void init_task_vars(int idx)
{
	struct task_struct *g, *t;
	int cpu;

	for_each_online_cpu(cpu) {
		if (idle_task(cpu)->ret_stack)
			ret_stack_set_task_var(idle_task(cpu), idx, 0);
	}

	read_lock(&tasklist_lock);
	for_each_process_thread(g, t) {
		if (t->ret_stack)
			ret_stack_set_task_var(t, idx, 0);
	}
	read_unlock(&tasklist_lock);
}

static void ftrace_graph_enable_direct(bool enable_branch, struct fgraph_ops *gops)
{
	trace_func_graph_ent_t func = NULL;
	trace_func_graph_ret_t retfunc = NULL;
	int i;

	if (gops) {
		func = gops->entryfunc;
		retfunc = gops->retfunc;
		fgraph_direct_gops = gops;
	} else {
		for_each_set_bit(i, &fgraph_array_bitmask,
				 sizeof(fgraph_array_bitmask) * BITS_PER_BYTE) {
			func = fgraph_array[i]->entryfunc;
			retfunc = fgraph_array[i]->retfunc;
			fgraph_direct_gops = fgraph_array[i];
		}
	}
	if (WARN_ON_ONCE(!func))
		return;

	static_call_update(fgraph_func, func);
	static_call_update(fgraph_retfunc, retfunc);
	if (enable_branch)
		static_branch_enable(&fgraph_do_direct);
}

static void ftrace_graph_disable_direct(bool disable_branch)
{
	if (disable_branch)
		static_branch_disable(&fgraph_do_direct);
	static_call_update(fgraph_func, ftrace_graph_entry_stub);
	static_call_update(fgraph_retfunc, ftrace_graph_ret_stub);
	fgraph_direct_gops = &fgraph_stub;
}

/* The cpu_boot init_task->ret_stack will never be freed */
static int fgraph_cpu_init(unsigned int cpu)
{
	if (!idle_task(cpu)->ret_stack)
		ftrace_graph_init_idle_task(idle_task(cpu), cpu);
	return 0;
}

int register_ftrace_graph(struct fgraph_ops *gops)
{
	static bool fgraph_initialized;
	int command = 0;
	int ret = 0;
	int i = -1;

	guard(mutex)(&ftrace_lock);

	if (!fgraph_stack_cachep) {
		fgraph_stack_cachep = kmem_cache_create("fgraph_stack",
							SHADOW_STACK_SIZE,
							SHADOW_STACK_SIZE, 0, NULL);
		if (!fgraph_stack_cachep)
			return -ENOMEM;
	}

	if (!fgraph_initialized) {
		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "fgraph:online",
					fgraph_cpu_init, NULL);
		if (ret < 0) {
			pr_warn("fgraph: Error to init cpu hotplug support\n");
			return ret;
		}
		fgraph_initialized = true;
		ret = 0;
	}

	if (!fgraph_array[0]) {
		/* The array must always have real data on it */
		for (i = 0; i < FGRAPH_ARRAY_SIZE; i++)
			fgraph_array[i] = &fgraph_stub;
		fgraph_lru_init();
	}

	i = fgraph_lru_alloc_index();
	if (i < 0 || WARN_ON_ONCE(fgraph_array[i] != &fgraph_stub))
		return -ENOSPC;
	gops->idx = i;

	ftrace_graph_active++;

	if (ftrace_graph_active == 2)
		ftrace_graph_disable_direct(true);

	if (ftrace_graph_active == 1) {
		ftrace_graph_enable_direct(false, gops);
		register_pm_notifier(&ftrace_suspend_notifier);
		ret = start_graph_tracing();
		if (ret)
			goto error;
		/*
		 * Some archs just test to see if these are not
		 * the default function
		 */
		ftrace_graph_return = return_run;
		ftrace_graph_entry = entry_run;
		command = FTRACE_START_FUNC_RET;
	} else {
		init_task_vars(gops->idx);
	}
	/* Always save the function, and reset at unregistering */
	gops->saved_func = gops->entryfunc;

	gops->ops.flags |= FTRACE_OPS_FL_GRAPH;

	ret = ftrace_startup_subops(&graph_ops, &gops->ops, command);
	if (!ret)
		fgraph_array[i] = gops;

error:
	if (ret) {
		ftrace_graph_active--;
		gops->saved_func = NULL;
		fgraph_lru_release_index(i);
	}
	return ret;
}

void unregister_ftrace_graph(struct fgraph_ops *gops)
{
	int command = 0;

	guard(mutex)(&ftrace_lock);

	if (unlikely(!ftrace_graph_active))
		return;

	if (unlikely(gops->idx < 0 || gops->idx >= FGRAPH_ARRAY_SIZE ||
		     fgraph_array[gops->idx] != gops))
		return;

	if (fgraph_lru_release_index(gops->idx) < 0)
		return;

	fgraph_array[gops->idx] = &fgraph_stub;

	ftrace_graph_active--;

	if (!ftrace_graph_active)
		command = FTRACE_STOP_FUNC_RET;

	ftrace_shutdown_subops(&graph_ops, &gops->ops, command);

	if (ftrace_graph_active == 1)
		ftrace_graph_enable_direct(true, NULL);
	else if (!ftrace_graph_active)
		ftrace_graph_disable_direct(false);

	if (!ftrace_graph_active) {
		ftrace_graph_return = ftrace_stub_graph;
		ftrace_graph_entry = ftrace_graph_entry_stub;
		unregister_pm_notifier(&ftrace_suspend_notifier);
		unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
	}
	gops->saved_func = NULL;
}
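
/*
 * Illustrative sketch (editorial addition, hypothetical guard): a
 * minimal user of this API. A tracer fills in a struct fgraph_ops,
 * optionally narrows the filter through the embedded ftrace_ops, and
 * registers/unregisters it. The callback names reuse the latency
 * example sketched earlier; the filter string is made up.
 */
#ifdef FGRAPH_EXAMPLE_SKETCHES
static struct fgraph_ops example_gops = {
	.entryfunc	= example_latency_entry,
	.retfunc	= example_latency_return,
};

static int example_tracer_start(void)
{
	static char filter[] = "schedule";

	/* Optional: only trace one function instead of everything */
	ftrace_set_filter(&example_gops.ops, filter, strlen(filter), 0);
	return register_ftrace_graph(&example_gops);
}

static void example_tracer_stop(void)
{
	unregister_ftrace_graph(&example_gops);
}
#endif /* FGRAPH_EXAMPLE_SKETCHES */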