1 | // SPDX-License-Identifier: GPL-2.0 |
2 | |
3 | #ifndef _LINUX_KERNEL_TRACE_H |
4 | #define _LINUX_KERNEL_TRACE_H |
5 | |
6 | #include <linux/fs.h> |
7 | #include <linux/atomic.h> |
8 | #include <linux/sched.h> |
9 | #include <linux/clocksource.h> |
10 | #include <linux/ring_buffer.h> |
11 | #include <linux/mmiotrace.h> |
12 | #include <linux/tracepoint.h> |
13 | #include <linux/ftrace.h> |
14 | #include <linux/trace.h> |
15 | #include <linux/hw_breakpoint.h> |
16 | #include <linux/trace_seq.h> |
17 | #include <linux/trace_events.h> |
18 | #include <linux/compiler.h> |
19 | #include <linux/glob.h> |
20 | #include <linux/irq_work.h> |
21 | #include <linux/workqueue.h> |
22 | #include <linux/ctype.h> |
23 | #include <linux/once_lite.h> |
24 | #include <linux/ftrace_regs.h> |
25 | |
26 | #include "pid_list.h" |
27 | |
28 | #ifdef CONFIG_FTRACE_SYSCALLS |
29 | #include <asm/unistd.h> /* For NR_syscalls */ |
30 | #include <asm/syscall.h> /* some archs define it here */ |
31 | #endif |
32 | |
33 | #define TRACE_MODE_WRITE 0640 |
34 | #define TRACE_MODE_READ 0440 |
35 | |
36 | enum trace_type { |
37 | __TRACE_FIRST_TYPE = 0, |
38 | |
39 | TRACE_FN, |
40 | TRACE_CTX, |
41 | TRACE_WAKE, |
42 | TRACE_STACK, |
43 | TRACE_PRINT, |
44 | TRACE_BPRINT, |
45 | TRACE_MMIO_RW, |
46 | TRACE_MMIO_MAP, |
47 | TRACE_BRANCH, |
48 | TRACE_GRAPH_RET, |
49 | TRACE_GRAPH_ENT, |
50 | TRACE_GRAPH_RETADDR_ENT, |
51 | TRACE_USER_STACK, |
52 | TRACE_BLK, |
53 | TRACE_BPUTS, |
54 | TRACE_HWLAT, |
55 | TRACE_OSNOISE, |
56 | TRACE_TIMERLAT, |
57 | TRACE_RAW_DATA, |
58 | TRACE_FUNC_REPEATS, |
59 | |
60 | __TRACE_LAST_TYPE, |
61 | }; |
62 | |
63 | |
64 | #undef __field |
65 | #define __field(type, item) type item; |
66 | |
67 | #undef __field_fn |
68 | #define __field_fn(type, item) type item; |
69 | |
70 | #undef __field_struct |
71 | #define __field_struct(type, item) __field(type, item) |
72 | |
73 | #undef __field_desc |
74 | #define __field_desc(type, container, item) |
75 | |
76 | #undef __field_packed |
77 | #define __field_packed(type, container, item) |
78 | |
79 | #undef __array |
80 | #define __array(type, item, size) type item[size]; |
81 | |
82 | /* |
83 | * For backward compatibility, older user space expects to see the |
84 | * kernel_stack event with a fixed size caller field. But today the fix |
85 | * size is ignored by the kernel, and the real structure is dynamic. |
86 | * Expose to user space: "unsigned long caller[8];" but the real structure |
87 | * will be "unsigned long caller[] __counted_by(size)" |
88 | */ |
89 | #undef __stack_array |
90 | #define __stack_array(type, item, size, field) type item[] __counted_by(field); |
91 | |
92 | #undef __array_desc |
93 | #define __array_desc(type, container, item, size) |
94 | |
95 | #undef __dynamic_array |
96 | #define __dynamic_array(type, item) type item[]; |
97 | |
98 | #undef __rel_dynamic_array |
99 | #define __rel_dynamic_array(type, item) type item[]; |
100 | |
101 | #undef F_STRUCT |
102 | #define F_STRUCT(args...) args |
103 | |
104 | #undef FTRACE_ENTRY |
105 | #define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \ |
106 | struct struct_name { \ |
107 | struct trace_entry ent; \ |
108 | tstruct \ |
109 | } |
110 | |
111 | #undef FTRACE_ENTRY_DUP |
112 | #define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk) |
113 | |
114 | #undef FTRACE_ENTRY_REG |
115 | #define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn) \ |
116 | FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) |
117 | |
118 | #undef FTRACE_ENTRY_PACKED |
119 | #define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print) \ |
120 | FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) __packed |
121 | |
122 | #include "trace_entries.h" |
123 | |
124 | /* Use this for memory failure errors */ |
125 | #define MEM_FAIL(condition, fmt, ...) \ |
126 | DO_ONCE_LITE_IF(condition, pr_err, "ERROR: " fmt, ##__VA_ARGS__) |
127 | |
128 | #define FAULT_STRING "(fault)" |
129 | |
130 | #define HIST_STACKTRACE_DEPTH 16 |
131 | #define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long)) |
132 | #define HIST_STACKTRACE_SKIP 5 |
133 | |
134 | /* |
135 | * syscalls are special, and need special handling, this is why |
136 | * they are not included in trace_entries.h |
137 | */ |
138 | struct syscall_trace_enter { |
139 | struct trace_entry ent; |
140 | int nr; |
141 | unsigned long args[]; |
142 | }; |
143 | |
144 | struct syscall_trace_exit { |
145 | struct trace_entry ent; |
146 | int nr; |
147 | long ret; |
148 | }; |
149 | |
150 | struct kprobe_trace_entry_head { |
151 | struct trace_entry ent; |
152 | unsigned long ip; |
153 | }; |
154 | |
155 | struct eprobe_trace_entry_head { |
156 | struct trace_entry ent; |
157 | }; |
158 | |
159 | struct kretprobe_trace_entry_head { |
160 | struct trace_entry ent; |
161 | unsigned long func; |
162 | unsigned long ret_ip; |
163 | }; |
164 | |
165 | struct fentry_trace_entry_head { |
166 | struct trace_entry ent; |
167 | unsigned long ip; |
168 | }; |
169 | |
170 | struct fexit_trace_entry_head { |
171 | struct trace_entry ent; |
172 | unsigned long func; |
173 | unsigned long ret_ip; |
174 | }; |
175 | |
176 | #define TRACE_BUF_SIZE 1024 |
177 | |
178 | struct trace_array; |
179 | |
180 | /* |
181 | * The CPU trace array - it consists of thousands of trace entries |
182 | * plus some other descriptor data: (for example which task started |
183 | * the trace, etc.) |
184 | */ |
185 | struct trace_array_cpu { |
186 | local_t disabled; |
187 | |
188 | unsigned long entries; |
189 | unsigned long saved_latency; |
190 | unsigned long critical_start; |
191 | unsigned long critical_end; |
192 | unsigned long critical_sequence; |
193 | unsigned long nice; |
194 | unsigned long policy; |
195 | unsigned long rt_priority; |
196 | unsigned long skipped_entries; |
197 | u64 preempt_timestamp; |
198 | pid_t pid; |
199 | kuid_t uid; |
200 | char comm[TASK_COMM_LEN]; |
201 | |
202 | #ifdef CONFIG_FUNCTION_TRACER |
203 | int ftrace_ignore_pid; |
204 | #endif |
205 | bool ignore_pid; |
206 | }; |
207 | |
208 | struct tracer; |
209 | struct trace_option_dentry; |
210 | |
211 | struct array_buffer { |
212 | struct trace_array *tr; |
213 | struct trace_buffer *buffer; |
214 | struct trace_array_cpu __percpu *data; |
215 | u64 time_start; |
216 | int cpu; |
217 | }; |
218 | |
219 | #define TRACE_FLAGS_MAX_SIZE 32 |
220 | |
221 | struct trace_options { |
222 | struct tracer *tracer; |
223 | struct trace_option_dentry *topts; |
224 | }; |
225 | |
226 | struct trace_pid_list *trace_pid_list_alloc(void); |
227 | void trace_pid_list_free(struct trace_pid_list *pid_list); |
228 | bool trace_pid_list_is_set(struct trace_pid_list *pid_list, unsigned int pid); |
229 | int trace_pid_list_set(struct trace_pid_list *pid_list, unsigned int pid); |
230 | int trace_pid_list_clear(struct trace_pid_list *pid_list, unsigned int pid); |
231 | int trace_pid_list_first(struct trace_pid_list *pid_list, unsigned int *pid); |
232 | int trace_pid_list_next(struct trace_pid_list *pid_list, unsigned int pid, |
233 | unsigned int *next); |
234 | |
235 | enum { |
236 | TRACE_PIDS = BIT(0), |
237 | TRACE_NO_PIDS = BIT(1), |
238 | }; |
239 | |
240 | static inline bool pid_type_enabled(int type, struct trace_pid_list *pid_list, |
241 | struct trace_pid_list *no_pid_list) |
242 | { |
243 | /* Return true if the pid list in type has pids */ |
244 | return ((type & TRACE_PIDS) && pid_list) || |
245 | ((type & TRACE_NO_PIDS) && no_pid_list); |
246 | } |
247 | |
248 | static inline bool still_need_pid_events(int type, struct trace_pid_list *pid_list, |
249 | struct trace_pid_list *no_pid_list) |
250 | { |
251 | /* |
252 | * Turning off what is in @type, return true if the "other" |
253 | * pid list, still has pids in it. |
254 | */ |
255 | return (!(type & TRACE_PIDS) && pid_list) || |
256 | (!(type & TRACE_NO_PIDS) && no_pid_list); |
257 | } |
258 | |
259 | typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data); |
260 | |
261 | /** |
262 | * struct cond_snapshot - conditional snapshot data and callback |
263 | * |
264 | * The cond_snapshot structure encapsulates a callback function and |
265 | * data associated with the snapshot for a given tracing instance. |
266 | * |
267 | * When a snapshot is taken conditionally, by invoking |
268 | * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is |
269 | * passed in turn to the cond_snapshot.update() function. That data |
270 | * can be compared by the update() implementation with the cond_data |
271 | * contained within the struct cond_snapshot instance associated with |
272 | * the trace_array. Because the tr->max_lock is held throughout the |
273 | * update() call, the update() function can directly retrieve the |
274 | * cond_snapshot and cond_data associated with the per-instance |
275 | * snapshot associated with the trace_array. |
276 | * |
277 | * The cond_snapshot.update() implementation can save data to be |
278 | * associated with the snapshot if it decides to, and returns 'true' |
279 | * in that case, or it returns 'false' if the conditional snapshot |
280 | * shouldn't be taken. |
281 | * |
282 | * The cond_snapshot instance is created and associated with the |
283 | * user-defined cond_data by tracing_cond_snapshot_enable(). |
284 | * Likewise, the cond_snapshot instance is destroyed and is no longer |
285 | * associated with the trace instance by |
286 | * tracing_cond_snapshot_disable(). |
287 | * |
288 | * The method below is required. |
289 | * |
290 | * @update: When a conditional snapshot is invoked, the update() |
291 | * callback function is invoked with the tr->max_lock held. The |
292 | * update() implementation signals whether or not to actually |
293 | * take the snapshot, by returning 'true' if so, 'false' if no |
294 | * snapshot should be taken. Because the max_lock is held for |
295 | * the duration of update(), the implementation is safe to |
296 | * directly retrieved and save any implementation data it needs |
297 | * to in association with the snapshot. |
298 | */ |
299 | struct cond_snapshot { |
300 | void *cond_data; |
301 | cond_update_fn_t update; |
302 | }; |
303 | |
304 | /* |
305 | * struct trace_func_repeats - used to keep track of the consecutive |
306 | * (on the same CPU) calls of a single function. |
307 | */ |
308 | struct trace_func_repeats { |
309 | unsigned long ip; |
310 | unsigned long parent_ip; |
311 | unsigned long count; |
312 | u64 ts_last_call; |
313 | }; |
314 | |
315 | struct trace_module_delta { |
316 | struct rcu_head rcu; |
317 | long delta[]; |
318 | }; |
319 | |
320 | /* |
321 | * The trace array - an array of per-CPU trace arrays. This is the |
322 | * highest level data structure that individual tracers deal with. |
323 | * They have on/off state as well: |
324 | */ |
325 | struct trace_array { |
326 | struct list_head list; |
327 | char *name; |
328 | struct array_buffer array_buffer; |
329 | #ifdef CONFIG_TRACER_MAX_TRACE |
330 | /* |
331 | * The max_buffer is used to snapshot the trace when a maximum |
332 | * latency is reached, or when the user initiates a snapshot. |
333 | * Some tracers will use this to store a maximum trace while |
334 | * it continues examining live traces. |
335 | * |
336 | * The buffers for the max_buffer are set up the same as the array_buffer |
337 | * When a snapshot is taken, the buffer of the max_buffer is swapped |
338 | * with the buffer of the array_buffer and the buffers are reset for |
339 | * the array_buffer so the tracing can continue. |
340 | */ |
341 | struct array_buffer max_buffer; |
342 | bool allocated_snapshot; |
343 | spinlock_t snapshot_trigger_lock; |
344 | unsigned int snapshot; |
345 | unsigned long max_latency; |
346 | #ifdef CONFIG_FSNOTIFY |
347 | struct dentry *d_max_latency; |
348 | struct work_struct fsnotify_work; |
349 | struct irq_work fsnotify_irqwork; |
350 | #endif |
351 | #endif |
352 | /* The below is for memory mapped ring buffer */ |
353 | unsigned int mapped; |
354 | unsigned long range_addr_start; |
355 | unsigned long range_addr_size; |
356 | char *range_name; |
357 | long text_delta; |
358 | struct trace_module_delta *module_delta; |
359 | void *scratch; /* pointer in persistent memory */ |
360 | int scratch_size; |
361 | |
362 | int buffer_disabled; |
363 | |
364 | struct trace_pid_list __rcu *filtered_pids; |
365 | struct trace_pid_list __rcu *filtered_no_pids; |
366 | /* |
367 | * max_lock is used to protect the swapping of buffers |
368 | * when taking a max snapshot. The buffers themselves are |
369 | * protected by per_cpu spinlocks. But the action of the swap |
370 | * needs its own lock. |
371 | * |
372 | * This is defined as a arch_spinlock_t in order to help |
373 | * with performance when lockdep debugging is enabled. |
374 | * |
375 | * It is also used in other places outside the update_max_tr |
376 | * so it needs to be defined outside of the |
377 | * CONFIG_TRACER_MAX_TRACE. |
378 | */ |
379 | arch_spinlock_t max_lock; |
380 | #ifdef CONFIG_FTRACE_SYSCALLS |
381 | int sys_refcount_enter; |
382 | int sys_refcount_exit; |
383 | struct trace_event_file __rcu *enter_syscall_files[NR_syscalls]; |
384 | struct trace_event_file __rcu *exit_syscall_files[NR_syscalls]; |
385 | #endif |
386 | int stop_count; |
387 | int clock_id; |
388 | int nr_topts; |
389 | bool clear_trace; |
390 | int buffer_percent; |
391 | unsigned int n_err_log_entries; |
392 | struct tracer *current_trace; |
393 | unsigned int trace_flags; |
394 | unsigned char trace_flags_index[TRACE_FLAGS_MAX_SIZE]; |
395 | unsigned int flags; |
396 | raw_spinlock_t start_lock; |
397 | const char *system_names; |
398 | struct list_head err_log; |
399 | struct dentry *dir; |
400 | struct dentry *options; |
401 | struct dentry *percpu_dir; |
402 | struct eventfs_inode *event_dir; |
403 | struct trace_options *topts; |
404 | struct list_head systems; |
405 | struct list_head events; |
406 | struct list_head marker_list; |
407 | struct trace_event_file *trace_marker_file; |
408 | cpumask_var_t tracing_cpumask; /* only trace on set CPUs */ |
409 | /* one per_cpu trace_pipe can be opened by only one user */ |
410 | cpumask_var_t pipe_cpumask; |
411 | int ref; |
412 | int trace_ref; |
413 | #ifdef CONFIG_MODULES |
414 | struct list_head mod_events; |
415 | #endif |
416 | #ifdef CONFIG_FUNCTION_TRACER |
417 | struct ftrace_ops *ops; |
418 | struct trace_pid_list __rcu *function_pids; |
419 | struct trace_pid_list __rcu *function_no_pids; |
420 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
421 | struct fgraph_ops *gops; |
422 | #endif |
423 | #ifdef CONFIG_DYNAMIC_FTRACE |
424 | /* All of these are protected by the ftrace_lock */ |
425 | struct list_head func_probes; |
426 | struct list_head mod_trace; |
427 | struct list_head mod_notrace; |
428 | #endif |
429 | /* function tracing enabled */ |
430 | int function_enabled; |
431 | #endif |
432 | int no_filter_buffering_ref; |
433 | struct list_head hist_vars; |
434 | #ifdef CONFIG_TRACER_SNAPSHOT |
435 | struct cond_snapshot *cond_snapshot; |
436 | #endif |
437 | struct trace_func_repeats __percpu *last_func_repeats; |
438 | /* |
439 | * On boot up, the ring buffer is set to the minimum size, so that |
440 | * we do not waste memory on systems that are not using tracing. |
441 | */ |
442 | bool ring_buffer_expanded; |
443 | }; |
444 | |
445 | enum { |
446 | TRACE_ARRAY_FL_GLOBAL = BIT(0), |
447 | TRACE_ARRAY_FL_BOOT = BIT(1), |
448 | TRACE_ARRAY_FL_LAST_BOOT = BIT(2), |
449 | TRACE_ARRAY_FL_MOD_INIT = BIT(3), |
450 | TRACE_ARRAY_FL_MEMMAP = BIT(4), |
451 | }; |
452 | |
453 | #ifdef CONFIG_MODULES |
454 | bool module_exists(const char *module); |
455 | #else |
456 | static inline bool module_exists(const char *module) |
457 | { |
458 | return false; |
459 | } |
460 | #endif |
461 | |
462 | extern struct list_head ftrace_trace_arrays; |
463 | |
464 | extern struct mutex trace_types_lock; |
465 | |
466 | extern int trace_array_get(struct trace_array *tr); |
467 | extern int tracing_check_open_get_tr(struct trace_array *tr); |
468 | extern struct trace_array *trace_array_find(const char *instance); |
469 | extern struct trace_array *trace_array_find_get(const char *instance); |
470 | |
471 | extern u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe); |
472 | extern int tracing_set_filter_buffering(struct trace_array *tr, bool set); |
473 | extern int tracing_set_clock(struct trace_array *tr, const char *clockstr); |
474 | |
475 | extern bool trace_clock_in_ns(struct trace_array *tr); |
476 | |
477 | extern unsigned long trace_adjust_address(struct trace_array *tr, unsigned long addr); |
478 | |
479 | /* |
480 | * The global tracer (top) should be the first trace array added, |
481 | * but we check the flag anyway. |
482 | */ |
483 | static inline struct trace_array *top_trace_array(void) |
484 | { |
485 | struct trace_array *tr; |
486 | |
487 | if (list_empty(head: &ftrace_trace_arrays)) |
488 | return NULL; |
489 | |
490 | tr = list_entry(ftrace_trace_arrays.prev, |
491 | typeof(*tr), list); |
492 | WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL)); |
493 | return tr; |
494 | } |
495 | |
496 | #define FTRACE_CMP_TYPE(var, type) \ |
497 | __builtin_types_compatible_p(typeof(var), type *) |
498 | |
499 | #undef IF_ASSIGN |
500 | #define IF_ASSIGN(var, entry, etype, id) \ |
501 | if (FTRACE_CMP_TYPE(var, etype)) { \ |
502 | var = (typeof(var))(entry); \ |
503 | WARN_ON(id != 0 && (entry)->type != id); \ |
504 | break; \ |
505 | } |
506 | |
507 | /* Will cause compile errors if type is not found. */ |
508 | extern void __ftrace_bad_type(void); |
509 | |
510 | /* |
511 | * The trace_assign_type is a verifier that the entry type is |
512 | * the same as the type being assigned. To add new types simply |
513 | * add a line with the following format: |
514 | * |
515 | * IF_ASSIGN(var, ent, type, id); |
516 | * |
517 | * Where "type" is the trace type that includes the trace_entry |
518 | * as the "ent" item. And "id" is the trace identifier that is |
519 | * used in the trace_type enum. |
520 | * |
521 | * If the type can have more than one id, then use zero. |
522 | */ |
523 | #define trace_assign_type(var, ent) \ |
524 | do { \ |
525 | IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \ |
526 | IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \ |
527 | IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \ |
528 | IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\ |
529 | IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \ |
530 | IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \ |
531 | IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS); \ |
532 | IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT); \ |
533 | IF_ASSIGN(var, ent, struct osnoise_entry, TRACE_OSNOISE);\ |
534 | IF_ASSIGN(var, ent, struct timerlat_entry, TRACE_TIMERLAT);\ |
535 | IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\ |
536 | IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \ |
537 | TRACE_MMIO_RW); \ |
538 | IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \ |
539 | TRACE_MMIO_MAP); \ |
540 | IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \ |
541 | IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \ |
542 | TRACE_GRAPH_ENT); \ |
543 | IF_ASSIGN(var, ent, struct fgraph_retaddr_ent_entry,\ |
544 | TRACE_GRAPH_RETADDR_ENT); \ |
545 | IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \ |
546 | TRACE_GRAPH_RET); \ |
547 | IF_ASSIGN(var, ent, struct func_repeats_entry, \ |
548 | TRACE_FUNC_REPEATS); \ |
549 | __ftrace_bad_type(); \ |
550 | } while (0) |
551 | |
552 | /* |
553 | * An option specific to a tracer. This is a boolean value. |
554 | * The bit is the bit index that sets its value on the |
555 | * flags value in struct tracer_flags. |
556 | */ |
557 | struct tracer_opt { |
558 | const char *name; /* Will appear on the trace_options file */ |
559 | u32 bit; /* Mask assigned in val field in tracer_flags */ |
560 | }; |
561 | |
562 | /* |
563 | * The set of specific options for a tracer. Your tracer |
564 | * have to set the initial value of the flags val. |
565 | */ |
566 | struct tracer_flags { |
567 | u32 val; |
568 | struct tracer_opt *opts; |
569 | struct tracer *trace; |
570 | }; |
571 | |
572 | /* Makes more easy to define a tracer opt */ |
573 | #define TRACER_OPT(s, b) .name = #s, .bit = b |
574 | |
575 | |
576 | struct trace_option_dentry { |
577 | struct tracer_opt *opt; |
578 | struct tracer_flags *flags; |
579 | struct trace_array *tr; |
580 | struct dentry *entry; |
581 | }; |
582 | |
583 | /** |
584 | * struct tracer - a specific tracer and its callbacks to interact with tracefs |
585 | * @name: the name chosen to select it on the available_tracers file |
586 | * @init: called when one switches to this tracer (echo name > current_tracer) |
587 | * @reset: called when one switches to another tracer |
588 | * @start: called when tracing is unpaused (echo 1 > tracing_on) |
589 | * @stop: called when tracing is paused (echo 0 > tracing_on) |
590 | * @update_thresh: called when tracing_thresh is updated |
591 | * @open: called when the trace file is opened |
592 | * @pipe_open: called when the trace_pipe file is opened |
593 | * @close: called when the trace file is released |
594 | * @pipe_close: called when the trace_pipe file is released |
595 | * @read: override the default read callback on trace_pipe |
596 | * @splice_read: override the default splice_read callback on trace_pipe |
597 | * @selftest: selftest to run on boot (see trace_selftest.c) |
598 | * @print_headers: override the first lines that describe your columns |
599 | * @print_line: callback that prints a trace |
600 | * @set_flag: signals one of your private flags changed (trace_options file) |
601 | * @flags: your private flags |
602 | */ |
603 | struct tracer { |
604 | const char *name; |
605 | int (*init)(struct trace_array *tr); |
606 | void (*reset)(struct trace_array *tr); |
607 | void (*start)(struct trace_array *tr); |
608 | void (*stop)(struct trace_array *tr); |
609 | int (*update_thresh)(struct trace_array *tr); |
610 | void (*open)(struct trace_iterator *iter); |
611 | void (*pipe_open)(struct trace_iterator *iter); |
612 | void (*close)(struct trace_iterator *iter); |
613 | void (*pipe_close)(struct trace_iterator *iter); |
614 | ssize_t (*read)(struct trace_iterator *iter, |
615 | struct file *filp, char __user *ubuf, |
616 | size_t cnt, loff_t *ppos); |
617 | ssize_t (*splice_read)(struct trace_iterator *iter, |
618 | struct file *filp, |
619 | loff_t *ppos, |
620 | struct pipe_inode_info *pipe, |
621 | size_t len, |
622 | unsigned int flags); |
623 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
624 | int (*selftest)(struct tracer *trace, |
625 | struct trace_array *tr); |
626 | #endif |
627 | void (*)(struct seq_file *m); |
628 | enum print_line_t (*print_line)(struct trace_iterator *iter); |
629 | /* If you handled the flag setting, return 0 */ |
630 | int (*set_flag)(struct trace_array *tr, |
631 | u32 old_flags, u32 bit, int set); |
632 | /* Return 0 if OK with change, else return non-zero */ |
633 | int (*flag_changed)(struct trace_array *tr, |
634 | u32 mask, int set); |
635 | struct tracer *next; |
636 | struct tracer_flags *flags; |
637 | int enabled; |
638 | bool print_max; |
639 | bool allow_instances; |
640 | #ifdef CONFIG_TRACER_MAX_TRACE |
641 | bool use_max_tr; |
642 | #endif |
643 | /* True if tracer cannot be enabled in kernel param */ |
644 | bool noboot; |
645 | }; |
646 | |
647 | static inline struct ring_buffer_iter * |
648 | trace_buffer_iter(struct trace_iterator *iter, int cpu) |
649 | { |
650 | return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL; |
651 | } |
652 | |
653 | int tracer_init(struct tracer *t, struct trace_array *tr); |
654 | int tracing_is_enabled(void); |
655 | void tracing_reset_online_cpus(struct array_buffer *buf); |
656 | void tracing_reset_all_online_cpus(void); |
657 | void tracing_reset_all_online_cpus_unlocked(void); |
658 | int tracing_open_generic(struct inode *inode, struct file *filp); |
659 | int tracing_open_generic_tr(struct inode *inode, struct file *filp); |
660 | int tracing_release_generic_tr(struct inode *inode, struct file *file); |
661 | int tracing_open_file_tr(struct inode *inode, struct file *filp); |
662 | int tracing_release_file_tr(struct inode *inode, struct file *filp); |
663 | int tracing_single_release_file_tr(struct inode *inode, struct file *filp); |
664 | bool tracing_is_disabled(void); |
665 | bool tracer_tracing_is_on(struct trace_array *tr); |
666 | void tracer_tracing_on(struct trace_array *tr); |
667 | void tracer_tracing_off(struct trace_array *tr); |
668 | void tracer_tracing_disable(struct trace_array *tr); |
669 | void tracer_tracing_enable(struct trace_array *tr); |
670 | struct dentry *trace_create_file(const char *name, |
671 | umode_t mode, |
672 | struct dentry *parent, |
673 | void *data, |
674 | const struct file_operations *fops); |
675 | |
676 | |
677 | /** |
678 | * tracer_tracing_is_on_cpu - show real state of ring buffer enabled on for a cpu |
679 | * @tr : the trace array to know if ring buffer is enabled |
680 | * @cpu: The cpu buffer to check if enabled |
681 | * |
682 | * Shows real state of the per CPU buffer if it is enabled or not. |
683 | */ |
684 | static inline bool tracer_tracing_is_on_cpu(struct trace_array *tr, int cpu) |
685 | { |
686 | if (tr->array_buffer.buffer) |
687 | return ring_buffer_record_is_on_cpu(buffer: tr->array_buffer.buffer, cpu); |
688 | return false; |
689 | } |
690 | |
691 | int tracing_init_dentry(void); |
692 | |
693 | struct ring_buffer_event; |
694 | |
695 | struct ring_buffer_event * |
696 | trace_buffer_lock_reserve(struct trace_buffer *buffer, |
697 | int type, |
698 | unsigned long len, |
699 | unsigned int trace_ctx); |
700 | |
701 | int ring_buffer_meta_seq_init(struct file *file, struct trace_buffer *buffer, int cpu); |
702 | |
703 | struct trace_entry *tracing_get_trace_entry(struct trace_array *tr, |
704 | struct trace_array_cpu *data); |
705 | |
706 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, |
707 | int *ent_cpu, u64 *ent_ts); |
708 | |
709 | void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer, |
710 | struct ring_buffer_event *event); |
711 | |
712 | bool trace_is_tracepoint_string(const char *str); |
713 | const char *trace_event_format(struct trace_iterator *iter, const char *fmt); |
714 | char *trace_iter_expand_format(struct trace_iterator *iter); |
715 | bool ignore_event(struct trace_iterator *iter); |
716 | |
717 | int trace_empty(struct trace_iterator *iter); |
718 | |
719 | void *trace_find_next_entry_inc(struct trace_iterator *iter); |
720 | |
721 | void trace_init_global_iter(struct trace_iterator *iter); |
722 | |
723 | void tracing_iter_reset(struct trace_iterator *iter, int cpu); |
724 | |
725 | unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu); |
726 | unsigned long trace_total_entries(struct trace_array *tr); |
727 | |
728 | void trace_function(struct trace_array *tr, |
729 | unsigned long ip, |
730 | unsigned long parent_ip, |
731 | unsigned int trace_ctx, |
732 | struct ftrace_regs *regs); |
733 | void trace_graph_function(struct trace_array *tr, |
734 | unsigned long ip, |
735 | unsigned long parent_ip, |
736 | unsigned int trace_ctx); |
737 | void (struct seq_file *m); |
738 | void (struct seq_file *m); |
739 | void (struct seq_file *m, struct trace_iterator *iter); |
740 | |
741 | void trace_graph_return(struct ftrace_graph_ret *trace, struct fgraph_ops *gops, |
742 | struct ftrace_regs *fregs); |
743 | int trace_graph_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops, |
744 | struct ftrace_regs *fregs); |
745 | |
746 | void tracing_start_cmdline_record(void); |
747 | void tracing_stop_cmdline_record(void); |
748 | void tracing_start_tgid_record(void); |
749 | void tracing_stop_tgid_record(void); |
750 | |
751 | int register_tracer(struct tracer *type); |
752 | int is_tracing_stopped(void); |
753 | |
754 | loff_t tracing_lseek(struct file *file, loff_t offset, int whence); |
755 | |
756 | extern cpumask_var_t __read_mostly tracing_buffer_mask; |
757 | |
758 | #define for_each_tracing_cpu(cpu) \ |
759 | for_each_cpu(cpu, tracing_buffer_mask) |
760 | |
761 | extern unsigned long nsecs_to_usecs(unsigned long nsecs); |
762 | |
763 | extern unsigned long tracing_thresh; |
764 | |
765 | /* PID filtering */ |
766 | |
767 | bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids, |
768 | pid_t search_pid); |
769 | bool trace_ignore_this_task(struct trace_pid_list *filtered_pids, |
770 | struct trace_pid_list *filtered_no_pids, |
771 | struct task_struct *task); |
772 | void trace_filter_add_remove_task(struct trace_pid_list *pid_list, |
773 | struct task_struct *self, |
774 | struct task_struct *task); |
775 | void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos); |
776 | void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos); |
777 | int trace_pid_show(struct seq_file *m, void *v); |
778 | int trace_pid_write(struct trace_pid_list *filtered_pids, |
779 | struct trace_pid_list **new_pid_list, |
780 | const char __user *ubuf, size_t cnt); |
781 | |
782 | #ifdef CONFIG_TRACER_MAX_TRACE |
783 | void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu, |
784 | void *cond_data); |
785 | void update_max_tr_single(struct trace_array *tr, |
786 | struct task_struct *tsk, int cpu); |
787 | |
788 | #ifdef CONFIG_FSNOTIFY |
789 | #define LATENCY_FS_NOTIFY |
790 | #endif |
791 | #endif /* CONFIG_TRACER_MAX_TRACE */ |
792 | |
793 | #ifdef LATENCY_FS_NOTIFY |
794 | void latency_fsnotify(struct trace_array *tr); |
795 | #else |
796 | static inline void latency_fsnotify(struct trace_array *tr) { } |
797 | #endif |
798 | |
799 | #ifdef CONFIG_STACKTRACE |
800 | void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, int skip); |
801 | #else |
802 | static inline void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, |
803 | int skip) |
804 | { |
805 | } |
806 | #endif /* CONFIG_STACKTRACE */ |
807 | |
808 | void trace_last_func_repeats(struct trace_array *tr, |
809 | struct trace_func_repeats *last_info, |
810 | unsigned int trace_ctx); |
811 | |
812 | extern u64 ftrace_now(int cpu); |
813 | |
814 | extern void trace_find_cmdline(int pid, char comm[]); |
815 | extern int trace_find_tgid(int pid); |
816 | extern void trace_event_follow_fork(struct trace_array *tr, bool enable); |
817 | |
818 | extern int trace_events_enabled(struct trace_array *tr, const char *system); |
819 | |
820 | #ifdef CONFIG_DYNAMIC_FTRACE |
821 | extern unsigned long ftrace_update_tot_cnt; |
822 | extern unsigned long ftrace_number_of_pages; |
823 | extern unsigned long ftrace_number_of_groups; |
824 | extern u64 ftrace_update_time; |
825 | extern u64 ftrace_total_mod_time; |
826 | void ftrace_init_trace_array(struct trace_array *tr); |
827 | #else |
828 | static inline void ftrace_init_trace_array(struct trace_array *tr) { } |
829 | #endif |
830 | #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func |
831 | extern int DYN_FTRACE_TEST_NAME(void); |
832 | #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2 |
833 | extern int DYN_FTRACE_TEST_NAME2(void); |
834 | |
835 | extern void trace_set_ring_buffer_expanded(struct trace_array *tr); |
836 | extern bool tracing_selftest_disabled; |
837 | |
838 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
839 | extern void __init disable_tracing_selftest(const char *reason); |
840 | |
841 | extern int trace_selftest_startup_function(struct tracer *trace, |
842 | struct trace_array *tr); |
843 | extern int trace_selftest_startup_function_graph(struct tracer *trace, |
844 | struct trace_array *tr); |
845 | extern int trace_selftest_startup_irqsoff(struct tracer *trace, |
846 | struct trace_array *tr); |
847 | extern int trace_selftest_startup_preemptoff(struct tracer *trace, |
848 | struct trace_array *tr); |
849 | extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace, |
850 | struct trace_array *tr); |
851 | extern int trace_selftest_startup_wakeup(struct tracer *trace, |
852 | struct trace_array *tr); |
853 | extern int trace_selftest_startup_nop(struct tracer *trace, |
854 | struct trace_array *tr); |
855 | extern int trace_selftest_startup_branch(struct tracer *trace, |
856 | struct trace_array *tr); |
857 | /* |
858 | * Tracer data references selftest functions that only occur |
859 | * on boot up. These can be __init functions. Thus, when selftests |
860 | * are enabled, then the tracers need to reference __init functions. |
861 | */ |
862 | #define __tracer_data __refdata |
863 | #else |
864 | static inline void __init disable_tracing_selftest(const char *reason) |
865 | { |
866 | } |
867 | /* Tracers are seldom changed. Optimize when selftests are disabled. */ |
868 | #define __tracer_data __read_mostly |
869 | #endif /* CONFIG_FTRACE_STARTUP_TEST */ |
870 | |
871 | extern void *head_page(struct trace_array_cpu *data); |
872 | extern unsigned long long ns2usecs(u64 nsec); |
873 | |
874 | __printf(2, 0) |
875 | int trace_vbprintk(unsigned long ip, const char *fmt, va_list args); |
876 | __printf(2, 0) |
877 | int trace_vprintk(unsigned long ip, const char *fmt, va_list args); |
878 | __printf(3, 0) |
879 | int trace_array_vprintk(struct trace_array *tr, |
880 | unsigned long ip, const char *fmt, va_list args); |
881 | __printf(3, 4) |
882 | int trace_array_printk_buf(struct trace_buffer *buffer, |
883 | unsigned long ip, const char *fmt, ...); |
884 | void trace_printk_seq(struct trace_seq *s); |
885 | enum print_line_t print_trace_line(struct trace_iterator *iter); |
886 | |
887 | extern char trace_find_mark(unsigned long long duration); |
888 | |
889 | struct ftrace_hash; |
890 | |
891 | struct ftrace_mod_load { |
892 | struct list_head list; |
893 | char *func; |
894 | char *module; |
895 | int enable; |
896 | }; |
897 | |
898 | enum { |
899 | FTRACE_HASH_FL_MOD = (1 << 0), |
900 | }; |
901 | |
902 | struct ftrace_hash { |
903 | unsigned long size_bits; |
904 | struct hlist_head *buckets; |
905 | unsigned long count; |
906 | unsigned long flags; |
907 | struct rcu_head rcu; |
908 | }; |
909 | |
910 | struct ftrace_func_entry * |
911 | ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip); |
912 | |
913 | static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash) |
914 | { |
915 | return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD)); |
916 | } |
917 | |
918 | /* Standard output formatting function used for function return traces */ |
919 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
920 | |
921 | /* Flag options */ |
922 | #define TRACE_GRAPH_PRINT_OVERRUN 0x1 |
923 | #define TRACE_GRAPH_PRINT_CPU 0x2 |
924 | #define TRACE_GRAPH_PRINT_OVERHEAD 0x4 |
925 | #define TRACE_GRAPH_PRINT_PROC 0x8 |
926 | #define TRACE_GRAPH_PRINT_DURATION 0x10 |
927 | #define TRACE_GRAPH_PRINT_ABS_TIME 0x20 |
928 | #define TRACE_GRAPH_PRINT_REL_TIME 0x40 |
929 | #define TRACE_GRAPH_PRINT_IRQS 0x80 |
930 | #define TRACE_GRAPH_PRINT_TAIL 0x100 |
931 | #define TRACE_GRAPH_SLEEP_TIME 0x200 |
932 | #define TRACE_GRAPH_GRAPH_TIME 0x400 |
933 | #define TRACE_GRAPH_PRINT_RETVAL 0x800 |
934 | #define TRACE_GRAPH_PRINT_RETVAL_HEX 0x1000 |
935 | #define TRACE_GRAPH_PRINT_RETADDR 0x2000 |
936 | #define TRACE_GRAPH_ARGS 0x4000 |
937 | #define TRACE_GRAPH_PRINT_FILL_SHIFT 28 |
938 | #define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT) |
939 | |
940 | extern void ftrace_graph_sleep_time_control(bool enable); |
941 | |
942 | #ifdef CONFIG_FUNCTION_PROFILER |
943 | extern void ftrace_graph_graph_time_control(bool enable); |
944 | #else |
945 | static inline void ftrace_graph_graph_time_control(bool enable) { } |
946 | #endif |
947 | |
948 | extern enum print_line_t |
949 | print_graph_function_flags(struct trace_iterator *iter, u32 flags); |
950 | extern void (struct seq_file *s, u32 flags); |
951 | extern void |
952 | trace_print_graph_duration(unsigned long long duration, struct trace_seq *s); |
953 | extern void graph_trace_open(struct trace_iterator *iter); |
954 | extern void graph_trace_close(struct trace_iterator *iter); |
955 | extern int __trace_graph_entry(struct trace_array *tr, |
956 | struct ftrace_graph_ent *trace, |
957 | unsigned int trace_ctx); |
958 | extern int __trace_graph_retaddr_entry(struct trace_array *tr, |
959 | struct ftrace_graph_ent *trace, |
960 | unsigned int trace_ctx, |
961 | unsigned long retaddr); |
962 | extern void __trace_graph_return(struct trace_array *tr, |
963 | struct ftrace_graph_ret *trace, |
964 | unsigned int trace_ctx, |
965 | u64 calltime, u64 rettime); |
966 | |
967 | extern void init_array_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops); |
968 | extern int allocate_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops); |
969 | extern void free_fgraph_ops(struct trace_array *tr); |
970 | |
971 | enum { |
972 | TRACE_GRAPH_FL = 1, |
973 | |
974 | /* |
975 | * In the very unlikely case that an interrupt came in |
976 | * at a start of graph tracing, and we want to trace |
977 | * the function in that interrupt, the depth can be greater |
978 | * than zero, because of the preempted start of a previous |
979 | * trace. In an even more unlikely case, depth could be 2 |
980 | * if a softirq interrupted the start of graph tracing, |
981 | * followed by an interrupt preempting a start of graph |
982 | * tracing in the softirq, and depth can even be 3 |
983 | * if an NMI came in at the start of an interrupt function |
984 | * that preempted a softirq start of a function that |
985 | * preempted normal context!!!! Luckily, it can't be |
986 | * greater than 3, so the next two bits are a mask |
987 | * of what the depth is when we set TRACE_GRAPH_FL |
988 | */ |
989 | |
990 | TRACE_GRAPH_DEPTH_START_BIT, |
991 | TRACE_GRAPH_DEPTH_END_BIT, |
992 | |
993 | /* |
994 | * To implement set_graph_notrace, if this bit is set, we ignore |
995 | * function graph tracing of called functions, until the return |
996 | * function is called to clear it. |
997 | */ |
998 | TRACE_GRAPH_NOTRACE_BIT, |
999 | }; |
1000 | |
1001 | #define TRACE_GRAPH_NOTRACE (1 << TRACE_GRAPH_NOTRACE_BIT) |
1002 | |
1003 | static inline unsigned long ftrace_graph_depth(unsigned long *task_var) |
1004 | { |
1005 | return (*task_var >> TRACE_GRAPH_DEPTH_START_BIT) & 3; |
1006 | } |
1007 | |
1008 | static inline void ftrace_graph_set_depth(unsigned long *task_var, int depth) |
1009 | { |
1010 | *task_var &= ~(3 << TRACE_GRAPH_DEPTH_START_BIT); |
1011 | *task_var |= (depth & 3) << TRACE_GRAPH_DEPTH_START_BIT; |
1012 | } |
1013 | |
1014 | #ifdef CONFIG_DYNAMIC_FTRACE |
1015 | extern struct ftrace_hash __rcu *ftrace_graph_hash; |
1016 | extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash; |
1017 | |
1018 | static inline int |
1019 | ftrace_graph_addr(unsigned long *task_var, struct ftrace_graph_ent *trace) |
1020 | { |
1021 | unsigned long addr = trace->func; |
1022 | int ret = 0; |
1023 | struct ftrace_hash *hash; |
1024 | |
1025 | preempt_disable_notrace(); |
1026 | |
1027 | /* |
1028 | * Have to open code "rcu_dereference_sched()" because the |
1029 | * function graph tracer can be called when RCU is not |
1030 | * "watching". |
1031 | * Protected with schedule_on_each_cpu(ftrace_sync) |
1032 | */ |
1033 | hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible()); |
1034 | |
1035 | if (ftrace_hash_empty(hash)) { |
1036 | ret = 1; |
1037 | goto out; |
1038 | } |
1039 | |
1040 | if (ftrace_lookup_ip(hash, ip: addr)) { |
1041 | /* |
1042 | * This needs to be cleared on the return functions |
1043 | * when the depth is zero. |
1044 | */ |
1045 | *task_var |= TRACE_GRAPH_FL; |
1046 | ftrace_graph_set_depth(task_var, depth: trace->depth); |
1047 | |
1048 | /* |
1049 | * If no irqs are to be traced, but a set_graph_function |
1050 | * is set, and called by an interrupt handler, we still |
1051 | * want to trace it. |
1052 | */ |
1053 | if (in_hardirq()) |
1054 | trace_recursion_set(TRACE_IRQ_BIT); |
1055 | else |
1056 | trace_recursion_clear(TRACE_IRQ_BIT); |
1057 | ret = 1; |
1058 | } |
1059 | |
1060 | out: |
1061 | preempt_enable_notrace(); |
1062 | return ret; |
1063 | } |
1064 | |
1065 | static inline void |
1066 | ftrace_graph_addr_finish(struct fgraph_ops *gops, struct ftrace_graph_ret *trace) |
1067 | { |
1068 | unsigned long *task_var = fgraph_get_task_var(gops); |
1069 | |
1070 | if ((*task_var & TRACE_GRAPH_FL) && |
1071 | trace->depth == ftrace_graph_depth(task_var)) |
1072 | *task_var &= ~TRACE_GRAPH_FL; |
1073 | } |
1074 | |
1075 | static inline int ftrace_graph_notrace_addr(unsigned long addr) |
1076 | { |
1077 | int ret = 0; |
1078 | struct ftrace_hash *notrace_hash; |
1079 | |
1080 | preempt_disable_notrace(); |
1081 | |
1082 | /* |
1083 | * Have to open code "rcu_dereference_sched()" because the |
1084 | * function graph tracer can be called when RCU is not |
1085 | * "watching". |
1086 | * Protected with schedule_on_each_cpu(ftrace_sync) |
1087 | */ |
1088 | notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash, |
1089 | !preemptible()); |
1090 | |
1091 | if (ftrace_lookup_ip(hash: notrace_hash, ip: addr)) |
1092 | ret = 1; |
1093 | |
1094 | preempt_enable_notrace(); |
1095 | return ret; |
1096 | } |
1097 | #else |
1098 | static inline int ftrace_graph_addr(unsigned long *task_var, struct ftrace_graph_ent *trace) |
1099 | { |
1100 | return 1; |
1101 | } |
1102 | |
1103 | static inline int ftrace_graph_notrace_addr(unsigned long addr) |
1104 | { |
1105 | return 0; |
1106 | } |
1107 | static inline void ftrace_graph_addr_finish(struct fgraph_ops *gops, struct ftrace_graph_ret *trace) |
1108 | { } |
1109 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
1110 | |
1111 | extern unsigned int fgraph_max_depth; |
1112 | extern bool fgraph_sleep_time; |
1113 | |
1114 | static inline bool |
1115 | ftrace_graph_ignore_func(struct fgraph_ops *gops, struct ftrace_graph_ent *trace) |
1116 | { |
1117 | unsigned long *task_var = fgraph_get_task_var(gops); |
1118 | |
1119 | /* trace it when it is-nested-in or is a function enabled. */ |
1120 | return !((*task_var & TRACE_GRAPH_FL) || |
1121 | ftrace_graph_addr(task_var, trace)) || |
1122 | (trace->depth < 0) || |
1123 | (fgraph_max_depth && trace->depth >= fgraph_max_depth); |
1124 | } |
1125 | |
1126 | void fgraph_init_ops(struct ftrace_ops *dst_ops, |
1127 | struct ftrace_ops *src_ops); |
1128 | |
1129 | #else /* CONFIG_FUNCTION_GRAPH_TRACER */ |
1130 | static inline enum print_line_t |
1131 | print_graph_function_flags(struct trace_iterator *iter, u32 flags) |
1132 | { |
1133 | return TRACE_TYPE_UNHANDLED; |
1134 | } |
1135 | static inline void free_fgraph_ops(struct trace_array *tr) { } |
1136 | /* ftrace_ops may not be defined */ |
1137 | #define init_array_fgraph_ops(tr, ops) do { } while (0) |
1138 | #define allocate_fgraph_ops(tr, ops) ({ 0; }) |
1139 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
1140 | |
1141 | extern struct list_head ftrace_pids; |
1142 | |
1143 | #ifdef CONFIG_FUNCTION_TRACER |
1144 | |
1145 | #define FTRACE_PID_IGNORE -1 |
1146 | #define FTRACE_PID_TRACE -2 |
1147 | |
1148 | struct ftrace_func_command { |
1149 | struct list_head list; |
1150 | char *name; |
1151 | int (*func)(struct trace_array *tr, |
1152 | struct ftrace_hash *hash, |
1153 | char *func, char *cmd, |
1154 | char *params, int enable); |
1155 | }; |
1156 | extern bool ftrace_filter_param __initdata; |
1157 | static inline int ftrace_trace_task(struct trace_array *tr) |
1158 | { |
1159 | return this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid) != |
1160 | FTRACE_PID_IGNORE; |
1161 | } |
1162 | extern int ftrace_is_dead(void); |
1163 | int ftrace_create_function_files(struct trace_array *tr, |
1164 | struct dentry *parent); |
1165 | void ftrace_destroy_function_files(struct trace_array *tr); |
1166 | int ftrace_allocate_ftrace_ops(struct trace_array *tr); |
1167 | void ftrace_free_ftrace_ops(struct trace_array *tr); |
1168 | void ftrace_init_global_array_ops(struct trace_array *tr); |
1169 | struct trace_array *trace_get_global_array(void); |
1170 | void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func); |
1171 | void ftrace_reset_array_ops(struct trace_array *tr); |
1172 | void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer); |
1173 | void ftrace_init_tracefs_toplevel(struct trace_array *tr, |
1174 | struct dentry *d_tracer); |
1175 | void ftrace_clear_pids(struct trace_array *tr); |
1176 | int init_function_trace(void); |
1177 | void ftrace_pid_follow_fork(struct trace_array *tr, bool enable); |
1178 | #else |
1179 | static inline int ftrace_trace_task(struct trace_array *tr) |
1180 | { |
1181 | return 1; |
1182 | } |
1183 | static inline int ftrace_is_dead(void) { return 0; } |
1184 | static inline int |
1185 | ftrace_create_function_files(struct trace_array *tr, |
1186 | struct dentry *parent) |
1187 | { |
1188 | return 0; |
1189 | } |
1190 | static inline int ftrace_allocate_ftrace_ops(struct trace_array *tr) |
1191 | { |
1192 | return 0; |
1193 | } |
1194 | static inline void ftrace_free_ftrace_ops(struct trace_array *tr) { } |
1195 | static inline void ftrace_destroy_function_files(struct trace_array *tr) { } |
1196 | static inline __init void |
1197 | ftrace_init_global_array_ops(struct trace_array *tr) { } |
1198 | static inline void ftrace_reset_array_ops(struct trace_array *tr) { } |
1199 | static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { } |
1200 | static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { } |
1201 | static inline void ftrace_clear_pids(struct trace_array *tr) { } |
1202 | static inline int init_function_trace(void) { return 0; } |
1203 | static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { } |
1204 | /* ftace_func_t type is not defined, use macro instead of static inline */ |
1205 | #define ftrace_init_array_ops(tr, func) do { } while (0) |
1206 | #endif /* CONFIG_FUNCTION_TRACER */ |
1207 | |
1208 | #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE) |
1209 | |
1210 | struct ftrace_probe_ops { |
1211 | void (*func)(unsigned long ip, |
1212 | unsigned long parent_ip, |
1213 | struct trace_array *tr, |
1214 | struct ftrace_probe_ops *ops, |
1215 | void *data); |
1216 | int (*init)(struct ftrace_probe_ops *ops, |
1217 | struct trace_array *tr, |
1218 | unsigned long ip, void *init_data, |
1219 | void **data); |
1220 | void (*free)(struct ftrace_probe_ops *ops, |
1221 | struct trace_array *tr, |
1222 | unsigned long ip, void *data); |
1223 | int (*print)(struct seq_file *m, |
1224 | unsigned long ip, |
1225 | struct ftrace_probe_ops *ops, |
1226 | void *data); |
1227 | }; |
1228 | |
1229 | struct ftrace_func_mapper; |
1230 | typedef int (*ftrace_mapper_func)(void *data); |
1231 | |
1232 | struct ftrace_func_mapper *allocate_ftrace_func_mapper(void); |
1233 | void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper, |
1234 | unsigned long ip); |
1235 | int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper, |
1236 | unsigned long ip, void *data); |
1237 | void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper, |
1238 | unsigned long ip); |
1239 | void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper, |
1240 | ftrace_mapper_func free_func); |
1241 | |
1242 | extern int |
1243 | register_ftrace_function_probe(char *glob, struct trace_array *tr, |
1244 | struct ftrace_probe_ops *ops, void *data); |
1245 | extern int |
1246 | unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr, |
1247 | struct ftrace_probe_ops *ops); |
1248 | extern void clear_ftrace_function_probes(struct trace_array *tr); |
1249 | |
1250 | int register_ftrace_command(struct ftrace_func_command *cmd); |
1251 | int unregister_ftrace_command(struct ftrace_func_command *cmd); |
1252 | |
1253 | void ftrace_create_filter_files(struct ftrace_ops *ops, |
1254 | struct dentry *parent); |
1255 | void ftrace_destroy_filter_files(struct ftrace_ops *ops); |
1256 | |
1257 | extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, |
1258 | int len, int reset); |
1259 | extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, |
1260 | int len, int reset); |
1261 | #else |
1262 | struct ftrace_func_command; |
1263 | |
1264 | static inline __init int register_ftrace_command(struct ftrace_func_command *cmd) |
1265 | { |
1266 | return -EINVAL; |
1267 | } |
1268 | static inline __init int unregister_ftrace_command(char *cmd_name) |
1269 | { |
1270 | return -EINVAL; |
1271 | } |
1272 | static inline void clear_ftrace_function_probes(struct trace_array *tr) |
1273 | { |
1274 | } |
1275 | |
1276 | /* |
1277 | * The ops parameter passed in is usually undefined. |
1278 | * This must be a macro. |
1279 | */ |
1280 | #define ftrace_create_filter_files(ops, parent) do { } while (0) |
1281 | #define ftrace_destroy_filter_files(ops) do { } while (0) |
1282 | #endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */ |
1283 | |
1284 | bool ftrace_event_is_function(struct trace_event_call *call); |
1285 | |
1286 | /* |
1287 | * struct trace_parser - servers for reading the user input separated by spaces |
1288 | * @cont: set if the input is not complete - no final space char was found |
1289 | * @buffer: holds the parsed user input |
1290 | * @idx: user input length |
1291 | * @size: buffer size |
1292 | */ |
1293 | struct trace_parser { |
1294 | bool cont; |
1295 | char *buffer; |
1296 | unsigned idx; |
1297 | unsigned size; |
1298 | }; |
1299 | |
1300 | static inline bool trace_parser_loaded(struct trace_parser *parser) |
1301 | { |
1302 | return (parser->idx != 0); |
1303 | } |
1304 | |
1305 | static inline bool trace_parser_cont(struct trace_parser *parser) |
1306 | { |
1307 | return parser->cont; |
1308 | } |
1309 | |
1310 | static inline void trace_parser_clear(struct trace_parser *parser) |
1311 | { |
1312 | parser->cont = false; |
1313 | parser->idx = 0; |
1314 | } |
1315 | |
1316 | extern int trace_parser_get_init(struct trace_parser *parser, int size); |
1317 | extern void trace_parser_put(struct trace_parser *parser); |
1318 | extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf, |
1319 | size_t cnt, loff_t *ppos); |
1320 | |
1321 | /* |
1322 | * Only create function graph options if function graph is configured. |
1323 | */ |
1324 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
1325 | # define FGRAPH_FLAGS \ |
1326 | C(DISPLAY_GRAPH, "display-graph"), |
1327 | #else |
1328 | # define FGRAPH_FLAGS |
1329 | #endif |
1330 | |
1331 | #ifdef CONFIG_BRANCH_TRACER |
1332 | # define BRANCH_FLAGS \ |
1333 | C(BRANCH, "branch"), |
1334 | #else |
1335 | # define BRANCH_FLAGS |
1336 | #endif |
1337 | |
1338 | #ifdef CONFIG_FUNCTION_TRACER |
1339 | # define FUNCTION_FLAGS \ |
1340 | C(FUNCTION, "function-trace"), \ |
1341 | C(FUNC_FORK, "function-fork"), |
1342 | # define FUNCTION_DEFAULT_FLAGS TRACE_ITER_FUNCTION |
1343 | #else |
1344 | # define FUNCTION_FLAGS |
1345 | # define FUNCTION_DEFAULT_FLAGS 0UL |
1346 | # define TRACE_ITER_FUNC_FORK 0UL |
1347 | #endif |
1348 | |
1349 | #ifdef CONFIG_STACKTRACE |
1350 | # define STACK_FLAGS \ |
1351 | C(STACKTRACE, "stacktrace"), |
1352 | #else |
1353 | # define STACK_FLAGS |
1354 | #endif |
1355 | |
1356 | /* |
1357 | * trace_iterator_flags is an enumeration that defines bit |
1358 | * positions into trace_flags that controls the output. |
1359 | * |
1360 | * NOTE: These bits must match the trace_options array in |
1361 | * trace.c (this macro guarantees it). |
1362 | */ |
1363 | #define TRACE_FLAGS \ |
1364 | C(PRINT_PARENT, "print-parent"), \ |
1365 | C(SYM_OFFSET, "sym-offset"), \ |
1366 | C(SYM_ADDR, "sym-addr"), \ |
1367 | C(VERBOSE, "verbose"), \ |
1368 | C(RAW, "raw"), \ |
1369 | C(HEX, "hex"), \ |
1370 | C(BIN, "bin"), \ |
1371 | C(BLOCK, "block"), \ |
1372 | C(FIELDS, "fields"), \ |
1373 | C(PRINTK, "trace_printk"), \ |
1374 | C(ANNOTATE, "annotate"), \ |
1375 | C(USERSTACKTRACE, "userstacktrace"), \ |
1376 | C(SYM_USEROBJ, "sym-userobj"), \ |
1377 | C(PRINTK_MSGONLY, "printk-msg-only"), \ |
1378 | C(CONTEXT_INFO, "context-info"), /* Print pid/cpu/time */ \ |
1379 | C(LATENCY_FMT, "latency-format"), \ |
1380 | C(RECORD_CMD, "record-cmd"), \ |
1381 | C(RECORD_TGID, "record-tgid"), \ |
1382 | C(OVERWRITE, "overwrite"), \ |
1383 | C(STOP_ON_FREE, "disable_on_free"), \ |
1384 | C(IRQ_INFO, "irq-info"), \ |
1385 | C(MARKERS, "markers"), \ |
1386 | C(EVENT_FORK, "event-fork"), \ |
1387 | C(TRACE_PRINTK, "trace_printk_dest"), \ |
1388 | C(COPY_MARKER, "copy_trace_marker"),\ |
1389 | C(PAUSE_ON_TRACE, "pause-on-trace"), \ |
1390 | C(HASH_PTR, "hash-ptr"), /* Print hashed pointer */ \ |
1391 | FUNCTION_FLAGS \ |
1392 | FGRAPH_FLAGS \ |
1393 | STACK_FLAGS \ |
1394 | BRANCH_FLAGS |
1395 | |
1396 | /* |
1397 | * By defining C, we can make TRACE_FLAGS a list of bit names |
1398 | * that will define the bits for the flag masks. |
1399 | */ |
1400 | #undef C |
1401 | #define C(a, b) TRACE_ITER_##a##_BIT |
1402 | |
1403 | enum trace_iterator_bits { |
1404 | TRACE_FLAGS |
1405 | /* Make sure we don't go more than we have bits for */ |
1406 | TRACE_ITER_LAST_BIT |
1407 | }; |
1408 | |
1409 | /* |
1410 | * By redefining C, we can make TRACE_FLAGS a list of masks that |
1411 | * use the bits as defined above. |
1412 | */ |
1413 | #undef C |
1414 | #define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT) |
1415 | |
1416 | enum trace_iterator_flags { TRACE_FLAGS }; |
1417 | |
1418 | /* |
1419 | * TRACE_ITER_SYM_MASK masks the options in trace_flags that |
1420 | * control the output of kernel symbols. |
1421 | */ |
1422 | #define TRACE_ITER_SYM_MASK \ |
1423 | (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR) |
1424 | |
1425 | extern struct tracer nop_trace; |
1426 | |
1427 | #ifdef CONFIG_BRANCH_TRACER |
1428 | extern int enable_branch_tracing(struct trace_array *tr); |
1429 | extern void disable_branch_tracing(void); |
1430 | static inline int trace_branch_enable(struct trace_array *tr) |
1431 | { |
1432 | if (tr->trace_flags & TRACE_ITER_BRANCH) |
1433 | return enable_branch_tracing(tr); |
1434 | return 0; |
1435 | } |
1436 | static inline void trace_branch_disable(void) |
1437 | { |
1438 | /* due to races, always disable */ |
1439 | disable_branch_tracing(); |
1440 | } |
1441 | #else |
1442 | static inline int trace_branch_enable(struct trace_array *tr) |
1443 | { |
1444 | return 0; |
1445 | } |
1446 | static inline void trace_branch_disable(void) |
1447 | { |
1448 | } |
1449 | #endif /* CONFIG_BRANCH_TRACER */ |
1450 | |
1451 | /* set ring buffers to default size if not already done so */ |
1452 | int tracing_update_buffers(struct trace_array *tr); |
1453 | |
1454 | union trace_synth_field { |
1455 | u8 as_u8; |
1456 | u16 as_u16; |
1457 | u32 as_u32; |
1458 | u64 as_u64; |
1459 | struct trace_dynamic_info as_dynamic; |
1460 | }; |
1461 | |
1462 | struct ftrace_event_field { |
1463 | struct list_head link; |
1464 | const char *name; |
1465 | const char *type; |
1466 | int filter_type; |
1467 | int offset; |
1468 | int size; |
1469 | unsigned int is_signed:1; |
1470 | unsigned int needs_test:1; |
1471 | int len; |
1472 | }; |
1473 | |
1474 | struct prog_entry; |
1475 | |
1476 | struct event_filter { |
1477 | struct prog_entry __rcu *prog; |
1478 | char *filter_string; |
1479 | }; |
1480 | |
1481 | struct event_subsystem { |
1482 | struct list_head list; |
1483 | const char *name; |
1484 | struct event_filter *filter; |
1485 | int ref_count; |
1486 | }; |
1487 | |
1488 | struct trace_subsystem_dir { |
1489 | struct list_head list; |
1490 | struct event_subsystem *subsystem; |
1491 | struct trace_array *tr; |
1492 | struct eventfs_inode *ei; |
1493 | int ref_count; |
1494 | int nr_events; |
1495 | }; |
1496 | |
1497 | void trace_buffer_unlock_commit_regs(struct trace_array *tr, |
1498 | struct trace_buffer *buffer, |
1499 | struct ring_buffer_event *event, |
1500 | unsigned int trcace_ctx, |
1501 | struct pt_regs *regs); |
1502 | |
1503 | static inline void trace_buffer_unlock_commit(struct trace_array *tr, |
1504 | struct trace_buffer *buffer, |
1505 | struct ring_buffer_event *event, |
1506 | unsigned int trace_ctx) |
1507 | { |
1508 | trace_buffer_unlock_commit_regs(tr, buffer, event, trcace_ctx: trace_ctx, NULL); |
1509 | } |
1510 | |
1511 | DECLARE_PER_CPU(bool, trace_taskinfo_save); |
1512 | int trace_save_cmdline(struct task_struct *tsk); |
1513 | int trace_create_savedcmd(void); |
1514 | int trace_alloc_tgid_map(void); |
1515 | void trace_free_saved_cmdlines_buffer(void); |
1516 | |
1517 | extern const struct file_operations tracing_saved_cmdlines_fops; |
1518 | extern const struct file_operations tracing_saved_tgids_fops; |
1519 | extern const struct file_operations tracing_saved_cmdlines_size_fops; |
1520 | |
1521 | DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event); |
1522 | DECLARE_PER_CPU(int, trace_buffered_event_cnt); |
1523 | void trace_buffered_event_disable(void); |
1524 | void trace_buffered_event_enable(void); |
1525 | |
1526 | void early_enable_events(struct trace_array *tr, char *buf, bool disable_first); |
1527 | |
1528 | static inline void |
1529 | __trace_event_discard_commit(struct trace_buffer *buffer, |
1530 | struct ring_buffer_event *event) |
1531 | { |
1532 | if (this_cpu_read(trace_buffered_event) == event) { |
1533 | /* Simply release the temp buffer and enable preemption */ |
1534 | this_cpu_dec(trace_buffered_event_cnt); |
1535 | preempt_enable_notrace(); |
1536 | return; |
1537 | } |
1538 | /* ring_buffer_discard_commit() enables preemption */ |
1539 | ring_buffer_discard_commit(buffer, event); |
1540 | } |
1541 | |
1542 | /* |
1543 | * Helper function for event_trigger_unlock_commit{_regs}(). |
1544 | * If there are event triggers attached to this event that requires |
1545 | * filtering against its fields, then they will be called as the |
1546 | * entry already holds the field information of the current event. |
1547 | * |
1548 | * It also checks if the event should be discarded or not. |
1549 | * It is to be discarded if the event is soft disabled and the |
1550 | * event was only recorded to process triggers, or if the event |
1551 | * filter is active and this event did not match the filters. |
1552 | * |
1553 | * Returns true if the event is discarded, false otherwise. |
1554 | */ |
1555 | static inline bool |
1556 | __event_trigger_test_discard(struct trace_event_file *file, |
1557 | struct trace_buffer *buffer, |
1558 | struct ring_buffer_event *event, |
1559 | void *entry, |
1560 | enum event_trigger_type *tt) |
1561 | { |
1562 | unsigned long eflags = file->flags; |
1563 | |
1564 | if (eflags & EVENT_FILE_FL_TRIGGER_COND) |
1565 | *tt = event_triggers_call(file, buffer, rec: entry, event); |
1566 | |
1567 | if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED | |
1568 | EVENT_FILE_FL_FILTERED | |
1569 | EVENT_FILE_FL_PID_FILTER)))) |
1570 | return false; |
1571 | |
1572 | if (file->flags & EVENT_FILE_FL_SOFT_DISABLED) |
1573 | goto discard; |
1574 | |
1575 | if (file->flags & EVENT_FILE_FL_FILTERED && |
1576 | !filter_match_preds(filter: file->filter, rec: entry)) |
1577 | goto discard; |
1578 | |
1579 | if ((file->flags & EVENT_FILE_FL_PID_FILTER) && |
1580 | trace_event_ignore_this_pid(trace_file: file)) |
1581 | goto discard; |
1582 | |
1583 | return false; |
1584 | discard: |
1585 | __trace_event_discard_commit(buffer, event); |
1586 | return true; |
1587 | } |
1588 | |
1589 | /** |
1590 | * event_trigger_unlock_commit - handle triggers and finish event commit |
1591 | * @file: The file pointer associated with the event |
1592 | * @buffer: The ring buffer that the event is being written to |
1593 | * @event: The event meta data in the ring buffer |
1594 | * @entry: The event itself |
1595 | * @trace_ctx: The tracing context flags. |
1596 | * |
1597 | * This is a helper function to handle triggers that require data |
1598 | * from the event itself. It also tests the event against filters and |
1599 | * if the event is soft disabled and should be discarded. |
1600 | */ |
1601 | static inline void |
1602 | event_trigger_unlock_commit(struct trace_event_file *file, |
1603 | struct trace_buffer *buffer, |
1604 | struct ring_buffer_event *event, |
1605 | void *entry, unsigned int trace_ctx) |
1606 | { |
1607 | enum event_trigger_type tt = ETT_NONE; |
1608 | |
1609 | if (!__event_trigger_test_discard(file, buffer, event, entry, tt: &tt)) |
1610 | trace_buffer_unlock_commit(tr: file->tr, buffer, event, trace_ctx); |
1611 | |
1612 | if (tt) |
1613 | event_triggers_post_call(file, tt); |
1614 | } |
1615 | |
1616 | #define FILTER_PRED_INVALID ((unsigned short)-1) |
1617 | #define FILTER_PRED_IS_RIGHT (1 << 15) |
1618 | #define FILTER_PRED_FOLD (1 << 15) |
1619 | |
1620 | /* |
1621 | * The max preds is the size of unsigned short with |
1622 | * two flags at the MSBs. One bit is used for both the IS_RIGHT |
1623 | * and FOLD flags. The other is reserved. |
1624 | * |
1625 | * 2^14 preds is way more than enough. |
1626 | */ |
1627 | #define MAX_FILTER_PRED 16384 |
1628 | |
1629 | struct filter_pred; |
1630 | struct regex; |
1631 | |
1632 | typedef int (*regex_match_func)(char *str, struct regex *r, int len); |
1633 | |
1634 | enum regex_type { |
1635 | MATCH_FULL = 0, |
1636 | MATCH_FRONT_ONLY, |
1637 | MATCH_MIDDLE_ONLY, |
1638 | MATCH_END_ONLY, |
1639 | MATCH_GLOB, |
1640 | MATCH_INDEX, |
1641 | }; |
1642 | |
1643 | struct regex { |
1644 | char pattern[MAX_FILTER_STR_VAL]; |
1645 | int len; |
1646 | int field_len; |
1647 | regex_match_func match; |
1648 | }; |
1649 | |
1650 | static inline bool is_string_field(struct ftrace_event_field *field) |
1651 | { |
1652 | return field->filter_type == FILTER_DYN_STRING || |
1653 | field->filter_type == FILTER_RDYN_STRING || |
1654 | field->filter_type == FILTER_STATIC_STRING || |
1655 | field->filter_type == FILTER_PTR_STRING || |
1656 | field->filter_type == FILTER_COMM; |
1657 | } |
1658 | |
1659 | static inline bool is_function_field(struct ftrace_event_field *field) |
1660 | { |
1661 | return field->filter_type == FILTER_TRACE_FN; |
1662 | } |
1663 | |
1664 | extern enum regex_type |
1665 | filter_parse_regex(char *buff, int len, char **search, int *not); |
1666 | extern void print_event_filter(struct trace_event_file *file, |
1667 | struct trace_seq *s); |
1668 | extern int apply_event_filter(struct trace_event_file *file, |
1669 | char *filter_string); |
1670 | extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir, |
1671 | char *filter_string); |
1672 | extern void print_subsystem_event_filter(struct event_subsystem *system, |
1673 | struct trace_seq *s); |
1674 | extern int filter_assign_type(const char *type); |
1675 | extern int create_event_filter(struct trace_array *tr, |
1676 | struct trace_event_call *call, |
1677 | char *filter_str, bool set_str, |
1678 | struct event_filter **filterp); |
1679 | extern void free_event_filter(struct event_filter *filter); |
1680 | |
1681 | struct ftrace_event_field * |
1682 | trace_find_event_field(struct trace_event_call *call, char *name); |
1683 | |
1684 | extern void trace_event_enable_cmd_record(bool enable); |
1685 | extern void trace_event_enable_tgid_record(bool enable); |
1686 | |
1687 | extern int event_trace_init(void); |
1688 | extern int init_events(void); |
1689 | extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr); |
1690 | extern int event_trace_del_tracer(struct trace_array *tr); |
1691 | extern void __trace_early_add_events(struct trace_array *tr); |
1692 | |
1693 | extern struct trace_event_file *__find_event_file(struct trace_array *tr, |
1694 | const char *system, |
1695 | const char *event); |
1696 | extern struct trace_event_file *find_event_file(struct trace_array *tr, |
1697 | const char *system, |
1698 | const char *event); |
1699 | |
1700 | static inline void *event_file_data(struct file *filp) |
1701 | { |
1702 | return READ_ONCE(file_inode(filp)->i_private); |
1703 | } |
1704 | |
1705 | extern struct mutex event_mutex; |
1706 | extern struct list_head ftrace_events; |
1707 | |
1708 | /* |
1709 | * When the trace_event_file is the filp->i_private pointer, |
1710 | * it must be taken under the event_mutex lock, and then checked |
1711 | * if the EVENT_FILE_FL_FREED flag is set. If it is, then the |
1712 | * data pointed to by the trace_event_file can not be trusted. |
1713 | * |
1714 | * Use the event_file_file() to access the trace_event_file from |
1715 | * the filp the first time under the event_mutex and check for |
1716 | * NULL. If it is needed to be retrieved again and the event_mutex |
1717 | * is still held, then the event_file_data() can be used and it |
1718 | * is guaranteed to be valid. |
1719 | */ |
1720 | static inline struct trace_event_file *event_file_file(struct file *filp) |
1721 | { |
1722 | struct trace_event_file *file; |
1723 | |
1724 | lockdep_assert_held(&event_mutex); |
1725 | file = READ_ONCE(file_inode(filp)->i_private); |
1726 | if (!file || file->flags & EVENT_FILE_FL_FREED) |
1727 | return NULL; |
1728 | return file; |
1729 | } |
1730 | |
1731 | extern const struct file_operations event_trigger_fops; |
1732 | extern const struct file_operations event_hist_fops; |
1733 | extern const struct file_operations event_hist_debug_fops; |
1734 | extern const struct file_operations event_inject_fops; |
1735 | |
1736 | #ifdef CONFIG_HIST_TRIGGERS |
1737 | extern int register_trigger_hist_cmd(void); |
1738 | extern int register_trigger_hist_enable_disable_cmds(void); |
1739 | #else |
1740 | static inline int register_trigger_hist_cmd(void) { return 0; } |
1741 | static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; } |
1742 | #endif |
1743 | |
1744 | extern int register_trigger_cmds(void); |
1745 | extern void clear_event_triggers(struct trace_array *tr); |
1746 | |
1747 | enum { |
1748 | EVENT_TRIGGER_FL_PROBE = BIT(0), |
1749 | }; |
1750 | |
1751 | struct event_trigger_data { |
1752 | unsigned long count; |
1753 | int ref; |
1754 | int flags; |
1755 | const struct event_trigger_ops *ops; |
1756 | struct event_command *cmd_ops; |
1757 | struct event_filter __rcu *filter; |
1758 | char *filter_str; |
1759 | void *private_data; |
1760 | bool paused; |
1761 | bool paused_tmp; |
1762 | struct list_head list; |
1763 | char *name; |
1764 | struct list_head named_list; |
1765 | struct event_trigger_data *named_data; |
1766 | }; |
1767 | |
1768 | /* Avoid typos */ |
1769 | #define ENABLE_EVENT_STR "enable_event" |
1770 | #define DISABLE_EVENT_STR "disable_event" |
1771 | #define ENABLE_HIST_STR "enable_hist" |
1772 | #define DISABLE_HIST_STR "disable_hist" |
1773 | |
1774 | struct enable_trigger_data { |
1775 | struct trace_event_file *file; |
1776 | bool enable; |
1777 | bool hist; |
1778 | }; |
1779 | |
1780 | extern int event_enable_trigger_print(struct seq_file *m, |
1781 | struct event_trigger_data *data); |
1782 | extern void event_enable_trigger_free(struct event_trigger_data *data); |
1783 | extern int event_enable_trigger_parse(struct event_command *cmd_ops, |
1784 | struct trace_event_file *file, |
1785 | char *glob, char *cmd, |
1786 | char *param_and_filter); |
1787 | extern int event_enable_register_trigger(char *glob, |
1788 | struct event_trigger_data *data, |
1789 | struct trace_event_file *file); |
1790 | extern void event_enable_unregister_trigger(char *glob, |
1791 | struct event_trigger_data *test, |
1792 | struct trace_event_file *file); |
1793 | extern struct event_trigger_data * |
1794 | trigger_data_alloc(struct event_command *cmd_ops, char *cmd, char *param, |
1795 | void *private_data); |
1796 | extern void trigger_data_free(struct event_trigger_data *data); |
1797 | extern int event_trigger_init(struct event_trigger_data *data); |
1798 | extern int trace_event_trigger_enable_disable(struct trace_event_file *file, |
1799 | int trigger_enable); |
1800 | extern void update_cond_flag(struct trace_event_file *file); |
1801 | extern int set_trigger_filter(char *filter_str, |
1802 | struct event_trigger_data *trigger_data, |
1803 | struct trace_event_file *file); |
1804 | extern struct event_trigger_data *find_named_trigger(const char *name); |
1805 | extern bool is_named_trigger(struct event_trigger_data *test); |
1806 | extern int save_named_trigger(const char *name, |
1807 | struct event_trigger_data *data); |
1808 | extern void del_named_trigger(struct event_trigger_data *data); |
1809 | extern void pause_named_trigger(struct event_trigger_data *data); |
1810 | extern void unpause_named_trigger(struct event_trigger_data *data); |
1811 | extern void set_named_trigger_data(struct event_trigger_data *data, |
1812 | struct event_trigger_data *named_data); |
1813 | extern struct event_trigger_data * |
1814 | get_named_trigger_data(struct event_trigger_data *data); |
1815 | extern int register_event_command(struct event_command *cmd); |
1816 | extern int unregister_event_command(struct event_command *cmd); |
1817 | extern int register_trigger_hist_enable_disable_cmds(void); |
1818 | extern bool event_trigger_check_remove(const char *glob); |
1819 | extern bool event_trigger_empty_param(const char *param); |
1820 | extern int event_trigger_separate_filter(char *param_and_filter, char **param, |
1821 | char **filter, bool param_required); |
1822 | extern int event_trigger_parse_num(char *trigger, |
1823 | struct event_trigger_data *trigger_data); |
1824 | extern int event_trigger_set_filter(struct event_command *cmd_ops, |
1825 | struct trace_event_file *file, |
1826 | char *param, |
1827 | struct event_trigger_data *trigger_data); |
1828 | extern void event_trigger_reset_filter(struct event_command *cmd_ops, |
1829 | struct event_trigger_data *trigger_data); |
1830 | extern int event_trigger_register(struct event_command *cmd_ops, |
1831 | struct trace_event_file *file, |
1832 | char *glob, |
1833 | struct event_trigger_data *trigger_data); |
1834 | extern void event_trigger_unregister(struct event_command *cmd_ops, |
1835 | struct trace_event_file *file, |
1836 | char *glob, |
1837 | struct event_trigger_data *trigger_data); |
1838 | |
1839 | extern void event_file_get(struct trace_event_file *file); |
1840 | extern void event_file_put(struct trace_event_file *file); |
1841 | |
1842 | /** |
1843 | * struct event_trigger_ops - callbacks for trace event triggers |
1844 | * |
1845 | * The methods in this structure provide per-event trigger hooks for |
1846 | * various trigger operations. |
1847 | * |
1848 | * The @init and @free methods are used during trigger setup and |
1849 | * teardown, typically called from an event_command's @parse() |
1850 | * function implementation. |
1851 | * |
1852 | * The @print method is used to print the trigger spec. |
1853 | * |
1854 | * The @trigger method is the function that actually implements the |
1855 | * trigger and is called in the context of the triggering event |
1856 | * whenever that event occurs. |
1857 | * |
1858 | * All the methods below, except for @init() and @free(), must be |
1859 | * implemented. |
1860 | * |
1861 | * @trigger: The trigger 'probe' function called when the triggering |
1862 | * event occurs. The data passed into this callback is the data |
1863 | * that was supplied to the event_command @reg() function that |
1864 | * registered the trigger (see struct event_command) along with |
1865 | * the trace record, rec. |
1866 | * |
1867 | * @init: An optional initialization function called for the trigger |
1868 | * when the trigger is registered (via the event_command reg() |
1869 | * function). This can be used to perform per-trigger |
1870 | * initialization such as incrementing a per-trigger reference |
1871 | * count, for instance. This is usually implemented by the |
1872 | * generic utility function @event_trigger_init() (see |
1873 | * trace_event_triggers.c). |
1874 | * |
1875 | * @free: An optional de-initialization function called for the |
1876 | * trigger when the trigger is unregistered (via the |
1877 | * event_command @reg() function). This can be used to perform |
1878 | * per-trigger de-initialization such as decrementing a |
1879 | * per-trigger reference count and freeing corresponding trigger |
1880 | * data, for instance. This is usually implemented by the |
1881 | * generic utility function @event_trigger_free() (see |
1882 | * trace_event_triggers.c). |
1883 | * |
1884 | * @print: The callback function invoked to have the trigger print |
1885 | * itself. This is usually implemented by a wrapper function |
1886 | * that calls the generic utility function @event_trigger_print() |
1887 | * (see trace_event_triggers.c). |
1888 | */ |
1889 | struct event_trigger_ops { |
1890 | void (*trigger)(struct event_trigger_data *data, |
1891 | struct trace_buffer *buffer, |
1892 | void *rec, |
1893 | struct ring_buffer_event *rbe); |
1894 | int (*init)(struct event_trigger_data *data); |
1895 | void (*free)(struct event_trigger_data *data); |
1896 | int (*print)(struct seq_file *m, |
1897 | struct event_trigger_data *data); |
1898 | }; |
1899 | |
1900 | /** |
1901 | * struct event_command - callbacks and data members for event commands |
1902 | * |
1903 | * Event commands are invoked by users by writing the command name |
1904 | * into the 'trigger' file associated with a trace event. The |
1905 | * parameters associated with a specific invocation of an event |
1906 | * command are used to create an event trigger instance, which is |
1907 | * added to the list of trigger instances associated with that trace |
1908 | * event. When the event is hit, the set of triggers associated with |
1909 | * that event is invoked. |
1910 | * |
1911 | * The data members in this structure provide per-event command data |
1912 | * for various event commands. |
1913 | * |
1914 | * All the data members below, except for @post_trigger, must be set |
1915 | * for each event command. |
1916 | * |
1917 | * @name: The unique name that identifies the event command. This is |
1918 | * the name used when setting triggers via trigger files. |
1919 | * |
1920 | * @trigger_type: A unique id that identifies the event command |
1921 | * 'type'. This value has two purposes, the first to ensure that |
1922 | * only one trigger of the same type can be set at a given time |
1923 | * for a particular event e.g. it doesn't make sense to have both |
1924 | * a traceon and traceoff trigger attached to a single event at |
1925 | * the same time, so traceon and traceoff have the same type |
1926 | * though they have different names. The @trigger_type value is |
1927 | * also used as a bit value for deferring the actual trigger |
1928 | * action until after the current event is finished. Some |
1929 | * commands need to do this if they themselves log to the trace |
1930 | * buffer (see the @post_trigger() member below). @trigger_type |
1931 | * values are defined by adding new values to the trigger_type |
1932 | * enum in include/linux/trace_events.h. |
1933 | * |
1934 | * @flags: See the enum event_command_flags below. |
1935 | * |
1936 | * All the methods below, except for @set_filter() and @unreg_all(), |
1937 | * must be implemented. |
1938 | * |
1939 | * @parse: The callback function responsible for parsing and |
1940 | * registering the trigger written to the 'trigger' file by the |
1941 | * user. It allocates the trigger instance and registers it with |
1942 | * the appropriate trace event. It makes use of the other |
1943 | * event_command callback functions to orchestrate this, and is |
1944 | * usually implemented by the generic utility function |
1945 | * @event_trigger_callback() (see trace_event_triggers.c). |
1946 | * |
1947 | * @reg: Adds the trigger to the list of triggers associated with the |
1948 | * event, and enables the event trigger itself, after |
1949 | * initializing it (via the event_trigger_ops @init() function). |
1950 | * This is also where commands can use the @trigger_type value to |
1951 | * make the decision as to whether or not multiple instances of |
1952 | * the trigger should be allowed. This is usually implemented by |
1953 | * the generic utility function @register_trigger() (see |
1954 | * trace_event_triggers.c). |
1955 | * |
1956 | * @unreg: Removes the trigger from the list of triggers associated |
1957 | * with the event, and disables the event trigger itself, after |
1958 | * initializing it (via the event_trigger_ops @free() function). |
1959 | * This is usually implemented by the generic utility function |
1960 | * @unregister_trigger() (see trace_event_triggers.c). |
1961 | * |
1962 | * @unreg_all: An optional function called to remove all the triggers |
1963 | * from the list of triggers associated with the event. Called |
1964 | * when a trigger file is opened in truncate mode. |
1965 | * |
1966 | * @set_filter: An optional function called to parse and set a filter |
1967 | * for the trigger. If no @set_filter() method is set for the |
1968 | * event command, filters set by the user for the command will be |
1969 | * ignored. This is usually implemented by the generic utility |
1970 | * function @set_trigger_filter() (see trace_event_triggers.c). |
1971 | * |
1972 | * @get_trigger_ops: The callback function invoked to retrieve the |
1973 | * event_trigger_ops implementation associated with the command. |
1974 | * This callback function allows a single event_command to |
1975 | * support multiple trigger implementations via different sets of |
1976 | * event_trigger_ops, depending on the value of the @param |
1977 | * string. |
1978 | */ |
1979 | struct event_command { |
1980 | struct list_head list; |
1981 | char *name; |
1982 | enum event_trigger_type trigger_type; |
1983 | int flags; |
1984 | int (*parse)(struct event_command *cmd_ops, |
1985 | struct trace_event_file *file, |
1986 | char *glob, char *cmd, |
1987 | char *param_and_filter); |
1988 | int (*reg)(char *glob, |
1989 | struct event_trigger_data *data, |
1990 | struct trace_event_file *file); |
1991 | void (*unreg)(char *glob, |
1992 | struct event_trigger_data *data, |
1993 | struct trace_event_file *file); |
1994 | void (*unreg_all)(struct trace_event_file *file); |
1995 | int (*set_filter)(char *filter_str, |
1996 | struct event_trigger_data *data, |
1997 | struct trace_event_file *file); |
1998 | const struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param); |
1999 | }; |
2000 | |
2001 | /** |
2002 | * enum event_command_flags - flags for struct event_command |
2003 | * |
2004 | * @POST_TRIGGER: A flag that says whether or not this command needs |
2005 | * to have its action delayed until after the current event has |
2006 | * been closed. Some triggers need to avoid being invoked while |
2007 | * an event is currently in the process of being logged, since |
2008 | * the trigger may itself log data into the trace buffer. Thus |
2009 | * we make sure the current event is committed before invoking |
2010 | * those triggers. To do that, the trigger invocation is split |
2011 | * in two - the first part checks the filter using the current |
2012 | * trace record; if a command has the @post_trigger flag set, it |
2013 | * sets a bit for itself in the return value, otherwise it |
2014 | * directly invokes the trigger. Once all commands have been |
2015 | * either invoked or set their return flag, the current record is |
2016 | * either committed or discarded. At that point, if any commands |
2017 | * have deferred their triggers, those commands are finally |
2018 | * invoked following the close of the current event. In other |
2019 | * words, if the event_trigger_ops @func() probe implementation |
2020 | * itself logs to the trace buffer, this flag should be set, |
2021 | * otherwise it can be left unspecified. |
2022 | * |
2023 | * @NEEDS_REC: A flag that says whether or not this command needs |
2024 | * access to the trace record in order to perform its function, |
2025 | * regardless of whether or not it has a filter associated with |
2026 | * it (filters make a trigger require access to the trace record |
2027 | * but are not always present). |
2028 | */ |
2029 | enum event_command_flags { |
2030 | EVENT_CMD_FL_POST_TRIGGER = 1, |
2031 | EVENT_CMD_FL_NEEDS_REC = 2, |
2032 | }; |
2033 | |
2034 | static inline bool event_command_post_trigger(struct event_command *cmd_ops) |
2035 | { |
2036 | return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER; |
2037 | } |
2038 | |
2039 | static inline bool event_command_needs_rec(struct event_command *cmd_ops) |
2040 | { |
2041 | return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC; |
2042 | } |
2043 | |
2044 | extern int trace_event_enable_disable(struct trace_event_file *file, |
2045 | int enable, int soft_disable); |
2046 | extern int tracing_alloc_snapshot(void); |
2047 | extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data); |
2048 | extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update); |
2049 | |
2050 | extern int tracing_snapshot_cond_disable(struct trace_array *tr); |
2051 | extern void *tracing_cond_snapshot_data(struct trace_array *tr); |
2052 | |
2053 | extern const char *__start___trace_bprintk_fmt[]; |
2054 | extern const char *__stop___trace_bprintk_fmt[]; |
2055 | |
2056 | extern const char *__start___tracepoint_str[]; |
2057 | extern const char *__stop___tracepoint_str[]; |
2058 | |
2059 | void trace_printk_control(bool enabled); |
2060 | void trace_printk_start_comm(void); |
2061 | int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set); |
2062 | int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled); |
2063 | |
2064 | /* Used from boot time tracer */ |
2065 | extern int trace_set_options(struct trace_array *tr, char *option); |
2066 | extern int tracing_set_tracer(struct trace_array *tr, const char *buf); |
2067 | extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr, |
2068 | unsigned long size, int cpu_id); |
2069 | extern int tracing_set_cpumask(struct trace_array *tr, |
2070 | cpumask_var_t tracing_cpumask_new); |
2071 | |
2072 | |
2073 | #define MAX_EVENT_NAME_LEN 64 |
2074 | |
2075 | extern ssize_t trace_parse_run_command(struct file *file, |
2076 | const char __user *buffer, size_t count, loff_t *ppos, |
2077 | int (*createfn)(const char *)); |
2078 | |
2079 | extern unsigned int err_pos(char *cmd, const char *str); |
2080 | extern void tracing_log_err(struct trace_array *tr, |
2081 | const char *loc, const char *cmd, |
2082 | const char **errs, u8 type, u16 pos); |
2083 | |
2084 | /* |
2085 | * Normal trace_printk() and friends allocates special buffers |
2086 | * to do the manipulation, as well as saves the print formats |
2087 | * into sections to display. But the trace infrastructure wants |
2088 | * to use these without the added overhead at the price of being |
2089 | * a bit slower (used mainly for warnings, where we don't care |
2090 | * about performance). The internal_trace_puts() is for such |
2091 | * a purpose. |
2092 | */ |
2093 | #define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str)) |
2094 | |
2095 | #undef FTRACE_ENTRY |
2096 | #define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \ |
2097 | extern struct trace_event_call \ |
2098 | __aligned(4) event_##call; |
2099 | #undef FTRACE_ENTRY_DUP |
2100 | #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print) \ |
2101 | FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print)) |
2102 | #undef FTRACE_ENTRY_PACKED |
2103 | #define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print) \ |
2104 | FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print)) |
2105 | |
2106 | #include "trace_entries.h" |
2107 | |
2108 | #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER) |
2109 | int perf_ftrace_event_register(struct trace_event_call *call, |
2110 | enum trace_reg type, void *data); |
2111 | #else |
2112 | #define perf_ftrace_event_register NULL |
2113 | #endif |
2114 | |
2115 | #ifdef CONFIG_FTRACE_SYSCALLS |
2116 | void init_ftrace_syscalls(void); |
2117 | const char *get_syscall_name(int syscall); |
2118 | #else |
2119 | static inline void init_ftrace_syscalls(void) { } |
2120 | static inline const char *get_syscall_name(int syscall) |
2121 | { |
2122 | return NULL; |
2123 | } |
2124 | #endif |
2125 | |
2126 | #ifdef CONFIG_EVENT_TRACING |
2127 | void trace_event_init(void); |
2128 | void trace_event_eval_update(struct trace_eval_map **map, int len); |
2129 | /* Used from boot time tracer */ |
2130 | extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set); |
2131 | extern int trigger_process_regex(struct trace_event_file *file, char *buff); |
2132 | #else |
2133 | static inline void __init trace_event_init(void) { } |
2134 | static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { } |
2135 | #endif |
2136 | |
2137 | #ifdef CONFIG_TRACER_SNAPSHOT |
2138 | void tracing_snapshot_instance(struct trace_array *tr); |
2139 | int tracing_alloc_snapshot_instance(struct trace_array *tr); |
2140 | int tracing_arm_snapshot(struct trace_array *tr); |
2141 | void tracing_disarm_snapshot(struct trace_array *tr); |
2142 | #else |
2143 | static inline void tracing_snapshot_instance(struct trace_array *tr) { } |
2144 | static inline int tracing_alloc_snapshot_instance(struct trace_array *tr) |
2145 | { |
2146 | return 0; |
2147 | } |
2148 | static inline int tracing_arm_snapshot(struct trace_array *tr) { return 0; } |
2149 | static inline void tracing_disarm_snapshot(struct trace_array *tr) { } |
2150 | #endif |
2151 | |
2152 | #ifdef CONFIG_PREEMPT_TRACER |
2153 | void tracer_preempt_on(unsigned long a0, unsigned long a1); |
2154 | void tracer_preempt_off(unsigned long a0, unsigned long a1); |
2155 | #else |
2156 | static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { } |
2157 | static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { } |
2158 | #endif |
2159 | #ifdef CONFIG_IRQSOFF_TRACER |
2160 | void tracer_hardirqs_on(unsigned long a0, unsigned long a1); |
2161 | void tracer_hardirqs_off(unsigned long a0, unsigned long a1); |
2162 | #else |
2163 | static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { } |
2164 | static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { } |
2165 | #endif |
2166 | |
2167 | /* |
2168 | * Reset the state of the trace_iterator so that it can read consumed data. |
2169 | * Normally, the trace_iterator is used for reading the data when it is not |
2170 | * consumed, and must retain state. |
2171 | */ |
2172 | static __always_inline void trace_iterator_reset(struct trace_iterator *iter) |
2173 | { |
2174 | memset_startat(iter, 0, seq); |
2175 | iter->pos = -1; |
2176 | } |
2177 | |
2178 | /* Check the name is good for event/group/fields */ |
2179 | static inline bool __is_good_name(const char *name, bool hash_ok) |
2180 | { |
2181 | if (!isalpha(*name) && *name != '_' && (!hash_ok || *name != '-')) |
2182 | return false; |
2183 | while (*++name != '\0') { |
2184 | if (!isalpha(*name) && !isdigit(c: *name) && *name != '_' && |
2185 | (!hash_ok || *name != '-')) |
2186 | return false; |
2187 | } |
2188 | return true; |
2189 | } |
2190 | |
2191 | /* Check the name is good for event/group/fields */ |
2192 | static inline bool is_good_name(const char *name) |
2193 | { |
2194 | return __is_good_name(name, hash_ok: false); |
2195 | } |
2196 | |
2197 | /* Check the name is good for system */ |
2198 | static inline bool is_good_system_name(const char *name) |
2199 | { |
2200 | return __is_good_name(name, hash_ok: true); |
2201 | } |
2202 | |
2203 | /* Convert certain expected symbols into '_' when generating event names */ |
2204 | static inline void sanitize_event_name(char *name) |
2205 | { |
2206 | while (*name++ != '\0') |
2207 | if (*name == ':' || *name == '.') |
2208 | *name = '_'; |
2209 | } |
2210 | |
2211 | /* |
2212 | * This is a generic way to read and write a u64 value from a file in tracefs. |
2213 | * |
2214 | * The value is stored on the variable pointed by *val. The value needs |
2215 | * to be at least *min and at most *max. The write is protected by an |
2216 | * existing *lock. |
2217 | */ |
2218 | struct trace_min_max_param { |
2219 | struct mutex *lock; |
2220 | u64 *val; |
2221 | u64 *min; |
2222 | u64 *max; |
2223 | }; |
2224 | |
2225 | #define U64_STR_SIZE 24 /* 20 digits max */ |
2226 | |
2227 | extern const struct file_operations trace_min_max_fops; |
2228 | |
2229 | #ifdef CONFIG_RV |
2230 | extern int rv_init_interface(void); |
2231 | #else |
2232 | static inline int rv_init_interface(void) |
2233 | { |
2234 | return 0; |
2235 | } |
2236 | #endif |
2237 | |
2238 | /* |
2239 | * This is used only to distinguish |
2240 | * function address from trampoline code. |
2241 | * So this value has no meaning. |
2242 | */ |
2243 | #define FTRACE_TRAMPOLINE_MARKER ((unsigned long) INT_MAX) |
2244 | |
2245 | #endif /* _LINUX_KERNEL_TRACE_H */ |
2246 | |