/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion, adapted for tracing.
 *
 * Copyright (C) 2020 Paul E. McKenney.
 */

#ifndef __LINUX_RCUPDATE_TRACE_H
#define __LINUX_RCUPDATE_TRACE_H

#include <linux/sched.h>
#include <linux/rcupdate.h>

extern struct lockdep_map rcu_trace_lock_map;

#ifdef CONFIG_DEBUG_LOCK_ALLOC

static inline int rcu_read_lock_trace_held(void)
{
	return lock_is_held(&rcu_trace_lock_map);
}

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

static inline int rcu_read_lock_trace_held(void)
{
	return 1;
}

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
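
/*
 * Illustrative sketch (not part of this header's API): code that must run
 * within an RCU-trace reader can assert as much under lockdep, e.g.:
 *
 *	WARN_ON_ONCE(!rcu_read_lock_trace_held());
 *
 * With CONFIG_DEBUG_LOCK_ALLOC=n, rcu_read_lock_trace_held() is
 * unconditionally 1, so such assertions effectively compile away.
 */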

#ifdef CONFIG_TASKS_TRACE_RCU

void rcu_read_unlock_trace_special(struct task_struct *t);

/**
 * rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section
 *
 * When synchronize_rcu_tasks_trace() is invoked by one task, then that
 * task is guaranteed to block until all other tasks exit their read-side
 * critical sections.  Similarly, if call_rcu_tasks_trace() is invoked on
 * one task while other tasks are within RCU read-side critical sections,
 * invocation of the corresponding RCU callback is deferred until after
 * all the other tasks exit their critical sections.
 *
 * For more details, please see the documentation for rcu_read_lock().
 */
static inline void rcu_read_lock_trace(void)
{
	struct task_struct *t = current;

	WRITE_ONCE(t->trc_reader_nesting, READ_ONCE(t->trc_reader_nesting) + 1);
	barrier();
	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
	    t->trc_reader_special.b.need_mb)
		smp_mb(); // Pairs with update-side barriers
	rcu_lock_acquire(&rcu_trace_lock_map);
}
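
/*
 * Illustrative sketch, in which gp, new_p, gp_lock, and do_something_with()
 * are all hypothetical: a reader and an updater synchronizing via Tasks
 * Trace RCU:
 *
 *	// Reader:
 *	rcu_read_lock_trace();
 *	p = rcu_dereference_check(gp, rcu_read_lock_trace_held());
 *	do_something_with(p);
 *	rcu_read_unlock_trace();
 *
 *	// Updater, after unpublishing the old object:
 *	old = rcu_replace_pointer(gp, new_p, lockdep_is_held(&gp_lock));
 *	synchronize_rcu_tasks_trace();	// Wait for pre-existing readers.
 *	kfree(old);
 */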

/**
 * rcu_read_unlock_trace - mark end of RCU-trace read-side critical section
 *
 * Pairs with a preceding call to rcu_read_lock_trace(), and nesting is
 * allowed.  Invoking rcu_read_unlock_trace() when there is no matching
 * rcu_read_lock_trace() is verboten, and will result in lockdep complaints.
 *
 * For more details, please see the documentation for rcu_read_unlock().
 */
static inline void rcu_read_unlock_trace(void)
{
	int nesting;
	struct task_struct *t = current;

	rcu_lock_release(&rcu_trace_lock_map);
	nesting = READ_ONCE(t->trc_reader_nesting) - 1;
	barrier(); // Critical section before disabling.
	// Disable IPI-based setting of .need_qs.
	WRITE_ONCE(t->trc_reader_nesting, INT_MIN + nesting);
	if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
		WRITE_ONCE(t->trc_reader_nesting, nesting);
		return;  // We assume shallow reader nesting.
	}
	WARN_ON_ONCE(nesting != 0);
	rcu_read_unlock_trace_special(t);
}
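
/*
 * Nesting sketch: rcu_read_lock_trace() readers may nest, and only the
 * outermost rcu_read_unlock_trace() marks the end of the critical section:
 *
 *	rcu_read_lock_trace();
 *	rcu_read_lock_trace();		// Nested: still one critical section.
 *	rcu_read_unlock_trace();	// Inner unlock: still within the reader.
 *	rcu_read_unlock_trace();	// Outermost: the reader has now ended.
 */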

void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
void synchronize_rcu_tasks_trace(void);
void rcu_barrier_tasks_trace(void);
struct task_struct *get_rcu_tasks_trace_gp_kthread(void);
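
/*
 * Illustrative sketch (struct foo and foo_free_cb() are hypothetical):
 * deferred freeing of an unpublished object once all pre-existing
 * RCU-trace readers have completed:
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		int data;
 *	};
 *
 *	static void foo_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	call_rcu_tasks_trace(&old_foo->rh, foo_free_cb);
 */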
#else
/*
 * The BPF JIT forms these addresses even when it doesn't call these
 * functions, so provide definitions that result in runtime errors.
 */
static inline void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func) { BUG(); }
static inline void rcu_read_lock_trace(void) { BUG(); }
static inline void rcu_read_unlock_trace(void) { BUG(); }
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */

#endif /* __LINUX_RCUPDATE_TRACE_H */