// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate_wait.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include "rcu.h"

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
	unsigned long gp_seq;		/* Grace-period counter. */
};

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
	.donetail	= &rcu_ctrlblk.rcucblist,
	.curtail	= &rcu_ctrlblk.rcucblist,
	.gp_seq		= 0 - 300UL,
};
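
/*
 * Illustrative sketch (editor's note, not part of the upstream file):
 * ->rcucblist is a singly linked list partitioned by the two tail
 * pointers.  Callbacks from ->rcucblist up through *->donetail have
 * waited out a grace period and may be invoked; callbacks beyond that,
 * up through *->curtail, are still waiting.  With callbacks A and B
 * queued and only A ready:
 *
 *	rcucblist -> A -> B -> NULL
 *	donetail == &A->next	(A is "done")
 *	curtail  == &B->next	(B still waiting)
 *
 * The empty list is the initializer state above, with both tail
 * pointers aimed back at ->rcucblist itself.
 */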

void rcu_barrier(void)
{
	wait_rcu_gp(call_rcu_hurry);
}
EXPORT_SYMBOL(rcu_barrier);
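
/*
 * Usage sketch (illustrative only; foo_exit() is hypothetical): a module
 * that has posted callbacks with call_rcu() must invoke rcu_barrier()
 * before unloading, so that no callback can run after the module's text
 * and data are gone:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		rcu_barrier();
 *	}
 */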

/* Record an rcu quiescent state. */
void rcu_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
		rcu_ctrlblk.donetail = rcu_ctrlblk.curtail;
		raise_softirq_irqoff(RCU_SOFTIRQ);
	}
	WRITE_ONCE(rcu_ctrlblk.gp_seq, rcu_ctrlblk.gp_seq + 2);
	local_irq_restore(flags);
}
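
/*
 * Illustrative note (editor's sketch): ->gp_seq advances by two per
 * quiescent state, so a cookie from get_state_synchronize_rcu()
 * compares unequal exactly when at least one grace period has elapsed
 * since the cookie was taken:
 *
 *	cookie = get_state_synchronize_rcu();	(gp_seq == N)
 *	rcu_qs();				(gp_seq == N + 2)
 *	poll_state_synchronize_rcu(cookie);	(returns true)
 */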

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_sched_clock_irq(int user)
{
	if (user) {
		rcu_qs();
	} else if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
		set_tsk_need_resched(current);
		set_preempt_need_resched();
	}
}

/*
 * Reclaim the specified callback by invoking it.  Despite the return
 * type, this always returns false: no direct kfree()-style reclamation
 * is performed here, so there is never a kfreed object to report.
 */
static inline bool rcu_reclaim_tiny(struct rcu_head *head)
{
	rcu_callback_t f;

	rcu_lock_acquire(&rcu_callback_map);

	trace_rcu_invoke_callback("", head);
	f = head->func;
	debug_rcu_head_callback(head);
	WRITE_ONCE(head->func, (rcu_callback_t)0L);
	f(head);
	rcu_lock_release(&rcu_callback_map);
	return false;
}

/* Invoke the RCU callbacks whose grace period has elapsed. */
static __latent_entropy void rcu_process_callbacks(void)
{
	struct rcu_head *next, *list;
	unsigned long flags;

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	if (rcu_ctrlblk.donetail == &rcu_ctrlblk.rcucblist) {
		/* No callbacks ready, so just leave. */
		local_irq_restore(flags);
		return;
	}
	list = rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.rcucblist = *rcu_ctrlblk.donetail;
	*rcu_ctrlblk.donetail = NULL;
	if (rcu_ctrlblk.curtail == rcu_ctrlblk.donetail)
		rcu_ctrlblk.curtail = &rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.donetail = &rcu_ctrlblk.rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		rcu_reclaim_tiny(list);
		list = next;
	}
}
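
/*
 * Worked example of the splice above (editor's sketch): with A "done"
 * and B still waiting (rcucblist -> A -> B, donetail == &A->next,
 * curtail == &B->next), the irq-disabled section leaves:
 *
 *	list      == A			(A -> NULL after *donetail = NULL)
 *	rcucblist == B			(picked up from *donetail)
 *	donetail  == &rcucblist		(no "done" callbacks remain)
 *	curtail   == &B->next		(B still queued)
 *
 * so only A is invoked, and B waits for the next quiescent state.
 */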

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_rcu() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_rcu() is a quiescent state,
 * and so on a UP system, synchronize_rcu() need do nothing, other than
 * let the polled APIs know that another grace period elapsed.
 *
 * (But Lai Jiangshan points out the benefits of doing might_sleep()
 * to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 */
void synchronize_rcu(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu() in RCU read-side critical section");
	preempt_disable();
	WRITE_ONCE(rcu_ctrlblk.gp_seq, rcu_ctrlblk.gp_seq + 2);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
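
/*
 * Typical updater pattern (illustrative sketch; the list element p is
 * hypothetical): unlink an element, wait for pre-existing readers, then
 * free it:
 *
 *	list_del_rcu(&p->list);
 *	synchronize_rcu();
 *	kfree(p);
 *
 * On this UP build the wait itself costs nothing; only the gp_seq bump
 * above is needed so that the polled grace-period APIs observe progress.
 */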

/*
 * Post an RCU callback to be invoked after the end of an RCU grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	static atomic_t doublefrees;
	unsigned long flags;

	if (debug_rcu_head_queue(head)) {
		if (atomic_inc_return(&doublefrees) < 4) {
			pr_err("%s(): Double-freed CB %p->%pS()!!!  Duplicate call_rcu()?\n", __func__, head, head->func);
			mem_dump_obj(head);
		}
		return;
	}

	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcu_ctrlblk.curtail = head;
	rcu_ctrlblk.curtail = &head->next;
	local_irq_restore(flags);

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_qs() */
		resched_cpu(0);
	}
}
EXPORT_SYMBOL_GPL(call_rcu);
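
/*
 * Usage sketch (names hypothetical): the rcu_head is normally embedded
 * in the structure being protected, and the callback recovers the
 * enclosing structure with container_of() and frees it:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	call_rcu(&p->rh, foo_reclaim);
 */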

/*
 * Store a grace-period-counter "cookie".  For more information,
 * see the Tree RCU header comment.
 */
void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	rgosp->rgos_norm = RCU_GET_STATE_COMPLETED;
}
EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu_full);

/*
 * Return a grace-period-counter "cookie".  For more information,
 * see the Tree RCU header comment.
 */
unsigned long get_state_synchronize_rcu(void)
{
	return READ_ONCE(rcu_ctrlblk.gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);

/*
 * Return a grace-period-counter "cookie" and ensure that a future grace
 * period completes.  For more information, see the Tree RCU header comment.
 */
unsigned long start_poll_synchronize_rcu(void)
{
	unsigned long gp_seq = get_state_synchronize_rcu();

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_qs() */
		resched_cpu(0);
	}
	return gp_seq;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);

/*
 * Return true if the grace period corresponding to oldstate has completed
 * and false otherwise.  For more information, see the Tree RCU header
 * comment.
 */
bool poll_state_synchronize_rcu(unsigned long oldstate)
{
	return oldstate == RCU_GET_STATE_COMPLETED || READ_ONCE(rcu_ctrlblk.gp_seq) != oldstate;
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
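
/*
 * Polled grace-period sketch (illustrative; p and foo_reclaim() are the
 * hypothetical names from the call_rcu() example above): take a cookie,
 * kick off a grace period, and later poll for completion instead of
 * blocking in synchronize_rcu():
 *
 *	unsigned long cookie = start_poll_synchronize_rcu();
 *
 *	if (poll_state_synchronize_rcu(cookie))
 *		kfree(p);			(grace period elapsed)
 *	else
 *		call_rcu(&p->rh, foo_reclaim);	(fall back to a callback)
 */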

#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST)
unsigned long long rcutorture_gather_gp_seqs(void)
{
	return READ_ONCE(rcu_ctrlblk.gp_seq) & 0xffffULL;
}
EXPORT_SYMBOL_GPL(rcutorture_gather_gp_seqs);

void rcutorture_format_gp_seqs(unsigned long long seqs, char *cp, size_t len)
{
	snprintf(cp, len, "g%04llx", seqs & 0xffffULL);
}
EXPORT_SYMBOL_GPL(rcutorture_format_gp_seqs);
#endif

void __init rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	rcu_early_boot_tests();
	tasks_cblist_init_generic();
}