1 | /* SPDX-License-Identifier: GPL-2.0+ */ |
2 | /* |
3 | * Task-based RCU implementations. |
4 | * |
5 | * Copyright (C) 2020 Paul E. McKenney |
6 | */ |
7 | |
8 | #ifdef CONFIG_TASKS_RCU_GENERIC |
9 | #include "rcu_segcblist.h" |
10 | |
11 | //////////////////////////////////////////////////////////////////////// |
12 | // |
13 | // Generic data structures. |
14 | |
15 | struct rcu_tasks; |
16 | typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp); |
17 | typedef void (*pregp_func_t)(struct list_head *hop); |
18 | typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop); |
19 | typedef void (*postscan_func_t)(struct list_head *hop); |
20 | typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp); |
21 | typedef void (*postgp_func_t)(struct rcu_tasks *rtp); |
22 | |
23 | /** |
24 | * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism. |
25 | * @cblist: Callback list. |
26 | * @lock: Lock protecting per-CPU callback list. |
27 | * @rtp_jiffies: Jiffies counter value for statistics. |
28 | * @lazy_timer: Timer to unlazify callbacks. |
29 | * @urgent_gp: Number of additional non-lazy grace periods. |
30 | * @rtp_n_lock_retries: Rough lock-contention statistic. |
31 | * @rtp_work: Work queue for invoking callbacks. |
32 | * @rtp_irq_work: IRQ work queue for deferred wakeups. |
33 | * @barrier_q_head: RCU callback for barrier operation. |
34 | * @rtp_blkd_tasks: List of tasks blocked as readers. |
35 | * @rtp_exit_list: List of tasks in the latter portion of do_exit(). |
36 | * @cpu: CPU number corresponding to this entry. |
37 | * @index: Index of this CPU in rtpcp_array of the rcu_tasks structure. |
38 | * @rtpp: Pointer to the rcu_tasks structure. |
39 | */ |
40 | struct rcu_tasks_percpu { |
41 | struct rcu_segcblist cblist; |
42 | raw_spinlock_t __private lock; |
43 | unsigned long rtp_jiffies; |
44 | unsigned long rtp_n_lock_retries; |
45 | struct timer_list lazy_timer; |
46 | unsigned int urgent_gp; |
47 | struct work_struct rtp_work; |
48 | struct irq_work rtp_irq_work; |
49 | struct rcu_head barrier_q_head; |
50 | struct list_head rtp_blkd_tasks; |
51 | struct list_head rtp_exit_list; |
52 | int cpu; |
53 | int index; |
54 | struct rcu_tasks *rtpp; |
55 | }; |
56 | |
57 | /** |
58 | * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism. |
59 | * @cbs_wait: RCU wait allowing a new callback to get kthread's attention. |
60 | * @cbs_gbl_lock: Lock protecting callback list. |
61 | * @tasks_gp_mutex: Mutex protecting grace period, needed during mid-boot dead zone. |
62 | * @gp_func: This flavor's grace-period-wait function. |
63 | * @gp_state: Grace period's most recent state transition (debugging). |
64 | * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping. |
65 | * @init_fract: Initial backoff sleep interval. |
66 | * @gp_jiffies: Time of last @gp_state transition. |
67 | * @gp_start: Most recent grace-period start in jiffies. |
68 | * @tasks_gp_seq: Number of grace periods completed since boot in upper bits. |
69 | * @n_ipis: Number of IPIs sent to encourage grace periods to end. |
70 | * @n_ipis_fails: Number of IPI-send failures. |
71 | * @kthread_ptr: This flavor's grace-period/callback-invocation kthread. |
72 | * @lazy_jiffies: Number of jiffies to allow callbacks to be lazy. |
73 | * @pregp_func: This flavor's pre-grace-period function (optional). |
74 | * @pertask_func: This flavor's per-task scan function (optional). |
75 | * @postscan_func: This flavor's post-task scan function (optional). |
76 | * @holdouts_func: This flavor's holdout-list scan function (optional). |
77 | * @postgp_func: This flavor's post-grace-period function (optional). |
78 | * @call_func: This flavor's call_rcu()-equivalent function. |
79 | * @wait_state: Task state for synchronous grace-period waits (default TASK_UNINTERRUPTIBLE). |
80 | * @rtpcpu: This flavor's rcu_tasks_percpu structure. |
81 | * @rtpcp_array: Array of pointers to rcu_tasks_percpu structure of CPUs in cpu_possible_mask. |
82 | * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks. |
83 | * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing. |
84 | * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing. |
85 | * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers. |
86 | * @barrier_q_mutex: Serialize barrier operations. |
87 | * @barrier_q_count: Number of queues being waited on. |
88 | * @barrier_q_completion: Barrier wait/wakeup mechanism. |
89 | * @barrier_q_seq: Sequence number for barrier operations. |
90 | * @barrier_q_start: Most recent barrier start in jiffies. |
91 | * @name: This flavor's textual name. |
92 | * @kname: This flavor's kthread name. |
93 | */ |
94 | struct rcu_tasks { |
95 | struct rcuwait cbs_wait; |
96 | raw_spinlock_t cbs_gbl_lock; |
97 | struct mutex tasks_gp_mutex; |
98 | int gp_state; |
99 | int gp_sleep; |
100 | int init_fract; |
101 | unsigned long gp_jiffies; |
102 | unsigned long gp_start; |
103 | unsigned long tasks_gp_seq; |
104 | unsigned long n_ipis; |
105 | unsigned long n_ipis_fails; |
106 | struct task_struct *kthread_ptr; |
107 | unsigned long lazy_jiffies; |
108 | rcu_tasks_gp_func_t gp_func; |
109 | pregp_func_t pregp_func; |
110 | pertask_func_t pertask_func; |
111 | postscan_func_t postscan_func; |
112 | holdouts_func_t holdouts_func; |
113 | postgp_func_t postgp_func; |
114 | call_rcu_func_t call_func; |
115 | unsigned int wait_state; |
116 | struct rcu_tasks_percpu __percpu *rtpcpu; |
117 | struct rcu_tasks_percpu **rtpcp_array; |
118 | int percpu_enqueue_shift; |
119 | int percpu_enqueue_lim; |
120 | int percpu_dequeue_lim; |
121 | unsigned long percpu_dequeue_gpseq; |
122 | struct mutex barrier_q_mutex; |
123 | atomic_t barrier_q_count; |
124 | struct completion barrier_q_completion; |
125 | unsigned long barrier_q_seq; |
126 | unsigned long barrier_q_start; |
127 | char *name; |
128 | char *kname; |
129 | }; |
130 | |
131 | static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp); |
132 | |
133 | #define DEFINE_RCU_TASKS(rt_name, gp, call, n) \ |
134 | static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = { \ |
135 | .lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock), \ |
136 | .rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup), \ |
137 | }; \ |
138 | static struct rcu_tasks rt_name = \ |
139 | { \ |
140 | .cbs_wait = __RCUWAIT_INITIALIZER(rt_name.wait), \ |
141 | .cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock), \ |
142 | .tasks_gp_mutex = __MUTEX_INITIALIZER(rt_name.tasks_gp_mutex), \ |
143 | .gp_func = gp, \ |
144 | .call_func = call, \ |
145 | .wait_state = TASK_UNINTERRUPTIBLE, \ |
146 | .rtpcpu = &rt_name ## __percpu, \ |
147 | .lazy_jiffies = DIV_ROUND_UP(HZ, 4), \ |
148 | .name = n, \ |
149 | .percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS), \ |
150 | .percpu_enqueue_lim = 1, \ |
151 | .percpu_dequeue_lim = 1, \ |
152 | .barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex), \ |
153 | .barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT, \ |
154 | .kname = #rt_name, \ |
155 | } |
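/*
 * Illustrative (hypothetical) use of DEFINE_RCU_TASKS(); the real
 * instantiations for the "RCU Tasks" and "RCU Tasks Rude" flavors appear
 * later in this file:
 *
 *	static void my_flavor_wait_gp(struct rcu_tasks *rtp);
 *	static void call_my_flavor(struct rcu_head *rhp, rcu_callback_t func);
 *	DEFINE_RCU_TASKS(my_flavor, my_flavor_wait_gp, call_my_flavor, "My Flavor");
 *
 * This creates the flavor's per-CPU rcu_tasks_percpu storage and an
 * rcu_tasks instance that starts out with single-queue enqueueing
 * (percpu_enqueue_lim == 1) until cblist_init_generic() or
 * contention-driven adjustment says otherwise.
 */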
156 | |
157 | #ifdef CONFIG_TASKS_RCU |
158 | |
159 | /* Report delay of scan exiting tasklist in rcu_tasks_postscan(). */ |
160 | static void tasks_rcu_exit_srcu_stall(struct timer_list *unused); |
161 | static DEFINE_TIMER(tasks_rcu_exit_srcu_stall_timer, tasks_rcu_exit_srcu_stall); |
162 | #endif |
163 | |
164 | /* Avoid IPIing CPUs early in the grace period. */ |
165 | #define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0) |
166 | static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY; |
167 | module_param(rcu_task_ipi_delay, int, 0644); |
168 | |
169 | /* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */ |
170 | #define RCU_TASK_BOOT_STALL_TIMEOUT (HZ * 30) |
171 | #define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10) |
172 | static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT; |
173 | module_param(rcu_task_stall_timeout, int, 0644); |
174 | #define RCU_TASK_STALL_INFO (HZ * 10) |
175 | static int rcu_task_stall_info __read_mostly = RCU_TASK_STALL_INFO; |
176 | module_param(rcu_task_stall_info, int, 0644); |
177 | static int rcu_task_stall_info_mult __read_mostly = 3; |
178 | module_param(rcu_task_stall_info_mult, int, 0444); |
179 | |
180 | static int rcu_task_enqueue_lim __read_mostly = -1; |
181 | module_param(rcu_task_enqueue_lim, int, 0444); |
182 | |
183 | static bool rcu_task_cb_adjust; |
184 | static int rcu_task_contend_lim __read_mostly = 100; |
185 | module_param(rcu_task_contend_lim, int, 0444); |
186 | static int rcu_task_collapse_lim __read_mostly = 10; |
187 | module_param(rcu_task_collapse_lim, int, 0444); |
188 | static int rcu_task_lazy_lim __read_mostly = 32; |
189 | module_param(rcu_task_lazy_lim, int, 0444); |
190 | |
191 | static int rcu_task_cpu_ids; |
192 | |
193 | /* RCU tasks grace-period state for debugging. */ |
194 | #define RTGS_INIT 0 |
195 | #define RTGS_WAIT_WAIT_CBS 1 |
196 | #define RTGS_WAIT_GP 2 |
197 | #define RTGS_PRE_WAIT_GP 3 |
198 | #define RTGS_SCAN_TASKLIST 4 |
199 | #define RTGS_POST_SCAN_TASKLIST 5 |
200 | #define RTGS_WAIT_SCAN_HOLDOUTS 6 |
201 | #define RTGS_SCAN_HOLDOUTS 7 |
202 | #define RTGS_POST_GP 8 |
203 | #define RTGS_WAIT_READERS 9 |
204 | #define RTGS_INVOKE_CBS 10 |
205 | #define RTGS_WAIT_CBS 11 |
206 | #ifndef CONFIG_TINY_RCU |
207 | static const char * const rcu_tasks_gp_state_names[] = { |
208 | "RTGS_INIT", |
209 | "RTGS_WAIT_WAIT_CBS", |
210 | "RTGS_WAIT_GP", |
211 | "RTGS_PRE_WAIT_GP", |
212 | "RTGS_SCAN_TASKLIST", |
213 | "RTGS_POST_SCAN_TASKLIST", |
214 | "RTGS_WAIT_SCAN_HOLDOUTS", |
215 | "RTGS_SCAN_HOLDOUTS", |
216 | "RTGS_POST_GP", |
217 | "RTGS_WAIT_READERS", |
218 | "RTGS_INVOKE_CBS", |
219 | "RTGS_WAIT_CBS", |
220 | }; |
221 | #endif /* #ifndef CONFIG_TINY_RCU */ |
222 | |
223 | //////////////////////////////////////////////////////////////////////// |
224 | // |
225 | // Generic code. |
226 | |
227 | static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp); |
228 | |
229 | /* Record grace-period phase and time. */ |
230 | static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate) |
231 | { |
232 | rtp->gp_state = newstate; |
233 | rtp->gp_jiffies = jiffies; |
234 | } |
235 | |
236 | #ifndef CONFIG_TINY_RCU |
237 | /* Return state name. */ |
238 | static const char *tasks_gp_state_getname(struct rcu_tasks *rtp) |
239 | { |
240 | int i = data_race(rtp->gp_state); // Let KCSAN detect update races |
241 | int j = READ_ONCE(i); // Prevent the compiler from reading twice |
242 | |
243 | if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names)) |
244 | return "???"; |
245 | return rcu_tasks_gp_state_names[j]; |
246 | } |
247 | #endif /* #ifndef CONFIG_TINY_RCU */ |
248 | |
249 | // Initialize per-CPU callback lists for the specified flavor of |
250 | // Tasks RCU. Do not enqueue callbacks before this function is invoked. |
251 | static void cblist_init_generic(struct rcu_tasks *rtp) |
252 | { |
253 | int cpu; |
254 | int lim; |
255 | int shift; |
256 | int maxcpu; |
257 | int index = 0; |
258 | |
259 | if (rcu_task_enqueue_lim < 0) { |
260 | rcu_task_enqueue_lim = 1; |
261 | rcu_task_cb_adjust = true; |
262 | } else if (rcu_task_enqueue_lim == 0) { |
263 | rcu_task_enqueue_lim = 1; |
264 | } |
265 | lim = rcu_task_enqueue_lim; |
266 | |
267 | rtp->rtpcp_array = kcalloc(num_possible_cpus(), sizeof(struct rcu_tasks_percpu *), GFP_KERNEL); |
268 | BUG_ON(!rtp->rtpcp_array); |
269 | |
270 | for_each_possible_cpu(cpu) { |
271 | struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); |
272 | |
273 | WARN_ON_ONCE(!rtpcp); |
274 | if (cpu) |
275 | raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock)); |
276 | if (rcu_segcblist_empty(&rtpcp->cblist)) |
277 | rcu_segcblist_init(&rtpcp->cblist); |
278 | INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq); |
279 | rtpcp->cpu = cpu; |
280 | rtpcp->rtpp = rtp; |
281 | rtpcp->index = index; |
282 | rtp->rtpcp_array[index] = rtpcp; |
283 | index++; |
284 | if (!rtpcp->rtp_blkd_tasks.next) |
285 | INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks); |
286 | if (!rtpcp->rtp_exit_list.next) |
287 | INIT_LIST_HEAD(&rtpcp->rtp_exit_list); |
288 | rtpcp->barrier_q_head.next = &rtpcp->barrier_q_head; |
289 | maxcpu = cpu; |
290 | } |
291 | |
292 | rcu_task_cpu_ids = maxcpu + 1; |
293 | if (lim > rcu_task_cpu_ids) |
294 | lim = rcu_task_cpu_ids; |
295 | shift = ilog2(rcu_task_cpu_ids / lim); |
296 | if (((rcu_task_cpu_ids - 1) >> shift) >= lim) |
297 | shift++; |
298 | WRITE_ONCE(rtp->percpu_enqueue_shift, shift); |
299 | WRITE_ONCE(rtp->percpu_dequeue_lim, lim); |
300 | smp_store_release(&rtp->percpu_enqueue_lim, lim); |
301 | |
302 | pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d rcu_task_cpu_ids=%d.\n", |
303 | rtp->name, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim), |
304 | rcu_task_cb_adjust, rcu_task_cpu_ids); |
305 | } |
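/*
 * Worked example of the enqueue-shift computation above, with assumed
 * (illustrative) numbers: given rcu_task_cpu_ids == 6 and lim == 4,
 * ilog2(6 / 4) yields shift == 0, but ((6 - 1) >> 0) == 5 >= 4, so the
 * shift is bumped to 1.  call_rcu_tasks_generic() then maps CPUs 0-1 to
 * queue 0, CPUs 2-3 to queue 1, and CPUs 4-5 to queue 2, keeping every
 * chosen queue index below the enqueue limit.
 */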
306 | |
307 | // Compute wakeup time for lazy callback timer. |
308 | static unsigned long rcu_tasks_lazy_time(struct rcu_tasks *rtp) |
309 | { |
310 | return jiffies + rtp->lazy_jiffies; |
311 | } |
312 | |
313 | // Timer handler that unlazifies lazy callbacks. |
314 | static void call_rcu_tasks_generic_timer(struct timer_list *tlp) |
315 | { |
316 | unsigned long flags; |
317 | bool needwake = false; |
318 | struct rcu_tasks *rtp; |
319 | struct rcu_tasks_percpu *rtpcp = timer_container_of(rtpcp, tlp, |
320 | lazy_timer); |
321 | |
322 | rtp = rtpcp->rtpp; |
323 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); |
324 | if (!rcu_segcblist_empty(&rtpcp->cblist) && rtp->lazy_jiffies) { |
325 | if (!rtpcp->urgent_gp) |
326 | rtpcp->urgent_gp = 1; |
327 | needwake = true; |
328 | mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp)); |
329 | } |
330 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); |
331 | if (needwake) |
332 | rcuwait_wake_up(&rtp->cbs_wait); |
333 | } |
334 | |
335 | // IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic(). |
336 | static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp) |
337 | { |
338 | struct rcu_tasks *rtp; |
339 | struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work); |
340 | |
341 | rtp = rtpcp->rtpp; |
342 | rcuwait_wake_up(&rtp->cbs_wait); |
343 | } |
344 | |
345 | // Enqueue a callback for the specified flavor of Tasks RCU. |
346 | static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func, |
347 | struct rcu_tasks *rtp) |
348 | { |
349 | int chosen_cpu; |
350 | unsigned long flags; |
351 | bool havekthread = smp_load_acquire(&rtp->kthread_ptr); |
352 | int ideal_cpu; |
353 | unsigned long j; |
354 | bool needadjust = false; |
355 | bool needwake; |
356 | struct rcu_tasks_percpu *rtpcp; |
357 | |
358 | rhp->next = NULL; |
359 | rhp->func = func; |
360 | local_irq_save(flags); |
361 | rcu_read_lock(); |
362 | ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift); |
363 | chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask); |
364 | WARN_ON_ONCE(chosen_cpu >= rcu_task_cpu_ids); |
365 | rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu); |
366 | if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled. |
367 | raw_spin_lock_rcu_node(rtpcp); // irqs already disabled. |
368 | j = jiffies; |
369 | if (rtpcp->rtp_jiffies != j) { |
370 | rtpcp->rtp_jiffies = j; |
371 | rtpcp->rtp_n_lock_retries = 0; |
372 | } |
373 | if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim && |
374 | READ_ONCE(rtp->percpu_enqueue_lim) != rcu_task_cpu_ids) |
375 | needadjust = true; // Defer adjustment to avoid deadlock. |
376 | } |
377 | // Queuing callbacks before initialization not yet supported. |
378 | if (WARN_ON_ONCE(!rcu_segcblist_is_enabled(&rtpcp->cblist))) |
379 | rcu_segcblist_init(&rtpcp->cblist); |
380 | needwake = (func == wakeme_after_rcu) || |
381 | (rcu_segcblist_n_cbs(&rtpcp->cblist) == rcu_task_lazy_lim); |
382 | if (havekthread && !needwake && !timer_pending(&rtpcp->lazy_timer)) { |
383 | if (rtp->lazy_jiffies) |
384 | mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp)); |
385 | else |
386 | needwake = rcu_segcblist_empty(&rtpcp->cblist); |
387 | } |
388 | if (needwake) |
389 | rtpcp->urgent_gp = 3; |
390 | rcu_segcblist_enqueue(&rtpcp->cblist, rhp); |
391 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); |
392 | if (unlikely(needadjust)) { |
393 | raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); |
394 | if (rtp->percpu_enqueue_lim != rcu_task_cpu_ids) { |
395 | WRITE_ONCE(rtp->percpu_enqueue_shift, 0); |
396 | WRITE_ONCE(rtp->percpu_dequeue_lim, rcu_task_cpu_ids); |
397 | smp_store_release(&rtp->percpu_enqueue_lim, rcu_task_cpu_ids); |
398 | pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name); |
399 | } |
400 | raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); |
401 | } |
402 | rcu_read_unlock(); |
403 | /* We can't create the thread unless interrupts are enabled. */ |
404 | if (needwake && READ_ONCE(rtp->kthread_ptr)) |
405 | irq_work_queue(&rtpcp->rtp_irq_work); |
406 | } |
407 | |
408 | // RCU callback function for rcu_barrier_tasks_generic(). |
409 | static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp) |
410 | { |
411 | struct rcu_tasks *rtp; |
412 | struct rcu_tasks_percpu *rtpcp; |
413 | |
414 | rhp->next = rhp; // Mark the callback as having been invoked. |
415 | rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head); |
416 | rtp = rtpcp->rtpp; |
417 | if (atomic_dec_and_test(&rtp->barrier_q_count)) |
418 | complete(&rtp->barrier_q_completion); |
419 | } |
420 | |
421 | // Wait for all in-flight callbacks for the specified RCU Tasks flavor. |
422 | // Operates in a manner similar to rcu_barrier(). |
423 | static void __maybe_unused rcu_barrier_tasks_generic(struct rcu_tasks *rtp) |
424 | { |
425 | int cpu; |
426 | unsigned long flags; |
427 | struct rcu_tasks_percpu *rtpcp; |
428 | unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq); |
429 | |
430 | mutex_lock(&rtp->barrier_q_mutex); |
431 | if (rcu_seq_done(&rtp->barrier_q_seq, s)) { |
432 | smp_mb(); |
433 | mutex_unlock(&rtp->barrier_q_mutex); |
434 | return; |
435 | } |
436 | rtp->barrier_q_start = jiffies; |
437 | rcu_seq_start(&rtp->barrier_q_seq); |
438 | init_completion(&rtp->barrier_q_completion); |
439 | atomic_set(&rtp->barrier_q_count, 2); |
440 | for_each_possible_cpu(cpu) { |
441 | if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim)) |
442 | break; |
443 | rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); |
444 | rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb; |
445 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); |
446 | if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head)) |
447 | atomic_inc(&rtp->barrier_q_count); |
448 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); |
449 | } |
450 | if (atomic_sub_and_test(2, &rtp->barrier_q_count)) |
451 | complete(&rtp->barrier_q_completion); |
452 | wait_for_completion(&rtp->barrier_q_completion); |
453 | rcu_seq_end(&rtp->barrier_q_seq); |
454 | mutex_unlock(&rtp->barrier_q_mutex); |
455 | } |
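/*
 * Worked example of the barrier counting above: barrier_q_count starts
 * at 2 so that callbacks invoked while the entrain loop is still running
 * cannot drive the count to zero early.  If callbacks are entrained on
 * three CPUs, the count rises to 5, the atomic_sub_and_test(2, ...) call
 * leaves 3, and the last of the three rcu_barrier_tasks_generic_cb()
 * invocations completes the barrier.  With nothing entrained at all, the
 * subtraction itself hits zero and completes immediately.
 */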
456 | |
457 | // Advance callbacks and indicate whether either a grace period or |
458 | // callback invocation is needed. |
459 | static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp) |
460 | { |
461 | int cpu; |
462 | int dequeue_limit; |
463 | unsigned long flags; |
464 | bool gpdone = poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq); |
465 | long n; |
466 | long ncbs = 0; |
467 | long ncbsnz = 0; |
468 | int needgpcb = 0; |
469 | |
470 | dequeue_limit = smp_load_acquire(&rtp->percpu_dequeue_lim); |
471 | for (cpu = 0; cpu < dequeue_limit; cpu++) { |
472 | if (!cpu_possible(cpu)) |
473 | continue; |
474 | struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); |
475 | |
476 | /* Advance and accelerate any new callbacks. */ |
477 | if (!rcu_segcblist_n_cbs(&rtpcp->cblist)) |
478 | continue; |
479 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); |
480 | // Should we shrink down to a single callback queue? |
481 | n = rcu_segcblist_n_cbs(&rtpcp->cblist); |
482 | if (n) { |
483 | ncbs += n; |
484 | if (cpu > 0) |
485 | ncbsnz += n; |
486 | } |
487 | rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); |
488 | (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq)); |
489 | if (rtpcp->urgent_gp > 0 && rcu_segcblist_pend_cbs(&rtpcp->cblist)) { |
490 | if (rtp->lazy_jiffies) |
491 | rtpcp->urgent_gp--; |
492 | needgpcb |= 0x3; |
493 | } else if (rcu_segcblist_empty(&rtpcp->cblist)) { |
494 | rtpcp->urgent_gp = 0; |
495 | } |
496 | if (rcu_segcblist_ready_cbs(&rtpcp->cblist)) |
497 | needgpcb |= 0x1; |
498 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); |
499 | } |
500 | |
501 | // Shrink down to a single callback queue if appropriate. |
502 | // This is done in two stages: (1) If there are no more than |
503 | // rcu_task_collapse_lim callbacks on CPU 0 and none on any other |
504 | // CPU, limit enqueueing to CPU 0. (2) After an RCU grace period, |
505 | // if there has not been an increase in callbacks, limit dequeuing |
506 | // to CPU 0. Note the matching RCU read-side critical section in |
507 | // call_rcu_tasks_generic(). |
508 | if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) { |
509 | raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); |
510 | if (rtp->percpu_enqueue_lim > 1) { |
511 | WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(rcu_task_cpu_ids)); |
512 | smp_store_release(&rtp->percpu_enqueue_lim, 1); |
513 | rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu(); |
514 | gpdone = false; |
515 | pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name); |
516 | } |
517 | raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); |
518 | } |
519 | if (rcu_task_cb_adjust && !ncbsnz && gpdone) { |
520 | raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags); |
521 | if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) { |
522 | WRITE_ONCE(rtp->percpu_dequeue_lim, 1); |
523 | pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name); |
524 | } |
525 | if (rtp->percpu_dequeue_lim == 1) { |
526 | for (cpu = rtp->percpu_dequeue_lim; cpu < rcu_task_cpu_ids; cpu++) { |
527 | if (!cpu_possible(cpu)) |
528 | continue; |
529 | struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); |
530 | |
531 | WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist)); |
532 | } |
533 | } |
534 | raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); |
535 | } |
536 | |
537 | return needgpcb; |
538 | } |
539 | |
540 | // Advance callbacks and invoke any that are ready. |
541 | static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp) |
542 | { |
543 | int cpuwq; |
544 | unsigned long flags; |
545 | int len; |
546 | int index; |
547 | struct rcu_head *rhp; |
548 | struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl); |
549 | struct rcu_tasks_percpu *rtpcp_next; |
550 | |
551 | index = rtpcp->index * 2 + 1; |
552 | if (index < num_possible_cpus()) { |
553 | rtpcp_next = rtp->rtpcp_array[index]; |
554 | if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) { |
555 | cpuwq = rcu_cpu_beenfullyonline(rtpcp_next->cpu) ? rtpcp_next->cpu : WORK_CPU_UNBOUND; |
556 | queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work); |
557 | index++; |
558 | if (index < num_possible_cpus()) { |
559 | rtpcp_next = rtp->rtpcp_array[index]; |
560 | if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) { |
561 | cpuwq = rcu_cpu_beenfullyonline(rtpcp_next->cpu) ? rtpcp_next->cpu : WORK_CPU_UNBOUND; |
562 | queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work); |
563 | } |
564 | } |
565 | } |
566 | } |
567 | |
568 | if (rcu_segcblist_empty(&rtpcp->cblist)) |
569 | return; |
570 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); |
571 | rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq)); |
572 | rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl); |
573 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); |
574 | len = rcl.len; |
575 | for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) { |
576 | debug_rcu_head_callback(rhp); |
577 | local_bh_disable(); |
578 | rhp->func(rhp); |
579 | local_bh_enable(); |
580 | cond_resched(); |
581 | } |
582 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); |
583 | rcu_segcblist_add_len(&rtpcp->cblist, -len); |
584 | (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq)); |
585 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); |
586 | } |
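/*
 * Reading aid for the workqueue fan-out at the top of
 * rcu_tasks_invoke_cbs(): the rtpcp_array entries form an implicit
 * binary tree in which entry i kicks off entries 2*i+1 and 2*i+2.  For
 * example, a six-CPU system is covered as 0 -> {1, 2}, 1 -> {3, 4},
 * and 2 -> {5}, so callback invocation spreads across the per-CPU
 * queues in a logarithmic number of dispatch steps.
 */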
587 | |
588 | // Workqueue flood to advance callbacks and invoke any that are ready. |
589 | static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp) |
590 | { |
591 | struct rcu_tasks *rtp; |
592 | struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work); |
593 | |
594 | rtp = rtpcp->rtpp; |
595 | rcu_tasks_invoke_cbs(rtp, rtpcp); |
596 | } |
597 | |
598 | // Wait for one grace period. |
599 | static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot) |
600 | { |
601 | int needgpcb; |
602 | |
603 | mutex_lock(&rtp->tasks_gp_mutex); |
604 | |
605 | // If there were none, wait a bit and start over. |
606 | if (unlikely(midboot)) { |
607 | needgpcb = 0x2; |
608 | } else { |
609 | mutex_unlock(&rtp->tasks_gp_mutex); |
610 | set_tasks_gp_state(rtp, RTGS_WAIT_CBS); |
611 | rcuwait_wait_event(&rtp->cbs_wait, |
612 | (needgpcb = rcu_tasks_need_gpcb(rtp)), |
613 | TASK_IDLE); |
614 | mutex_lock(&rtp->tasks_gp_mutex); |
615 | } |
616 | |
617 | if (needgpcb & 0x2) { |
618 | // Wait for one grace period. |
619 | set_tasks_gp_state(rtp, RTGS_WAIT_GP); |
620 | rtp->gp_start = jiffies; |
621 | rcu_seq_start(&rtp->tasks_gp_seq); |
622 | rtp->gp_func(rtp); |
623 | rcu_seq_end(&rtp->tasks_gp_seq); |
624 | } |
625 | |
626 | // Invoke callbacks. |
627 | set_tasks_gp_state(rtp, RTGS_INVOKE_CBS); |
628 | rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0)); |
629 | mutex_unlock(&rtp->tasks_gp_mutex); |
630 | } |
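/*
 * Reading aid for the needgpcb bitmask used above, as produced by
 * rcu_tasks_need_gpcb(): bit 0x1 means that some callbacks are already
 * ready to invoke, and bit 0x2 means that a new grace period is needed
 * for not-yet-ready callbacks.  The mid-boot path forces 0x2 because its
 * single synchronous caller is known to be waiting for a grace period.
 */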
631 | |
632 | // RCU-tasks kthread that detects grace periods and invokes callbacks. |
633 | static int __noreturn rcu_tasks_kthread(void *arg) |
634 | { |
635 | int cpu; |
636 | struct rcu_tasks *rtp = arg; |
637 | |
638 | for_each_possible_cpu(cpu) { |
639 | struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); |
640 | |
641 | timer_setup(&rtpcp->lazy_timer, call_rcu_tasks_generic_timer, 0); |
642 | rtpcp->urgent_gp = 1; |
643 | } |
644 | |
645 | /* Run on housekeeping CPUs by default. Sysadm can move if desired. */ |
646 | housekeeping_affine(current, HK_TYPE_RCU); |
647 | smp_store_release(&rtp->kthread_ptr, current); // Let GPs start! |
648 | |
649 | /* |
650 | * Each pass through the following loop makes one check for |
651 | * newly arrived callbacks, and, if there are some, waits for |
652 | * one RCU-tasks grace period and then invokes the callbacks. |
653 | * This loop is terminated by the system going down. ;-) |
654 | */ |
655 | for (;;) { |
656 | // Wait for one grace period and invoke any callbacks |
657 | // that are ready. |
658 | rcu_tasks_one_gp(rtp, false); |
659 | |
660 | // Paranoid sleep to keep this from entering a tight loop. |
661 | schedule_timeout_idle(rtp->gp_sleep); |
662 | } |
663 | } |
664 | |
665 | // Wait for a grace period for the specified flavor of Tasks RCU. |
666 | static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp) |
667 | { |
668 | /* Complain if the scheduler has not started. */ |
669 | if (WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE, |
670 | "synchronize_%s() called too soon", rtp->name)) |
671 | return; |
672 | |
673 | // If the grace-period kthread is running, use it. |
674 | if (READ_ONCE(rtp->kthread_ptr)) { |
675 | wait_rcu_gp_state(rtp->wait_state, rtp->call_func); |
676 | return; |
677 | } |
678 | rcu_tasks_one_gp(rtp, true); |
679 | } |
680 | |
681 | /* Spawn RCU-tasks grace-period kthread. */ |
682 | static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp) |
683 | { |
684 | struct task_struct *t; |
685 | |
686 | t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname); |
687 | if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name)) |
688 | return; |
689 | smp_mb(); /* Ensure others see full kthread. */ |
690 | } |
691 | |
692 | #ifndef CONFIG_TINY_RCU |
693 | |
694 | /* |
695 | * Print any non-default Tasks RCU settings. |
696 | */ |
697 | static void __init rcu_tasks_bootup_oddness(void) |
698 | { |
699 | #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) |
700 | int rtsimc; |
701 | |
702 | if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT) |
703 | pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout); |
704 | rtsimc = clamp(rcu_task_stall_info_mult, 1, 10); |
705 | if (rtsimc != rcu_task_stall_info_mult) { |
706 | pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsimc); |
707 | rcu_task_stall_info_mult = rtsimc; |
708 | } |
709 | #endif /* #ifdef CONFIG_TASKS_RCU */ |
710 | #ifdef CONFIG_TASKS_RCU |
711 | pr_info("\tTrampoline variant of Tasks RCU enabled.\n"); |
712 | #endif /* #ifdef CONFIG_TASKS_RCU */ |
713 | #ifdef CONFIG_TASKS_RUDE_RCU |
714 | pr_info("\tRude variant of Tasks RCU enabled.\n"); |
715 | #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */ |
716 | #ifdef CONFIG_TASKS_TRACE_RCU |
717 | pr_info("\tTracing variant of Tasks RCU enabled.\n"); |
718 | #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ |
719 | } |
720 | |
721 | |
722 | /* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */ |
723 | static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s) |
724 | { |
725 | int cpu; |
726 | bool havecbs = false; |
727 | bool haveurgent = false; |
728 | bool haveurgentcbs = false; |
729 | |
730 | for_each_possible_cpu(cpu) { |
731 | struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); |
732 | |
733 | if (!data_race(rcu_segcblist_empty(&rtpcp->cblist))) |
734 | havecbs = true; |
735 | if (data_race(rtpcp->urgent_gp)) |
736 | haveurgent = true; |
737 | if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)) && data_race(rtpcp->urgent_gp)) |
738 | haveurgentcbs = true; |
739 | if (havecbs && haveurgent && haveurgentcbs) |
740 | break; |
741 | } |
742 | pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c%c%c l:%lu %s\n", |
743 | rtp->kname, |
744 | tasks_gp_state_getname(rtp), data_race(rtp->gp_state), |
745 | jiffies - data_race(rtp->gp_jiffies), |
746 | data_race(rcu_seq_current(&rtp->tasks_gp_seq)), |
747 | data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis), |
748 | ".k"[!!data_race(rtp->kthread_ptr)], |
749 | ".C"[havecbs], |
750 | ".u"[haveurgent], |
751 | ".U"[haveurgentcbs], |
752 | rtp->lazy_jiffies, |
753 | s); |
754 | } |
755 | |
756 | /* Dump out more rcutorture-relevant state common to all RCU-tasks flavors. */ |
757 | static void rcu_tasks_torture_stats_print_generic(struct rcu_tasks *rtp, char *tt, |
758 | char *tf, char *tst) |
759 | { |
760 | cpumask_var_t cm; |
761 | int cpu; |
762 | bool gotcb = false; |
763 | unsigned long j = jiffies; |
764 | |
765 | pr_alert("%s%s Tasks%s RCU g%ld gp_start %lu gp_jiffies %lu gp_state %d (%s).\n", |
766 | tt, tf, tst, data_race(rtp->tasks_gp_seq), |
767 | j - data_race(rtp->gp_start), j - data_race(rtp->gp_jiffies), |
768 | data_race(rtp->gp_state), tasks_gp_state_getname(rtp)); |
769 | pr_alert("\tEnqueue shift %d limit %d Dequeue limit %d gpseq %lu.\n", |
770 | data_race(rtp->percpu_enqueue_shift), |
771 | data_race(rtp->percpu_enqueue_lim), |
772 | data_race(rtp->percpu_dequeue_lim), |
773 | data_race(rtp->percpu_dequeue_gpseq)); |
774 | (void)zalloc_cpumask_var(&cm, GFP_KERNEL); |
775 | pr_alert("\tCallback counts:"); |
776 | for_each_possible_cpu(cpu) { |
777 | long n; |
778 | struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu); |
779 | |
780 | if (cpumask_available(cm) && !rcu_barrier_cb_is_done(&rtpcp->barrier_q_head)) |
781 | cpumask_set_cpu(cpu, cm); |
782 | n = rcu_segcblist_n_cbs(&rtpcp->cblist); |
783 | if (!n) |
784 | continue; |
785 | pr_cont(" %d:%ld", cpu, n); |
786 | gotcb = true; |
787 | } |
788 | if (gotcb) |
789 | pr_cont(".\n"); |
790 | else |
791 | pr_cont(" (none).\n"); |
792 | pr_alert("\tBarrier seq %lu start %lu count %d holdout CPUs ", |
793 | data_race(rtp->barrier_q_seq), j - data_race(rtp->barrier_q_start), |
794 | atomic_read(&rtp->barrier_q_count)); |
795 | if (cpumask_available(cm) && !cpumask_empty(cm)) |
796 | pr_cont(" %*pbl.\n", cpumask_pr_args(cm)); |
797 | else |
798 | pr_cont("(none).\n"); |
799 | free_cpumask_var(cm); |
800 | } |
801 | |
802 | #endif // #ifndef CONFIG_TINY_RCU |
803 | |
804 | static void exit_tasks_rcu_finish_trace(struct task_struct *t); |
805 | |
806 | #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) |
807 | |
808 | //////////////////////////////////////////////////////////////////////// |
809 | // |
810 | // Shared code between task-list-scanning variants of Tasks RCU. |
811 | |
812 | /* Wait for one RCU-tasks grace period. */ |
813 | static void rcu_tasks_wait_gp(struct rcu_tasks *rtp) |
814 | { |
815 | struct task_struct *g; |
816 | int fract; |
817 | LIST_HEAD(holdouts); |
818 | unsigned long j; |
819 | unsigned long lastinfo; |
820 | unsigned long lastreport; |
821 | bool reported = false; |
822 | int rtsi; |
823 | struct task_struct *t; |
824 | |
825 | set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP); |
826 | rtp->pregp_func(&holdouts); |
827 | |
828 | /* |
829 | * There were callbacks, so we need to wait for an RCU-tasks |
830 | * grace period. Start off by scanning the task list for tasks |
831 | * that are not already voluntarily blocked. Mark these tasks |
832 | * and make a list of them in holdouts. |
833 | */ |
834 | set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST); |
835 | if (rtp->pertask_func) { |
836 | rcu_read_lock(); |
837 | for_each_process_thread(g, t) |
838 | rtp->pertask_func(t, &holdouts); |
839 | rcu_read_unlock(); |
840 | } |
841 | |
842 | set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST); |
843 | rtp->postscan_func(&holdouts); |
844 | |
845 | /* |
846 | * Each pass through the following loop scans the list of holdout |
847 | * tasks, removing any that are no longer holdouts. When the list |
848 | * is empty, we are done. |
849 | */ |
850 | lastreport = jiffies; |
851 | lastinfo = lastreport; |
852 | rtsi = READ_ONCE(rcu_task_stall_info); |
853 | |
854 | // Start off with initial wait and slowly back off to 1 HZ wait. |
855 | fract = rtp->init_fract; |
856 | |
857 | while (!list_empty(&holdouts)) { |
858 | ktime_t exp; |
859 | bool firstreport; |
860 | bool needreport; |
861 | int rtst; |
862 | |
863 | // Slowly back off waiting for holdouts |
864 | set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS); |
865 | if (!IS_ENABLED(CONFIG_PREEMPT_RT)) { |
866 | schedule_timeout_idle(fract); |
867 | } else { |
868 | exp = jiffies_to_nsecs(fract); |
869 | __set_current_state(TASK_IDLE); |
870 | schedule_hrtimeout_range(&exp, jiffies_to_nsecs(HZ / 2), HRTIMER_MODE_REL_HARD); |
871 | } |
872 | |
873 | if (fract < HZ) |
874 | fract++; |
875 | |
876 | rtst = READ_ONCE(rcu_task_stall_timeout); |
877 | needreport = rtst > 0 && time_after(jiffies, lastreport + rtst); |
878 | if (needreport) { |
879 | lastreport = jiffies; |
880 | reported = true; |
881 | } |
882 | firstreport = true; |
883 | WARN_ON(signal_pending(current)); |
884 | set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS); |
885 | rtp->holdouts_func(&holdouts, needreport, &firstreport); |
886 | |
887 | // Print pre-stall informational messages if needed. |
888 | j = jiffies; |
889 | if (rtsi > 0 && !reported && time_after(j, lastinfo + rtsi)) { |
890 | lastinfo = j; |
891 | rtsi = rtsi * rcu_task_stall_info_mult; |
892 | pr_info("%s: %s grace period number %lu (since boot) is %lu jiffies old.\n", |
893 | __func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start); |
894 | } |
895 | } |
896 | |
897 | set_tasks_gp_state(rtp, RTGS_POST_GP); |
898 | rtp->postgp_func(rtp); |
899 | } |
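/*
 * Backoff example for the holdout-scan loop above, assuming HZ == 1000
 * and an init_fract of HZ / 10 as set for the classic Tasks RCU flavor:
 * the first wait is 100 jiffies, each subsequent pass waits one jiffy
 * longer, and the per-pass wait saturates at HZ (one second) thereafter.
 */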
900 | |
901 | #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */ |
902 | |
903 | #ifdef CONFIG_TASKS_RCU |
904 | |
905 | //////////////////////////////////////////////////////////////////////// |
906 | // |
907 | // Simple variant of RCU whose quiescent states are voluntary context |
908 | // switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle. |
909 | // As such, grace periods can take one good long time. There are no |
910 | // read-side primitives similar to rcu_read_lock() and rcu_read_unlock() |
911 | // because this implementation is intended to get the system into a safe |
912 | // state for some of the manipulations involved in tracing and the like. |
913 | // Finally, this implementation does not support high call_rcu_tasks() |
914 | // rates from multiple CPUs. If this is required, per-CPU callback lists |
915 | // will be needed. |
916 | // |
917 | // The implementation uses rcu_tasks_wait_gp(), which relies on function |
918 | // pointers in the rcu_tasks structure. The rcu_spawn_tasks_kthread() |
919 | // function sets these function pointers up so that rcu_tasks_wait_gp() |
920 | // invokes these functions in this order: |
921 | // |
922 | // rcu_tasks_pregp_step(): |
923 | // Invokes synchronize_rcu() in order to wait for all in-flight |
924 | // t->on_rq and t->nvcsw transitions to complete. This works because |
925 | // all such transitions are carried out with interrupts disabled. |
926 | // rcu_tasks_pertask(), invoked on every non-idle task: |
927 | // For every runnable non-idle task other than the current one, use |
928 | // get_task_struct() to pin down that task, snapshot that task's |
929 | // number of voluntary context switches, and add that task to the |
930 | // holdout list. |
931 | // rcu_tasks_postscan(): |
932 | // Gather per-CPU lists of tasks in do_exit() to ensure that all |
933 | // tasks that were in the process of exiting (and which thus might |
934 | // not know to synchronize with this RCU Tasks grace period) have |
935 | // completed exiting. The synchronize_rcu() in rcu_tasks_postgp() |
936 | // will take care of any tasks stuck in the non-preemptible region |
937 | // of do_exit() following its call to exit_tasks_rcu_finish(). |
938 | // check_all_holdout_tasks(), repeatedly until holdout list is empty: |
939 | // Scans the holdout list, attempting to identify a quiescent state |
940 | // for each task on the list. If there is a quiescent state, the |
941 | // corresponding task is removed from the holdout list. |
942 | // rcu_tasks_postgp(): |
943 | // Invokes synchronize_rcu() in order to ensure that all prior |
944 | // t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks |
945 | // to have happened before the end of this RCU Tasks grace period. |
946 | // Again, this works because all such transitions are carried out |
947 | // with interrupts disabled. |
948 | // |
949 | // For each exiting task, the exit_tasks_rcu_start() and |
950 | // exit_tasks_rcu_finish() functions add and remove, respectively, the |
951 | // current task to a per-CPU list of tasks that rcu_tasks_postscan() must |
952 | // wait on. This is necessary because rcu_tasks_postscan() must wait on |
953 | // tasks that have already been removed from the global list of tasks. |
954 | // |
955 | // Pre-grace-period update-side code is ordered before the grace |
956 | // via the raw_spin_lock.*rcu_node(). Pre-grace-period read-side code |
957 | // is ordered before the grace period via synchronize_rcu() call in |
958 | // rcu_tasks_pregp_step() and by the scheduler's locks and interrupt |
959 | // disabling. |
960 | |
961 | /* Pre-grace-period preparation. */ |
962 | static void rcu_tasks_pregp_step(struct list_head *hop) |
963 | { |
964 | /* |
965 | * Wait for all pre-existing t->on_rq and t->nvcsw transitions |
966 | * to complete. Invoking synchronize_rcu() suffices because all |
967 | * these transitions occur with interrupts disabled. Without this |
968 | * synchronize_rcu(), a read-side critical section that started |
969 | * before the grace period might be incorrectly seen as having |
970 | * started after the grace period. |
971 | * |
972 | * This synchronize_rcu() also dispenses with the need for a |
973 | * memory barrier on the first store to t->rcu_tasks_holdout, |
974 | * as it forces the store to happen after the beginning of the |
975 | * grace period. |
976 | */ |
977 | synchronize_rcu(); |
978 | } |
979 | |
980 | /* Check for quiescent states since the pregp's synchronize_rcu() */ |
981 | static bool rcu_tasks_is_holdout(struct task_struct *t) |
982 | { |
983 | int cpu; |
984 | |
985 | /* Has the task been seen voluntarily sleeping? */ |
986 | if (!READ_ONCE(t->on_rq)) |
987 | return false; |
988 | |
989 | /* |
990 | * t->on_rq && !t->se.sched_delayed *could* be considered sleeping but |
991 | * since it is a spurious state (it will transition into the |
992 | * traditional blocked state or get woken up without outside |
993 | * dependencies), not considering it such should only affect timing. |
994 | * |
995 | * Be conservative for now and not include it. |
996 | */ |
997 | |
998 | /* |
999 | * Idle tasks (or idle injection) within the idle loop are RCU-tasks |
1000 | * quiescent states. But CPU boot code performed by the idle task |
1001 | * isn't a quiescent state. |
1002 | */ |
1003 | if (is_idle_task(t)) |
1004 | return false; |
1005 | |
1006 | cpu = task_cpu(t); |
1007 | |
1008 | /* Idle tasks on offline CPUs are RCU-tasks quiescent states. */ |
1009 | if (t == idle_task(cpu) && !rcu_cpu_online(cpu)) |
1010 | return false; |
1011 | |
1012 | return true; |
1013 | } |
1014 | |
1015 | /* Per-task initial processing. */ |
1016 | static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop) |
1017 | { |
1018 | if (t != current && rcu_tasks_is_holdout(t)) { |
1019 | get_task_struct(t); |
1020 | t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw); |
1021 | WRITE_ONCE(t->rcu_tasks_holdout, true); |
1022 | list_add(&t->rcu_tasks_holdout_list, hop); |
1023 | } |
1024 | } |
1025 | |
1026 | void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func); |
1027 | DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks"); |
1028 | |
1029 | /* Processing between scanning the tasklist and draining the holdout list. */ |
1030 | static void rcu_tasks_postscan(struct list_head *hop) |
1031 | { |
1032 | int cpu; |
1033 | int rtsi = READ_ONCE(rcu_task_stall_info); |
1034 | |
1035 | if (!IS_ENABLED(CONFIG_TINY_RCU)) { |
1036 | tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi; |
1037 | add_timer(&tasks_rcu_exit_srcu_stall_timer); |
1038 | } |
1039 | |
1040 | /* |
1041 | * Exiting tasks may escape the tasklist scan. Those are vulnerable |
1042 | * until their final schedule() with TASK_DEAD state. To cope with |
1043 | * this, divide the fragile exit path part in two intersecting |
1044 | * read side critical sections: |
1045 | * |
1046 | * 1) A task_struct list addition before calling exit_notify(), |
1047 | * which may remove the task from the tasklist, with the |
1048 | * removal after the final preempt_disable() call in do_exit(). |
1049 | * |
1050 | * 2) An _RCU_ read side starting with the final preempt_disable() |
1051 | * call in do_exit() and ending with the final call to schedule() |
1052 | * with TASK_DEAD state. |
1053 | * |
1054 | * This handles the part 1). And postgp will handle part 2) with a |
1055 | * call to synchronize_rcu(). |
1056 | */ |
1057 | |
1058 | for_each_possible_cpu(cpu) { |
1059 | unsigned long j = jiffies + 1; |
1060 | struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, cpu); |
1061 | struct task_struct *t; |
1062 | struct task_struct *t1; |
1063 | struct list_head tmp; |
1064 | |
1065 | raw_spin_lock_irq_rcu_node(rtpcp); |
1066 | list_for_each_entry_safe(t, t1, &rtpcp->rtp_exit_list, rcu_tasks_exit_list) { |
1067 | if (list_empty(&t->rcu_tasks_holdout_list)) |
1068 | rcu_tasks_pertask(t, hop); |
1069 | |
1070 | // RT kernels need frequent pauses, otherwise |
1071 | // pause at least once per pair of jiffies. |
1072 | if (!IS_ENABLED(CONFIG_PREEMPT_RT) && time_before(jiffies, j)) |
1073 | continue; |
1074 | |
1075 | // Keep our place in the list while pausing. |
1076 | // Nothing else traverses this list, so adding a |
1077 | // bare list_head is OK. |
1078 | list_add(&tmp, &t->rcu_tasks_exit_list); |
1079 | raw_spin_unlock_irq_rcu_node(rtpcp); |
1080 | cond_resched(); // For CONFIG_PREEMPT=n kernels |
1081 | raw_spin_lock_irq_rcu_node(rtpcp); |
1082 | t1 = list_entry(tmp.next, struct task_struct, rcu_tasks_exit_list); |
1083 | list_del(&tmp); |
1084 | j = jiffies + 1; |
1085 | } |
1086 | raw_spin_unlock_irq_rcu_node(rtpcp); |
1087 | } |
1088 | |
1089 | if (!IS_ENABLED(CONFIG_TINY_RCU)) |
1090 | timer_delete_sync(&tasks_rcu_exit_srcu_stall_timer); |
1091 | } |
1092 | |
1093 | /* See if tasks are still holding out, complain if so. */ |
1094 | static void check_holdout_task(struct task_struct *t, |
1095 | bool needreport, bool *firstreport) |
1096 | { |
1097 | int cpu; |
1098 | |
1099 | if (!READ_ONCE(t->rcu_tasks_holdout) || |
1100 | t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) || |
1101 | !rcu_tasks_is_holdout(t) || |
1102 | (IS_ENABLED(CONFIG_NO_HZ_FULL) && |
1103 | !is_idle_task(t) && READ_ONCE(t->rcu_tasks_idle_cpu) >= 0)) { |
1104 | WRITE_ONCE(t->rcu_tasks_holdout, false); |
1105 | list_del_init(&t->rcu_tasks_holdout_list); |
1106 | put_task_struct(t); |
1107 | return; |
1108 | } |
1109 | rcu_request_urgent_qs_task(t); |
1110 | if (!needreport) |
1111 | return; |
1112 | if (*firstreport) { |
1113 | pr_err("INFO: rcu_tasks detected stalls on tasks:\n"); |
1114 | *firstreport = false; |
1115 | } |
1116 | cpu = task_cpu(t); |
1117 | pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n", |
1118 | t, ".I"[is_idle_task(t)], |
1119 | "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)], |
1120 | t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout, |
1121 | data_race(t->rcu_tasks_idle_cpu), cpu); |
1122 | sched_show_task(t); |
1123 | } |
1124 | |
1125 | /* Scan the holdout lists for tasks no longer holding out. */ |
1126 | static void check_all_holdout_tasks(struct list_head *hop, |
1127 | bool needreport, bool *firstreport) |
1128 | { |
1129 | struct task_struct *t, *t1; |
1130 | |
1131 | list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) { |
1132 | check_holdout_task(t, needreport, firstreport); |
1133 | cond_resched(); |
1134 | } |
1135 | } |
1136 | |
1137 | /* Finish off the Tasks-RCU grace period. */ |
1138 | static void rcu_tasks_postgp(struct rcu_tasks *rtp) |
1139 | { |
1140 | /* |
1141 | * Because ->on_rq and ->nvcsw are not guaranteed to have a full |
1142 | * memory barriers prior to them in the schedule() path, memory |
1143 | * reordering on other CPUs could cause their RCU-tasks read-side |
1144 | * critical sections to extend past the end of the grace period. |
1145 | * However, because these ->nvcsw updates are carried out with |
1146 | * interrupts disabled, we can use synchronize_rcu() to force the |
1147 | * needed ordering on all such CPUs. |
1148 | * |
1149 | * This synchronize_rcu() also confines all ->rcu_tasks_holdout |
1150 | * accesses to be within the grace period, avoiding the need for |
1151 | * memory barriers for ->rcu_tasks_holdout accesses. |
1152 | * |
1153 | * In addition, this synchronize_rcu() waits for exiting tasks |
1154 | * to complete their final preempt_disable() region of execution, |
1155 | * enforcing the whole region before tasklist removal until |
1156 | * the final schedule() with TASK_DEAD state to be an RCU TASKS |
1157 | * read side critical section. |
1158 | */ |
1159 | synchronize_rcu(); |
1160 | } |
1161 | |
1162 | static void tasks_rcu_exit_srcu_stall(struct timer_list *unused) |
1163 | { |
1164 | #ifndef CONFIG_TINY_RCU |
1165 | int rtsi; |
1166 | |
1167 | rtsi = READ_ONCE(rcu_task_stall_info); |
1168 | pr_info("%s: %s grace period number %lu (since boot) gp_state: %s is %lu jiffies old.\n", |
1169 | __func__, rcu_tasks.kname, rcu_tasks.tasks_gp_seq, |
1170 | tasks_gp_state_getname(&rcu_tasks), jiffies - rcu_tasks.gp_jiffies); |
1171 | pr_info("Please check any exiting tasks stuck between calls to exit_tasks_rcu_start() and exit_tasks_rcu_finish()\n"); |
1172 | tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi; |
1173 | add_timer(&tasks_rcu_exit_srcu_stall_timer); |
1174 | #endif // #ifndef CONFIG_TINY_RCU |
1175 | } |
1176 | |
1177 | /** |
1178 | * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period |
1179 | * @rhp: structure to be used for queueing the RCU updates. |
1180 | * @func: actual callback function to be invoked after the grace period |
1181 | * |
1182 | * The callback function will be invoked some time after a full grace |
1183 | * period elapses, in other words after all currently executing RCU |
1184 | * read-side critical sections have completed. call_rcu_tasks() assumes |
1185 | * that the read-side critical sections end at a voluntary context |
1186 | * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle, |
1187 | * or transition to usermode execution. As such, there are no read-side |
1188 | * primitives analogous to rcu_read_lock() and rcu_read_unlock() because |
1189 | * this primitive is intended to determine that all tasks have passed |
1190 | * through a safe state, not so much for data-structure synchronization. |
1191 | * |
1192 | * See the description of call_rcu() for more detailed information on |
1193 | * memory ordering guarantees. |
1194 | */ |
1195 | void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func) |
1196 | { |
1197 | call_rcu_tasks_generic(rhp, func, &rcu_tasks); |
1198 | } |
1199 | EXPORT_SYMBOL_GPL(call_rcu_tasks); |
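/*
 * Minimal usage sketch for call_rcu_tasks() (hypothetical caller, not
 * part of this file), along the lines of freeing a dynamically
 * allocated trampoline:
 *
 *	struct my_tramp {
 *		struct rcu_head rh;
 *		void *insns;
 *	};
 *
 *	static void my_tramp_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_tramp, rh));
 *	}
 *
 *	// After unlinking a struct my_tramp *tramp so no new task can enter it:
 *	call_rcu_tasks(&tramp->rh, my_tramp_free_cb);
 *
 * The callback runs only after every task has been seen in a voluntary
 * context switch, in usermode, or in the idle loop, so no task can still
 * be executing in the old trampoline when it is freed.
 */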
1200 | |
1201 | /** |
1202 | * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed. |
1203 | * |
1204 | * Control will return to the caller some time after a full rcu-tasks |
1205 | * grace period has elapsed, in other words after all currently |
1206 | * executing rcu-tasks read-side critical sections have completed. These |
1207 | * read-side critical sections are delimited by calls to schedule(), |
1208 | * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls |
1209 | * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched(). |
1210 | * |
1211 | * This is a very specialized primitive, intended only for a few uses in |
1212 | * tracing and other situations requiring manipulation of function |
1213 | * preambles and profiling hooks. The synchronize_rcu_tasks() function |
1214 | * is not (yet) intended for heavy use from multiple CPUs. |
1215 | * |
1216 | * See the description of synchronize_rcu() for more detailed information |
1217 | * on memory ordering guarantees. |
1218 | */ |
1219 | void synchronize_rcu_tasks(void) |
1220 | { |
1221 | synchronize_rcu_tasks_generic(&rcu_tasks); |
1222 | } |
1223 | EXPORT_SYMBOL_GPL(synchronize_rcu_tasks); |
1224 | |
1225 | /** |
1226 | * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks. |
1227 | * |
1228 | * Although the current implementation is guaranteed to wait, it is not |
1229 | * obligated to, for example, if there are no pending callbacks. |
1230 | */ |
1231 | void rcu_barrier_tasks(void) |
1232 | { |
1233 | rcu_barrier_tasks_generic(&rcu_tasks); |
1234 | } |
1235 | EXPORT_SYMBOL_GPL(rcu_barrier_tasks); |
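/*
 * Typical teardown ordering for a user of call_rcu_tasks() (illustrative;
 * the helper names are hypothetical):
 *
 *	remove_all_my_hooks();          // Stop queueing new callbacks.
 *	rcu_barrier_tasks();            // Wait for already-queued callbacks.
 *	kmem_cache_destroy(my_cache);   // Only now free callback memory.
 */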
1236 | |
1237 | static int rcu_tasks_lazy_ms = -1; |
1238 | module_param(rcu_tasks_lazy_ms, int, 0444); |
1239 | |
1240 | static int __init rcu_spawn_tasks_kthread(void) |
1241 | { |
1242 | rcu_tasks.gp_sleep = HZ / 10; |
1243 | rcu_tasks.init_fract = HZ / 10; |
1244 | if (rcu_tasks_lazy_ms >= 0) |
1245 | rcu_tasks.lazy_jiffies = msecs_to_jiffies(rcu_tasks_lazy_ms); |
1246 | rcu_tasks.pregp_func = rcu_tasks_pregp_step; |
1247 | rcu_tasks.pertask_func = rcu_tasks_pertask; |
1248 | rcu_tasks.postscan_func = rcu_tasks_postscan; |
1249 | rcu_tasks.holdouts_func = check_all_holdout_tasks; |
1250 | rcu_tasks.postgp_func = rcu_tasks_postgp; |
1251 | rcu_tasks.wait_state = TASK_IDLE; |
1252 | rcu_spawn_tasks_kthread_generic(&rcu_tasks); |
1253 | return 0; |
1254 | } |
1255 | |
1256 | #if !defined(CONFIG_TINY_RCU) |
1257 | void show_rcu_tasks_classic_gp_kthread(void) |
1258 | { |
1259 | show_rcu_tasks_generic_gp_kthread(&rcu_tasks, ""); |
1260 | } |
1261 | EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread); |
1262 | |
1263 | void rcu_tasks_torture_stats_print(char *tt, char *tf) |
1264 | { |
1265 | rcu_tasks_torture_stats_print_generic(&rcu_tasks, tt, tf, ""); |
1266 | } |
1267 | EXPORT_SYMBOL_GPL(rcu_tasks_torture_stats_print); |
1268 | #endif // !defined(CONFIG_TINY_RCU) |
1269 | |
1270 | struct task_struct *get_rcu_tasks_gp_kthread(void) |
1271 | { |
1272 | return rcu_tasks.kthread_ptr; |
1273 | } |
1274 | EXPORT_SYMBOL_GPL(get_rcu_tasks_gp_kthread); |
1275 | |
1276 | void rcu_tasks_get_gp_data(int *flags, unsigned long *gp_seq) |
1277 | { |
1278 | *flags = 0; |
1279 | *gp_seq = rcu_seq_current(&rcu_tasks.tasks_gp_seq); |
1280 | } |
1281 | EXPORT_SYMBOL_GPL(rcu_tasks_get_gp_data); |
1282 | |
1283 | /* |
1284 | * Protect against tasklist scan blind spot while the task is exiting and |
1285 | * may be removed from the tasklist. Do this by adding the task to yet |
1286 | * another list. |
1287 | * |
1288 | * Note that the task will remove itself from this list, so there is no |
1289 | * need for get_task_struct(), except in the case where rcu_tasks_pertask() |
1290 | * adds it to the holdout list, in which case rcu_tasks_pertask() supplies |
1291 | * the needed get_task_struct(). |
1292 | */ |
1293 | void exit_tasks_rcu_start(void) |
1294 | { |
1295 | unsigned long flags; |
1296 | struct rcu_tasks_percpu *rtpcp; |
1297 | struct task_struct *t = current; |
1298 | |
1299 | WARN_ON_ONCE(!list_empty(&t->rcu_tasks_exit_list)); |
1300 | preempt_disable(); |
1301 | rtpcp = this_cpu_ptr(rcu_tasks.rtpcpu); |
1302 | t->rcu_tasks_exit_cpu = smp_processor_id(); |
1303 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); |
1304 | WARN_ON_ONCE(!rtpcp->rtp_exit_list.next); |
1305 | list_add(&t->rcu_tasks_exit_list, &rtpcp->rtp_exit_list); |
1306 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); |
1307 | preempt_enable(); |
1308 | } |
1309 | |
1310 | /* |
1311 | * Remove the task from the "yet another list" because do_exit() is now |
1312 | * non-preemptible, allowing synchronize_rcu() to wait beyond this point. |
1313 | */ |
1314 | void exit_tasks_rcu_finish(void) |
1315 | { |
1316 | unsigned long flags; |
1317 | struct rcu_tasks_percpu *rtpcp; |
1318 | struct task_struct *t = current; |
1319 | |
1320 | WARN_ON_ONCE(list_empty(&t->rcu_tasks_exit_list)); |
1321 | rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, t->rcu_tasks_exit_cpu); |
1322 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); |
1323 | list_del_init(&t->rcu_tasks_exit_list); |
1324 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); |
1325 | |
1326 | exit_tasks_rcu_finish_trace(t); |
1327 | } |
1328 | |
1329 | #else /* #ifdef CONFIG_TASKS_RCU */ |
1330 | void exit_tasks_rcu_start(void) { } |
1331 | void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); } |
1332 | #endif /* #else #ifdef CONFIG_TASKS_RCU */ |
1333 | |
1334 | #ifdef CONFIG_TASKS_RUDE_RCU |
1335 | |
1336 | //////////////////////////////////////////////////////////////////////// |
1337 | // |
1338 | // "Rude" variant of Tasks RCU, inspired by Steve Rostedt's |
1339 | // trick of passing an empty function to schedule_on_each_cpu(). |
1340 | // This approach provides batching of concurrent calls to the synchronous |
1341 | // synchronize_rcu_tasks_rude() API. This invokes schedule_on_each_cpu() |
1342 | // in order to send IPIs far and wide and induces otherwise unnecessary |
1343 | // context switches on all online CPUs, whether idle or not. |
1344 | // |
1345 | // Callback handling is provided by the rcu_tasks_kthread() function. |
1346 | // |
1347 | // Ordering is provided by the scheduler's context-switch code. |
1348 | |
1349 | // Empty function to allow workqueues to force a context switch. |
1350 | static void rcu_tasks_be_rude(struct work_struct *work) |
1351 | { |
1352 | } |
1353 | |
1354 | // Wait for one rude RCU-tasks grace period. |
1355 | static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp) |
1356 | { |
1357 | rtp->n_ipis += cpumask_weight(cpu_online_mask); |
1358 | schedule_on_each_cpu(rcu_tasks_be_rude);
1359 | } |
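
/*
 * Conceptually (and only conceptually -- schedule_on_each_cpu() does the
 * real work above), one rude grace period amounts to the following,
 * because running the no-op work item forces a context switch on each
 * online CPU, and a context switch is a Tasks RCU Rude quiescent state:
 *
 *	for_each_online_cpu(cpu)
 *		queue the rcu_tasks_be_rude() work item on that CPU;
 *	wait for all of those work items to complete;
 */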
1360 | |
1361 | static void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func); |
1362 | DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude, |
1363 | "RCU Tasks Rude"); |
1364 | |
1365 | /* |
1366 | * call_rcu_tasks_rude() - Queue a callback for a rude task-based grace period
1367 | * @rhp: structure to be used for queueing the RCU updates. |
1368 | * @func: actual callback function to be invoked after the grace period |
1369 | * |
1370 | * The callback function will be invoked some time after a full grace |
1371 | * period elapses, in other words after all currently executing RCU |
1372 | * read-side critical sections have completed. call_rcu_tasks_rude() |
1373 | * assumes that the read-side critical sections end at context switch, |
1374 | * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as |
1375 | * usermode execution is schedulable). As such, there are no read-side |
1376 | * primitives analogous to rcu_read_lock() and rcu_read_unlock() because |
1377 | * this primitive is intended to determine that all tasks have passed |
1378 | * through a safe state, not so much for data-structure synchronization. |
1379 | * |
1380 | * See the description of call_rcu() for more detailed information on |
1381 | * memory ordering guarantees. |
1382 | * |
1383 | * This is no longer exported, and is instead reserved for use by |
1384 | * synchronize_rcu_tasks_rude(). |
1385 | */ |
1386 | static void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func) |
1387 | { |
1388 | call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
1389 | } |
1390 | |
1391 | /** |
1392 | * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period |
1393 | * |
1394 | * Control will return to the caller some time after a rude rcu-tasks |
1395 | * grace period has elapsed, in other words after all currently |
1396 | * executing rcu-tasks read-side critical sections have elapsed. These |
1397 | * read-side critical sections are delimited by calls to schedule(), |
1398 | * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable |
1399 | * context), and (in theory, anyway) cond_resched(). |
1400 | * |
1401 | * This is a very specialized primitive, intended only for a few uses in |
1402 | * tracing and other situations requiring manipulation of function preambles |
1403 | * and profiling hooks. The synchronize_rcu_tasks_rude() function is not |
1404 | * (yet) intended for heavy use from multiple CPUs. |
1405 | * |
1406 | * See the description of synchronize_rcu() for more detailed information |
1407 | * on memory ordering guarantees. |
1408 | */ |
1409 | void synchronize_rcu_tasks_rude(void) |
1410 | { |
1411 | if (!IS_ENABLED(CONFIG_ARCH_WANTS_NO_INSTR) || IS_ENABLED(CONFIG_FORCE_TASKS_RUDE_RCU)) |
1412 | synchronize_rcu_tasks_generic(&rcu_tasks_rude);
1413 | } |
1414 | EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude); |
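
/*
 * Minimal usage sketch, assuming a hypothetical tracing-style update
 * (the trampoline names below are made up): once execution has been
 * redirected away from code lacking explicit read-side markers, wait
 * for every CPU to context-switch before tearing that code down.
 *
 *	disconnect_trampoline(old_tramp);	// No new executions begin.
 *	synchronize_rcu_tasks_rude();		// Every CPU has switched.
 *	free_trampoline(old_tramp);		// Now safe to free.
 */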
1415 | |
1416 | static int __init rcu_spawn_tasks_rude_kthread(void) |
1417 | { |
1418 | rcu_tasks_rude.gp_sleep = HZ / 10; |
1419 | rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
1420 | return 0; |
1421 | } |
1422 | |
1423 | #if !defined(CONFIG_TINY_RCU) |
1424 | void show_rcu_tasks_rude_gp_kthread(void) |
1425 | { |
1426 | show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
1427 | } |
1428 | EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread); |
1429 | |
1430 | void rcu_tasks_rude_torture_stats_print(char *tt, char *tf) |
1431 | { |
1432 | rcu_tasks_torture_stats_print_generic(&rcu_tasks_rude, tt, tf, "");
1433 | } |
1434 | EXPORT_SYMBOL_GPL(rcu_tasks_rude_torture_stats_print); |
1435 | #endif // !defined(CONFIG_TINY_RCU) |
1436 | |
1437 | struct task_struct *get_rcu_tasks_rude_gp_kthread(void) |
1438 | { |
1439 | return rcu_tasks_rude.kthread_ptr; |
1440 | } |
1441 | EXPORT_SYMBOL_GPL(get_rcu_tasks_rude_gp_kthread); |
1442 | |
1443 | void rcu_tasks_rude_get_gp_data(int *flags, unsigned long *gp_seq) |
1444 | { |
1445 | *flags = 0; |
1446 | *gp_seq = rcu_seq_current(&rcu_tasks_rude.tasks_gp_seq);
1447 | } |
1448 | EXPORT_SYMBOL_GPL(rcu_tasks_rude_get_gp_data); |
1449 | |
1450 | #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */ |
1451 | |
1452 | //////////////////////////////////////////////////////////////////////// |
1453 | // |
1454 | // Tracing variant of Tasks RCU. This variant is designed to be used |
1455 | // to protect tracing hooks, including those of BPF. This variant |
1456 | // therefore: |
1457 | // |
1458 | // 1. Has explicit read-side markers to allow finite grace periods |
1459 | // in the face of in-kernel loops for PREEMPT=n builds. |
1460 | // |
1461 | // 2. Protects code in the idle loop, exception entry/exit, and |
1462 | // CPU-hotplug code paths, similar to the capabilities of SRCU. |
1463 | // |
1464 | // 3. Avoids expensive read-side instructions, having overhead similar |
1465 | // to that of Preemptible RCU. |
1466 | // |
1467 | // There are of course downsides. For example, the grace-period code |
1468 | // can send IPIs to CPUs, even when those CPUs are in the idle loop or |
1469 | // in nohz_full userspace. If needed, these downsides can be at least |
1470 | // partially remedied. |
1471 | // |
1472 | // Perhaps most important, this variant of RCU does not affect the vanilla |
1473 | // flavors, rcu_preempt and rcu_sched. The fact that RCU Tasks Trace |
1474 | // readers can operate from idle, offline, and exception entry/exit in no |
1475 | // way allows rcu_preempt and rcu_sched readers to also do so. |
1476 | // |
1477 | // The implementation uses rcu_tasks_wait_gp(), which relies on function |
1478 | // pointers in the rcu_tasks structure. The rcu_spawn_tasks_trace_kthread() |
1479 | // function sets these function pointers up so that rcu_tasks_wait_gp() |
1480 | // invokes these functions in this order: |
1481 | // |
1482 | // rcu_tasks_trace_pregp_step(): |
1483 | // Disables CPU hotplug, adds all currently executing tasks to the |
1484 | // holdout list, then checks the state of all tasks that blocked |
1485 | // or were preempted within their current RCU Tasks Trace read-side |
1486 | // critical section, adding them to the holdout list if appropriate. |
1487 | // Finally, this function re-enables CPU hotplug. |
1488 | // The ->pertask_func() pointer is NULL, so there is no per-task processing. |
1489 | // rcu_tasks_trace_postscan(): |
1490 | // Invokes synchronize_rcu() to wait for late-stage exiting tasks |
1491 | // to finish exiting. |
1492 | // check_all_holdout_tasks_trace(), repeatedly until holdout list is empty: |
1493 | // Scans the holdout list, attempting to identify a quiescent state |
1494 | // for each task on the list. If there is a quiescent state, the |
1495 | // corresponding task is removed from the holdout list. Once this |
1496 | // list is empty, the grace period has completed. |
1497 | // rcu_tasks_trace_postgp(): |
1498 | // Provides the needed full memory barrier and does debug checks. |
1499 | // |
1500 | // The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks. |
1501 | // |
1502 | // Pre-grace-period update-side code is ordered before the grace period |
1503 | // via the ->cbs_lock and barriers in rcu_tasks_kthread(). Pre-grace-period |
1504 | // read-side code is ordered before the grace period by atomic operations |
1505 | // on .b.need_qs flag of each task involved in this process, or by scheduler |
1506 | // context-switch ordering (for locked-down non-running readers). |
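
/*
 * A simplified outline of the resulting phase ordering, using the hooks
 * installed by rcu_spawn_tasks_trace_kthread() below. This is only a
 * sketch of how rcu_tasks_wait_gp() drives those hooks, not its actual
 * implementation:
 *
 *	rcu_tasks_trace_pregp_step(&holdouts);	  // Build the holdout list.
 *	rcu_tasks_trace_postscan(&holdouts);	  // synchronize_rcu() for exiters.
 *	while (!list_empty(&holdouts))
 *		check_all_holdout_tasks_trace(&holdouts, needreport, &firstreport);
 *	rcu_tasks_trace_postgp(&rcu_tasks_trace); // Full barrier, debug checks.
 */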
1507 | |
1508 | // The lockdep state must be outside of #ifdef to be useful. |
1509 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
1510 | static struct lock_class_key rcu_lock_trace_key; |
1511 | struct lockdep_map rcu_trace_lock_map = |
1512 | STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key); |
1513 | EXPORT_SYMBOL_GPL(rcu_trace_lock_map); |
1514 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
1515 | |
1516 | #ifdef CONFIG_TASKS_TRACE_RCU |
1517 | |
1518 | // Record outstanding IPIs to each CPU. No point in sending two... |
1519 | static DEFINE_PER_CPU(bool, trc_ipi_to_cpu); |
1520 | |
1521 | // The number of detections of task quiescent state relying on |
1522 | // heavyweight readers executing explicit memory barriers. |
1523 | static unsigned long n_heavy_reader_attempts; |
1524 | static unsigned long n_heavy_reader_updates; |
1525 | static unsigned long n_heavy_reader_ofl_updates; |
1526 | static unsigned long n_trc_holdouts; |
1527 | |
1528 | void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func); |
1529 | DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace, |
1530 | "RCU Tasks Trace"); |
1531 | |
1532 | /* Load from ->trc_reader_special.b.need_qs with proper ordering. */ |
1533 | static u8 rcu_ld_need_qs(struct task_struct *t) |
1534 | { |
1535 | smp_mb(); // Enforce full grace-period ordering. |
1536 | return smp_load_acquire(&t->trc_reader_special.b.need_qs); |
1537 | } |
1538 | |
1539 | /* Store to ->trc_reader_special.b.need_qs with proper ordering. */ |
1540 | static void rcu_st_need_qs(struct task_struct *t, u8 v) |
1541 | { |
1542 | smp_store_release(&t->trc_reader_special.b.need_qs, v); |
1543 | smp_mb(); // Enforce full grace-period ordering. |
1544 | } |
1545 | |
1546 | /* |
1547 | * Do a cmpxchg() on ->trc_reader_special.b.need_qs, allowing for |
1548 | * the four-byte operand-size restriction of some platforms. |
1549 | * |
1550 | * Returns the old value, which is often ignored. |
1551 | */ |
1552 | u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new) |
1553 | { |
1554 | return cmpxchg(&t->trc_reader_special.b.need_qs, old, new); |
1555 | } |
1556 | EXPORT_SYMBOL_GPL(rcu_trc_cmpxchg_need_qs); |
1557 | |
1558 | /* |
1559 | * If we are the last reader, signal the grace-period kthread. |
1560 | * Also remove from the per-CPU list of blocked tasks. |
1561 | */ |
1562 | void rcu_read_unlock_trace_special(struct task_struct *t) |
1563 | { |
1564 | unsigned long flags; |
1565 | struct rcu_tasks_percpu *rtpcp; |
1566 | union rcu_special trs; |
1567 | |
1568 | // Open-coded full-word version of rcu_ld_need_qs(). |
1569 | smp_mb(); // Enforce full grace-period ordering. |
1570 | trs = smp_load_acquire(&t->trc_reader_special); |
1571 | |
1572 | if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb) |
1573 | smp_mb(); // Pairs with update-side barriers. |
1574 | // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers. |
1575 | if (trs.b.need_qs == (TRC_NEED_QS_CHECKED | TRC_NEED_QS)) { |
1576 | u8 result = rcu_trc_cmpxchg_need_qs(t, TRC_NEED_QS_CHECKED | TRC_NEED_QS, |
1577 | TRC_NEED_QS_CHECKED); |
1578 | |
1579 | WARN_ONCE(result != trs.b.need_qs, "%s: result = %d", __func__, result); |
1580 | } |
1581 | if (trs.b.blocked) { |
1582 | rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, t->trc_blkd_cpu); |
1583 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); |
1584 | list_del_init(&t->trc_blkd_node);
1585 | WRITE_ONCE(t->trc_reader_special.b.blocked, false); |
1586 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); |
1587 | } |
1588 | WRITE_ONCE(t->trc_reader_nesting, 0); |
1589 | } |
1590 | EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special); |
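
/*
 * Reader-side usage sketch with a hypothetical ->hook pointer published
 * by an updater. The function above is what rcu_read_unlock_trace()
 * eventually invokes when such a reader is the one holding up a grace
 * period:
 *
 *	rcu_read_lock_trace();
 *	hook = READ_ONCE(obj->hook);	// Hypothetical field.
 *	if (hook)
 *		hook->func(hook);	// Protected until the unlock.
 *	rcu_read_unlock_trace();	// May call the function above.
 */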
1591 | |
1592 | /* Add a newly blocked reader task to its CPU's list. */ |
1593 | void rcu_tasks_trace_qs_blkd(struct task_struct *t) |
1594 | { |
1595 | unsigned long flags; |
1596 | struct rcu_tasks_percpu *rtpcp; |
1597 | |
1598 | local_irq_save(flags); |
1599 | rtpcp = this_cpu_ptr(rcu_tasks_trace.rtpcpu); |
1600 | raw_spin_lock_rcu_node(rtpcp); // irqs already disabled |
1601 | t->trc_blkd_cpu = smp_processor_id(); |
1602 | if (!rtpcp->rtp_blkd_tasks.next) |
1603 | INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
1604 | list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
1605 | WRITE_ONCE(t->trc_reader_special.b.blocked, true); |
1606 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); |
1607 | } |
1608 | EXPORT_SYMBOL_GPL(rcu_tasks_trace_qs_blkd); |
1609 | |
1610 | /* Add a task to the holdout list, if it is not already on the list. */ |
1611 | static void trc_add_holdout(struct task_struct *t, struct list_head *bhp) |
1612 | { |
1613 | if (list_empty(&t->trc_holdout_list)) {
1614 | get_task_struct(t);
1615 | list_add(&t->trc_holdout_list, bhp);
1616 | n_trc_holdouts++; |
1617 | } |
1618 | } |
1619 | |
1620 | /* Remove a task from the holdout list, if it is in fact present. */ |
1621 | static void trc_del_holdout(struct task_struct *t) |
1622 | { |
1623 | if (!list_empty(&t->trc_holdout_list)) {
1624 | list_del_init(&t->trc_holdout_list);
1625 | put_task_struct(t); |
1626 | n_trc_holdouts--; |
1627 | } |
1628 | } |
1629 | |
1630 | /* IPI handler to check task state. */ |
1631 | static void trc_read_check_handler(void *t_in) |
1632 | { |
1633 | int nesting; |
1634 | struct task_struct *t = current; |
1635 | struct task_struct *texp = t_in; |
1636 | |
1637 | // If the task is no longer running on this CPU, leave. |
1638 | if (unlikely(texp != t)) |
1639 | goto reset_ipi; // Already on holdout list, so will check later. |
1640 | |
1641 | // If the task is not in a read-side critical section, and |
1642 | // if this is the last reader, awaken the grace-period kthread. |
1643 | nesting = READ_ONCE(t->trc_reader_nesting); |
1644 | if (likely(!nesting)) { |
1645 | rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); |
1646 | goto reset_ipi; |
1647 | } |
1648 | // If we are racing with an rcu_read_unlock_trace(), try again later. |
1649 | if (unlikely(nesting < 0)) |
1650 | goto reset_ipi; |
1651 | |
1652 | // Get here if the task is in a read-side critical section. |
1653 | // Set its state so that it will update state for the grace-period |
1654 | // kthread upon exit from that critical section. |
1655 | rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED); |
1656 | |
1657 | reset_ipi: |
1658 | // Allow future IPIs to be sent on CPU and for task. |
1659 | // Also order this IPI handler against any later manipulations of |
1660 | // the intended task. |
1661 | smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^ |
1662 | smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^ |
1663 | } |
1664 | |
1665 | /* Callback function for scheduler to check locked-down task. */ |
1666 | static int trc_inspect_reader(struct task_struct *t, void *bhp_in) |
1667 | { |
1668 | struct list_head *bhp = bhp_in; |
1669 | int cpu = task_cpu(t);
1670 | int nesting; |
1671 | bool ofl = cpu_is_offline(cpu); |
1672 | |
1673 | if (task_curr(t) && !ofl) {
1674 | // If no chance of heavyweight readers, do it the hard way. |
1675 | if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) |
1676 | return -EINVAL; |
1677 | |
1678 | // If heavyweight readers are enabled on the remote task, |
1679 | // we can inspect its state even though it is currently running.
1680 | // However, we cannot safely change its state. |
1681 | n_heavy_reader_attempts++; |
1682 | // Check for "running" idle tasks on offline CPUs. |
1683 | if (!rcu_watching_zero_in_eqs(cpu, &t->trc_reader_nesting))
1684 | return -EINVAL; // No quiescent state, do it the hard way. |
1685 | n_heavy_reader_updates++; |
1686 | nesting = 0; |
1687 | } else { |
1688 | // The task is not running, so C-language access is safe. |
1689 | nesting = t->trc_reader_nesting; |
1690 | WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t)))); |
1691 | if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl) |
1692 | n_heavy_reader_ofl_updates++; |
1693 | } |
1694 | |
1695 | // If not exiting a read-side critical section, mark as checked |
1696 | // so that the grace-period kthread will remove it from the |
1697 | // holdout list. |
1698 | if (!nesting) { |
1699 | rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); |
1700 | return 0; // In QS, so done. |
1701 | } |
1702 | if (nesting < 0) |
1703 | return -EINVAL; // Reader transitioning, try again later. |
1704 | |
1705 | // The task is in a read-side critical section, so set up its |
1706 | // state so that it will update state upon exit from that critical |
1707 | // section. |
1708 | if (!rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED)) |
1709 | trc_add_holdout(t, bhp); |
1710 | return 0; |
1711 | } |
1712 | |
1713 | /* Attempt to extract the state for the specified task. */ |
1714 | static void trc_wait_for_one_reader(struct task_struct *t, |
1715 | struct list_head *bhp) |
1716 | { |
1717 | int cpu; |
1718 | |
1719 | // If a previous IPI is still in flight, let it complete. |
1720 | if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI |
1721 | return; |
1722 | |
1723 | // The current task had better be in a quiescent state. |
1724 | if (t == current) { |
1725 | rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); |
1726 | WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting)); |
1727 | return; |
1728 | } |
1729 | |
1730 | // Attempt to nail down the task for inspection. |
1731 | get_task_struct(t); |
1732 | if (!task_call_func(t, trc_inspect_reader, bhp)) {
1733 | put_task_struct(t); |
1734 | return; |
1735 | } |
1736 | put_task_struct(t); |
1737 | |
1738 | // If this task is not yet on the holdout list, then we are in |
1739 | // an RCU read-side critical section. Otherwise, the invocation of |
1740 | // trc_add_holdout() that added it to the list did the necessary |
1741 | // get_task_struct(). Either way, the task cannot be freed out |
1742 | // from under this code. |
1743 | |
1744 | // If the task is currently running, send an IPI; either way, add it to the list.
1745 | trc_add_holdout(t, bhp); |
1746 | if (task_curr(t) &&
1747 | time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) { |
1748 | // The task is currently running, so try IPIing it. |
1749 | cpu = task_cpu(t);
1750 | |
1751 | // If there is already an IPI outstanding, let it happen. |
1752 | if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0) |
1753 | return; |
1754 | |
1755 | per_cpu(trc_ipi_to_cpu, cpu) = true; |
1756 | t->trc_ipi_to_cpu = cpu; |
1757 | rcu_tasks_trace.n_ipis++; |
1758 | if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
1759 | // Just in case there is some other reason for |
1760 | // failure than the target CPU being offline. |
1761 | WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n", |
1762 | __func__, cpu); |
1763 | rcu_tasks_trace.n_ipis_fails++; |
1764 | per_cpu(trc_ipi_to_cpu, cpu) = false; |
1765 | t->trc_ipi_to_cpu = -1; |
1766 | } |
1767 | } |
1768 | } |
1769 | |
1770 | /* |
1771 | * Initialize for first-round processing for the specified task. |
1772 | * Return false if task is NULL or already taken care of, true otherwise. |
1773 | */ |
1774 | static bool rcu_tasks_trace_pertask_prep(struct task_struct *t, bool notself) |
1775 | { |
1776 | // During early boot when there is only the one boot CPU, there |
1777 | // is no idle task for the other CPUs. Also, the grace-period |
1778 | // kthread is always in a quiescent state. In addition, just return |
1779 | // if this task is already on the list. |
1780 | if (unlikely(t == NULL) || (t == current && notself) || !list_empty(&t->trc_holdout_list))
1781 | return false; |
1782 | |
1783 | rcu_st_need_qs(t, 0);
1784 | t->trc_ipi_to_cpu = -1; |
1785 | return true; |
1786 | } |
1787 | |
1788 | /* Do first-round processing for the specified task. */ |
1789 | static void rcu_tasks_trace_pertask(struct task_struct *t, struct list_head *hop) |
1790 | { |
1791 | if (rcu_tasks_trace_pertask_prep(t, true))
1792 | trc_wait_for_one_reader(t, hop);
1793 | } |
1794 | |
1795 | /* Initialize for a new RCU-tasks-trace grace period. */ |
1796 | static void rcu_tasks_trace_pregp_step(struct list_head *hop) |
1797 | { |
1798 | LIST_HEAD(blkd_tasks); |
1799 | int cpu; |
1800 | unsigned long flags; |
1801 | struct rcu_tasks_percpu *rtpcp; |
1802 | struct task_struct *t; |
1803 | |
1804 | // There shouldn't be any old IPIs, but... |
1805 | for_each_possible_cpu(cpu) |
1806 | WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu)); |
1807 | |
1808 | // Disable CPU hotplug across the CPU scan for the benefit of |
1809 | // any IPIs that might be needed. This also waits for all readers |
1810 | // in CPU-hotplug code paths. |
1811 | cpus_read_lock(); |
1812 | |
1813 | // These rcu_tasks_trace_pertask_prep() calls are serialized to |
1814 | // allow safe access to the hop list. |
1815 | for_each_online_cpu(cpu) { |
1816 | rcu_read_lock(); |
1817 | // Note that cpu_curr_snapshot() picks up the target |
1818 | // CPU's current task while its runqueue is locked with |
1819 | // an smp_mb__after_spinlock(). This ensures that either |
1820 | // the grace-period kthread will see that task's read-side |
1821 | // critical section or the task will see the updater's pre-GP |
1822 | // accesses. The trailing smp_mb() in cpu_curr_snapshot() |
1823 | // does not currently play a role other than simplify |
1824 | // that function's ordering semantics. If these simplified |
1825 | // ordering semantics continue to be redundant, that smp_mb() |
1826 | // might be removed. |
1827 | t = cpu_curr_snapshot(cpu); |
1828 | if (rcu_tasks_trace_pertask_prep(t, true))
1829 | trc_add_holdout(t, hop);
1830 | rcu_read_unlock(); |
1831 | cond_resched_tasks_rcu_qs(); |
1832 | } |
1833 | |
1834 | // Only after all running tasks have been accounted for is it |
1835 | // safe to take care of the tasks that have blocked within their |
1836 | // current RCU tasks trace read-side critical section. |
1837 | for_each_possible_cpu(cpu) { |
1838 | rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, cpu); |
1839 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); |
1840 | list_splice_init(&rtpcp->rtp_blkd_tasks, &blkd_tasks);
1841 | while (!list_empty(&blkd_tasks)) {
1842 | rcu_read_lock(); |
1843 | t = list_first_entry(&blkd_tasks, struct task_struct, trc_blkd_node); |
1844 | list_del_init(&t->trc_blkd_node);
1845 | list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
1846 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); |
1847 | rcu_tasks_trace_pertask(t, hop); |
1848 | rcu_read_unlock(); |
1849 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); |
1850 | } |
1851 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); |
1852 | cond_resched_tasks_rcu_qs(); |
1853 | } |
1854 | |
1855 | // Re-enable CPU hotplug now that the holdout list is populated. |
1856 | cpus_read_unlock(); |
1857 | } |
1858 | |
1859 | /* |
1860 | * Do intermediate processing between task and holdout scans. |
1861 | */ |
1862 | static void rcu_tasks_trace_postscan(struct list_head *hop) |
1863 | { |
1864 | // Wait for late-stage exiting tasks to finish exiting. |
1865 | // These might have passed the call to exit_tasks_rcu_finish(). |
1866 | |
1867 | // If you remove the following line, update rcu_trace_implies_rcu_gp()!!! |
1868 | synchronize_rcu(); |
1869 | // Any tasks that exit after this point will set |
1870 | // TRC_NEED_QS_CHECKED in ->trc_reader_special.b.need_qs. |
1871 | } |
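
/*
 * Because of the synchronize_rcu() above, completion of an RCU Tasks
 * Trace grace period currently also implies completion of a vanilla RCU
 * grace period, which callers may query via rcu_trace_implies_rcu_gp().
 * A hedged sketch of a callback relying on this (hypothetical types):
 *
 *	static void my_free_cb(struct rcu_head *rhp)
 *	{
 *		struct my_obj *p = container_of(rhp, struct my_obj, rh);
 *
 *		if (rcu_trace_implies_rcu_gp())
 *			kfree(p);	// Both grace periods have elapsed.
 *		else
 *			call_rcu(&p->rh, my_vanilla_free_cb); // Chain a vanilla GP.
 *	}
 */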
1872 | |
1873 | /* Communicate task state back to the RCU tasks trace stall warning request. */ |
1874 | struct trc_stall_chk_rdr { |
1875 | int nesting; |
1876 | int ipi_to_cpu; |
1877 | u8 needqs; |
1878 | }; |
1879 | |
1880 | static int trc_check_slow_task(struct task_struct *t, void *arg) |
1881 | { |
1882 | struct trc_stall_chk_rdr *trc_rdrp = arg; |
1883 | |
1884 | if (task_curr(t) && cpu_online(task_cpu(t)))
1885 | return false; // It is running, so decline to inspect it. |
1886 | trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting); |
1887 | trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu); |
1888 | trc_rdrp->needqs = rcu_ld_need_qs(t); |
1889 | return true; |
1890 | } |
1891 | |
1892 | /* Show the state of a task stalling the current RCU tasks trace GP. */ |
1893 | static void show_stalled_task_trace(struct task_struct *t, bool *firstreport) |
1894 | { |
1895 | int cpu; |
1896 | struct trc_stall_chk_rdr trc_rdr; |
1897 | bool is_idle_tsk = is_idle_task(t);
1898 | |
1899 | if (*firstreport) { |
1900 | pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n"); |
1901 | *firstreport = false; |
1902 | } |
1903 | cpu = task_cpu(t);
1904 | if (!task_call_func(t, trc_check_slow_task, &trc_rdr))
1905 | pr_alert("P%d: %c%c\n", |
1906 | t->pid, |
1907 | ".I"[t->trc_ipi_to_cpu >= 0], |
1908 | ".i"[is_idle_tsk]); |
1909 | else |
1910 | pr_alert("P%d: %c%c%c%c nesting: %d%c%c cpu: %d%s\n", |
1911 | t->pid, |
1912 | ".I"[trc_rdr.ipi_to_cpu >= 0], |
1913 | ".i"[is_idle_tsk], |
1914 | ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)], |
1915 | ".B"[!!data_race(t->trc_reader_special.b.blocked)], |
1916 | trc_rdr.nesting, |
1917 | " !CN"[trc_rdr.needqs & 0x3], |
1918 | " ?"[trc_rdr.needqs > 0x3], |
1919 | cpu, cpu_online(cpu) ? "": "(offline)"); |
1920 | sched_show_task(t);
1921 | } |
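
/*
 * Note on the format arguments above: an expression such as
 * ".I"[t->trc_ipi_to_cpu >= 0] indexes a two-character string constant,
 * yielding '.' when the condition is false and 'I' when it is true,
 * which is how the single-character state flags are generated:
 *
 *	pr_alert("%c\n", ".B"[!!blocked]);	// Prints 'B' if blocked, else '.'.
 */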
1922 | |
1923 | /* List stalled IPIs for RCU tasks trace. */ |
1924 | static void show_stalled_ipi_trace(void) |
1925 | { |
1926 | int cpu; |
1927 | |
1928 | for_each_possible_cpu(cpu) |
1929 | if (per_cpu(trc_ipi_to_cpu, cpu)) |
1930 | pr_alert("\tIPI outstanding to CPU %d\n", cpu); |
1931 | } |
1932 | |
1933 | /* Do one scan of the holdout list. */ |
1934 | static void check_all_holdout_tasks_trace(struct list_head *hop, |
1935 | bool needreport, bool *firstreport) |
1936 | { |
1937 | struct task_struct *g, *t; |
1938 | |
1939 | // Disable CPU hotplug across the holdout list scan for IPIs. |
1940 | cpus_read_lock(); |
1941 | |
1942 | list_for_each_entry_safe(t, g, hop, trc_holdout_list) { |
1943 | // If safe and needed, try to check the current task. |
1944 | if (READ_ONCE(t->trc_ipi_to_cpu) == -1 && |
1945 | !(rcu_ld_need_qs(t) & TRC_NEED_QS_CHECKED)) |
1946 | trc_wait_for_one_reader(t, hop);
1947 | |
1948 | // If check succeeded, remove this task from the list. |
1949 | if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 && |
1950 | rcu_ld_need_qs(t) == TRC_NEED_QS_CHECKED) |
1951 | trc_del_holdout(t); |
1952 | else if (needreport) |
1953 | show_stalled_task_trace(t, firstreport); |
1954 | cond_resched_tasks_rcu_qs(); |
1955 | } |
1956 | |
1957 | // Re-enable CPU hotplug now that the holdout list scan has completed. |
1958 | cpus_read_unlock(); |
1959 | |
1960 | if (needreport) { |
1961 | if (*firstreport) |
1962 | pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n"); |
1963 | show_stalled_ipi_trace(); |
1964 | } |
1965 | } |
1966 | |
1967 | static void rcu_tasks_trace_empty_fn(void *unused) |
1968 | { |
1969 | } |
1970 | |
1971 | /* Wait for grace period to complete and provide ordering. */ |
1972 | static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp) |
1973 | { |
1974 | int cpu; |
1975 | |
1976 | // Wait for any lingering IPI handlers to complete. Note that |
1977 | // if a CPU has gone offline or transitioned to userspace in the |
1978 | // meantime, all IPI handlers should have been drained beforehand. |
1979 | // Yes, this assumes that CPUs process IPIs in order. If that ever |
1980 | // changes, there will need to be a recheck and/or timed wait. |
1981 | for_each_online_cpu(cpu) |
1982 | if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu)))) |
1983 | smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);
1984 | |
1985 | smp_mb(); // Caller's code must be ordered after wakeup. |
1986 | // Pairs with pretty much every ordering primitive. |
1987 | } |
1988 | |
1989 | /* Report any needed quiescent state for this exiting task. */ |
1990 | static void exit_tasks_rcu_finish_trace(struct task_struct *t) |
1991 | { |
1992 | union rcu_special trs = READ_ONCE(t->trc_reader_special); |
1993 | |
1994 | rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); |
1995 | WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting)); |
1996 | if (WARN_ON_ONCE(rcu_ld_need_qs(t) & TRC_NEED_QS || trs.b.blocked)) |
1997 | rcu_read_unlock_trace_special(t); |
1998 | else |
1999 | WRITE_ONCE(t->trc_reader_nesting, 0); |
2000 | } |
2001 | |
2002 | /** |
2003 | * call_rcu_tasks_trace() - Queue a callback for a trace task-based grace period
2004 | * @rhp: structure to be used for queueing the RCU updates. |
2005 | * @func: actual callback function to be invoked after the grace period |
2006 | * |
2007 | * The callback function will be invoked some time after a trace rcu-tasks |
2008 | * grace period elapses, in other words after all currently executing |
2009 | * trace rcu-tasks read-side critical sections have completed. These |
2010 | * read-side critical sections are delimited by calls to rcu_read_lock_trace() |
2011 | * and rcu_read_unlock_trace(). |
2012 | * |
2013 | * See the description of call_rcu() for more detailed information on |
2014 | * memory ordering guarantees. |
2015 | */ |
2016 | void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func) |
2017 | { |
2018 | call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
2019 | } |
2020 | EXPORT_SYMBOL_GPL(call_rcu_tasks_trace); |
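
/*
 * Usage sketch with a hypothetical structure that may only be freed
 * once all pre-existing rcu_read_lock_trace() readers have finished:
 *
 *	struct my_hook {			// Hypothetical.
 *		struct rcu_head rh;
 *		void (*func)(void *arg);
 *	};
 *
 *	static void my_hook_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_hook, rh));
 *	}
 *
 *	// After unpublishing all pointers to old_hook:
 *	call_rcu_tasks_trace(&old_hook->rh, my_hook_free_cb);
 */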
2021 | |
2022 | /** |
2023 | * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period |
2024 | * |
2025 | * Control will return to the caller some time after a trace rcu-tasks |
2026 | * grace period has elapsed, in other words after all currently executing |
2027 | * trace rcu-tasks read-side critical sections have elapsed. These read-side |
2028 | * critical sections are delimited by calls to rcu_read_lock_trace() |
2029 | * and rcu_read_unlock_trace(). |
2030 | * |
2031 | * This is a very specialized primitive, intended only for a few uses in |
2032 | * tracing and other situations requiring manipulation of function preambles |
2033 | * and profiling hooks. The synchronize_rcu_tasks_trace() function is not |
2034 | * (yet) intended for heavy use from multiple CPUs. |
2035 | * |
2036 | * See the description of synchronize_rcu() for more detailed information |
2037 | * on memory ordering guarantees. |
2038 | */ |
2039 | void synchronize_rcu_tasks_trace(void) |
2040 | { |
2041 | RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section"); |
2042 | synchronize_rcu_tasks_generic(&rcu_tasks_trace);
2043 | } |
2044 | EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace); |
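
/*
 * Synchronous counterpart sketch, reusing the hypothetical hook from the
 * call_rcu_tasks_trace() example above:
 *
 *	old = xchg(&global_hook, new_hook);	// Hypothetical publication point.
 *	synchronize_rcu_tasks_trace();		// Wait out pre-existing trace readers.
 *	kfree(old);				// No trace reader can still see it.
 */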
2045 | |
2046 | /** |
2047 | * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks. |
2048 | * |
2049 | * Although the current implementation is guaranteed to wait, it is not |
2050 | * obligated to, for example, if there are no pending callbacks. |
2051 | */ |
2052 | void rcu_barrier_tasks_trace(void) |
2053 | { |
2054 | rcu_barrier_tasks_generic(&rcu_tasks_trace);
2055 | } |
2056 | EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace); |
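
/*
 * Usage sketch: a module that queues callbacks via call_rcu_tasks_trace()
 * should wait for them in its (hypothetical) exit path before unloading,
 * so that no callback runs after the module text is gone:
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		unregister_my_hooks();		// Hypothetical; stop queueing.
 *		rcu_barrier_tasks_trace();	// Wait for queued callbacks.
 *	}
 */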
2057 | |
2058 | int rcu_tasks_trace_lazy_ms = -1; |
2059 | module_param(rcu_tasks_trace_lazy_ms, int, 0444); |
2060 | |
2061 | static int __init rcu_spawn_tasks_trace_kthread(void) |
2062 | { |
2063 | if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) { |
2064 | rcu_tasks_trace.gp_sleep = HZ / 10; |
2065 | rcu_tasks_trace.init_fract = HZ / 10; |
2066 | } else { |
2067 | rcu_tasks_trace.gp_sleep = HZ / 200; |
2068 | if (rcu_tasks_trace.gp_sleep <= 0) |
2069 | rcu_tasks_trace.gp_sleep = 1; |
2070 | rcu_tasks_trace.init_fract = HZ / 200; |
2071 | if (rcu_tasks_trace.init_fract <= 0) |
2072 | rcu_tasks_trace.init_fract = 1; |
2073 | } |
2074 | if (rcu_tasks_trace_lazy_ms >= 0) |
2075 | rcu_tasks_trace.lazy_jiffies = msecs_to_jiffies(rcu_tasks_trace_lazy_ms);
2076 | rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step; |
2077 | rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan; |
2078 | rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace; |
2079 | rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp; |
2080 | rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
2081 | return 0; |
2082 | } |
2083 | |
2084 | #if !defined(CONFIG_TINY_RCU) |
2085 | void show_rcu_tasks_trace_gp_kthread(void) |
2086 | { |
2087 | char buf[64]; |
2088 | |
2089 | snprintf(buf, sizeof(buf), "N%lu h:%lu/%lu/%lu",
2090 | data_race(n_trc_holdouts), |
2091 | data_race(n_heavy_reader_ofl_updates), |
2092 | data_race(n_heavy_reader_updates), |
2093 | data_race(n_heavy_reader_attempts)); |
2094 | show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
2095 | } |
2096 | EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread); |
2097 | |
2098 | void rcu_tasks_trace_torture_stats_print(char *tt, char *tf) |
2099 | { |
2100 | rcu_tasks_torture_stats_print_generic(&rcu_tasks_trace, tt, tf, "");
2101 | } |
2102 | EXPORT_SYMBOL_GPL(rcu_tasks_trace_torture_stats_print); |
2103 | #endif // !defined(CONFIG_TINY_RCU) |
2104 | |
2105 | struct task_struct *get_rcu_tasks_trace_gp_kthread(void) |
2106 | { |
2107 | return rcu_tasks_trace.kthread_ptr; |
2108 | } |
2109 | EXPORT_SYMBOL_GPL(get_rcu_tasks_trace_gp_kthread); |
2110 | |
2111 | void rcu_tasks_trace_get_gp_data(int *flags, unsigned long *gp_seq) |
2112 | { |
2113 | *flags = 0; |
2114 | *gp_seq = rcu_seq_current(&rcu_tasks_trace.tasks_gp_seq);
2115 | } |
2116 | EXPORT_SYMBOL_GPL(rcu_tasks_trace_get_gp_data); |
2117 | |
2118 | #else /* #ifdef CONFIG_TASKS_TRACE_RCU */ |
2119 | static void exit_tasks_rcu_finish_trace(struct task_struct *t) { } |
2120 | #endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */ |
2121 | |
2122 | #ifndef CONFIG_TINY_RCU |
2123 | void show_rcu_tasks_gp_kthreads(void) |
2124 | { |
2125 | show_rcu_tasks_classic_gp_kthread(); |
2126 | show_rcu_tasks_rude_gp_kthread(); |
2127 | show_rcu_tasks_trace_gp_kthread(); |
2128 | } |
2129 | #endif /* #ifndef CONFIG_TINY_RCU */ |
2130 | |
2131 | #ifdef CONFIG_PROVE_RCU |
2132 | struct rcu_tasks_test_desc { |
2133 | struct rcu_head rh; |
2134 | const char *name; |
2135 | bool notrun; |
2136 | unsigned long runstart; |
2137 | }; |
2138 | |
2139 | static struct rcu_tasks_test_desc tests[] = { |
2140 | { |
2141 | .name = "call_rcu_tasks()", |
2142 | /* If not defined, the test is skipped. */ |
2143 | .notrun = IS_ENABLED(CONFIG_TASKS_RCU), |
2144 | }, |
2145 | { |
2146 | .name = "call_rcu_tasks_trace()", |
2147 | /* If not defined, the test is skipped. */ |
2148 | .notrun = IS_ENABLED(CONFIG_TASKS_TRACE_RCU) |
2149 | } |
2150 | }; |
2151 | |
2152 | #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) |
2153 | static void test_rcu_tasks_callback(struct rcu_head *rhp) |
2154 | { |
2155 | struct rcu_tasks_test_desc *rttd = |
2156 | container_of(rhp, struct rcu_tasks_test_desc, rh); |
2157 | |
2158 | pr_info("Callback from %s invoked.\n", rttd->name); |
2159 | |
2160 | rttd->notrun = false; |
2161 | } |
2162 | #endif // #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) |
2163 | |
2164 | static void rcu_tasks_initiate_self_tests(void) |
2165 | { |
2166 | #ifdef CONFIG_TASKS_RCU |
2167 | pr_info("Running RCU Tasks wait API self tests\n"); |
2168 | tests[0].runstart = jiffies; |
2169 | synchronize_rcu_tasks(); |
2170 | call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback); |
2171 | #endif |
2172 | |
2173 | #ifdef CONFIG_TASKS_RUDE_RCU |
2174 | pr_info("Running RCU Tasks Rude wait API self tests\n"); |
2175 | synchronize_rcu_tasks_rude(); |
2176 | #endif |
2177 | |
2178 | #ifdef CONFIG_TASKS_TRACE_RCU |
2179 | pr_info("Running RCU Tasks Trace wait API self tests\n"); |
2180 | tests[1].runstart = jiffies; |
2181 | synchronize_rcu_tasks_trace(); |
2182 | call_rcu_tasks_trace(&tests[1].rh, test_rcu_tasks_callback); |
2183 | #endif |
2184 | } |
2185 | |
2186 | /* |
2187 | * Return: 0 - test passed |
2188 | * 1 - test failed, but have not timed out yet |
2189 | * -1 - test failed and timed out |
2190 | */ |
2191 | static int rcu_tasks_verify_self_tests(void) |
2192 | { |
2193 | int ret = 0; |
2194 | int i; |
2195 | unsigned long bst = rcu_task_stall_timeout; |
2196 | |
2197 | if (bst <= 0 || bst > RCU_TASK_BOOT_STALL_TIMEOUT) |
2198 | bst = RCU_TASK_BOOT_STALL_TIMEOUT; |
2199 | for (i = 0; i < ARRAY_SIZE(tests); i++) { |
2200 | while (tests[i].notrun) { // still hanging. |
2201 | if (time_after(jiffies, tests[i].runstart + bst)) { |
2202 | pr_err("%s has failed boot-time tests.\n", tests[i].name); |
2203 | ret = -1; |
2204 | break; |
2205 | } |
2206 | ret = 1; |
2207 | break; |
2208 | } |
2209 | } |
2210 | WARN_ON(ret < 0); |
2211 | |
2212 | return ret; |
2213 | } |
2214 | |
2215 | /* |
2216 | * Repeat the rcu_tasks_verify_self_tests() call once every second until the |
2217 | * test passes or has timed out. |
2218 | */ |
2219 | static struct delayed_work rcu_tasks_verify_work; |
2220 | static void rcu_tasks_verify_work_fn(struct work_struct *work __maybe_unused) |
2221 | { |
2222 | int ret = rcu_tasks_verify_self_tests(); |
2223 | |
2224 | if (ret <= 0) |
2225 | return; |
2226 | |
2227 | /* Test fails but not timed out yet, reschedule another check */ |
2228 | schedule_delayed_work(&rcu_tasks_verify_work, HZ);
2229 | } |
2230 | |
2231 | static int rcu_tasks_verify_schedule_work(void) |
2232 | { |
2233 | INIT_DELAYED_WORK(&rcu_tasks_verify_work, rcu_tasks_verify_work_fn); |
2234 | rcu_tasks_verify_work_fn(NULL); |
2235 | return 0; |
2236 | } |
2237 | late_initcall(rcu_tasks_verify_schedule_work); |
2238 | #else /* #ifdef CONFIG_PROVE_RCU */ |
2239 | static void rcu_tasks_initiate_self_tests(void) { } |
2240 | #endif /* #else #ifdef CONFIG_PROVE_RCU */ |
2241 | |
2242 | void __init tasks_cblist_init_generic(void) |
2243 | { |
2244 | lockdep_assert_irqs_disabled(); |
2245 | WARN_ON(num_online_cpus() > 1); |
2246 | |
2247 | #ifdef CONFIG_TASKS_RCU |
2248 | cblist_init_generic(&rcu_tasks);
2249 | #endif |
2250 | |
2251 | #ifdef CONFIG_TASKS_RUDE_RCU |
2252 | cblist_init_generic(&rcu_tasks_rude);
2253 | #endif |
2254 | |
2255 | #ifdef CONFIG_TASKS_TRACE_RCU |
2256 | cblist_init_generic(&rcu_tasks_trace);
2257 | #endif |
2258 | } |
2259 | |
2260 | static int __init rcu_init_tasks_generic(void) |
2261 | { |
2262 | #ifdef CONFIG_TASKS_RCU |
2263 | rcu_spawn_tasks_kthread(); |
2264 | #endif |
2265 | |
2266 | #ifdef CONFIG_TASKS_RUDE_RCU |
2267 | rcu_spawn_tasks_rude_kthread(); |
2268 | #endif |
2269 | |
2270 | #ifdef CONFIG_TASKS_TRACE_RCU |
2271 | rcu_spawn_tasks_trace_kthread(); |
2272 | #endif |
2273 | |
2274 | // Run the self-tests. |
2275 | rcu_tasks_initiate_self_tests(); |
2276 | |
2277 | return 0; |
2278 | } |
2279 | core_initcall(rcu_init_tasks_generic); |
2280 | |
2281 | #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ |
2282 | static inline void rcu_tasks_bootup_oddness(void) {} |
2283 | #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */ |
2284 |