/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Task-based RCU implementations.
 *
 * Copyright (C) 2020 Paul E. McKenney
 */

#ifdef CONFIG_TASKS_RCU_GENERIC
#include "rcu_segcblist.h"

////////////////////////////////////////////////////////////////////////
//
// Generic data structures.

struct rcu_tasks;
typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
typedef void (*pregp_func_t)(struct list_head *hop);
typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
typedef void (*postscan_func_t)(struct list_head *hop);
typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
typedef void (*postgp_func_t)(struct rcu_tasks *rtp);

/**
 * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism.
 * @cblist: Callback list.
 * @lock: Lock protecting per-CPU callback list.
 * @rtp_jiffies: Jiffies counter value for statistics.
 * @lazy_timer: Timer to unlazify callbacks.
 * @urgent_gp: Number of additional non-lazy grace periods.
 * @rtp_n_lock_retries: Rough lock-contention statistic.
 * @rtp_work: Work queue for invoking callbacks.
 * @rtp_irq_work: IRQ work queue for deferred wakeups.
 * @barrier_q_head: RCU callback for barrier operation.
 * @rtp_blkd_tasks: List of tasks blocked as readers.
 * @rtp_exit_list: List of tasks in the latter portion of do_exit().
 * @cpu: CPU number corresponding to this entry.
 * @rtpp: Pointer to the rcu_tasks structure.
 */
struct rcu_tasks_percpu {
	struct rcu_segcblist cblist;
	raw_spinlock_t __private lock;
	unsigned long rtp_jiffies;
	unsigned long rtp_n_lock_retries;
	struct timer_list lazy_timer;
	unsigned int urgent_gp;
	struct work_struct rtp_work;
	struct irq_work rtp_irq_work;
	struct rcu_head barrier_q_head;
	struct list_head rtp_blkd_tasks;
	struct list_head rtp_exit_list;
	int cpu;
	struct rcu_tasks *rtpp;
};

/**
 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
 * @cbs_wait: RCU wait allowing a new callback to get kthread's attention.
 * @cbs_gbl_lock: Lock protecting callback list.
 * @tasks_gp_mutex: Mutex protecting grace period, needed during mid-boot dead zone.
 * @gp_func: This flavor's grace-period-wait function.
 * @gp_state: Grace period's most recent state transition (debugging).
 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
 * @init_fract: Initial backoff sleep interval.
 * @gp_jiffies: Time of last @gp_state transition.
 * @gp_start: Most recent grace-period start in jiffies.
 * @tasks_gp_seq: Number of grace periods completed since boot.
 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
 * @n_ipis_fails: Number of IPI-send failures.
 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
 * @lazy_jiffies: Number of jiffies to allow callbacks to be lazy.
 * @pregp_func: This flavor's pre-grace-period function (optional).
 * @pertask_func: This flavor's per-task scan function (optional).
 * @postscan_func: This flavor's post-task scan function (optional).
 * @holdouts_func: This flavor's holdout-list scan function (optional).
 * @postgp_func: This flavor's post-grace-period function (optional).
 * @call_func: This flavor's call_rcu()-equivalent function.
 * @rtpcpu: This flavor's rcu_tasks_percpu structure.
 * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
 * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
 * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
 * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers.
 * @barrier_q_mutex: Serialize barrier operations.
 * @barrier_q_count: Number of queues being waited on.
 * @barrier_q_completion: Barrier wait/wakeup mechanism.
 * @barrier_q_seq: Sequence number for barrier operations.
 * @name: This flavor's textual name.
 * @kname: This flavor's kthread name.
 */
struct rcu_tasks {
	struct rcuwait cbs_wait;
	raw_spinlock_t cbs_gbl_lock;
	struct mutex tasks_gp_mutex;
	int gp_state;
	int gp_sleep;
	int init_fract;
	unsigned long gp_jiffies;
	unsigned long gp_start;
	unsigned long tasks_gp_seq;
	unsigned long n_ipis;
	unsigned long n_ipis_fails;
	struct task_struct *kthread_ptr;
	unsigned long lazy_jiffies;
	rcu_tasks_gp_func_t gp_func;
	pregp_func_t pregp_func;
	pertask_func_t pertask_func;
	postscan_func_t postscan_func;
	holdouts_func_t holdouts_func;
	postgp_func_t postgp_func;
	call_rcu_func_t call_func;
	struct rcu_tasks_percpu __percpu *rtpcpu;
	int percpu_enqueue_shift;
	int percpu_enqueue_lim;
	int percpu_dequeue_lim;
	unsigned long percpu_dequeue_gpseq;
	struct mutex barrier_q_mutex;
	atomic_t barrier_q_count;
	struct completion barrier_q_completion;
	unsigned long barrier_q_seq;
	char *name;
	char *kname;
};

static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp);

#define DEFINE_RCU_TASKS(rt_name, gp, call, n)						\
static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = {		\
	.lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock),		\
	.rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup),			\
};											\
static struct rcu_tasks rt_name =							\
{											\
	.cbs_wait = __RCUWAIT_INITIALIZER(rt_name.wait),				\
	.cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock),			\
	.tasks_gp_mutex = __MUTEX_INITIALIZER(rt_name.tasks_gp_mutex),			\
	.gp_func = gp,									\
	.call_func = call,								\
	.rtpcpu = &rt_name ## __percpu,							\
	.lazy_jiffies = DIV_ROUND_UP(HZ, 4),						\
	.name = n,									\
	.percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS),				\
	.percpu_enqueue_lim = 1,							\
	.percpu_dequeue_lim = 1,							\
	.barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex),		\
	.barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT,				\
	.kname = #rt_name,								\
}
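
/*
 * For example, the classic Tasks RCU flavor later in this file is
 * instantiated via:
 *
 *	DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
 *
 * which defines both the rcu_tasks__percpu per-CPU structures and the
 * rcu_tasks structure that ties them together.
 */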

#ifdef CONFIG_TASKS_RCU

/* Report delays in rcu_tasks_postscan() while waiting for exiting tasks. */
static void tasks_rcu_exit_srcu_stall(struct timer_list *unused);
static DEFINE_TIMER(tasks_rcu_exit_srcu_stall_timer, tasks_rcu_exit_srcu_stall);
#endif

/* Avoid IPIing CPUs early in the grace period. */
#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
module_param(rcu_task_ipi_delay, int, 0644);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
#define RCU_TASK_BOOT_STALL_TIMEOUT (HZ * 30)
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);
#define RCU_TASK_STALL_INFO (HZ * 10)
static int rcu_task_stall_info __read_mostly = RCU_TASK_STALL_INFO;
module_param(rcu_task_stall_info, int, 0644);
static int rcu_task_stall_info_mult __read_mostly = 3;
module_param(rcu_task_stall_info_mult, int, 0444);

static int rcu_task_enqueue_lim __read_mostly = -1;
module_param(rcu_task_enqueue_lim, int, 0444);

static bool rcu_task_cb_adjust;
static int rcu_task_contend_lim __read_mostly = 100;
module_param(rcu_task_contend_lim, int, 0444);
static int rcu_task_collapse_lim __read_mostly = 10;
module_param(rcu_task_collapse_lim, int, 0444);
static int rcu_task_lazy_lim __read_mostly = 32;
module_param(rcu_task_lazy_lim, int, 0444);

/* RCU tasks grace-period state for debugging. */
#define RTGS_INIT		 0
#define RTGS_WAIT_WAIT_CBS	 1
#define RTGS_WAIT_GP		 2
#define RTGS_PRE_WAIT_GP	 3
#define RTGS_SCAN_TASKLIST	 4
#define RTGS_POST_SCAN_TASKLIST	 5
#define RTGS_WAIT_SCAN_HOLDOUTS	 6
#define RTGS_SCAN_HOLDOUTS	 7
#define RTGS_POST_GP		 8
#define RTGS_WAIT_READERS	 9
#define RTGS_INVOKE_CBS		10
#define RTGS_WAIT_CBS		11
#ifndef CONFIG_TINY_RCU
static const char * const rcu_tasks_gp_state_names[] = {
	"RTGS_INIT",
	"RTGS_WAIT_WAIT_CBS",
	"RTGS_WAIT_GP",
	"RTGS_PRE_WAIT_GP",
	"RTGS_SCAN_TASKLIST",
	"RTGS_POST_SCAN_TASKLIST",
	"RTGS_WAIT_SCAN_HOLDOUTS",
	"RTGS_SCAN_HOLDOUTS",
	"RTGS_POST_GP",
	"RTGS_WAIT_READERS",
	"RTGS_INVOKE_CBS",
	"RTGS_WAIT_CBS",
};
#endif /* #ifndef CONFIG_TINY_RCU */

////////////////////////////////////////////////////////////////////////
//
// Generic code.

static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp);

/* Record grace-period phase and time. */
static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
{
	rtp->gp_state = newstate;
	rtp->gp_jiffies = jiffies;
}

#ifndef CONFIG_TINY_RCU
/* Return state name. */
static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
{
	int i = data_race(rtp->gp_state); // Let KCSAN detect update races
	int j = READ_ONCE(i); // Prevent the compiler from reading twice

	if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
		return "???";
	return rcu_tasks_gp_state_names[j];
}
#endif /* #ifndef CONFIG_TINY_RCU */

// Initialize per-CPU callback lists for the specified flavor of
// Tasks RCU.  Do not enqueue callbacks before this function is invoked.
static void cblist_init_generic(struct rcu_tasks *rtp)
{
	int cpu;
	int lim;
	int shift;

	if (rcu_task_enqueue_lim < 0) {
		rcu_task_enqueue_lim = 1;
		rcu_task_cb_adjust = true;
	} else if (rcu_task_enqueue_lim == 0) {
		rcu_task_enqueue_lim = 1;
	}
	lim = rcu_task_enqueue_lim;

	if (lim > nr_cpu_ids)
		lim = nr_cpu_ids;
	shift = ilog2(nr_cpu_ids / lim);
	if (((nr_cpu_ids - 1) >> shift) >= lim)
		shift++;
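	/*
	 * Worked example: with nr_cpu_ids == 6 and lim == 4, ilog2(6 / 4)
	 * yields 0, but (6 - 1) >> 0 is 5, which is >= lim, so the shift
	 * is bumped to 1.  CPUs 0-5 then map to queues 0-2, within limit.
	 */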
	WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
	WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
	smp_store_release(&rtp->percpu_enqueue_lim, lim);
	for_each_possible_cpu(cpu) {
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		WARN_ON_ONCE(!rtpcp);
		if (cpu)
			raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock));
		if (rcu_segcblist_empty(&rtpcp->cblist))
			rcu_segcblist_init(&rtpcp->cblist);
		INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
		rtpcp->cpu = cpu;
		rtpcp->rtpp = rtp;
		if (!rtpcp->rtp_blkd_tasks.next)
			INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
		if (!rtpcp->rtp_exit_list.next)
			INIT_LIST_HEAD(&rtpcp->rtp_exit_list);
	}

	pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d.\n", rtp->name,
		data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim), rcu_task_cb_adjust);
}

// Compute wakeup time for lazy callback timer.
static unsigned long rcu_tasks_lazy_time(struct rcu_tasks *rtp)
{
	return jiffies + rtp->lazy_jiffies;
}

// Timer handler that unlazifies lazy callbacks.
static void call_rcu_tasks_generic_timer(struct timer_list *tlp)
{
	unsigned long flags;
	bool needwake = false;
	struct rcu_tasks *rtp;
	struct rcu_tasks_percpu *rtpcp = from_timer(rtpcp, tlp, lazy_timer);

	rtp = rtpcp->rtpp;
	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
	if (!rcu_segcblist_empty(&rtpcp->cblist) && rtp->lazy_jiffies) {
		if (!rtpcp->urgent_gp)
			rtpcp->urgent_gp = 1;
		needwake = true;
		mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
	}
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	if (needwake)
		rcuwait_wake_up(&rtp->cbs_wait);
}

// IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
{
	struct rcu_tasks *rtp;
	struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work);

	rtp = rtpcp->rtpp;
	rcuwait_wake_up(&rtp->cbs_wait);
}

// Enqueue a callback for the specified flavor of Tasks RCU.
static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
				   struct rcu_tasks *rtp)
{
	int chosen_cpu;
	unsigned long flags;
	bool havekthread = smp_load_acquire(&rtp->kthread_ptr);
	int ideal_cpu;
	unsigned long j;
	bool needadjust = false;
	bool needwake;
	struct rcu_tasks_percpu *rtpcp;

	rhp->next = NULL;
	rhp->func = func;
	local_irq_save(flags);
	rcu_read_lock();
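	// Map this CPU to a callback queue: shift the CPU number down by
	// the current enqueue shift, then settle on the next possible CPU
	// at or after that point.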
	ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift);
	chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask);
	rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu);
	if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled.
		raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
		j = jiffies;
		if (rtpcp->rtp_jiffies != j) {
			rtpcp->rtp_jiffies = j;
			rtpcp->rtp_n_lock_retries = 0;
		}
		if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
		    READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids)
			needadjust = true;  // Defer adjustment to avoid deadlock.
	}
	// Queuing callbacks before initialization not yet supported.
	if (WARN_ON_ONCE(!rcu_segcblist_is_enabled(&rtpcp->cblist)))
		rcu_segcblist_init(&rtpcp->cblist);
	needwake = (func == wakeme_after_rcu) ||
		   (rcu_segcblist_n_cbs(&rtpcp->cblist) == rcu_task_lazy_lim);
	if (havekthread && !needwake && !timer_pending(&rtpcp->lazy_timer)) {
		if (rtp->lazy_jiffies)
			mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
		else
			needwake = rcu_segcblist_empty(&rtpcp->cblist);
	}
	if (needwake)
		rtpcp->urgent_gp = 3;
	rcu_segcblist_enqueue(&rtpcp->cblist, rhp);
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	if (unlikely(needadjust)) {
		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
		if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
			WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
			WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
			smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
			pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
		}
		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
	}
	rcu_read_unlock();
	/* We can't create the thread unless interrupts are enabled. */
	if (needwake && READ_ONCE(rtp->kthread_ptr))
		irq_work_queue(&rtpcp->rtp_irq_work);
}

// RCU callback function for rcu_barrier_tasks_generic().
static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp)
{
	struct rcu_tasks *rtp;
	struct rcu_tasks_percpu *rtpcp;

	rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head);
	rtp = rtpcp->rtpp;
	if (atomic_dec_and_test(&rtp->barrier_q_count))
		complete(&rtp->barrier_q_completion);
}

// Wait for all in-flight callbacks for the specified RCU Tasks flavor.
// Operates in a manner similar to rcu_barrier().
static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
{
	int cpu;
	unsigned long flags;
	struct rcu_tasks_percpu *rtpcp;
	unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq);

	mutex_lock(&rtp->barrier_q_mutex);
	if (rcu_seq_done(&rtp->barrier_q_seq, s)) {
		smp_mb();
		mutex_unlock(&rtp->barrier_q_mutex);
		return;
	}
	rcu_seq_start(&rtp->barrier_q_seq);
	init_completion(&rtp->barrier_q_completion);
	atomic_set(&rtp->barrier_q_count, 2);
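	// The initial count of 2 keeps the completion from firing until
	// every queue has been visited; it is dropped again by the
	// atomic_sub_and_test() after the loop below.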
	for_each_possible_cpu(cpu) {
		if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim))
			break;
		rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
		rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb;
		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
		if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head))
			atomic_inc(&rtp->barrier_q_count);
		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	}
	if (atomic_sub_and_test(2, &rtp->barrier_q_count))
		complete(&rtp->barrier_q_completion);
	wait_for_completion(&rtp->barrier_q_completion);
	rcu_seq_end(&rtp->barrier_q_seq);
	mutex_unlock(&rtp->barrier_q_mutex);
}

// Advance callbacks and indicate whether either a grace period or
// callback invocation is needed.
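// Bit 0x1 of the return value indicates that there are callbacks ready
// to invoke, and bit 0x2 indicates that a new grace period is needed.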
static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
{
	int cpu;
	int dequeue_limit;
	unsigned long flags;
	bool gpdone = poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq);
	long n;
	long ncbs = 0;
	long ncbsnz = 0;
	int needgpcb = 0;

	dequeue_limit = smp_load_acquire(&rtp->percpu_dequeue_lim);
	for (cpu = 0; cpu < dequeue_limit; cpu++) {
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		/* Advance and accelerate any new callbacks. */
		if (!rcu_segcblist_n_cbs(&rtpcp->cblist))
			continue;
		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
		// Should we shrink down to a single callback queue?
		n = rcu_segcblist_n_cbs(&rtpcp->cblist);
		if (n) {
			ncbs += n;
			if (cpu > 0)
				ncbsnz += n;
		}
		rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
		(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
		if (rtpcp->urgent_gp > 0 && rcu_segcblist_pend_cbs(&rtpcp->cblist)) {
			if (rtp->lazy_jiffies)
				rtpcp->urgent_gp--;
			needgpcb |= 0x3;
		} else if (rcu_segcblist_empty(&rtpcp->cblist)) {
			rtpcp->urgent_gp = 0;
		}
		if (rcu_segcblist_ready_cbs(&rtpcp->cblist))
			needgpcb |= 0x1;
		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	}

	// Shrink down to a single callback queue if appropriate.
	// This is done in two stages: (1) If there are no more than
	// rcu_task_collapse_lim callbacks on CPU 0 and none on any other
	// CPU, limit enqueueing to CPU 0.  (2) After an RCU grace period,
	// if there has not been an increase in callbacks, limit dequeuing
	// to CPU 0.  Note the matching RCU read-side critical section in
	// call_rcu_tasks_generic().
	if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
		if (rtp->percpu_enqueue_lim > 1) {
			WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids));
			smp_store_release(&rtp->percpu_enqueue_lim, 1);
			rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
			gpdone = false;
			pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
		}
		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
	}
	if (rcu_task_cb_adjust && !ncbsnz && gpdone) {
		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
		if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) {
			WRITE_ONCE(rtp->percpu_dequeue_lim, 1);
			pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
		}
		if (rtp->percpu_dequeue_lim == 1) {
			for (cpu = rtp->percpu_dequeue_lim; cpu < nr_cpu_ids; cpu++) {
				struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

				WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist));
			}
		}
		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
	}

	return needgpcb;
}

// Advance callbacks and invoke any that are ready.
static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp)
{
	int cpu;
	int cpunext;
	int cpuwq;
	unsigned long flags;
	int len;
	struct rcu_head *rhp;
	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
	struct rcu_tasks_percpu *rtpcp_next;

	cpu = rtpcp->cpu;
	cpunext = cpu * 2 + 1;
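	// Callback invocation fans out in a binary tree: this CPU's
	// handler kicks off the handlers for CPUs 2*cpu+1 and 2*cpu+2,
	// which in turn kick off their children, covering all in-use
	// queues without any single CPU doing all of the scheduling.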
	if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
		rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
		cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
		queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
		cpunext++;
		if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
			rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
			cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
			queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
		}
	}

	if (rcu_segcblist_empty(&rtpcp->cblist) || !cpu_possible(cpu))
		return;
	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
	rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
	rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl);
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	len = rcl.len;
	for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
		debug_rcu_head_callback(rhp);
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
		cond_resched();
	}
	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
	rcu_segcblist_add_len(&rtpcp->cblist, -len);
	(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
}

// Workqueue flood to advance callbacks and invoke any that are ready.
static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp)
{
	struct rcu_tasks *rtp;
	struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work);

	rtp = rtpcp->rtpp;
	rcu_tasks_invoke_cbs(rtp, rtpcp);
}

// Wait for one grace period.
static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot)
{
	int needgpcb;

	mutex_lock(&rtp->tasks_gp_mutex);

	// In mid-boot, force a grace period; otherwise, wait for callbacks
	// to arrive before doing anything.
	if (unlikely(midboot)) {
		needgpcb = 0x2;
	} else {
		mutex_unlock(&rtp->tasks_gp_mutex);
		set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
		rcuwait_wait_event(&rtp->cbs_wait,
				   (needgpcb = rcu_tasks_need_gpcb(rtp)),
				   TASK_IDLE);
		mutex_lock(&rtp->tasks_gp_mutex);
	}

	if (needgpcb & 0x2) {
		// Wait for one grace period.
		set_tasks_gp_state(rtp, RTGS_WAIT_GP);
		rtp->gp_start = jiffies;
		rcu_seq_start(&rtp->tasks_gp_seq);
		rtp->gp_func(rtp);
		rcu_seq_end(&rtp->tasks_gp_seq);
	}

	// Invoke callbacks.
	set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
	rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0));
	mutex_unlock(&rtp->tasks_gp_mutex);
}

// RCU-tasks kthread that detects grace periods and invokes callbacks.
static int __noreturn rcu_tasks_kthread(void *arg)
{
	int cpu;
	struct rcu_tasks *rtp = arg;

	for_each_possible_cpu(cpu) {
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		timer_setup(&rtpcp->lazy_timer, call_rcu_tasks_generic_timer, 0);
		rtpcp->urgent_gp = 1;
	}

	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
	housekeeping_affine(current, HK_TYPE_RCU);
	smp_store_release(&rtp->kthread_ptr, current); // Let GPs start!

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down.  ;-)
	 */
	for (;;) {
		// Wait for one grace period and invoke any callbacks
		// that are ready.
		rcu_tasks_one_gp(rtp, false);

		// Paranoid sleep to keep this from entering a tight loop.
		schedule_timeout_idle(rtp->gp_sleep);
	}
}

// Wait for a grace period for the specified flavor of Tasks RCU.
static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
{
	/* Complain if the scheduler has not started. */
	if (WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
		      "synchronize_%s() called too soon", rtp->name))
		return;

	// If the grace-period kthread is running, use it.
	if (READ_ONCE(rtp->kthread_ptr)) {
		wait_rcu_gp(rtp->call_func);
		return;
	}
	rcu_tasks_one_gp(rtp, true);
}

/* Spawn RCU-tasks grace-period kthread. */
static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
{
	struct task_struct *t;

	t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
	if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
		return;
	smp_mb(); /* Ensure others see full kthread. */
}

#ifndef CONFIG_TINY_RCU

/*
 * Print any non-default Tasks RCU settings.
 */
static void __init rcu_tasks_bootup_oddness(void)
{
#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
	int rtsimc;

	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
	rtsimc = clamp(rcu_task_stall_info_mult, 1, 10);
	if (rtsimc != rcu_task_stall_info_mult) {
		pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsimc);
		rcu_task_stall_info_mult = rtsimc;
	}
#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
#ifdef CONFIG_TASKS_RCU
	pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_TASKS_RUDE_RCU
	pr_info("\tRude variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
#ifdef CONFIG_TASKS_TRACE_RCU
	pr_info("\tTracing variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}

#endif /* #ifndef CONFIG_TINY_RCU */

#ifndef CONFIG_TINY_RCU
/* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
{
	int cpu;
	bool havecbs = false;
	bool haveurgent = false;
	bool haveurgentcbs = false;

	for_each_possible_cpu(cpu) {
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)))
			havecbs = true;
		if (data_race(rtpcp->urgent_gp))
			haveurgent = true;
		if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)) && data_race(rtpcp->urgent_gp))
			haveurgentcbs = true;
		if (havecbs && haveurgent && haveurgentcbs)
			break;
	}
	pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c%c%c l:%lu %s\n",
		rtp->kname,
		tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
		jiffies - data_race(rtp->gp_jiffies),
		data_race(rcu_seq_current(&rtp->tasks_gp_seq)),
		data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
		".k"[!!data_race(rtp->kthread_ptr)],
		".C"[havecbs],
		".u"[haveurgent],
		".U"[haveurgentcbs],
		rtp->lazy_jiffies,
		s);
}
#endif // #ifndef CONFIG_TINY_RCU

static void exit_tasks_rcu_finish_trace(struct task_struct *t);

#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)

////////////////////////////////////////////////////////////////////////
//
// Shared code between task-list-scanning variants of Tasks RCU.

/* Wait for one RCU-tasks grace period. */
static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
{
	struct task_struct *g;
	int fract;
	LIST_HEAD(holdouts);
	unsigned long j;
	unsigned long lastinfo;
	unsigned long lastreport;
	bool reported = false;
	int rtsi;
	struct task_struct *t;

	set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
	rtp->pregp_func(&holdouts);

	/*
	 * There were callbacks, so we need to wait for an RCU-tasks
	 * grace period.  Start off by scanning the task list for tasks
	 * that are not already voluntarily blocked.  Mark these tasks
	 * and make a list of them in holdouts.
	 */
	set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
	if (rtp->pertask_func) {
		rcu_read_lock();
		for_each_process_thread(g, t)
			rtp->pertask_func(t, &holdouts);
		rcu_read_unlock();
	}

	set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
	rtp->postscan_func(&holdouts);

	/*
	 * Each pass through the following loop scans the list of holdout
	 * tasks, removing any that are no longer holdouts.  When the list
	 * is empty, we are done.
	 */
	lastreport = jiffies;
	lastinfo = lastreport;
	rtsi = READ_ONCE(rcu_task_stall_info);

	// Start off with initial wait and slowly back off to 1 HZ wait.
	fract = rtp->init_fract;
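	// For example, the classic flavor sets init_fract to HZ / 10 in
	// rcu_spawn_tasks_kthread(), and each pass through the loop below
	// then lengthens the wait by one jiffy, capped at HZ.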

	while (!list_empty(&holdouts)) {
		ktime_t exp;
		bool firstreport;
		bool needreport;
		int rtst;

		// Slowly back off waiting for holdouts
		set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
			schedule_timeout_idle(fract);
		} else {
			exp = jiffies_to_nsecs(fract);
			__set_current_state(TASK_IDLE);
			schedule_hrtimeout_range(&exp, jiffies_to_nsecs(HZ / 2), HRTIMER_MODE_REL_HARD);
		}

		if (fract < HZ)
			fract++;

		rtst = READ_ONCE(rcu_task_stall_timeout);
		needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
		if (needreport) {
			lastreport = jiffies;
			reported = true;
		}
		firstreport = true;
		WARN_ON(signal_pending(current));
		set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
		rtp->holdouts_func(&holdouts, needreport, &firstreport);

		// Print pre-stall informational messages if needed.
		j = jiffies;
		if (rtsi > 0 && !reported && time_after(j, lastinfo + rtsi)) {
			lastinfo = j;
			rtsi = rtsi * rcu_task_stall_info_mult;
			pr_info("%s: %s grace period number %lu (since boot) is %lu jiffies old.\n",
				__func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start);
		}
	}

	set_tasks_gp_state(rtp, RTGS_POST_GP);
	rtp->postgp_func(rtp);
}

#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */

#ifdef CONFIG_TASKS_RCU

////////////////////////////////////////////////////////////////////////
//
// Simple variant of RCU whose quiescent states are voluntary context
// switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
// As such, grace periods can take one good long time.  There are no
// read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
// because this implementation is intended to get the system into a safe
// state for some of the manipulations involved in tracing and the like.
// Finally, this implementation does not support high call_rcu_tasks()
// rates from multiple CPUs.  If this is required, per-CPU callback lists
// will be needed.
//
// The implementation uses rcu_tasks_wait_gp(), which relies on function
// pointers in the rcu_tasks structure.  The rcu_spawn_tasks_kthread()
// function sets these function pointers up so that rcu_tasks_wait_gp()
// invokes these functions in this order:
//
// rcu_tasks_pregp_step():
//	Invokes synchronize_rcu() in order to wait for all in-flight
//	t->on_rq and t->nvcsw transitions to complete.  This works because
//	all such transitions are carried out with interrupts disabled.
// rcu_tasks_pertask(), invoked on every non-idle task:
//	For every runnable non-idle task other than the current one, use
//	get_task_struct() to pin down that task, snapshot that task's
//	number of voluntary context switches, and add that task to the
//	holdout list.
// rcu_tasks_postscan():
//	Gather per-CPU lists of tasks in do_exit() to ensure that all
//	tasks that were in the process of exiting (and which thus might
//	not know to synchronize with this RCU Tasks grace period) have
//	completed exiting.  The synchronize_rcu() in rcu_tasks_postgp()
//	will take care of any tasks stuck in the non-preemptible region
//	of do_exit() following its call to exit_tasks_rcu_stop().
// check_all_holdout_tasks(), repeatedly until holdout list is empty:
//	Scans the holdout list, attempting to identify a quiescent state
//	for each task on the list.  If there is a quiescent state, the
//	corresponding task is removed from the holdout list.
// rcu_tasks_postgp():
//	Invokes synchronize_rcu() in order to ensure that all prior
//	t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
//	to have happened before the end of this RCU Tasks grace period.
//	Again, this works because all such transitions are carried out
//	with interrupts disabled.
//
// For each exiting task, the exit_tasks_rcu_start() and
// exit_tasks_rcu_finish() functions add and remove, respectively, the
// current task to a per-CPU list of tasks that rcu_tasks_postscan() must
// wait on.  This is necessary because rcu_tasks_postscan() must wait on
// tasks that have already been removed from the global list of tasks.
//
// Pre-grace-period update-side code is ordered before the grace period
// via the raw_spin_lock.*rcu_node().  Pre-grace-period read-side code
// is ordered before the grace period via the synchronize_rcu() call in
// rcu_tasks_pregp_step() and by the scheduler's locks and interrupt
// disabling.
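
/*
 * Illustrative (hypothetical) usage sketch: a tracer that redirects a
 * function's preamble through a trampoline might free the old trampoline
 * only after a Tasks-RCU grace period guarantees that no task can still
 * be executing within it.  The struct trampoline and its rh field below
 * are made up for this sketch; only call_rcu_tasks() and
 * synchronize_rcu_tasks() are APIs provided by this file:
 *
 *	static void trampoline_free_cb(struct rcu_head *rhp)
 *	{
 *		struct trampoline *tr = container_of(rhp, struct trampoline, rh);
 *
 *		kfree(tr);
 *	}
 *
 *	// Asynchronous: queue the free for after a grace period.
 *	call_rcu_tasks(&old_tr->rh, trampoline_free_cb);
 *
 *	// Or synchronous: block until a grace period has elapsed.
 *	synchronize_rcu_tasks();
 *	kfree(old_tr);
 */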

/* Pre-grace-period preparation. */
static void rcu_tasks_pregp_step(struct list_head *hop)
{
	/*
	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
	 * to complete.  Invoking synchronize_rcu() suffices because all
	 * these transitions occur with interrupts disabled.  Without this
	 * synchronize_rcu(), a read-side critical section that started
	 * before the grace period might be incorrectly seen as having
	 * started after the grace period.
	 *
	 * This synchronize_rcu() also dispenses with the need for a
	 * memory barrier on the first store to t->rcu_tasks_holdout,
	 * as it forces the store to happen after the beginning of the
	 * grace period.
	 */
	synchronize_rcu();
}

/* Check for quiescent states since the pregp's synchronize_rcu(). */
static bool rcu_tasks_is_holdout(struct task_struct *t)
{
	int cpu;

	/* Has the task been seen voluntarily sleeping? */
	if (!READ_ONCE(t->on_rq))
		return false;

	/*
	 * Idle tasks (or idle injection) within the idle loop are RCU-tasks
	 * quiescent states.  But CPU boot code performed by the idle task
	 * isn't a quiescent state.
	 */
	if (is_idle_task(t))
		return false;

	cpu = task_cpu(t);

	/* Idle tasks on offline CPUs are RCU-tasks quiescent states. */
	if (t == idle_task(cpu) && !rcu_cpu_online(cpu))
		return false;

	return true;
}

/* Per-task initial processing. */
static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
{
	if (t != current && rcu_tasks_is_holdout(t)) {
		get_task_struct(t);
		t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
		WRITE_ONCE(t->rcu_tasks_holdout, true);
		list_add(&t->rcu_tasks_holdout_list, hop);
	}
}

void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");

/* Processing between scanning the tasklist and draining the holdout list. */
static void rcu_tasks_postscan(struct list_head *hop)
{
	int cpu;
	int rtsi = READ_ONCE(rcu_task_stall_info);

	if (!IS_ENABLED(CONFIG_TINY_RCU)) {
		tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
		add_timer(&tasks_rcu_exit_srcu_stall_timer);
	}

	/*
	 * Exiting tasks may escape the tasklist scan.  Those are vulnerable
	 * until their final schedule() with TASK_DEAD state.  To cope with
	 * this, divide the fragile part of the exit path into two
	 * intersecting read-side critical sections:
	 *
	 * 1) A task_struct list addition before calling exit_notify(),
	 *    which may remove the task from the tasklist, with the
	 *    removal after the final preempt_disable() call in do_exit().
	 *
	 * 2) An _RCU_ read side starting with the final preempt_disable()
	 *    call in do_exit() and ending with the final call to schedule()
	 *    with TASK_DEAD state.
	 *
	 * This function handles part 1).  The postgp step handles part 2)
	 * with a call to synchronize_rcu().
	 */

	for_each_possible_cpu(cpu) {
		unsigned long j = jiffies + 1;
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, cpu);
		struct task_struct *t;
		struct task_struct *t1;
		struct list_head tmp;

		raw_spin_lock_irq_rcu_node(rtpcp);
		list_for_each_entry_safe(t, t1, &rtpcp->rtp_exit_list, rcu_tasks_exit_list) {
			if (list_empty(&t->rcu_tasks_holdout_list))
				rcu_tasks_pertask(t, hop);

			// RT kernels need frequent pauses, otherwise
			// pause at least once per pair of jiffies.
			if (!IS_ENABLED(CONFIG_PREEMPT_RT) && time_before(jiffies, j))
				continue;

			// Keep our place in the list while pausing.
			// Nothing else traverses this list, so adding a
			// bare list_head is OK.
			list_add(&tmp, &t->rcu_tasks_exit_list);
			raw_spin_unlock_irq_rcu_node(rtpcp);
			cond_resched(); // For CONFIG_PREEMPT=n kernels
			raw_spin_lock_irq_rcu_node(rtpcp);
			t1 = list_entry(tmp.next, struct task_struct, rcu_tasks_exit_list);
			list_del(&tmp);
			j = jiffies + 1;
		}
		raw_spin_unlock_irq_rcu_node(rtpcp);
	}

	if (!IS_ENABLED(CONFIG_TINY_RCU))
		del_timer_sync(&tasks_rcu_exit_srcu_stall_timer);
}

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !rcu_tasks_is_holdout(t) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && READ_ONCE(t->rcu_tasks_idle_cpu) >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	rcu_request_urgent_qs_task(t);
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 data_race(t->rcu_tasks_idle_cpu), cpu);
	sched_show_task(t);
}

/* Scan the holdout lists for tasks no longer holding out. */
static void check_all_holdout_tasks(struct list_head *hop,
				    bool needreport, bool *firstreport)
{
	struct task_struct *t, *t1;

	list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
		check_holdout_task(t, needreport, firstreport);
		cond_resched();
	}
}

/* Finish off the Tasks-RCU grace period. */
static void rcu_tasks_postgp(struct rcu_tasks *rtp)
{
	/*
	 * Because ->on_rq and ->nvcsw are not guaranteed to have full
	 * memory barriers prior to them in the schedule() path, memory
	 * reordering on other CPUs could cause their RCU-tasks read-side
	 * critical sections to extend past the end of the grace period.
	 * However, because these ->nvcsw updates are carried out with
	 * interrupts disabled, we can use synchronize_rcu() to force the
	 * needed ordering on all such CPUs.
	 *
	 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
	 * accesses to be within the grace period, avoiding the need for
	 * memory barriers for ->rcu_tasks_holdout accesses.
	 *
	 * In addition, this synchronize_rcu() waits for exiting tasks
	 * to complete their final preempt_disable() region of execution,
	 * thereby enforcing that the whole region from tasklist removal
	 * until the final schedule() with TASK_DEAD state acts as an RCU
	 * Tasks read-side critical section.
	 */
	synchronize_rcu();
}

static void tasks_rcu_exit_srcu_stall(struct timer_list *unused)
{
#ifndef CONFIG_TINY_RCU
	int rtsi;

	rtsi = READ_ONCE(rcu_task_stall_info);
	pr_info("%s: %s grace period number %lu (since boot) gp_state: %s is %lu jiffies old.\n",
		__func__, rcu_tasks.kname, rcu_tasks.tasks_gp_seq,
		tasks_gp_state_getname(&rcu_tasks), jiffies - rcu_tasks.gp_jiffies);
	pr_info("Please check any exiting tasks stuck between calls to exit_tasks_rcu_start() and exit_tasks_rcu_finish()\n");
	tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
	add_timer(&tasks_rcu_exit_srcu_stall_timer);
#endif // #ifndef CONFIG_TINY_RCU
}

/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle,
 * or transition to usermode execution.  As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);

/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	rcu_barrier_tasks_generic(&rcu_tasks);
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

static int rcu_tasks_lazy_ms = -1;
module_param(rcu_tasks_lazy_ms, int, 0444);

static int __init rcu_spawn_tasks_kthread(void)
{
	rcu_tasks.gp_sleep = HZ / 10;
	rcu_tasks.init_fract = HZ / 10;
	if (rcu_tasks_lazy_ms >= 0)
		rcu_tasks.lazy_jiffies = msecs_to_jiffies(rcu_tasks_lazy_ms);
	rcu_tasks.pregp_func = rcu_tasks_pregp_step;
	rcu_tasks.pertask_func = rcu_tasks_pertask;
	rcu_tasks.postscan_func = rcu_tasks_postscan;
	rcu_tasks.holdouts_func = check_all_holdout_tasks;
	rcu_tasks.postgp_func = rcu_tasks_postgp;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks);
	return 0;
}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_classic_gp_kthread(void)
{
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)

struct task_struct *get_rcu_tasks_gp_kthread(void)
{
	return rcu_tasks.kthread_ptr;
}
EXPORT_SYMBOL_GPL(get_rcu_tasks_gp_kthread);

/*
 * Protect against tasklist scan blind spot while the task is exiting and
 * may be removed from the tasklist.  Do this by adding the task to yet
 * another list.
 *
 * Note that the task will remove itself from this list, so there is no
 * need for get_task_struct(), except in the case where rcu_tasks_pertask()
 * adds it to the holdout list, in which case rcu_tasks_pertask() supplies
 * the needed get_task_struct().
 */
void exit_tasks_rcu_start(void)
{
	unsigned long flags;
	struct rcu_tasks_percpu *rtpcp;
	struct task_struct *t = current;

	WARN_ON_ONCE(!list_empty(&t->rcu_tasks_exit_list));
	preempt_disable();
	rtpcp = this_cpu_ptr(rcu_tasks.rtpcpu);
	t->rcu_tasks_exit_cpu = smp_processor_id();
	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
	if (!rtpcp->rtp_exit_list.next)
		INIT_LIST_HEAD(&rtpcp->rtp_exit_list);
	list_add(&t->rcu_tasks_exit_list, &rtpcp->rtp_exit_list);
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	preempt_enable();
}

/*
 * Remove the task from the "yet another list" because do_exit() is now
 * non-preemptible, allowing synchronize_rcu() to wait beyond this point.
 */
void exit_tasks_rcu_stop(void)
{
	unsigned long flags;
	struct rcu_tasks_percpu *rtpcp;
	struct task_struct *t = current;

	WARN_ON_ONCE(list_empty(&t->rcu_tasks_exit_list));
	rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, t->rcu_tasks_exit_cpu);
	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
	list_del_init(&t->rcu_tasks_exit_list);
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
}

/*
 * Contribute to protect against tasklist scan blind spot while the
 * task is exiting and may be removed from the tasklist.  See the
 * corresponding synchronize_rcu() in rcu_tasks_postgp() for further
 * details.
 */
void exit_tasks_rcu_finish(void)
{
	exit_tasks_rcu_stop();
	exit_tasks_rcu_finish_trace(current);
}

#else /* #ifdef CONFIG_TASKS_RCU */
void exit_tasks_rcu_start(void) { }
void exit_tasks_rcu_stop(void) { }
void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
#endif /* #else #ifdef CONFIG_TASKS_RCU */
1242 | |
1243 | #ifdef CONFIG_TASKS_RUDE_RCU |
1244 | |
1245 | //////////////////////////////////////////////////////////////////////// |
1246 | // |
1247 | // "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of |
1248 | // passing an empty function to schedule_on_each_cpu(). This approach |
1249 | // provides an asynchronous call_rcu_tasks_rude() API and batching of |
1250 | // concurrent calls to the synchronous synchronize_rcu_tasks_rude() API. |
1251 | // This invokes schedule_on_each_cpu() in order to send IPIs far and wide |
1252 | // and induces otherwise unnecessary context switches on all online CPUs, |
1253 | // whether idle or not. |
1254 | // |
1255 | // Callback handling is provided by the rcu_tasks_kthread() function. |
1256 | // |
1257 | // Ordering is provided by the scheduler's context-switch code. |
1258 | |
1259 | // Empty function to allow workqueues to force a context switch. |
1260 | static void rcu_tasks_be_rude(struct work_struct *work) |
1261 | { |
1262 | } |
1263 | |
1264 | // Wait for one rude RCU-tasks grace period. |
1265 | static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp) |
1266 | { |
1267 | rtp->n_ipis += cpumask_weight(cpu_online_mask); |
1268 | schedule_on_each_cpu(func: rcu_tasks_be_rude); |
1269 | } |
1270 | |
1271 | void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func); |
1272 | DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude, |
1273 | "RCU Tasks Rude" ); |
1274 | |
1275 | /** |
1276 | * call_rcu_tasks_rude() - Queue a callback rude task-based grace period |
1277 | * @rhp: structure to be used for queueing the RCU updates. |
1278 | * @func: actual callback function to be invoked after the grace period |
1279 | * |
1280 | * The callback function will be invoked some time after a full grace |
1281 | * period elapses, in other words after all currently executing RCU |
1282 | * read-side critical sections have completed. call_rcu_tasks_rude() |
1283 | * assumes that the read-side critical sections end at context switch, |
1284 | * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as |
1285 | * usermode execution is schedulable). As such, there are no read-side |
1286 | * primitives analogous to rcu_read_lock() and rcu_read_unlock() because |
1287 | * this primitive is intended to determine that all tasks have passed |
1288 | * through a safe state, not so much for data-structure synchronization. |
1289 | * |
1290 | * See the description of call_rcu() for more detailed information on |
1291 | * memory ordering guarantees. |
1292 | */ |
1293 | void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func) |
1294 | { |
1295 | call_rcu_tasks_generic(rhp, func, rtp: &rcu_tasks_rude); |
1296 | } |
1297 | EXPORT_SYMBOL_GPL(call_rcu_tasks_rude); |
1298 | |
1299 | /** |
1300 | * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period |
1301 | * |
1302 | * Control will return to the caller some time after a rude rcu-tasks |
1303 | * grace period has elapsed, in other words after all currently |
1304 | * executing rcu-tasks read-side critical sections have elapsed. These |
1305 | * read-side critical sections are delimited by calls to schedule(), |
1306 | * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable |
1307 | * context), and (in theory, anyway) cond_resched(). |
1308 | * |
1309 | * This is a very specialized primitive, intended only for a few uses in |
1310 | * tracing and other situations requiring manipulation of function preambles |
1311 | * and profiling hooks. The synchronize_rcu_tasks_rude() function is not |
1312 | * (yet) intended for heavy use from multiple CPUs. |
1313 | * |
1314 | * See the description of synchronize_rcu() for more detailed information |
1315 | * on memory ordering guarantees. |
1316 | */ |
1317 | void synchronize_rcu_tasks_rude(void) |
1318 | { |
1319 | synchronize_rcu_tasks_generic(rtp: &rcu_tasks_rude); |
1320 | } |
1321 | EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude); |
1322 | |
1323 | /** |
1324 | * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks. |
1325 | * |
1326 | * Although the current implementation is guaranteed to wait, it is not |
1327 | * obligated to, for example, if there are no pending callbacks. |
1328 | */ |
1329 | void rcu_barrier_tasks_rude(void) |
1330 | { |
1331 | rcu_barrier_tasks_generic(rtp: &rcu_tasks_rude); |
1332 | } |
1333 | EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude); |
1334 | |
1335 | int rcu_tasks_rude_lazy_ms = -1; |
1336 | module_param(rcu_tasks_rude_lazy_ms, int, 0444); |
1337 | |
1338 | static int __init rcu_spawn_tasks_rude_kthread(void) |
1339 | { |
1340 | rcu_tasks_rude.gp_sleep = HZ / 10; |
1341 | if (rcu_tasks_rude_lazy_ms >= 0) |
		rcu_tasks_rude.lazy_jiffies = msecs_to_jiffies(rcu_tasks_rude_lazy_ms);
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
1344 | return 0; |
1345 | } |
1346 | |
1347 | #if !defined(CONFIG_TINY_RCU) |
1348 | void show_rcu_tasks_rude_gp_kthread(void) |
1349 | { |
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
1351 | } |
1352 | EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread); |
1353 | #endif // !defined(CONFIG_TINY_RCU) |
1354 | |
1355 | struct task_struct *get_rcu_tasks_rude_gp_kthread(void) |
1356 | { |
1357 | return rcu_tasks_rude.kthread_ptr; |
1358 | } |
1359 | EXPORT_SYMBOL_GPL(get_rcu_tasks_rude_gp_kthread); |
1360 | |
1361 | #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */ |
1362 | |
1363 | //////////////////////////////////////////////////////////////////////// |
1364 | // |
1365 | // Tracing variant of Tasks RCU. This variant is designed to be used |
1366 | // to protect tracing hooks, including those of BPF. This variant |
1367 | // therefore: |
1368 | // |
1369 | // 1. Has explicit read-side markers to allow finite grace periods |
1370 | // in the face of in-kernel loops for PREEMPT=n builds. |
1371 | // |
1372 | // 2. Protects code in the idle loop, exception entry/exit, and |
1373 | // CPU-hotplug code paths, similar to the capabilities of SRCU. |
1374 | // |
1375 | // 3. Avoids expensive read-side instructions, having overhead similar |
1376 | // to that of Preemptible RCU. |
1377 | // |
1378 | // There are of course downsides. For example, the grace-period code |
1379 | // can send IPIs to CPUs, even when those CPUs are in the idle loop or |
1380 | // in nohz_full userspace. If needed, these downsides can be at least |
1381 | // partially remedied. |
1382 | // |
1383 | // Perhaps most important, this variant of RCU does not affect the vanilla |
1384 | // flavors, rcu_preempt and rcu_sched. The fact that RCU Tasks Trace |
1385 | // readers can operate from idle, offline, and exception entry/exit in no |
1386 | // way allows rcu_preempt and rcu_sched readers to also do so. |
1387 | // |
1388 | // The implementation uses rcu_tasks_wait_gp(), which relies on function |
1389 | // pointers in the rcu_tasks structure. The rcu_spawn_tasks_trace_kthread() |
1390 | // function sets these function pointers up so that rcu_tasks_wait_gp() |
1391 | // invokes these functions in this order: |
1392 | // |
1393 | // rcu_tasks_trace_pregp_step(): |
1394 | // Disables CPU hotplug, adds all currently executing tasks to the |
1395 | // holdout list, then checks the state of all tasks that blocked |
1396 | // or were preempted within their current RCU Tasks Trace read-side |
1397 | // critical section, adding them to the holdout list if appropriate. |
1398 | // Finally, this function re-enables CPU hotplug. |
1399 | // The ->pertask_func() pointer is NULL, so there is no per-task processing. |
1400 | // rcu_tasks_trace_postscan(): |
1401 | // Invokes synchronize_rcu() to wait for late-stage exiting tasks |
1402 | // to finish exiting. |
1403 | // check_all_holdout_tasks_trace(), repeatedly until holdout list is empty: |
1404 | // Scans the holdout list, attempting to identify a quiescent state |
1405 | // for each task on the list. If there is a quiescent state, the |
1406 | // corresponding task is removed from the holdout list. Once this |
1407 | // list is empty, the grace period has completed. |
1408 | // rcu_tasks_trace_postgp(): |
1409 | // Provides the needed full memory barrier and does debug checks. |
1410 | // |
// The exit_tasks_rcu_finish_trace() function synchronizes with exiting tasks.
1412 | // |
1413 | // Pre-grace-period update-side code is ordered before the grace period |
1414 | // via the ->cbs_lock and barriers in rcu_tasks_kthread(). Pre-grace-period |
1415 | // read-side code is ordered before the grace period by atomic operations |
1416 | // on .b.need_qs flag of each task involved in this process, or by scheduler |
1417 | // context-switch ordering (for locked-down non-running readers). |
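//
// For reference, a reader-side sketch.  The rcu_read_lock_trace(),
// rcu_read_unlock_trace(), and rcu_read_lock_trace_held() primitives are
// real (see include/linux/rcupdate_trace.h); the gp_hook pointer and its
// fields are invented for this example:
//
//	rcu_read_lock_trace();
//	hook = rcu_dereference_check(gp_hook, rcu_read_lock_trace_held());
//	if (hook)
//		hook->func(hook->arg);	// Protected by Tasks Trace RCU.
//	rcu_read_unlock_trace();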
1418 | |
1419 | // The lockdep state must be outside of #ifdef to be useful. |
1420 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
1421 | static struct lock_class_key rcu_lock_trace_key; |
1422 | struct lockdep_map rcu_trace_lock_map = |
1423 | STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace" , &rcu_lock_trace_key); |
1424 | EXPORT_SYMBOL_GPL(rcu_trace_lock_map); |
1425 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
1426 | |
1427 | #ifdef CONFIG_TASKS_TRACE_RCU |
1428 | |
1429 | // Record outstanding IPIs to each CPU. No point in sending two... |
1430 | static DEFINE_PER_CPU(bool, trc_ipi_to_cpu); |
1431 | |
// The number of detections of task quiescent states relying on
// heavyweight readers executing explicit memory barriers.
1434 | static unsigned long n_heavy_reader_attempts; |
1435 | static unsigned long n_heavy_reader_updates; |
1436 | static unsigned long n_heavy_reader_ofl_updates; |
1437 | static unsigned long n_trc_holdouts; |
1438 | |
1439 | void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func); |
DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
		 "RCU Tasks Trace");
1442 | |
1443 | /* Load from ->trc_reader_special.b.need_qs with proper ordering. */ |
1444 | static u8 rcu_ld_need_qs(struct task_struct *t) |
1445 | { |
1446 | smp_mb(); // Enforce full grace-period ordering. |
1447 | return smp_load_acquire(&t->trc_reader_special.b.need_qs); |
1448 | } |
1449 | |
1450 | /* Store to ->trc_reader_special.b.need_qs with proper ordering. */ |
1451 | static void rcu_st_need_qs(struct task_struct *t, u8 v) |
1452 | { |
1453 | smp_store_release(&t->trc_reader_special.b.need_qs, v); |
1454 | smp_mb(); // Enforce full grace-period ordering. |
1455 | } |
1456 | |
1457 | /* |
1458 | * Do a cmpxchg() on ->trc_reader_special.b.need_qs, allowing for |
1459 | * the four-byte operand-size restriction of some platforms. |
1460 | * Returns the old value, which is often ignored. |
1461 | */ |
1462 | u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new) |
1463 | { |
1464 | union rcu_special ret; |
1465 | union rcu_special trs_old = READ_ONCE(t->trc_reader_special); |
1466 | union rcu_special trs_new = trs_old; |
1467 | |
1468 | if (trs_old.b.need_qs != old) |
1469 | return trs_old.b.need_qs; |
1470 | trs_new.b.need_qs = new; |
1471 | ret.s = cmpxchg(&t->trc_reader_special.s, trs_old.s, trs_new.s); |
1472 | return ret.b.need_qs; |
1473 | } |
1474 | EXPORT_SYMBOL_GPL(rcu_trc_cmpxchg_need_qs); |
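
/*
 * Typical usage within this file, for orientation: attempt to transition
 * ->trc_reader_special.b.need_qs from zero to "checked", tolerating a
 * racing transition by some other path:
 *
 *	if (!rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED))
 *		; // Old value was zero, so this call did the transition.
 */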
1475 | |
1476 | /* |
1477 | * If we are the last reader, signal the grace-period kthread. |
1478 | * Also remove from the per-CPU list of blocked tasks. |
1479 | */ |
1480 | void rcu_read_unlock_trace_special(struct task_struct *t) |
1481 | { |
1482 | unsigned long flags; |
1483 | struct rcu_tasks_percpu *rtpcp; |
1484 | union rcu_special trs; |
1485 | |
1486 | // Open-coded full-word version of rcu_ld_need_qs(). |
1487 | smp_mb(); // Enforce full grace-period ordering. |
1488 | trs = smp_load_acquire(&t->trc_reader_special); |
1489 | |
1490 | if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb) |
1491 | smp_mb(); // Pairs with update-side barriers. |
1492 | // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers. |
1493 | if (trs.b.need_qs == (TRC_NEED_QS_CHECKED | TRC_NEED_QS)) { |
1494 | u8 result = rcu_trc_cmpxchg_need_qs(t, TRC_NEED_QS_CHECKED | TRC_NEED_QS, |
1495 | TRC_NEED_QS_CHECKED); |
1496 | |
		WARN_ONCE(result != trs.b.need_qs, "%s: result = %d", __func__, result);
1498 | } |
1499 | if (trs.b.blocked) { |
1500 | rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, t->trc_blkd_cpu); |
1501 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); |
		list_del_init(&t->trc_blkd_node);
1503 | WRITE_ONCE(t->trc_reader_special.b.blocked, false); |
1504 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); |
1505 | } |
1506 | WRITE_ONCE(t->trc_reader_nesting, 0); |
1507 | } |
1508 | EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special); |
1509 | |
1510 | /* Add a newly blocked reader task to its CPU's list. */ |
1511 | void rcu_tasks_trace_qs_blkd(struct task_struct *t) |
1512 | { |
1513 | unsigned long flags; |
1514 | struct rcu_tasks_percpu *rtpcp; |
1515 | |
1516 | local_irq_save(flags); |
1517 | rtpcp = this_cpu_ptr(rcu_tasks_trace.rtpcpu); |
1518 | raw_spin_lock_rcu_node(rtpcp); // irqs already disabled |
1519 | t->trc_blkd_cpu = smp_processor_id(); |
1520 | if (!rtpcp->rtp_blkd_tasks.next) |
		INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
	list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
1523 | WRITE_ONCE(t->trc_reader_special.b.blocked, true); |
1524 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); |
1525 | } |
1526 | EXPORT_SYMBOL_GPL(rcu_tasks_trace_qs_blkd); |
1527 | |
1528 | /* Add a task to the holdout list, if it is not already on the list. */ |
1529 | static void trc_add_holdout(struct task_struct *t, struct list_head *bhp) |
1530 | { |
	if (list_empty(&t->trc_holdout_list)) {
		get_task_struct(t);
		list_add(&t->trc_holdout_list, bhp);
1534 | n_trc_holdouts++; |
1535 | } |
1536 | } |
1537 | |
1538 | /* Remove a task from the holdout list, if it is in fact present. */ |
1539 | static void trc_del_holdout(struct task_struct *t) |
1540 | { |
	if (!list_empty(&t->trc_holdout_list)) {
		list_del_init(&t->trc_holdout_list);
1543 | put_task_struct(t); |
1544 | n_trc_holdouts--; |
1545 | } |
1546 | } |
1547 | |
1548 | /* IPI handler to check task state. */ |
1549 | static void trc_read_check_handler(void *t_in) |
1550 | { |
1551 | int nesting; |
1552 | struct task_struct *t = current; |
1553 | struct task_struct *texp = t_in; |
1554 | |
1555 | // If the task is no longer running on this CPU, leave. |
1556 | if (unlikely(texp != t)) |
1557 | goto reset_ipi; // Already on holdout list, so will check later. |
1558 | |
1559 | // If the task is not in a read-side critical section, and |
1560 | // if this is the last reader, awaken the grace-period kthread. |
1561 | nesting = READ_ONCE(t->trc_reader_nesting); |
1562 | if (likely(!nesting)) { |
1563 | rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); |
1564 | goto reset_ipi; |
1565 | } |
1566 | // If we are racing with an rcu_read_unlock_trace(), try again later. |
1567 | if (unlikely(nesting < 0)) |
1568 | goto reset_ipi; |
1569 | |
1570 | // Get here if the task is in a read-side critical section. |
1571 | // Set its state so that it will update state for the grace-period |
1572 | // kthread upon exit from that critical section. |
1573 | rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED); |
1574 | |
1575 | reset_ipi: |
1576 | // Allow future IPIs to be sent on CPU and for task. |
1577 | // Also order this IPI handler against any later manipulations of |
1578 | // the intended task. |
1579 | smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^ |
1580 | smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^ |
1581 | } |
1582 | |
1583 | /* Callback function for scheduler to check locked-down task. */ |
1584 | static int trc_inspect_reader(struct task_struct *t, void *bhp_in) |
1585 | { |
1586 | struct list_head *bhp = bhp_in; |
	int cpu = task_cpu(t);
1588 | int nesting; |
1589 | bool ofl = cpu_is_offline(cpu); |
1590 | |
	if (task_curr(t) && !ofl) {
1592 | // If no chance of heavyweight readers, do it the hard way. |
1593 | if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) |
1594 | return -EINVAL; |
1595 | |
		// If heavyweight readers are enabled on the remote task,
		// we can inspect its state even though it is currently
		// running.  However, we cannot safely change its state.
1599 | n_heavy_reader_attempts++; |
1600 | // Check for "running" idle tasks on offline CPUs. |
		if (!rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
1602 | return -EINVAL; // No quiescent state, do it the hard way. |
1603 | n_heavy_reader_updates++; |
1604 | nesting = 0; |
1605 | } else { |
1606 | // The task is not running, so C-language access is safe. |
1607 | nesting = t->trc_reader_nesting; |
1608 | WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t)))); |
1609 | if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl) |
1610 | n_heavy_reader_ofl_updates++; |
1611 | } |
1612 | |
1613 | // If not exiting a read-side critical section, mark as checked |
1614 | // so that the grace-period kthread will remove it from the |
1615 | // holdout list. |
1616 | if (!nesting) { |
1617 | rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); |
1618 | return 0; // In QS, so done. |
1619 | } |
1620 | if (nesting < 0) |
1621 | return -EINVAL; // Reader transitioning, try again later. |
1622 | |
1623 | // The task is in a read-side critical section, so set up its |
1624 | // state so that it will update state upon exit from that critical |
1625 | // section. |
1626 | if (!rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED)) |
1627 | trc_add_holdout(t, bhp); |
1628 | return 0; |
1629 | } |
1630 | |
1631 | /* Attempt to extract the state for the specified task. */ |
1632 | static void trc_wait_for_one_reader(struct task_struct *t, |
1633 | struct list_head *bhp) |
1634 | { |
1635 | int cpu; |
1636 | |
1637 | // If a previous IPI is still in flight, let it complete. |
1638 | if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI |
1639 | return; |
1640 | |
1641 | // The current task had better be in a quiescent state. |
1642 | if (t == current) { |
1643 | rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); |
1644 | WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting)); |
1645 | return; |
1646 | } |
1647 | |
1648 | // Attempt to nail down the task for inspection. |
1649 | get_task_struct(t); |
	if (!task_call_func(t, trc_inspect_reader, bhp)) {
1651 | put_task_struct(t); |
1652 | return; |
1653 | } |
1654 | put_task_struct(t); |
1655 | |
1656 | // If this task is not yet on the holdout list, then we are in |
1657 | // an RCU read-side critical section. Otherwise, the invocation of |
1658 | // trc_add_holdout() that added it to the list did the necessary |
1659 | // get_task_struct(). Either way, the task cannot be freed out |
1660 | // from under this code. |
1661 | |
	// If the task is currently running, send an IPI; either way,
	// add it to the holdout list.
1663 | trc_add_holdout(t, bhp); |
	if (task_curr(t) &&
1665 | time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) { |
1666 | // The task is currently running, so try IPIing it. |
		cpu = task_cpu(t);
1668 | |
1669 | // If there is already an IPI outstanding, let it happen. |
1670 | if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0) |
1671 | return; |
1672 | |
1673 | per_cpu(trc_ipi_to_cpu, cpu) = true; |
1674 | t->trc_ipi_to_cpu = cpu; |
1675 | rcu_tasks_trace.n_ipis++; |
		if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
1677 | // Just in case there is some other reason for |
1678 | // failure than the target CPU being offline. |
1679 | WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n" , |
1680 | __func__, cpu); |
1681 | rcu_tasks_trace.n_ipis_fails++; |
1682 | per_cpu(trc_ipi_to_cpu, cpu) = false; |
1683 | t->trc_ipi_to_cpu = -1; |
1684 | } |
1685 | } |
1686 | } |
1687 | |
1688 | /* |
1689 | * Initialize for first-round processing for the specified task. |
1690 | * Return false if task is NULL or already taken care of, true otherwise. |
1691 | */ |
1692 | static bool rcu_tasks_trace_pertask_prep(struct task_struct *t, bool notself) |
1693 | { |
1694 | // During early boot when there is only the one boot CPU, there |
1695 | // is no idle task for the other CPUs. Also, the grace-period |
1696 | // kthread is always in a quiescent state. In addition, just return |
1697 | // if this task is already on the list. |
	if (unlikely(t == NULL) || (t == current && notself) || !list_empty(&t->trc_holdout_list))
1699 | return false; |
1700 | |
	rcu_st_need_qs(t, 0);
1702 | t->trc_ipi_to_cpu = -1; |
1703 | return true; |
1704 | } |
1705 | |
1706 | /* Do first-round processing for the specified task. */ |
1707 | static void rcu_tasks_trace_pertask(struct task_struct *t, struct list_head *hop) |
1708 | { |
	if (rcu_tasks_trace_pertask_prep(t, true))
		trc_wait_for_one_reader(t, hop);
1711 | } |
1712 | |
1713 | /* Initialize for a new RCU-tasks-trace grace period. */ |
1714 | static void rcu_tasks_trace_pregp_step(struct list_head *hop) |
1715 | { |
1716 | LIST_HEAD(blkd_tasks); |
1717 | int cpu; |
1718 | unsigned long flags; |
1719 | struct rcu_tasks_percpu *rtpcp; |
1720 | struct task_struct *t; |
1721 | |
1722 | // There shouldn't be any old IPIs, but... |
1723 | for_each_possible_cpu(cpu) |
1724 | WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu)); |
1725 | |
1726 | // Disable CPU hotplug across the CPU scan for the benefit of |
1727 | // any IPIs that might be needed. This also waits for all readers |
1728 | // in CPU-hotplug code paths. |
1729 | cpus_read_lock(); |
1730 | |
1731 | // These rcu_tasks_trace_pertask_prep() calls are serialized to |
1732 | // allow safe access to the hop list. |
1733 | for_each_online_cpu(cpu) { |
1734 | rcu_read_lock(); |
1735 | t = cpu_curr_snapshot(cpu); |
		if (rcu_tasks_trace_pertask_prep(t, true))
			trc_add_holdout(t, hop);
1738 | rcu_read_unlock(); |
1739 | cond_resched_tasks_rcu_qs(); |
1740 | } |
1741 | |
1742 | // Only after all running tasks have been accounted for is it |
1743 | // safe to take care of the tasks that have blocked within their |
1744 | // current RCU tasks trace read-side critical section. |
1745 | for_each_possible_cpu(cpu) { |
1746 | rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, cpu); |
1747 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); |
		list_splice_init(&rtpcp->rtp_blkd_tasks, &blkd_tasks);
		while (!list_empty(&blkd_tasks)) {
1750 | rcu_read_lock(); |
1751 | t = list_first_entry(&blkd_tasks, struct task_struct, trc_blkd_node); |
			list_del_init(&t->trc_blkd_node);
			list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
1754 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); |
1755 | rcu_tasks_trace_pertask(t, hop); |
1756 | rcu_read_unlock(); |
1757 | raw_spin_lock_irqsave_rcu_node(rtpcp, flags); |
1758 | } |
1759 | raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags); |
1760 | cond_resched_tasks_rcu_qs(); |
1761 | } |
1762 | |
1763 | // Re-enable CPU hotplug now that the holdout list is populated. |
1764 | cpus_read_unlock(); |
1765 | } |
1766 | |
1767 | /* |
1768 | * Do intermediate processing between task and holdout scans. |
1769 | */ |
1770 | static void rcu_tasks_trace_postscan(struct list_head *hop) |
1771 | { |
1772 | // Wait for late-stage exiting tasks to finish exiting. |
1773 | // These might have passed the call to exit_tasks_rcu_finish(). |
1774 | |
1775 | // If you remove the following line, update rcu_trace_implies_rcu_gp()!!! |
1776 | synchronize_rcu(); |
1777 | // Any tasks that exit after this point will set |
1778 | // TRC_NEED_QS_CHECKED in ->trc_reader_special.b.need_qs. |
1779 | } |
1780 | |
1781 | /* Communicate task state back to the RCU tasks trace stall warning request. */ |
1782 | struct trc_stall_chk_rdr { |
1783 | int nesting; |
1784 | int ipi_to_cpu; |
1785 | u8 needqs; |
1786 | }; |
1787 | |
1788 | static int trc_check_slow_task(struct task_struct *t, void *arg) |
1789 | { |
1790 | struct trc_stall_chk_rdr *trc_rdrp = arg; |
1791 | |
	if (task_curr(t) && cpu_online(task_cpu(t)))
1793 | return false; // It is running, so decline to inspect it. |
1794 | trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting); |
1795 | trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu); |
1796 | trc_rdrp->needqs = rcu_ld_need_qs(t); |
1797 | return true; |
1798 | } |
1799 | |
1800 | /* Show the state of a task stalling the current RCU tasks trace GP. */ |
1801 | static void show_stalled_task_trace(struct task_struct *t, bool *firstreport) |
1802 | { |
1803 | int cpu; |
1804 | struct trc_stall_chk_rdr trc_rdr; |
	bool is_idle_tsk = is_idle_task(t);
1806 | |
1807 | if (*firstreport) { |
1808 | pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n" ); |
1809 | *firstreport = false; |
1810 | } |
	cpu = task_cpu(t);
	if (!task_call_func(t, trc_check_slow_task, &trc_rdr))
		pr_alert("P%d: %c%c\n",
			 t->pid,
			 ".I"[t->trc_ipi_to_cpu >= 0],
			 ".i"[is_idle_tsk]);
	else
		pr_alert("P%d: %c%c%c%c nesting: %d%c%c cpu: %d%s\n",
			 t->pid,
			 ".I"[trc_rdr.ipi_to_cpu >= 0],
			 ".i"[is_idle_tsk],
			 ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)],
			 ".B"[!!data_race(t->trc_reader_special.b.blocked)],
			 trc_rdr.nesting,
			 " !CN"[trc_rdr.needqs & 0x3],
			 " ?"[trc_rdr.needqs > 0x3],
			 cpu, cpu_online(cpu) ? "" : "(offline)");
	sched_show_task(t);
1829 | } |
1830 | |
1831 | /* List stalled IPIs for RCU tasks trace. */ |
1832 | static void show_stalled_ipi_trace(void) |
1833 | { |
1834 | int cpu; |
1835 | |
1836 | for_each_possible_cpu(cpu) |
1837 | if (per_cpu(trc_ipi_to_cpu, cpu)) |
1838 | pr_alert("\tIPI outstanding to CPU %d\n" , cpu); |
1839 | } |
1840 | |
1841 | /* Do one scan of the holdout list. */ |
1842 | static void check_all_holdout_tasks_trace(struct list_head *hop, |
1843 | bool needreport, bool *firstreport) |
1844 | { |
1845 | struct task_struct *g, *t; |
1846 | |
1847 | // Disable CPU hotplug across the holdout list scan for IPIs. |
1848 | cpus_read_lock(); |
1849 | |
1850 | list_for_each_entry_safe(t, g, hop, trc_holdout_list) { |
1851 | // If safe and needed, try to check the current task. |
1852 | if (READ_ONCE(t->trc_ipi_to_cpu) == -1 && |
1853 | !(rcu_ld_need_qs(t) & TRC_NEED_QS_CHECKED)) |
			trc_wait_for_one_reader(t, hop);
1855 | |
1856 | // If check succeeded, remove this task from the list. |
1857 | if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 && |
1858 | rcu_ld_need_qs(t) == TRC_NEED_QS_CHECKED) |
1859 | trc_del_holdout(t); |
1860 | else if (needreport) |
1861 | show_stalled_task_trace(t, firstreport); |
1862 | cond_resched_tasks_rcu_qs(); |
1863 | } |
1864 | |
1865 | // Re-enable CPU hotplug now that the holdout list scan has completed. |
1866 | cpus_read_unlock(); |
1867 | |
1868 | if (needreport) { |
1869 | if (*firstreport) |
1870 | pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n" ); |
1871 | show_stalled_ipi_trace(); |
1872 | } |
1873 | } |
1874 | |
1875 | static void rcu_tasks_trace_empty_fn(void *unused) |
1876 | { |
1877 | } |
1878 | |
1879 | /* Wait for grace period to complete and provide ordering. */ |
1880 | static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp) |
1881 | { |
1882 | int cpu; |
1883 | |
1884 | // Wait for any lingering IPI handlers to complete. Note that |
1885 | // if a CPU has gone offline or transitioned to userspace in the |
1886 | // meantime, all IPI handlers should have been drained beforehand. |
1887 | // Yes, this assumes that CPUs process IPIs in order. If that ever |
1888 | // changes, there will need to be a recheck and/or timed wait. |
1889 | for_each_online_cpu(cpu) |
1890 | if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu)))) |
			smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);
1892 | |
1893 | smp_mb(); // Caller's code must be ordered after wakeup. |
1894 | // Pairs with pretty much every ordering primitive. |
1895 | } |
1896 | |
1897 | /* Report any needed quiescent state for this exiting task. */ |
1898 | static void exit_tasks_rcu_finish_trace(struct task_struct *t) |
1899 | { |
1900 | union rcu_special trs = READ_ONCE(t->trc_reader_special); |
1901 | |
1902 | rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); |
1903 | WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting)); |
1904 | if (WARN_ON_ONCE(rcu_ld_need_qs(t) & TRC_NEED_QS || trs.b.blocked)) |
1905 | rcu_read_unlock_trace_special(t); |
1906 | else |
1907 | WRITE_ONCE(t->trc_reader_nesting, 0); |
1908 | } |
1909 | |
1910 | /** |
 * call_rcu_tasks_trace() - Queue a callback for a trace task-based grace period
1912 | * @rhp: structure to be used for queueing the RCU updates. |
1913 | * @func: actual callback function to be invoked after the grace period |
1914 | * |
1915 | * The callback function will be invoked some time after a trace rcu-tasks |
1916 | * grace period elapses, in other words after all currently executing |
1917 | * trace rcu-tasks read-side critical sections have completed. These |
1918 | * read-side critical sections are delimited by calls to rcu_read_lock_trace() |
1919 | * and rcu_read_unlock_trace(). |
1920 | * |
1921 | * See the description of call_rcu() for more detailed information on |
1922 | * memory ordering guarantees. |
1923 | */ |
1924 | void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func) |
1925 | { |
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
1927 | } |
1928 | EXPORT_SYMBOL_GPL(call_rcu_tasks_trace); |
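
/*
 * A made-up update-side counterpart to the reader sketch near the top of
 * this section: once gp_hook is unpublished, queue the old structure for
 * reclamation after a Tasks Trace grace period.
 *
 *	static void hook_reclaim_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_trace_hook, rh));
 *	}
 *
 *	RCU_INIT_POINTER(gp_hook, NULL);	// Unpublish the hook.
 *	call_rcu_tasks_trace(&hook->rh, hook_reclaim_cb);
 */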
1929 | |
1930 | /** |
1931 | * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period |
1932 | * |
1933 | * Control will return to the caller some time after a trace rcu-tasks |
1934 | * grace period has elapsed, in other words after all currently executing |
 * trace rcu-tasks read-side critical sections have completed. These read-side
1936 | * critical sections are delimited by calls to rcu_read_lock_trace() |
1937 | * and rcu_read_unlock_trace(). |
1938 | * |
1939 | * This is a very specialized primitive, intended only for a few uses in |
1940 | * tracing and other situations requiring manipulation of function preambles |
1941 | * and profiling hooks. The synchronize_rcu_tasks_trace() function is not |
1942 | * (yet) intended for heavy use from multiple CPUs. |
1943 | * |
1944 | * See the description of synchronize_rcu() for more detailed information |
1945 | * on memory ordering guarantees. |
1946 | */ |
1947 | void synchronize_rcu_tasks_trace(void) |
1948 | { |
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
	synchronize_rcu_tasks_generic(&rcu_tasks_trace);
1951 | } |
1952 | EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace); |
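
/*
 * The synchronous form of the same (invented) pattern: unpublish, wait
 * for all pre-existing Tasks Trace readers, then reclaim directly.  The
 * hook_mutex guarding updates is hypothetical.
 *
 *	old = rcu_replace_pointer(gp_hook, NULL, lockdep_is_held(&hook_mutex));
 *	synchronize_rcu_tasks_trace();
 *	kfree(old);
 */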
1953 | |
1954 | /** |
1955 | * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks. |
1956 | * |
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to do so if, for example, there are no pending callbacks.
1959 | */ |
1960 | void rcu_barrier_tasks_trace(void) |
1961 | { |
	rcu_barrier_tasks_generic(&rcu_tasks_trace);
1963 | } |
1964 | EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace); |
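
/*
 * A hedged module-exit sketch: before a (hypothetical) module's code and
 * data can be freed, all of its previously queued call_rcu_tasks_trace()
 * callbacks must have been invoked.
 *
 *	static void __exit my_trace_module_exit(void)
 *	{
 *		unregister_my_hooks();		// Hypothetical; stops new callbacks.
 *		rcu_barrier_tasks_trace();	// Callbacks already queued finish.
 *	}
 */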
1965 | |
1966 | int rcu_tasks_trace_lazy_ms = -1; |
1967 | module_param(rcu_tasks_trace_lazy_ms, int, 0444); |
1968 | |
1969 | static int __init rcu_spawn_tasks_trace_kthread(void) |
1970 | { |
1971 | if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) { |
1972 | rcu_tasks_trace.gp_sleep = HZ / 10; |
1973 | rcu_tasks_trace.init_fract = HZ / 10; |
1974 | } else { |
1975 | rcu_tasks_trace.gp_sleep = HZ / 200; |
1976 | if (rcu_tasks_trace.gp_sleep <= 0) |
1977 | rcu_tasks_trace.gp_sleep = 1; |
1978 | rcu_tasks_trace.init_fract = HZ / 200; |
1979 | if (rcu_tasks_trace.init_fract <= 0) |
1980 | rcu_tasks_trace.init_fract = 1; |
1981 | } |
1982 | if (rcu_tasks_trace_lazy_ms >= 0) |
		rcu_tasks_trace.lazy_jiffies = msecs_to_jiffies(rcu_tasks_trace_lazy_ms);
1984 | rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step; |
1985 | rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan; |
1986 | rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace; |
1987 | rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp; |
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
1989 | return 0; |
1990 | } |
1991 | |
1992 | #if !defined(CONFIG_TINY_RCU) |
1993 | void show_rcu_tasks_trace_gp_kthread(void) |
1994 | { |
1995 | char buf[64]; |
1996 | |
	sprintf(buf, "N%lu h:%lu/%lu/%lu",
1998 | data_race(n_trc_holdouts), |
1999 | data_race(n_heavy_reader_ofl_updates), |
2000 | data_race(n_heavy_reader_updates), |
2001 | data_race(n_heavy_reader_attempts)); |
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
2003 | } |
2004 | EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread); |
2005 | #endif // !defined(CONFIG_TINY_RCU) |
2006 | |
2007 | struct task_struct *get_rcu_tasks_trace_gp_kthread(void) |
2008 | { |
2009 | return rcu_tasks_trace.kthread_ptr; |
2010 | } |
2011 | EXPORT_SYMBOL_GPL(get_rcu_tasks_trace_gp_kthread); |
2012 | |
2013 | #else /* #ifdef CONFIG_TASKS_TRACE_RCU */ |
2014 | static void exit_tasks_rcu_finish_trace(struct task_struct *t) { } |
2015 | #endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */ |
2016 | |
2017 | #ifndef CONFIG_TINY_RCU |
2018 | void show_rcu_tasks_gp_kthreads(void) |
2019 | { |
2020 | show_rcu_tasks_classic_gp_kthread(); |
2021 | show_rcu_tasks_rude_gp_kthread(); |
2022 | show_rcu_tasks_trace_gp_kthread(); |
2023 | } |
2024 | #endif /* #ifndef CONFIG_TINY_RCU */ |
2025 | |
2026 | #ifdef CONFIG_PROVE_RCU |
2027 | struct rcu_tasks_test_desc { |
2028 | struct rcu_head rh; |
2029 | const char *name; |
2030 | bool notrun; |
2031 | unsigned long runstart; |
2032 | }; |
2033 | |
2034 | static struct rcu_tasks_test_desc tests[] = { |
2035 | { |
2036 | .name = "call_rcu_tasks()" , |
2037 | /* If not defined, the test is skipped. */ |
2038 | .notrun = IS_ENABLED(CONFIG_TASKS_RCU), |
2039 | }, |
2040 | { |
2041 | .name = "call_rcu_tasks_rude()" , |
2042 | /* If not defined, the test is skipped. */ |
2043 | .notrun = IS_ENABLED(CONFIG_TASKS_RUDE_RCU), |
2044 | }, |
2045 | { |
2046 | .name = "call_rcu_tasks_trace()" , |
2047 | /* If not defined, the test is skipped. */ |
2048 | .notrun = IS_ENABLED(CONFIG_TASKS_TRACE_RCU) |
2049 | } |
2050 | }; |
2051 | |
2052 | static void test_rcu_tasks_callback(struct rcu_head *rhp) |
2053 | { |
2054 | struct rcu_tasks_test_desc *rttd = |
2055 | container_of(rhp, struct rcu_tasks_test_desc, rh); |
2056 | |
2057 | pr_info("Callback from %s invoked.\n" , rttd->name); |
2058 | |
2059 | rttd->notrun = false; |
2060 | } |
2061 | |
2062 | static void rcu_tasks_initiate_self_tests(void) |
2063 | { |
2064 | #ifdef CONFIG_TASKS_RCU |
2065 | pr_info("Running RCU Tasks wait API self tests\n" ); |
2066 | tests[0].runstart = jiffies; |
2067 | synchronize_rcu_tasks(); |
2068 | call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback); |
2069 | #endif |
2070 | |
2071 | #ifdef CONFIG_TASKS_RUDE_RCU |
2072 | pr_info("Running RCU Tasks Rude wait API self tests\n" ); |
2073 | tests[1].runstart = jiffies; |
2074 | synchronize_rcu_tasks_rude(); |
2075 | call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback); |
2076 | #endif |
2077 | |
2078 | #ifdef CONFIG_TASKS_TRACE_RCU |
2079 | pr_info("Running RCU Tasks Trace wait API self tests\n" ); |
2080 | tests[2].runstart = jiffies; |
2081 | synchronize_rcu_tasks_trace(); |
2082 | call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback); |
2083 | #endif |
2084 | } |
2085 | |
2086 | /* |
2087 | * Return: 0 - test passed |
2088 | * 1 - test failed, but have not timed out yet |
2089 | * -1 - test failed and timed out |
2090 | */ |
2091 | static int rcu_tasks_verify_self_tests(void) |
2092 | { |
2093 | int ret = 0; |
2094 | int i; |
2095 | unsigned long bst = rcu_task_stall_timeout; |
2096 | |
2097 | if (bst <= 0 || bst > RCU_TASK_BOOT_STALL_TIMEOUT) |
2098 | bst = RCU_TASK_BOOT_STALL_TIMEOUT; |
2099 | for (i = 0; i < ARRAY_SIZE(tests); i++) { |
2100 | while (tests[i].notrun) { // still hanging. |
2101 | if (time_after(jiffies, tests[i].runstart + bst)) { |
2102 | pr_err("%s has failed boot-time tests.\n" , tests[i].name); |
2103 | ret = -1; |
2104 | break; |
2105 | } |
2106 | ret = 1; |
2107 | break; |
2108 | } |
2109 | } |
2110 | WARN_ON(ret < 0); |
2111 | |
2112 | return ret; |
2113 | } |
2114 | |
2115 | /* |
2116 | * Repeat the rcu_tasks_verify_self_tests() call once every second until the |
2117 | * test passes or has timed out. |
2118 | */ |
2119 | static struct delayed_work rcu_tasks_verify_work; |
2120 | static void rcu_tasks_verify_work_fn(struct work_struct *work __maybe_unused) |
2121 | { |
2122 | int ret = rcu_tasks_verify_self_tests(); |
2123 | |
2124 | if (ret <= 0) |
2125 | return; |
2126 | |
	/* Test has not yet passed but has not timed out; reschedule another check. */
	schedule_delayed_work(&rcu_tasks_verify_work, HZ);
2129 | } |
2130 | |
2131 | static int rcu_tasks_verify_schedule_work(void) |
2132 | { |
2133 | INIT_DELAYED_WORK(&rcu_tasks_verify_work, rcu_tasks_verify_work_fn); |
2134 | rcu_tasks_verify_work_fn(NULL); |
2135 | return 0; |
2136 | } |
2137 | late_initcall(rcu_tasks_verify_schedule_work); |
2138 | #else /* #ifdef CONFIG_PROVE_RCU */ |
2139 | static void rcu_tasks_initiate_self_tests(void) { } |
2140 | #endif /* #else #ifdef CONFIG_PROVE_RCU */ |
2141 | |
2142 | void __init tasks_cblist_init_generic(void) |
2143 | { |
2144 | lockdep_assert_irqs_disabled(); |
2145 | WARN_ON(num_online_cpus() > 1); |
2146 | |
2147 | #ifdef CONFIG_TASKS_RCU |
	cblist_init_generic(&rcu_tasks);
2149 | #endif |
2150 | |
2151 | #ifdef CONFIG_TASKS_RUDE_RCU |
	cblist_init_generic(&rcu_tasks_rude);
2153 | #endif |
2154 | |
2155 | #ifdef CONFIG_TASKS_TRACE_RCU |
	cblist_init_generic(&rcu_tasks_trace);
2157 | #endif |
2158 | } |
2159 | |
2160 | void __init rcu_init_tasks_generic(void) |
2161 | { |
2162 | #ifdef CONFIG_TASKS_RCU |
2163 | rcu_spawn_tasks_kthread(); |
2164 | #endif |
2165 | |
2166 | #ifdef CONFIG_TASKS_RUDE_RCU |
2167 | rcu_spawn_tasks_rude_kthread(); |
2168 | #endif |
2169 | |
2170 | #ifdef CONFIG_TASKS_TRACE_RCU |
2171 | rcu_spawn_tasks_trace_kthread(); |
2172 | #endif |
2173 | |
2174 | // Run the self-tests. |
2175 | rcu_tasks_initiate_self_tests(); |
2176 | } |
2177 | |
2178 | #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ |
2179 | static inline void rcu_tasks_bootup_oddness(void) {} |
2180 | #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */ |
2181 | |