// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains the base functions to manage periodic tick
 * related events.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <trace/events/power.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

/*
 * Tick devices
 */
DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
/*
 * Tick next event: keeps track of the tick time. It's updated by the
 * CPU which handles the tick and protected by jiffies_lock. There is
 * no requirement to hold the jiffies seqcount write side for it.
 */
ktime_t tick_next_period;

/*
 * tick_do_timer_cpu is a timer core internal variable which holds the CPU NR
 * which is responsible for calling do_timer(), i.e. the timekeeping stuff. This
 * variable has two functions:
 *
 * 1) Prevent a thundering herd issue of a gazillion CPUs trying to grab the
 *    timekeeping lock all at once. Only the CPU which is assigned to do the
 *    update is handling it.
 *
 * 2) Hand off the duty in the NOHZ idle case by setting the value to
 *    TICK_DO_TIMER_NONE, i.e. a non-existing CPU. So the next CPU which looks
 *    at it will take over and keep the timekeeping alive. The handover
 *    procedure also covers CPU hotplug.
 */
int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
#ifdef CONFIG_NO_HZ_FULL
/*
 * tick_do_timer_boot_cpu indicates the boot CPU temporarily owns
 * tick_do_timer_cpu and it should be taken over by an eligible secondary
 * when one comes online.
 */
static int tick_do_timer_boot_cpu __read_mostly = -1;
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_device(int cpu)
{
	return &per_cpu(tick_cpu_device, cpu);
}

/**
 * tick_is_oneshot_available - check for a oneshot capable event device
 */
int tick_is_oneshot_available(void)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);

	if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return 0;
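	/*
	 * Devices with CLOCK_EVT_FEAT_C3STOP stop in deep idle states,
	 * so oneshot mode is only usable on them when the broadcast
	 * infrastructure can take over while they are stopped.
	 */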
	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 1;
	return tick_broadcast_oneshot_available();
}

/*
 * Periodic tick
 */
static void tick_periodic(int cpu)
{
	if (tick_do_timer_cpu == cpu) {
		raw_spin_lock(&jiffies_lock);
		write_seqcount_begin(&jiffies_seq);

		/* Keep track of the next tick event */
		tick_next_period = ktime_add_ns(tick_next_period, TICK_NSEC);

		do_timer(1);
		write_seqcount_end(&jiffies_seq);
		raw_spin_unlock(&jiffies_lock);
		update_wall_time();
	}

	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);
}

/*
 * Event handler for periodic ticks
 */
void tick_handle_periodic(struct clock_event_device *dev)
{
	int cpu = smp_processor_id();
	ktime_t next = dev->next_event;

	tick_periodic(cpu);

#if defined(CONFIG_HIGH_RES_TIMERS) || defined(CONFIG_NO_HZ_COMMON)
	/*
	 * The cpu might have transitioned to HIGHRES or NOHZ mode via
	 * update_process_times() -> run_local_timers() ->
	 * hrtimer_run_queues().
	 */
	if (dev->event_handler != tick_handle_periodic)
		return;
#endif

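	/*
	 * If the device is still in periodic mode, the hardware
	 * retriggers the tick on its own and nothing needs to be
	 * reprogrammed here.
	 */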
	if (!clockevent_state_oneshot(dev))
		return;
	for (;;) {
		/*
		 * Set up the next period for devices which do not have
		 * periodic mode:
		 */
		next = ktime_add_ns(next, TICK_NSEC);

		if (!clockevents_program_event(dev, next, false))
			return;
		/*
		 * Have to be careful here. If we're in oneshot mode,
		 * before we call tick_periodic() in a loop, we need
		 * to be sure we're using a real hardware clocksource.
		 * Otherwise we could get trapped in an infinite loop,
		 * as tick_periodic() increments jiffies, which then
		 * advances the time, possibly causing the loop to
		 * trigger again and again.
		 */
		if (timekeeping_valid_for_hres())
			tick_periodic(cpu);
	}
}

/*
 * Setup the device for a periodic tick
 */
void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
{
	tick_set_periodic_handler(dev, broadcast);

	/* Broadcast setup? */
	if (!tick_device_is_functional(dev))
		return;

	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
	    !tick_broadcast_oneshot_active()) {
		clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC);
	} else {
		unsigned int seq;
		ktime_t next;

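		/*
		 * Snapshot tick_next_period under the jiffies seqcount so
		 * a concurrent tick update cannot hand us a torn value.
		 */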
		do {
			seq = read_seqcount_begin(&jiffies_seq);
			next = tick_next_period;
		} while (read_seqcount_retry(&jiffies_seq, seq));

		clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);

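		/*
		 * clockevents_program_event() fails when the requested
		 * expiry time is already in the past, so keep advancing
		 * tick by tick until an event gets programmed.
		 */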
		for (;;) {
			if (!clockevents_program_event(dev, next, false))
				return;
			next = ktime_add_ns(next, TICK_NSEC);
		}
	}
}

#ifdef CONFIG_NO_HZ_FULL
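/*
 * Runs on the boot CPU via smp_call_function_single() to hand the
 * do_timer duty over to the CPU passed in @info.
 */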
static void giveup_do_timer(void *info)
{
	int cpu = *(unsigned int *)info;

	WARN_ON(tick_do_timer_cpu != smp_processor_id());

	tick_do_timer_cpu = cpu;
}

static void tick_take_do_timer_from_boot(void)
{
	int cpu = smp_processor_id();
	int from = tick_do_timer_boot_cpu;

	if (from >= 0 && from != cpu)
		smp_call_function_single(from, giveup_do_timer, &cpu, 1);
}
#endif

/*
 * Setup the tick device
 */
static void tick_setup_device(struct tick_device *td,
			      struct clock_event_device *newdev, int cpu,
			      const struct cpumask *cpumask)
{
	void (*handler)(struct clock_event_device *) = NULL;
	ktime_t next_event = 0;

	/*
	 * First device setup?
	 */
	if (!td->evtdev) {
		/*
		 * If no cpu took the do_timer update, assign it to
		 * this cpu:
		 */
		if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
			tick_do_timer_cpu = cpu;
			tick_next_period = ktime_get();
#ifdef CONFIG_NO_HZ_FULL
			/*
			 * The boot CPU may be nohz_full, in which case set
			 * tick_do_timer_boot_cpu so the first housekeeping
			 * secondary that comes up will take do_timer from
			 * us.
			 */
			if (tick_nohz_full_cpu(cpu))
				tick_do_timer_boot_cpu = cpu;

		} else if (tick_do_timer_boot_cpu != -1 &&
			   !tick_nohz_full_cpu(cpu)) {
			tick_take_do_timer_from_boot();
			tick_do_timer_boot_cpu = -1;
			WARN_ON(tick_do_timer_cpu != cpu);
#endif
		}

		/*
		 * Startup in periodic mode first.
		 */
		td->mode = TICKDEV_MODE_PERIODIC;
	} else {
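		/*
		 * Save the handler and the next event of the outgoing
		 * device so a oneshot capable replacement can resume
		 * where it left off, and park the old device on a noop
		 * handler in the meantime.
		 */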
		handler = td->evtdev->event_handler;
		next_event = td->evtdev->next_event;
		td->evtdev->event_handler = clockevents_handle_noop;
	}

	td->evtdev = newdev;

	/*
	 * When the device is not per cpu, pin the interrupt to the
	 * current cpu:
	 */
	if (!cpumask_equal(newdev->cpumask, cpumask))
		irq_set_affinity(newdev->irq, cpumask);

	/*
	 * When global broadcasting is active, check if the current
	 * device is registered as a placeholder for broadcast mode.
	 * This allows us to handle this x86 misfeature in a generic
	 * way. This function also returns !=0 when we keep the
	 * current active broadcast state for this CPU.
	 */
	if (tick_device_uses_broadcast(newdev, cpu))
		return;

	if (td->mode == TICKDEV_MODE_PERIODIC)
		tick_setup_periodic(newdev, 0);
	else
		tick_setup_oneshot(newdev, handler, next_event);
}

void tick_install_replacement(struct clock_event_device *newdev)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	int cpu = smp_processor_id();

	clockevents_exchange_device(td->evtdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();
}

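/*
 * Check whether the new device is usable as the per-cpu tick device for
 * the given CPU: it must cover the CPU and either be strictly CPU local
 * or allow its interrupt to be steered to that CPU.
 */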
static bool tick_check_percpu(struct clock_event_device *curdev,
			      struct clock_event_device *newdev, int cpu)
{
	if (!cpumask_test_cpu(cpu, newdev->cpumask))
		return false;
	if (cpumask_equal(newdev->cpumask, cpumask_of(cpu)))
		return true;
	/* Check if irq affinity can be set */
	if (newdev->irq >= 0 && !irq_can_set_affinity(newdev->irq))
		return false;
	/* Prefer an existing cpu local device */
	if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
		return false;
	return true;
}

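/*
 * Rank the new device against the current one by oneshot capability,
 * rating and CPU locality.
 */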
static bool tick_check_preferred(struct clock_event_device *curdev,
				 struct clock_event_device *newdev)
{
	/* Prefer oneshot capable device */
	if (!(newdev->features & CLOCK_EVT_FEAT_ONESHOT)) {
		if (curdev && (curdev->features & CLOCK_EVT_FEAT_ONESHOT))
			return false;
		if (tick_oneshot_mode_active())
			return false;
	}

	/*
	 * Use the higher rated one, but prefer a CPU local device with a lower
	 * rating than a non-CPU local device
	 */
	return !curdev ||
		newdev->rating > curdev->rating ||
		!cpumask_equal(curdev->cpumask, newdev->cpumask);
}

/*
 * Check whether the new device is a better fit than curdev. curdev
 * can be NULL!
 */
bool tick_check_replacement(struct clock_event_device *curdev,
			    struct clock_event_device *newdev)
{
	if (!tick_check_percpu(curdev, newdev, smp_processor_id()))
		return false;

	return tick_check_preferred(curdev, newdev);
}

/*
 * Check if the newly registered device should be used. Called with
 * clockevents_lock held and interrupts disabled.
 */
void tick_check_new_device(struct clock_event_device *newdev)
{
	struct clock_event_device *curdev;
	struct tick_device *td;
	int cpu;

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	curdev = td->evtdev;

	if (!tick_check_replacement(curdev, newdev))
		goto out_bc;

	if (!try_module_get(newdev->owner))
		return;

	/*
	 * Replace the possibly existing device by the new
	 * device. If the current device is the broadcast device, do
	 * not give it back to the clockevents layer!
	 */
	if (tick_is_broadcast_device(curdev)) {
		clockevents_shutdown(curdev);
		curdev = NULL;
	}
	clockevents_exchange_device(curdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();
	return;

out_bc:
	/*
	 * Can the new device be used as a broadcast device?
	 */
	tick_install_broadcast_device(newdev, cpu);
}

/**
 * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode
 * @state:	The target state (enter/exit)
 *
 * The system enters/leaves a state where affected devices might stop.
 * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
 *
 * Called with interrupts disabled, so clockevents_lock is not
 * required here because the local clock event device cannot go away
 * under us.
 */
int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

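	/*
	 * If the local device keeps ticking in deep idle states (no
	 * C3STOP), no broadcast protection is required.
	 */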
	if (!(td->evtdev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	return __tick_broadcast_oneshot_control(state);
}
EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control);

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Transfer the do_timer job away from a dying cpu.
 *
 * Called with interrupts disabled. No locking required. If
 * tick_do_timer_cpu is owned by this cpu, nothing can change it.
 */
void tick_handover_do_timer(void)
{
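	/*
	 * Note: the assumption here is that the dying CPU has already
	 * been cleared from cpu_online_mask, so cpumask_first() picks a
	 * CPU which stays online to continue the jiffies updates.
	 */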
	if (tick_do_timer_cpu == smp_processor_id())
		tick_do_timer_cpu = cpumask_first(cpu_online_mask);
}

/*
 * Shutdown an event device on a given cpu:
 *
 * This is called on a live CPU, when a CPU is dead. So we cannot
 * access the hardware device itself.
 * We just set the mode and remove it from the lists.
 */
void tick_shutdown(unsigned int cpu)
{
	struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
	struct clock_event_device *dev = td->evtdev;

	td->mode = TICKDEV_MODE_PERIODIC;
	if (dev) {
		/*
		 * Prevent the clock events layer from trying to call
		 * the set mode function!
		 */
		clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);
		clockevents_exchange_device(dev, NULL);
		dev->event_handler = clockevents_handle_noop;
		td->evtdev = NULL;
	}
}
#endif

/**
 * tick_suspend_local - Suspend the local tick device
 *
 * Called from the local cpu for freeze with interrupts disabled.
 *
 * No locks required. Nothing can change the per cpu device.
 */
void tick_suspend_local(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

	clockevents_shutdown(td->evtdev);
}

/**
 * tick_resume_local - Resume the local tick device
 *
 * Called from the local CPU for unfreeze or XEN resume magic.
 *
 * No locks required. Nothing can change the per cpu device.
 */
void tick_resume_local(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	bool broadcast = tick_resume_check_broadcast();

	clockevents_tick_resume(td->evtdev);
	if (!broadcast) {
		if (td->mode == TICKDEV_MODE_PERIODIC)
			tick_setup_periodic(td->evtdev, 0);
		else
			tick_resume_oneshot();
	}

	/*
	 * Ensure that hrtimers are up to date and the clockevents device
	 * is reprogrammed correctly when high resolution timers are
	 * enabled.
	 */
	hrtimers_resume_local();
}

/**
 * tick_suspend - Suspend the tick and the broadcast device
 *
 * Called from syscore_suspend() via timekeeping_suspend with only one
 * CPU online and interrupts disabled or from tick_unfreeze() under
 * tick_freeze_lock.
 *
 * No locks required. Nothing can change the per cpu device.
 */
void tick_suspend(void)
{
	tick_suspend_local();
	tick_suspend_broadcast();
}

/**
 * tick_resume - Resume the tick and the broadcast device
 *
 * Called from syscore_resume() via timekeeping_resume with only one
 * CPU online and interrupts disabled.
 *
 * No locks required. Nothing can change the per cpu device.
 */
void tick_resume(void)
{
	tick_resume_broadcast();
	tick_resume_local();
}

#ifdef CONFIG_SUSPEND
static DEFINE_RAW_SPINLOCK(tick_freeze_lock);
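/*
 * Number of CPUs which have frozen their tick. The last CPU to freeze
 * suspends timekeeping; the first one to unfreeze resumes it.
 */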
static unsigned int tick_freeze_depth;

/**
 * tick_freeze - Suspend the local tick and (possibly) timekeeping.
 *
 * Check if this is the last online CPU executing the function and if so,
 * suspend timekeeping. Otherwise suspend the local tick.
 *
 * Call with interrupts disabled. Must be balanced with %tick_unfreeze().
 * Interrupts must not be enabled before the subsequent %tick_unfreeze().
 */
void tick_freeze(void)
{
	raw_spin_lock(&tick_freeze_lock);

	tick_freeze_depth++;
	if (tick_freeze_depth == num_online_cpus()) {
		trace_suspend_resume(TPS("timekeeping_freeze"),
				     smp_processor_id(), true);
		system_state = SYSTEM_SUSPEND;
		sched_clock_suspend();
		timekeeping_suspend();
	} else {
		tick_suspend_local();
	}

	raw_spin_unlock(&tick_freeze_lock);
}

/**
 * tick_unfreeze - Resume the local tick and (possibly) timekeeping.
 *
 * Check if this is the first CPU executing the function and if so, resume
 * timekeeping. Otherwise resume the local tick.
 *
 * Call with interrupts disabled. Must be balanced with %tick_freeze().
 * Interrupts must not be enabled after the preceding %tick_freeze().
 */
void tick_unfreeze(void)
{
	raw_spin_lock(&tick_freeze_lock);

	if (tick_freeze_depth == num_online_cpus()) {
		timekeeping_resume();
		sched_clock_resume();
		system_state = SYSTEM_RUNNING;
		trace_suspend_resume(TPS("timekeeping_freeze"),
				     smp_processor_id(), false);
	} else {
		touch_softlockup_watchdog();
		tick_resume_local();
	}

	tick_freeze_depth--;

	raw_spin_unlock(&tick_freeze_lock);
}
#endif /* CONFIG_SUSPEND */

/**
 * tick_init - initialize the tick control
 */
void __init tick_init(void)
{
	tick_broadcast_init();
	tick_nohz_init();
}