// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains functions which manage clock event devices.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 */

#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/device.h>

#include "tick-internal.h"

/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);
/* Protection for the above */
static DEFINE_RAW_SPINLOCK(clockevents_lock);
/* Protection for unbind operations */
static DEFINE_MUTEX(clockevents_mutex);

struct ce_unbind {
	struct clock_event_device *ce;
	int res;
};

static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
			bool ismax)
{
	u64 clc = (u64) latch << evt->shift;
	u64 rnd;

	if (WARN_ON(!evt->mult))
		evt->mult = 1;
	rnd = (u64) evt->mult - 1;

	/*
	 * Upper bound sanity check. If the backwards conversion is
	 * not equal to latch, we know that the above shift overflowed.
	 */
	if ((clc >> evt->shift) != (u64)latch)
		clc = ~0ULL;

	/*
	 * Scaled math oddities:
	 *
	 * For mult <= (1 << shift) we can safely add mult - 1 to
	 * prevent integer rounding loss. So the backwards conversion
	 * from nsec to device ticks will be correct.
	 *
	 * For mult > (1 << shift), i.e. device frequency is > 1GHz we
	 * need to be careful. Adding mult - 1 will result in a value
	 * which when converted back to device ticks can be larger
	 * than latch by up to (mult - 1) >> shift. For the min_delta
	 * calculation we still want to apply this in order to stay
	 * above the minimum device ticks limit. For the upper limit
	 * we would end up with a latch value larger than the upper
	 * limit of the device, so we omit the add to stay below the
	 * device upper boundary.
	 *
	 * Also omit the add if it would overflow the u64 boundary.
	 */
	if ((~0ULL - clc > rnd) &&
	    (!ismax || evt->mult <= (1ULL << evt->shift)))
		clc += rnd;

	do_div(clc, evt->mult);

	/* Deltas less than 1usec are pointless noise */
	return clc > 1000 ? clc : 1000;
}
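
/*
 * Worked example (illustrative numbers, not taken from a real device):
 * a 500MHz device is typically scaled with mult = 1U << 31, shift = 32,
 * i.e. one device tick is 2ns. cev_delta2ns(10, evt, false) then
 * computes ((10ULL << 32) + mult - 1) / mult = 20ns, and the backwards
 * conversion (20 * mult) >> shift yields exactly 10 ticks again. For a
 * 2GHz device (say mult = 1U << 31, shift = 30, so mult > 1 << shift)
 * the rounding add can overshoot: latch = 11 converts to 6ns, which
 * converts back to 12 ticks. That one-tick excess is fine for the
 * min_delta bound but would exceed the hardware limit for max_delta,
 * which is why the add is skipped when ismax is true.
 */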

/**
 * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
 * @latch: value to convert
 * @evt: pointer to clock event device descriptor
 *
 * Math helper, returns latch value converted to nanoseconds (bound checked)
 */
u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{
	return cev_delta2ns(latch, evt, false);
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);

static int __clockevents_switch_state(struct clock_event_device *dev,
				      enum clock_event_state state)
{
	if (dev->features & CLOCK_EVT_FEAT_DUMMY)
		return 0;

	/* Transition with new state-specific callbacks */
	switch (state) {
	case CLOCK_EVT_STATE_DETACHED:
		/* The clockevent device is getting replaced. Shut it down. */

	case CLOCK_EVT_STATE_SHUTDOWN:
		if (dev->set_state_shutdown)
			return dev->set_state_shutdown(dev);
		return 0;

	case CLOCK_EVT_STATE_PERIODIC:
		/* Core internal bug */
		if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC))
			return -ENOSYS;
		if (dev->set_state_periodic)
			return dev->set_state_periodic(dev);
		return 0;

	case CLOCK_EVT_STATE_ONESHOT:
		/* Core internal bug */
		if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
			return -ENOSYS;
		if (dev->set_state_oneshot)
			return dev->set_state_oneshot(dev);
		return 0;

	case CLOCK_EVT_STATE_ONESHOT_STOPPED:
		/* Core internal bug */
		if (WARN_ONCE(!clockevent_state_oneshot(dev),
			      "Current state: %d\n",
			      clockevent_get_state(dev)))
			return -EINVAL;

		if (dev->set_state_oneshot_stopped)
			return dev->set_state_oneshot_stopped(dev);
		else
			return -ENOSYS;

	default:
		return -ENOSYS;
	}
}

/**
 * clockevents_switch_state - set the operating state of a clock event device
 * @dev: device to modify
 * @state: new state
 *
 * Must be called with interrupts disabled !
 */
void clockevents_switch_state(struct clock_event_device *dev,
			      enum clock_event_state state)
{
	if (clockevent_get_state(dev) != state) {
		if (__clockevents_switch_state(dev, state))
			return;

		clockevent_set_state(dev, state);

		/*
		 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
		 * on it, so fix it up and emit a warning:
		 */
		if (clockevent_state_oneshot(dev)) {
			if (WARN_ON(!dev->mult))
				dev->mult = 1;
		}
	}
}

/**
 * clockevents_shutdown - shutdown the device and clear next_event
 * @dev: device to shutdown
 */
void clockevents_shutdown(struct clock_event_device *dev)
{
	clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
	dev->next_event = KTIME_MAX;
}

/**
 * clockevents_tick_resume - Resume the tick device before using it again
 * @dev: device to resume
 */
int clockevents_tick_resume(struct clock_event_device *dev)
{
	int ret = 0;

	if (dev->tick_resume)
		ret = dev->tick_resume(dev);

	return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST

/* Limit min_delta to a jiffy */
#define MIN_DELTA_LIMIT		(NSEC_PER_SEC / HZ)

/**
 * clockevents_increase_min_delta - raise minimum delta of a clock event device
 * @dev: device to increase the minimum delta
 *
 * Returns 0 on success, -ETIME when the minimum delta reached the limit.
 */
static int clockevents_increase_min_delta(struct clock_event_device *dev)
{
	/* Nothing to do if we already reached the limit */
	if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
		printk_deferred(KERN_WARNING
				"CE: Reprogramming failure. Giving up\n");
		dev->next_event = KTIME_MAX;
		return -ETIME;
	}

	if (dev->min_delta_ns < 5000)
		dev->min_delta_ns = 5000;
	else
		dev->min_delta_ns += dev->min_delta_ns >> 1;

	if (dev->min_delta_ns > MIN_DELTA_LIMIT)
		dev->min_delta_ns = MIN_DELTA_LIMIT;

	printk_deferred(KERN_WARNING
			"CE: %s increased min_delta_ns to %llu nsec\n",
			dev->name ? dev->name : "?",
			(unsigned long long) dev->min_delta_ns);
	return 0;
}
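
/*
 * Illustration (assuming HZ == 250, so MIN_DELTA_LIMIT is 4ms): repeated
 * reprogramming failures grow min_delta_ns by a factor of 1.5 per call,
 * i.e. 5000 -> 7500 -> 11250 -> ... nsec, until the value is clamped to
 * 4000000 nsec; the next failure after that gives up with -ETIME.
 */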

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev: device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta;
	int i;

	for (i = 0;;) {
		delta = dev->min_delta_ns;
		dev->next_event = ktime_add_ns(ktime_get(), delta);

		if (clockevent_state_shutdown(dev))
			return 0;

		dev->retries++;
		clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
		if (dev->set_next_event((unsigned long) clc, dev) == 0)
			return 0;

		if (++i > 2) {
			/*
			 * We tried 3 times to program the device with the
			 * given min_delta_ns. Try to increase the minimum
			 * delta, if that fails as well get out of here.
			 */
			if (clockevents_increase_min_delta(dev))
				return -ETIME;
			i = 0;
		}
	}
}

#else  /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev: device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta = 0;
	int i;

	for (i = 0; i < 10; i++) {
		delta += dev->min_delta_ns;
		dev->next_event = ktime_add_ns(ktime_get(), delta);

		if (clockevent_state_shutdown(dev))
			return 0;

		dev->retries++;
		clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
		if (dev->set_next_event((unsigned long) clc, dev) == 0)
			return 0;
	}
	return -ETIME;
}

#endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_event - Reprogram the clock event device.
 * @dev: device to program
 * @expires: absolute expiry time (monotonic clock)
 * @force: program minimum delay if expires can not be set
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
			      bool force)
{
	unsigned long long clc;
	int64_t delta;
	int rc;

	if (WARN_ON_ONCE(expires < 0))
		return -ETIME;

	dev->next_event = expires;

	if (clockevent_state_shutdown(dev))
		return 0;

	/* We must be in ONESHOT state here */
	WARN_ONCE(!clockevent_state_oneshot(dev), "Current state: %d\n",
		  clockevent_get_state(dev));

	/* Shortcut for clockevent devices that can deal with ktime. */
	if (dev->features & CLOCK_EVT_FEAT_KTIME)
		return dev->set_next_ktime(expires, dev);

	delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
	if (delta <= 0)
		return force ? clockevents_program_min_delta(dev) : -ETIME;

	delta = min(delta, (int64_t) dev->max_delta_ns);
	delta = max(delta, (int64_t) dev->min_delta_ns);

	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
	rc = dev->set_next_event((unsigned long) clc, dev);

	return (rc && force) ? clockevents_program_min_delta(dev) : rc;
}
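
/*
 * Usage sketch (hypothetical caller, compiled out; not part of the
 * clockevents core): program a oneshot device to fire 1ms from now and
 * fall back to the minimum delta instead of failing with -ETIME.
 */
#if 0
static void example_arm_timer(struct clock_event_device *dev)
{
	ktime_t expires = ktime_add_ns(ktime_get(), NSEC_PER_MSEC);

	/* force == true: program min_delta if expires is already too close */
	clockevents_program_event(dev, expires, true);
}
#endif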
338 | |
339 | /* |
340 | * Called after a notify add to make devices available which were |
341 | * released from the notifier call. |
342 | */ |
343 | static void clockevents_notify_released(void) |
344 | { |
345 | struct clock_event_device *dev; |
346 | |
347 | while (!list_empty(head: &clockevents_released)) { |
348 | dev = list_entry(clockevents_released.next, |
349 | struct clock_event_device, list); |
350 | list_move(list: &dev->list, head: &clockevent_devices); |
351 | tick_check_new_device(dev); |
352 | } |
353 | } |
354 | |
355 | /* |
356 | * Try to install a replacement clock event device |
357 | */ |
358 | static int clockevents_replace(struct clock_event_device *ced) |
359 | { |
360 | struct clock_event_device *dev, *newdev = NULL; |
361 | |
362 | list_for_each_entry(dev, &clockevent_devices, list) { |
363 | if (dev == ced || !clockevent_state_detached(dev)) |
364 | continue; |
365 | |
366 | if (!tick_check_replacement(curdev: newdev, newdev: dev)) |
367 | continue; |
368 | |
369 | if (!try_module_get(module: dev->owner)) |
370 | continue; |
371 | |
372 | if (newdev) |
373 | module_put(module: newdev->owner); |
374 | newdev = dev; |
375 | } |
376 | if (newdev) { |
377 | tick_install_replacement(dev: newdev); |
378 | list_del_init(entry: &ced->list); |
379 | } |
380 | return newdev ? 0 : -EBUSY; |
381 | } |
382 | |
383 | /* |
384 | * Called with clockevents_mutex and clockevents_lock held |
385 | */ |
386 | static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu) |
387 | { |
388 | /* Fast track. Device is unused */ |
389 | if (clockevent_state_detached(dev: ced)) { |
390 | list_del_init(entry: &ced->list); |
391 | return 0; |
392 | } |
393 | |
394 | return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY; |
395 | } |
396 | |
397 | /* |
398 | * SMP function call to unbind a device |
399 | */ |
400 | static void __clockevents_unbind(void *arg) |
401 | { |
402 | struct ce_unbind *cu = arg; |
403 | int res; |
404 | |
405 | raw_spin_lock(&clockevents_lock); |
406 | res = __clockevents_try_unbind(ced: cu->ce, smp_processor_id()); |
407 | if (res == -EAGAIN) |
408 | res = clockevents_replace(ced: cu->ce); |
409 | cu->res = res; |
410 | raw_spin_unlock(&clockevents_lock); |
411 | } |
412 | |
413 | /* |
414 | * Issues smp function call to unbind a per cpu device. Called with |
415 | * clockevents_mutex held. |
416 | */ |
417 | static int clockevents_unbind(struct clock_event_device *ced, int cpu) |
418 | { |
419 | struct ce_unbind cu = { .ce = ced, .res = -ENODEV }; |
420 | |
421 | smp_call_function_single(cpuid: cpu, func: __clockevents_unbind, info: &cu, wait: 1); |
422 | return cu.res; |
423 | } |
424 | |
425 | /* |
426 | * Unbind a clockevents device. |
427 | */ |
428 | int clockevents_unbind_device(struct clock_event_device *ced, int cpu) |
429 | { |
430 | int ret; |
431 | |
432 | mutex_lock(&clockevents_mutex); |
433 | ret = clockevents_unbind(ced, cpu); |
434 | mutex_unlock(lock: &clockevents_mutex); |
435 | return ret; |
436 | } |
437 | EXPORT_SYMBOL_GPL(clockevents_unbind_device); |

/**
 * clockevents_register_device - register a clock event device
 * @dev: device to register
 */
void clockevents_register_device(struct clock_event_device *dev)
{
	unsigned long flags;

	/* Initialize state to DETACHED */
	clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);

	if (!dev->cpumask) {
		WARN_ON(num_possible_cpus() > 1);
		dev->cpumask = cpumask_of(smp_processor_id());
	}

	if (dev->cpumask == cpu_all_mask) {
		WARN(1, "%s cpumask == cpu_all_mask, using cpu_possible_mask instead\n",
		     dev->name);
		dev->cpumask = cpu_possible_mask;
	}

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	list_add(&dev->list, &clockevent_devices);
	tick_check_new_device(dev);
	clockevents_notify_released();

	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);

static void clockevents_config(struct clock_event_device *dev, u32 freq)
{
	u64 sec;

	if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return;

	/*
	 * Calculate the maximum number of seconds we can sleep. Limit
	 * to 10 minutes for hardware which can program more than
	 * 32bit ticks so we still get reasonable conversion values.
	 */
	sec = dev->max_delta_ticks;
	do_div(sec, freq);
	if (!sec)
		sec = 1;
	else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
		sec = 600;

	clockevents_calc_mult_shift(dev, freq, sec);
	dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
	dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
}

/**
 * clockevents_config_and_register - Configure and register a clock event device
 * @dev: device to register
 * @freq: The clock frequency
 * @min_delta: The minimum clock ticks to program in oneshot mode
 * @max_delta: The maximum clock ticks to program in oneshot mode
 *
 * min/max_delta can be 0 for devices which do not support oneshot mode.
 */
void clockevents_config_and_register(struct clock_event_device *dev,
				     u32 freq, unsigned long min_delta,
				     unsigned long max_delta)
{
	dev->min_delta_ticks = min_delta;
	dev->max_delta_ticks = max_delta;
	clockevents_config(dev, freq);
	clockevents_register_device(dev);
}
EXPORT_SYMBOL_GPL(clockevents_config_and_register);
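
/*
 * Example (hypothetical driver, compiled out; all "example_" names are
 * assumptions): a minimal oneshot-capable device running at 1MHz which
 * can program between 2 and 0xffffffff device ticks.
 * clockevents_config_and_register() derives mult/shift and
 * min/max_delta_ns from these raw values before registration.
 */
#if 0
static int example_set_next_event(unsigned long ticks,
				  struct clock_event_device *ce)
{
	/* Write 'ticks' into the hardware comparator here */
	return 0;
}

static struct clock_event_device example_ce = {
	.name		= "example-timer",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 300,
	.set_next_event	= example_set_next_event,
};

static void __init example_timer_init(void)
{
	example_ce.cpumask = cpumask_of(smp_processor_id());
	clockevents_config_and_register(&example_ce, 1000000, 2, 0xffffffff);
}
#endif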

int __clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	clockevents_config(dev, freq);

	if (clockevent_state_oneshot(dev))
		return clockevents_program_event(dev, dev->next_event, false);

	if (clockevent_state_periodic(dev))
		return __clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC);

	return 0;
}

/**
 * clockevents_update_freq - Update frequency and reprogram a clock event device.
 * @dev: device to modify
 * @freq: new device frequency
 *
 * Reconfigure and reprogram a clock event device in oneshot
 * mode. Must be called on the cpu for which the device delivers per
 * cpu timer events. If called for the broadcast device the core takes
 * care of serialization.
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = tick_broadcast_update_freq(dev, freq);
	if (ret == -ENODEV)
		ret = __clockevents_update_freq(dev, freq);
	local_irq_restore(flags);
	return ret;
}

/*
 * Noop handler when we shut down an event device
 */
void clockevents_handle_noop(struct clock_event_device *dev)
{
}

/**
 * clockevents_exchange_device - release and request clock devices
 * @old: device to release (can be NULL)
 * @new: device to request (can be NULL)
 *
 * Called from various tick functions with clockevents_lock held and
 * interrupts disabled.
 */
void clockevents_exchange_device(struct clock_event_device *old,
				 struct clock_event_device *new)
{
	/*
	 * Caller releases a clock event device. We queue it into the
	 * released list and do a notify add later.
	 */
	if (old) {
		module_put(old->owner);
		clockevents_switch_state(old, CLOCK_EVT_STATE_DETACHED);
		list_move(&old->list, &clockevents_released);
	}

	if (new) {
		BUG_ON(!clockevent_state_detached(new));
		clockevents_shutdown(new);
	}
}

/**
 * clockevents_suspend - suspend clock devices
 */
void clockevents_suspend(void)
{
	struct clock_event_device *dev;

	list_for_each_entry_reverse(dev, &clockevent_devices, list)
		if (dev->suspend && !clockevent_state_detached(dev))
			dev->suspend(dev);
}

/**
 * clockevents_resume - resume clock devices
 */
void clockevents_resume(void)
{
	struct clock_event_device *dev;

	list_for_each_entry(dev, &clockevent_devices, list)
		if (dev->resume && !clockevent_state_detached(dev))
			dev->resume(dev);
}

#ifdef CONFIG_HOTPLUG_CPU

# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
/**
 * tick_offline_cpu - Take CPU out of the broadcast mechanism
 * @cpu: The outgoing CPU
 *
 * Called on the outgoing CPU after it took itself offline.
 */
void tick_offline_cpu(unsigned int cpu)
{
	raw_spin_lock(&clockevents_lock);
	tick_broadcast_offline(cpu);
	raw_spin_unlock(&clockevents_lock);
}
# endif

/**
 * tick_cleanup_dead_cpu - Cleanup the tick and clockevents of a dead cpu
 * @cpu: The dead CPU
 */
void tick_cleanup_dead_cpu(int cpu)
{
	struct clock_event_device *dev, *tmp;
	unsigned long flags;

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	tick_shutdown(cpu);
	/*
	 * Unregister the clock event devices which were
	 * released from the users in the notify chain.
	 */
	list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
		list_del(&dev->list);
	/*
	 * Now check whether the CPU has left unused per cpu devices
	 */
	list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
		if (cpumask_test_cpu(cpu, dev->cpumask) &&
		    cpumask_weight(dev->cpumask) == 1 &&
		    !tick_is_broadcast_device(dev)) {
			BUG_ON(!clockevent_state_detached(dev));
			list_del(&dev->list);
		}
	}
	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
#endif

#ifdef CONFIG_SYSFS
static struct bus_type clockevents_subsys = {
	.name		= "clockevents",
	.dev_name	= "clockevent",
};

static DEFINE_PER_CPU(struct device, tick_percpu_dev);
static struct tick_device *tick_get_tick_dev(struct device *dev);

static ssize_t current_device_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct tick_device *td;
	ssize_t count = 0;

	raw_spin_lock_irq(&clockevents_lock);
	td = tick_get_tick_dev(dev);
	if (td && td->evtdev)
		count = snprintf(buf, PAGE_SIZE, "%s\n", td->evtdev->name);
	raw_spin_unlock_irq(&clockevents_lock);
	return count;
}
static DEVICE_ATTR_RO(current_device);

/* We don't support the abomination of removable broadcast devices */
static ssize_t unbind_device_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	char name[CS_NAME_LEN];
	ssize_t ret = sysfs_get_uname(buf, name, count);
	struct clock_event_device *ce = NULL, *iter;

	if (ret < 0)
		return ret;

	ret = -ENODEV;
	mutex_lock(&clockevents_mutex);
	raw_spin_lock_irq(&clockevents_lock);
	list_for_each_entry(iter, &clockevent_devices, list) {
		if (!strcmp(iter->name, name)) {
			ret = __clockevents_try_unbind(iter, dev->id);
			ce = iter;
			break;
		}
	}
	raw_spin_unlock_irq(&clockevents_lock);
	/*
	 * We hold clockevents_mutex, so ce can't go away
	 */
	if (ret == -EAGAIN)
		ret = clockevents_unbind(ce, dev->id);
	mutex_unlock(&clockevents_mutex);
	return ret ? ret : count;
}
static DEVICE_ATTR_WO(unbind_device);
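
/*
 * With the device attributes above, each CPU gets a sysfs node like
 * /sys/devices/system/clockevents/clockeventN/. Reading current_device
 * shows the active clock event device; writing a registered device name
 * to unbind_device unbinds it, e.g. (device name illustrative):
 *
 *	echo hpet > /sys/devices/system/clockevents/clockevent0/unbind_device
 */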

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static struct device tick_bc_dev = {
	.init_name	= "broadcast",
	.id		= 0,
	.bus		= &clockevents_subsys,
};

static struct tick_device *tick_get_tick_dev(struct device *dev)
{
	return dev == &tick_bc_dev ? tick_get_broadcast_device() :
		&per_cpu(tick_cpu_device, dev->id);
}

static __init int tick_broadcast_init_sysfs(void)
{
	int err = device_register(&tick_bc_dev);

	if (!err)
		err = device_create_file(&tick_bc_dev, &dev_attr_current_device);
	return err;
}
#else
static struct tick_device *tick_get_tick_dev(struct device *dev)
{
	return &per_cpu(tick_cpu_device, dev->id);
}
static inline int tick_broadcast_init_sysfs(void) { return 0; }
#endif

static int __init tick_init_sysfs(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct device *dev = &per_cpu(tick_percpu_dev, cpu);
		int err;

		dev->id = cpu;
		dev->bus = &clockevents_subsys;
		err = device_register(dev);
		if (!err)
			err = device_create_file(dev, &dev_attr_current_device);
		if (!err)
			err = device_create_file(dev, &dev_attr_unbind_device);
		if (err)
			return err;
	}
	return tick_broadcast_init_sysfs();
}

static int __init clockevents_init_sysfs(void)
{
	int err = subsys_system_register(&clockevents_subsys, NULL);

	if (!err)
		err = tick_init_sysfs();
	return err;
}
device_initcall(clockevents_init_sysfs);
#endif /* SYSFS */