1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _LINUX_SCHED_CLOCK_H
3#define _LINUX_SCHED_CLOCK_H
4
5#include <linux/smp.h>
6
7/*
8 * Do not use outside of architecture code which knows its limitations.
9 *
 * sched_clock() has no promise of monotonicity or bounded drift between
 * CPUs; using it directly (which you should not) requires disabling IRQs.
12 *
13 * Please use one of the three interfaces below.
14 */
15extern u64 sched_clock(void);
16
17#if defined(CONFIG_ARCH_WANTS_NO_INSTR) || defined(CONFIG_GENERIC_SCHED_CLOCK)
18extern u64 sched_clock_noinstr(void);
19#else
/*
 * Neither the architecture nor the generic sched_clock framework provides
 * an instrumentation-safe variant (see the #if above), so fall back to
 * plain sched_clock().
 */
static __always_inline u64 sched_clock_noinstr(void)
{
	return sched_clock();
}
24#endif
25
26/*
27 * See the comment in kernel/sched/clock.c
28 */
29extern u64 running_clock(void);
30extern u64 sched_clock_cpu(int cpu);
31
32
33extern void sched_clock_init(void);
34
35#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
/*
 * Without CONFIG_HAVE_UNSTABLE_SCHED_CLOCK there is no per-tick clock
 * state to maintain, so this is a no-op.
 */
static inline void sched_clock_tick(void)
{
}
39
/*
 * No-op: without CONFIG_HAVE_UNSTABLE_SCHED_CLOCK there is no stability
 * state to clear (compare the extern variant in the #else branch below).
 */
static inline void clear_sched_clock_stable(void)
{
}
43
/*
 * No-op: without CONFIG_HAVE_UNSTABLE_SCHED_CLOCK the clock needs no
 * notification when a CPU enters idle.
 */
static inline void sched_clock_idle_sleep_event(void)
{
}
47
/*
 * No-op: without CONFIG_HAVE_UNSTABLE_SCHED_CLOCK the clock needs no
 * notification when a CPU wakes from idle.
 */
static inline void sched_clock_idle_wakeup_event(void)
{
}
51
/*
 * Without CONFIG_HAVE_UNSTABLE_SCHED_CLOCK there is no per-CPU clock
 * handling: @cpu is deliberately ignored and plain sched_clock() is
 * returned for every CPU.
 */
static inline u64 cpu_clock(int cpu)
{
	return sched_clock();
}
56
/*
 * Without CONFIG_HAVE_UNSTABLE_SCHED_CLOCK the local clock is just
 * sched_clock(); use the noinstr-safe entry point declared above.
 */
static __always_inline u64 local_clock_noinstr(void)
{
	return sched_clock_noinstr();
}
61
/*
 * Without CONFIG_HAVE_UNSTABLE_SCHED_CLOCK the local clock is simply
 * sched_clock() — no per-CPU offset is applied.
 */
static __always_inline u64 local_clock(void)
{
	return sched_clock();
}
66#else
67extern int sched_clock_stable(void);
68extern void clear_sched_clock_stable(void);
69
70/*
71 * When sched_clock_stable(), __sched_clock_offset provides the offset
72 * between local_clock() and sched_clock().
73 */
74extern u64 __sched_clock_offset;
75
76extern void sched_clock_tick(void);
77extern void sched_clock_tick_stable(void);
78extern void sched_clock_idle_sleep_event(void);
79extern void sched_clock_idle_wakeup_event(void);
80
81/*
82 * As outlined in clock.c, provides a fast, high resolution, nanosecond
83 * time source that is monotonic per cpu argument and has bounded drift
84 * between cpus.
85 *
86 * ######################### BIG FAT WARNING ##########################
87 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
88 * # go backwards !! #
89 * ####################################################################
90 */
/*
 * With CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, defer to sched_clock_cpu() for
 * the per-CPU filtered clock (see the BIG FAT WARNING above about
 * comparing values across CPUs).
 */
static inline u64 cpu_clock(int cpu)
{
	return sched_clock_cpu(cpu);
}
95
96extern u64 local_clock_noinstr(void);
97extern u64 local_clock(void);
98
99#endif
100
101#ifdef CONFIG_IRQ_TIME_ACCOUNTING
102/*
103 * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
104 * The reason for this explicit opt-in is not to have perf penalty with
105 * slow sched_clocks.
106 */
107extern void enable_sched_clock_irqtime(void);
108extern void disable_sched_clock_irqtime(void);
109#else
/*
 * IRQ time accounting is compiled out (!CONFIG_IRQ_TIME_ACCOUNTING);
 * the opt-in/opt-out hooks collapse to no-ops.
 */
static inline void enable_sched_clock_irqtime(void)
{
}

static inline void disable_sched_clock_irqtime(void)
{
}
112#endif
113
114#endif /* _LINUX_SCHED_CLOCK_H */
115

/* Source: linux/include/linux/sched/clock.h */