// SPDX-License-Identifier: GPL-2.0
/*
 * Generic sched_clock() support, to extend low level hardware time
 * counters to full 64-bit ns values.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/math.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/syscore_ops.h>
#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
#include <linux/bitops.h>

#include "timekeeping.h"

/**
 * struct clock_data - all data needed for sched_clock() (including
 *                     registration of a new clock source)
 *
 * @seq:        Sequence counter for protecting updates. The lowest
 *              bit is the index for @read_data.
 * @read_data:  Data required to read from sched_clock.
 * @wrap_kt:    Duration for which clock can run before wrapping.
 * @rate:       Tick rate of the registered clock.
 * @actual_read_sched_clock: Registered hardware level clock read function.
 *
 * The ordering of this structure has been chosen to optimize cache
 * performance. In particular 'seq' and 'read_data[0]' (combined) should fit
 * into a single 64-byte cache line.
 */
struct clock_data {
        seqcount_latch_t        seq;
        struct clock_read_data  read_data[2];
        ktime_t                 wrap_kt;
        unsigned long           rate;

        u64 (*actual_read_sched_clock)(void);
};

static struct hrtimer sched_clock_timer;
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);

static u64 notrace jiffy_sched_clock_read(void)
{
        /*
         * We don't need to use get_jiffies_64 on 32-bit arches here
         * because we register with BITS_PER_LONG
         */
        return (u64)(jiffies - INITIAL_JIFFIES);
}

static struct clock_data cd ____cacheline_aligned = {
        .read_data[0] = { .mult = NSEC_PER_SEC / HZ,
                          .read_sched_clock = jiffy_sched_clock_read, },
        .actual_read_sched_clock = jiffy_sched_clock_read,
};
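
/*
 * Until a real clock source is registered, sched_clock() ticks at jiffy
 * resolution. For example, with HZ == 100 the initial mult is
 * NSEC_PER_SEC / HZ == 10000000 and shift is 0, so every jiffy advances
 * the clock by exactly 10 ms.
 */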

static __always_inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
        return (cyc * mult) >> shift;
}
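
/*
 * The mult/shift pair approximates the ns-per-cycle ratio as a binary
 * fraction. Worked example (illustrative values, not taken from any
 * particular platform): a 64 MHz counter could use mult = 125 and
 * shift = 3, since 125 / 2^3 == 15.625 ns per cycle, so
 * cyc_to_ns(1000, 125, 3) == 15625 ns.
 */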

notrace struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
{
        *seq = read_seqcount_latch(&cd.seq);
        return cd.read_data + (*seq & 1);
}

notrace int sched_clock_read_retry(unsigned int seq)
{
        return read_seqcount_latch_retry(&cd.seq, seq);
}
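
/*
 * Users of the two helpers above pair them in the usual seqcount-latch
 * read loop, along the lines of this sketch (mirroring __sched_clock()
 * below):
 *
 *      struct clock_read_data *rd;
 *      unsigned int seq;
 *      u64 cyc, ns;
 *
 *      do {
 *              rd = sched_clock_read_begin(&seq);
 *              cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
 *                    rd->sched_clock_mask;
 *              ns = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
 *      } while (sched_clock_read_retry(seq));
 */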

static __always_inline unsigned long long __sched_clock(void)
{
        struct clock_read_data *rd;
        unsigned int seq;
        u64 cyc, res;

        do {
                seq = raw_read_seqcount_latch(&cd.seq);
                rd = cd.read_data + (seq & 1);

                cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
                      rd->sched_clock_mask;
                res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
        } while (raw_read_seqcount_latch_retry(&cd.seq, seq));

        return res;
}

unsigned long long noinstr sched_clock_noinstr(void)
{
        return __sched_clock();
}

unsigned long long notrace sched_clock(void)
{
        unsigned long long ns;
        preempt_disable_notrace();
        /*
         * All of __sched_clock() is a seqcount_latch reader critical section,
         * but relies on the raw helpers which are uninstrumented. For KCSAN,
         * mark all accesses in __sched_clock() as atomic.
         */
        kcsan_nestable_atomic_begin();
        ns = __sched_clock();
        kcsan_nestable_atomic_end();
        preempt_enable_notrace();
        return ns;
}

/*
 * Updating the data required to read the clock.
 *
 * sched_clock() will never observe mis-matched data even if called from
 * an NMI. We do this by maintaining an odd/even copy of the data and
 * steering sched_clock() to one or the other using a sequence counter.
 * In order to preserve the data cache profile of sched_clock() as much
 * as possible the system reverts back to the even copy when the update
 * completes; the odd copy is used *only* during an update.
 */
static void update_clock_read_data(struct clock_read_data *rd)
{
        /* steer readers towards the odd copy */
        write_seqcount_latch_begin(&cd.seq);

        /* now it's safe for us to update the normal (even) copy */
        cd.read_data[0] = *rd;

        /* switch readers back to the even copy */
        write_seqcount_latch(&cd.seq);

        /* update the backup (odd) copy with the new data */
        cd.read_data[1] = *rd;

        write_seqcount_latch_end(&cd.seq);
}

/*
 * Atomically update the sched_clock() epoch.
 */
static void update_sched_clock(void)
{
        u64 cyc;
        u64 ns;
        struct clock_read_data rd;

        rd = cd.read_data[0];

        cyc = cd.actual_read_sched_clock();
        ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);

        rd.epoch_ns = ns;
        rd.epoch_cyc = cyc;

        update_clock_read_data(&rd);
}

static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
        update_sched_clock();
        hrtimer_forward_now(hrt, cd.wrap_kt);

        return HRTIMER_RESTART;
}

void __init
sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
{
        u64 res, wrap, new_mask, new_epoch, cyc, ns;
        u32 new_mult, new_shift;
        unsigned long r, flags;
        char r_unit;
        struct clock_read_data rd;

        if (cd.rate > rate)
                return;

        /* Cannot register a sched_clock with interrupts on */
        local_irq_save(flags);

        /* Calculate the mult/shift to convert counter ticks to ns. */
        clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);

        new_mask = CLOCKSOURCE_MASK(bits);
        cd.rate = rate;

        /* Calculate how many nanosecs until we risk wrapping */
        wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask, NULL);
        cd.wrap_kt = ns_to_ktime(wrap);

        rd = cd.read_data[0];

        /* Update epoch for new counter and update 'epoch_ns' from old counter */
        new_epoch = read();
        cyc = cd.actual_read_sched_clock();
        ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);
        cd.actual_read_sched_clock = read;

        rd.read_sched_clock = read;
        rd.sched_clock_mask = new_mask;
        rd.mult = new_mult;
        rd.shift = new_shift;
        rd.epoch_cyc = new_epoch;
        rd.epoch_ns = ns;

        update_clock_read_data(&rd);

        if (sched_clock_timer.function != NULL) {
                /* update timeout for clock wrap */
                hrtimer_start(&sched_clock_timer, cd.wrap_kt,
                              HRTIMER_MODE_REL_HARD);
        }

        r = rate;
        if (r >= 4000000) {
                r = DIV_ROUND_CLOSEST(r, 1000000);
                r_unit = 'M';
        } else if (r >= 4000) {
                r = DIV_ROUND_CLOSEST(r, 1000);
                r_unit = 'k';
        } else {
                r_unit = ' ';
        }

        /* Calculate the ns resolution of this counter */
        res = cyc_to_ns(1ULL, new_mult, new_shift);

        pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
                bits, r, r_unit, res, wrap);

        /* Enable IRQ time accounting if we have a fast enough sched_clock() */
        if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
                enable_sched_clock_irqtime();

        local_irq_restore(flags);

        pr_debug("Registered %pS as sched_clock source\n", read);
}
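
/*
 * Typical usage from a clocksource/timer driver once the counter rate is
 * known, sketched here for a hypothetical 56-bit counter running at
 * 50 MHz (my_counter_read() and my_read_counter_hw() are placeholders):
 *
 *      static u64 notrace my_counter_read(void)
 *      {
 *              return my_read_counter_hw();
 *      }
 *
 *      sched_clock_register(my_counter_read, 56, 50000000);
 *
 * Registrations with a lower rate than the currently registered clock
 * are ignored by the cd.rate check above.
 */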

void __init generic_sched_clock_init(void)
{
        /*
         * If no sched_clock() function has been provided by this point,
         * make the jiffy-based clock the final one.
         */
        if (cd.actual_read_sched_clock == jiffy_sched_clock_read)
                sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);

        update_sched_clock();

        /*
         * Start the timer to keep sched_clock() properly updated and
         * set the initial epoch.
         */
        hrtimer_setup(&sched_clock_timer, sched_clock_poll, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
        hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
}

/*
 * Clock read function for use when the clock is suspended.
 *
 * This function makes it appear to sched_clock() as if the clock
 * stopped counting at its last update: it returns the cached
 * 'epoch_cyc', so the cycle delta computed in __sched_clock() is zero
 * and the result stays pinned at 'epoch_ns'.
 *
 * This function must only be called from the critical
 * section in sched_clock(). It relies on the read_seqcount_retry()
 * at the end of the critical section to be sure we observe the
 * correct copy of 'epoch_cyc'.
 */
static u64 notrace suspended_sched_clock_read(void)
{
        unsigned int seq = read_seqcount_latch(&cd.seq);

        return cd.read_data[seq & 1].epoch_cyc;
}

int sched_clock_suspend(void)
{
        struct clock_read_data *rd = &cd.read_data[0];

        update_sched_clock();
        hrtimer_cancel(&sched_clock_timer);
        rd->read_sched_clock = suspended_sched_clock_read;

        return 0;
}

void sched_clock_resume(void)
{
        struct clock_read_data *rd = &cd.read_data[0];

        rd->epoch_cyc = cd.actual_read_sched_clock();
        hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
        rd->read_sched_clock = cd.actual_read_sched_clock;
}

static struct syscore_ops sched_clock_ops = {
        .suspend = sched_clock_suspend,
        .resume = sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
        register_syscore_ops(&sched_clock_ops);

        return 0;
}
device_initcall(sched_clock_syscore_init);