// SPDX-License-Identifier: GPL-2.0
/*
 * Generic sched_clock() support, to extend low level hardware time
 * counters to full 64-bit ns values.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/math.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/syscore_ops.h>
#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
#include <linux/bitops.h>

#include "timekeeping.h"

/**
 * struct clock_data - all data needed for sched_clock() (including
 *                     registration of a new clock source)
 *
 * @seq:	Sequence counter for protecting updates. The lowest
 *		bit is the index for @read_data.
 * @read_data:	Data required to read from sched_clock.
 * @wrap_kt:	Duration for which clock can run before wrapping.
 * @rate:	Tick rate of the registered clock.
 * @actual_read_sched_clock: Registered hardware level clock read function.
 *
 * The ordering of this structure has been chosen to optimize cache
 * performance. In particular 'seq' and 'read_data[0]' (combined) should fit
 * into a single 64-byte cache line.
 */
struct clock_data {
	seqcount_latch_t seq;
	struct clock_read_data read_data[2];
	ktime_t wrap_kt;
	unsigned long rate;

	u64 (*actual_read_sched_clock)(void);
};

static struct hrtimer sched_clock_timer;
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);

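/*
 * Default clock read function, used until a real hardware counter is
 * registered: it simply reports jiffies elapsed since boot.
 */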
static u64 notrace jiffy_sched_clock_read(void)
{
	/*
	 * We don't need to use get_jiffies_64 on 32-bit arches here
	 * because we register with BITS_PER_LONG
	 */
	return (u64)(jiffies - INITIAL_JIFFIES);
}

static struct clock_data cd ____cacheline_aligned = {
	.read_data[0] = { .mult = NSEC_PER_SEC / HZ,
			  .read_sched_clock = jiffy_sched_clock_read, },
	.actual_read_sched_clock = jiffy_sched_clock_read,
};

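/*
 * Convert a counter delta to nanoseconds using the multiply/shift pair
 * computed when the clock was registered: ns = (cyc * mult) >> shift.
 */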
static __always_inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}

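/*
 * sched_clock_read_begin()/sched_clock_read_retry() expose the latch
 * protocol to external readers. Callers are expected to loop in the
 * same way sched_clock_noinstr() does below:
 *
 *	do {
 *		rd = sched_clock_read_begin(&seq);
 *		... use *rd ...
 *	} while (sched_clock_read_retry(seq));
 */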
notrace struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
{
	*seq = raw_read_seqcount_latch(&cd.seq);
	return cd.read_data + (*seq & 1);
}

notrace int sched_clock_read_retry(unsigned int seq)
{
	return raw_read_seqcount_latch_retry(&cd.seq, seq);
}

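/*
 * Core clock read: pick the latch copy selected by the sequence count,
 * compute the (masked) cycle delta since the last epoch update and scale
 * it to nanoseconds. The latch scheme keeps this safe even in NMI context.
 */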
unsigned long long noinstr sched_clock_noinstr(void)
{
	struct clock_read_data *rd;
	unsigned int seq;
	u64 cyc, res;

	do {
		seq = raw_read_seqcount_latch(&cd.seq);
		rd = cd.read_data + (seq & 1);

		cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
		      rd->sched_clock_mask;
		res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
	} while (raw_read_seqcount_latch_retry(&cd.seq, seq));

	return res;
}

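/*
 * Regular, traceable entry point: wrap the noinstr core with preemption
 * disabled so the whole read completes on a single CPU.
 */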
unsigned long long notrace sched_clock(void)
{
	unsigned long long ns;
	preempt_disable_notrace();
	ns = sched_clock_noinstr();
	preempt_enable_notrace();
	return ns;
}

/*
 * Updating the data required to read the clock.
 *
 * sched_clock() will never observe mis-matched data even if called from
 * an NMI. We do this by maintaining an odd/even copy of the data and
 * steering sched_clock() to one or the other using a sequence counter.
 * In order to preserve the data cache profile of sched_clock() as much
 * as possible the system reverts back to the even copy when the update
 * completes; the odd copy is used *only* during an update.
 */
static void update_clock_read_data(struct clock_read_data *rd)
{
	/* update the backup (odd) copy with the new data */
	cd.read_data[1] = *rd;

	/* steer readers towards the odd copy */
	raw_write_seqcount_latch(&cd.seq);

	/* now it's safe for us to update the normal (even) copy */
	cd.read_data[0] = *rd;

	/* switch readers back to the even copy */
	raw_write_seqcount_latch(&cd.seq);
}

/*
 * Atomically update the sched_clock() epoch.
 */
static void update_sched_clock(void)
{
	u64 cyc;
	u64 ns;
	struct clock_read_data rd;

	rd = cd.read_data[0];

	cyc = cd.actual_read_sched_clock();
	ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask,
				     rd.mult, rd.shift);

	rd.epoch_ns = ns;
	rd.epoch_cyc = cyc;

	update_clock_read_data(&rd);
}

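/*
 * Periodic hrtimer callback: refresh the epoch before the underlying
 * counter can wrap, then re-arm the timer for another wrap period.
 */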
static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
	update_sched_clock();
	hrtimer_forward_now(hrt, cd.wrap_kt);

	return HRTIMER_RESTART;
}

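/*
 * Register a new hardware counter as the sched_clock() source. A new
 * clock is only accepted if it is at least as fast as the one currently
 * registered; slower clocks are silently ignored.
 */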
void __init
sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
{
	u64 res, wrap, new_mask, new_epoch, cyc, ns;
	u32 new_mult, new_shift;
	unsigned long r, flags;
	char r_unit;
	struct clock_read_data rd;

	if (cd.rate > rate)
		return;

	/* Cannot register a sched_clock with interrupts on */
	local_irq_save(flags);

	/* Calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);

	new_mask = CLOCKSOURCE_MASK(bits);
	cd.rate = rate;

	/* Calculate how many nanosecs until we risk wrapping */
	wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask, NULL);
	cd.wrap_kt = ns_to_ktime(wrap);

	rd = cd.read_data[0];

	/* Update epoch for new counter and update 'epoch_ns' from old counter */
	new_epoch = read();
	cyc = cd.actual_read_sched_clock();
	ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask,
				     rd.mult, rd.shift);
	cd.actual_read_sched_clock = read;

	rd.read_sched_clock = read;
	rd.sched_clock_mask = new_mask;
	rd.mult = new_mult;
	rd.shift = new_shift;
	rd.epoch_cyc = new_epoch;
	rd.epoch_ns = ns;

	update_clock_read_data(&rd);

	if (sched_clock_timer.function != NULL) {
		/* update timeout for clock wrap */
		hrtimer_start(&sched_clock_timer, cd.wrap_kt,
			      HRTIMER_MODE_REL_HARD);
	}

	r = rate;
	if (r >= 4000000) {
		r = DIV_ROUND_CLOSEST(r, 1000000);
		r_unit = 'M';
	} else if (r >= 4000) {
		r = DIV_ROUND_CLOSEST(r, 1000);
		r_unit = 'k';
	} else {
		r_unit = ' ';
	}

	/* Calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, new_mult, new_shift);

	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
		bits, r, r_unit, res, wrap);

	/* Enable IRQ time accounting if we have a fast enough sched_clock() */
	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
		enable_sched_clock_irqtime();

	local_irq_restore(flags);

	pr_debug("Registered %pS as sched_clock source\n", read);
}

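/*
 * Called once during early boot: fall back to the jiffy counter if no
 * hardware clock has been registered, then start the wrap-avoidance timer.
 */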
void __init generic_sched_clock_init(void)
{
	/*
	 * If no sched_clock() function has been provided by this point,
	 * make the jiffy-based clock the final one.
	 */
	if (cd.actual_read_sched_clock == jiffy_sched_clock_read)
		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);

	update_sched_clock();

	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * set the initial epoch.
	 */
	hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	sched_clock_timer.function = sched_clock_poll;
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
}

/*
 * Clock read function for use when the clock is suspended.
 *
 * This function makes it appear to sched_clock() as if the clock
 * stopped counting at its last update.
 *
 * This function must only be called from the critical
 * section in sched_clock(). It relies on the read_seqcount_retry()
 * at the end of the critical section to be sure we observe the
 * correct copy of 'epoch_cyc'.
 */
static u64 notrace suspended_sched_clock_read(void)
{
	unsigned int seq = raw_read_seqcount_latch(&cd.seq);

	return cd.read_data[seq & 1].epoch_cyc;
}

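/*
 * Freeze sched_clock() across suspend: take a final epoch update, stop
 * the wrap timer and steer reads to the frozen epoch value.
 */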
int sched_clock_suspend(void)
{
	struct clock_read_data *rd = &cd.read_data[0];

	update_sched_clock();
	hrtimer_cancel(&sched_clock_timer);
	rd->read_sched_clock = suspended_sched_clock_read;

	return 0;
}

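/*
 * Undo the suspend redirection: resync the epoch with the hardware
 * counter, restart the wrap timer and restore the real read function.
 */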
void sched_clock_resume(void)
{
	struct clock_read_data *rd = &cd.read_data[0];

	rd->epoch_cyc = cd.actual_read_sched_clock();
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
	rd->read_sched_clock = cd.actual_read_sched_clock;
}

static struct syscore_ops sched_clock_ops = {
	.suspend = sched_clock_suspend,
	.resume = sched_clock_resume,
};

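/*
 * Hook the suspend/resume callbacks into the syscore machinery so they
 * run with interrupts disabled, late in suspend and early in resume.
 */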
static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);

	return 0;
}
device_initcall(sched_clock_syscore_init);