// SPDX-License-Identifier: GPL-2.0
/*
 * NTP state machine interfaces and logic.
 *
 * This code was mainly moved from kernel/timer.c and kernel/time.c
 * Please see those files for relevant copyright info and historical
 * changelogs.
 */
#include <linux/capability.h>
#include <linux/clocksource.h>
#include <linux/workqueue.h>
#include <linux/hrtimer.h>
#include <linux/jiffies.h>
#include <linux/math64.h>
#include <linux/timex.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/audit.h>

#include "ntp_internal.h"
#include "timekeeping_internal.h"

/**
 * struct ntp_data - Structure holding all NTP related state
 * @tick_usec:		USER_HZ period in microseconds
 * @tick_length:	Adjusted tick length
 * @tick_length_base:	Base value for @tick_length
 * @time_state:		State of the clock synchronization
 * @time_status:	Clock status bits
 * @time_offset:	Time adjustment in nanoseconds
 * @time_constant:	PLL time constant
 * @time_maxerror:	Maximum error in microseconds holding the NTP sync distance
 *			(NTP dispersion + delay / 2)
 * @time_esterror:	Estimated error in microseconds holding NTP dispersion
 * @time_freq:		Frequency offset scaled nsecs/secs
 * @time_reftime:	Time at last adjustment in seconds
 * @time_adjust:	Adjustment value
 * @ntp_tick_adj:	Constant boot-param configurable NTP tick adjustment (upscaled)
 * @ntp_next_leap_sec:	Second value of the next pending leapsecond, or TIME64_MAX if no leap
 *
 * @pps_valid:		PPS signal watchdog counter
 * @pps_tf:		PPS phase median filter
 * @pps_jitter:		PPS current jitter in nanoseconds
 * @pps_fbase:		PPS beginning of the last freq interval
 * @pps_shift:		PPS current interval duration in seconds (shift value)
 * @pps_intcnt:		PPS interval counter
 * @pps_freq:		PPS frequency offset in scaled ns/s
 * @pps_stabil:		PPS current stability in scaled ns/s
 * @pps_calcnt:		PPS monitor: calibration intervals
 * @pps_jitcnt:		PPS monitor: jitter limit exceeded
 * @pps_stbcnt:		PPS monitor: stability limit exceeded
 * @pps_errcnt:		PPS monitor: calibration errors
 *
 * Protected by the timekeeping locks.
 */
struct ntp_data {
	unsigned long		tick_usec;
	u64			tick_length;
	u64			tick_length_base;
	int			time_state;
	int			time_status;
	s64			time_offset;
	long			time_constant;
	long			time_maxerror;
	long			time_esterror;
	s64			time_freq;
	time64_t		time_reftime;
	long			time_adjust;
	s64			ntp_tick_adj;
	time64_t		ntp_next_leap_sec;
#ifdef CONFIG_NTP_PPS
	int			pps_valid;
	long			pps_tf[3];
	long			pps_jitter;
	struct timespec64	pps_fbase;
	int			pps_shift;
	int			pps_intcnt;
	s64			pps_freq;
	long			pps_stabil;
	long			pps_calcnt;
	long			pps_jitcnt;
	long			pps_stbcnt;
	long			pps_errcnt;
#endif
};

static struct ntp_data tk_ntp_data = {
	.tick_usec		= USER_TICK_USEC,
	.time_state		= TIME_OK,
	.time_status		= STA_UNSYNC,
	.time_constant		= 2,
	.time_maxerror		= NTP_PHASE_LIMIT,
	.time_esterror		= NTP_PHASE_LIMIT,
	.ntp_next_leap_sec	= TIME64_MAX,
};

#define SECS_PER_DAY		86400
#define MAX_TICKADJ		500LL	/* usecs */
#define MAX_TICKADJ_SCALED \
	(((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
#define MAX_TAI_OFFSET		100000
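
/*
 * Note on units: tick_length, tick_length_base, time_offset, time_freq and
 * ntp_tick_adj are fixed-point values scaled by 2^NTP_SCALE_SHIFT ("scaled
 * nanoseconds"), which preserves sub-nanosecond resolution across the
 * divisions below. MAX_TICKADJ_SCALED is the per-tick equivalent of the
 * MAX_TICKADJ microseconds-per-second slew limit in those units.
 */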

#ifdef CONFIG_NTP_PPS

/*
 * The following variables are used when a pulse-per-second (PPS) signal
 * is available. They establish the engineering parameters of the clock
 * discipline loop when controlled by the PPS signal.
 */
#define PPS_VALID	10	/* PPS signal watchdog max (s) */
#define PPS_POPCORN	4	/* popcorn spike threshold (shift) */
#define PPS_INTMIN	2	/* min freq interval (s) (shift) */
#define PPS_INTMAX	8	/* max freq interval (s) (shift) */
#define PPS_INTCOUNT	4	/* number of consecutive good intervals to
				   increase pps_shift or consecutive bad
				   intervals to decrease it */
#define PPS_MAXWANDER	100000	/* max PPS freq wander (ns/s) */

/*
 * The PPS kernel consumer compensates the whole phase error immediately.
 * Otherwise, reduce the offset by a fixed factor times the time constant.
 */
static inline s64 ntp_offset_chunk(struct ntp_data *ntpdata, s64 offset)
{
	if (ntpdata->time_status & STA_PPSTIME && ntpdata->time_status & STA_PPSSIGNAL)
		return offset;
	else
		return shift_right(offset, SHIFT_PLL + ntpdata->time_constant);
}

static inline void pps_reset_freq_interval(struct ntp_data *ntpdata)
{
	/* The PPS calibration interval may end surprisingly early */
	ntpdata->pps_shift = PPS_INTMIN;
	ntpdata->pps_intcnt = 0;
}

/**
 * pps_clear - Clears the PPS state variables
 * @ntpdata: Pointer to ntp data
 */
static inline void pps_clear(struct ntp_data *ntpdata)
{
	pps_reset_freq_interval(ntpdata);
	ntpdata->pps_tf[0] = 0;
	ntpdata->pps_tf[1] = 0;
	ntpdata->pps_tf[2] = 0;
	ntpdata->pps_fbase.tv_sec = ntpdata->pps_fbase.tv_nsec = 0;
	ntpdata->pps_freq = 0;
}

/*
 * Decrease pps_valid to indicate that another second has passed since the
 * last PPS signal. When it reaches 0, indicate that the PPS signal is missing.
 */
static inline void pps_dec_valid(struct ntp_data *ntpdata)
{
	if (ntpdata->pps_valid > 0) {
		ntpdata->pps_valid--;
	} else {
		ntpdata->time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
					  STA_PPSWANDER | STA_PPSERROR);
		pps_clear(ntpdata);
	}
}

static inline void pps_set_freq(struct ntp_data *ntpdata)
{
	ntpdata->pps_freq = ntpdata->time_freq;
}

static inline bool is_error_status(int status)
{
	return (status & (STA_UNSYNC|STA_CLOCKERR))
		/*
		 * PPS signal lost when either PPS time or PPS frequency
		 * synchronization requested
		 */
		|| ((status & (STA_PPSFREQ|STA_PPSTIME))
			&& !(status & STA_PPSSIGNAL))
		/*
		 * PPS jitter exceeded when PPS time synchronization
		 * requested
		 */
		|| ((status & (STA_PPSTIME|STA_PPSJITTER))
			== (STA_PPSTIME|STA_PPSJITTER))
		/*
		 * PPS wander exceeded or calibration error when PPS
		 * frequency synchronization requested
		 */
		|| ((status & STA_PPSFREQ)
			&& (status & (STA_PPSWANDER|STA_PPSERROR)));
}

static inline void pps_fill_timex(struct ntp_data *ntpdata, struct __kernel_timex *txc)
{
	txc->ppsfreq	= shift_right((ntpdata->pps_freq >> PPM_SCALE_INV_SHIFT) *
				      PPM_SCALE_INV, NTP_SCALE_SHIFT);
	txc->jitter	= ntpdata->pps_jitter;
	if (!(ntpdata->time_status & STA_NANO))
		txc->jitter = ntpdata->pps_jitter / NSEC_PER_USEC;
	txc->shift	= ntpdata->pps_shift;
	txc->stabil	= ntpdata->pps_stabil;
	txc->jitcnt	= ntpdata->pps_jitcnt;
	txc->calcnt	= ntpdata->pps_calcnt;
	txc->errcnt	= ntpdata->pps_errcnt;
	txc->stbcnt	= ntpdata->pps_stbcnt;
}

#else /* !CONFIG_NTP_PPS */

static inline s64 ntp_offset_chunk(struct ntp_data *ntpdata, s64 offset)
{
	return shift_right(offset, SHIFT_PLL + ntpdata->time_constant);
}

static inline void pps_reset_freq_interval(struct ntp_data *ntpdata) {}
static inline void pps_clear(struct ntp_data *ntpdata) {}
static inline void pps_dec_valid(struct ntp_data *ntpdata) {}
static inline void pps_set_freq(struct ntp_data *ntpdata) {}

static inline bool is_error_status(int status)
{
	return status & (STA_UNSYNC|STA_CLOCKERR);
}

static inline void pps_fill_timex(struct ntp_data *ntpdata, struct __kernel_timex *txc)
{
	/* PPS is not implemented, so these are zero */
	txc->ppsfreq	= 0;
	txc->jitter	= 0;
	txc->shift	= 0;
	txc->stabil	= 0;
	txc->jitcnt	= 0;
	txc->calcnt	= 0;
	txc->errcnt	= 0;
	txc->stbcnt	= 0;
}

#endif /* CONFIG_NTP_PPS */

/*
 * Update tick_length and tick_length_base, based on tick_usec, ntp_tick_adj and
 * time_freq:
 */
static void ntp_update_frequency(struct ntp_data *ntpdata)
{
	u64 second_length, new_base, tick_usec = (u64)ntpdata->tick_usec;

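	/*
	 * The nominal second: tick_usec * USER_HZ microseconds, converted to
	 * nanoseconds and shifted into 2^NTP_SCALE_SHIFT fixed point. The
	 * ntp_tick_adj and time_freq terms below are already in that domain.
	 */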
	second_length = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ) << NTP_SCALE_SHIFT;

	second_length += ntpdata->ntp_tick_adj;
	second_length += ntpdata->time_freq;

	new_base = div_u64(second_length, NTP_INTERVAL_FREQ);

	/*
	 * Don't wait for the next second_overflow, apply the change to the
	 * tick length immediately:
	 */
	ntpdata->tick_length += new_base - ntpdata->tick_length_base;
	ntpdata->tick_length_base = new_base;
}

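/*
 * FLL (frequency-locked loop) adjustment: active when STA_FLL is set or the
 * interval since the last update exceeds MAXSEC, provided at least MINSEC
 * has elapsed. The returned correction is the phase offset divided by the
 * elapsed time, attenuated by 2^SHIFT_FLL, in scaled units.
 */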
static inline s64 ntp_update_offset_fll(struct ntp_data *ntpdata, s64 offset64, long secs)
{
	ntpdata->time_status &= ~STA_MODE;

	if (secs < MINSEC)
		return 0;

	if (!(ntpdata->time_status & STA_FLL) && (secs <= MAXSEC))
		return 0;

	ntpdata->time_status |= STA_MODE;

	return div64_long(offset64 << (NTP_SCALE_SHIFT - SHIFT_FLL), secs);
}

static void ntp_update_offset(struct ntp_data *ntpdata, long offset)
{
	s64 freq_adj, offset64;
	long secs, real_secs;

	if (!(ntpdata->time_status & STA_PLL))
		return;

	if (!(ntpdata->time_status & STA_NANO)) {
		/* Make sure the multiplication below won't overflow */
		offset = clamp(offset, -USEC_PER_SEC, USEC_PER_SEC);
		offset *= NSEC_PER_USEC;
	}

	/* Scale the phase adjustment and clamp to the operating range. */
	offset = clamp(offset, -MAXPHASE, MAXPHASE);

	/*
	 * Select how the frequency is to be controlled
	 * and in which mode (PLL or FLL).
	 */
	real_secs = __ktime_get_real_seconds();
	secs = (long)(real_secs - ntpdata->time_reftime);
	if (unlikely(ntpdata->time_status & STA_FREQHOLD))
		secs = 0;

	ntpdata->time_reftime = real_secs;

	offset64 = offset;
	freq_adj = ntp_update_offset_fll(ntpdata, offset64, secs);

	/*
	 * Clamp update interval to reduce PLL gain with low
	 * sampling rate (e.g. intermittent network connection)
	 * to avoid instability.
	 */
	if (unlikely(secs > 1 << (SHIFT_PLL + 1 + ntpdata->time_constant)))
		secs = 1 << (SHIFT_PLL + 1 + ntpdata->time_constant);

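	/*
	 * PLL (phase-locked loop) adjustment: proportional to offset * secs
	 * and attenuated by the square of the loop period
	 * 2^(SHIFT_PLL + 2 + time_constant), expressed here as one combined
	 * shift into the scaled domain.
	 */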
	freq_adj += (offset64 * secs) <<
		    (NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + ntpdata->time_constant));

	freq_adj = min(freq_adj + ntpdata->time_freq, MAXFREQ_SCALED);

	ntpdata->time_freq = max(freq_adj, -MAXFREQ_SCALED);

	ntpdata->time_offset = div_s64(offset64 << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ);
}

static void __ntp_clear(struct ntp_data *ntpdata)
{
	/* Stop active adjtime() */
	ntpdata->time_adjust = 0;
	ntpdata->time_status |= STA_UNSYNC;
	ntpdata->time_maxerror = NTP_PHASE_LIMIT;
	ntpdata->time_esterror = NTP_PHASE_LIMIT;

	ntp_update_frequency(ntpdata);

	ntpdata->tick_length = ntpdata->tick_length_base;
	ntpdata->time_offset = 0;

	ntpdata->ntp_next_leap_sec = TIME64_MAX;
	/* Clear PPS state variables */
	pps_clear(ntpdata);
}

/**
 * ntp_clear - Clears the NTP state variables
 */
void ntp_clear(void)
{
	__ntp_clear(&tk_ntp_data);
}

u64 ntp_tick_length(void)
{
	return tk_ntp_data.tick_length;
}

/**
 * ntp_get_next_leap - Returns the next leapsecond in CLOCK_REALTIME ktime_t
 *
 * Provides the time of the next leapsecond against CLOCK_REALTIME in
 * a ktime_t format. Returns KTIME_MAX if no leapsecond is pending.
 */
ktime_t ntp_get_next_leap(void)
{
	struct ntp_data *ntpdata = &tk_ntp_data;
	ktime_t ret;

	if ((ntpdata->time_state == TIME_INS) && (ntpdata->time_status & STA_INS))
		return ktime_set(ntpdata->ntp_next_leap_sec, 0);
	ret = KTIME_MAX;
	return ret;
}

/*
 * This routine handles the overflow of the microsecond field
 *
 * The tricky bits of code to handle the accurate clock support
 * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 * They were originally developed for SUN and DEC kernels.
 * All the kudos should go to Dave for this stuff.
 *
 * Also handles leap second processing, and returns leap offset
 */
int second_overflow(time64_t secs)
{
	struct ntp_data *ntpdata = &tk_ntp_data;
	s64 delta;
	int leap = 0;
	s32 rem;

	/*
	 * Leap second processing. If in leap-insert state at the end of the
	 * day, the system clock is set back one second; if in leap-delete
	 * state, the system clock is set ahead one second.
	 */
	switch (ntpdata->time_state) {
	case TIME_OK:
		if (ntpdata->time_status & STA_INS) {
			ntpdata->time_state = TIME_INS;
			div_s64_rem(secs, SECS_PER_DAY, &rem);
			ntpdata->ntp_next_leap_sec = secs + SECS_PER_DAY - rem;
		} else if (ntpdata->time_status & STA_DEL) {
			ntpdata->time_state = TIME_DEL;
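			/*
			 * Bias by one second: with a deletion the day ends
			 * early, so the edge computed here lands on 23:59:59,
			 * the second that gets skipped (see the TIME_DEL
			 * case below).
			 */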
			div_s64_rem(secs + 1, SECS_PER_DAY, &rem);
			ntpdata->ntp_next_leap_sec = secs + SECS_PER_DAY - rem;
		}
		break;
	case TIME_INS:
		if (!(ntpdata->time_status & STA_INS)) {
			ntpdata->ntp_next_leap_sec = TIME64_MAX;
			ntpdata->time_state = TIME_OK;
		} else if (secs == ntpdata->ntp_next_leap_sec) {
			leap = -1;
			ntpdata->time_state = TIME_OOP;
			pr_notice("Clock: inserting leap second 23:59:60 UTC\n");
		}
		break;
	case TIME_DEL:
		if (!(ntpdata->time_status & STA_DEL)) {
			ntpdata->ntp_next_leap_sec = TIME64_MAX;
			ntpdata->time_state = TIME_OK;
		} else if (secs == ntpdata->ntp_next_leap_sec) {
			leap = 1;
			ntpdata->ntp_next_leap_sec = TIME64_MAX;
			ntpdata->time_state = TIME_WAIT;
			pr_notice("Clock: deleting leap second 23:59:59 UTC\n");
		}
		break;
	case TIME_OOP:
		ntpdata->ntp_next_leap_sec = TIME64_MAX;
		ntpdata->time_state = TIME_WAIT;
		break;
	case TIME_WAIT:
		if (!(ntpdata->time_status & (STA_INS | STA_DEL)))
			ntpdata->time_state = TIME_OK;
		break;
	}

	/* Bump the maxerror field */
	ntpdata->time_maxerror += MAXFREQ / NSEC_PER_USEC;
	if (ntpdata->time_maxerror > NTP_PHASE_LIMIT) {
		ntpdata->time_maxerror = NTP_PHASE_LIMIT;
		ntpdata->time_status |= STA_UNSYNC;
	}

	/* Compute the phase adjustment for the next second */
	ntpdata->tick_length = ntpdata->tick_length_base;

	delta = ntp_offset_chunk(ntpdata, ntpdata->time_offset);
	ntpdata->time_offset -= delta;
	ntpdata->tick_length += delta;

	/* Check PPS signal */
	pps_dec_valid(ntpdata);

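	/*
	 * Apply any pending adjtime() delta: at most MAX_TICKADJ (500) usec
	 * worth of scaled correction is folded into tick_length per second;
	 * larger requests are spread across subsequent seconds.
	 */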
	if (!ntpdata->time_adjust)
		goto out;

	if (ntpdata->time_adjust > MAX_TICKADJ) {
		ntpdata->time_adjust -= MAX_TICKADJ;
		ntpdata->tick_length += MAX_TICKADJ_SCALED;
		goto out;
	}

	if (ntpdata->time_adjust < -MAX_TICKADJ) {
		ntpdata->time_adjust += MAX_TICKADJ;
		ntpdata->tick_length -= MAX_TICKADJ_SCALED;
		goto out;
	}

	ntpdata->tick_length += (s64)(ntpdata->time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ)
				<< NTP_SCALE_SHIFT;
	ntpdata->time_adjust = 0;

out:
	return leap;
}

#if defined(CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC)
static void sync_hw_clock(struct work_struct *work);
static DECLARE_WORK(sync_work, sync_hw_clock);
static struct hrtimer sync_hrtimer;
#define SYNC_PERIOD_NS (11ULL * 60 * NSEC_PER_SEC)

static enum hrtimer_restart sync_timer_callback(struct hrtimer *timer)
{
	queue_work(system_freezable_power_efficient_wq, &sync_work);

	return HRTIMER_NORESTART;
}

static void sched_sync_hw_clock(unsigned long offset_nsec, bool retry)
{
	ktime_t exp = ktime_set(ktime_get_real_seconds(), 0);

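	/*
	 * Aim the expiry at (period - offset_nsec) past the current wall
	 * clock second, so the update work runs offset_nsec ahead of a
	 * seconds boundary, which is the moment the RTC write must happen.
	 */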
	if (retry)
		exp = ktime_add_ns(exp, 2ULL * NSEC_PER_SEC - offset_nsec);
	else
		exp = ktime_add_ns(exp, SYNC_PERIOD_NS - offset_nsec);

	hrtimer_start(&sync_hrtimer, exp, HRTIMER_MODE_ABS);
}

/*
 * Check whether @now is correct versus the required time to update the RTC
 * and calculate the value which needs to be written to the RTC so that the
 * next seconds increment of the RTC after the write is aligned with the next
 * seconds increment of clock REALTIME.
 *
 * tsched     t1 write(t2.tv_sec - 1sec)      t2 RTC increments seconds
 *
 * t2.tv_nsec == 0
 * tsched = t2 - set_offset_nsec
 * newval = t2 - NSEC_PER_SEC
 *
 * ==> newval = tsched + set_offset_nsec - NSEC_PER_SEC
 *
 * As the execution of this code is not guaranteed to happen exactly at
 * tsched this allows it to happen within a fuzzy region:
 *
 *	abs(now - tsched) < FUZZ
 *
 * If @now is not inside the allowed window the function returns false.
 */
static inline bool rtc_tv_nsec_ok(unsigned long set_offset_nsec,
				  struct timespec64 *to_set,
				  const struct timespec64 *now)
{
	/* Allowed error in tv_nsec, arbitrarily set to 5 jiffies in ns. */
	const unsigned long TIME_SET_NSEC_FUZZ = TICK_NSEC * 5;
	struct timespec64 delay = {.tv_sec = -1,
				   .tv_nsec = set_offset_nsec};

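	/*
	 * to_set = now + set_offset_nsec - 1s, i.e. the "newval" from the
	 * description above; only the proximity of tv_nsec to a second
	 * boundary still needs checking.
	 */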
	*to_set = timespec64_add(*now, delay);

	if (to_set->tv_nsec < TIME_SET_NSEC_FUZZ) {
		to_set->tv_nsec = 0;
		return true;
	}

	if (to_set->tv_nsec > NSEC_PER_SEC - TIME_SET_NSEC_FUZZ) {
		to_set->tv_sec++;
		to_set->tv_nsec = 0;
		return true;
	}
	return false;
}

#ifdef CONFIG_GENERIC_CMOS_UPDATE
int __weak update_persistent_clock64(struct timespec64 now64)
{
	return -ENODEV;
}
#else
static inline int update_persistent_clock64(struct timespec64 now64)
{
	return -ENODEV;
}
#endif

#ifdef CONFIG_RTC_SYSTOHC
/* Save NTP synchronized time to the RTC */
static int update_rtc(struct timespec64 *to_set, unsigned long *offset_nsec)
{
	struct rtc_device *rtc;
	struct rtc_time tm;
	int err = -ENODEV;

	rtc = rtc_class_open(CONFIG_RTC_SYSTOHC_DEVICE);
	if (!rtc)
		return -ENODEV;

	if (!rtc->ops || !rtc->ops->set_time)
		goto out_close;

	/* First call might not have the correct offset */
	if (*offset_nsec == rtc->set_offset_nsec) {
		rtc_time64_to_tm(to_set->tv_sec, &tm);
		err = rtc_set_time(rtc, &tm);
	} else {
		/* Store the update offset and let the caller try again */
		*offset_nsec = rtc->set_offset_nsec;
		err = -EAGAIN;
	}
out_close:
	rtc_class_close(rtc);
	return err;
}
#else
static inline int update_rtc(struct timespec64 *to_set, unsigned long *offset_nsec)
{
	return -ENODEV;
}
#endif

/**
 * ntp_synced - Tells whether the NTP status is not UNSYNC
 * Returns: true if not UNSYNC, false otherwise
 */
static inline bool ntp_synced(void)
{
	return !(tk_ntp_data.time_status & STA_UNSYNC);
}

/*
 * If we have an externally synchronized Linux clock, then update the RTC
 * accordingly every ~11 minutes. Generally RTCs can only store second
 * precision, but many RTCs will adjust the phase of their second tick to
 * match the moment of update. This infrastructure arranges the RTC set
 * call at the correct moment so that the RTC second tick is phase
 * synchronized with the kernel clock.
 */
static void sync_hw_clock(struct work_struct *work)
{
	/*
	 * The default synchronization offset is 500ms for the deprecated
	 * update_persistent_clock64() under the assumption that it uses
	 * the infamous CMOS clock (MC146818).
	 */
	static unsigned long offset_nsec = NSEC_PER_SEC / 2;
	struct timespec64 now, to_set;
	int res = -EAGAIN;

	/*
	 * Don't update if STA_UNSYNC is set, or if ntp_notify_cmos_timer()
	 * managed to schedule the work between the timer firing and the
	 * work being able to rearm the timer. Wait for the timer to expire.
	 */
	if (!ntp_synced() || hrtimer_is_queued(&sync_hrtimer))
		return;

	ktime_get_real_ts64(&now);
	/* If @now is not in the allowed window, try again */
	if (!rtc_tv_nsec_ok(offset_nsec, &to_set, &now))
		goto rearm;

	/* Take timezone adjusted RTCs into account */
	if (persistent_clock_is_local)
		to_set.tv_sec -= (sys_tz.tz_minuteswest * 60);

	/* Try the legacy RTC first. */
	res = update_persistent_clock64(to_set);
	if (res != -ENODEV)
		goto rearm;

	/* Try the RTC class */
	res = update_rtc(&to_set, &offset_nsec);
	if (res == -ENODEV)
		return;
rearm:
	sched_sync_hw_clock(offset_nsec, res != 0);
}

void ntp_notify_cmos_timer(bool offset_set)
{
	/*
	 * If the time jumped (via ADJ_SETOFFSET), cancel the sync timer,
	 * which may have been running if the time was synchronized
	 * prior to the ADJ_SETOFFSET call.
	 */
	if (offset_set)
		hrtimer_cancel(&sync_hrtimer);

	/*
	 * If the work is currently executing but has not yet rearmed the
	 * timer, this queues the work again immediately. Not a big issue,
	 * just a pointless extra work item.
	 */
	if (ntp_synced() && !hrtimer_is_queued(&sync_hrtimer))
		queue_work(system_freezable_power_efficient_wq, &sync_work);
}

static void __init ntp_init_cmos_sync(void)
{
	hrtimer_setup(&sync_hrtimer, sync_timer_callback, CLOCK_REALTIME, HRTIMER_MODE_ABS);
}
#else /* !(CONFIG_GENERIC_CMOS_UPDATE || CONFIG_RTC_SYSTOHC) */
static inline void __init ntp_init_cmos_sync(void) { }
#endif /* !(CONFIG_GENERIC_CMOS_UPDATE || CONFIG_RTC_SYSTOHC) */

/*
 * Propagate a new txc->status value into the NTP state:
 */
static inline void process_adj_status(struct ntp_data *ntpdata, const struct __kernel_timex *txc)
{
	if ((ntpdata->time_status & STA_PLL) && !(txc->status & STA_PLL)) {
		ntpdata->time_state = TIME_OK;
		ntpdata->time_status = STA_UNSYNC;
		ntpdata->ntp_next_leap_sec = TIME64_MAX;
		/* Restart PPS frequency calibration */
		pps_reset_freq_interval(ntpdata);
	}

	/*
	 * If we turn on PLL adjustments then reset the
	 * reference time to current time.
	 */
	if (!(ntpdata->time_status & STA_PLL) && (txc->status & STA_PLL))
		ntpdata->time_reftime = __ktime_get_real_seconds();

	/* only set allowed bits */
	ntpdata->time_status &= STA_RONLY;
	ntpdata->time_status |= txc->status & ~STA_RONLY;
}

static inline void process_adjtimex_modes(struct ntp_data *ntpdata, const struct __kernel_timex *txc,
					  s32 *time_tai)
{
	if (txc->modes & ADJ_STATUS)
		process_adj_status(ntpdata, txc);

	if (txc->modes & ADJ_NANO)
		ntpdata->time_status |= STA_NANO;

	if (txc->modes & ADJ_MICRO)
		ntpdata->time_status &= ~STA_NANO;

	if (txc->modes & ADJ_FREQUENCY) {
		ntpdata->time_freq = txc->freq * PPM_SCALE;
		ntpdata->time_freq = min(ntpdata->time_freq, MAXFREQ_SCALED);
		ntpdata->time_freq = max(ntpdata->time_freq, -MAXFREQ_SCALED);
		/* Update pps_freq */
		pps_set_freq(ntpdata);
	}

	if (txc->modes & ADJ_MAXERROR)
		ntpdata->time_maxerror = clamp(txc->maxerror, 0, NTP_PHASE_LIMIT);

	if (txc->modes & ADJ_ESTERROR)
		ntpdata->time_esterror = clamp(txc->esterror, 0, NTP_PHASE_LIMIT);

	if (txc->modes & ADJ_TIMECONST) {
		ntpdata->time_constant = clamp(txc->constant, 0, MAXTC);
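		/*
		 * Legacy microsecond API: the constant is biased by 4,
		 * presumably to map the older interface's PLL constant
		 * onto the nanokernel scale, then re-clamped below.
		 */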
		if (!(ntpdata->time_status & STA_NANO))
			ntpdata->time_constant += 4;
		ntpdata->time_constant = clamp(ntpdata->time_constant, 0, MAXTC);
	}

	if (txc->modes & ADJ_TAI && txc->constant >= 0 && txc->constant <= MAX_TAI_OFFSET)
		*time_tai = txc->constant;

	if (txc->modes & ADJ_OFFSET)
		ntp_update_offset(ntpdata, txc->offset);

	if (txc->modes & ADJ_TICK)
		ntpdata->tick_usec = txc->tick;

	if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
		ntp_update_frequency(ntpdata);
}

/*
 * adjtimex() mainly allows reading (and writing, if superuser) of
 * kernel time-keeping variables. Used by xntpd.
 */
int __do_adjtimex(struct __kernel_timex *txc, const struct timespec64 *ts,
		  s32 *time_tai, struct audit_ntp_data *ad)
{
	struct ntp_data *ntpdata = &tk_ntp_data;
	int result;

	if (txc->modes & ADJ_ADJTIME) {
		long save_adjust = ntpdata->time_adjust;

		if (!(txc->modes & ADJ_OFFSET_READONLY)) {
			/* adjtime() is independent from ntp_adjtime() */
			ntpdata->time_adjust = txc->offset;
			ntp_update_frequency(ntpdata);

			audit_ntp_set_old(ad, AUDIT_NTP_ADJUST, save_adjust);
			audit_ntp_set_new(ad, AUDIT_NTP_ADJUST, ntpdata->time_adjust);
		}
		txc->offset = save_adjust;
	} else {
		/* If there are input parameters, then process them: */
		if (txc->modes) {
			audit_ntp_set_old(ad, AUDIT_NTP_OFFSET, ntpdata->time_offset);
			audit_ntp_set_old(ad, AUDIT_NTP_FREQ, ntpdata->time_freq);
			audit_ntp_set_old(ad, AUDIT_NTP_STATUS, ntpdata->time_status);
			audit_ntp_set_old(ad, AUDIT_NTP_TAI, *time_tai);
			audit_ntp_set_old(ad, AUDIT_NTP_TICK, ntpdata->tick_usec);

			process_adjtimex_modes(ntpdata, txc, time_tai);

			audit_ntp_set_new(ad, AUDIT_NTP_OFFSET, ntpdata->time_offset);
			audit_ntp_set_new(ad, AUDIT_NTP_FREQ, ntpdata->time_freq);
			audit_ntp_set_new(ad, AUDIT_NTP_STATUS, ntpdata->time_status);
			audit_ntp_set_new(ad, AUDIT_NTP_TAI, *time_tai);
			audit_ntp_set_new(ad, AUDIT_NTP_TICK, ntpdata->tick_usec);
		}

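		/*
		 * time_offset is kept per tick in 2^NTP_SCALE_SHIFT fixed
		 * point; multiplying by NTP_INTERVAL_FREQ and dropping the
		 * scale shift converts it back to plain nanoseconds for
		 * userspace.
		 */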
		txc->offset = shift_right(ntpdata->time_offset * NTP_INTERVAL_FREQ, NTP_SCALE_SHIFT);
		if (!(ntpdata->time_status & STA_NANO))
			txc->offset = div_s64(txc->offset, NSEC_PER_USEC);
	}

	result = ntpdata->time_state;
	if (is_error_status(ntpdata->time_status))
		result = TIME_ERROR;

	txc->freq	= shift_right((ntpdata->time_freq >> PPM_SCALE_INV_SHIFT) *
				      PPM_SCALE_INV, NTP_SCALE_SHIFT);
	txc->maxerror	= ntpdata->time_maxerror;
	txc->esterror	= ntpdata->time_esterror;
	txc->status	= ntpdata->time_status;
	txc->constant	= ntpdata->time_constant;
	txc->precision	= 1;
	txc->tolerance	= MAXFREQ_SCALED / PPM_SCALE;
	txc->tick	= ntpdata->tick_usec;
	txc->tai	= *time_tai;

	/* Fill PPS status fields */
	pps_fill_timex(ntpdata, txc);

	txc->time.tv_sec = ts->tv_sec;
	txc->time.tv_usec = ts->tv_nsec;
	if (!(ntpdata->time_status & STA_NANO))
		txc->time.tv_usec = ts->tv_nsec / NSEC_PER_USEC;

	/* Handle leapsec adjustments */
	if (unlikely(ts->tv_sec >= ntpdata->ntp_next_leap_sec)) {
		if ((ntpdata->time_state == TIME_INS) && (ntpdata->time_status & STA_INS)) {
			result = TIME_OOP;
			txc->tai++;
			txc->time.tv_sec--;
		}
		if ((ntpdata->time_state == TIME_DEL) && (ntpdata->time_status & STA_DEL)) {
			result = TIME_WAIT;
			txc->tai--;
			txc->time.tv_sec++;
		}
		if ((ntpdata->time_state == TIME_OOP) && (ts->tv_sec == ntpdata->ntp_next_leap_sec))
			result = TIME_WAIT;
	}

	return result;
}

#ifdef CONFIG_NTP_PPS

/*
 * struct pps_normtime is basically a struct timespec, but it is
 * semantically different (and it is the reason why it was invented):
 * pps_normtime.nsec has a range of ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ]
 * while timespec.tv_nsec has a range of [0, NSEC_PER_SEC)
 */
struct pps_normtime {
	s64	sec;	/* seconds */
	long	nsec;	/* nanoseconds */
};

/*
 * Normalize the timestamp so that nsec is in the
 * ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ] interval
 */
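/* Example: a timespec64 of { 5, 900000000 } normalizes to { 6, -100000000 } */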
static inline struct pps_normtime pps_normalize_ts(struct timespec64 ts)
{
	struct pps_normtime norm = {
		.sec = ts.tv_sec,
		.nsec = ts.tv_nsec
	};

	if (norm.nsec > (NSEC_PER_SEC >> 1)) {
		norm.nsec -= NSEC_PER_SEC;
		norm.sec++;
	}

	return norm;
}

/* Get current phase correction and jitter */
static inline long pps_phase_filter_get(struct ntp_data *ntpdata, long *jitter)
{
	*jitter = ntpdata->pps_tf[0] - ntpdata->pps_tf[1];
	if (*jitter < 0)
		*jitter = -*jitter;

	/* TODO: test various filters */
	return ntpdata->pps_tf[0];
}

/* Add the sample to the phase filter */
static inline void pps_phase_filter_add(struct ntp_data *ntpdata, long err)
{
	ntpdata->pps_tf[2] = ntpdata->pps_tf[1];
	ntpdata->pps_tf[1] = ntpdata->pps_tf[0];
	ntpdata->pps_tf[0] = err;
}

/*
 * Decrease frequency calibration interval length. It is halved after four
 * consecutive unstable intervals.
 */
static inline void pps_dec_freq_interval(struct ntp_data *ntpdata)
{
	if (--ntpdata->pps_intcnt <= -PPS_INTCOUNT) {
		ntpdata->pps_intcnt = -PPS_INTCOUNT;
		if (ntpdata->pps_shift > PPS_INTMIN) {
			ntpdata->pps_shift--;
			ntpdata->pps_intcnt = 0;
		}
	}
}

/*
 * Increase frequency calibration interval length. It is doubled after
 * four consecutive stable intervals.
 */
static inline void pps_inc_freq_interval(struct ntp_data *ntpdata)
{
	if (++ntpdata->pps_intcnt >= PPS_INTCOUNT) {
		ntpdata->pps_intcnt = PPS_INTCOUNT;
		if (ntpdata->pps_shift < PPS_INTMAX) {
			ntpdata->pps_shift++;
			ntpdata->pps_intcnt = 0;
		}
	}
}

/*
 * Update clock frequency based on MONOTONIC_RAW clock PPS signal
 * timestamps
 *
 * At the end of the calibration interval the difference between the
 * first and last MONOTONIC_RAW clock timestamps divided by the length
 * of the interval becomes the frequency update. If the interval was
 * too long, the data are discarded.
 * Returns the difference between old and new frequency values.
 */
static long hardpps_update_freq(struct ntp_data *ntpdata, struct pps_normtime freq_norm)
{
	long delta, delta_mod;
	s64 ftemp;

	/* Check if the frequency interval was too long */
	if (freq_norm.sec > (2 << ntpdata->pps_shift)) {
		ntpdata->time_status |= STA_PPSERROR;
		ntpdata->pps_errcnt++;
		pps_dec_freq_interval(ntpdata);
		printk_deferred(KERN_ERR "hardpps: PPSERROR: interval too long - %lld s\n",
				freq_norm.sec);
		return 0;
	}

	/*
	 * Here the raw frequency offset and wander (stability) is
	 * calculated. If the wander is less than the wander threshold the
	 * interval is increased; otherwise it is decreased.
	 */
	ftemp = div_s64(((s64)(-freq_norm.nsec)) << NTP_SCALE_SHIFT,
			freq_norm.sec);
	delta = shift_right(ftemp - ntpdata->pps_freq, NTP_SCALE_SHIFT);
	ntpdata->pps_freq = ftemp;
	if (delta > PPS_MAXWANDER || delta < -PPS_MAXWANDER) {
		printk_deferred(KERN_WARNING "hardpps: PPSWANDER: change=%ld\n", delta);
		ntpdata->time_status |= STA_PPSWANDER;
		ntpdata->pps_stbcnt++;
		pps_dec_freq_interval(ntpdata);
	} else {
		/* Good sample */
		pps_inc_freq_interval(ntpdata);
	}

	/*
	 * The stability metric is calculated as the average of recent
	 * frequency changes, but is used only for performance monitoring
	 */
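	/*
	 * pps_stabil is an exponentially weighted average with weight
	 * 1/2^PPS_INTMIN: each sample moves it by
	 * (sample - pps_stabil) >> PPS_INTMIN.
	 */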
	delta_mod = delta;
	if (delta_mod < 0)
		delta_mod = -delta_mod;
	ntpdata->pps_stabil += (div_s64(((s64)delta_mod) << (NTP_SCALE_SHIFT - SHIFT_USEC),
					NSEC_PER_USEC) - ntpdata->pps_stabil) >> PPS_INTMIN;

	/* If enabled, the system clock frequency is updated */
	if ((ntpdata->time_status & STA_PPSFREQ) && !(ntpdata->time_status & STA_FREQHOLD)) {
		ntpdata->time_freq = ntpdata->pps_freq;
		ntp_update_frequency(ntpdata);
	}

	return delta;
}

/* Correct REALTIME clock phase error against PPS signal */
static void hardpps_update_phase(struct ntp_data *ntpdata, long error)
{
	long correction = -error;
	long jitter;

	/* Add the sample to the median filter */
	pps_phase_filter_add(ntpdata, correction);
	correction = pps_phase_filter_get(ntpdata, &jitter);

	/*
	 * Nominal jitter is due to PPS signal noise. If it exceeds the
	 * threshold, the sample is discarded; otherwise, if so enabled,
	 * the time offset is updated.
	 */
	if (jitter > (ntpdata->pps_jitter << PPS_POPCORN)) {
		printk_deferred(KERN_WARNING "hardpps: PPSJITTER: jitter=%ld, limit=%ld\n",
				jitter, (ntpdata->pps_jitter << PPS_POPCORN));
		ntpdata->time_status |= STA_PPSJITTER;
		ntpdata->pps_jitcnt++;
	} else if (ntpdata->time_status & STA_PPSTIME) {
		/* Correct the time using the phase offset */
		ntpdata->time_offset = div_s64(((s64)correction) << NTP_SCALE_SHIFT,
					       NTP_INTERVAL_FREQ);
		/* Cancel running adjtime() */
		ntpdata->time_adjust = 0;
	}
	/* Update jitter */
	ntpdata->pps_jitter += (jitter - ntpdata->pps_jitter) >> PPS_INTMIN;
}

/*
 * __hardpps() - discipline CPU clock oscillator to external PPS signal
 *
 * This routine is called at each PPS signal arrival in order to
 * discipline the CPU clock oscillator to the PPS signal. It takes two
 * parameters: REALTIME and MONOTONIC_RAW clock timestamps. The former
 * is used to correct clock phase error and the latter is used to
 * correct the frequency.
 *
 * This code is based on David Mills's reference nanokernel
 * implementation. It was mostly rewritten but keeps the same idea.
 */
void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
{
	struct pps_normtime pts_norm, freq_norm;
	struct ntp_data *ntpdata = &tk_ntp_data;

	pts_norm = pps_normalize_ts(*phase_ts);

	/* Clear the error bits, they will be set again if needed */
	ntpdata->time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);

	/* indicate signal presence */
	ntpdata->time_status |= STA_PPSSIGNAL;
	ntpdata->pps_valid = PPS_VALID;

	/*
	 * When called for the first time, just start the frequency
	 * interval
	 */
	if (unlikely(ntpdata->pps_fbase.tv_sec == 0)) {
		ntpdata->pps_fbase = *raw_ts;
		return;
	}

	/* Ok, now we have a base for frequency calculation */
	freq_norm = pps_normalize_ts(timespec64_sub(*raw_ts, ntpdata->pps_fbase));

	/*
	 * Check that the signal is in the range
	 * [1s - MAXFREQ us, 1s + MAXFREQ us], otherwise reject it
	 */
	if ((freq_norm.sec == 0) || (freq_norm.nsec > MAXFREQ * freq_norm.sec) ||
	    (freq_norm.nsec < -MAXFREQ * freq_norm.sec)) {
		ntpdata->time_status |= STA_PPSJITTER;
		/* Restart the frequency calibration interval */
		ntpdata->pps_fbase = *raw_ts;
		printk_deferred(KERN_ERR "hardpps: PPSJITTER: bad pulse\n");
		return;
	}

	/* Signal is ok. Check if the current frequency interval is finished */
	if (freq_norm.sec >= (1 << ntpdata->pps_shift)) {
		ntpdata->pps_calcnt++;
		/* Restart the frequency calibration interval */
		ntpdata->pps_fbase = *raw_ts;
		hardpps_update_freq(ntpdata, freq_norm);
	}

	hardpps_update_phase(ntpdata, pts_norm.nsec);
}
#endif /* CONFIG_NTP_PPS */

static int __init ntp_tick_adj_setup(char *str)
{
	int rc = kstrtos64(str, 0, &tk_ntp_data.ntp_tick_adj);

	if (rc)
		return rc;

	tk_ntp_data.ntp_tick_adj <<= NTP_SCALE_SHIFT;
	return 1;
}

__setup("ntp_tick_adj=", ntp_tick_adj_setup);

void __init ntp_init(void)
{
	ntp_clear();
	ntp_init_cmos_sync();
}