// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PTP 1588 clock support
 *
 * Copyright (C) 2010 OMICRON electronics GmbH
 */
7#include <linux/device.h>
8#include <linux/err.h>
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/posix-clock.h>
13#include <linux/pps_kernel.h>
14#include <linux/slab.h>
15#include <linux/syscalls.h>
16#include <linux/uaccess.h>
17#include <linux/debugfs.h>
18#include <linux/xarray.h>
19#include <uapi/linux/sched/types.h>
20
21#include "ptp_private.h"
22
/* Hard upper bound on info->n_alarm accepted by ptp_clock_register(). */
#define PTP_MAX_ALARMS 4
/* Default capture/offset bits used when registering the PPS source. */
#define PTP_PPS_DEFAULTS (PPS_CAPTUREASSERT | PPS_OFFSETASSERT)
/* Event type reported to the PPS layer for PTP_CLOCK_PPS/PPSUSR. */
#define PTP_PPS_EVENT PPS_CAPTUREASSERT
/* Full mode word advertised for the PPS source. */
#define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC)
27
/* Device class backing /sys/class/ptp; attribute groups come from sysfs code. */
const struct class ptp_class = {
	.name = "ptp",
	.dev_groups = ptp_groups
};
32
/* private globals */

/* Base dev_t of the char-device region reserved in ptp_init(). */
static dev_t ptp_devt;

/* Index -> struct ptp_clock map; indices are allocated in ptp_clock_register(). */
static DEFINE_XARRAY_ALLOC(ptp_clocks_map);
38
39/* time stamp event queue operations */
40
41static inline int queue_free(struct timestamp_event_queue *q)
42{
43 return PTP_MAX_TIMESTAMPS - queue_cnt(q) - 1;
44}
45
/*
 * Append one external-timestamp (EXTTS) or external-offset (EXTOFF)
 * event from @src onto @queue.  Takes queue->lock with IRQs disabled;
 * when the ring is full, the oldest entry is overwritten by advancing
 * the head alongside the tail.
 */
static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
				       struct ptp_clock_event *src)
{
	struct ptp_extts_event *dst;
	struct timespec64 offset_ts;
	unsigned long flags;
	s64 seconds;
	u32 remainder;

	if (src->type == PTP_CLOCK_EXTTS) {
		/* Split the nanosecond timestamp into sec + ns remainder. */
		seconds = div_u64_rem(src->timestamp, 1000000000, &remainder);
	} else if (src->type == PTP_CLOCK_EXTOFF) {
		offset_ts = ns_to_timespec64(src->offset);
		seconds = offset_ts.tv_sec;
		remainder = offset_ts.tv_nsec;
	} else {
		WARN(1, "%s: unknown type %d\n", __func__, src->type);
		return;
	}

	spin_lock_irqsave(&queue->lock, flags);

	dst = &queue->buf[queue->tail];
	dst->index = src->index;
	dst->flags = PTP_EXTTS_EVENT_VALID;
	dst->t.sec = seconds;
	dst->t.nsec = remainder;
	/* Tag offset events so readers can distinguish them from timestamps. */
	if (src->type == PTP_CLOCK_EXTOFF)
		dst->flags |= PTP_EXT_OFFSET;

	/* Both WRITE_ONCE() are paired with READ_ONCE() in queue_cnt() */
	if (!queue_free(queue))
		WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);

	WRITE_ONCE(queue->tail, (queue->tail + 1) % PTP_MAX_TIMESTAMPS);

	spin_unlock_irqrestore(&queue->lock, flags);
}
84
85/* posix clock implementation */
86
87static int ptp_clock_getres(struct posix_clock *pc, struct timespec64 *tp)
88{
89 tp->tv_sec = 0;
90 tp->tv_nsec = 1;
91 return 0;
92}
93
94static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp)
95{
96 struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
97
98 if (ptp_clock_freerun(ptp)) {
99 pr_err("ptp: physical clock is free running\n");
100 return -EBUSY;
101 }
102
103 return ptp->info->settime64(ptp->info, tp);
104}
105
106static int ptp_clock_gettime(struct posix_clock *pc, struct timespec64 *tp)
107{
108 struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
109 int err;
110
111 if (ptp->info->gettimex64)
112 err = ptp->info->gettimex64(ptp->info, tp, NULL);
113 else
114 err = ptp->info->gettime64(ptp->info, tp);
115 return err;
116}
117
/*
 * clock_adjtime() backend.  Dispatches on tx->modes:
 *   ADJ_SETOFFSET - step the clock by a signed delta,
 *   ADJ_FREQUENCY - dial a frequency adjustment (scaled ppm),
 *   ADJ_OFFSET    - apply a phase offset (if the driver supports it),
 *   modes == 0    - read back the last dialed frequency.
 * Anything else falls through and returns -EOPNOTSUPP.
 */
static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
{
	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
	struct ptp_clock_info *ops;
	int err = -EOPNOTSUPP;

	if (ptp_clock_freerun(ptp)) {
		pr_err("ptp: physical clock is free running\n");
		return -EBUSY;
	}

	ops = ptp->info;

	if (tx->modes & ADJ_SETOFFSET) {
		struct timespec64 ts;
		ktime_t kt;
		s64 delta;

		ts.tv_sec = tx->time.tv_sec;
		ts.tv_nsec = tx->time.tv_usec;

		/* Without ADJ_NANO, tv_usec carries microseconds. */
		if (!(tx->modes & ADJ_NANO))
			ts.tv_nsec *= 1000;

		/* Reject out-of-range (or negative) nanoseconds. */
		if ((unsigned long) ts.tv_nsec >= NSEC_PER_SEC)
			return -EINVAL;

		kt = timespec64_to_ktime(ts);
		delta = ktime_to_ns(kt);
		err = ops->adjtime(ops, delta);
	} else if (tx->modes & ADJ_FREQUENCY) {
		long ppb = scaled_ppm_to_ppb(tx->freq);
		if (ppb > ops->max_adj || ppb < -ops->max_adj)
			return -ERANGE;
		err = ops->adjfine(ops, tx->freq);
		/* Remembered for the modes == 0 read-back below. */
		ptp->dialed_frequency = tx->freq;
	} else if (tx->modes & ADJ_OFFSET) {
		/* Without adjphase support, err stays -EOPNOTSUPP. */
		if (ops->adjphase) {
			s32 max_phase_adj = ops->getmaxphase(ops);
			s32 offset = tx->offset;

			if (!(tx->modes & ADJ_NANO))
				offset *= NSEC_PER_USEC;

			if (offset > max_phase_adj || offset < -max_phase_adj)
				return -ERANGE;

			err = ops->adjphase(ops, offset);
		}
	} else if (tx->modes == 0) {
		tx->freq = ptp->dialed_frequency;
		err = 0;
	}

	return err;
}
174
/*
 * posix_clock backend shared by every registered PTP clock.  The chardev
 * handlers (ptp_ioctl/ptp_open/...) are declared in ptp_private.h.
 */
static struct posix_clock_operations ptp_clock_ops = {
	.owner = THIS_MODULE,
	.clock_adjtime = ptp_clock_adjtime,
	.clock_gettime = ptp_clock_gettime,
	.clock_getres = ptp_clock_getres,
	.clock_settime = ptp_clock_settime,
	.ioctl = ptp_ioctl,
	.open = ptp_open,
	.release = ptp_release,
	.poll = ptp_poll,
	.read = ptp_read,
};
187
/*
 * Device .release callback: frees the clock once the last reference to
 * its embedded struct device is dropped (wired up in ptp_clock_register()).
 */
static void ptp_clock_release(struct device *dev)
{
	struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
	struct timestamp_event_queue *tsevq;
	unsigned long flags;

	ptp_cleanup_pin_groups(ptp);
	kfree(ptp->vclock_index);
	mutex_destroy(&ptp->pincfg_mux);
	mutex_destroy(&ptp->n_vclocks_mux);
	/* Delete first entry: the default queue created at registration. */
	spin_lock_irqsave(&ptp->tsevqs_lock, flags);
	tsevq = list_first_entry(&ptp->tsevqs, struct timestamp_event_queue,
				 qlist);
	list_del(&tsevq->qlist);
	spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
	bitmap_free(tsevq->mask);
	kfree(tsevq);
	debugfs_remove(ptp->debugfs_root);
	/* Release the clock index so it can be reused. */
	xa_erase(&ptp_clocks_map, ptp->index);
	kfree(ptp);
}
210
211static int ptp_getcycles64(struct ptp_clock_info *info, struct timespec64 *ts)
212{
213 if (info->getcyclesx64)
214 return info->getcyclesx64(info, ts, NULL);
215 else
216 return info->gettime64(info, ts);
217}
218
219static void ptp_aux_kworker(struct kthread_work *work)
220{
221 struct ptp_clock *ptp = container_of(work, struct ptp_clock,
222 aux_work.work);
223 struct ptp_clock_info *info = ptp->info;
224 long delay;
225
226 delay = info->do_aux_work(info);
227
228 if (delay >= 0)
229 kthread_queue_delayed_work(worker: ptp->kworker, dwork: &ptp->aux_work, delay);
230}
231
232/* public interface */
233
/*
 * ptp_clock_register() - create and register a new PTP clock.
 * @info:   driver-supplied description and operations; must outlive the
 *          clock.  Missing optional cycle getters are filled in here.
 * @parent: owning device; if it is itself a PTP class device, the new
 *          clock is registered as a virtual clock.
 *
 * Returns the new clock on success or an ERR_PTR() on failure; the goto
 * ladder at the bottom unwinds partial construction in reverse order.
 */
struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
				     struct device *parent)
{
	struct ptp_clock *ptp;
	struct timestamp_event_queue *queue = NULL;
	int err, index, major = MAJOR(ptp_devt);
	char debugfsname[16];
	size_t size;

	if (info->n_alarm > PTP_MAX_ALARMS)
		return ERR_PTR(-EINVAL);

	/* Initialize a clock structure. */
	ptp = kzalloc(sizeof(struct ptp_clock), GFP_KERNEL);
	if (!ptp) {
		err = -ENOMEM;
		goto no_memory;
	}

	/* Reserve a unique clock index (the N in /dev/ptpN). */
	err = xa_alloc(&ptp_clocks_map, &index, ptp, xa_limit_31b,
		       GFP_KERNEL);
	if (err)
		goto no_slot;

	ptp->clock.ops = ptp_clock_ops;
	ptp->info = info;
	ptp->devid = MKDEV(major, index);
	ptp->index = index;
	INIT_LIST_HEAD(&ptp->tsevqs);
	/* Default timestamp event queue; released in ptp_clock_release(). */
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		err = -ENOMEM;
		goto no_memory_queue;
	}
	list_add_tail(&queue->qlist, &ptp->tsevqs);
	spin_lock_init(&ptp->tsevqs_lock);
	queue->mask = bitmap_alloc(PTP_MAX_CHANNELS, GFP_KERNEL);
	if (!queue->mask) {
		err = -ENOMEM;
		goto no_memory_bitmap;
	}
	/* The default queue listens on every channel. */
	bitmap_set(queue->mask, 0, PTP_MAX_CHANNELS);
	spin_lock_init(&queue->lock);
	mutex_init(&ptp->pincfg_mux);
	mutex_init(&ptp->n_vclocks_mux);
	init_waitqueue_head(&ptp->tsev_wq);

	/* Fill in any missing cycle-counter callbacks. */
	if (ptp->info->getcycles64 || ptp->info->getcyclesx64) {
		ptp->has_cycles = true;
		if (!ptp->info->getcycles64 && ptp->info->getcyclesx64)
			ptp->info->getcycles64 = ptp_getcycles64;
	} else {
		/* Free running cycle counter not supported, use time. */
		ptp->info->getcycles64 = ptp_getcycles64;

		if (ptp->info->gettimex64)
			ptp->info->getcyclesx64 = ptp->info->gettimex64;

		if (ptp->info->getcrosststamp)
			ptp->info->getcrosscycles = ptp->info->getcrosststamp;
	}

	/* Dedicated worker thread for drivers that need periodic aux work. */
	if (ptp->info->do_aux_work) {
		kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
		ptp->kworker = kthread_create_worker(0, "ptp%d", ptp->index);
		if (IS_ERR(ptp->kworker)) {
			err = PTR_ERR(ptp->kworker);
			pr_err("failed to create ptp aux_worker %d\n", err);
			goto kworker_err;
		}
	}

	/* PTP virtual clock is being registered under physical clock */
	if (parent && parent->class && parent->class->name &&
	    strcmp(parent->class->name, "ptp") == 0)
		ptp->is_virtual_clock = true;

	/* Only physical clocks can carry virtual clocks on top. */
	if (!ptp->is_virtual_clock) {
		ptp->max_vclocks = PTP_DEFAULT_MAX_VCLOCKS;

		size = sizeof(int) * ptp->max_vclocks;
		ptp->vclock_index = kzalloc(size, GFP_KERNEL);
		if (!ptp->vclock_index) {
			err = -ENOMEM;
			goto no_mem_for_vclocks;
		}
	}

	err = ptp_populate_pin_groups(ptp);
	if (err)
		goto no_pin_groups;

	/* Register a new PPS source. */
	if (info->pps) {
		struct pps_source_info pps;
		memset(&pps, 0, sizeof(pps));
		snprintf(pps.name, PPS_MAX_NAME_LEN, "ptp%d", index);
		pps.mode = PTP_PPS_MODE;
		pps.owner = info->owner;
		ptp->pps_source = pps_register_source(&pps, PTP_PPS_DEFAULTS);
		if (IS_ERR(ptp->pps_source)) {
			err = PTR_ERR(ptp->pps_source);
			pr_err("failed to register pps source\n");
			goto no_pps;
		}
		ptp->pps_source->lookup_cookie = ptp;
	}

	/* Initialize a new device of our class in our clock structure. */
	device_initialize(&ptp->dev);
	ptp->dev.devt = ptp->devid;
	ptp->dev.class = &ptp_class;
	ptp->dev.parent = parent;
	ptp->dev.groups = ptp->pin_attr_groups;
	ptp->dev.release = ptp_clock_release;
	dev_set_drvdata(&ptp->dev, ptp);
	dev_set_name(&ptp->dev, "ptp%d", ptp->index);

	/* Create a posix clock and link it to the device. */
	err = posix_clock_register(&ptp->clock, &ptp->dev);
	if (err) {
		if (ptp->pps_source)
			pps_unregister_source(ptp->pps_source);

		if (ptp->kworker)
			kthread_destroy_worker(ptp->kworker);

		/* put_device() runs ptp_clock_release() for the rest. */
		put_device(&ptp->dev);

		pr_err("failed to create posix clock\n");
		return ERR_PTR(err);
	}

	/* Debugfs initialization */
	snprintf(debugfsname, sizeof(debugfsname), "ptp%d", ptp->index);
	ptp->debugfs_root = debugfs_create_dir(debugfsname, NULL);

	return ptp;

	/* Error unwinding, in reverse order of acquisition. */
no_pps:
	ptp_cleanup_pin_groups(ptp);
no_pin_groups:
	kfree(ptp->vclock_index);
no_mem_for_vclocks:
	if (ptp->kworker)
		kthread_destroy_worker(ptp->kworker);
kworker_err:
	mutex_destroy(&ptp->pincfg_mux);
	mutex_destroy(&ptp->n_vclocks_mux);
	bitmap_free(queue->mask);
no_memory_bitmap:
	list_del(&queue->qlist);
	kfree(queue);
no_memory_queue:
	xa_erase(&ptp_clocks_map, index);
no_slot:
	kfree(ptp);
no_memory:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(ptp_clock_register);
395
396static int unregister_vclock(struct device *dev, void *data)
397{
398 struct ptp_clock *ptp = dev_get_drvdata(dev);
399
400 ptp_vclock_unregister(info_to_vclock(ptp->info));
401 return 0;
402}
403
/*
 * ptp_clock_unregister() - remove a clock registered by ptp_clock_register().
 *
 * Any virtual clocks stacked on this clock are unregistered first, then
 * readers are woken, the aux worker is stopped, and the PPS source and
 * posix clock are released.  The struct itself is freed later from
 * ptp_clock_release() when the device reference count drops to zero.
 */
int ptp_clock_unregister(struct ptp_clock *ptp)
{
	if (ptp_vclock_in_use(ptp)) {
		device_for_each_child(&ptp->dev, NULL, unregister_vclock);
	}

	/* Mark defunct before waking sleepers so they can observe it. */
	ptp->defunct = 1;
	wake_up_interruptible(&ptp->tsev_wq);

	if (ptp->kworker) {
		kthread_cancel_delayed_work_sync(&ptp->aux_work);
		kthread_destroy_worker(ptp->kworker);
	}

	/* Release the clock's resources. */
	if (ptp->pps_source)
		pps_unregister_source(ptp->pps_source);

	posix_clock_unregister(&ptp->clock);

	return 0;
}
EXPORT_SYMBOL(ptp_clock_unregister);
427
/*
 * ptp_clock_event() - drivers deliver asynchronous clock events here.
 *
 * EXTTS/EXTOFF events are fanned out to every timestamp queue whose
 * channel mask includes the event's channel; PPS events are forwarded
 * to the registered PPS source.  Uses IRQ-safe locking, so it can be
 * called from the driver's interrupt handler.
 */
void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
{
	struct timestamp_event_queue *tsevq;
	struct pps_event_time evt;
	unsigned long flags;

	switch (event->type) {

	case PTP_CLOCK_ALARM:
		/* No fan-out needed for alarms. */
		break;

	case PTP_CLOCK_EXTTS:
	case PTP_CLOCK_EXTOFF:
		/* Enqueue timestamp on selected queues */
		spin_lock_irqsave(&ptp->tsevqs_lock, flags);
		list_for_each_entry(tsevq, &ptp->tsevqs, qlist) {
			if (test_bit((unsigned int)event->index, tsevq->mask))
				enqueue_external_timestamp(tsevq, event);
		}
		spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
		wake_up_interruptible(&ptp->tsev_wq);
		break;

	case PTP_CLOCK_PPS:
		/* Timestamp the PPS edge with the system clock. */
		pps_get_ts(&evt);
		pps_event(ptp->pps_source, &evt, PTP_PPS_EVENT, NULL);
		break;

	case PTP_CLOCK_PPSUSR:
		/* Driver supplied its own event timestamps. */
		pps_event(ptp->pps_source, &event->pps_times,
			  PTP_PPS_EVENT, NULL);
		break;
	}
}
EXPORT_SYMBOL(ptp_clock_event);
463
/* Return the clock's index, i.e. the N in its /dev/ptpN device node. */
int ptp_clock_index(struct ptp_clock *ptp)
{
	return ptp->index;
}
EXPORT_SYMBOL(ptp_clock_index);
469
470int ptp_find_pin(struct ptp_clock *ptp,
471 enum ptp_pin_function func, unsigned int chan)
472{
473 struct ptp_pin_desc *pin = NULL;
474 int i;
475
476 for (i = 0; i < ptp->info->n_pins; i++) {
477 if (ptp->info->pin_config[i].func == func &&
478 ptp->info->pin_config[i].chan == chan) {
479 pin = &ptp->info->pin_config[i];
480 break;
481 }
482 }
483
484 return pin ? i : -1;
485}
486EXPORT_SYMBOL(ptp_find_pin);
487
488int ptp_find_pin_unlocked(struct ptp_clock *ptp,
489 enum ptp_pin_function func, unsigned int chan)
490{
491 int result;
492
493 mutex_lock(&ptp->pincfg_mux);
494
495 result = ptp_find_pin(ptp, func, chan);
496
497 mutex_unlock(lock: &ptp->pincfg_mux);
498
499 return result;
500}
501EXPORT_SYMBOL(ptp_find_pin_unlocked);
502
/*
 * ptp_schedule_worker() - (re)schedule the clock's aux work after @delay
 * jiffies.  Only valid for drivers that set info->do_aux_work (otherwise
 * ptp->kworker was never created).
 */
int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
{
	return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay);
}
EXPORT_SYMBOL(ptp_schedule_worker);
508
/* Cancel pending aux work and wait for a running instance to finish. */
void ptp_cancel_worker_sync(struct ptp_clock *ptp)
{
	kthread_cancel_delayed_work_sync(&ptp->aux_work);
}
EXPORT_SYMBOL(ptp_cancel_worker_sync);
514
515/* module operations */
516
/* Module teardown: undo ptp_init() and drop the (now empty) clock map. */
static void __exit ptp_exit(void)
{
	class_unregister(&ptp_class);
	unregister_chrdev_region(ptp_devt, MINORMASK + 1);
	xa_destroy(&ptp_clocks_map);
}
523
/*
 * Module init: register the "ptp" device class and reserve a char-device
 * region large enough for a full minor range of PTP clocks.
 */
static int __init ptp_init(void)
{
	int err;

	err = class_register(&ptp_class);
	if (err) {
		pr_err("ptp: failed to allocate class\n");
		return err;
	}

	err = alloc_chrdev_region(&ptp_devt, 0, MINORMASK + 1, "ptp");
	if (err < 0) {
		pr_err("ptp: failed to allocate device region\n");
		goto no_region;
	}

	pr_info("PTP clock support registered\n");
	return 0;

no_region:
	class_unregister(&ptp_class);
	return err;
}
547
/* Init early (subsys_initcall) so drivers can register clocks at probe time. */
subsys_initcall(ptp_init);
module_exit(ptp_exit);

MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
MODULE_DESCRIPTION("PTP clocks support");
MODULE_LICENSE("GPL");
554

/* Source: linux/drivers/ptp/ptp_clock.c */