// SPDX-License-Identifier: GPL-2.0+
/*
 * TI Common Platform Time Sync
 *
 * Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com>
 *
 */
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/if.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_classify.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>

#include "cpts.h"

#define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
#define CPTS_SKB_RX_TX_TMO 100 /* ms */
#define CPTS_EVENT_RX_TX_TIMEOUT (100) /* ms */

struct cpts_skb_cb_data {
	u32 skb_mtype_seqid;
	unsigned long tmo;
};

#define cpts_read32(c, r)	readl_relaxed(&c->reg->r)
#define cpts_write32(c, v, r)	writel_relaxed(v, &c->reg->r)

static int cpts_event_port(struct cpts_event *event)
{
	return (event->high >> PORT_NUMBER_SHIFT) & PORT_NUMBER_MASK;
}

static int event_expired(struct cpts_event *event)
{
	return time_after(jiffies, event->tmo);
}

static int event_type(struct cpts_event *event)
{
	return (event->high >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
}

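/*
 * Pop one event from the CPTS hardware FIFO, if one is pending.
 * Returns 0 and fills in @high/@low on success, -1 when the FIFO is
 * empty.
 */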
static int cpts_fifo_pop(struct cpts *cpts, u32 *high, u32 *low)
{
	u32 r = cpts_read32(cpts, intstat_raw);

	if (r & TS_PEND_RAW) {
		*high = cpts_read32(cpts, event_high);
		*low = cpts_read32(cpts, event_low);
		cpts_write32(cpts, EVENT_POP, event_pop);
		return 0;
	}
	return -1;
}

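/*
 * Return expired events from the active list to the free pool.
 * Returns 0 if at least one event was reclaimed, -1 otherwise.
 */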
static int cpts_purge_events(struct cpts *cpts)
{
	struct list_head *this, *next;
	struct cpts_event *event;
	int removed = 0;

	list_for_each_safe(this, next, &cpts->events) {
		event = list_entry(this, struct cpts_event, list);
		if (event_expired(event)) {
			list_del_init(&event->list);
			list_add(&event->list, &cpts->pool);
			++removed;
		}
	}

	if (removed)
		dev_dbg(cpts->dev, "cpts: event pool cleaned up %d\n", removed);
	return removed ? 0 : -1;
}

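/*
 * Drop queued TX skbs whose timestamp wait has timed out. Called with
 * txq.lock held, hence the unlocked __skb_unlink().
 */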
static void cpts_purge_txq(struct cpts *cpts)
{
	struct cpts_skb_cb_data *skb_cb;
	struct sk_buff *skb, *tmp;
	int removed = 0;

	skb_queue_walk_safe(&cpts->txq, skb, tmp) {
		skb_cb = (struct cpts_skb_cb_data *)skb->cb;
		if (time_after(jiffies, skb_cb->tmo)) {
			__skb_unlink(skb, &cpts->txq);
			dev_consume_skb_any(skb);
			++removed;
		}
	}

	if (removed)
		dev_dbg(cpts->dev, "txq cleaned up %d\n", removed);
}

/*
 * Returns zero if matching event type was found.
 */
static int cpts_fifo_read(struct cpts *cpts, int match)
{
	struct ptp_clock_event pevent;
	bool need_schedule = false;
	struct cpts_event *event;
	unsigned long flags;
	int i, type = -1;
	u32 hi, lo;

	spin_lock_irqsave(&cpts->lock, flags);

	for (i = 0; i < CPTS_FIFO_DEPTH; i++) {
		if (cpts_fifo_pop(cpts, &hi, &lo))
			break;

		if (list_empty(&cpts->pool) && cpts_purge_events(cpts)) {
			dev_warn(cpts->dev, "cpts: event pool empty\n");
			break;
		}

		event = list_first_entry(&cpts->pool, struct cpts_event, list);
		event->high = hi;
		event->low = lo;
		event->timestamp = timecounter_cyc2time(&cpts->tc, event->low);
		type = event_type(event);

		dev_dbg(cpts->dev, "CPTS_EV: %d high:%08X low:%08x\n",
			type, event->high, event->low);
		switch (type) {
		case CPTS_EV_PUSH:
			WRITE_ONCE(cpts->cur_timestamp, lo);
			timecounter_read(&cpts->tc);
			if (cpts->mult_new) {
				cpts->cc.mult = cpts->mult_new;
				cpts->mult_new = 0;
			}
			if (!cpts->irq_poll)
				complete(&cpts->ts_push_complete);
			break;
		case CPTS_EV_TX:
		case CPTS_EV_RX:
			event->tmo = jiffies +
				msecs_to_jiffies(CPTS_EVENT_RX_TX_TIMEOUT);

			list_del_init(&event->list);
			list_add_tail(&event->list, &cpts->events);
			need_schedule = true;
			break;
		case CPTS_EV_ROLL:
		case CPTS_EV_HALF:
			break;
		case CPTS_EV_HW:
			pevent.timestamp = event->timestamp;
			pevent.type = PTP_CLOCK_EXTTS;
			pevent.index = cpts_event_port(event) - 1;
			ptp_clock_event(cpts->clock, &pevent);
			break;
		default:
			dev_err(cpts->dev, "cpts: unknown event type\n");
			break;
		}
		if (type == match)
			break;
	}

	spin_unlock_irqrestore(&cpts->lock, flags);

	if (!cpts->irq_poll && need_schedule)
		ptp_schedule_worker(cpts->clock, 0);

	return type == match ? 0 : -1;
}

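/*
 * Drain the event FIFO from the MAC driver's misc interrupt handler
 * when the CPTS is used in interrupt (non irq_poll) mode.
 */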
void cpts_misc_interrupt(struct cpts *cpts)
{
	cpts_fifo_read(cpts, -1);
}
EXPORT_SYMBOL_GPL(cpts_misc_interrupt);

static u64 cpts_systim_read(const struct cyclecounter *cc)
{
	struct cpts *cpts = container_of(cc, struct cpts, cc);

	return READ_ONCE(cpts->cur_timestamp);
}

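/*
 * Trigger a TS_PUSH snapshot in hardware and make sure the resulting
 * CPTS_EV_PUSH event has been consumed, so cpts->cur_timestamp is
 * current: in irq_poll mode the FIFO is drained here directly,
 * otherwise we wait for the interrupt path to signal
 * ts_push_complete.
 */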
static void cpts_update_cur_time(struct cpts *cpts, int match,
				 struct ptp_system_timestamp *sts)
{
	unsigned long flags;

	reinit_completion(&cpts->ts_push_complete);

	/* use spin_lock_irqsave() here as it has to run very fast */
	spin_lock_irqsave(&cpts->lock, flags);
	ptp_read_system_prets(sts);
	cpts_write32(cpts, TS_PUSH, ts_push);
	cpts_read32(cpts, ts_push);
	ptp_read_system_postts(sts);
	spin_unlock_irqrestore(&cpts->lock, flags);

	if (cpts->irq_poll && cpts_fifo_read(cpts, match) && match != -1)
		dev_err(cpts->dev, "cpts: unable to obtain a time stamp\n");

	if (!cpts->irq_poll &&
	    !wait_for_completion_timeout(&cpts->ts_push_complete, HZ))
		dev_err(cpts->dev, "cpts: obtain a time stamp timeout\n");
}

/* PTP clock operations */

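/*
 * Frequency adjustment. @scaled_ppm is parts per million with a
 * 16-bit binary fractional field (so scaled_ppm = 1 << 16 requests a
 * +1 ppm speed-up). The new multiplier is derived from the original
 * cc_mult and is applied in cpts_fifo_read() when the CPTS_EV_PUSH
 * event triggered below is processed, so it takes effect together
 * with a fresh counter snapshot.
 */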
static int cpts_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);

	mutex_lock(&cpts->ptp_clk_mutex);

	cpts->mult_new = adjust_by_scaled_ppm(cpts->cc_mult, scaled_ppm);

	cpts_update_cur_time(cpts, CPTS_EV_PUSH, NULL);

	mutex_unlock(&cpts->ptp_clk_mutex);
	return 0;
}

static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);

	mutex_lock(&cpts->ptp_clk_mutex);
	timecounter_adjtime(&cpts->tc, delta);
	mutex_unlock(&cpts->ptp_clk_mutex);

	return 0;
}

static int cpts_ptp_gettimeex(struct ptp_clock_info *ptp,
			      struct timespec64 *ts,
			      struct ptp_system_timestamp *sts)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);
	u64 ns;

	mutex_lock(&cpts->ptp_clk_mutex);

	cpts_update_cur_time(cpts, CPTS_EV_PUSH, sts);

	ns = timecounter_read(&cpts->tc);
	mutex_unlock(&cpts->ptp_clk_mutex);

	*ts = ns_to_timespec64(ns);

	return 0;
}

static int cpts_ptp_settime(struct ptp_clock_info *ptp,
			    const struct timespec64 *ts)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);
	u64 ns;

	ns = timespec64_to_ns(ts);

	mutex_lock(&cpts->ptp_clk_mutex);
	timecounter_init(&cpts->tc, &cpts->cc, ns);
	mutex_unlock(&cpts->ptp_clk_mutex);

	return 0;
}

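/*
 * Enable/disable external timestamp capture on HWx_TS_PUSH input
 * @index; the per-input enable bits sit at bit (8 + index) of the
 * control register.
 */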
static int cpts_extts_enable(struct cpts *cpts, u32 index, int on)
{
	u32 v;

	if (((cpts->hw_ts_enable & BIT(index)) >> index) == on)
		return 0;

	mutex_lock(&cpts->ptp_clk_mutex);

	v = cpts_read32(cpts, control);
	if (on) {
		v |= BIT(8 + index);
		cpts->hw_ts_enable |= BIT(index);
	} else {
		v &= ~BIT(8 + index);
		cpts->hw_ts_enable &= ~BIT(index);
	}
	cpts_write32(cpts, v, control);

	mutex_unlock(&cpts->ptp_clk_mutex);

	return 0;
}

static int cpts_ptp_enable(struct ptp_clock_info *ptp,
			   struct ptp_clock_request *rq, int on)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);

	switch (rq->type) {
	case PTP_CLK_REQ_EXTTS:
		return cpts_extts_enable(cpts, rq->extts.index, on);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

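/*
 * Try to deliver one TX event's timestamp to a queued skb. The txq is
 * spliced onto a private list so it can be walked without holding
 * txq.lock; a match is reported via skb_tstamp_tx() and expired skbs
 * are dropped on the way.
 */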
static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
{
	struct sk_buff_head txq_list;
	struct sk_buff *skb, *tmp;
	unsigned long flags;
	bool found = false;
	u32 mtype_seqid;

	mtype_seqid = event->high &
		      ((MESSAGE_TYPE_MASK << MESSAGE_TYPE_SHIFT) |
		       (SEQUENCE_ID_MASK << SEQUENCE_ID_SHIFT) |
		       (EVENT_TYPE_MASK << EVENT_TYPE_SHIFT));

	__skb_queue_head_init(&txq_list);

	spin_lock_irqsave(&cpts->txq.lock, flags);
	skb_queue_splice_init(&cpts->txq, &txq_list);
	spin_unlock_irqrestore(&cpts->txq.lock, flags);

	skb_queue_walk_safe(&txq_list, skb, tmp) {
		struct skb_shared_hwtstamps ssh;
		struct cpts_skb_cb_data *skb_cb =
					(struct cpts_skb_cb_data *)skb->cb;

		if (mtype_seqid == skb_cb->skb_mtype_seqid) {
			memset(&ssh, 0, sizeof(ssh));
			ssh.hwtstamp = ns_to_ktime(event->timestamp);
			skb_tstamp_tx(skb, &ssh);
			found = true;
			__skb_unlink(skb, &txq_list);
			dev_consume_skb_any(skb);
			dev_dbg(cpts->dev, "match tx timestamp mtype_seqid %08x\n",
				mtype_seqid);
			break;
		}

		if (time_after(jiffies, skb_cb->tmo)) {
			/* timeout any expired skbs over 1s */
			dev_dbg(cpts->dev, "expiring tx timestamp from txq\n");
			__skb_unlink(skb, &txq_list);
			dev_consume_skb_any(skb);
		}
	}

	spin_lock_irqsave(&cpts->txq.lock, flags);
	skb_queue_splice(&txq_list, &cpts->txq);
	spin_unlock_irqrestore(&cpts->txq.lock, flags);

	return found;
}

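/*
 * Process the pending RX/TX event list: events that matched a queued
 * TX skb or have expired go back to the free pool, the rest are
 * requeued for the next pass.
 */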
static void cpts_process_events(struct cpts *cpts)
{
	struct list_head *this, *next;
	struct cpts_event *event;
	LIST_HEAD(events_free);
	unsigned long flags;
	LIST_HEAD(events);

	spin_lock_irqsave(&cpts->lock, flags);
	list_splice_init(&cpts->events, &events);
	spin_unlock_irqrestore(&cpts->lock, flags);

	list_for_each_safe(this, next, &events) {
		event = list_entry(this, struct cpts_event, list);
		if (cpts_match_tx_ts(cpts, event) ||
		    time_after(jiffies, event->tmo)) {
			list_del_init(&event->list);
			list_add(&event->list, &events_free);
		}
	}

	spin_lock_irqsave(&cpts->lock, flags);
	list_splice_tail(&events, &cpts->events);
	list_splice_tail(&events_free, &cpts->pool);
	spin_unlock_irqrestore(&cpts->lock, flags);
}

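/*
 * PTP aux worker. Reads the time often enough that the timecounter
 * never has to span more than half a counter wrap between updates,
 * processes queued events, and reschedules itself sooner when TX skbs
 * are still waiting for their timestamps.
 */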
static long cpts_overflow_check(struct ptp_clock_info *ptp)
{
	struct cpts *cpts = container_of(ptp, struct cpts, info);
	unsigned long delay = cpts->ov_check_period;
	unsigned long flags;
	u64 ns;

	mutex_lock(&cpts->ptp_clk_mutex);

	cpts_update_cur_time(cpts, -1, NULL);
	ns = timecounter_read(&cpts->tc);

	cpts_process_events(cpts);

	spin_lock_irqsave(&cpts->txq.lock, flags);
	if (!skb_queue_empty(&cpts->txq)) {
		cpts_purge_txq(cpts);
		if (!skb_queue_empty(&cpts->txq))
			delay = CPTS_SKB_TX_WORK_TIMEOUT;
	}
	spin_unlock_irqrestore(&cpts->txq.lock, flags);

	dev_dbg(cpts->dev, "cpts overflow check at %lld\n", ns);
	mutex_unlock(&cpts->ptp_clk_mutex);
	return (long)delay;
}

static const struct ptp_clock_info cpts_info = {
	.owner		= THIS_MODULE,
	.name		= "CPTS timer",
	.max_adj	= 1000000,
	.n_ext_ts	= 0,
	.n_pins		= 0,
	.pps		= 0,
	.adjfine	= cpts_ptp_adjfine,
	.adjtime	= cpts_ptp_adjtime,
	.gettimex64	= cpts_ptp_gettimeex,
	.settime64	= cpts_ptp_settime,
	.enable		= cpts_ptp_enable,
	.do_aux_work	= cpts_overflow_check,
};

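/*
 * Build the message-type/sequence-id key used to match a PTP frame
 * against CPTS events. Returns 1 and fills in *mtype_seqid for PTP
 * frames, 0 for anything else.
 */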
static int cpts_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
{
	unsigned int ptp_class = ptp_classify_raw(skb);
	struct ptp_header *hdr;
	u8 msgtype;
	u16 seqid;

	if (ptp_class == PTP_CLASS_NONE)
		return 0;

	hdr = ptp_parse_header(skb, ptp_class);
	if (!hdr)
		return 0;

	msgtype = ptp_get_msgtype(hdr, ptp_class);
	seqid = ntohs(hdr->sequence_id);

	*mtype_seqid = (msgtype & MESSAGE_TYPE_MASK) << MESSAGE_TYPE_SHIFT;
	*mtype_seqid |= (seqid & SEQUENCE_ID_MASK) << SEQUENCE_ID_SHIFT;

	return 1;
}

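/*
 * Search the pending events for a timestamp matching
 * @skb_mtype_seqid, purging expired events along the way. Returns the
 * timestamp in nanoseconds, or 0 if nothing matched.
 */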
static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb,
			int ev_type, u32 skb_mtype_seqid)
{
	struct list_head *this, *next;
	struct cpts_event *event;
	unsigned long flags;
	u32 mtype_seqid;
	u64 ns = 0;

	cpts_fifo_read(cpts, -1);
	spin_lock_irqsave(&cpts->lock, flags);
	list_for_each_safe(this, next, &cpts->events) {
		event = list_entry(this, struct cpts_event, list);
		if (event_expired(event)) {
			list_del_init(&event->list);
			list_add(&event->list, &cpts->pool);
			continue;
		}

		mtype_seqid = event->high &
			      ((MESSAGE_TYPE_MASK << MESSAGE_TYPE_SHIFT) |
			       (SEQUENCE_ID_MASK << SEQUENCE_ID_SHIFT) |
			       (EVENT_TYPE_MASK << EVENT_TYPE_SHIFT));

		if (mtype_seqid == skb_mtype_seqid) {
			ns = event->timestamp;
			list_del_init(&event->list);
			list_add(&event->list, &cpts->pool);
			break;
		}
	}
	spin_unlock_irqrestore(&cpts->lock, flags);

	return ns;
}

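/*
 * Attach an RX hardware timestamp to @skb if a matching event is
 * already available; RX timestamps are not deferred, so the packet is
 * delivered without one when no event has arrived yet.
 */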
void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
	struct cpts_skb_cb_data *skb_cb = (struct cpts_skb_cb_data *)skb->cb;
	struct skb_shared_hwtstamps *ssh;
	int ret;
	u64 ns;

	/* cpts_rx_timestamp() is called before eth_type_trans(), so the
	 * skb MAC header is not set up yet; reset it here so the packet
	 * can be classified.
	 */
	skb_reset_mac_header(skb);
	ret = cpts_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
	if (!ret)
		return;

	skb_cb->skb_mtype_seqid |= (CPTS_EV_RX << EVENT_TYPE_SHIFT);

	dev_dbg(cpts->dev, "%s mtype seqid %08x\n",
		__func__, skb_cb->skb_mtype_seqid);

	ns = cpts_find_ts(cpts, skb, CPTS_EV_RX, skb_cb->skb_mtype_seqid);
	if (!ns)
		return;
	ssh = skb_hwtstamps(skb);
	memset(ssh, 0, sizeof(*ssh));
	ssh->hwtstamp = ns_to_ktime(ns);
}
EXPORT_SYMBOL_GPL(cpts_rx_timestamp);

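/*
 * Queue @skb for TX timestamping if the stack asked for it
 * (SKBTX_IN_PROGRESS). Matching is always deferred to the PTP worker;
 * the skb is held for at most CPTS_SKB_RX_TX_TMO ms before it is
 * dropped.
 */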
void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
	struct cpts_skb_cb_data *skb_cb = (struct cpts_skb_cb_data *)skb->cb;
	int ret;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		return;

	ret = cpts_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
	if (!ret)
		return;

	skb_cb->skb_mtype_seqid |= (CPTS_EV_TX << EVENT_TYPE_SHIFT);

	dev_dbg(cpts->dev, "%s mtype seqid %08x\n",
		__func__, skb_cb->skb_mtype_seqid);

	/* Always defer TX TS processing to PTP worker */
	skb_get(skb);
	/* get the timestamp for timeouts */
	skb_cb->tmo = jiffies + msecs_to_jiffies(CPTS_SKB_RX_TX_TMO);
	skb_queue_tail(&cpts->txq, skb);
	ptp_schedule_worker(cpts->clock, 0);
}
EXPORT_SYMBOL_GPL(cpts_tx_timestamp);

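/*
 * Bring the CPTS up: enable the reference clock and the hardware,
 * seed the timecounter and event pool, and register the PTP clock.
 * A minimal usage sketch from a MAC driver's point of view (the call
 * sites are assumptions, not mandated by this file):
 *
 *	cpts = cpts_create(dev, regs, node, 0);
 *	ret = cpts_register(cpts);
 *	...
 *	cpts_unregister(cpts);
 *	cpts_release(cpts);
 */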
int cpts_register(struct cpts *cpts)
{
	int err, i;

	skb_queue_head_init(&cpts->txq);
	INIT_LIST_HEAD(&cpts->events);
	INIT_LIST_HEAD(&cpts->pool);
	for (i = 0; i < CPTS_MAX_EVENTS; i++)
		list_add(&cpts->pool_data[i].list, &cpts->pool);

	err = clk_enable(cpts->refclk);
	if (err)
		return err;

	cpts_write32(cpts, CPTS_EN, control);
	cpts_write32(cpts, TS_PEND_EN, int_enable);

	timecounter_init(&cpts->tc, &cpts->cc, ktime_get_real_ns());

	cpts->clock = ptp_clock_register(&cpts->info, cpts->dev);
	if (IS_ERR(cpts->clock)) {
		err = PTR_ERR(cpts->clock);
		cpts->clock = NULL;
		goto err_ptp;
	}
	cpts->phc_index = ptp_clock_index(cpts->clock);

	ptp_schedule_worker(cpts->clock, cpts->ov_check_period);
	return 0;

err_ptp:
	clk_disable(cpts->refclk);
	return err;
}
EXPORT_SYMBOL_GPL(cpts_register);

void cpts_unregister(struct cpts *cpts)
{
	if (WARN_ON(!cpts->clock))
		return;

	ptp_clock_unregister(cpts->clock);
	cpts->clock = NULL;
	cpts->phc_index = -1;

	cpts_write32(cpts, 0, int_enable);
	cpts_write32(cpts, 0, control);

	/* Drop all packets */
	skb_queue_purge(&cpts->txq);

	clk_disable(cpts->refclk);
}
EXPORT_SYMBOL_GPL(cpts_unregister);

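/*
 * Choose cc.mult/cc.shift for the cyclecounter conversion
 * ns = (cycles * mult) >> shift, unless DT already supplied fixed
 * values. maxsec bounds the interval the conversion must stay
 * accurate over, and half of it becomes the overflow check period.
 */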
static void cpts_calc_mult_shift(struct cpts *cpts)
{
	u64 frac, maxsec, ns;
	u32 freq;

	freq = clk_get_rate(cpts->refclk);

	/* Calc the maximum number of seconds which we can run before
	 * wrapping around.
	 */
	maxsec = cpts->cc.mask;
	do_div(maxsec, freq);
	/* limit conversion range to 10 sec as higher values will produce
	 * too small mult factors and so reduce the conversion accuracy
	 */
	if (maxsec > 10)
		maxsec = 10;

	/* Calc overflow check period (maxsec / 2) */
	cpts->ov_check_period = (HZ * maxsec) / 2;
	dev_info(cpts->dev, "cpts: overflow check period %lu (jiffies)\n",
		 cpts->ov_check_period);

	if (cpts->cc.mult || cpts->cc.shift)
		return;

	clocks_calc_mult_shift(&cpts->cc.mult, &cpts->cc.shift,
			       freq, NSEC_PER_SEC, maxsec);

	frac = 0;
	ns = cyclecounter_cyc2ns(&cpts->cc, freq, cpts->cc.mask, &frac);

	dev_info(cpts->dev,
		 "CPTS: ref_clk_freq:%u calc_mult:%u calc_shift:%u error:%lld nsec/sec\n",
		 freq, cpts->cc.mult, cpts->cc.shift, (ns - NSEC_PER_SEC));
}

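/*
 * Register the optional "cpts-refclk-mux" child node as a mux clock
 * so the CPTS reference clock input can be selected from DT. A sketch
 * of the expected child node (clock handles and table values are
 * illustrative only):
 *
 *	cpts-refclk-mux {
 *		#clock-cells = <0>;
 *		clocks = <&parent_clk0>, <&parent_clk1>;
 *		ti,mux-tbl = <0x0>, <0x1>;
 *	};
 */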
static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node)
{
	struct device_node *refclk_np;
	const char **parent_names;
	unsigned int num_parents;
	struct clk_hw *clk_hw;
	int ret = -EINVAL;
	u32 *mux_table;

	refclk_np = of_get_child_by_name(node, "cpts-refclk-mux");
	if (!refclk_np)
		/* refclk selection is not supported on all SoCs */
		return 0;

	num_parents = of_clk_get_parent_count(refclk_np);
	if (num_parents < 1) {
		dev_err(cpts->dev, "mux-clock %s must have parents\n",
			refclk_np->name);
		goto mux_fail;
	}

	parent_names = devm_kcalloc(cpts->dev, num_parents,
				    sizeof(*parent_names), GFP_KERNEL);

	mux_table = devm_kcalloc(cpts->dev, num_parents, sizeof(*mux_table),
				 GFP_KERNEL);
	if (!mux_table || !parent_names) {
		ret = -ENOMEM;
		goto mux_fail;
	}

	of_clk_parent_fill(refclk_np, parent_names, num_parents);

	ret = of_property_read_variable_u32_array(refclk_np, "ti,mux-tbl",
						  mux_table,
						  num_parents, num_parents);
	if (ret < 0)
		goto mux_fail;

	clk_hw = clk_hw_register_mux_table(cpts->dev, refclk_np->name,
					   parent_names, num_parents,
					   0,
					   &cpts->reg->rftclk_sel, 0, 0x1F,
					   0, mux_table, NULL);
	if (IS_ERR(clk_hw)) {
		ret = PTR_ERR(clk_hw);
		goto mux_fail;
	}

	ret = devm_add_action_or_reset(cpts->dev,
				       (void(*)(void *))clk_hw_unregister_mux,
				       clk_hw);
	if (ret) {
		dev_err(cpts->dev, "add clkmux unreg action %d", ret);
		goto mux_fail;
	}

	ret = of_clk_add_hw_provider(refclk_np, of_clk_hw_simple_get, clk_hw);
	if (ret)
		goto mux_fail;

	ret = devm_add_action_or_reset(cpts->dev,
				       (void(*)(void *))of_clk_del_provider,
				       refclk_np);
	if (ret) {
		dev_err(cpts->dev, "add clkmux provider unreg action %d", ret);
		goto mux_fail;
	}

	return ret;

mux_fail:
	of_node_put(refclk_np);
	return ret;
}

static int cpts_of_parse(struct cpts *cpts, struct device_node *node)
{
	int ret = -EINVAL;
	u32 prop;

	if (!of_property_read_u32(node, "cpts_clock_mult", &prop))
		cpts->cc.mult = prop;

	if (!of_property_read_u32(node, "cpts_clock_shift", &prop))
		cpts->cc.shift = prop;

	if ((cpts->cc.mult && !cpts->cc.shift) ||
	    (!cpts->cc.mult && cpts->cc.shift))
		goto of_error;

	return cpts_of_mux_clk_setup(cpts, node);

of_error:
	dev_err(cpts->dev, "CPTS: Missing property in the DT.\n");
	return ret;
}

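/*
 * Allocate and initialize a CPTS context: parse DT properties, claim
 * and prepare (but not yet enable) the reference clock, and compute
 * the cyclecounter parameters. The counterpart is cpts_release().
 */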
struct cpts *cpts_create(struct device *dev, void __iomem *regs,
			 struct device_node *node, u32 n_ext_ts)
{
	struct cpts *cpts;
	int ret;

	cpts = devm_kzalloc(dev, sizeof(*cpts), GFP_KERNEL);
	if (!cpts)
		return ERR_PTR(-ENOMEM);

	cpts->dev = dev;
	cpts->reg = (struct cpsw_cpts __iomem *)regs;
	cpts->irq_poll = true;
	spin_lock_init(&cpts->lock);
	mutex_init(&cpts->ptp_clk_mutex);
	init_completion(&cpts->ts_push_complete);

	ret = cpts_of_parse(cpts, node);
	if (ret)
		return ERR_PTR(ret);

	cpts->refclk = devm_get_clk_from_child(dev, node, "cpts");
	if (IS_ERR(cpts->refclk))
		/* try get clk from dev node for compatibility */
		cpts->refclk = devm_clk_get(dev, "cpts");

	if (IS_ERR(cpts->refclk)) {
		dev_err(dev, "Failed to get cpts refclk %ld\n",
			PTR_ERR(cpts->refclk));
		return ERR_CAST(cpts->refclk);
	}

	ret = clk_prepare(cpts->refclk);
	if (ret)
		return ERR_PTR(ret);

	cpts->cc.read = cpts_systim_read;
	cpts->cc.mask = CLOCKSOURCE_MASK(32);
	cpts->info = cpts_info;
	cpts->phc_index = -1;

	if (n_ext_ts)
		cpts->info.n_ext_ts = n_ext_ts;

	cpts_calc_mult_shift(cpts);
	/* save cc.mult original value as it can be modified
	 * by cpts_ptp_adjfine().
	 */
	cpts->cc_mult = cpts->cc.mult;

	return cpts;
}
EXPORT_SYMBOL_GPL(cpts_create);

void cpts_release(struct cpts *cpts)
{
	if (!cpts)
		return;

	if (WARN_ON(!cpts->refclk))
		return;

	clk_unprepare(cpts->refclk);
}
EXPORT_SYMBOL_GPL(cpts_release);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI CPTS driver");
MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");