// SPDX-License-Identifier: GPL-2.0
/* cavium_ptp.c - PTP 1588 clock on Cavium hardware
 * Copyright (c) 2003-2015, 2017 Cavium, Inc.
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/timecounter.h>
#include <linux/pci.h>

#include "cavium_ptp.h"

#define DRV_NAME "cavium_ptp"

#define PCI_DEVICE_ID_CAVIUM_PTP 0xA00C
#define PCI_SUBSYS_DEVID_88XX_PTP 0xA10C
#define PCI_SUBSYS_DEVID_81XX_PTP 0xA20C
#define PCI_SUBSYS_DEVID_83XX_PTP 0xA30C
#define PCI_DEVICE_ID_CAVIUM_RST 0xA00E

#define PCI_PTP_BAR_NO 0
#define PCI_RST_BAR_NO 0

#define PTP_CLOCK_CFG 0xF00ULL
#define PTP_CLOCK_CFG_PTP_EN BIT(0)
#define PTP_CLOCK_LO 0xF08ULL
#define PTP_CLOCK_HI 0xF10ULL
#define PTP_CLOCK_COMP 0xF18ULL

#define RST_BOOT 0x1600ULL
#define CLOCK_BASE_RATE 50000000ULL

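/* Read the coprocessor clock rate from the RST block: the field extracted
 * below as (RST_BOOT >> 33) & 0x3f appears to be the clock multiplier applied
 * to the 50 MHz reference (CLOCK_BASE_RATE). If the RST PCI device cannot be
 * found or mapped, a multiplier of 16 (i.e. 800 MHz) is assumed as a
 * fallback.
 */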
static u64 ptp_cavium_clock_get(void)
{
	struct pci_dev *pdev;
	void __iomem *base;
	u64 ret = CLOCK_BASE_RATE * 16;

	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
			      PCI_DEVICE_ID_CAVIUM_RST, NULL);
	if (!pdev)
		goto error;

	base = pci_ioremap_bar(pdev, PCI_RST_BAR_NO);
	if (!base)
		goto error_put_pdev;

	ret = CLOCK_BASE_RATE * ((readq(base + RST_BOOT) >> 33) & 0x3f);

	iounmap(base);

error_put_pdev:
	pci_dev_put(pdev);

error:
	return ret;
}

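/* cavium_ptp_get() hands out the clock instance probed on the Cavium PTP PCI
 * function and takes a reference on its pci_dev; callers must drop that
 * reference with cavium_ptp_put(). It returns -EPROBE_DEFER while this driver
 * has not bound to the device yet, or the error recorded by a failed probe.
 */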
struct cavium_ptp *cavium_ptp_get(void)
{
	struct cavium_ptp *ptp;
	struct pci_dev *pdev;

	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
			      PCI_DEVICE_ID_CAVIUM_PTP, NULL);
	if (!pdev)
		return ERR_PTR(-ENODEV);

	ptp = pci_get_drvdata(pdev);
	if (!ptp)
		ptp = ERR_PTR(-EPROBE_DEFER);
	if (IS_ERR(ptp))
		pci_dev_put(pdev);

	return ptp;
}
EXPORT_SYMBOL(cavium_ptp_get);

void cavium_ptp_put(struct cavium_ptp *ptp)
{
	if (!ptp)
		return;
	pci_dev_put(ptp->pdev);
}
EXPORT_SYMBOL(cavium_ptp_put);

/**
 * cavium_ptp_adjfine() - Adjust ptp frequency
 * @ptp_info: PTP clock info
 * @scaled_ppm: how much to adjust by, in parts per million, but with a
 *              16 bit binary fractional field
 */
static int cavium_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
{
	struct cavium_ptp *clock =
		container_of(ptp_info, struct cavium_ptp, ptp_info);
	unsigned long flags;
	u64 comp;
	u64 adj;
	bool neg_adj = false;

	if (scaled_ppm < 0) {
		neg_adj = true;
		scaled_ppm = -scaled_ppm;
	}

	/* The hardware adds the clock compensation value to the PTP clock on
	 * every coprocessor clock cycle, so it represents the number of
	 * nanoseconds elapsed between consecutive cycles. The compensation
	 * value is a 64-bit fixed-point number whose upper 32 bits are whole
	 * nanoseconds and whose lower 32 bits are the fractional part.
	 * scaled_ppm is the requested frequency adjustment in parts per
	 * million, carried with a 16-bit binary fractional field, so the
	 * correction ratio is scaled_ppm / (1M * 2^16).
	 * The new compensation value is computed with 64-bit fixed-point
	 * arithmetic from
	 *   comp = tbase + tbase * scaled_ppm / (1M * 2^16)
	 * where tbase is the nominal compensation value programmed at probe
	 * time, i.e. tbase = 10^9 / clock_rate in 32.32 fixed point. The
	 * result is then written to the PTP_CLOCK_COMP register.
	 */
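	/* Worked example (illustrative, using the 800 MHz fallback rate):
	 * tbase = (10^9 << 32) / (800 * 10^6) = 1.25 * 2^32 = 0x1_4000_0000,
	 * i.e. 1.25 ns per coprocessor cycle. For scaled_ppm = 65536
	 * (exactly +1 ppm), adj = tbase * 65536 / 2^16 / 10^6 = 5368, so the
	 * new comp becomes 0x1_4000_14F8.
	 */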
	comp = ((u64)1000000000ull << 32) / clock->clock_rate;
	adj = comp * scaled_ppm;
	adj >>= 16;
	adj = div_u64(adj, 1000000ull);
	comp = neg_adj ? comp - adj : comp + adj;

	spin_lock_irqsave(&clock->spin_lock, flags);
	writeq(comp, clock->reg_base + PTP_CLOCK_COMP);
	spin_unlock_irqrestore(&clock->spin_lock, flags);

	return 0;
}

/**
 * cavium_ptp_adjtime() - Adjust ptp time
 * @ptp_info: PTP clock info
 * @delta: how much to adjust by, in nanosecs
 */
static int cavium_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
{
	struct cavium_ptp *clock =
		container_of(ptp_info, struct cavium_ptp, ptp_info);
	unsigned long flags;

	spin_lock_irqsave(&clock->spin_lock, flags);
	timecounter_adjtime(&clock->time_counter, delta);
	spin_unlock_irqrestore(&clock->spin_lock, flags);

	/* Sync, for network driver to get latest value */
	smp_mb();

	return 0;
}

/**
 * cavium_ptp_gettime() - Get hardware clock time with adjustment
 * @ptp_info: PTP clock info
 * @ts: timespec
 */
static int cavium_ptp_gettime(struct ptp_clock_info *ptp_info,
			      struct timespec64 *ts)
{
	struct cavium_ptp *clock =
		container_of(ptp_info, struct cavium_ptp, ptp_info);
	unsigned long flags;
	u64 nsec;

	spin_lock_irqsave(&clock->spin_lock, flags);
	nsec = timecounter_read(&clock->time_counter);
	spin_unlock_irqrestore(&clock->spin_lock, flags);

	*ts = ns_to_timespec64(nsec);

	return 0;
}

/**
 * cavium_ptp_settime() - Set hardware clock time. Reset adjustment
 * @ptp_info: PTP clock info
 * @ts: timespec
 */
static int cavium_ptp_settime(struct ptp_clock_info *ptp_info,
			      const struct timespec64 *ts)
{
	struct cavium_ptp *clock =
		container_of(ptp_info, struct cavium_ptp, ptp_info);
	unsigned long flags;
	u64 nsec;

	nsec = timespec64_to_ns(ts);

	spin_lock_irqsave(&clock->spin_lock, flags);
	timecounter_init(&clock->time_counter, &clock->cycle_counter, nsec);
	spin_unlock_irqrestore(&clock->spin_lock, flags);

	return 0;
}

/**
 * cavium_ptp_enable() - Request to enable or disable an ancillary feature.
 * @ptp_info: PTP clock info
 * @rq: request
 * @on: is it on
 */
static int cavium_ptp_enable(struct ptp_clock_info *ptp_info,
			     struct ptp_clock_request *rq, int on)
{
	return -EOPNOTSUPP;
}

static u64 cavium_ptp_cc_read(const struct cyclecounter *cc)
{
	struct cavium_ptp *clock =
		container_of(cc, struct cavium_ptp, cycle_counter);

	return readq(clock->reg_base + PTP_CLOCK_HI);
}

static int cavium_ptp_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct cavium_ptp *clock;
	struct cyclecounter *cc;
	u64 clock_cfg;
	u64 clock_comp;
	int err;

	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		err = -ENOMEM;
		goto error;
	}

	clock->pdev = pdev;

	err = pcim_enable_device(pdev);
	if (err)
		goto error_free;

	err = pcim_iomap_regions(pdev, 1 << PCI_PTP_BAR_NO, pci_name(pdev));
	if (err)
		goto error_free;

	clock->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];

	spin_lock_init(&clock->spin_lock);

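	/* PTP_CLOCK_HI already advances in nanoseconds, so the cyclecounter
	 * below uses an identity conversion (mult = 1, shift = 0) and the
	 * full 64-bit mask.
	 */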
	cc = &clock->cycle_counter;
	cc->read = cavium_ptp_cc_read;
	cc->mask = CYCLECOUNTER_MASK(64);
	cc->mult = 1;
	cc->shift = 0;

	timecounter_init(&clock->time_counter, &clock->cycle_counter,
			 ktime_to_ns(ktime_get_real()));

	clock->clock_rate = ptp_cavium_clock_get();

	clock->ptp_info = (struct ptp_clock_info) {
		.owner = THIS_MODULE,
		.name = "ThunderX PTP",
		.max_adj = 1000000000ull,
		.n_ext_ts = 0,
		.n_pins = 0,
		.pps = 0,
		.adjfine = cavium_ptp_adjfine,
		.adjtime = cavium_ptp_adjtime,
		.gettime64 = cavium_ptp_gettime,
		.settime64 = cavium_ptp_settime,
		.enable = cavium_ptp_enable,
	};

	clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
	clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
	writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);

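	/* Program the nominal compensation, i.e. 1/clock_rate seconds per
	 * coprocessor cycle expressed as nanoseconds in 32.32 fixed point;
	 * cavium_ptp_adjfine() later rescales this same value by scaled_ppm.
	 */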
	clock_comp = ((u64)1000000000ull << 32) / clock->clock_rate;
	writeq(clock_comp, clock->reg_base + PTP_CLOCK_COMP);

	clock->ptp_clock = ptp_clock_register(&clock->ptp_info, dev);
	if (IS_ERR(clock->ptp_clock)) {
		err = PTR_ERR(clock->ptp_clock);
		goto error_stop;
	}

	pci_set_drvdata(pdev, clock);
	return 0;

error_stop:
	clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
	clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
	writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
	pcim_iounmap_regions(pdev, 1 << PCI_PTP_BAR_NO);

error_free:
	devm_kfree(dev, clock);

error:
	/* For `cavium_ptp_get()` we need to differentiate between the case
	 * when the core has not tried to probe this device and the case when
	 * the probe failed. In the latter case we pretend that the
	 * initialization was successful and keep the error in
	 * `dev->driver_data`.
	 */
	pci_set_drvdata(pdev, ERR_PTR(err));
	return 0;
}

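/* On remove, drvdata may hold the ERR_PTR left behind by a failed probe (see
 * the comment at the end of cavium_ptp_probe()), hence the IS_ERR_OR_NULL()
 * check before touching the clock.
 */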
static void cavium_ptp_remove(struct pci_dev *pdev)
{
	struct cavium_ptp *clock = pci_get_drvdata(pdev);
	u64 clock_cfg;

	if (IS_ERR_OR_NULL(clock))
		return;

	ptp_clock_unregister(clock->ptp_clock);

	clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
	clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
	writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
}

static const struct pci_device_id cavium_ptp_id_table[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_PTP,
			 PCI_VENDOR_ID_CAVIUM, PCI_SUBSYS_DEVID_88XX_PTP) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_PTP,
			 PCI_VENDOR_ID_CAVIUM, PCI_SUBSYS_DEVID_81XX_PTP) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_PTP,
			 PCI_VENDOR_ID_CAVIUM, PCI_SUBSYS_DEVID_83XX_PTP) },
	{ 0, }
};

static struct pci_driver cavium_ptp_driver = {
	.name = DRV_NAME,
	.id_table = cavium_ptp_id_table,
	.probe = cavium_ptp_probe,
	.remove = cavium_ptp_remove,
};

module_pci_driver(cavium_ptp_driver);

MODULE_DESCRIPTION(DRV_NAME);
MODULE_AUTHOR("Cavium Networks <support@cavium.com>");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, cavium_ptp_id_table);