1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* NXP C45 PHY driver |
3 | * Copyright 2021-2025 NXP |
4 | * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com> |
5 | */ |
6 | |
7 | #include <linux/delay.h> |
8 | #include <linux/ethtool.h> |
9 | #include <linux/ethtool_netlink.h> |
10 | #include <linux/kernel.h> |
11 | #include <linux/mii.h> |
12 | #include <linux/module.h> |
13 | #include <linux/of.h> |
14 | #include <linux/phy.h> |
15 | #include <linux/processor.h> |
16 | #include <linux/property.h> |
17 | #include <linux/ptp_classify.h> |
18 | #include <linux/net_tstamp.h> |
19 | |
20 | #include "nxp-c45-tja11xx.h" |
21 | |
22 | /* Same id: TJA1103, TJA1104 */ |
23 | #define PHY_ID_TJA_1103 0x001BB010 |
24 | /* Same id: TJA1120, TJA1121 */ |
25 | #define PHY_ID_TJA_1120 0x001BB031 |
26 | |
27 | #define VEND1_DEVICE_ID3 0x0004 |
28 | #define TJA1120_DEV_ID3_SILICON_VERSION GENMASK(15, 12) |
29 | #define TJA1120_DEV_ID3_SAMPLE_TYPE GENMASK(11, 8) |
30 | #define DEVICE_ID3_SAMPLE_TYPE_R 0x9 |
31 | |
32 | #define VEND1_DEVICE_CONTROL 0x0040 |
33 | #define DEVICE_CONTROL_RESET BIT(15) |
34 | #define DEVICE_CONTROL_CONFIG_GLOBAL_EN BIT(14) |
35 | #define DEVICE_CONTROL_CONFIG_ALL_EN BIT(13) |
36 | |
37 | #define VEND1_DEVICE_CONFIG 0x0048 |
38 | |
39 | #define TJA1120_VEND1_EXT_TS_MODE 0x1012 |
40 | |
41 | #define TJA1120_GLOBAL_INFRA_IRQ_ACK 0x2C08 |
42 | #define TJA1120_GLOBAL_INFRA_IRQ_EN 0x2C0A |
43 | #define TJA1120_GLOBAL_INFRA_IRQ_STATUS 0x2C0C |
44 | #define TJA1120_DEV_BOOT_DONE BIT(1) |
45 | |
46 | #define TJA1120_VEND1_PTP_TRIG_DATA_S 0x1070 |
47 | |
48 | #define TJA1120_EGRESS_TS_DATA_S 0x9060 |
49 | #define TJA1120_EGRESS_TS_END 0x9067 |
50 | #define TJA1120_TS_VALID BIT(0) |
51 | #define TJA1120_MORE_TS BIT(15) |
52 | |
53 | #define VEND1_PHY_IRQ_ACK 0x80A0 |
54 | #define VEND1_PHY_IRQ_EN 0x80A1 |
55 | #define VEND1_PHY_IRQ_STATUS 0x80A2 |
56 | #define PHY_IRQ_LINK_EVENT BIT(1) |
57 | |
58 | #define VEND1_ALWAYS_ACCESSIBLE 0x801F |
59 | #define FUSA_PASS BIT(4) |
60 | |
61 | #define VEND1_PHY_CONTROL 0x8100 |
62 | #define PHY_CONFIG_EN BIT(14) |
63 | #define PHY_START_OP BIT(0) |
64 | |
65 | #define VEND1_PHY_CONFIG 0x8108 |
66 | #define PHY_CONFIG_AUTO BIT(0) |
67 | |
68 | #define TJA1120_EPHY_RESETS 0x810A |
69 | #define EPHY_PCS_RESET BIT(3) |
70 | |
71 | #define VEND1_SIGNAL_QUALITY 0x8320 |
72 | #define SQI_VALID BIT(14) |
73 | #define SQI_MASK GENMASK(2, 0) |
74 | #define MAX_SQI SQI_MASK |
75 | |
76 | #define CABLE_TEST_ENABLE BIT(15) |
77 | #define CABLE_TEST_START BIT(14) |
78 | #define CABLE_TEST_OK 0x00 |
79 | #define CABLE_TEST_SHORTED 0x01 |
80 | #define CABLE_TEST_OPEN 0x02 |
81 | #define CABLE_TEST_UNKNOWN 0x07 |
82 | |
83 | #define VEND1_PORT_CONTROL 0x8040 |
84 | #define PORT_CONTROL_EN BIT(14) |
85 | |
86 | #define VEND1_PORT_ABILITIES 0x8046 |
87 | #define MACSEC_ABILITY BIT(5) |
88 | #define PTP_ABILITY BIT(3) |
89 | |
90 | #define VEND1_PORT_FUNC_IRQ_EN 0x807A |
91 | #define MACSEC_IRQS BIT(5) |
92 | #define PTP_IRQS BIT(3) |
93 | |
94 | #define VEND1_PTP_IRQ_ACK 0x9008 |
95 | #define EGR_TS_IRQ BIT(1) |
96 | |
97 | #define VEND1_PORT_INFRA_CONTROL 0xAC00 |
98 | #define PORT_INFRA_CONTROL_EN BIT(14) |
99 | |
100 | #define VEND1_RXID 0xAFCC |
101 | #define VEND1_TXID 0xAFCD |
102 | #define ID_ENABLE BIT(15) |
103 | |
104 | #define VEND1_ABILITIES 0xAFC4 |
105 | #define RGMII_ID_ABILITY BIT(15) |
106 | #define RGMII_ABILITY BIT(14) |
107 | #define RMII_ABILITY BIT(10) |
108 | #define REVMII_ABILITY BIT(9) |
109 | #define MII_ABILITY BIT(8) |
110 | #define SGMII_ABILITY BIT(0) |
111 | |
112 | #define VEND1_MII_BASIC_CONFIG 0xAFC6 |
113 | #define MII_BASIC_CONFIG_REV BIT(4) |
114 | #define MII_BASIC_CONFIG_SGMII 0x9 |
115 | #define MII_BASIC_CONFIG_RGMII 0x7 |
116 | #define MII_BASIC_CONFIG_RMII 0x5 |
117 | #define MII_BASIC_CONFIG_MII 0x4 |
118 | |
119 | #define VEND1_SGMII_BASIC_CONTROL 0xB000 |
120 | #define SGMII_LPM BIT(11) |
121 | |
122 | #define VEND1_SYMBOL_ERROR_CNT_XTD 0x8351 |
123 | #define EXTENDED_CNT_EN BIT(15) |
124 | #define VEND1_MONITOR_STATUS 0xAC80 |
125 | #define MONITOR_RESET BIT(15) |
126 | #define VEND1_MONITOR_CONFIG 0xAC86 |
127 | #define LOST_FRAMES_CNT_EN BIT(9) |
128 | #define ALL_FRAMES_CNT_EN BIT(8) |
129 | |
130 | #define VEND1_SYMBOL_ERROR_COUNTER 0x8350 |
131 | #define VEND1_LINK_DROP_COUNTER 0x8352 |
132 | #define VEND1_LINK_LOSSES_AND_FAILURES 0x8353 |
133 | #define VEND1_RX_PREAMBLE_COUNT 0xAFCE |
134 | #define VEND1_TX_PREAMBLE_COUNT 0xAFCF |
135 | #define VEND1_RX_IPG_LENGTH 0xAFD0 |
136 | #define VEND1_TX_IPG_LENGTH 0xAFD1 |
137 | #define COUNTER_EN BIT(15) |
138 | |
139 | #define VEND1_PTP_CONFIG 0x1102 |
140 | #define EXT_TRG_EDGE BIT(1) |
141 | |
142 | #define TJA1120_SYNC_TRIG_FILTER 0x1010 |
143 | #define PTP_TRIG_RISE_TS BIT(3) |
144 | #define PTP_TRIG_FALLING_TS BIT(2) |
145 | |
146 | #define CLK_RATE_ADJ_LD BIT(15) |
147 | #define CLK_RATE_ADJ_DIR BIT(14) |
148 | |
149 | #define VEND1_RX_TS_INSRT_CTRL 0x114D |
150 | #define TJA1103_RX_TS_INSRT_MODE2 0x02 |
151 | |
152 | #define TJA1120_RX_TS_INSRT_CTRL 0x9012 |
153 | #define TJA1120_RX_TS_INSRT_EN BIT(15) |
154 | #define TJA1120_TS_INSRT_MODE BIT(4) |
155 | |
156 | #define VEND1_EGR_RING_DATA_0 0x114E |
157 | #define VEND1_EGR_RING_CTRL 0x1154 |
158 | |
159 | #define RING_DATA_0_TS_VALID BIT(15) |
160 | |
161 | #define RING_DONE BIT(0) |
162 | |
163 | #define TS_SEC_MASK GENMASK(1, 0) |
164 | |
165 | #define PTP_ENABLE BIT(3) |
166 | #define PHY_TEST_ENABLE BIT(0) |
167 | |
168 | #define VEND1_PORT_PTP_CONTROL 0x9000 |
169 | #define PORT_PTP_CONTROL_BYPASS BIT(11) |
170 | |
171 | #define PTP_CLK_PERIOD_100BT1 15ULL |
172 | #define PTP_CLK_PERIOD_1000BT1 8ULL |
173 | |
174 | #define EVENT_MSG_FILT_ALL 0x0F |
175 | #define EVENT_MSG_FILT_NONE 0x00 |
176 | |
177 | #define VEND1_GPIO_FUNC_CONFIG_BASE 0x2C40 |
178 | #define GPIO_FUNC_EN BIT(15) |
179 | #define GPIO_FUNC_PTP BIT(6) |
180 | #define GPIO_SIGNAL_PTP_TRIGGER 0x01 |
181 | #define GPIO_SIGNAL_PPS_OUT 0x12 |
182 | #define GPIO_DISABLE 0 |
183 | #define GPIO_PPS_OUT_CFG (GPIO_FUNC_EN | GPIO_FUNC_PTP | \ |
184 | GPIO_SIGNAL_PPS_OUT) |
185 | #define GPIO_EXTTS_OUT_CFG (GPIO_FUNC_EN | GPIO_FUNC_PTP | \ |
186 | GPIO_SIGNAL_PTP_TRIGGER) |
187 | |
188 | #define RGMII_PERIOD_PS 8000U |
189 | #define PS_PER_DEGREE div_u64(RGMII_PERIOD_PS, 360) |
190 | #define MIN_ID_PS 1644U |
191 | #define MAX_ID_PS 2260U |
192 | #define DEFAULT_ID_PS 2000U |
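/* The RGMII clock period at 1000 Mbit/s is 8 ns (8000 ps), so one degree of
 * phase shift corresponds to roughly 22 ps. Internal delays between
 * MIN_ID_PS and MAX_ID_PS are accepted; DEFAULT_ID_PS (about 90 degrees) is
 * used when the devicetree does not provide a value.
 */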
193 | |
194 | #define PPM_TO_SUBNS_INC(ppb, ptp_clk_period) div_u64(GENMASK_ULL(31, 0) * \ |
195 | (ppb) * (ptp_clk_period), NSEC_PER_SEC) |
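/* Each PTP clock tick advances the LTC by ptp_clk_period nanoseconds, so a
 * frequency offset of ppb parts per billion needs an extra
 * ppb * ptp_clk_period / 10^9 ns per tick. The rate adjustment registers
 * take that correction as a fractional nanosecond scaled by about 2^32
 * (GENMASK_ULL(31, 0) above); e.g. 1000 ppb at the 8 ns 1000BASE-T1 period
 * gives roughly 34360 sub-ns units.
 */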
196 | |
197 | #define NXP_C45_SKB_CB(skb) ((struct nxp_c45_skb_cb *)(skb)->cb) |
198 | |
199 | #define TJA11XX_REVERSE_MODE BIT(0) |
200 | |
201 | struct nxp_c45_phy; |
202 | |
203 | struct nxp_c45_skb_cb { |
204 | struct ptp_header *header; |
205 | unsigned int type; |
206 | }; |
207 | |
208 | #define NXP_C45_REG_FIELD(_reg, _devad, _offset, _size) \ |
209 | ((struct nxp_c45_reg_field) { \ |
210 | .reg = _reg, \ |
211 | .devad = _devad, \ |
212 | .offset = _offset, \ |
213 | .size = _size, \ |
214 | }) |
215 | |
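/* Describes a bit field inside an MMD register: the register address, the
 * MMD device address and the field's bit offset and width. Accessed through
 * the nxp_c45_{read,write,set,clear}_reg_field() helpers below.
 */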
216 | struct nxp_c45_reg_field { |
217 | u16 reg; |
218 | u8 devad; |
219 | u8 offset; |
220 | u8 size; |
221 | }; |
222 | |
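/* One hardware timestamp read back from the PHY, together with the PTP
 * header fields (domain number, message type, sequence id) used to match it
 * against a queued skb in nxp_c45_match_ts().
 */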
223 | struct nxp_c45_hwts { |
224 | u32 nsec; |
225 | u32 sec; |
226 | u8 domain_number; |
227 | u16 sequence_id; |
228 | u8 msg_type; |
229 | }; |
230 | |
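/* Collects the register addresses and bit fields that differ between the
 * supported PHY families (TJA1103/04 vs TJA1120/21) so the common PTP,
 * external trigger and cable test code can stay device-independent.
 */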
231 | struct nxp_c45_regmap { |
232 | /* PTP config regs. */ |
233 | u16 vend1_ptp_clk_period; |
234 | u16 vend1_event_msg_filt; |
235 | |
236 | /* LTC bits and regs. */ |
237 | struct nxp_c45_reg_field ltc_read; |
238 | struct nxp_c45_reg_field ltc_write; |
239 | struct nxp_c45_reg_field ltc_lock_ctrl; |
240 | u16 vend1_ltc_wr_nsec_0; |
241 | u16 vend1_ltc_wr_nsec_1; |
242 | u16 vend1_ltc_wr_sec_0; |
243 | u16 vend1_ltc_wr_sec_1; |
244 | u16 vend1_ltc_rd_nsec_0; |
245 | u16 vend1_ltc_rd_nsec_1; |
246 | u16 vend1_ltc_rd_sec_0; |
247 | u16 vend1_ltc_rd_sec_1; |
248 | u16 vend1_rate_adj_subns_0; |
249 | u16 vend1_rate_adj_subns_1; |
250 | |
251 | /* External trigger reg fields. */ |
252 | struct nxp_c45_reg_field irq_egr_ts_en; |
253 | struct nxp_c45_reg_field irq_egr_ts_status; |
254 | struct nxp_c45_reg_field domain_number; |
255 | struct nxp_c45_reg_field msg_type; |
256 | struct nxp_c45_reg_field sequence_id; |
257 | struct nxp_c45_reg_field sec_1_0; |
258 | struct nxp_c45_reg_field sec_4_2; |
259 | struct nxp_c45_reg_field nsec_15_0; |
260 | struct nxp_c45_reg_field nsec_29_16; |
261 | |
262 | /* PPS and EXT Trigger bits and regs. */ |
263 | struct nxp_c45_reg_field pps_enable; |
264 | struct nxp_c45_reg_field pps_polarity; |
265 | u16 vend1_ext_trg_data_0; |
266 | u16 vend1_ext_trg_data_1; |
267 | u16 vend1_ext_trg_data_2; |
268 | u16 vend1_ext_trg_data_3; |
269 | u16 vend1_ext_trg_ctrl; |
270 | |
271 | /* Cable test reg fields. */ |
272 | u16 cable_test; |
273 | struct nxp_c45_reg_field cable_test_valid; |
274 | struct nxp_c45_reg_field cable_test_result; |
275 | }; |
276 | |
277 | struct nxp_c45_phy_stats { |
278 | const char *name; |
279 | const struct nxp_c45_reg_field counter; |
280 | }; |
281 | |
282 | struct nxp_c45_phy_data { |
283 | const struct nxp_c45_regmap *regmap; |
284 | const struct nxp_c45_phy_stats *stats; |
285 | int n_stats; |
286 | u8 ptp_clk_period; |
287 | bool ext_ts_both_edges; |
288 | bool ack_ptp_irq; |
289 | void (*counters_enable)(struct phy_device *phydev); |
290 | bool (*get_egressts)(struct nxp_c45_phy *priv, |
291 | struct nxp_c45_hwts *hwts); |
292 | bool (*get_extts)(struct nxp_c45_phy *priv, struct timespec64 *extts); |
293 | void (*ptp_init)(struct phy_device *phydev); |
294 | void (*ptp_enable)(struct phy_device *phydev, bool enable); |
295 | void (*nmi_handler)(struct phy_device *phydev, |
296 | irqreturn_t *irq_status); |
297 | }; |
298 | |
299 | static const |
300 | struct nxp_c45_phy_data *nxp_c45_get_data(struct phy_device *phydev) |
301 | { |
302 | return phydev->drv->driver_data; |
303 | } |
304 | |
305 | static const |
306 | struct nxp_c45_regmap *nxp_c45_get_regmap(struct phy_device *phydev) |
307 | { |
308 | const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev); |
309 | |
310 | return phy_data->regmap; |
311 | } |
312 | |
313 | static int nxp_c45_read_reg_field(struct phy_device *phydev, |
314 | const struct nxp_c45_reg_field *reg_field) |
315 | { |
316 | u16 mask; |
317 | int ret; |
318 | |
319 | if (reg_field->size == 0) { |
320 | phydev_err(phydev, "Trying to read a reg field of size 0.\n"); |
321 | return -EINVAL; |
322 | } |
323 | |
324 | ret = phy_read_mmd(phydev, reg_field->devad, reg_field->reg); |
325 | if (ret < 0) |
326 | return ret; |
327 | |
328 | mask = reg_field->size == 1 ? BIT(reg_field->offset) : |
329 | GENMASK(reg_field->offset + reg_field->size - 1, |
330 | reg_field->offset); |
331 | ret &= mask; |
332 | ret >>= reg_field->offset; |
333 | |
334 | return ret; |
335 | } |
336 | |
337 | static int nxp_c45_write_reg_field(struct phy_device *phydev, |
338 | const struct nxp_c45_reg_field *reg_field, |
339 | u16 val) |
340 | { |
341 | u16 mask; |
342 | u16 set; |
343 | |
344 | if (reg_field->size == 0) { |
345 | phydev_err(phydev, "Trying to write a reg field of size 0.\n"); |
346 | return -EINVAL; |
347 | } |
348 | |
349 | mask = reg_field->size == 1 ? BIT(reg_field->offset) : |
350 | GENMASK(reg_field->offset + reg_field->size - 1, |
351 | reg_field->offset); |
352 | set = val << reg_field->offset; |
353 | |
354 | return phy_modify_mmd_changed(phydev, reg_field->devad, |
355 | reg_field->reg, mask, set); |
356 | } |
357 | |
358 | static int nxp_c45_set_reg_field(struct phy_device *phydev, |
359 | const struct nxp_c45_reg_field *reg_field) |
360 | { |
361 | if (reg_field->size != 1) { |
362 | phydev_err(phydev, "Trying to set a reg field of size different than 1.\n"); |
363 | return -EINVAL; |
364 | } |
365 | |
366 | return nxp_c45_write_reg_field(phydev, reg_field, 1); |
367 | } |
368 | |
369 | static int nxp_c45_clear_reg_field(struct phy_device *phydev, |
370 | const struct nxp_c45_reg_field *reg_field) |
371 | { |
372 | if (reg_field->size != 1) { |
373 | phydev_err(phydev, "Trying to clear a reg field of size different than 1.\n"); |
374 | return -EINVAL; |
375 | } |
376 | |
377 | return nxp_c45_write_reg_field(phydev, reg_field, 0); |
378 | } |
379 | |
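/* With no interrupt line wired up (phydev->irq <= 0) the egress timestamp
 * IRQ cannot be used, so TX timestamps are polled from the PTP aux worker
 * instead.
 */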
380 | static bool nxp_c45_poll_txts(struct phy_device *phydev) |
381 | { |
382 | return phydev->irq <= 0; |
383 | } |
384 | |
385 | static int _nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp, |
386 | struct timespec64 *ts, |
387 | struct ptp_system_timestamp *sts) |
388 | { |
389 | struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps); |
390 | const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev); |
391 | |
392 | nxp_c45_set_reg_field(priv->phydev, &regmap->ltc_read); |
393 | ts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, |
394 | regmap->vend1_ltc_rd_nsec_0); |
395 | ts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, |
396 | regmap->vend1_ltc_rd_nsec_1) << 16; |
397 | ts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, |
398 | regmap->vend1_ltc_rd_sec_0); |
399 | ts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, |
400 | regmap->vend1_ltc_rd_sec_1) << 16; |
401 | |
402 | return 0; |
403 | } |
404 | |
405 | static int nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp, |
406 | struct timespec64 *ts, |
407 | struct ptp_system_timestamp *sts) |
408 | { |
409 | struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps); |
410 | |
411 | mutex_lock(&priv->ptp_lock); |
412 | _nxp_c45_ptp_gettimex64(ptp, ts, sts); |
413 | mutex_unlock(&priv->ptp_lock); |
414 | |
415 | return 0; |
416 | } |
417 | |
418 | static int _nxp_c45_ptp_settime64(struct ptp_clock_info *ptp, |
419 | const struct timespec64 *ts) |
420 | { |
421 | struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps); |
422 | const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev); |
423 | |
424 | phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_0, |
425 | ts->tv_nsec); |
426 | phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_1, |
427 | ts->tv_nsec >> 16); |
428 | phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_0, |
429 | ts->tv_sec); |
430 | phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_1, |
431 | ts->tv_sec >> 16); |
432 | nxp_c45_set_reg_field(priv->phydev, &regmap->ltc_write); |
433 | |
434 | return 0; |
435 | } |
436 | |
437 | static int nxp_c45_ptp_settime64(struct ptp_clock_info *ptp, |
438 | const struct timespec64 *ts) |
439 | { |
440 | struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps); |
441 | |
442 | mutex_lock(&priv->ptp_lock); |
443 | _nxp_c45_ptp_settime64(ptp, ts); |
444 | mutex_unlock(&priv->ptp_lock); |
445 | |
446 | return 0; |
447 | } |
448 | |
449 | static int nxp_c45_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) |
450 | { |
451 | struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps); |
452 | const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev); |
453 | const struct nxp_c45_regmap *regmap = data->regmap; |
454 | s32 ppb = scaled_ppm_to_ppb(scaled_ppm); |
455 | u64 subns_inc_val; |
456 | bool inc; |
457 | |
458 | mutex_lock(&priv->ptp_lock); |
459 | inc = ppb >= 0; |
460 | ppb = abs(ppb); |
461 | |
462 | subns_inc_val = PPM_TO_SUBNS_INC(ppb, data->ptp_clk_period); |
463 | |
464 | phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, |
465 | regmap->vend1_rate_adj_subns_0, |
466 | subns_inc_val); |
467 | subns_inc_val >>= 16; |
468 | subns_inc_val |= CLK_RATE_ADJ_LD; |
469 | if (inc) |
470 | subns_inc_val |= CLK_RATE_ADJ_DIR; |
471 | |
472 | phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, |
473 | regmap->vend1_rate_adj_subns_1, |
474 | subns_inc_val); |
475 | mutex_unlock(&priv->ptp_lock); |
476 | |
477 | return 0; |
478 | } |
479 | |
480 | static int nxp_c45_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) |
481 | { |
482 | struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps); |
483 | struct timespec64 now, then; |
484 | |
485 | mutex_lock(&priv->ptp_lock); |
486 | then = ns_to_timespec64(delta); |
487 | _nxp_c45_ptp_gettimex64(ptp, &now, NULL); |
488 | now = timespec64_add(now, then); |
489 | _nxp_c45_ptp_settime64(ptp, &now); |
490 | mutex_unlock(&priv->ptp_lock); |
491 | |
492 | return 0; |
493 | } |
494 | |
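/* The hardware reports only the nanoseconds and the two least significant
 * bits of the seconds counter (TS_SEC_MASK) with each timestamp. The full
 * seconds value is rebuilt from the current LTC time, stepping back one
 * 4-second wrap if the counter rolled over between the timestamp and the
 * LTC read; e.g. LTC seconds 100 with timestamp bits 3 yields 99.
 */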
495 | static void nxp_c45_reconstruct_ts(struct timespec64 *ts, |
496 | struct nxp_c45_hwts *hwts) |
497 | { |
498 | ts->tv_nsec = hwts->nsec; |
499 | if ((ts->tv_sec & TS_SEC_MASK) < (hwts->sec & TS_SEC_MASK)) |
500 | ts->tv_sec -= TS_SEC_MASK + 1; |
501 | ts->tv_sec &= ~TS_SEC_MASK; |
502 | ts->tv_sec |= hwts->sec & TS_SEC_MASK; |
503 | } |
504 | |
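/* An skb is matched to a hardware timestamp by comparing the sequence id,
 * message type and domain number of its PTP header.
 */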
505 | static bool nxp_c45_match_ts(struct ptp_header *header, |
506 | struct nxp_c45_hwts *hwts, |
507 | unsigned int type) |
508 | { |
509 | return ntohs(header->sequence_id) == hwts->sequence_id && |
510 | ptp_get_msgtype(header, type) == hwts->msg_type && |
511 | header->domain_number == hwts->domain_number; |
512 | } |
513 | |
514 | static bool nxp_c45_get_extts(struct nxp_c45_phy *priv, |
515 | struct timespec64 *extts) |
516 | { |
517 | const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev); |
518 | |
519 | extts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, |
520 | regmap->vend1_ext_trg_data_0); |
521 | extts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, |
522 | regmap->vend1_ext_trg_data_1) << 16; |
523 | extts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, |
524 | regmap->vend1_ext_trg_data_2); |
525 | extts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, |
526 | regmap->vend1_ext_trg_data_3) << 16; |
527 | phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, |
528 | regmap->vend1_ext_trg_ctrl, RING_DONE); |
529 | |
530 | return true; |
531 | } |
532 | |
533 | static bool tja1120_extts_is_valid(struct phy_device *phydev) |
534 | { |
535 | bool valid; |
536 | int reg; |
537 | |
538 | reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, |
539 | TJA1120_VEND1_PTP_TRIG_DATA_S); |
540 | valid = !!(reg & TJA1120_TS_VALID); |
541 | |
542 | return valid; |
543 | } |
544 | |
545 | static bool tja1120_get_extts(struct nxp_c45_phy *priv, |
546 | struct timespec64 *extts) |
547 | { |
548 | const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev: priv->phydev); |
549 | struct phy_device *phydev = priv->phydev; |
550 | bool more_ts; |
551 | bool valid; |
552 | u16 reg; |
553 | |
554 | reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, |
555 | regnum: regmap->vend1_ext_trg_ctrl); |
556 | more_ts = !!(reg & TJA1120_MORE_TS); |
557 | |
558 | valid = tja1120_extts_is_valid(phydev); |
559 | if (!valid) { |
560 | if (!more_ts) |
561 | goto tja1120_get_extts_out; |
562 | |
563 | /* Bug workaround for TJA1120 engineering samples: move the new |
564 | * timestamp from the FIFO to the buffer. |
565 | */ |
566 | phy_write_mmd(phydev, MDIO_MMD_VEND1, |
567 | regnum: regmap->vend1_ext_trg_ctrl, RING_DONE); |
568 | valid = tja1120_extts_is_valid(phydev); |
569 | if (!valid) |
570 | goto tja1120_get_extts_out; |
571 | } |
572 | |
573 | nxp_c45_get_extts(priv, extts); |
574 | tja1120_get_extts_out: |
575 | return valid; |
576 | } |
577 | |
578 | static void nxp_c45_read_egress_ts(struct nxp_c45_phy *priv, |
579 | struct nxp_c45_hwts *hwts) |
580 | { |
581 | const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev: priv->phydev); |
582 | struct phy_device *phydev = priv->phydev; |
583 | |
584 | hwts->domain_number = |
585 | nxp_c45_read_reg_field(phydev, reg_field: ®map->domain_number); |
586 | hwts->msg_type = |
587 | nxp_c45_read_reg_field(phydev, reg_field: ®map->msg_type); |
588 | hwts->sequence_id = |
589 | nxp_c45_read_reg_field(phydev, reg_field: ®map->sequence_id); |
590 | hwts->nsec = |
591 | nxp_c45_read_reg_field(phydev, reg_field: ®map->nsec_15_0); |
592 | hwts->nsec |= |
593 | nxp_c45_read_reg_field(phydev, reg_field: ®map->nsec_29_16) << 16; |
594 | hwts->sec = nxp_c45_read_reg_field(phydev, reg_field: ®map->sec_1_0); |
595 | hwts->sec |= nxp_c45_read_reg_field(phydev, reg_field: ®map->sec_4_2) << 2; |
596 | } |
597 | |
598 | static bool nxp_c45_get_hwtxts(struct nxp_c45_phy *priv, |
599 | struct nxp_c45_hwts *hwts) |
600 | { |
601 | bool valid; |
602 | u16 reg; |
603 | |
604 | mutex_lock(&priv->ptp_lock); |
605 | phy_write_mmd(phydev: priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_CTRL, |
606 | RING_DONE); |
607 | reg = phy_read_mmd(phydev: priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_0); |
608 | valid = !!(reg & RING_DATA_0_TS_VALID); |
609 | if (!valid) |
610 | goto nxp_c45_get_hwtxts_out; |
611 | |
612 | nxp_c45_read_egress_ts(priv, hwts); |
613 | nxp_c45_get_hwtxts_out: |
614 | mutex_unlock(lock: &priv->ptp_lock); |
615 | return valid; |
616 | } |
617 | |
618 | static bool tja1120_egress_ts_is_valid(struct phy_device *phydev) |
619 | { |
620 | bool valid; |
621 | u16 reg; |
622 | |
623 | reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S); |
624 | valid = !!(reg & TJA1120_TS_VALID); |
625 | |
626 | return valid; |
627 | } |
628 | |
629 | static bool tja1120_get_hwtxts(struct nxp_c45_phy *priv, |
630 | struct nxp_c45_hwts *hwts) |
631 | { |
632 | struct phy_device *phydev = priv->phydev; |
633 | bool more_ts; |
634 | bool valid; |
635 | u16 reg; |
636 | |
637 | mutex_lock(&priv->ptp_lock); |
638 | reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_END); |
639 | more_ts = !!(reg & TJA1120_MORE_TS); |
640 | valid = tja1120_egress_ts_is_valid(phydev); |
641 | if (!valid) { |
642 | if (!more_ts) |
643 | goto tja1120_get_hwtxts_out; |
644 | |
645 | /* Bug workaround for TJA1120 engineering samples: move the |
646 | * new timestamp from the FIFO to the buffer. |
647 | */ |
648 | phy_write_mmd(phydev, MDIO_MMD_VEND1, |
649 | TJA1120_EGRESS_TS_END, TJA1120_TS_VALID); |
650 | valid = tja1120_egress_ts_is_valid(phydev); |
651 | if (!valid) |
652 | goto tja1120_get_hwtxts_out; |
653 | } |
654 | nxp_c45_read_egress_ts(priv, hwts); |
655 | phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S, |
656 | TJA1120_TS_VALID); |
657 | tja1120_get_hwtxts_out: |
658 | mutex_unlock(lock: &priv->ptp_lock); |
659 | return valid; |
660 | } |
661 | |
662 | static void nxp_c45_process_txts(struct nxp_c45_phy *priv, |
663 | struct nxp_c45_hwts *txts) |
664 | { |
665 | struct sk_buff *skb, *tmp, *skb_match = NULL; |
666 | struct skb_shared_hwtstamps shhwtstamps; |
667 | struct timespec64 ts; |
668 | unsigned long flags; |
669 | bool ts_match; |
670 | s64 ts_ns; |
671 | |
672 | spin_lock_irqsave(&priv->tx_queue.lock, flags); |
673 | skb_queue_walk_safe(&priv->tx_queue, skb, tmp) { |
674 | ts_match = nxp_c45_match_ts(NXP_C45_SKB_CB(skb)->header, hwts: txts, |
675 | NXP_C45_SKB_CB(skb)->type); |
676 | if (!ts_match) |
677 | continue; |
678 | skb_match = skb; |
679 | __skb_unlink(skb, list: &priv->tx_queue); |
680 | break; |
681 | } |
682 | spin_unlock_irqrestore(lock: &priv->tx_queue.lock, flags); |
683 | |
684 | if (skb_match) { |
685 | nxp_c45_ptp_gettimex64(ptp: &priv->caps, ts: &ts, NULL); |
686 | nxp_c45_reconstruct_ts(ts: &ts, hwts: txts); |
687 | memset(&shhwtstamps, 0, sizeof(shhwtstamps)); |
688 | ts_ns = timespec64_to_ns(ts: &ts); |
689 | shhwtstamps.hwtstamp = ns_to_ktime(ns: ts_ns); |
690 | skb_complete_tx_timestamp(skb: skb_match, hwtstamps: &shhwtstamps); |
691 | } else { |
692 | phydev_warn(priv->phydev, |
693 | "the tx timestamp doesn't match with any skb\n"); |
694 | } |
695 | } |
696 | |
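/* PTP auxiliary worker: drains the egress timestamp FIFO when timestamps are
 * polled rather than interrupt driven, completes RX timestamps from the
 * partial timestamp the PHY stored in the reserved2 field of the PTP header,
 * and reports external trigger events. Returns 1 (one jiffy) to run again
 * shortly, or a negative value when there is nothing left to poll.
 */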
697 | static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp) |
698 | { |
699 | struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps); |
700 | const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev: priv->phydev); |
701 | bool poll_txts = nxp_c45_poll_txts(phydev: priv->phydev); |
702 | struct skb_shared_hwtstamps *shhwtstamps_rx; |
703 | struct ptp_clock_event event; |
704 | struct nxp_c45_hwts hwts; |
705 | bool reschedule = false; |
706 | struct timespec64 ts; |
707 | struct sk_buff *skb; |
708 | bool ts_valid; |
709 | u32 ts_raw; |
710 | |
711 | while (!skb_queue_empty_lockless(list: &priv->tx_queue) && poll_txts) { |
712 | ts_valid = data->get_egressts(priv, &hwts); |
713 | if (unlikely(!ts_valid)) { |
714 | /* Still more skbs in the queue */ |
715 | reschedule = true; |
716 | break; |
717 | } |
718 | |
719 | nxp_c45_process_txts(priv, txts: &hwts); |
720 | } |
721 | |
722 | while ((skb = skb_dequeue(list: &priv->rx_queue)) != NULL) { |
723 | nxp_c45_ptp_gettimex64(ptp: &priv->caps, ts: &ts, NULL); |
724 | ts_raw = __be32_to_cpu(NXP_C45_SKB_CB(skb)->header->reserved2); |
725 | hwts.sec = ts_raw >> 30; |
726 | hwts.nsec = ts_raw & GENMASK(29, 0); |
727 | nxp_c45_reconstruct_ts(ts: &ts, hwts: &hwts); |
728 | shhwtstamps_rx = skb_hwtstamps(skb); |
729 | shhwtstamps_rx->hwtstamp = ns_to_ktime(ns: timespec64_to_ns(ts: &ts)); |
730 | NXP_C45_SKB_CB(skb)->header->reserved2 = 0; |
731 | netif_rx(skb); |
732 | } |
733 | |
734 | if (priv->extts) { |
735 | ts_valid = data->get_extts(priv, &ts); |
736 | if (ts_valid && timespec64_compare(lhs: &ts, rhs: &priv->extts_ts) != 0) { |
737 | priv->extts_ts = ts; |
738 | event.index = priv->extts_index; |
739 | event.type = PTP_CLOCK_EXTTS; |
740 | event.timestamp = ns_to_ktime(ns: timespec64_to_ns(ts: &ts)); |
741 | ptp_clock_event(ptp: priv->ptp_clock, event: &event); |
742 | } |
743 | reschedule = true; |
744 | } |
745 | |
746 | return reschedule ? 1 : -1; |
747 | } |
748 | |
749 | static void nxp_c45_gpio_config(struct nxp_c45_phy *priv, |
750 | int pin, u16 pin_cfg) |
751 | { |
752 | struct phy_device *phydev = priv->phydev; |
753 | |
754 | phy_write_mmd(phydev, MDIO_MMD_VEND1, |
755 | VEND1_GPIO_FUNC_CONFIG_BASE + pin, pin_cfg); |
756 | } |
757 | |
758 | static int nxp_c45_perout_enable(struct nxp_c45_phy *priv, |
759 | struct ptp_perout_request *perout, int on) |
760 | { |
761 | const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev: priv->phydev); |
762 | struct phy_device *phydev = priv->phydev; |
763 | int pin; |
764 | |
765 | pin = ptp_find_pin(ptp: priv->ptp_clock, func: PTP_PF_PEROUT, chan: perout->index); |
766 | if (pin < 0) |
767 | return pin; |
768 | |
769 | if (!on) { |
770 | nxp_c45_clear_reg_field(phydev: priv->phydev, |
771 | reg_field: ®map->pps_enable); |
772 | nxp_c45_clear_reg_field(phydev: priv->phydev, |
773 | reg_field: ®map->pps_polarity); |
774 | |
775 | nxp_c45_gpio_config(priv, pin, GPIO_DISABLE); |
776 | |
777 | return 0; |
778 | } |
779 | |
780 | /* The PPS signal is fixed to 1 second and is always generated when the |
781 | * seconds counter is incremented. The start time is not configurable. |
782 | * If the clock is adjusted, the PPS signal is automatically readjusted. |
783 | */ |
784 | if (perout->period.sec != 1 || perout->period.nsec != 0) { |
785 | phydev_warn(phydev, "The period can be set only to 1 second."); |
786 | return -EINVAL; |
787 | } |
788 | |
789 | if (!(perout->flags & PTP_PEROUT_PHASE)) { |
790 | if (perout->start.sec != 0 || perout->start.nsec != 0) { |
791 | phydev_warn(phydev, "The start time is not configurable. Should be set to 0 seconds and 0 nanoseconds."); |
792 | return -EINVAL; |
793 | } |
794 | } else { |
795 | if (perout->phase.nsec != 0 && |
796 | perout->phase.nsec != (NSEC_PER_SEC >> 1)) { |
797 | phydev_warn(phydev, "The phase can be set only to 0 or 500000000 nanoseconds."); |
798 | return -EINVAL; |
799 | } |
800 | |
801 | if (perout->phase.nsec == 0) |
802 | nxp_c45_clear_reg_field(phydev: priv->phydev, |
803 | reg_field: ®map->pps_polarity); |
804 | else |
805 | nxp_c45_set_reg_field(phydev: priv->phydev, |
806 | reg_field: ®map->pps_polarity); |
807 | } |
808 | |
809 | nxp_c45_gpio_config(priv, pin, GPIO_PPS_OUT_CFG); |
810 | |
811 | nxp_c45_set_reg_field(phydev: priv->phydev, reg_field: ®map->pps_enable); |
812 | |
813 | return 0; |
814 | } |
815 | |
816 | static void nxp_c45_set_rising_or_falling(struct phy_device *phydev, |
817 | struct ptp_extts_request *extts) |
818 | { |
819 | if (extts->flags & PTP_RISING_EDGE) |
820 | phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, |
821 | VEND1_PTP_CONFIG, EXT_TRG_EDGE); |
822 | |
823 | if (extts->flags & PTP_FALLING_EDGE) |
824 | phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, |
825 | VEND1_PTP_CONFIG, EXT_TRG_EDGE); |
826 | } |
827 | |
828 | static void nxp_c45_set_rising_and_falling(struct phy_device *phydev, |
829 | struct ptp_extts_request *extts) |
830 | { |
831 | /* PTP_EXTTS_REQUEST may have only the PTP_ENABLE_FEATURE flag set. In |
832 | * this case external ts will be enabled on rising edge. |
833 | */ |
834 | if (extts->flags & PTP_RISING_EDGE || |
835 | extts->flags == PTP_ENABLE_FEATURE) |
836 | phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, |
837 | TJA1120_SYNC_TRIG_FILTER, |
838 | PTP_TRIG_RISE_TS); |
839 | else |
840 | phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, |
841 | TJA1120_SYNC_TRIG_FILTER, |
842 | PTP_TRIG_RISE_TS); |
843 | |
844 | if (extts->flags & PTP_FALLING_EDGE) |
845 | phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, |
846 | TJA1120_SYNC_TRIG_FILTER, |
847 | PTP_TRIG_FALLING_TS); |
848 | else |
849 | phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, |
850 | TJA1120_SYNC_TRIG_FILTER, |
851 | PTP_TRIG_FALLING_TS); |
852 | } |
853 | |
854 | static int nxp_c45_extts_enable(struct nxp_c45_phy *priv, |
855 | struct ptp_extts_request *extts, int on) |
856 | { |
857 | const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev: priv->phydev); |
858 | int pin; |
859 | |
860 | /* Sampling on both edges is not supported */ |
861 | if ((extts->flags & PTP_RISING_EDGE) && |
862 | (extts->flags & PTP_FALLING_EDGE) && |
863 | !data->ext_ts_both_edges) |
864 | return -EOPNOTSUPP; |
865 | |
866 | pin = ptp_find_pin(ptp: priv->ptp_clock, func: PTP_PF_EXTTS, chan: extts->index); |
867 | if (pin < 0) |
868 | return pin; |
869 | |
870 | if (!on) { |
871 | nxp_c45_gpio_config(priv, pin, GPIO_DISABLE); |
872 | priv->extts = false; |
873 | |
874 | return 0; |
875 | } |
876 | |
877 | if (data->ext_ts_both_edges) |
878 | nxp_c45_set_rising_and_falling(phydev: priv->phydev, extts); |
879 | else |
880 | nxp_c45_set_rising_or_falling(phydev: priv->phydev, extts); |
881 | |
882 | nxp_c45_gpio_config(priv, pin, GPIO_EXTTS_OUT_CFG); |
883 | priv->extts = true; |
884 | priv->extts_index = extts->index; |
885 | ptp_schedule_worker(ptp: priv->ptp_clock, delay: 0); |
886 | |
887 | return 0; |
888 | } |
889 | |
890 | static int nxp_c45_ptp_enable(struct ptp_clock_info *ptp, |
891 | struct ptp_clock_request *req, int on) |
892 | { |
893 | struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps); |
894 | |
895 | switch (req->type) { |
896 | case PTP_CLK_REQ_EXTTS: |
897 | return nxp_c45_extts_enable(priv, &req->extts, on); |
898 | case PTP_CLK_REQ_PEROUT: |
899 | return nxp_c45_perout_enable(priv, &req->perout, on); |
900 | default: |
901 | return -EOPNOTSUPP; |
902 | } |
903 | } |
904 | |
905 | static struct ptp_pin_desc nxp_c45_ptp_pins[] = { |
906 | { "nxp_c45_gpio0", 0, PTP_PF_NONE}, |
907 | { "nxp_c45_gpio1", 1, PTP_PF_NONE}, |
908 | { "nxp_c45_gpio2", 2, PTP_PF_NONE}, |
909 | { "nxp_c45_gpio3", 3, PTP_PF_NONE}, |
910 | { "nxp_c45_gpio4", 4, PTP_PF_NONE}, |
911 | { "nxp_c45_gpio5", 5, PTP_PF_NONE}, |
912 | { "nxp_c45_gpio6", 6, PTP_PF_NONE}, |
913 | { "nxp_c45_gpio7", 7, PTP_PF_NONE}, |
914 | { "nxp_c45_gpio8", 8, PTP_PF_NONE}, |
915 | { "nxp_c45_gpio9", 9, PTP_PF_NONE}, |
916 | { "nxp_c45_gpio10", 10, PTP_PF_NONE}, |
917 | { "nxp_c45_gpio11", 11, PTP_PF_NONE}, |
918 | }; |
919 | |
920 | static int nxp_c45_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin, |
921 | enum ptp_pin_function func, unsigned int chan) |
922 | { |
923 | if (pin >= ARRAY_SIZE(nxp_c45_ptp_pins)) |
924 | return -EINVAL; |
925 | |
926 | switch (func) { |
927 | case PTP_PF_NONE: |
928 | case PTP_PF_PEROUT: |
929 | case PTP_PF_EXTTS: |
930 | break; |
931 | default: |
932 | return -EOPNOTSUPP; |
933 | } |
934 | |
935 | return 0; |
936 | } |
937 | |
938 | static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv) |
939 | { |
940 | priv->caps = (struct ptp_clock_info) { |
941 | .owner = THIS_MODULE, |
942 | .name = "NXP C45 PHC", |
943 | .max_adj = 16666666, |
944 | .adjfine = nxp_c45_ptp_adjfine, |
945 | .adjtime = nxp_c45_ptp_adjtime, |
946 | .gettimex64 = nxp_c45_ptp_gettimex64, |
947 | .settime64 = nxp_c45_ptp_settime64, |
948 | .enable = nxp_c45_ptp_enable, |
949 | .verify = nxp_c45_ptp_verify_pin, |
950 | .do_aux_work = nxp_c45_do_aux_work, |
951 | .pin_config = nxp_c45_ptp_pins, |
952 | .n_pins = ARRAY_SIZE(nxp_c45_ptp_pins), |
953 | .n_ext_ts = 1, |
954 | .n_per_out = 1, |
955 | .supported_extts_flags = PTP_RISING_EDGE | |
956 | PTP_FALLING_EDGE | |
957 | PTP_STRICT_FLAGS, |
958 | .supported_perout_flags = PTP_PEROUT_PHASE, |
959 | }; |
960 | |
961 | priv->ptp_clock = ptp_clock_register(&priv->caps, |
962 | &priv->phydev->mdio.dev); |
963 | |
964 | if (IS_ERR(priv->ptp_clock)) |
965 | return PTR_ERR(priv->ptp_clock); |
966 | |
967 | if (!priv->ptp_clock) |
968 | return -ENOMEM; |
969 | |
970 | return 0; |
971 | } |
972 | |
973 | static void nxp_c45_txtstamp(struct mii_timestamper *mii_ts, |
974 | struct sk_buff *skb, int type) |
975 | { |
976 | struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy, |
977 | mii_ts); |
978 | |
979 | switch (priv->hwts_tx) { |
980 | case HWTSTAMP_TX_ON: |
981 | NXP_C45_SKB_CB(skb)->type = type; |
982 | NXP_C45_SKB_CB(skb)->header = ptp_parse_header(skb, type); |
983 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
984 | skb_queue_tail(&priv->tx_queue, skb); |
985 | if (nxp_c45_poll_txts(priv->phydev)) |
986 | ptp_schedule_worker(priv->ptp_clock, 0); |
987 | break; |
988 | case HWTSTAMP_TX_OFF: |
989 | default: |
990 | kfree_skb(skb); |
991 | break; |
992 | } |
993 | } |
994 | |
995 | static bool nxp_c45_rxtstamp(struct mii_timestamper *mii_ts, |
996 | struct sk_buff *skb, int type) |
997 | { |
998 | struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy, |
999 | mii_ts); |
1000 | struct ptp_header *header = ptp_parse_header(skb, type); |
1001 | |
1002 | if (!header) |
1003 | return false; |
1004 | |
1005 | if (!priv->hwts_rx) |
1006 | return false; |
1007 | |
1008 | NXP_C45_SKB_CB(skb)->header = header; |
1009 | skb_queue_tail(&priv->rx_queue, skb); |
1010 | ptp_schedule_worker(priv->ptp_clock, 0); |
1011 | |
1012 | return true; |
1013 | } |
1014 | |
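/* Hardware timestamping configuration: programs the PTP event message
 * filter, enables or disables the PTP engine through the device-specific
 * ptp_enable() hook, and arms the egress timestamp IRQ only when the
 * timestamps are not polled.
 */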
1015 | static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts, |
1016 | struct kernel_hwtstamp_config *cfg, |
1017 | struct netlink_ext_ack *extack) |
1018 | { |
1019 | struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy, |
1020 | mii_ts); |
1021 | struct phy_device *phydev = priv->phydev; |
1022 | const struct nxp_c45_phy_data *data; |
1023 | |
1024 | if (cfg->tx_type < 0 || cfg->tx_type > HWTSTAMP_TX_ON) |
1025 | return -ERANGE; |
1026 | |
1027 | data = nxp_c45_get_data(phydev); |
1028 | priv->hwts_tx = cfg->tx_type; |
1029 | |
1030 | switch (cfg->rx_filter) { |
1031 | case HWTSTAMP_FILTER_NONE: |
1032 | priv->hwts_rx = 0; |
1033 | break; |
1034 | case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: |
1035 | case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: |
1036 | case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: |
1037 | priv->hwts_rx = 1; |
1038 | cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; |
1039 | break; |
1040 | default: |
1041 | return -ERANGE; |
1042 | } |
1043 | |
1044 | if (priv->hwts_rx || priv->hwts_tx) { |
1045 | phy_write_mmd(phydev, MDIO_MMD_VEND1, |
1046 | regnum: data->regmap->vend1_event_msg_filt, |
1047 | EVENT_MSG_FILT_ALL); |
1048 | data->ptp_enable(phydev, true); |
1049 | } else { |
1050 | phy_write_mmd(phydev, MDIO_MMD_VEND1, |
1051 | regnum: data->regmap->vend1_event_msg_filt, |
1052 | EVENT_MSG_FILT_NONE); |
1053 | data->ptp_enable(phydev, false); |
1054 | } |
1055 | |
1056 | if (nxp_c45_poll_txts(phydev: priv->phydev)) |
1057 | goto nxp_c45_no_ptp_irq; |
1058 | |
1059 | if (priv->hwts_tx) |
1060 | nxp_c45_set_reg_field(phydev, reg_field: &data->regmap->irq_egr_ts_en); |
1061 | else |
1062 | nxp_c45_clear_reg_field(phydev, reg_field: &data->regmap->irq_egr_ts_en); |
1063 | |
1064 | nxp_c45_no_ptp_irq: |
1065 | return 0; |
1066 | } |
1067 | |
1068 | static int nxp_c45_ts_info(struct mii_timestamper *mii_ts, |
1069 | struct kernel_ethtool_ts_info *ts_info) |
1070 | { |
1071 | struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy, |
1072 | mii_ts); |
1073 | |
1074 | ts_info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | |
1075 | SOF_TIMESTAMPING_RX_HARDWARE | |
1076 | SOF_TIMESTAMPING_RAW_HARDWARE; |
1077 | ts_info->phc_index = ptp_clock_index(priv->ptp_clock); |
1078 | ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); |
1079 | ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | |
1080 | (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | |
1081 | (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | |
1082 | (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT); |
1083 | |
1084 | return 0; |
1085 | } |
1086 | |
1087 | static const struct nxp_c45_phy_stats common_hw_stats[] = { |
1088 | { "phy_link_status_drop_cnt", |
1089 | NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 8, 6), }, |
1090 | { "phy_link_availability_drop_cnt", |
1091 | NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 0, 6), }, |
1092 | { "phy_link_loss_cnt", |
1093 | NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 10, 6), }, |
1094 | { "phy_link_failure_cnt", |
1095 | NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 0, 10), }, |
1096 | { "phy_symbol_error_cnt", |
1097 | NXP_C45_REG_FIELD(0x8350, MDIO_MMD_VEND1, 0, 16) }, |
1098 | }; |
1099 | |
1100 | static const struct nxp_c45_phy_stats tja1103_hw_stats[] = { |
1101 | { "rx_preamble_count", |
1102 | NXP_C45_REG_FIELD(0xAFCE, MDIO_MMD_VEND1, 0, 6), }, |
1103 | { "tx_preamble_count", |
1104 | NXP_C45_REG_FIELD(0xAFCF, MDIO_MMD_VEND1, 0, 6), }, |
1105 | { "rx_ipg_length", |
1106 | NXP_C45_REG_FIELD(0xAFD0, MDIO_MMD_VEND1, 0, 9), }, |
1107 | { "tx_ipg_length", |
1108 | NXP_C45_REG_FIELD(0xAFD1, MDIO_MMD_VEND1, 0, 9), }, |
1109 | }; |
1110 | |
1111 | static const struct nxp_c45_phy_stats tja1120_hw_stats[] = { |
1112 | { "phy_symbol_error_cnt_ext", |
1113 | NXP_C45_REG_FIELD(0x8351, MDIO_MMD_VEND1, 0, 14) }, |
1114 | { "tx_frames_xtd", |
1115 | NXP_C45_REG_FIELD(0xACA1, MDIO_MMD_VEND1, 0, 8), }, |
1116 | { "tx_frames", |
1117 | NXP_C45_REG_FIELD(0xACA0, MDIO_MMD_VEND1, 0, 16), }, |
1118 | { "rx_frames_xtd", |
1119 | NXP_C45_REG_FIELD(0xACA3, MDIO_MMD_VEND1, 0, 8), }, |
1120 | { "rx_frames", |
1121 | NXP_C45_REG_FIELD(0xACA2, MDIO_MMD_VEND1, 0, 16), }, |
1122 | { "tx_lost_frames_xtd", |
1123 | NXP_C45_REG_FIELD(0xACA5, MDIO_MMD_VEND1, 0, 8), }, |
1124 | { "tx_lost_frames", |
1125 | NXP_C45_REG_FIELD(0xACA4, MDIO_MMD_VEND1, 0, 16), }, |
1126 | { "rx_lost_frames_xtd", |
1127 | NXP_C45_REG_FIELD(0xACA7, MDIO_MMD_VEND1, 0, 8), }, |
1128 | { "rx_lost_frames", |
1129 | NXP_C45_REG_FIELD(0xACA6, MDIO_MMD_VEND1, 0, 16), }, |
1130 | }; |
1131 | |
1132 | static int nxp_c45_get_sset_count(struct phy_device *phydev) |
1133 | { |
1134 | const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev); |
1135 | |
1136 | return ARRAY_SIZE(common_hw_stats) + (phy_data ? phy_data->n_stats : 0); |
1137 | } |
1138 | |
1139 | static void nxp_c45_get_strings(struct phy_device *phydev, u8 *data) |
1140 | { |
1141 | const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev); |
1142 | size_t count = nxp_c45_get_sset_count(phydev); |
1143 | size_t idx; |
1144 | size_t i; |
1145 | |
1146 | for (i = 0; i < count; i++) { |
1147 | if (i < ARRAY_SIZE(common_hw_stats)) { |
1148 | ethtool_puts(&data, common_hw_stats[i].name); |
1149 | continue; |
1150 | } |
1151 | idx = i - ARRAY_SIZE(common_hw_stats); |
1152 | ethtool_puts(&data, phy_data->stats[idx].name); |
1153 | } |
1154 | } |
1155 | |
1156 | static void nxp_c45_get_stats(struct phy_device *phydev, |
1157 | struct ethtool_stats *stats, u64 *data) |
1158 | { |
1159 | const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev); |
1160 | size_t count = nxp_c45_get_sset_count(phydev); |
1161 | const struct nxp_c45_reg_field *reg_field; |
1162 | size_t idx; |
1163 | size_t i; |
1164 | int ret; |
1165 | |
1166 | for (i = 0; i < count; i++) { |
1167 | if (i < ARRAY_SIZE(common_hw_stats)) { |
1168 | reg_field = &common_hw_stats[i].counter; |
1169 | } else { |
1170 | idx = i - ARRAY_SIZE(common_hw_stats); |
1171 | reg_field = &phy_data->stats[idx].counter; |
1172 | } |
1173 | |
1174 | ret = nxp_c45_read_reg_field(phydev, reg_field); |
1175 | if (ret < 0) |
1176 | data[i] = U64_MAX; |
1177 | else |
1178 | data[i] = ret; |
1179 | } |
1180 | } |
1181 | |
1182 | static int nxp_c45_config_enable(struct phy_device *phydev) |
1183 | { |
1184 | phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL, |
1185 | DEVICE_CONTROL_CONFIG_GLOBAL_EN | |
1186 | DEVICE_CONTROL_CONFIG_ALL_EN); |
1187 | usleep_range(400, 450); |
1188 | |
1189 | phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_CONTROL, |
1190 | PORT_CONTROL_EN); |
1191 | phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL, |
1192 | PHY_CONFIG_EN); |
1193 | phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_INFRA_CONTROL, |
1194 | PORT_INFRA_CONTROL_EN); |
1195 | |
1196 | return 0; |
1197 | } |
1198 | |
1199 | static int nxp_c45_start_op(struct phy_device *phydev) |
1200 | { |
1201 | return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL, |
1202 | PHY_START_OP); |
1203 | } |
1204 | |
1205 | static int nxp_c45_config_intr(struct phy_device *phydev) |
1206 | { |
1207 | int ret; |
1208 | |
1209 | if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { |
1210 | ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, |
1211 | VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS); |
1212 | if (ret) |
1213 | return ret; |
1214 | |
1215 | return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, |
1216 | VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT); |
1217 | } |
1218 | |
1219 | ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, |
1220 | VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS); |
1221 | if (ret) |
1222 | return ret; |
1223 | |
1224 | return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, |
1225 | VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT); |
1226 | } |
1227 | |
1228 | static int tja1103_config_intr(struct phy_device *phydev) |
1229 | { |
1230 | int ret; |
1231 | |
1232 | /* We can't disable the FUSA IRQ for TJA1103, but we can clean it up. */ |
1233 | ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_ALWAYS_ACCESSIBLE, |
1234 | FUSA_PASS); |
1235 | if (ret) |
1236 | return ret; |
1237 | |
1238 | return nxp_c45_config_intr(phydev); |
1239 | } |
1240 | |
1241 | static int tja1120_config_intr(struct phy_device *phydev) |
1242 | { |
1243 | int ret; |
1244 | |
1245 | if (phydev->interrupts == PHY_INTERRUPT_ENABLED) |
1246 | ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, |
1247 | TJA1120_GLOBAL_INFRA_IRQ_EN, |
1248 | TJA1120_DEV_BOOT_DONE); |
1249 | else |
1250 | ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, |
1251 | TJA1120_GLOBAL_INFRA_IRQ_EN, |
1252 | TJA1120_DEV_BOOT_DONE); |
1253 | if (ret) |
1254 | return ret; |
1255 | |
1256 | return nxp_c45_config_intr(phydev); |
1257 | } |
1258 | |
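/* Threaded IRQ handler: acknowledges link events, drains the egress
 * timestamp FIFO (acking the PTP IRQ first on devices that need it), then
 * lets the device-specific NMI handler and the MACsec code inspect the
 * remaining interrupt sources.
 */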
1259 | static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev) |
1260 | { |
1261 | const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev); |
1262 | struct nxp_c45_phy *priv = phydev->priv; |
1263 | irqreturn_t ret = IRQ_NONE; |
1264 | struct nxp_c45_hwts hwts; |
1265 | int irq; |
1266 | |
1267 | irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_STATUS); |
1268 | if (irq & PHY_IRQ_LINK_EVENT) { |
1269 | phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_ACK, |
1270 | PHY_IRQ_LINK_EVENT); |
1271 | phy_trigger_machine(phydev); |
1272 | ret = IRQ_HANDLED; |
1273 | } |
1274 | |
1275 | irq = nxp_c45_read_reg_field(phydev, &data->regmap->irq_egr_ts_status); |
1276 | if (irq) { |
1277 | /* If ack_ptp_irq is false, the IRQ bit is self-clear and will |
1278 | * be cleared when the EGR TS FIFO is empty. Otherwise, the |
1279 | * IRQ bit should be cleared before reading the timestamp. |
1280 | */ |
1281 | if (data->ack_ptp_irq) |
1282 | phy_write_mmd(phydev, MDIO_MMD_VEND1, |
1283 | VEND1_PTP_IRQ_ACK, EGR_TS_IRQ); |
1284 | while (data->get_egressts(priv, &hwts)) |
1285 | nxp_c45_process_txts(priv, &hwts); |
1286 | |
1287 | ret = IRQ_HANDLED; |
1288 | } |
1289 | |
1290 | data->nmi_handler(phydev, &ret); |
1291 | nxp_c45_handle_macsec_interrupt(phydev, &ret); |
1292 | |
1293 | return ret; |
1294 | } |
1295 | |
1296 | static int nxp_c45_soft_reset(struct phy_device *phydev) |
1297 | { |
1298 | int ret; |
1299 | |
1300 | ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL, |
1301 | DEVICE_CONTROL_RESET); |
1302 | if (ret) |
1303 | return ret; |
1304 | |
1305 | usleep_range(2000, 2050); |
1306 | |
1307 | return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, |
1308 | VEND1_DEVICE_CONTROL, ret, |
1309 | !(ret & DEVICE_CONTROL_RESET), 20000, |
1310 | 240000, false); |
1311 | } |
1312 | |
1313 | static int nxp_c45_cable_test_start(struct phy_device *phydev) |
1314 | { |
1315 | const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev); |
1316 | |
1317 | phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, |
1318 | VEND1_PORT_FUNC_ENABLES, PHY_TEST_ENABLE); |
1319 | return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test, |
1320 | CABLE_TEST_ENABLE | CABLE_TEST_START); |
1321 | } |
1322 | |
1323 | static int nxp_c45_cable_test_get_status(struct phy_device *phydev, |
1324 | bool *finished) |
1325 | { |
1326 | const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev); |
1327 | int ret; |
1328 | u8 cable_test_result; |
1329 | |
1330 | ret = nxp_c45_read_reg_field(phydev, reg_field: ®map->cable_test_valid); |
1331 | if (!ret) { |
1332 | *finished = false; |
1333 | return 0; |
1334 | } |
1335 | |
1336 | *finished = true; |
1337 | cable_test_result = nxp_c45_read_reg_field(phydev, |
1338 | reg_field: ®map->cable_test_result); |
1339 | |
1340 | switch (cable_test_result) { |
1341 | case CABLE_TEST_OK: |
1342 | ethnl_cable_test_result(phydev, pair: ETHTOOL_A_CABLE_PAIR_A, |
1343 | result: ETHTOOL_A_CABLE_RESULT_CODE_OK); |
1344 | break; |
1345 | case CABLE_TEST_SHORTED: |
1346 | ethnl_cable_test_result(phydev, pair: ETHTOOL_A_CABLE_PAIR_A, |
1347 | result: ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT); |
1348 | break; |
1349 | case CABLE_TEST_OPEN: |
1350 | ethnl_cable_test_result(phydev, pair: ETHTOOL_A_CABLE_PAIR_A, |
1351 | result: ETHTOOL_A_CABLE_RESULT_CODE_OPEN); |
1352 | break; |
1353 | default: |
1354 | ethnl_cable_test_result(phydev, pair: ETHTOOL_A_CABLE_PAIR_A, |
1355 | result: ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC); |
1356 | } |
1357 | |
1358 | phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, regnum: regmap->cable_test, |
1359 | CABLE_TEST_ENABLE); |
1360 | phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, |
1361 | VEND1_PORT_FUNC_ENABLES, PHY_TEST_ENABLE); |
1362 | |
1363 | return nxp_c45_start_op(phydev); |
1364 | } |
1365 | |
1366 | static int nxp_c45_get_sqi(struct phy_device *phydev) |
1367 | { |
1368 | int reg; |
1369 | |
1370 | reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_SIGNAL_QUALITY); |
1371 | if (!(reg & SQI_VALID)) |
1372 | return -EINVAL; |
1373 | |
1374 | reg &= SQI_MASK; |
1375 | |
1376 | return reg; |
1377 | } |
1378 | |
1379 | static void tja1120_link_change_notify(struct phy_device *phydev) |
1380 | { |
1381 | /* Bug workaround for TJA1120 engineering samples: fix egress |
1382 | * timestamps lost after link recovery. |
1383 | */ |
1384 | if (phydev->state == PHY_NOLINK) { |
1385 | phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, |
1386 | TJA1120_EPHY_RESETS, EPHY_PCS_RESET); |
1387 | phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, |
1388 | TJA1120_EPHY_RESETS, EPHY_PCS_RESET); |
1389 | } |
1390 | } |
1391 | |
1392 | static int nxp_c45_get_sqi_max(struct phy_device *phydev) |
1393 | { |
1394 | return MAX_SQI; |
1395 | } |
1396 | |
1397 | static int nxp_c45_check_delay(struct phy_device *phydev, u32 delay) |
1398 | { |
1399 | if (delay < MIN_ID_PS) { |
1400 | phydev_err(phydev, "delay value smaller than %u\n", MIN_ID_PS); |
1401 | return -EINVAL; |
1402 | } |
1403 | |
1404 | if (delay > MAX_ID_PS) { |
1405 | phydev_err(phydev, "delay value higher than %u\n", MAX_ID_PS); |
1406 | return -EINVAL; |
1407 | } |
1408 | |
1409 | return 0; |
1410 | } |
1411 | |
1412 | static void nxp_c45_counters_enable(struct phy_device *phydev) |
1413 | { |
1414 | const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev); |
1415 | |
1416 | phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_LINK_DROP_COUNTER, |
1417 | COUNTER_EN); |
1418 | |
1419 | data->counters_enable(phydev); |
1420 | } |
1421 | |
1422 | static void nxp_c45_ptp_init(struct phy_device *phydev) |
1423 | { |
1424 | const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev); |
1425 | |
1426 | phy_write_mmd(phydev, MDIO_MMD_VEND1, |
1427 | regnum: data->regmap->vend1_ptp_clk_period, |
1428 | val: data->ptp_clk_period); |
1429 | nxp_c45_clear_reg_field(phydev, reg_field: &data->regmap->ltc_lock_ctrl); |
1430 | |
1431 | data->ptp_init(phydev); |
1432 | } |
1433 | |
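/* Worked example with the default delay: 2000 ps at about 22 ps per degree
 * is 90 degrees, so the register value is (90 * 10 - 738) / 9 = 18, which
 * maps back to 73.8 + 18 * 0.9 = 90 degrees.
 */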
1434 | static u64 nxp_c45_get_phase_shift(u64 phase_offset_raw) |
1435 | { |
1436 | /* The delay in degree phase is 73.8 + phase_offset_raw * 0.9. |
1437 | * To avoid floating point operations we'll multiply by 10 |
1438 | * and get 1 decimal point precision. |
1439 | */ |
1440 | phase_offset_raw *= 10; |
1441 | phase_offset_raw -= 738; |
1442 | return div_u64(dividend: phase_offset_raw, divisor: 9); |
1443 | } |
1444 | |
1445 | static void nxp_c45_disable_delays(struct phy_device *phydev) |
1446 | { |
1447 | phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID, ID_ENABLE); |
1448 | phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID, ID_ENABLE); |
1449 | } |
1450 | |
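/* Converts the requested internal delays from picoseconds to a phase in
 * degrees (PS_PER_DEGREE) and programs the TX/RX internal delay registers,
 * enabling each delay only for the RGMII ID variants that require it.
 */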
1451 | static void nxp_c45_set_delays(struct phy_device *phydev) |
1452 | { |
1453 | struct nxp_c45_phy *priv = phydev->priv; |
1454 | u64 tx_delay = priv->tx_delay; |
1455 | u64 rx_delay = priv->rx_delay; |
1456 | u64 degree; |
1457 | |
1458 | if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || |
1459 | phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) { |
1460 | degree = div_u64(dividend: tx_delay, PS_PER_DEGREE); |
1461 | phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID, |
1462 | ID_ENABLE | nxp_c45_get_phase_shift(phase_offset_raw: degree)); |
1463 | } else { |
1464 | phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID, |
1465 | ID_ENABLE); |
1466 | } |
1467 | |
1468 | if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || |
1469 | phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) { |
1470 | degree = div_u64(dividend: rx_delay, PS_PER_DEGREE); |
1471 | phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID, |
1472 | ID_ENABLE | nxp_c45_get_phase_shift(phase_offset_raw: degree)); |
1473 | } else { |
1474 | phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID, |
1475 | ID_ENABLE); |
1476 | } |
1477 | } |
1478 | |
1479 | static int nxp_c45_get_delays(struct phy_device *phydev) |
1480 | { |
1481 | struct nxp_c45_phy *priv = phydev->priv; |
1482 | int ret; |
1483 | |
1484 | if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || |
1485 | phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) { |
1486 | ret = device_property_read_u32(dev: &phydev->mdio.dev, |
1487 | propname: "tx-internal-delay-ps", |
1488 | val: &priv->tx_delay); |
1489 | if (ret) |
1490 | priv->tx_delay = DEFAULT_ID_PS; |
1491 | |
1492 | ret = nxp_c45_check_delay(phydev, delay: priv->tx_delay); |
1493 | if (ret) { |
1494 | phydev_err(phydev, |
1495 | "tx-internal-delay-ps invalid value\n"); |
1496 | return ret; |
1497 | } |
1498 | } |
1499 | |
1500 | if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || |
1501 | phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) { |
1502 | ret = device_property_read_u32(dev: &phydev->mdio.dev, |
1503 | propname: "rx-internal-delay-ps", |
1504 | val: &priv->rx_delay); |
1505 | if (ret) |
1506 | priv->rx_delay = DEFAULT_ID_PS; |
1507 | |
1508 | ret = nxp_c45_check_delay(phydev, delay: priv->rx_delay); |
1509 | if (ret) { |
1510 | phydev_err(phydev, |
1511 | "rx-internal-delay-ps invalid value\n"); |
1512 | return ret; |
1513 | } |
1514 | } |
1515 | |
1516 | return 0; |
1517 | } |
1518 | |
1519 | static int nxp_c45_set_phy_mode(struct phy_device *phydev) |
1520 | { |
1521 | struct nxp_c45_phy *priv = phydev->priv; |
1522 | u16 basic_config; |
1523 | int ret; |
1524 | |
1525 | ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_ABILITIES); |
1526 | phydev_dbg(phydev, "Clause 45 managed PHY abilities 0x%x\n", ret); |
1527 | |
1528 | switch (phydev->interface) { |
1529 | case PHY_INTERFACE_MODE_RGMII: |
1530 | if (!(ret & RGMII_ABILITY)) { |
1531 | phydev_err(phydev, "rgmii mode not supported\n"); |
1532 | return -EINVAL; |
1533 | } |
1534 | phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG, |
1535 | MII_BASIC_CONFIG_RGMII); |
1536 | nxp_c45_disable_delays(phydev); |
1537 | break; |
1538 | case PHY_INTERFACE_MODE_RGMII_ID: |
1539 | case PHY_INTERFACE_MODE_RGMII_TXID: |
1540 | case PHY_INTERFACE_MODE_RGMII_RXID: |
1541 | if (!(ret & RGMII_ID_ABILITY)) { |
1542 | phydev_err(phydev, "rgmii-id, rgmii-txid, rgmii-rxid modes are not supported\n"); |
1543 | return -EINVAL; |
1544 | } |
1545 | phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG, |
1546 | MII_BASIC_CONFIG_RGMII); |
1547 | ret = nxp_c45_get_delays(phydev); |
1548 | if (ret) |
1549 | return ret; |
1550 | |
1551 | nxp_c45_set_delays(phydev); |
1552 | break; |
1553 | case PHY_INTERFACE_MODE_MII: |
1554 | if (!(ret & MII_ABILITY)) { |
1555 | phydev_err(phydev, "mii mode not supported\n"); |
1556 | return -EINVAL; |
1557 | } |
1558 | phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG, |
1559 | MII_BASIC_CONFIG_MII); |
1560 | break; |
1561 | case PHY_INTERFACE_MODE_REVMII: |
1562 | if (!(ret & REVMII_ABILITY)) { |
1563 | phydev_err(phydev, "rev-mii mode not supported\n"); |
1564 | return -EINVAL; |
1565 | } |
1566 | phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG, |
1567 | MII_BASIC_CONFIG_MII | MII_BASIC_CONFIG_REV); |
1568 | break; |
1569 | case PHY_INTERFACE_MODE_RMII: |
1570 | if (!(ret & RMII_ABILITY)) { |
1571 | phydev_err(phydev, "rmii mode not supported\n"); |
1572 | return -EINVAL; |
1573 | } |
1574 | |
1575 | basic_config = MII_BASIC_CONFIG_RMII; |
1576 | |
1577 | /* This is not PHY_INTERFACE_MODE_REVRMII */ |
1578 | if (priv->flags & TJA11XX_REVERSE_MODE) |
1579 | basic_config |= MII_BASIC_CONFIG_REV; |
1580 | |
1581 | phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG, |
1582 | val: basic_config); |
1583 | break; |
1584 | case PHY_INTERFACE_MODE_SGMII: |
1585 | if (!(ret & SGMII_ABILITY)) { |
1586 | phydev_err(phydev, "sgmii mode not supported\n"); |
1587 | return -EINVAL; |
1588 | } |
1589 | phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG, |
1590 | MII_BASIC_CONFIG_SGMII); |
1591 | break; |
1592 | case PHY_INTERFACE_MODE_INTERNAL: |
1593 | break; |
1594 | default: |
1595 | return -EINVAL; |
1596 | } |
1597 | |
1598 | return 0; |
1599 | } |
1600 | |
1601 | /* Errata: ES_TJA1120 and ES_TJA1121 Rev. 1.0 — 28 November 2024 Section 3.1 & 3.2 */ |
1602 | static void nxp_c45_tja1120_errata(struct phy_device *phydev) |
1603 | { |
1604 | bool macsec_ability, sgmii_ability; |
1605 | int silicon_version, sample_type; |
1606 | int phy_abilities; |
1607 | int ret = 0; |
1608 | |
1609 | ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_ID3); |
1610 | if (ret < 0) |
1611 | return; |
1612 | |
1613 | sample_type = FIELD_GET(TJA1120_DEV_ID3_SAMPLE_TYPE, ret); |
1614 | if (sample_type != DEVICE_ID3_SAMPLE_TYPE_R) |
1615 | return; |
1616 | |
1617 | silicon_version = FIELD_GET(TJA1120_DEV_ID3_SILICON_VERSION, ret); |
1618 | |
1619 | phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1, |
1620 | VEND1_PORT_ABILITIES); |
1621 | macsec_ability = !!(phy_abilities & MACSEC_ABILITY); |
1622 | sgmii_ability = !!(phy_abilities & SGMII_ABILITY); |
1623 | if ((!macsec_ability && silicon_version == 2) || |
1624 | (macsec_ability && silicon_version == 1)) { |
1625 | /* TJA1120/TJA1121 PHY configuration errata workaround. |
1626 | * Apply PHY writes sequence before link up. |
1627 | */ |
1628 | if (!macsec_ability) { |
1629 | phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x4b95); |
1630 | phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0xf3cd); |
1631 | } else { |
1632 | phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x89c7); |
1633 | phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0x0893); |
1634 | } |
1635 | |
1636 | phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x0476, 0x58a0); |
1637 | |
1638 | phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x8921, 0xa3a); |
1639 | phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x89F1, 0x16c1); |
1640 | |
1641 | phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x0); |
1642 | phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0x0); |
1643 | |
1644 | if (sgmii_ability) { |
1645 | /* TJA1120B/TJA1121B SGMII PCS restart errata workaround. |
1646 | * Put SGMII PCS into power down mode and back up. |
1647 | */ |
1648 | phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, |
1649 | VEND1_SGMII_BASIC_CONTROL, |
1650 | SGMII_LPM); |
1651 | phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, |
1652 | VEND1_SGMII_BASIC_CONTROL, |
1653 | SGMII_LPM); |
1654 | } |
1655 | } |
1656 | } |
1657 | |
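/* One-time configuration entry point called by the PHY core: enable config
 * access, apply the SJA1110 and TJA1120 workarounds, select the
 * MII/RMII/RGMII/SGMII mode from phydev->interface, then enable counters,
 * PTP and MACsec support before starting the PHY. Autonegotiation is left
 * disabled.
 */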
1658 | static int nxp_c45_config_init(struct phy_device *phydev) |
1659 | { |
1660 | int ret; |
1661 | |
1662 | ret = nxp_c45_config_enable(phydev); |
1663 | if (ret) { |
1664 | phydev_err(phydev, "Failed to enable config\n"); |
1665 | return ret; |
1666 | } |
1667 | |
1668 | /* Bug workaround for SJA1110 rev B: enable write access |
1669 | * to MDIO_MMD_PMAPMD |
1670 | */ |
1671 | phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1);
1672 | phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2);
1673 | |
1674 | if (phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, GENMASK(31, 4)))
1675 | nxp_c45_tja1120_errata(phydev); |
1676 | |
1677 | phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG, |
1678 | PHY_CONFIG_AUTO); |
1679 | |
1680 | ret = nxp_c45_set_phy_mode(phydev); |
1681 | if (ret) |
1682 | return ret; |
1683 | |
1684 | phydev->autoneg = AUTONEG_DISABLE; |
1685 | |
1686 | nxp_c45_counters_enable(phydev); |
1687 | nxp_c45_ptp_init(phydev); |
1688 | ret = nxp_c45_macsec_config_init(phydev); |
1689 | if (ret) |
1690 | return ret; |
1691 | |
1692 | return nxp_c45_start_op(phydev); |
1693 | } |
1694 | |
1695 | static int nxp_c45_get_features(struct phy_device *phydev) |
1696 | { |
1697 | linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, phydev->supported); |
1698 | linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, phydev->supported); |
1699 | |
1700 | return genphy_c45_pma_read_abilities(phydev); |
1701 | } |
1702 | |
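/* Optional devicetree configuration. "nxp,rmii-refclk-out" makes the PHY
 * output the RMII reference clock instead of consuming it (mapped to
 * TJA11XX_REVERSE_MODE and the reversed RMII basic configuration). Minimal
 * illustrative fragment; node name and address are assumptions:
 *
 *	ethernet-phy@6 {
 *		reg = <6>;
 *		nxp,rmii-refclk-out;
 *	};
 */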
1703 | static int nxp_c45_parse_dt(struct phy_device *phydev) |
1704 | { |
1705 | struct device_node *node = phydev->mdio.dev.of_node; |
1706 | struct nxp_c45_phy *priv = phydev->priv; |
1707 | |
1708 | if (!IS_ENABLED(CONFIG_OF_MDIO)) |
1709 | return 0; |
1710 | |
1711 | if (of_property_read_bool(node, "nxp,rmii-refclk-out"))
1712 | priv->flags |= TJA11XX_REVERSE_MODE; |
1713 | |
1714 | return 0; |
1715 | } |
1716 | |
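/* Probe: allocate driver state, read VEND1_PORT_ABILITIES once and, depending
 * on the reported PTP and MACsec abilities (and on CONFIG_PTP_1588_CLOCK,
 * CONFIG_NETWORK_PHY_TIMESTAMPING and CONFIG_MACSEC), register the PHY
 * timestamping hooks, the PTP clock and the MACsec offload support.
 */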
1717 | static int nxp_c45_probe(struct phy_device *phydev) |
1718 | { |
1719 | struct nxp_c45_phy *priv; |
1720 | bool macsec_ability; |
1721 | int phy_abilities; |
1722 | bool ptp_ability; |
1723 | int ret = 0; |
1724 | |
1725 | priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
1726 | if (!priv) |
1727 | return -ENOMEM; |
1728 | |
1729 | skb_queue_head_init(&priv->tx_queue);
1730 | skb_queue_head_init(&priv->rx_queue);
1731 | |
1732 | priv->phydev = phydev; |
1733 | |
1734 | phydev->priv = priv; |
1735 | |
1736 | nxp_c45_parse_dt(phydev); |
1737 | |
1738 | mutex_init(&priv->ptp_lock); |
1739 | |
1740 | phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1, |
1741 | VEND1_PORT_ABILITIES); |
1742 | ptp_ability = !!(phy_abilities & PTP_ABILITY); |
1743 | if (!ptp_ability) { |
1744 | phydev_dbg(phydev, "the phy does not support PTP\n");
1745 | goto no_ptp_support; |
1746 | } |
1747 | |
1748 | if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) && |
1749 | IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) { |
1750 | priv->mii_ts.rxtstamp = nxp_c45_rxtstamp; |
1751 | priv->mii_ts.txtstamp = nxp_c45_txtstamp; |
1752 | priv->mii_ts.hwtstamp = nxp_c45_hwtstamp; |
1753 | priv->mii_ts.ts_info = nxp_c45_ts_info; |
1754 | phydev->mii_ts = &priv->mii_ts; |
1755 | ret = nxp_c45_init_ptp_clock(priv); |
1756 | |
1757 | /* Timestamp selected by default to keep legacy API */ |
1758 | phydev->default_timestamp = true; |
1759 | } else { |
1760 | phydev_dbg(phydev, "PTP support not enabled even though the phy supports it\n");
1761 | } |
1762 | |
1763 | no_ptp_support: |
1764 | macsec_ability = !!(phy_abilities & MACSEC_ABILITY); |
1765 | if (!macsec_ability) { |
1766 | phydev_info(phydev, "the phy does not support MACsec\n"); |
1767 | goto no_macsec_support; |
1768 | } |
1769 | |
1770 | if (IS_ENABLED(CONFIG_MACSEC)) { |
1771 | ret = nxp_c45_macsec_probe(phydev); |
1772 | phydev_dbg(phydev, "MACsec support enabled\n");
1773 | } else {
1774 | phydev_dbg(phydev, "MACsec support not enabled even though the phy supports it\n");
1775 | } |
1776 | |
1777 | no_macsec_support: |
1778 | |
1779 | return ret; |
1780 | } |
1781 | |
1782 | static void nxp_c45_remove(struct phy_device *phydev) |
1783 | { |
1784 | struct nxp_c45_phy *priv = phydev->priv; |
1785 | |
1786 | if (priv->ptp_clock) |
1787 | ptp_clock_unregister(priv->ptp_clock);
1788 | |
1789 | skb_queue_purge(&priv->tx_queue);
1790 | skb_queue_purge(&priv->rx_queue);
1791 | nxp_c45_macsec_remove(phydev); |
1792 | } |
1793 | |
1794 | static void tja1103_counters_enable(struct phy_device *phydev) |
1795 | { |
1796 | phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_PREAMBLE_COUNT, |
1797 | COUNTER_EN); |
1798 | phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_PREAMBLE_COUNT, |
1799 | COUNTER_EN); |
1800 | phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_IPG_LENGTH, |
1801 | COUNTER_EN); |
1802 | phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_IPG_LENGTH, |
1803 | COUNTER_EN); |
1804 | } |
1805 | |
1806 | static void tja1103_ptp_init(struct phy_device *phydev) |
1807 | { |
1808 | phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_TS_INSRT_CTRL, |
1809 | TJA1103_RX_TS_INSRT_MODE2); |
1810 | phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES, |
1811 | PTP_ENABLE); |
1812 | } |
1813 | |
1814 | static void tja1103_ptp_enable(struct phy_device *phydev, bool enable) |
1815 | { |
1816 | if (enable) |
1817 | phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, |
1818 | VEND1_PORT_PTP_CONTROL, |
1819 | PORT_PTP_CONTROL_BYPASS); |
1820 | else |
1821 | phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, |
1822 | VEND1_PORT_PTP_CONTROL, |
1823 | PORT_PTP_CONTROL_BYPASS); |
1824 | } |
1825 | |
1826 | static void tja1103_nmi_handler(struct phy_device *phydev, |
1827 | irqreturn_t *irq_status) |
1828 | { |
1829 | int ret; |
1830 | |
1831 | ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, |
1832 | VEND1_ALWAYS_ACCESSIBLE); |
1833 | if (ret & FUSA_PASS) { |
1834 | phy_write_mmd(phydev, MDIO_MMD_VEND1, |
1835 | VEND1_ALWAYS_ACCESSIBLE, |
1836 | FUSA_PASS); |
1837 | *irq_status = IRQ_HANDLED; |
1838 | } |
1839 | } |
1840 | |
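/* TJA1103/TJA1104 register layout: the PTP/LTC block lives in the VEND1
 * 0x11xx range and cable test in 0x83xx. Fields are described with
 * NXP_C45_REG_FIELD(reg, devad, offset, size) and accessed through the
 * nxp_c45_{read,write,set,clear}_reg_field() helpers earlier in this file.
 */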
1841 | static const struct nxp_c45_regmap tja1103_regmap = { |
1842 | .vend1_ptp_clk_period = 0x1104, |
1843 | .vend1_event_msg_filt = 0x1148, |
1844 | .pps_enable = |
1845 | NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 3, 1), |
1846 | .pps_polarity = |
1847 | NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 2, 1), |
1848 | .ltc_lock_ctrl = |
1849 | NXP_C45_REG_FIELD(0x1115, MDIO_MMD_VEND1, 0, 1), |
1850 | .ltc_read = |
1851 | NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 2, 1), |
1852 | .ltc_write = |
1853 | NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 0, 1), |
1854 | .vend1_ltc_wr_nsec_0 = 0x1106, |
1855 | .vend1_ltc_wr_nsec_1 = 0x1107, |
1856 | .vend1_ltc_wr_sec_0 = 0x1108, |
1857 | .vend1_ltc_wr_sec_1 = 0x1109, |
1858 | .vend1_ltc_rd_nsec_0 = 0x110A, |
1859 | .vend1_ltc_rd_nsec_1 = 0x110B, |
1860 | .vend1_ltc_rd_sec_0 = 0x110C, |
1861 | .vend1_ltc_rd_sec_1 = 0x110D, |
1862 | .vend1_rate_adj_subns_0 = 0x110F, |
1863 | .vend1_rate_adj_subns_1 = 0x1110, |
1864 | .irq_egr_ts_en = |
1865 | NXP_C45_REG_FIELD(0x1131, MDIO_MMD_VEND1, 0, 1), |
1866 | .irq_egr_ts_status = |
1867 | NXP_C45_REG_FIELD(0x1132, MDIO_MMD_VEND1, 0, 1), |
1868 | .domain_number = |
1869 | NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 0, 8), |
1870 | .msg_type = |
1871 | NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 8, 4), |
1872 | .sequence_id = |
1873 | NXP_C45_REG_FIELD(0x114F, MDIO_MMD_VEND1, 0, 16), |
1874 | .sec_1_0 = |
1875 | NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 14, 2), |
1876 | .sec_4_2 = |
1877 | NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 12, 3), |
1878 | .nsec_15_0 = |
1879 | NXP_C45_REG_FIELD(0x1150, MDIO_MMD_VEND1, 0, 16), |
1880 | .nsec_29_16 = |
1881 | NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 0, 14), |
1882 | .vend1_ext_trg_data_0 = 0x1121, |
1883 | .vend1_ext_trg_data_1 = 0x1122, |
1884 | .vend1_ext_trg_data_2 = 0x1123, |
1885 | .vend1_ext_trg_data_3 = 0x1124, |
1886 | .vend1_ext_trg_ctrl = 0x1126, |
1887 | .cable_test = 0x8330, |
1888 | .cable_test_valid = |
1889 | NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 13, 1), |
1890 | .cable_test_result = |
1891 | NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 0, 3), |
1892 | }; |
1893 | |
1894 | static const struct nxp_c45_phy_data tja1103_phy_data = { |
1895 | .regmap = &tja1103_regmap, |
1896 | .stats = tja1103_hw_stats, |
1897 | .n_stats = ARRAY_SIZE(tja1103_hw_stats), |
1898 | .ptp_clk_period = PTP_CLK_PERIOD_100BT1, |
1899 | .ext_ts_both_edges = false, |
1900 | .ack_ptp_irq = false, |
1901 | .counters_enable = tja1103_counters_enable, |
1902 | .get_egressts = nxp_c45_get_hwtxts, |
1903 | .get_extts = nxp_c45_get_extts, |
1904 | .ptp_init = tja1103_ptp_init, |
1905 | .ptp_enable = tja1103_ptp_enable, |
1906 | .nmi_handler = tja1103_nmi_handler, |
1907 | }; |
1908 | |
1909 | static void tja1120_counters_enable(struct phy_device *phydev) |
1910 | { |
1911 | phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_SYMBOL_ERROR_CNT_XTD, |
1912 | EXTENDED_CNT_EN); |
1913 | phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_STATUS, |
1914 | MONITOR_RESET); |
1915 | phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_CONFIG, |
1916 | ALL_FRAMES_CNT_EN | LOST_FRAMES_CNT_EN); |
1917 | } |
1918 | |
1919 | static void tja1120_ptp_init(struct phy_device *phydev) |
1920 | { |
1921 | phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_RX_TS_INSRT_CTRL, |
1922 | TJA1120_RX_TS_INSRT_EN | TJA1120_TS_INSRT_MODE); |
1923 | phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_VEND1_EXT_TS_MODE, |
1924 | TJA1120_TS_INSRT_MODE); |
1925 | phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONFIG, |
1926 | PTP_ENABLE); |
1927 | } |
1928 | |
1929 | static void tja1120_ptp_enable(struct phy_device *phydev, bool enable) |
1930 | { |
1931 | if (enable) |
1932 | phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, |
1933 | VEND1_PORT_FUNC_ENABLES, |
1934 | PTP_ENABLE); |
1935 | else |
1936 | phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, |
1937 | VEND1_PORT_FUNC_ENABLES, |
1938 | PTP_ENABLE); |
1939 | } |
1940 | |
1941 | static void tja1120_nmi_handler(struct phy_device *phydev, |
1942 | irqreturn_t *irq_status) |
1943 | { |
1944 | int ret; |
1945 | |
1946 | ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, |
1947 | TJA1120_GLOBAL_INFRA_IRQ_STATUS); |
1948 | if (ret & TJA1120_DEV_BOOT_DONE) { |
1949 | phy_write_mmd(phydev, MDIO_MMD_VEND1, |
1950 | TJA1120_GLOBAL_INFRA_IRQ_ACK, |
1951 | TJA1120_DEV_BOOT_DONE); |
1952 | *irq_status = IRQ_HANDLED; |
1953 | } |
1954 | } |
1955 | |
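/* Each supported PHY ID is shared by a MACsec and a non-MACsec part (see the
 * TJA1103/TJA1104 and TJA1120/TJA1121 entries below), so the driver entries
 * are matched at runtime on the MACSEC_ABILITY bit in VEND1_PORT_ABILITIES
 * rather than on the PHY ID alone.
 */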
1956 | static int nxp_c45_macsec_ability(struct phy_device *phydev) |
1957 | { |
1958 | bool macsec_ability; |
1959 | int phy_abilities; |
1960 | |
1961 | phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1, |
1962 | VEND1_PORT_ABILITIES); |
1963 | macsec_ability = !!(phy_abilities & MACSEC_ABILITY); |
1964 | |
1965 | return macsec_ability; |
1966 | } |
1967 | |
1968 | static int tja11xx_no_macsec_match_phy_device(struct phy_device *phydev, |
1969 | const struct phy_driver *phydrv) |
1970 | { |
1971 | if (!phy_id_compare(phydev->phy_id, phydrv->phy_id,
1972 | phydrv->phy_id_mask))
1973 | return 0; |
1974 | |
1975 | return !nxp_c45_macsec_ability(phydev); |
1976 | } |
1977 | |
1978 | static int tja11xx_macsec_match_phy_device(struct phy_device *phydev, |
1979 | const struct phy_driver *phydrv) |
1980 | { |
1981 | if (!phy_id_compare(phydev->phy_id, phydrv->phy_id,
1982 | phydrv->phy_id_mask))
1983 | return 0; |
1984 | |
1985 | return nxp_c45_macsec_ability(phydev); |
1986 | } |
1987 | |
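/* TJA1120/TJA1121 register layout: same field set as the TJA1103 map above,
 * but the PTP/LTC block moves to the VEND1 0x10xx range, egress timestamps to
 * 0x90xx and cable test to 0x836x.
 */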
1988 | static const struct nxp_c45_regmap tja1120_regmap = { |
1989 | .vend1_ptp_clk_period = 0x1020, |
1990 | .vend1_event_msg_filt = 0x9010, |
1991 | .pps_enable = |
1992 | NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 4, 1), |
1993 | .pps_polarity = |
1994 | NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 5, 1), |
1995 | .ltc_lock_ctrl = |
1996 | NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 2, 1), |
1997 | .ltc_read = |
1998 | NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 1, 1), |
1999 | .ltc_write = |
2000 | NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 2, 1), |
2001 | .vend1_ltc_wr_nsec_0 = 0x1040, |
2002 | .vend1_ltc_wr_nsec_1 = 0x1041, |
2003 | .vend1_ltc_wr_sec_0 = 0x1042, |
2004 | .vend1_ltc_wr_sec_1 = 0x1043, |
2005 | .vend1_ltc_rd_nsec_0 = 0x1048, |
2006 | .vend1_ltc_rd_nsec_1 = 0x1049, |
2007 | .vend1_ltc_rd_sec_0 = 0x104A, |
2008 | .vend1_ltc_rd_sec_1 = 0x104B, |
2009 | .vend1_rate_adj_subns_0 = 0x1030, |
2010 | .vend1_rate_adj_subns_1 = 0x1031, |
2011 | .irq_egr_ts_en = |
2012 | NXP_C45_REG_FIELD(0x900A, MDIO_MMD_VEND1, 1, 1), |
2013 | .irq_egr_ts_status = |
2014 | NXP_C45_REG_FIELD(0x900C, MDIO_MMD_VEND1, 1, 1), |
2015 | .domain_number = |
2016 | NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 8, 8), |
2017 | .msg_type = |
2018 | NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 4, 4), |
2019 | .sequence_id = |
2020 | NXP_C45_REG_FIELD(0x9062, MDIO_MMD_VEND1, 0, 16), |
2021 | .sec_1_0 = |
2022 | NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 0, 2), |
2023 | .sec_4_2 = |
2024 | NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 2, 3), |
2025 | .nsec_15_0 = |
2026 | NXP_C45_REG_FIELD(0x9063, MDIO_MMD_VEND1, 0, 16), |
2027 | .nsec_29_16 = |
2028 | NXP_C45_REG_FIELD(0x9064, MDIO_MMD_VEND1, 0, 14), |
2029 | .vend1_ext_trg_data_0 = 0x1071, |
2030 | .vend1_ext_trg_data_1 = 0x1072, |
2031 | .vend1_ext_trg_data_2 = 0x1073, |
2032 | .vend1_ext_trg_data_3 = 0x1074, |
2033 | .vend1_ext_trg_ctrl = 0x1075, |
2034 | .cable_test = 0x8360, |
2035 | .cable_test_valid = |
2036 | NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 15, 1), |
2037 | .cable_test_result = |
2038 | NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 0, 3), |
2039 | }; |
2040 | |
2041 | static const struct nxp_c45_phy_data tja1120_phy_data = { |
2042 | .regmap = &tja1120_regmap, |
2043 | .stats = tja1120_hw_stats, |
2044 | .n_stats = ARRAY_SIZE(tja1120_hw_stats), |
2045 | .ptp_clk_period = PTP_CLK_PERIOD_1000BT1, |
2046 | .ext_ts_both_edges = true, |
2047 | .ack_ptp_irq = true, |
2048 | .counters_enable = tja1120_counters_enable, |
2049 | .get_egressts = tja1120_get_hwtxts, |
2050 | .get_extts = tja1120_get_extts, |
2051 | .ptp_init = tja1120_ptp_init, |
2052 | .ptp_enable = tja1120_ptp_enable, |
2053 | .nmi_handler = tja1120_nmi_handler, |
2054 | }; |
2055 | |
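/* The cable_test_start/cable_test_get_status and get_sqi/get_sqi_max hooks
 * below back the ethtool netlink cable-test and link-state SQI interfaces;
 * for example (illustrative command only, interface name assumed)
 * "ethtool --cable-test eth0" on a port served by this driver ends up in
 * nxp_c45_cable_test_start().
 */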
2056 | static struct phy_driver nxp_c45_driver[] = { |
2057 | { |
2058 | PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103), |
2059 | .name = "NXP C45 TJA1103", |
2060 | .get_features = nxp_c45_get_features, |
2061 | .driver_data = &tja1103_phy_data, |
2062 | .probe = nxp_c45_probe, |
2063 | .soft_reset = nxp_c45_soft_reset, |
2064 | .config_aneg = genphy_c45_config_aneg, |
2065 | .config_init = nxp_c45_config_init, |
2066 | .config_intr = tja1103_config_intr, |
2067 | .handle_interrupt = nxp_c45_handle_interrupt, |
2068 | .read_status = genphy_c45_read_status, |
2069 | .suspend = genphy_c45_pma_suspend, |
2070 | .resume = genphy_c45_pma_resume, |
2071 | .get_sset_count = nxp_c45_get_sset_count, |
2072 | .get_strings = nxp_c45_get_strings, |
2073 | .get_stats = nxp_c45_get_stats, |
2074 | .cable_test_start = nxp_c45_cable_test_start, |
2075 | .cable_test_get_status = nxp_c45_cable_test_get_status, |
2076 | .set_loopback = genphy_c45_loopback, |
2077 | .get_sqi = nxp_c45_get_sqi, |
2078 | .get_sqi_max = nxp_c45_get_sqi_max, |
2079 | .remove = nxp_c45_remove, |
2080 | .match_phy_device = tja11xx_no_macsec_match_phy_device, |
2081 | }, |
2082 | { |
2083 | PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103), |
2084 | .name = "NXP C45 TJA1104", |
2085 | .get_features = nxp_c45_get_features, |
2086 | .driver_data = &tja1103_phy_data, |
2087 | .probe = nxp_c45_probe, |
2088 | .soft_reset = nxp_c45_soft_reset, |
2089 | .config_aneg = genphy_c45_config_aneg, |
2090 | .config_init = nxp_c45_config_init, |
2091 | .config_intr = tja1103_config_intr, |
2092 | .handle_interrupt = nxp_c45_handle_interrupt, |
2093 | .read_status = genphy_c45_read_status, |
2094 | .suspend = genphy_c45_pma_suspend, |
2095 | .resume = genphy_c45_pma_resume, |
2096 | .get_sset_count = nxp_c45_get_sset_count, |
2097 | .get_strings = nxp_c45_get_strings, |
2098 | .get_stats = nxp_c45_get_stats, |
2099 | .cable_test_start = nxp_c45_cable_test_start, |
2100 | .cable_test_get_status = nxp_c45_cable_test_get_status, |
2101 | .set_loopback = genphy_c45_loopback, |
2102 | .get_sqi = nxp_c45_get_sqi, |
2103 | .get_sqi_max = nxp_c45_get_sqi_max, |
2104 | .remove = nxp_c45_remove, |
2105 | .match_phy_device = tja11xx_macsec_match_phy_device, |
2106 | }, |
2107 | { |
2108 | PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120), |
2109 | .name = "NXP C45 TJA1120", |
2110 | .get_features = nxp_c45_get_features, |
2111 | .driver_data = &tja1120_phy_data, |
2112 | .probe = nxp_c45_probe, |
2113 | .soft_reset = nxp_c45_soft_reset, |
2114 | .config_aneg = genphy_c45_config_aneg, |
2115 | .config_init = nxp_c45_config_init, |
2116 | .config_intr = tja1120_config_intr, |
2117 | .handle_interrupt = nxp_c45_handle_interrupt, |
2118 | .read_status = genphy_c45_read_status, |
2119 | .link_change_notify = tja1120_link_change_notify, |
2120 | .suspend = genphy_c45_pma_suspend, |
2121 | .resume = genphy_c45_pma_resume, |
2122 | .get_sset_count = nxp_c45_get_sset_count, |
2123 | .get_strings = nxp_c45_get_strings, |
2124 | .get_stats = nxp_c45_get_stats, |
2125 | .cable_test_start = nxp_c45_cable_test_start, |
2126 | .cable_test_get_status = nxp_c45_cable_test_get_status, |
2127 | .set_loopback = genphy_c45_loopback, |
2128 | .get_sqi = nxp_c45_get_sqi, |
2129 | .get_sqi_max = nxp_c45_get_sqi_max, |
2130 | .remove = nxp_c45_remove, |
2131 | .match_phy_device = tja11xx_no_macsec_match_phy_device, |
2132 | }, |
2133 | { |
2134 | PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120), |
2135 | .name = "NXP C45 TJA1121", |
2136 | .get_features = nxp_c45_get_features, |
2137 | .driver_data = &tja1120_phy_data, |
2138 | .probe = nxp_c45_probe, |
2139 | .soft_reset = nxp_c45_soft_reset, |
2140 | .config_aneg = genphy_c45_config_aneg, |
2141 | .config_init = nxp_c45_config_init, |
2142 | .config_intr = tja1120_config_intr, |
2143 | .handle_interrupt = nxp_c45_handle_interrupt, |
2144 | .read_status = genphy_c45_read_status, |
2145 | .link_change_notify = tja1120_link_change_notify, |
2146 | .suspend = genphy_c45_pma_suspend, |
2147 | .resume = genphy_c45_pma_resume, |
2148 | .get_sset_count = nxp_c45_get_sset_count, |
2149 | .get_strings = nxp_c45_get_strings, |
2150 | .get_stats = nxp_c45_get_stats, |
2151 | .cable_test_start = nxp_c45_cable_test_start, |
2152 | .cable_test_get_status = nxp_c45_cable_test_get_status, |
2153 | .set_loopback = genphy_c45_loopback, |
2154 | .get_sqi = nxp_c45_get_sqi, |
2155 | .get_sqi_max = nxp_c45_get_sqi_max, |
2156 | .remove = nxp_c45_remove, |
2157 | .match_phy_device = tja11xx_macsec_match_phy_device, |
2158 | }, |
2159 | }; |
2160 | |
2161 | module_phy_driver(nxp_c45_driver); |
2162 | |
2163 | static const struct mdio_device_id __maybe_unused nxp_c45_tbl[] = { |
2164 | { PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) }, |
2165 | { PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120) }, |
2166 | { /*sentinel*/ }, |
2167 | }; |
2168 | |
2169 | MODULE_DEVICE_TABLE(mdio, nxp_c45_tbl); |
2170 | |
2171 | MODULE_AUTHOR("Radu Pirea <radu-nicolae.pirea@oss.nxp.com>"); |
2172 | MODULE_DESCRIPTION("NXP C45 PHY driver"); |
2173 | MODULE_LICENSE("GPL v2"); |
2174 |