1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (C) 2023, Intel Corporation |
4 | * stmmac EST(802.3 Qbv) handling |
5 | */ |
6 | #include <linux/iopoll.h> |
7 | #include <linux/types.h> |
8 | #include "stmmac.h" |
9 | #include "stmmac_est.h" |
10 | |
11 | static int est_write(void __iomem *est_addr, u32 reg, u32 val, bool gcl) |
12 | { |
13 | u32 ctrl; |
14 | |
15 | writel(val, addr: est_addr + EST_GCL_DATA); |
16 | |
17 | ctrl = (reg << EST_ADDR_SHIFT); |
18 | ctrl |= gcl ? 0 : EST_GCRR; |
19 | writel(val: ctrl, addr: est_addr + EST_GCL_CONTROL); |
20 | |
21 | ctrl |= EST_SRWO; |
22 | writel(val: ctrl, addr: est_addr + EST_GCL_CONTROL); |
23 | |
24 | return readl_poll_timeout(est_addr + EST_GCL_CONTROL, ctrl, |
25 | !(ctrl & EST_SRWO), 100, 5000); |
26 | } |
27 | |
28 | static int est_configure(struct stmmac_priv *priv, struct stmmac_est *cfg, |
29 | unsigned int ptp_rate) |
30 | { |
31 | void __iomem *est_addr = priv->estaddr; |
32 | int i, ret = 0; |
33 | u32 ctrl; |
34 | |
35 | ret |= est_write(est_addr, EST_BTR_LOW, val: cfg->btr[0], gcl: false); |
36 | ret |= est_write(est_addr, EST_BTR_HIGH, val: cfg->btr[1], gcl: false); |
37 | ret |= est_write(est_addr, EST_TER, val: cfg->ter, gcl: false); |
38 | ret |= est_write(est_addr, EST_LLR, val: cfg->gcl_size, gcl: false); |
39 | ret |= est_write(est_addr, EST_CTR_LOW, val: cfg->ctr[0], gcl: false); |
40 | ret |= est_write(est_addr, EST_CTR_HIGH, val: cfg->ctr[1], gcl: false); |
41 | if (ret) |
42 | return ret; |
43 | |
44 | for (i = 0; i < cfg->gcl_size; i++) { |
45 | ret = est_write(est_addr, reg: i, val: cfg->gcl[i], gcl: true); |
46 | if (ret) |
47 | return ret; |
48 | } |
49 | |
50 | ctrl = readl(addr: est_addr + EST_CONTROL); |
51 | if (priv->plat->has_xgmac) { |
52 | ctrl &= ~EST_XGMAC_PTOV; |
53 | ctrl |= ((NSEC_PER_SEC / ptp_rate) * EST_XGMAC_PTOV_MUL) << |
54 | EST_XGMAC_PTOV_SHIFT; |
55 | } else { |
56 | ctrl &= ~EST_GMAC5_PTOV; |
57 | ctrl |= ((NSEC_PER_SEC / ptp_rate) * EST_GMAC5_PTOV_MUL) << |
58 | EST_GMAC5_PTOV_SHIFT; |
59 | } |
60 | if (cfg->enable) |
61 | ctrl |= EST_EEST | EST_SSWL; |
62 | else |
63 | ctrl &= ~EST_EEST; |
64 | |
65 | writel(val: ctrl, addr: est_addr + EST_CONTROL); |
66 | |
67 | /* Configure EST interrupt */ |
68 | if (cfg->enable) |
69 | ctrl = EST_IECGCE | EST_IEHS | EST_IEHF | EST_IEBE | EST_IECC; |
70 | else |
71 | ctrl = 0; |
72 | |
73 | writel(val: ctrl, addr: est_addr + EST_INT_EN); |
74 | |
75 | return 0; |
76 | } |
77 | |
78 | static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev, |
79 | struct stmmac_extra_stats *x, u32 txqcnt) |
80 | { |
81 | u32 status, value, feqn, hbfq, hbfs, btrl, btrl_max; |
82 | void __iomem *est_addr = priv->estaddr; |
83 | u32 txqcnt_mask = BIT(txqcnt) - 1; |
84 | int i; |
85 | |
86 | status = readl(addr: est_addr + EST_STATUS); |
87 | |
88 | value = EST_CGCE | EST_HLBS | EST_HLBF | EST_BTRE | EST_SWLC; |
89 | |
90 | /* Return if there is no error */ |
91 | if (!(status & value)) |
92 | return; |
93 | |
94 | if (status & EST_CGCE) { |
95 | /* Clear Interrupt */ |
96 | writel(EST_CGCE, addr: est_addr + EST_STATUS); |
97 | |
98 | x->mtl_est_cgce++; |
99 | } |
100 | |
101 | if (status & EST_HLBS) { |
102 | value = readl(addr: est_addr + EST_SCH_ERR); |
103 | value &= txqcnt_mask; |
104 | |
105 | x->mtl_est_hlbs++; |
106 | |
107 | /* Clear Interrupt */ |
108 | writel(val: value, addr: est_addr + EST_SCH_ERR); |
109 | |
110 | /* Collecting info to shows all the queues that has HLBS |
111 | * issue. The only way to clear this is to clear the |
112 | * statistic |
113 | */ |
114 | if (net_ratelimit()) |
115 | netdev_err(dev, format: "EST: HLB(sched) Queue 0x%x\n" , value); |
116 | } |
117 | |
118 | if (status & EST_HLBF) { |
119 | value = readl(addr: est_addr + EST_FRM_SZ_ERR); |
120 | feqn = value & txqcnt_mask; |
121 | |
122 | value = readl(addr: est_addr + EST_FRM_SZ_CAP); |
123 | hbfq = (value & EST_SZ_CAP_HBFQ_MASK(txqcnt)) >> |
124 | EST_SZ_CAP_HBFQ_SHIFT; |
125 | hbfs = value & EST_SZ_CAP_HBFS_MASK; |
126 | |
127 | x->mtl_est_hlbf++; |
128 | |
129 | for (i = 0; i < txqcnt; i++) { |
130 | if (feqn & BIT(i)) |
131 | x->mtl_est_txq_hlbf[i]++; |
132 | } |
133 | |
134 | /* Clear Interrupt */ |
135 | writel(val: feqn, addr: est_addr + EST_FRM_SZ_ERR); |
136 | |
137 | if (net_ratelimit()) |
138 | netdev_err(dev, format: "EST: HLB(size) Queue %u Size %u\n" , |
139 | hbfq, hbfs); |
140 | } |
141 | |
142 | if (status & EST_BTRE) { |
143 | if (priv->plat->has_xgmac) { |
144 | btrl = FIELD_GET(EST_XGMAC_BTRL, status); |
145 | btrl_max = FIELD_MAX(EST_XGMAC_BTRL); |
146 | } else { |
147 | btrl = FIELD_GET(EST_GMAC5_BTRL, status); |
148 | btrl_max = FIELD_MAX(EST_GMAC5_BTRL); |
149 | } |
150 | if (btrl == btrl_max) |
151 | x->mtl_est_btrlm++; |
152 | else |
153 | x->mtl_est_btre++; |
154 | |
155 | if (net_ratelimit()) |
156 | netdev_info(dev, format: "EST: BTR Error Loop Count %u\n" , |
157 | btrl); |
158 | |
159 | writel(EST_BTRE, addr: est_addr + EST_STATUS); |
160 | } |
161 | |
162 | if (status & EST_SWLC) { |
163 | writel(EST_SWLC, addr: est_addr + EST_STATUS); |
164 | netdev_info(dev, format: "EST: SWOL has been switched\n" ); |
165 | } |
166 | } |
167 | |
/* EST callbacks for DWMAC v5.10-class cores; hooked up by the core via
 * stmmac_est_ops (schedule programming and interrupt servicing).
 */
const struct stmmac_est_ops dwmac510_est_ops = {
	.configure = est_configure,
	.irq_status = est_irq_status,
};
172 | |