1 | /* |
2 | * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs. |
3 | * |
4 | * Copyright (C) 2012 Marvell |
5 | * |
6 | * Rami Rosen <rosenr@marvell.com> |
7 | * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> |
8 | * |
9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any |
11 | * warranty of any kind, whether express or implied. |
12 | */ |
13 | |
14 | #include <linux/clk.h> |
15 | #include <linux/cpu.h> |
16 | #include <linux/etherdevice.h> |
17 | #include <linux/if_vlan.h> |
18 | #include <linux/inetdevice.h> |
19 | #include <linux/interrupt.h> |
20 | #include <linux/io.h> |
21 | #include <linux/kernel.h> |
22 | #include <linux/mbus.h> |
23 | #include <linux/module.h> |
24 | #include <linux/netdevice.h> |
25 | #include <linux/of.h> |
26 | #include <linux/of_address.h> |
27 | #include <linux/of_irq.h> |
28 | #include <linux/of_mdio.h> |
29 | #include <linux/of_net.h> |
30 | #include <linux/phy/phy.h> |
31 | #include <linux/phy.h> |
32 | #include <linux/phylink.h> |
33 | #include <linux/platform_device.h> |
34 | #include <linux/skbuff.h> |
35 | #include <net/hwbm.h> |
36 | #include "mvneta_bm.h" |
37 | #include <net/ip.h> |
38 | #include <net/ipv6.h> |
39 | #include <net/tso.h> |
40 | #include <net/page_pool/helpers.h> |
41 | #include <net/pkt_sched.h> |
42 | #include <linux/bpf_trace.h> |
43 | |
44 | /* Registers */ |
45 | #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2)) |
46 | #define MVNETA_RXQ_HW_BUF_ALLOC BIT(0) |
47 | #define MVNETA_RXQ_SHORT_POOL_ID_SHIFT 4 |
48 | #define MVNETA_RXQ_SHORT_POOL_ID_MASK 0x30 |
49 | #define MVNETA_RXQ_LONG_POOL_ID_SHIFT 6 |
50 | #define MVNETA_RXQ_LONG_POOL_ID_MASK 0xc0 |
51 | #define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8) |
52 | #define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8) |
53 | #define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2)) |
54 | #define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16) |
55 | #define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2)) |
56 | #define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2)) |
57 | #define MVNETA_RXQ_BUF_SIZE_SHIFT 19 |
58 | #define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19) |
59 | #define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2)) |
60 | #define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff |
61 | #define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2)) |
62 | #define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16 |
63 | #define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255 |
64 | #define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool) (0x1700 + ((pool) << 2)) |
65 | #define MVNETA_PORT_POOL_BUFFER_SZ_SHIFT 3 |
66 | #define MVNETA_PORT_POOL_BUFFER_SZ_MASK 0xfff8 |
67 | #define MVNETA_PORT_RX_RESET 0x1cc0 |
68 | #define MVNETA_PORT_RX_DMA_RESET BIT(0) |
69 | #define MVNETA_PHY_ADDR 0x2000 |
70 | #define MVNETA_PHY_ADDR_MASK 0x1f |
71 | #define MVNETA_MBUS_RETRY 0x2010 |
72 | #define MVNETA_UNIT_INTR_CAUSE 0x2080 |
73 | #define MVNETA_UNIT_CONTROL 0x20B0 |
74 | #define MVNETA_PHY_POLLING_ENABLE BIT(1) |
75 | #define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3)) |
76 | #define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3)) |
77 | #define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2)) |
78 | #define MVNETA_BASE_ADDR_ENABLE 0x2290 |
79 | #define MVNETA_AC5_CNM_DDR_TARGET 0x2 |
80 | #define MVNETA_AC5_CNM_DDR_ATTR 0xb |
81 | #define MVNETA_ACCESS_PROTECT_ENABLE 0x2294 |
82 | #define MVNETA_PORT_CONFIG 0x2400 |
83 | #define MVNETA_UNI_PROMISC_MODE BIT(0) |
84 | #define MVNETA_DEF_RXQ(q) ((q) << 1) |
85 | #define MVNETA_DEF_RXQ_ARP(q) ((q) << 4) |
86 | #define MVNETA_TX_UNSET_ERR_SUM BIT(12) |
87 | #define MVNETA_DEF_RXQ_TCP(q) ((q) << 16) |
88 | #define MVNETA_DEF_RXQ_UDP(q) ((q) << 19) |
89 | #define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22) |
90 | #define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25) |
91 | #define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \ |
92 | MVNETA_DEF_RXQ_ARP(q) | \ |
93 | MVNETA_DEF_RXQ_TCP(q) | \ |
94 | MVNETA_DEF_RXQ_UDP(q) | \ |
95 | MVNETA_DEF_RXQ_BPDU(q) | \ |
96 | MVNETA_TX_UNSET_ERR_SUM | \ |
97 | MVNETA_RX_CSUM_WITH_PSEUDO_HDR) |
98 | #define MVNETA_PORT_CONFIG_EXTEND 0x2404 |
99 | #define MVNETA_MAC_ADDR_LOW 0x2414 |
100 | #define MVNETA_MAC_ADDR_HIGH 0x2418 |
101 | #define MVNETA_SDMA_CONFIG 0x241c |
102 | #define MVNETA_SDMA_BRST_SIZE_16 4 |
103 | #define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1) |
104 | #define MVNETA_RX_NO_DATA_SWAP BIT(4) |
105 | #define MVNETA_TX_NO_DATA_SWAP BIT(5) |
106 | #define MVNETA_DESC_SWAP BIT(6) |
107 | #define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22) |
108 | #define MVNETA_VLAN_PRIO_TO_RXQ 0x2440 |
109 | #define MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq) ((rxq) << ((prio) * 3)) |
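/* Each VLAN priority owns three bits of the register; e.g. steering
 * priority 5 to rxq 2 yields MVNETA_VLAN_PRIO_RXQ_MAP(5, 2) == 2 << 15
 * (an illustrative value, worked out from the macro above).
 */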
110 | #define MVNETA_PORT_STATUS 0x2444 |
111 | #define MVNETA_TX_IN_PRGRS BIT(0) |
112 | #define MVNETA_TX_FIFO_EMPTY BIT(8) |
113 | #define MVNETA_RX_MIN_FRAME_SIZE 0x247c |
114 | /* Only exists on Armada XP and Armada 370 */ |
115 | #define MVNETA_SERDES_CFG 0x24A0 |
116 | #define MVNETA_SGMII_SERDES_PROTO 0x0cc7 |
117 | #define MVNETA_QSGMII_SERDES_PROTO 0x0667 |
118 | #define MVNETA_HSGMII_SERDES_PROTO 0x1107 |
119 | #define MVNETA_TYPE_PRIO 0x24bc |
120 | #define MVNETA_FORCE_UNI BIT(21) |
121 | #define MVNETA_TXQ_CMD_1 0x24e4 |
122 | #define MVNETA_TXQ_CMD 0x2448 |
123 | #define MVNETA_TXQ_DISABLE_SHIFT 8 |
124 | #define MVNETA_TXQ_ENABLE_MASK 0x000000ff |
125 | #define MVNETA_RX_DISCARD_FRAME_COUNT 0x2484 |
126 | #define MVNETA_OVERRUN_FRAME_COUNT 0x2488 |
127 | #define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4 |
128 | #define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31) |
129 | #define MVNETA_ACC_MODE 0x2500 |
130 | #define MVNETA_BM_ADDRESS 0x2504 |
131 | #define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2)) |
132 | #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff |
133 | #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00 |
134 | #define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq) |
135 | #define MVNETA_CPU_TXQ_ACCESS(txq) BIT(txq + 8) |
136 | #define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2)) |
137 | |
138 | /* Exception Interrupt Port/Queue Cause register |
139 | * |
 * Their behavior depends on the mapping done using the PCPX2Q
 * registers. For a given CPU, if the bit associated with a queue is
 * not set, then a read of this register from that CPU will always
 * return 0 and a write won't do anything.
144 | */ |
145 | |
146 | #define MVNETA_INTR_NEW_CAUSE 0x25a0 |
147 | #define MVNETA_INTR_NEW_MASK 0x25a4 |
148 | |
149 | /* bits 0..7 = TXQ SENT, one bit per queue. |
150 | * bits 8..15 = RXQ OCCUP, one bit per queue. |
151 | * bits 16..23 = RXQ FREE, one bit per queue. |
152 | * bit 29 = OLD_REG_SUM, see old reg ? |
153 | * bit 30 = TX_ERR_SUM, one bit for 4 ports |
154 | * bit 31 = MISC_SUM, one bit for 4 ports |
155 | */ |
156 | #define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0) |
157 | #define MVNETA_TX_INTR_MASK_ALL (0xff << 0) |
158 | #define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8) |
159 | #define MVNETA_RX_INTR_MASK_ALL (0xff << 8) |
160 | #define MVNETA_MISCINTR_INTR_MASK BIT(31) |
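/* Worked example (from the macros above): with all eight queues in use,
 * MVNETA_TX_INTR_MASK(8) == 0x000000ff and MVNETA_RX_INTR_MASK(8) ==
 * 0x0000ff00, i.e. exactly the *_INTR_MASK_ALL values.
 */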
161 | |
162 | #define MVNETA_INTR_OLD_CAUSE 0x25a8 |
163 | #define MVNETA_INTR_OLD_MASK 0x25ac |
164 | |
165 | /* Data Path Port/Queue Cause Register */ |
166 | #define MVNETA_INTR_MISC_CAUSE 0x25b0 |
167 | #define MVNETA_INTR_MISC_MASK 0x25b4 |
168 | |
169 | #define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0) |
170 | #define MVNETA_CAUSE_LINK_CHANGE BIT(1) |
171 | #define MVNETA_CAUSE_PTP BIT(4) |
172 | |
173 | #define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7) |
174 | #define MVNETA_CAUSE_RX_OVERRUN BIT(8) |
175 | #define MVNETA_CAUSE_RX_CRC_ERROR BIT(9) |
176 | #define MVNETA_CAUSE_RX_LARGE_PKT BIT(10) |
177 | #define MVNETA_CAUSE_TX_UNDERUN BIT(11) |
178 | #define MVNETA_CAUSE_PRBS_ERR BIT(12) |
179 | #define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13) |
180 | #define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14) |
181 | |
182 | #define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16 |
183 | #define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT) |
184 | #define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool))) |
185 | |
186 | #define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24 |
187 | #define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT) |
188 | #define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q))) |
189 | |
190 | #define MVNETA_INTR_ENABLE 0x25b8 |
191 | #define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00 |
192 | #define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0x000000ff |
193 | |
194 | #define MVNETA_RXQ_CMD 0x2680 |
195 | #define MVNETA_RXQ_DISABLE_SHIFT 8 |
196 | #define MVNETA_RXQ_ENABLE_MASK 0x000000ff |
197 | #define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4)) |
198 | #define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4)) |
199 | #define MVNETA_GMAC_CTRL_0 0x2c00 |
200 | #define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2 |
201 | #define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc |
202 | #define MVNETA_GMAC0_PORT_1000BASE_X BIT(1) |
203 | #define MVNETA_GMAC0_PORT_ENABLE BIT(0) |
204 | #define MVNETA_GMAC_CTRL_2 0x2c08 |
205 | #define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0) |
206 | #define MVNETA_GMAC2_PCS_ENABLE BIT(3) |
207 | #define MVNETA_GMAC2_PORT_RGMII BIT(4) |
208 | #define MVNETA_GMAC2_PORT_RESET BIT(6) |
209 | #define MVNETA_GMAC_STATUS 0x2c10 |
210 | #define MVNETA_GMAC_LINK_UP BIT(0) |
211 | #define MVNETA_GMAC_SPEED_1000 BIT(1) |
212 | #define MVNETA_GMAC_SPEED_100 BIT(2) |
213 | #define MVNETA_GMAC_FULL_DUPLEX BIT(3) |
214 | #define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4) |
215 | #define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5) |
216 | #define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6) |
217 | #define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7) |
218 | #define MVNETA_GMAC_AN_COMPLETE BIT(11) |
219 | #define MVNETA_GMAC_SYNC_OK BIT(14) |
220 | #define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c |
221 | #define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0) |
222 | #define MVNETA_GMAC_FORCE_LINK_PASS BIT(1) |
223 | #define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2) |
224 | #define MVNETA_GMAC_AN_BYPASS_ENABLE BIT(3) |
225 | #define MVNETA_GMAC_INBAND_RESTART_AN BIT(4) |
226 | #define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5) |
227 | #define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6) |
228 | #define MVNETA_GMAC_AN_SPEED_EN BIT(7) |
229 | #define MVNETA_GMAC_CONFIG_FLOW_CTRL BIT(8) |
230 | #define MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL BIT(9) |
231 | #define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11) |
232 | #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12) |
233 | #define MVNETA_GMAC_AN_DUPLEX_EN BIT(13) |
234 | #define MVNETA_GMAC_CTRL_4 0x2c90 |
235 | #define MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE BIT(1) |
236 | #define MVNETA_MIB_COUNTERS_BASE 0x3000 |
237 | #define MVNETA_MIB_LATE_COLLISION 0x7c |
238 | #define MVNETA_DA_FILT_SPEC_MCAST 0x3400 |
239 | #define MVNETA_DA_FILT_OTH_MCAST 0x3500 |
240 | #define MVNETA_DA_FILT_UCAST_BASE 0x3600 |
241 | #define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2)) |
242 | #define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2)) |
243 | #define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000 |
244 | #define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16) |
245 | #define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2)) |
246 | #define MVNETA_TXQ_DEC_SENT_SHIFT 16 |
247 | #define MVNETA_TXQ_DEC_SENT_MASK 0xff |
248 | #define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2)) |
249 | #define MVNETA_TXQ_SENT_DESC_SHIFT 16 |
250 | #define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000 |
251 | #define MVNETA_PORT_TX_RESET 0x3cf0 |
252 | #define MVNETA_PORT_TX_DMA_RESET BIT(0) |
253 | #define MVNETA_TXQ_CMD1_REG 0x3e00 |
254 | #define MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 BIT(3) |
255 | #define MVNETA_TXQ_CMD1_BW_LIM_EN BIT(0) |
256 | #define MVNETA_REFILL_NUM_CLK_REG 0x3e08 |
257 | #define MVNETA_REFILL_MAX_NUM_CLK 0x0000ffff |
258 | #define MVNETA_TX_MTU 0x3e0c |
259 | #define MVNETA_TX_TOKEN_SIZE 0x3e14 |
260 | #define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff |
261 | #define MVNETA_TXQ_BUCKET_REFILL_REG(q) (0x3e20 + ((q) << 2)) |
262 | #define MVNETA_TXQ_BUCKET_REFILL_PERIOD_MASK 0x3ff00000 |
263 | #define MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT 20 |
264 | #define MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX 0x0007ffff |
265 | #define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2)) |
266 | #define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff |
267 | |
/* The values of the bucket refill base period and refill period are taken from
 * the reference manual, and add up to a base resolution of 10Kbps. This allows
 * covering all rate-limit values from 10Kbps up to 5Gbps
271 | */ |
272 | |
273 | /* Base period for the rate limit algorithm */ |
274 | #define MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS 100 |
275 | |
276 | /* Number of Base Period to wait between each bucket refill */ |
277 | #define MVNETA_TXQ_BUCKET_REFILL_PERIOD 1000 |
278 | |
279 | /* The base resolution for rate limiting, in bps. Any max_rate value should be |
280 | * a multiple of that value. |
281 | */ |
282 | #define MVNETA_TXQ_RATE_LIMIT_RESOLUTION (NSEC_PER_SEC / \ |
283 | (MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS * \ |
284 | MVNETA_TXQ_BUCKET_REFILL_PERIOD)) |
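/* Worked out: NSEC_PER_SEC / (100 * 1000) = 10^9 / 10^5 = 10000, i.e.
 * the 10Kbps base resolution described above.
 */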
285 | |
286 | #define MVNETA_LPI_CTRL_0 0x2cc0 |
287 | #define MVNETA_LPI_CTRL_1 0x2cc4 |
288 | #define MVNETA_LPI_REQUEST_ENABLE BIT(0) |
289 | #define MVNETA_LPI_CTRL_2 0x2cc8 |
290 | #define MVNETA_LPI_STATUS 0x2ccc |
291 | |
292 | #define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff |
293 | |
294 | /* Descriptor ring Macros */ |
295 | #define MVNETA_QUEUE_NEXT_DESC(q, index) \ |
296 | (((index) < (q)->last_desc) ? ((index) + 1) : 0) |
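/* Illustrative sketch (not compiled): the macro wraps at last_desc,
 * so with a 512-entry ring (last_desc == 511):
 *
 *	MVNETA_QUEUE_NEXT_DESC(rxq, 510) -> 511
 *	MVNETA_QUEUE_NEXT_DESC(rxq, 511) -> 0
 */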
297 | |
298 | /* Various constants */ |
299 | |
300 | /* Coalescing */ |
301 | #define MVNETA_TXDONE_COAL_PKTS 0 /* interrupt per packet */ |
302 | #define MVNETA_RX_COAL_PKTS 32 |
303 | #define MVNETA_RX_COAL_USEC 100 |
304 | |
/* The two-byte Marvell header. It either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically with zeroes on
 * the RX side. Since those two bytes sit at the front of the Ethernet
 * header, they allow the IP header to be aligned on a 4-byte boundary
 * automatically: the hardware skips those two bytes on its
 * own.
312 | */ |
313 | #define MVNETA_MH_SIZE 2 |
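/* Worked out: with the 2-byte MH prepended, the 14-byte Ethernet header
 * ends at offset 2 + 14 = 16, so the IP header starts on a 4-byte
 * boundary.
 */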
314 | |
315 | #define MVNETA_VLAN_TAG_LEN 4 |
316 | |
317 | #define MVNETA_TX_CSUM_DEF_SIZE 1600 |
318 | #define MVNETA_TX_CSUM_MAX_SIZE 9800 |
319 | #define MVNETA_ACC_MODE_EXT1 1 |
320 | #define MVNETA_ACC_MODE_EXT2 2 |
321 | |
322 | #define MVNETA_MAX_DECODE_WIN 6 |
323 | |
324 | /* Timeout constants */ |
325 | #define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000 |
326 | #define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000 |
327 | #define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000 |
328 | |
329 | #define MVNETA_TX_MTU_MAX 0x3ffff |
330 | |
331 | /* The RSS lookup table actually has 256 entries but we do not use |
332 | * them yet |
333 | */ |
#define MVNETA_RSS_LU_TABLE_SIZE	1
335 | |
336 | /* Max number of Rx descriptors */ |
337 | #define MVNETA_MAX_RXD 512 |
338 | |
339 | /* Max number of Tx descriptors */ |
340 | #define MVNETA_MAX_TXD 1024 |
341 | |
342 | /* Max number of allowed TCP segments for software TSO */ |
343 | #define MVNETA_MAX_TSO_SEGS 100 |
344 | |
345 | #define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) |
346 | |
347 | /* The size of a TSO header page */ |
348 | #define MVNETA_TSO_PAGE_SIZE (2 * PAGE_SIZE) |
349 | |
350 | /* Number of TSO headers per page. This should be a power of 2 */ |
351 | #define MVNETA_TSO_PER_PAGE (MVNETA_TSO_PAGE_SIZE / TSO_HEADER_SIZE) |
352 | |
353 | /* Maximum number of TSO header pages */ |
354 | #define MVNETA_MAX_TSO_PAGES (MVNETA_MAX_TXD / MVNETA_TSO_PER_PAGE) |
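/* Worked example, assuming 4KiB pages and a 128-byte TSO_HEADER_SIZE
 * (both assumptions, not guaranteed on every config): the header page
 * is 2 * 4096 = 8192 bytes, giving 8192 / 128 = 64 headers per page and
 * 1024 / 64 = 16 header pages for a full TX ring.
 */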
355 | |
356 | /* descriptor aligned size */ |
357 | #define MVNETA_DESC_ALIGNED_SIZE 32 |
358 | |
359 | /* Number of bytes to be taken into account by HW when putting incoming data |
360 | * to the buffers. It is needed in case NET_SKB_PAD exceeds maximum packet |
361 | * offset supported in MVNETA_RXQ_CONFIG_REG(q) registers. |
362 | */ |
363 | #define MVNETA_RX_PKT_OFFSET_CORRECTION 64 |
364 | |
365 | #define MVNETA_RX_PKT_SIZE(mtu) \ |
366 | ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \ |
367 | ETH_HLEN + ETH_FCS_LEN, \ |
368 | cache_line_size()) |
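/* Worked example, assuming a 64-byte cache line:
 * MVNETA_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 + 14 + 4, 64)
 *                          = ALIGN(1524, 64) = 1536 bytes.
 */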
369 | |
370 | /* Driver assumes that the last 3 bits are 0 */ |
371 | #define MVNETA_SKB_HEADROOM ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) |
372 | #define MVNETA_SKB_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \ |
373 | MVNETA_SKB_HEADROOM)) |
374 | #define MVNETA_MAX_RX_BUF_SIZE (PAGE_SIZE - MVNETA_SKB_PAD) |
375 | |
376 | #define MVNETA_RX_GET_BM_POOL_ID(rxd) \ |
377 | (((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT) |
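/* The pool id lives in status bits 14:13, so the value extracted here
 * is always in the 0..3 range of the hardware buffer manager pools.
 */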
378 | |
379 | enum { |
380 | ETHTOOL_STAT_EEE_WAKEUP, |
381 | ETHTOOL_STAT_SKB_ALLOC_ERR, |
382 | ETHTOOL_STAT_REFILL_ERR, |
383 | ETHTOOL_XDP_REDIRECT, |
384 | ETHTOOL_XDP_PASS, |
385 | ETHTOOL_XDP_DROP, |
386 | ETHTOOL_XDP_TX, |
387 | ETHTOOL_XDP_TX_ERR, |
388 | ETHTOOL_XDP_XMIT, |
389 | ETHTOOL_XDP_XMIT_ERR, |
390 | ETHTOOL_MAX_STATS, |
391 | }; |
392 | |
393 | struct mvneta_statistic { |
394 | unsigned short offset; |
395 | unsigned short type; |
396 | const char name[ETH_GSTRING_LEN]; |
397 | }; |
398 | |
399 | #define T_REG_32 32 |
400 | #define T_REG_64 64 |
401 | #define T_SW 1 |
402 | |
403 | #define MVNETA_XDP_PASS 0 |
404 | #define MVNETA_XDP_DROPPED BIT(0) |
405 | #define MVNETA_XDP_TX BIT(1) |
406 | #define MVNETA_XDP_REDIR BIT(2) |
407 | |
static const struct mvneta_statistic mvneta_statistics[] = {
	{ 0x3000, T_REG_64, "good_octets_received", },
	{ 0x3010, T_REG_32, "good_frames_received", },
	{ 0x3008, T_REG_32, "bad_octets_received", },
	{ 0x3014, T_REG_32, "bad_frames_received", },
	{ 0x3018, T_REG_32, "broadcast_frames_received", },
	{ 0x301c, T_REG_32, "multicast_frames_received", },
	{ 0x3050, T_REG_32, "unrec_mac_control_received", },
	{ 0x3058, T_REG_32, "good_fc_received", },
	{ 0x305c, T_REG_32, "bad_fc_received", },
	{ 0x3060, T_REG_32, "undersize_received", },
	{ 0x3064, T_REG_32, "fragments_received", },
	{ 0x3068, T_REG_32, "oversize_received", },
	{ 0x306c, T_REG_32, "jabber_received", },
	{ 0x3070, T_REG_32, "mac_receive_error", },
	{ 0x3074, T_REG_32, "bad_crc_event", },
	{ 0x3078, T_REG_32, "collision", },
	{ 0x307c, T_REG_32, "late_collision", },
	{ 0x2484, T_REG_32, "rx_discard", },
	{ 0x2488, T_REG_32, "rx_overrun", },
	{ 0x3020, T_REG_32, "frames_64_octets", },
	{ 0x3024, T_REG_32, "frames_65_to_127_octets", },
	{ 0x3028, T_REG_32, "frames_128_to_255_octets", },
	{ 0x302c, T_REG_32, "frames_256_to_511_octets", },
	{ 0x3030, T_REG_32, "frames_512_to_1023_octets", },
	{ 0x3034, T_REG_32, "frames_1024_to_max_octets", },
	{ 0x3038, T_REG_64, "good_octets_sent", },
	{ 0x3040, T_REG_32, "good_frames_sent", },
	{ 0x3044, T_REG_32, "excessive_collision", },
	{ 0x3048, T_REG_32, "multicast_frames_sent", },
	{ 0x304c, T_REG_32, "broadcast_frames_sent", },
	{ 0x3054, T_REG_32, "fc_sent", },
	{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
	{ ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
	{ ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
	{ ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
	{ ETHTOOL_XDP_REDIRECT, T_SW, "rx_xdp_redirect", },
	{ ETHTOOL_XDP_PASS, T_SW, "rx_xdp_pass", },
	{ ETHTOOL_XDP_DROP, T_SW, "rx_xdp_drop", },
	{ ETHTOOL_XDP_TX, T_SW, "rx_xdp_tx", },
	{ ETHTOOL_XDP_TX_ERR, T_SW, "rx_xdp_tx_errors", },
	{ ETHTOOL_XDP_XMIT, T_SW, "tx_xdp_xmit", },
	{ ETHTOOL_XDP_XMIT_ERR, T_SW, "tx_xdp_xmit_errors", },
};
452 | |
453 | struct mvneta_stats { |
454 | u64 rx_packets; |
455 | u64 rx_bytes; |
456 | u64 tx_packets; |
457 | u64 tx_bytes; |
458 | /* xdp */ |
459 | u64 xdp_redirect; |
460 | u64 xdp_pass; |
461 | u64 xdp_drop; |
462 | u64 xdp_xmit; |
463 | u64 xdp_xmit_err; |
464 | u64 xdp_tx; |
465 | u64 xdp_tx_err; |
466 | }; |
467 | |
468 | struct mvneta_ethtool_stats { |
469 | struct mvneta_stats ps; |
470 | u64 skb_alloc_error; |
471 | u64 refill_error; |
472 | }; |
473 | |
474 | struct mvneta_pcpu_stats { |
475 | struct u64_stats_sync syncp; |
476 | |
477 | struct mvneta_ethtool_stats es; |
478 | u64 rx_dropped; |
479 | u64 rx_errors; |
480 | }; |
481 | |
482 | struct mvneta_pcpu_port { |
483 | /* Pointer to the shared port */ |
484 | struct mvneta_port *pp; |
485 | |
486 | /* Pointer to the CPU-local NAPI struct */ |
487 | struct napi_struct napi; |
488 | |
489 | /* Cause of the previous interrupt */ |
490 | u32 cause_rx_tx; |
491 | }; |
492 | |
493 | enum { |
494 | __MVNETA_DOWN, |
495 | }; |
496 | |
497 | struct mvneta_port { |
498 | u8 id; |
499 | struct mvneta_pcpu_port __percpu *ports; |
500 | struct mvneta_pcpu_stats __percpu *stats; |
501 | |
502 | unsigned long state; |
503 | |
504 | int pkt_size; |
505 | void __iomem *base; |
506 | struct mvneta_rx_queue *rxqs; |
507 | struct mvneta_tx_queue *txqs; |
508 | struct net_device *dev; |
509 | struct hlist_node node_online; |
510 | struct hlist_node node_dead; |
511 | int rxq_def; |
512 | /* Protect the access to the percpu interrupt registers, |
513 | * ensuring that the configuration remains coherent. |
514 | */ |
515 | spinlock_t lock; |
516 | bool is_stopped; |
517 | |
518 | u32 cause_rx_tx; |
519 | struct napi_struct napi; |
520 | |
521 | struct bpf_prog *xdp_prog; |
522 | |
523 | /* Core clock */ |
524 | struct clk *clk; |
525 | /* AXI clock */ |
526 | struct clk *clk_bus; |
527 | u8 mcast_count[256]; |
528 | u16 tx_ring_size; |
529 | u16 rx_ring_size; |
530 | |
531 | phy_interface_t phy_interface; |
532 | struct device_node *dn; |
533 | unsigned int tx_csum_limit; |
534 | struct phylink *phylink; |
535 | struct phylink_config phylink_config; |
536 | struct phylink_pcs phylink_pcs; |
537 | struct phy *comphy; |
538 | |
539 | struct mvneta_bm *bm_priv; |
540 | struct mvneta_bm_pool *pool_long; |
541 | struct mvneta_bm_pool *pool_short; |
542 | int bm_win_id; |
543 | |
544 | bool eee_enabled; |
545 | bool eee_active; |
546 | bool tx_lpi_enabled; |
547 | |
548 | u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)]; |
549 | |
550 | u32 indir[MVNETA_RSS_LU_TABLE_SIZE]; |
551 | |
552 | /* Flags for special SoC configurations */ |
553 | bool neta_armada3700; |
554 | bool neta_ac5; |
555 | u16 rx_offset_correction; |
556 | const struct mbus_dram_target_info *dram_target_info; |
557 | }; |
558 | |
559 | /* The mvneta_tx_desc and mvneta_rx_desc structures describe the |
560 | * layout of the transmit and reception DMA descriptors, and their |
561 | * layout is therefore defined by the hardware design |
562 | */ |
563 | |
564 | #define MVNETA_TX_L3_OFF_SHIFT 0 |
565 | #define MVNETA_TX_IP_HLEN_SHIFT 8 |
566 | #define MVNETA_TX_L4_UDP BIT(16) |
567 | #define MVNETA_TX_L3_IP6 BIT(17) |
568 | #define MVNETA_TXD_IP_CSUM BIT(18) |
569 | #define MVNETA_TXD_Z_PAD BIT(19) |
570 | #define MVNETA_TXD_L_DESC BIT(20) |
571 | #define MVNETA_TXD_F_DESC BIT(21) |
572 | #define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \ |
573 | MVNETA_TXD_L_DESC | \ |
574 | MVNETA_TXD_F_DESC) |
575 | #define MVNETA_TX_L4_CSUM_FULL BIT(30) |
576 | #define MVNETA_TX_L4_CSUM_NOT BIT(31) |
577 | |
578 | #define MVNETA_RXD_ERR_CRC 0x0 |
579 | #define MVNETA_RXD_BM_POOL_SHIFT 13 |
580 | #define MVNETA_RXD_BM_POOL_MASK (BIT(13) | BIT(14)) |
581 | #define MVNETA_RXD_ERR_SUMMARY BIT(16) |
582 | #define MVNETA_RXD_ERR_OVERRUN BIT(17) |
583 | #define MVNETA_RXD_ERR_LEN BIT(18) |
584 | #define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18)) |
585 | #define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18)) |
586 | #define MVNETA_RXD_L3_IP4 BIT(25) |
587 | #define MVNETA_RXD_LAST_DESC BIT(26) |
588 | #define MVNETA_RXD_FIRST_DESC BIT(27) |
589 | #define MVNETA_RXD_FIRST_LAST_DESC (MVNETA_RXD_FIRST_DESC | \ |
590 | MVNETA_RXD_LAST_DESC) |
591 | #define MVNETA_RXD_L4_CSUM_OK BIT(30) |
592 | |
593 | #if defined(__LITTLE_ENDIAN) |
594 | struct mvneta_tx_desc { |
595 | u32 command; /* Options used by HW for packet transmitting.*/ |
596 | u16 reserved1; /* csum_l4 (for future use) */ |
597 | u16 data_size; /* Data size of transmitted packet in bytes */ |
598 | u32 buf_phys_addr; /* Physical addr of transmitted buffer */ |
599 | u32 reserved2; /* hw_cmd - (for future use, PMT) */ |
600 | u32 reserved3[4]; /* Reserved - (for future use) */ |
601 | }; |
602 | |
603 | struct mvneta_rx_desc { |
604 | u32 status; /* Info about received packet */ |
605 | u16 reserved1; /* pnc_info - (for future use, PnC) */ |
606 | u16 data_size; /* Size of received packet in bytes */ |
607 | |
608 | u32 buf_phys_addr; /* Physical address of the buffer */ |
609 | u32 reserved2; /* pnc_flow_id (for future use, PnC) */ |
610 | |
611 | u32 buf_cookie; /* cookie for access to RX buffer in rx path */ |
612 | u16 reserved3; /* prefetch_cmd, for future use */ |
613 | u16 reserved4; /* csum_l4 - (for future use, PnC) */ |
614 | |
615 | u32 reserved5; /* pnc_extra PnC (for future use, PnC) */ |
616 | u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */ |
617 | }; |
618 | #else |
619 | struct mvneta_tx_desc { |
620 | u16 data_size; /* Data size of transmitted packet in bytes */ |
621 | u16 reserved1; /* csum_l4 (for future use) */ |
622 | u32 command; /* Options used by HW for packet transmitting.*/ |
623 | u32 reserved2; /* hw_cmd - (for future use, PMT) */ |
624 | u32 buf_phys_addr; /* Physical addr of transmitted buffer */ |
625 | u32 reserved3[4]; /* Reserved - (for future use) */ |
626 | }; |
627 | |
628 | struct mvneta_rx_desc { |
629 | u16 data_size; /* Size of received packet in bytes */ |
630 | u16 reserved1; /* pnc_info - (for future use, PnC) */ |
631 | u32 status; /* Info about received packet */ |
632 | |
633 | u32 reserved2; /* pnc_flow_id (for future use, PnC) */ |
634 | u32 buf_phys_addr; /* Physical address of the buffer */ |
635 | |
636 | u16 reserved4; /* csum_l4 - (for future use, PnC) */ |
637 | u16 reserved3; /* prefetch_cmd, for future use */ |
638 | u32 buf_cookie; /* cookie for access to RX buffer in rx path */ |
639 | |
640 | u32 reserved5; /* pnc_extra PnC (for future use, PnC) */ |
641 | u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */ |
642 | }; |
643 | #endif |
644 | |
645 | enum mvneta_tx_buf_type { |
646 | MVNETA_TYPE_TSO, |
647 | MVNETA_TYPE_SKB, |
648 | MVNETA_TYPE_XDP_TX, |
649 | MVNETA_TYPE_XDP_NDO, |
650 | }; |
651 | |
652 | struct mvneta_tx_buf { |
653 | enum mvneta_tx_buf_type type; |
654 | union { |
655 | struct xdp_frame *xdpf; |
656 | struct sk_buff *skb; |
657 | }; |
658 | }; |
659 | |
660 | struct mvneta_tx_queue { |
661 | /* Number of this TX queue, in the range 0-7 */ |
662 | u8 id; |
663 | |
664 | /* Number of TX DMA descriptors in the descriptor ring */ |
665 | int size; |
666 | |
667 | /* Number of currently used TX DMA descriptor in the |
668 | * descriptor ring |
669 | */ |
670 | int count; |
671 | int pending; |
672 | int tx_stop_threshold; |
673 | int tx_wake_threshold; |
674 | |
675 | /* Array of transmitted buffers */ |
676 | struct mvneta_tx_buf *buf; |
677 | |
678 | /* Index of last TX DMA descriptor that was inserted */ |
679 | int txq_put_index; |
680 | |
681 | /* Index of the TX DMA descriptor to be cleaned up */ |
682 | int txq_get_index; |
683 | |
684 | u32 done_pkts_coal; |
685 | |
686 | /* Virtual address of the TX DMA descriptors array */ |
687 | struct mvneta_tx_desc *descs; |
688 | |
689 | /* DMA address of the TX DMA descriptors array */ |
690 | dma_addr_t descs_phys; |
691 | |
692 | /* Index of the last TX DMA descriptor */ |
693 | int last_desc; |
694 | |
695 | /* Index of the next TX DMA descriptor to process */ |
696 | int next_desc_to_proc; |
697 | |
698 | /* DMA buffers for TSO headers */ |
699 | char *tso_hdrs[MVNETA_MAX_TSO_PAGES]; |
700 | |
701 | /* DMA address of TSO headers */ |
702 | dma_addr_t tso_hdrs_phys[MVNETA_MAX_TSO_PAGES]; |
703 | |
704 | /* Affinity mask for CPUs*/ |
705 | cpumask_t affinity_mask; |
706 | }; |
707 | |
708 | struct mvneta_rx_queue { |
709 | /* rx queue number, in the range 0-7 */ |
710 | u8 id; |
711 | |
712 | /* num of rx descriptors in the rx descriptor ring */ |
713 | int size; |
714 | |
715 | u32 pkts_coal; |
716 | u32 time_coal; |
717 | |
718 | /* page_pool */ |
719 | struct page_pool *page_pool; |
720 | struct xdp_rxq_info xdp_rxq; |
721 | |
722 | /* Virtual address of the RX buffer */ |
723 | void **buf_virt_addr; |
724 | |
725 | /* Virtual address of the RX DMA descriptors array */ |
726 | struct mvneta_rx_desc *descs; |
727 | |
728 | /* DMA address of the RX DMA descriptors array */ |
729 | dma_addr_t descs_phys; |
730 | |
731 | /* Index of the last RX DMA descriptor */ |
732 | int last_desc; |
733 | |
734 | /* Index of the next RX DMA descriptor to process */ |
735 | int next_desc_to_proc; |
736 | |
737 | /* Index of first RX DMA descriptor to refill */ |
738 | int first_to_refill; |
739 | u32 refill_num; |
740 | }; |
741 | |
742 | static enum cpuhp_state online_hpstate; |
/* The hardware supports eight (8) RX queues and eight (8) TX queues;
 * allocate all of them, with rxq_def selecting the default RX queue.
 */
746 | static int rxq_number = 8; |
747 | static int txq_number = 8; |
748 | |
749 | static int rxq_def; |
750 | |
751 | static int rx_copybreak __read_mostly = 256; |
752 | |
/* HW BM needs each port to be identified by a unique ID */
754 | static int global_port_id; |
755 | |
756 | #define MVNETA_DRIVER_NAME "mvneta" |
757 | #define MVNETA_DRIVER_VERSION "1.0" |
758 | |
759 | /* Utility/helper methods */ |
760 | |
761 | /* Write helper method */ |
762 | static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data) |
763 | { |
	writel(data, pp->base + offset);
765 | } |
766 | |
767 | /* Read helper method */ |
768 | static u32 mvreg_read(struct mvneta_port *pp, u32 offset) |
769 | { |
	return readl(pp->base + offset);
771 | } |
772 | |
773 | /* Increment txq get counter */ |
774 | static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq) |
775 | { |
776 | txq->txq_get_index++; |
777 | if (txq->txq_get_index == txq->size) |
778 | txq->txq_get_index = 0; |
779 | } |
780 | |
781 | /* Increment txq put counter */ |
782 | static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq) |
783 | { |
784 | txq->txq_put_index++; |
785 | if (txq->txq_put_index == txq->size) |
786 | txq->txq_put_index = 0; |
787 | } |
788 | |
789 | |
790 | /* Clear all MIB counters */ |
791 | static void mvneta_mib_counters_clear(struct mvneta_port *pp) |
792 | { |
793 | int i; |
794 | |
795 | /* Perform dummy reads from MIB counters */ |
796 | for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4) |
		mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
798 | mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT); |
799 | mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT); |
800 | } |
801 | |
802 | /* Get System Network Statistics */ |
803 | static void |
804 | mvneta_get_stats64(struct net_device *dev, |
805 | struct rtnl_link_stats64 *stats) |
806 | { |
807 | struct mvneta_port *pp = netdev_priv(dev); |
808 | unsigned int start; |
809 | int cpu; |
810 | |
811 | for_each_possible_cpu(cpu) { |
812 | struct mvneta_pcpu_stats *cpu_stats; |
813 | u64 rx_packets; |
814 | u64 rx_bytes; |
815 | u64 rx_dropped; |
816 | u64 rx_errors; |
817 | u64 tx_packets; |
818 | u64 tx_bytes; |
819 | |
820 | cpu_stats = per_cpu_ptr(pp->stats, cpu); |
821 | do { |
			start = u64_stats_fetch_begin(&cpu_stats->syncp);
823 | rx_packets = cpu_stats->es.ps.rx_packets; |
824 | rx_bytes = cpu_stats->es.ps.rx_bytes; |
825 | rx_dropped = cpu_stats->rx_dropped; |
826 | rx_errors = cpu_stats->rx_errors; |
827 | tx_packets = cpu_stats->es.ps.tx_packets; |
828 | tx_bytes = cpu_stats->es.ps.tx_bytes; |
		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
830 | |
831 | stats->rx_packets += rx_packets; |
832 | stats->rx_bytes += rx_bytes; |
833 | stats->rx_dropped += rx_dropped; |
834 | stats->rx_errors += rx_errors; |
835 | stats->tx_packets += tx_packets; |
836 | stats->tx_bytes += tx_bytes; |
837 | } |
838 | |
839 | stats->tx_dropped = dev->stats.tx_dropped; |
840 | } |
841 | |
842 | /* Rx descriptors helper methods */ |
843 | |
/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so a descriptor without both
 * its first and last bits set is an error
 */
849 | static int mvneta_rxq_desc_is_first_last(u32 status) |
850 | { |
851 | return (status & MVNETA_RXD_FIRST_LAST_DESC) == |
852 | MVNETA_RXD_FIRST_LAST_DESC; |
853 | } |
854 | |
855 | /* Add number of descriptors ready to receive new packets */ |
856 | static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp, |
857 | struct mvneta_rx_queue *rxq, |
858 | int ndescs) |
859 | { |
860 | /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can |
861 | * be added at once |
862 | */ |
863 | while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) { |
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
867 | ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX; |
868 | } |
869 | |
	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
872 | } |
873 | |
874 | /* Get number of RX descriptors occupied by received packets */ |
875 | static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp, |
876 | struct mvneta_rx_queue *rxq) |
877 | { |
878 | u32 val; |
879 | |
880 | val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id)); |
881 | return val & MVNETA_RXQ_OCCUPIED_ALL_MASK; |
882 | } |
883 | |
/* Update the number of RX descriptors; called upon return from the RX
 * path or from mvneta_rxq_drop_pkts().
 */
887 | static void mvneta_rxq_desc_num_update(struct mvneta_port *pp, |
888 | struct mvneta_rx_queue *rxq, |
889 | int rx_done, int rx_filled) |
890 | { |
891 | u32 val; |
892 | |
893 | if ((rx_done <= 0xff) && (rx_filled <= 0xff)) { |
894 | val = rx_done | |
895 | (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT); |
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
897 | return; |
898 | } |
899 | |
900 | /* Only 255 descriptors can be added at once */ |
901 | while ((rx_done > 0) || (rx_filled > 0)) { |
902 | if (rx_done <= 0xff) { |
903 | val = rx_done; |
904 | rx_done = 0; |
905 | } else { |
906 | val = 0xff; |
907 | rx_done -= 0xff; |
908 | } |
909 | if (rx_filled <= 0xff) { |
910 | val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT; |
911 | rx_filled = 0; |
912 | } else { |
913 | val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT; |
914 | rx_filled -= 0xff; |
915 | } |
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
917 | } |
918 | } |
919 | |
920 | /* Get pointer to next RX descriptor to be processed by SW */ |
921 | static struct mvneta_rx_desc * |
922 | mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq) |
923 | { |
924 | int rx_desc = rxq->next_desc_to_proc; |
925 | |
926 | rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc); |
927 | prefetch(rxq->descs + rxq->next_desc_to_proc); |
928 | return rxq->descs + rx_desc; |
929 | } |
930 | |
931 | /* Change maximum receive size of the port. */ |
932 | static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size) |
933 | { |
934 | u32 val; |
935 | |
936 | val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); |
937 | val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK; |
938 | val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) << |
939 | MVNETA_GMAC_MAX_RX_SIZE_SHIFT; |
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
941 | } |
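/* For example (illustrative), a 1518-byte maximum is programmed as
 * (1518 - MVNETA_MH_SIZE) / 2 = 758 in the 2-byte-granular field.
 */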
942 | |
943 | |
944 | /* Set rx queue offset */ |
945 | static void mvneta_rxq_offset_set(struct mvneta_port *pp, |
946 | struct mvneta_rx_queue *rxq, |
947 | int offset) |
948 | { |
949 | u32 val; |
950 | |
951 | val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); |
952 | val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK; |
953 | |
	/* Offset is in units of 8 bytes */
	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
957 | } |
958 | |
959 | |
960 | /* Tx descriptors helper methods */ |
961 | |
962 | /* Update HW with number of TX descriptors to be sent */ |
963 | static void mvneta_txq_pend_desc_add(struct mvneta_port *pp, |
964 | struct mvneta_tx_queue *txq, |
965 | int pend_desc) |
966 | { |
967 | u32 val; |
968 | |
969 | pend_desc += txq->pending; |
970 | |
971 | /* Only 255 Tx descriptors can be added at once */ |
972 | do { |
973 | val = min(pend_desc, 255); |
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
975 | pend_desc -= val; |
976 | } while (pend_desc > 0); |
977 | txq->pending = 0; |
978 | } |
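/* E.g. (illustrative) 300 pending descriptors are flushed as two
 * writes: min(300, 255) = 255 first, then the remaining 45.
 */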
979 | |
980 | /* Get pointer to next TX descriptor to be processed (send) by HW */ |
981 | static struct mvneta_tx_desc * |
982 | mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq) |
983 | { |
984 | int tx_desc = txq->next_desc_to_proc; |
985 | |
986 | txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc); |
987 | return txq->descs + tx_desc; |
988 | } |
989 | |
990 | /* Release the last allocated TX descriptor. Useful to handle DMA |
991 | * mapping failures in the TX path. |
992 | */ |
993 | static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq) |
994 | { |
995 | if (txq->next_desc_to_proc == 0) |
996 | txq->next_desc_to_proc = txq->last_desc - 1; |
997 | else |
998 | txq->next_desc_to_proc--; |
999 | } |
1000 | |
1001 | /* Set rxq buf size */ |
1002 | static void mvneta_rxq_buf_size_set(struct mvneta_port *pp, |
1003 | struct mvneta_rx_queue *rxq, |
1004 | int buf_size) |
1005 | { |
1006 | u32 val; |
1007 | |
1008 | val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id)); |
1009 | |
1010 | val &= ~MVNETA_RXQ_BUF_SIZE_MASK; |
1011 | val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT); |
1012 | |
	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
1014 | } |
1015 | |
1016 | /* Disable buffer management (BM) */ |
1017 | static void mvneta_rxq_bm_disable(struct mvneta_port *pp, |
1018 | struct mvneta_rx_queue *rxq) |
1019 | { |
1020 | u32 val; |
1021 | |
1022 | val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); |
1023 | val &= ~MVNETA_RXQ_HW_BUF_ALLOC; |
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1025 | } |
1026 | |
1027 | /* Enable buffer management (BM) */ |
1028 | static void mvneta_rxq_bm_enable(struct mvneta_port *pp, |
1029 | struct mvneta_rx_queue *rxq) |
1030 | { |
1031 | u32 val; |
1032 | |
1033 | val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); |
1034 | val |= MVNETA_RXQ_HW_BUF_ALLOC; |
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1036 | } |
1037 | |
1038 | /* Notify HW about port's assignment of pool for bigger packets */ |
1039 | static void mvneta_rxq_long_pool_set(struct mvneta_port *pp, |
1040 | struct mvneta_rx_queue *rxq) |
1041 | { |
1042 | u32 val; |
1043 | |
1044 | val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); |
1045 | val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK; |
1046 | val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT); |
1047 | |
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1049 | } |
1050 | |
1051 | /* Notify HW about port's assignment of pool for smaller packets */ |
1052 | static void mvneta_rxq_short_pool_set(struct mvneta_port *pp, |
1053 | struct mvneta_rx_queue *rxq) |
1054 | { |
1055 | u32 val; |
1056 | |
1057 | val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); |
1058 | val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK; |
1059 | val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT); |
1060 | |
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1062 | } |
1063 | |
1064 | /* Set port's receive buffer size for assigned BM pool */ |
1065 | static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp, |
1066 | int buf_size, |
1067 | u8 pool_id) |
1068 | { |
1069 | u32 val; |
1070 | |
1071 | if (!IS_ALIGNED(buf_size, 8)) { |
		dev_warn(pp->dev->dev.parent,
			 "illegal buf_size value %d, round to %d\n",
			 buf_size, ALIGN(buf_size, 8));
1075 | buf_size = ALIGN(buf_size, 8); |
1076 | } |
1077 | |
1078 | val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id)); |
1079 | val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK; |
	mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
1081 | } |
1082 | |
/* Configure MBUS window in order to enable access to the BM internal SRAM */
1084 | static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize, |
1085 | u8 target, u8 attr) |
1086 | { |
1087 | u32 win_enable, win_protect; |
1088 | int i; |
1089 | |
1090 | win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE); |
1091 | |
1092 | if (pp->bm_win_id < 0) { |
1093 | /* Find first not occupied window */ |
1094 | for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) { |
1095 | if (win_enable & (1 << i)) { |
1096 | pp->bm_win_id = i; |
1097 | break; |
1098 | } |
1099 | } |
1100 | if (i == MVNETA_MAX_DECODE_WIN) |
1101 | return -ENOMEM; |
1102 | } else { |
1103 | i = pp->bm_win_id; |
1104 | } |
1105 | |
	mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
	mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

	if (i < 4)
		mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);

	mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
		    (attr << 8) | target);

	mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);

	win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
	win_protect |= 3 << (2 * i);
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);

	win_enable &= ~(1 << i);
	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
1123 | |
1124 | return 0; |
1125 | } |
1126 | |
1127 | static int mvneta_bm_port_mbus_init(struct mvneta_port *pp) |
1128 | { |
1129 | u32 wsize; |
1130 | u8 target, attr; |
1131 | int err; |
1132 | |
1133 | /* Get BM window information */ |
	err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
					 &target, &attr);
1136 | if (err < 0) |
1137 | return err; |
1138 | |
1139 | pp->bm_win_id = -1; |
1140 | |
1141 | /* Open NETA -> BM window */ |
	err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
				     target, attr);
	if (err < 0) {
		netdev_info(pp->dev, "fail to configure mbus window to BM\n");
1146 | return err; |
1147 | } |
1148 | return 0; |
1149 | } |
1150 | |
/* Assign and initialize pools for port. In case of failure, the
 * buffer manager will remain disabled for the current port.
1153 | */ |
1154 | static int mvneta_bm_port_init(struct platform_device *pdev, |
1155 | struct mvneta_port *pp) |
1156 | { |
1157 | struct device_node *dn = pdev->dev.of_node; |
1158 | u32 long_pool_id, short_pool_id; |
1159 | |
1160 | if (!pp->neta_armada3700) { |
1161 | int ret; |
1162 | |
1163 | ret = mvneta_bm_port_mbus_init(pp); |
1164 | if (ret) |
1165 | return ret; |
1166 | } |
1167 | |
	if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) {
		netdev_info(pp->dev, "missing long pool id\n");
		return -EINVAL;
	}

	/* Create port's long pool depending on mtu */
	pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
					   MVNETA_BM_LONG, pp->id,
					   MVNETA_RX_PKT_SIZE(pp->dev->mtu));
	if (!pp->pool_long) {
		netdev_info(pp->dev, "fail to obtain long pool for port\n");
		return -ENOMEM;
	}

	pp->pool_long->port_map |= 1 << pp->id;

	mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size,
				   pp->pool_long->id);

	/* If short pool id is not defined, assume using single pool */
	if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id))
		short_pool_id = long_pool_id;

	/* Create port's short pool */
	pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
					    MVNETA_BM_SHORT, pp->id,
					    MVNETA_BM_SHORT_PKT_SIZE);
	if (!pp->pool_short) {
		netdev_info(pp->dev, "fail to obtain short pool for port\n");
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		return -ENOMEM;
	}

	if (short_pool_id != long_pool_id) {
		pp->pool_short->port_map |= 1 << pp->id;
		mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size,
					   pp->pool_short->id);
	}
1205 | } |
1206 | |
1207 | return 0; |
1208 | } |
1209 | |
1210 | /* Update settings of a pool for bigger packets */ |
1211 | static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu) |
1212 | { |
1213 | struct mvneta_bm_pool *bm_pool = pp->pool_long; |
1214 | struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool; |
1215 | int num; |
1216 | |
1217 | /* Release all buffers from long pool */ |
	mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
1219 | if (hwbm_pool->buf_num) { |
		WARN(1, "cannot free all buffers in pool %d\n",
		     bm_pool->id);
1222 | goto bm_mtu_err; |
1223 | } |
1224 | |
1225 | bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu); |
1226 | bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size); |
1227 | hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + |
1228 | SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size)); |
1229 | |
1230 | /* Fill entire long pool */ |
	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
	if (num != hwbm_pool->size) {
		WARN(1, "pool %d: %d of %d allocated\n",
		     bm_pool->id, num, hwbm_pool->size);
1235 | goto bm_mtu_err; |
1236 | } |
	mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);
1238 | |
1239 | return; |
1240 | |
1241 | bm_mtu_err: |
	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);
1244 | |
1245 | pp->bm_priv = NULL; |
1246 | pp->rx_offset_correction = MVNETA_SKB_HEADROOM; |
1247 | mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1); |
	netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
1249 | } |
1250 | |
1251 | /* Start the Ethernet port RX and TX activity */ |
1252 | static void mvneta_port_up(struct mvneta_port *pp) |
1253 | { |
1254 | int queue; |
1255 | u32 q_map; |
1256 | |
1257 | /* Enable all initialized TXs. */ |
1258 | q_map = 0; |
1259 | for (queue = 0; queue < txq_number; queue++) { |
1260 | struct mvneta_tx_queue *txq = &pp->txqs[queue]; |
1261 | if (txq->descs) |
1262 | q_map |= (1 << queue); |
1263 | } |
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
1265 | |
1266 | q_map = 0; |
1267 | /* Enable all initialized RXQs. */ |
1268 | for (queue = 0; queue < rxq_number; queue++) { |
1269 | struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; |
1270 | |
1271 | if (rxq->descs) |
1272 | q_map |= (1 << queue); |
1273 | } |
	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
1275 | } |
1276 | |
1277 | /* Stop the Ethernet port activity */ |
1278 | static void mvneta_port_down(struct mvneta_port *pp) |
1279 | { |
1280 | u32 val; |
1281 | int count; |
1282 | |
1283 | /* Stop Rx port activity. Check port Rx activity. */ |
1284 | val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK; |
1285 | |
1286 | /* Issue stop command for active channels only */ |
1287 | if (val != 0) |
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);
1290 | |
1291 | /* Wait for all Rx activity to terminate. */ |
1292 | count = 0; |
1293 | do { |
1294 | if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) { |
			netdev_warn(pp->dev,
				    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
				    val);
1298 | break; |
1299 | } |
1300 | mdelay(1); |
1301 | |
1302 | val = mvreg_read(pp, MVNETA_RXQ_CMD); |
1303 | } while (val & MVNETA_RXQ_ENABLE_MASK); |
1304 | |
1305 | /* Stop Tx port activity. Check port Tx activity. Issue stop |
1306 | * command for active channels only |
1307 | */ |
1308 | val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK; |
1309 | |
1310 | if (val != 0) |
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    (val << MVNETA_TXQ_DISABLE_SHIFT));
1313 | |
1314 | /* Wait for all Tx activity to terminate. */ |
1315 | count = 0; |
1316 | do { |
1317 | if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) { |
			netdev_warn(pp->dev,
				    "TIMEOUT for TX stopped status=0x%08x\n",
				    val);
1321 | break; |
1322 | } |
1323 | mdelay(1); |
1324 | |
1325 | /* Check TX Command reg that all Txqs are stopped */ |
1326 | val = mvreg_read(pp, MVNETA_TXQ_CMD); |
1327 | |
1328 | } while (val & MVNETA_TXQ_ENABLE_MASK); |
1329 | |
1330 | /* Double check to verify that TX FIFO is empty */ |
1331 | count = 0; |
1332 | do { |
1333 | if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) { |
			netdev_warn(pp->dev,
				    "TX FIFO empty timeout status=0x%08x\n",
				    val);
1337 | break; |
1338 | } |
1339 | mdelay(1); |
1340 | |
1341 | val = mvreg_read(pp, MVNETA_PORT_STATUS); |
1342 | } while (!(val & MVNETA_TX_FIFO_EMPTY) && |
1343 | (val & MVNETA_TX_IN_PRGRS)); |
1344 | |
1345 | udelay(200); |
1346 | } |
1347 | |
1348 | /* Enable the port by setting the port enable bit of the MAC control register */ |
1349 | static void mvneta_port_enable(struct mvneta_port *pp) |
1350 | { |
1351 | u32 val; |
1352 | |
1353 | /* Enable port */ |
1354 | val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); |
1355 | val |= MVNETA_GMAC0_PORT_ENABLE; |
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
1357 | } |
1358 | |
/* Disable the port and wait for about 200 usec before returning */
1360 | static void mvneta_port_disable(struct mvneta_port *pp) |
1361 | { |
1362 | u32 val; |
1363 | |
1364 | /* Reset the Enable bit in the Serial Control Register */ |
1365 | val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); |
1366 | val &= ~MVNETA_GMAC0_PORT_ENABLE; |
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
1368 | |
1369 | udelay(200); |
1370 | } |
1371 | |
1372 | /* Multicast tables methods */ |
1373 | |
1374 | /* Set all entries in Unicast MAC Table; queue==-1 means reject all */ |
1375 | static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue) |
1376 | { |
1377 | int offset; |
1378 | u32 val; |
1379 | |
1380 | if (queue == -1) { |
1381 | val = 0; |
1382 | } else { |
1383 | val = 0x1 | (queue << 1); |
1384 | val |= (val << 24) | (val << 16) | (val << 8); |
1385 | } |
1386 | |
1387 | for (offset = 0; offset <= 0xc; offset += 4) |
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
1389 | } |
1390 | |
1391 | /* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */ |
1392 | static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue) |
1393 | { |
1394 | int offset; |
1395 | u32 val; |
1396 | |
1397 | if (queue == -1) { |
1398 | val = 0; |
1399 | } else { |
1400 | val = 0x1 | (queue << 1); |
1401 | val |= (val << 24) | (val << 16) | (val << 8); |
1402 | } |
1403 | |
1404 | for (offset = 0; offset <= 0xfc; offset += 4) |
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
1406 | |
1407 | } |
1408 | |
1409 | /* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */ |
1410 | static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue) |
1411 | { |
1412 | int offset; |
1413 | u32 val; |
1414 | |
1415 | if (queue == -1) { |
1416 | memset(pp->mcast_count, 0, sizeof(pp->mcast_count)); |
1417 | val = 0; |
1418 | } else { |
1419 | memset(pp->mcast_count, 1, sizeof(pp->mcast_count)); |
1420 | val = 0x1 | (queue << 1); |
1421 | val |= (val << 24) | (val << 16) | (val << 8); |
1422 | } |
1423 | |
1424 | for (offset = 0; offset <= 0xfc; offset += 4) |
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
1426 | } |
1427 | |
1428 | static void mvneta_percpu_unmask_interrupt(void *arg) |
1429 | { |
1430 | struct mvneta_port *pp = arg; |
1431 | |
	/* All the queues are unmasked, but actually only the ones
	 * mapped to this CPU will be unmasked
	 */
1435 | mvreg_write(pp, MVNETA_INTR_NEW_MASK, |
1436 | MVNETA_RX_INTR_MASK_ALL | |
1437 | MVNETA_TX_INTR_MASK_ALL | |
1438 | MVNETA_MISCINTR_INTR_MASK); |
1439 | } |
1440 | |
1441 | static void mvneta_percpu_mask_interrupt(void *arg) |
1442 | { |
1443 | struct mvneta_port *pp = arg; |
1444 | |
	/* All the queues are masked, but actually only the ones
	 * mapped to this CPU will be masked
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
1451 | } |
1452 | |
1453 | static void mvneta_percpu_clear_intr_cause(void *arg) |
1454 | { |
1455 | struct mvneta_port *pp = arg; |
1456 | |
	/* All the queues are cleared, but actually only the ones
	 * mapped to this CPU will be cleared
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
1463 | } |
1464 | |
1465 | /* This method sets defaults to the NETA port: |
1466 | * Clears interrupt Cause and Mask registers. |
1467 | * Clears all MAC tables. |
1468 | * Sets defaults to all registers. |
1469 | * Resets RX and TX descriptor rings. |
1470 | * Resets PHY. |
1471 | * This method can be called after mvneta_port_down() to return the port |
1472 | * settings to defaults. |
1473 | */ |
1474 | static void mvneta_defaults_set(struct mvneta_port *pp) |
1475 | { |
1476 | int cpu; |
1477 | int queue; |
1478 | u32 val; |
1479 | int max_cpu = num_present_cpus(); |
1480 | |
	/* Clear all Cause registers */
	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);

	/* Mask all interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
1490 | |
1491 | /* Set CPU queue access map. CPUs are assigned to the RX and |
1492 | * TX queues modulo their number. If there is only one TX |
1493 | * queue then it is assigned to the CPU associated to the |
1494 | * default RX queue. |
1495 | */ |
1496 | for_each_present_cpu(cpu) { |
1497 | int rxq_map = 0, txq_map = 0; |
1498 | int rxq, txq; |
1499 | if (!pp->neta_armada3700) { |
1500 | for (rxq = 0; rxq < rxq_number; rxq++) |
1501 | if ((rxq % max_cpu) == cpu) |
1502 | rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); |
1503 | |
1504 | for (txq = 0; txq < txq_number; txq++) |
1505 | if ((txq % max_cpu) == cpu) |
1506 | txq_map |= MVNETA_CPU_TXQ_ACCESS(txq); |
1507 | |
			/* With only one TX queue we configure a special case
			 * which allows getting all the irqs on a single
			 * CPU
			 */
1512 | if (txq_number == 1) |
1513 | txq_map = (cpu == pp->rxq_def) ? |
1514 | MVNETA_CPU_TXQ_ACCESS(0) : 0; |
1515 | |
1516 | } else { |
1517 | txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK; |
1518 | rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK; |
1519 | } |
1520 | |
		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
1522 | } |
1523 | |
1524 | /* Reset RX and TX DMAs */ |
1525 | mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); |
1526 | mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); |
1527 | |
1528 | /* Disable Legacy WRR, Disable EJP, Release from reset */ |
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
1537 | |
1538 | /* Set Port Acceleration Mode */ |
1539 | if (pp->bm_priv) |
1540 | /* HW buffer management + legacy parser */ |
1541 | val = MVNETA_ACC_MODE_EXT2; |
1542 | else |
1543 | /* SW buffer management + legacy parser */ |
1544 | val = MVNETA_ACC_MODE_EXT1; |
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	if (pp->bm_priv)
		mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);

	/* Update val of portCfg register according to all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
1557 | |
1558 | /* Build PORT_SDMA_CONFIG_REG */ |
1559 | val = 0; |
1560 | |
1561 | /* Default burst size */ |
1562 | val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16); |
1563 | val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16); |
1564 | val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP; |
1565 | |
1566 | #if defined(__BIG_ENDIAN) |
1567 | val |= MVNETA_DESC_SWAP; |
1568 | #endif |
1569 | |
1570 | /* Assign port SDMA configuration */ |
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
1572 | |
1573 | /* Disable PHY polling in hardware, since we're using the |
1574 | * kernel phylib to do this. |
1575 | */ |
1576 | val = mvreg_read(pp, MVNETA_UNIT_CONTROL); |
1577 | val &= ~MVNETA_PHY_POLLING_ENABLE; |
	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
1579 | |
	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);
1583 | |
1584 | /* Set port interrupt enable register - default enable all */ |
	mvreg_write(pp, MVNETA_INTR_ENABLE,
		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
1588 | |
1589 | mvneta_mib_counters_clear(pp); |
1590 | } |
1591 | |
1592 | /* Set max sizes for tx queues */ |
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
{
1596 | u32 val, size, mtu; |
1597 | int queue; |
1598 | |
1599 | mtu = max_tx_size * 8; |
1600 | if (mtu > MVNETA_TX_MTU_MAX) |
1601 | mtu = MVNETA_TX_MTU_MAX; |
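	/* The value programmed is 8x the size in bytes (presumably a bit
	 * count, per the multiply above), clamped to the field maximum.
	 */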
1602 | |
1603 | /* Set MTU */ |
1604 | val = mvreg_read(pp, MVNETA_TX_MTU); |
1605 | val &= ~MVNETA_TX_MTU_MAX; |
1606 | val |= mtu; |
	mvreg_write(pp, MVNETA_TX_MTU, val);
1608 | |
	/* TX token size and all TXQs token size must be larger than the MTU */
1610 | val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE); |
1611 | |
1612 | size = val & MVNETA_TX_TOKEN_SIZE_MAX; |
1613 | if (size < mtu) { |
1614 | size = mtu; |
1615 | val &= ~MVNETA_TX_TOKEN_SIZE_MAX; |
1616 | val |= size; |
		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
1618 | } |
1619 | for (queue = 0; queue < txq_number; queue++) { |
1620 | val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue)); |
1621 | |
1622 | size = val & MVNETA_TXQ_TOKEN_SIZE_MAX; |
1623 | if (size < mtu) { |
1624 | size = mtu; |
1625 | val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX; |
1626 | val |= size; |
			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
1628 | } |
1629 | } |
1630 | } |
1631 | |
1632 | /* Set unicast address */ |
1633 | static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble, |
1634 | int queue) |
1635 | { |
1636 | unsigned int unicast_reg; |
1637 | unsigned int tbl_offset; |
1638 | unsigned int reg_offset; |
1639 | |
1640 | /* Locate the Unicast table entry */ |
1641 | last_nibble = (0xf & last_nibble); |
1642 | |
1643 | /* offset from unicast tbl base */ |
1644 | tbl_offset = (last_nibble / 4) * 4; |
1645 | |
1646 | /* offset within the above reg */ |
1647 | reg_offset = last_nibble % 4; |
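	/* e.g. last_nibble = 0xb: tbl_offset = 8 (the third 32-bit table
	 * register) and reg_offset = 3 (the top byte within it)
	 */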
1648 | |
	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
1650 | |
1651 | if (queue == -1) { |
1652 | /* Clear accepts frame bit at specified unicast DA tbl entry */ |
1653 | unicast_reg &= ~(0xff << (8 * reg_offset)); |
1654 | } else { |
1655 | unicast_reg &= ~(0xff << (8 * reg_offset)); |
1656 | unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); |
1657 | } |
1658 | |
	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
1660 | } |
1661 | |
1662 | /* Set mac address */ |
1663 | static void mvneta_mac_addr_set(struct mvneta_port *pp, |
1664 | const unsigned char *addr, int queue) |
1665 | { |
1666 | unsigned int mac_h; |
1667 | unsigned int mac_l; |
1668 | |
1669 | if (queue != -1) { |
1670 | mac_l = (addr[4] << 8) | (addr[5]); |
1671 | mac_h = (addr[0] << 24) | (addr[1] << 16) | |
1672 | (addr[2] << 8) | (addr[3] << 0); |
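
		/* e.g. addr = 00:1b:2c:3d:4e:5f gives mac_h = 0x001b2c3d
		 * and mac_l = 0x4e5f
		 */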
1673 | |
		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
1676 | } |
1677 | |
1678 | /* Accept frames of this address */ |
	mvneta_set_ucast_addr(pp, addr[5], queue);
1680 | } |
1681 | |
1682 | /* Set the number of packets that will be received before RX interrupt |
1683 | * will be generated by HW. |
1684 | */ |
1685 | static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp, |
1686 | struct mvneta_rx_queue *rxq, u32 value) |
1687 | { |
1688 | mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id), |
		    value | MVNETA_RXQ_NON_OCCUPIED(0));
1690 | } |
1691 | |
1692 | /* Set the time delay in usec before RX interrupt will be generated by |
1693 | * HW. |
1694 | */ |
1695 | static void mvneta_rx_time_coal_set(struct mvneta_port *pp, |
1696 | struct mvneta_rx_queue *rxq, u32 value) |
1697 | { |
1698 | u32 val; |
1699 | unsigned long clk_rate; |
1700 | |
	clk_rate = clk_get_rate(pp->clk);
1702 | val = (clk_rate / 1000000) * value; |
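	/* e.g. a 250 MHz clock and value = 100 usecs gives
	 * val = (250000000 / 1000000) * 100 = 25000 cycles
	 */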
1703 | |
	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
1705 | } |
1706 | |
1707 | /* Set threshold for TX_DONE pkts coalescing */ |
1708 | static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp, |
1709 | struct mvneta_tx_queue *txq, u32 value) |
1710 | { |
1711 | u32 val; |
1712 | |
1713 | val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id)); |
1714 | |
1715 | val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK; |
1716 | val |= MVNETA_TXQ_SENT_THRESH_MASK(value); |
1717 | |
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
1719 | } |
1720 | |
/* Handle rx descriptor fill by setting buf_phys_addr and recording the
 * buffer's virtual address
 */
1722 | static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc, |
1723 | u32 phys_addr, void *virt_addr, |
1724 | struct mvneta_rx_queue *rxq) |
1725 | { |
1726 | int i; |
1727 | |
1728 | rx_desc->buf_phys_addr = phys_addr; |
1729 | i = rx_desc - rxq->descs; |
1730 | rxq->buf_virt_addr[i] = virt_addr; |
1731 | } |
1732 | |
1733 | /* Decrement sent descriptors counter */ |
1734 | static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp, |
1735 | struct mvneta_tx_queue *txq, |
1736 | int sent_desc) |
1737 | { |
1738 | u32 val; |
1739 | |
1740 | /* Only 255 TX descriptors can be updated at once */ |
1741 | while (sent_desc > 0xff) { |
1742 | val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT; |
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1744 | sent_desc = sent_desc - 0xff; |
1745 | } |
1746 | |
1747 | val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT; |
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1749 | } |
1750 | |
1751 | /* Get number of TX descriptors already sent by HW */ |
1752 | static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp, |
1753 | struct mvneta_tx_queue *txq) |
1754 | { |
1755 | u32 val; |
1756 | int sent_desc; |
1757 | |
1758 | val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id)); |
1759 | sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >> |
1760 | MVNETA_TXQ_SENT_DESC_SHIFT; |
1761 | |
1762 | return sent_desc; |
1763 | } |
1764 | |
1765 | /* Get number of sent descriptors and decrement counter. |
1766 | * The number of sent descriptors is returned. |
1767 | */ |
1768 | static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp, |
1769 | struct mvneta_tx_queue *txq) |
1770 | { |
1771 | int sent_desc; |
1772 | |
1773 | /* Get number of sent descriptors */ |
1774 | sent_desc = mvneta_txq_sent_desc_num_get(pp, txq); |
1775 | |
1776 | /* Decrement sent descriptors counter */ |
1777 | if (sent_desc) |
1778 | mvneta_txq_sent_desc_dec(pp, txq, sent_desc); |
1779 | |
1780 | return sent_desc; |
1781 | } |
1782 | |
1783 | /* Set TXQ descriptors fields relevant for CSUM calculation */ |
static u32 mvneta_txq_desc_csum(int l3_offs, __be16 l3_proto,
				int ip_hdr_len, int l4_proto)
1786 | { |
1787 | u32 command; |
1788 | |
1789 | /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk, |
1790 | * G_L4_chk, L4_type; required only for checksum |
1791 | * calculation |
1792 | */ |
1793 | command = l3_offs << MVNETA_TX_L3_OFF_SHIFT; |
1794 | command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT; |
1795 | |
1796 | if (l3_proto == htons(ETH_P_IP)) |
1797 | command |= MVNETA_TXD_IP_CSUM; |
1798 | else |
1799 | command |= MVNETA_TX_L3_IP6; |
1800 | |
1801 | if (l4_proto == IPPROTO_TCP) |
1802 | command |= MVNETA_TX_L4_CSUM_FULL; |
1803 | else if (l4_proto == IPPROTO_UDP) |
1804 | command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL; |
1805 | else |
1806 | command |= MVNETA_TX_L4_CSUM_NOT; |
1807 | |
1808 | return command; |
1809 | } |
1810 | |
1811 | |
1812 | /* Display more error info */ |
1813 | static void mvneta_rx_error(struct mvneta_port *pp, |
1814 | struct mvneta_rx_desc *rx_desc) |
1815 | { |
1816 | struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); |
1817 | u32 status = rx_desc->status; |
1818 | |
1819 | /* update per-cpu counter */ |
	u64_stats_update_begin(&stats->syncp);
	stats->rx_errors++;
	u64_stats_update_end(&stats->syncp);
1823 | |
1824 | switch (status & MVNETA_RXD_ERR_CODE_MASK) { |
1825 | case MVNETA_RXD_ERR_CRC: |
		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_OVERRUN:
		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_LEN:
		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_RESOURCE:
		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
1840 | break; |
1841 | } |
1842 | } |
1843 | |
1844 | /* Handle RX checksum offload based on the descriptor's status */ |
1845 | static int mvneta_rx_csum(struct mvneta_port *pp, u32 status) |
1846 | { |
1847 | if ((pp->dev->features & NETIF_F_RXCSUM) && |
1848 | (status & MVNETA_RXD_L3_IP4) && |
1849 | (status & MVNETA_RXD_L4_CSUM_OK)) |
1850 | return CHECKSUM_UNNECESSARY; |
1851 | |
1852 | return CHECKSUM_NONE; |
1853 | } |
1854 | |
/* Return the tx queue pointer (find last set bit) according to <cause>
 * returned from the tx_done reg. <cause> must not be null. The return
 * value is always a valid queue: the highest-numbered TXQ set in
 * <cause> (e.g. cause = 0b0110 selects TXQ 2).
 */
1859 | static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp, |
1860 | u32 cause) |
1861 | { |
	int queue = fls(cause) - 1;
1863 | |
1864 | return &pp->txqs[queue]; |
1865 | } |
1866 | |
1867 | /* Free tx queue skbuffs */ |
1868 | static void mvneta_txq_bufs_free(struct mvneta_port *pp, |
1869 | struct mvneta_tx_queue *txq, int num, |
1870 | struct netdev_queue *nq, bool napi) |
1871 | { |
1872 | unsigned int bytes_compl = 0, pkts_compl = 0; |
1873 | struct xdp_frame_bulk bq; |
1874 | int i; |
1875 | |
	xdp_frame_bulk_init(&bq);

	rcu_read_lock(); /* needed for xdp_return_frame_bulk */
1879 | |
1880 | for (i = 0; i < num; i++) { |
1881 | struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index]; |
1882 | struct mvneta_tx_desc *tx_desc = txq->descs + |
1883 | txq->txq_get_index; |
1884 | |
1885 | mvneta_txq_inc_get(txq); |
1886 | |
1887 | if (buf->type == MVNETA_TYPE_XDP_NDO || |
1888 | buf->type == MVNETA_TYPE_SKB) |
1889 | dma_unmap_single(pp->dev->dev.parent, |
1890 | tx_desc->buf_phys_addr, |
1891 | tx_desc->data_size, DMA_TO_DEVICE); |
1892 | if ((buf->type == MVNETA_TYPE_TSO || |
1893 | buf->type == MVNETA_TYPE_SKB) && buf->skb) { |
1894 | bytes_compl += buf->skb->len; |
1895 | pkts_compl++; |
			dev_kfree_skb_any(buf->skb);
		} else if ((buf->type == MVNETA_TYPE_XDP_TX ||
			    buf->type == MVNETA_TYPE_XDP_NDO) && buf->xdpf) {
			if (napi && buf->type == MVNETA_TYPE_XDP_TX)
				xdp_return_frame_rx_napi(buf->xdpf);
			else
				xdp_return_frame_bulk(buf->xdpf, &bq);
1903 | } |
1904 | } |
	xdp_flush_frame_bulk(&bq);
1906 | |
1907 | rcu_read_unlock(); |
1908 | |
	netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
1910 | } |
1911 | |
1912 | /* Handle end of transmission */ |
1913 | static void mvneta_txq_done(struct mvneta_port *pp, |
1914 | struct mvneta_tx_queue *txq) |
1915 | { |
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
1917 | int tx_done; |
1918 | |
1919 | tx_done = mvneta_txq_sent_desc_proc(pp, txq); |
1920 | if (!tx_done) |
1921 | return; |
1922 | |
	mvneta_txq_bufs_free(pp, txq, tx_done, nq, true);
1924 | |
1925 | txq->count -= tx_done; |
1926 | |
	if (netif_tx_queue_stopped(nq)) {
		if (txq->count <= txq->tx_wake_threshold)
			netif_tx_wake_queue(nq);
1930 | } |
1931 | } |
1932 | |
1933 | /* Refill processing for SW buffer management */ |
1934 | /* Allocate page per descriptor */ |
1935 | static int mvneta_rx_refill(struct mvneta_port *pp, |
1936 | struct mvneta_rx_desc *rx_desc, |
1937 | struct mvneta_rx_queue *rxq, |
1938 | gfp_t gfp_mask) |
1939 | { |
1940 | dma_addr_t phys_addr; |
1941 | struct page *page; |
1942 | |
	page = page_pool_alloc_pages(rxq->page_pool,
				     gfp_mask | __GFP_NOWARN);
1945 | if (!page) |
1946 | return -ENOMEM; |
1947 | |
1948 | phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction; |
	mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
1950 | |
1951 | return 0; |
1952 | } |
1953 | |
1954 | /* Handle tx checksum */ |
1955 | static u32 mvneta_skb_tx_csum(struct sk_buff *skb) |
1956 | { |
1957 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
1958 | int ip_hdr_len = 0; |
1959 | __be16 l3_proto = vlan_get_protocol(skb); |
1960 | u8 l4_proto; |
1961 | |
1962 | if (l3_proto == htons(ETH_P_IP)) { |
1963 | struct iphdr *ip4h = ip_hdr(skb); |
1964 | |
1965 | /* Calculate IPv4 checksum and L4 checksum */ |
1966 | ip_hdr_len = ip4h->ihl; |
1967 | l4_proto = ip4h->protocol; |
1968 | } else if (l3_proto == htons(ETH_P_IPV6)) { |
1969 | struct ipv6hdr *ip6h = ipv6_hdr(skb); |
1970 | |
1971 | /* Read l4_protocol from one of IPv6 extra headers */ |
1972 | if (skb_network_header_len(skb) > 0) |
1973 | ip_hdr_len = (skb_network_header_len(skb) >> 2); |
1974 | l4_proto = ip6h->nexthdr; |
1975 | } else |
1976 | return MVNETA_TX_L4_CSUM_NOT; |
1977 | |
		return mvneta_txq_desc_csum(skb_network_offset(skb),
					    l3_proto, ip_hdr_len, l4_proto);
1980 | } |
1981 | |
1982 | return MVNETA_TX_L4_CSUM_NOT; |
1983 | } |
1984 | |
1985 | /* Drop packets received by the RXQ and free buffers */ |
1986 | static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, |
1987 | struct mvneta_rx_queue *rxq) |
1988 | { |
1989 | int rx_done, i; |
1990 | |
1991 | rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); |
1992 | if (rx_done) |
		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1994 | |
1995 | if (pp->bm_priv) { |
1996 | for (i = 0; i < rx_done; i++) { |
1997 | struct mvneta_rx_desc *rx_desc = |
1998 | mvneta_rxq_next_desc_get(rxq); |
1999 | u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc); |
2000 | struct mvneta_bm_pool *bm_pool; |
2001 | |
2002 | bm_pool = &pp->bm_priv->bm_pools[pool_id]; |
2003 | /* Return dropped buffer to the pool */ |
			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
					      rx_desc->buf_phys_addr);
2006 | } |
2007 | return; |
2008 | } |
2009 | |
2010 | for (i = 0; i < rxq->size; i++) { |
2011 | struct mvneta_rx_desc *rx_desc = rxq->descs + i; |
2012 | void *data = rxq->buf_virt_addr[i]; |
2013 | if (!data || !(rx_desc->buf_phys_addr)) |
2014 | continue; |
2015 | |
		page_pool_put_full_page(rxq->page_pool, data, false);
	}
	if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
		xdp_rxq_info_unreg(&rxq->xdp_rxq);
	page_pool_destroy(rxq->page_pool);
2021 | rxq->page_pool = NULL; |
2022 | } |
2023 | |
2024 | static void |
2025 | mvneta_update_stats(struct mvneta_port *pp, |
2026 | struct mvneta_stats *ps) |
2027 | { |
2028 | struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); |
2029 | |
	u64_stats_update_begin(&stats->syncp);
	stats->es.ps.rx_packets += ps->rx_packets;
	stats->es.ps.rx_bytes += ps->rx_bytes;
	/* xdp */
	stats->es.ps.xdp_redirect += ps->xdp_redirect;
	stats->es.ps.xdp_pass += ps->xdp_pass;
	stats->es.ps.xdp_drop += ps->xdp_drop;
	u64_stats_update_end(&stats->syncp);
2038 | } |
2039 | |
2040 | static inline |
2041 | int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) |
2042 | { |
2043 | struct mvneta_rx_desc *rx_desc; |
2044 | int curr_desc = rxq->first_to_refill; |
2045 | int i; |
2046 | |
2047 | for (i = 0; (i < rxq->refill_num) && (i < 64); i++) { |
2048 | rx_desc = rxq->descs + curr_desc; |
2049 | if (!(rx_desc->buf_phys_addr)) { |
2050 | if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) { |
2051 | struct mvneta_pcpu_stats *stats; |
2052 | |
				pr_err("Can't refill queue %d. Done %d from %d\n",
				       rxq->id, i, rxq->refill_num);
2055 | |
2056 | stats = this_cpu_ptr(pp->stats); |
				u64_stats_update_begin(&stats->syncp);
				stats->es.refill_error++;
				u64_stats_update_end(&stats->syncp);
2060 | break; |
2061 | } |
2062 | } |
2063 | curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc); |
2064 | } |
2065 | rxq->refill_num -= i; |
2066 | rxq->first_to_refill = curr_desc; |
2067 | |
2068 | return i; |
2069 | } |
2070 | |
2071 | static void |
2072 | mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, |
2073 | struct xdp_buff *xdp, int sync_len) |
2074 | { |
2075 | struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); |
2076 | int i; |
2077 | |
2078 | if (likely(!xdp_buff_has_frags(xdp))) |
2079 | goto out; |
2080 | |
2081 | for (i = 0; i < sinfo->nr_frags; i++) |
		page_pool_put_full_page(rxq->page_pool,
					skb_frag_page(&sinfo->frags[i]), true);
2084 | |
2085 | out: |
	page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
			   sync_len, true);
2088 | } |
2089 | |
2090 | static int |
2091 | mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq, |
2092 | struct xdp_frame *xdpf, int *nxmit_byte, bool dma_map) |
2093 | { |
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
2095 | struct device *dev = pp->dev->dev.parent; |
2096 | struct mvneta_tx_desc *tx_desc; |
2097 | int i, num_frames = 1; |
2098 | struct page *page; |
2099 | |
2100 | if (unlikely(xdp_frame_has_frags(xdpf))) |
2101 | num_frames += sinfo->nr_frags; |
2102 | |
2103 | if (txq->count + num_frames >= txq->size) |
2104 | return MVNETA_XDP_DROPPED; |
2105 | |
2106 | for (i = 0; i < num_frames; i++) { |
2107 | struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; |
2108 | skb_frag_t *frag = NULL; |
2109 | int len = xdpf->len; |
2110 | dma_addr_t dma_addr; |
2111 | |
2112 | if (unlikely(i)) { /* paged area */ |
2113 | frag = &sinfo->frags[i - 1]; |
2114 | len = skb_frag_size(frag); |
2115 | } |
2116 | |
2117 | tx_desc = mvneta_txq_next_desc_get(txq); |
2118 | if (dma_map) { |
2119 | /* ndo_xdp_xmit */ |
2120 | void *data; |
2121 | |
2122 | data = unlikely(frag) ? skb_frag_address(frag) |
2123 | : xdpf->data; |
2124 | dma_addr = dma_map_single(dev, data, len, |
2125 | DMA_TO_DEVICE); |
2126 | if (dma_mapping_error(dev, dma_addr)) { |
2127 | mvneta_txq_desc_put(txq); |
2128 | goto unmap; |
2129 | } |
2130 | |
2131 | buf->type = MVNETA_TYPE_XDP_NDO; |
2132 | } else { |
2133 | page = unlikely(frag) ? skb_frag_page(frag) |
2134 | : virt_to_page(xdpf->data); |
2135 | dma_addr = page_pool_get_dma_addr(page); |
2136 | if (unlikely(frag)) |
2137 | dma_addr += skb_frag_off(frag); |
2138 | else |
2139 | dma_addr += sizeof(*xdpf) + xdpf->headroom; |
			dma_sync_single_for_device(dev, dma_addr, len,
						   DMA_BIDIRECTIONAL);
2142 | buf->type = MVNETA_TYPE_XDP_TX; |
2143 | } |
2144 | buf->xdpf = unlikely(i) ? NULL : xdpf; |
2145 | |
2146 | tx_desc->command = unlikely(i) ? 0 : MVNETA_TXD_F_DESC; |
2147 | tx_desc->buf_phys_addr = dma_addr; |
2148 | tx_desc->data_size = len; |
2149 | *nxmit_byte += len; |
2150 | |
2151 | mvneta_txq_inc_put(txq); |
2152 | } |
	/* last descriptor */
2154 | tx_desc->command |= MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD; |
2155 | |
2156 | txq->pending += num_frames; |
2157 | txq->count += num_frames; |
2158 | |
2159 | return MVNETA_XDP_TX; |
2160 | |
2161 | unmap: |
2162 | for (i--; i >= 0; i--) { |
2163 | mvneta_txq_desc_put(txq); |
2164 | tx_desc = txq->descs + txq->next_desc_to_proc; |
2165 | dma_unmap_single(dev, tx_desc->buf_phys_addr, |
2166 | tx_desc->data_size, |
2167 | DMA_TO_DEVICE); |
2168 | } |
2169 | |
2170 | return MVNETA_XDP_DROPPED; |
2171 | } |
2172 | |
2173 | static int |
2174 | mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp) |
2175 | { |
2176 | struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); |
2177 | struct mvneta_tx_queue *txq; |
2178 | struct netdev_queue *nq; |
2179 | int cpu, nxmit_byte = 0; |
2180 | struct xdp_frame *xdpf; |
2181 | u32 ret; |
2182 | |
2183 | xdpf = xdp_convert_buff_to_frame(xdp); |
2184 | if (unlikely(!xdpf)) |
2185 | return MVNETA_XDP_DROPPED; |
2186 | |
2187 | cpu = smp_processor_id(); |
2188 | txq = &pp->txqs[cpu % txq_number]; |
	nq = netdev_get_tx_queue(pp->dev, txq->id);
2190 | |
	__netif_tx_lock(nq, cpu);
	ret = mvneta_xdp_submit_frame(pp, txq, xdpf, &nxmit_byte, false);
2193 | if (ret == MVNETA_XDP_TX) { |
		u64_stats_update_begin(&stats->syncp);
		stats->es.ps.tx_bytes += nxmit_byte;
		stats->es.ps.tx_packets++;
		stats->es.ps.xdp_tx++;
		u64_stats_update_end(&stats->syncp);

		mvneta_txq_pend_desc_add(pp, txq, 0);
	} else {
		u64_stats_update_begin(&stats->syncp);
		stats->es.ps.xdp_tx_err++;
		u64_stats_update_end(&stats->syncp);
	}
	__netif_tx_unlock(nq);
2207 | |
2208 | return ret; |
2209 | } |
2210 | |
2211 | static int |
2212 | mvneta_xdp_xmit(struct net_device *dev, int num_frame, |
2213 | struct xdp_frame **frames, u32 flags) |
2214 | { |
2215 | struct mvneta_port *pp = netdev_priv(dev); |
2216 | struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); |
2217 | int i, nxmit_byte = 0, nxmit = 0; |
2218 | int cpu = smp_processor_id(); |
2219 | struct mvneta_tx_queue *txq; |
2220 | struct netdev_queue *nq; |
2221 | u32 ret; |
2222 | |
2223 | if (unlikely(test_bit(__MVNETA_DOWN, &pp->state))) |
2224 | return -ENETDOWN; |
2225 | |
2226 | if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) |
2227 | return -EINVAL; |
2228 | |
2229 | txq = &pp->txqs[cpu % txq_number]; |
	nq = netdev_get_tx_queue(pp->dev, txq->id);
2231 | |
	__netif_tx_lock(nq, cpu);
	for (i = 0; i < num_frame; i++) {
		ret = mvneta_xdp_submit_frame(pp, txq, frames[i], &nxmit_byte,
					      true);
2236 | if (ret != MVNETA_XDP_TX) |
2237 | break; |
2238 | |
2239 | nxmit++; |
2240 | } |
2241 | |
	if (unlikely(flags & XDP_XMIT_FLUSH))
		mvneta_txq_pend_desc_add(pp, txq, 0);
	__netif_tx_unlock(nq);

	u64_stats_update_begin(&stats->syncp);
	stats->es.ps.tx_bytes += nxmit_byte;
	stats->es.ps.tx_packets += nxmit;
	stats->es.ps.xdp_xmit += nxmit;
	stats->es.ps.xdp_xmit_err += num_frame - nxmit;
	u64_stats_update_end(&stats->syncp);
2252 | |
2253 | return nxmit; |
2254 | } |
2255 | |
2256 | static int |
2257 | mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, |
2258 | struct bpf_prog *prog, struct xdp_buff *xdp, |
2259 | u32 frame_sz, struct mvneta_stats *stats) |
2260 | { |
2261 | unsigned int len, data_len, sync; |
2262 | u32 ret, act; |
2263 | |
2264 | len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; |
2265 | data_len = xdp->data_end - xdp->data; |
2266 | act = bpf_prog_run_xdp(prog, xdp); |
2267 | |
	/* Due to xdp_adjust_tail: the DMA sync for_device must cover the
	 * max length the CPU touched
	 */
2269 | sync = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; |
2270 | sync = max(sync, len); |
2271 | |
2272 | switch (act) { |
2273 | case XDP_PASS: |
2274 | stats->xdp_pass++; |
2275 | return MVNETA_XDP_PASS; |
2276 | case XDP_REDIRECT: { |
2277 | int err; |
2278 | |
		err = xdp_do_redirect(pp->dev, xdp, prog);
		if (unlikely(err)) {
			mvneta_xdp_put_buff(pp, rxq, xdp, sync);
2282 | ret = MVNETA_XDP_DROPPED; |
2283 | } else { |
2284 | ret = MVNETA_XDP_REDIR; |
2285 | stats->xdp_redirect++; |
2286 | } |
2287 | break; |
2288 | } |
2289 | case XDP_TX: |
2290 | ret = mvneta_xdp_xmit_back(pp, xdp); |
2291 | if (ret != MVNETA_XDP_TX) |
			mvneta_xdp_put_buff(pp, rxq, xdp, sync);
2293 | break; |
2294 | default: |
		bpf_warn_invalid_xdp_action(pp->dev, prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(pp->dev, prog, act);
		fallthrough;
	case XDP_DROP:
		mvneta_xdp_put_buff(pp, rxq, xdp, sync);
2302 | ret = MVNETA_XDP_DROPPED; |
2303 | stats->xdp_drop++; |
2304 | break; |
2305 | } |
2306 | |
2307 | stats->rx_bytes += frame_sz + xdp->data_end - xdp->data - data_len; |
2308 | stats->rx_packets++; |
2309 | |
2310 | return ret; |
2311 | } |
2312 | |
2313 | static void |
2314 | mvneta_swbm_rx_frame(struct mvneta_port *pp, |
2315 | struct mvneta_rx_desc *rx_desc, |
2316 | struct mvneta_rx_queue *rxq, |
2317 | struct xdp_buff *xdp, int *size, |
2318 | struct page *page) |
2319 | { |
2320 | unsigned char *data = page_address(page); |
2321 | int data_len = -MVNETA_MH_SIZE, len; |
2322 | struct net_device *dev = pp->dev; |
2323 | enum dma_data_direction dma_dir; |
2324 | |
2325 | if (*size > MVNETA_MAX_RX_BUF_SIZE) { |
2326 | len = MVNETA_MAX_RX_BUF_SIZE; |
2327 | data_len += len; |
2328 | } else { |
2329 | len = *size; |
2330 | data_len += len - ETH_FCS_LEN; |
2331 | } |
2332 | *size = *size - len; |
2333 | |
	dma_dir = page_pool_get_dma_dir(rxq->page_pool);
	dma_sync_single_for_cpu(dev->dev.parent,
				rx_desc->buf_phys_addr,
				len, dma_dir);
2338 | |
2339 | rx_desc->buf_phys_addr = 0; |
2340 | |
2341 | /* Prefetch header */ |
2342 | prefetch(data); |
2343 | xdp_buff_clear_frags_flag(xdp); |
	xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE,
			 data_len, false);
2346 | } |
2347 | |
2348 | static void |
2349 | mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, |
2350 | struct mvneta_rx_desc *rx_desc, |
2351 | struct mvneta_rx_queue *rxq, |
2352 | struct xdp_buff *xdp, int *size, |
2353 | struct page *page) |
2354 | { |
2355 | struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); |
2356 | struct net_device *dev = pp->dev; |
2357 | enum dma_data_direction dma_dir; |
2358 | int data_len, len; |
2359 | |
2360 | if (*size > MVNETA_MAX_RX_BUF_SIZE) { |
2361 | len = MVNETA_MAX_RX_BUF_SIZE; |
2362 | data_len = len; |
2363 | } else { |
2364 | len = *size; |
2365 | data_len = len - ETH_FCS_LEN; |
2366 | } |
	dma_dir = page_pool_get_dma_dir(rxq->page_pool);
	dma_sync_single_for_cpu(dev->dev.parent,
				rx_desc->buf_phys_addr,
				len, dma_dir);
2371 | rx_desc->buf_phys_addr = 0; |
2372 | |
2373 | if (!xdp_buff_has_frags(xdp)) |
2374 | sinfo->nr_frags = 0; |
2375 | |
2376 | if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) { |
2377 | skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags++]; |
2378 | |
		skb_frag_fill_page_desc(frag, page,
					pp->rx_offset_correction, data_len);
2381 | |
2382 | if (!xdp_buff_has_frags(xdp)) { |
2383 | sinfo->xdp_frags_size = *size; |
2384 | xdp_buff_set_frags_flag(xdp); |
2385 | } |
2386 | if (page_is_pfmemalloc(page)) |
2387 | xdp_buff_set_frag_pfmemalloc(xdp); |
2388 | } else { |
		page_pool_put_full_page(rxq->page_pool, page, true);
2390 | } |
2391 | *size -= len; |
2392 | } |
2393 | |
2394 | static struct sk_buff * |
2395 | mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool, |
2396 | struct xdp_buff *xdp, u32 desc_status) |
2397 | { |
2398 | struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); |
2399 | struct sk_buff *skb; |
2400 | u8 num_frags; |
2401 | |
2402 | if (unlikely(xdp_buff_has_frags(xdp))) |
2403 | num_frags = sinfo->nr_frags; |
2404 | |
	skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
2406 | if (!skb) |
		return ERR_PTR(-ENOMEM);
2408 | |
2409 | skb_mark_for_recycle(skb); |
2410 | |
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	skb_put(skb, xdp->data_end - xdp->data);
	skb->ip_summed = mvneta_rx_csum(pp, desc_status);
2414 | |
2415 | if (unlikely(xdp_buff_has_frags(xdp))) |
		xdp_update_skb_shared_info(skb, num_frags,
					   sinfo->xdp_frags_size,
					   num_frags * xdp->frame_sz,
					   xdp_buff_is_frag_pfmemalloc(xdp));
2420 | |
2421 | return skb; |
2422 | } |
2423 | |
2424 | /* Main rx processing when using software buffer management */ |
2425 | static int mvneta_rx_swbm(struct napi_struct *napi, |
2426 | struct mvneta_port *pp, int budget, |
2427 | struct mvneta_rx_queue *rxq) |
2428 | { |
2429 | int rx_proc = 0, rx_todo, refill, size = 0; |
2430 | struct net_device *dev = pp->dev; |
2431 | struct mvneta_stats ps = {}; |
2432 | struct bpf_prog *xdp_prog; |
2433 | u32 desc_status, frame_sz; |
2434 | struct xdp_buff xdp_buf; |
2435 | |
	xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq);
2437 | xdp_buf.data_hard_start = NULL; |
2438 | |
2439 | /* Get number of received packets */ |
2440 | rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq); |
2441 | |
2442 | xdp_prog = READ_ONCE(pp->xdp_prog); |
2443 | |
2444 | /* Fairness NAPI loop */ |
2445 | while (rx_proc < budget && rx_proc < rx_todo) { |
2446 | struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); |
2447 | u32 rx_status, index; |
2448 | struct sk_buff *skb; |
2449 | struct page *page; |
2450 | |
2451 | index = rx_desc - rxq->descs; |
2452 | page = (struct page *)rxq->buf_virt_addr[index]; |
2453 | |
2454 | rx_status = rx_desc->status; |
2455 | rx_proc++; |
2456 | rxq->refill_num++; |
2457 | |
2458 | if (rx_status & MVNETA_RXD_FIRST_DESC) { |
2459 | /* Check errors only for FIRST descriptor */ |
2460 | if (rx_status & MVNETA_RXD_ERR_SUMMARY) { |
2461 | mvneta_rx_error(pp, rx_desc); |
2462 | goto next; |
2463 | } |
2464 | |
2465 | size = rx_desc->data_size; |
2466 | frame_sz = size - ETH_FCS_LEN; |
2467 | desc_status = rx_status; |
2468 | |
			mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
					     &size, page);
2471 | } else { |
2472 | if (unlikely(!xdp_buf.data_hard_start)) { |
2473 | rx_desc->buf_phys_addr = 0; |
				page_pool_put_full_page(rxq->page_pool, page,
							true);
2476 | goto next; |
2477 | } |
2478 | |
			mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
						    &size, page);
2481 | } /* Middle or Last descriptor */ |
2482 | |
2483 | if (!(rx_status & MVNETA_RXD_LAST_DESC)) |
2484 | /* no last descriptor this time */ |
2485 | continue; |
2486 | |
2487 | if (size) { |
			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
2489 | goto next; |
2490 | } |
2491 | |
		if (xdp_prog &&
		    mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps))
2494 | goto next; |
2495 | |
		skb = mvneta_swbm_build_skb(pp, rxq->page_pool, &xdp_buf, desc_status);
		if (IS_ERR(skb)) {
			struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);

			u64_stats_update_begin(&stats->syncp);
			stats->es.skb_alloc_error++;
			stats->rx_dropped++;
			u64_stats_update_end(&stats->syncp);
2506 | |
2507 | goto next; |
2508 | } |
2509 | |
2510 | ps.rx_bytes += skb->len; |
2511 | ps.rx_packets++; |
2512 | |
2513 | skb->protocol = eth_type_trans(skb, dev); |
2514 | napi_gro_receive(napi, skb); |
2515 | next: |
2516 | xdp_buf.data_hard_start = NULL; |
2517 | } |
2518 | |
2519 | if (xdp_buf.data_hard_start) |
		mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
2521 | |
2522 | if (ps.xdp_redirect) |
2523 | xdp_do_flush(); |
2524 | |
2525 | if (ps.rx_packets) |
		mvneta_update_stats(pp, &ps);
2527 | |
2528 | /* return some buffers to hardware queue, one at a time is too slow */ |
2529 | refill = mvneta_rx_refill_queue(pp, rxq); |
2530 | |
2531 | /* Update rxq management counters */ |
	mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);
2533 | |
2534 | return ps.rx_packets; |
2535 | } |
2536 | |
2537 | /* Main rx processing when using hardware buffer management */ |
2538 | static int mvneta_rx_hwbm(struct napi_struct *napi, |
2539 | struct mvneta_port *pp, int rx_todo, |
2540 | struct mvneta_rx_queue *rxq) |
2541 | { |
2542 | struct net_device *dev = pp->dev; |
2543 | int rx_done; |
2544 | u32 rcvd_pkts = 0; |
2545 | u32 rcvd_bytes = 0; |
2546 | |
2547 | /* Get number of received packets */ |
2548 | rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); |
2549 | |
2550 | if (rx_todo > rx_done) |
2551 | rx_todo = rx_done; |
2552 | |
2553 | rx_done = 0; |
2554 | |
2555 | /* Fairness NAPI loop */ |
2556 | while (rx_done < rx_todo) { |
2557 | struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); |
2558 | struct mvneta_bm_pool *bm_pool = NULL; |
2559 | struct sk_buff *skb; |
2560 | unsigned char *data; |
2561 | dma_addr_t phys_addr; |
2562 | u32 rx_status, frag_size; |
2563 | int rx_bytes, err; |
2564 | u8 pool_id; |
2565 | |
2566 | rx_done++; |
2567 | rx_status = rx_desc->status; |
2568 | rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); |
2569 | data = (u8 *)(uintptr_t)rx_desc->buf_cookie; |
2570 | phys_addr = rx_desc->buf_phys_addr; |
2571 | pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc); |
2572 | bm_pool = &pp->bm_priv->bm_pools[pool_id]; |
2573 | |
		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
2575 | (rx_status & MVNETA_RXD_ERR_SUMMARY)) { |
2576 | err_drop_frame_ret_pool: |
2577 | /* Return the buffer to the pool */ |
			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
					      rx_desc->buf_phys_addr);
2580 | err_drop_frame: |
2581 | mvneta_rx_error(pp, rx_desc); |
2582 | /* leave the descriptor untouched */ |
2583 | continue; |
2584 | } |
2585 | |
2586 | if (rx_bytes <= rx_copybreak) { |
2587 | /* better copy a small frame and not unmap the DMA region */ |
			skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
2589 | if (unlikely(!skb)) |
2590 | goto err_drop_frame_ret_pool; |
2591 | |
			dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
						      rx_desc->buf_phys_addr,
						      MVNETA_MH_SIZE + NET_SKB_PAD,
						      rx_bytes,
						      DMA_FROM_DEVICE);
			skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
				     rx_bytes);
2599 | |
2600 | skb->protocol = eth_type_trans(skb, dev); |
			skb->ip_summed = mvneta_rx_csum(pp, rx_status);
2602 | napi_gro_receive(napi, skb); |
2603 | |
2604 | rcvd_pkts++; |
2605 | rcvd_bytes += rx_bytes; |
2606 | |
2607 | /* Return the buffer to the pool */ |
			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
					      rx_desc->buf_phys_addr);
2610 | |
2611 | /* leave the descriptor and buffer untouched */ |
2612 | continue; |
2613 | } |
2614 | |
2615 | /* Refill processing */ |
		err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
2617 | if (err) { |
2618 | struct mvneta_pcpu_stats *stats; |
2619 | |
			netdev_err(dev, "Linux processing - Can't refill\n");
2621 | |
2622 | stats = this_cpu_ptr(pp->stats); |
			u64_stats_update_begin(&stats->syncp);
			stats->es.refill_error++;
			u64_stats_update_end(&stats->syncp);
2626 | |
2627 | goto err_drop_frame_ret_pool; |
2628 | } |
2629 | |
2630 | frag_size = bm_pool->hwbm_pool.frag_size; |
2631 | |
		skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
2633 | |
2634 | /* After refill old buffer has to be unmapped regardless |
2635 | * the skb is successfully built or not. |
2636 | */ |
2637 | dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr, |
2638 | bm_pool->buf_size, DMA_FROM_DEVICE); |
2639 | if (!skb) |
2640 | goto err_drop_frame; |
2641 | |
2642 | rcvd_pkts++; |
2643 | rcvd_bytes += rx_bytes; |
2644 | |
2645 | /* Linux processing */ |
2646 | skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD); |
		skb_put(skb, rx_bytes);
2648 | |
2649 | skb->protocol = eth_type_trans(skb, dev); |
		skb->ip_summed = mvneta_rx_csum(pp, rx_status);
2651 | |
2652 | napi_gro_receive(napi, skb); |
2653 | } |
2654 | |
2655 | if (rcvd_pkts) { |
2656 | struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); |
2657 | |
		u64_stats_update_begin(&stats->syncp);
		stats->es.ps.rx_packets += rcvd_pkts;
		stats->es.ps.rx_bytes += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
2662 | } |
2663 | |
2664 | /* Update rxq management counters */ |
	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
2666 | |
2667 | return rx_done; |
2668 | } |
2669 | |
2670 | static void mvneta_free_tso_hdrs(struct mvneta_port *pp, |
2671 | struct mvneta_tx_queue *txq) |
2672 | { |
2673 | struct device *dev = pp->dev->dev.parent; |
2674 | int i; |
2675 | |
2676 | for (i = 0; i < MVNETA_MAX_TSO_PAGES; i++) { |
2677 | if (txq->tso_hdrs[i]) { |
			dma_free_coherent(dev, MVNETA_TSO_PAGE_SIZE,
					  txq->tso_hdrs[i],
					  txq->tso_hdrs_phys[i]);
2681 | txq->tso_hdrs[i] = NULL; |
2682 | } |
2683 | } |
2684 | } |
2685 | |
2686 | static int mvneta_alloc_tso_hdrs(struct mvneta_port *pp, |
2687 | struct mvneta_tx_queue *txq) |
2688 | { |
2689 | struct device *dev = pp->dev->dev.parent; |
2690 | int i, num; |
2691 | |
2692 | num = DIV_ROUND_UP(txq->size, MVNETA_TSO_PER_PAGE); |
2693 | for (i = 0; i < num; i++) { |
		txq->tso_hdrs[i] = dma_alloc_coherent(dev, MVNETA_TSO_PAGE_SIZE,
						      &txq->tso_hdrs_phys[i],
						      GFP_KERNEL);
2697 | if (!txq->tso_hdrs[i]) { |
2698 | mvneta_free_tso_hdrs(pp, txq); |
2699 | return -ENOMEM; |
2700 | } |
2701 | } |
2702 | |
2703 | return 0; |
2704 | } |
2705 | |
2706 | static char *mvneta_get_tso_hdr(struct mvneta_tx_queue *txq, dma_addr_t *dma) |
2707 | { |
2708 | int index, offset; |
2709 | |
2710 | index = txq->txq_put_index / MVNETA_TSO_PER_PAGE; |
2711 | offset = (txq->txq_put_index % MVNETA_TSO_PER_PAGE) * TSO_HEADER_SIZE; |
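	/* Illustrative: if MVNETA_TSO_PER_PAGE were 32, put_index = 40
	 * would map to headers page 1 at byte offset
	 * (40 % 32) * TSO_HEADER_SIZE.
	 */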
2712 | |
2713 | *dma = txq->tso_hdrs_phys[index] + offset; |
2714 | |
2715 | return txq->tso_hdrs[index] + offset; |
2716 | } |
2717 | |
2718 | static void mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_tx_queue *txq, |
2719 | struct tso_t *tso, int size, bool is_last) |
2720 | { |
2721 | struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; |
2722 | int hdr_len = skb_tcp_all_headers(skb); |
2723 | struct mvneta_tx_desc *tx_desc; |
2724 | dma_addr_t hdr_phys; |
2725 | char *hdr; |
2726 | |
	hdr = mvneta_get_tso_hdr(txq, &hdr_phys);
2728 | tso_build_hdr(skb, hdr, tso, size, is_last); |
2729 | |
2730 | tx_desc = mvneta_txq_next_desc_get(txq); |
2731 | tx_desc->data_size = hdr_len; |
2732 | tx_desc->command = mvneta_skb_tx_csum(skb); |
2733 | tx_desc->command |= MVNETA_TXD_F_DESC; |
2734 | tx_desc->buf_phys_addr = hdr_phys; |
2735 | buf->type = MVNETA_TYPE_TSO; |
2736 | buf->skb = NULL; |
2737 | |
2738 | mvneta_txq_inc_put(txq); |
2739 | } |
2740 | |
2741 | static inline int |
2742 | mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq, |
2743 | struct sk_buff *skb, char *data, int size, |
2744 | bool last_tcp, bool is_last) |
2745 | { |
2746 | struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; |
2747 | struct mvneta_tx_desc *tx_desc; |
2748 | |
2749 | tx_desc = mvneta_txq_next_desc_get(txq); |
2750 | tx_desc->data_size = size; |
2751 | tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data, |
2752 | size, DMA_TO_DEVICE); |
2753 | if (unlikely(dma_mapping_error(dev->dev.parent, |
2754 | tx_desc->buf_phys_addr))) { |
2755 | mvneta_txq_desc_put(txq); |
2756 | return -ENOMEM; |
2757 | } |
2758 | |
2759 | tx_desc->command = 0; |
2760 | buf->type = MVNETA_TYPE_SKB; |
2761 | buf->skb = NULL; |
2762 | |
2763 | if (last_tcp) { |
2764 | /* last descriptor in the TCP packet */ |
2765 | tx_desc->command = MVNETA_TXD_L_DESC; |
2766 | |
2767 | /* last descriptor in SKB */ |
2768 | if (is_last) |
2769 | buf->skb = skb; |
2770 | } |
2771 | mvneta_txq_inc_put(txq); |
2772 | return 0; |
2773 | } |
2774 | |
2775 | static void mvneta_release_descs(struct mvneta_port *pp, |
2776 | struct mvneta_tx_queue *txq, |
2777 | int first, int num) |
2778 | { |
2779 | int desc_idx, i; |
2780 | |
2781 | desc_idx = first + num; |
2782 | if (desc_idx >= txq->size) |
2783 | desc_idx -= txq->size; |
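	/* desc_idx now points at the last descriptor that was filled
	 * (first + num, modulo the ring size); the loop below walks
	 * backwards from there, releasing num + 1 descriptors in total.
	 */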
2784 | |
2785 | for (i = num; i >= 0; i--) { |
2786 | struct mvneta_tx_desc *tx_desc = txq->descs + desc_idx; |
2787 | struct mvneta_tx_buf *buf = &txq->buf[desc_idx]; |
2788 | |
2789 | if (buf->type == MVNETA_TYPE_SKB) |
2790 | dma_unmap_single(pp->dev->dev.parent, |
2791 | tx_desc->buf_phys_addr, |
2792 | tx_desc->data_size, |
2793 | DMA_TO_DEVICE); |
2794 | |
2795 | mvneta_txq_desc_put(txq); |
2796 | |
2797 | if (desc_idx == 0) |
2798 | desc_idx = txq->size; |
2799 | desc_idx -= 1; |
2800 | } |
2801 | } |
2802 | |
2803 | static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev, |
2804 | struct mvneta_tx_queue *txq) |
2805 | { |
2806 | int hdr_len, total_len, data_left; |
2807 | int first_desc, desc_count = 0; |
2808 | struct mvneta_port *pp = netdev_priv(dev); |
2809 | struct tso_t tso; |
2810 | |
2811 | /* Count needed descriptors */ |
2812 | if ((txq->count + tso_count_descs(skb)) >= txq->size) |
2813 | return 0; |
2814 | |
2815 | if (skb_headlen(skb) < skb_tcp_all_headers(skb)) { |
		pr_info("*** Is this even possible?\n");
2817 | return 0; |
2818 | } |
2819 | |
2820 | first_desc = txq->txq_put_index; |
2821 | |
2822 | /* Initialize the TSO handler, and prepare the first payload */ |
	hdr_len = tso_start(skb, &tso);
2824 | |
2825 | total_len = skb->len - hdr_len; |
2826 | while (total_len > 0) { |
2827 | data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); |
2828 | total_len -= data_left; |
2829 | desc_count++; |
2830 | |
2831 | /* prepare packet headers: MAC + IP + TCP */ |
		mvneta_tso_put_hdr(skb, txq, &tso, data_left, total_len == 0);
2833 | |
2834 | while (data_left > 0) { |
2835 | int size; |
2836 | desc_count++; |
2837 | |
2838 | size = min_t(int, tso.size, data_left); |
2839 | |
			if (mvneta_tso_put_data(dev, txq, skb,
						tso.data, size,
						size == data_left,
						total_len == 0))
2844 | goto err_release; |
2845 | data_left -= size; |
2846 | |
			tso_build_data(skb, &tso, size);
2848 | } |
2849 | } |
2850 | |
2851 | return desc_count; |
2852 | |
2853 | err_release: |
2854 | /* Release all used data descriptors; header descriptors must not |
2855 | * be DMA-unmapped. |
2856 | */ |
	mvneta_release_descs(pp, txq, first_desc, desc_count - 1);
2858 | return 0; |
2859 | } |
2860 | |
2861 | /* Handle tx fragmentation processing */ |
2862 | static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, |
2863 | struct mvneta_tx_queue *txq) |
2864 | { |
2865 | struct mvneta_tx_desc *tx_desc; |
2866 | int i, nr_frags = skb_shinfo(skb)->nr_frags; |
2867 | int first_desc = txq->txq_put_index; |
2868 | |
2869 | for (i = 0; i < nr_frags; i++) { |
2870 | struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; |
2871 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
2872 | void *addr = skb_frag_address(frag); |
2873 | |
2874 | tx_desc = mvneta_txq_next_desc_get(txq); |
2875 | tx_desc->data_size = skb_frag_size(frag); |
2876 | |
2877 | tx_desc->buf_phys_addr = |
2878 | dma_map_single(pp->dev->dev.parent, addr, |
2879 | tx_desc->data_size, DMA_TO_DEVICE); |
2880 | |
		if (dma_mapping_error(pp->dev->dev.parent,
				      tx_desc->buf_phys_addr)) {
2883 | mvneta_txq_desc_put(txq); |
2884 | goto error; |
2885 | } |
2886 | |
2887 | if (i == nr_frags - 1) { |
2888 | /* Last descriptor */ |
2889 | tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD; |
2890 | buf->skb = skb; |
2891 | } else { |
2892 | /* Descriptor in the middle: Not First, Not Last */ |
2893 | tx_desc->command = 0; |
2894 | buf->skb = NULL; |
2895 | } |
2896 | buf->type = MVNETA_TYPE_SKB; |
2897 | mvneta_txq_inc_put(txq); |
2898 | } |
2899 | |
2900 | return 0; |
2901 | |
2902 | error: |
2903 | /* Release all descriptors that were used to map fragments of |
2904 | * this packet, as well as the corresponding DMA mappings |
2905 | */ |
	mvneta_release_descs(pp, txq, first_desc, i - 1);
2907 | return -ENOMEM; |
2908 | } |
2909 | |
2910 | /* Main tx processing */ |
2911 | static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev) |
2912 | { |
2913 | struct mvneta_port *pp = netdev_priv(dev); |
2914 | u16 txq_id = skb_get_queue_mapping(skb); |
2915 | struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; |
2916 | struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; |
2917 | struct mvneta_tx_desc *tx_desc; |
2918 | int len = skb->len; |
2919 | int frags = 0; |
2920 | u32 tx_cmd; |
2921 | |
2922 | if (!netif_running(dev)) |
2923 | goto out; |
2924 | |
2925 | if (skb_is_gso(skb)) { |
2926 | frags = mvneta_tx_tso(skb, dev, txq); |
2927 | goto out; |
2928 | } |
2929 | |
2930 | frags = skb_shinfo(skb)->nr_frags + 1; |
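	/* the + 1 above accounts for the linear part of the skb */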
2931 | |
2932 | /* Get a descriptor for the first part of the packet */ |
2933 | tx_desc = mvneta_txq_next_desc_get(txq); |
2934 | |
2935 | tx_cmd = mvneta_skb_tx_csum(skb); |
2936 | |
2937 | tx_desc->data_size = skb_headlen(skb); |
2938 | |
2939 | tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data, |
2940 | tx_desc->data_size, |
2941 | DMA_TO_DEVICE); |
2942 | if (unlikely(dma_mapping_error(dev->dev.parent, |
2943 | tx_desc->buf_phys_addr))) { |
2944 | mvneta_txq_desc_put(txq); |
2945 | frags = 0; |
2946 | goto out; |
2947 | } |
2948 | |
2949 | buf->type = MVNETA_TYPE_SKB; |
2950 | if (frags == 1) { |
2951 | /* First and Last descriptor */ |
2952 | tx_cmd |= MVNETA_TXD_FLZ_DESC; |
2953 | tx_desc->command = tx_cmd; |
2954 | buf->skb = skb; |
2955 | mvneta_txq_inc_put(txq); |
2956 | } else { |
2957 | /* First but not Last */ |
2958 | tx_cmd |= MVNETA_TXD_F_DESC; |
2959 | buf->skb = NULL; |
2960 | mvneta_txq_inc_put(txq); |
2961 | tx_desc->command = tx_cmd; |
2962 | /* Continue with other skb fragments */ |
2963 | if (mvneta_tx_frag_process(pp, skb, txq)) { |
2964 | dma_unmap_single(dev->dev.parent, |
2965 | tx_desc->buf_phys_addr, |
2966 | tx_desc->data_size, |
2967 | DMA_TO_DEVICE); |
2968 | mvneta_txq_desc_put(txq); |
2969 | frags = 0; |
2970 | goto out; |
2971 | } |
2972 | } |
2973 | |
2974 | out: |
2975 | if (frags > 0) { |
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
2977 | struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); |
2978 | |
		netdev_tx_sent_queue(nq, len);
2980 | |
2981 | txq->count += frags; |
2982 | if (txq->count >= txq->tx_stop_threshold) |
			netif_tx_stop_queue(nq);
2984 | |
		if (!netdev_xmit_more() || netif_xmit_stopped(nq) ||
		    txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
			mvneta_txq_pend_desc_add(pp, txq, frags);
2988 | else |
2989 | txq->pending += frags; |
2990 | |
		u64_stats_update_begin(&stats->syncp);
		stats->es.ps.tx_bytes += len;
		stats->es.ps.tx_packets++;
		u64_stats_update_end(&stats->syncp);
2995 | } else { |
2996 | dev->stats.tx_dropped++; |
2997 | dev_kfree_skb_any(skb); |
2998 | } |
2999 | |
3000 | return NETDEV_TX_OK; |
3001 | } |
3002 | |
3003 | |
3004 | /* Free tx resources, when resetting a port */ |
3005 | static void mvneta_txq_done_force(struct mvneta_port *pp, |
				  struct mvneta_tx_queue *txq)
{
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
3010 | int tx_done = txq->count; |
3011 | |
	mvneta_txq_bufs_free(pp, txq, tx_done, nq, false);
3013 | |
3014 | /* reset txq */ |
3015 | txq->count = 0; |
3016 | txq->txq_put_index = 0; |
3017 | txq->txq_get_index = 0; |
3018 | } |
3019 | |
3020 | /* Handle tx done - called in softirq context. The <cause_tx_done> argument |
3021 | * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL. |
3022 | */ |
3023 | static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done) |
3024 | { |
3025 | struct mvneta_tx_queue *txq; |
3026 | struct netdev_queue *nq; |
3027 | int cpu = smp_processor_id(); |
3028 | |
3029 | while (cause_tx_done) { |
		txq = mvneta_tx_done_policy(pp, cause_tx_done);
3031 | |
		nq = netdev_get_tx_queue(pp->dev, txq->id);
		__netif_tx_lock(nq, cpu);
3034 | |
3035 | if (txq->count) |
3036 | mvneta_txq_done(pp, txq); |
3037 | |
		__netif_tx_unlock(nq);
3039 | cause_tx_done &= ~((1 << txq->id)); |
3040 | } |
3041 | } |
3042 | |
/* Compute the crc8 of the specified address using a unique algorithm
 * per the hardware spec (polynomial 0x107, i.e. x^8 + x^2 + x + 1),
 * which differs from the generic crc8 algorithm.
 */
3046 | static int mvneta_addr_crc(unsigned char *addr) |
3047 | { |
3048 | int crc = 0; |
3049 | int i; |
3050 | |
3051 | for (i = 0; i < ETH_ALEN; i++) { |
3052 | int j; |
3053 | |
3054 | crc = (crc ^ addr[i]) << 8; |
3055 | for (j = 7; j >= 0; j--) { |
3056 | if (crc & (0x100 << j)) |
3057 | crc ^= 0x107 << j; |
3058 | } |
3059 | } |
3060 | |
3061 | return crc; |
3062 | } |
3063 | |
3064 | /* This method controls the net device special MAC multicast support. |
3065 | * The Special Multicast Table for MAC addresses supports MAC of the form |
3066 | * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF). |
3067 | * The MAC DA[7:0] bits are used as a pointer to the Special Multicast |
 * Table entries in the DA-Filter table. This method sets the
 * appropriate Special Multicast Table entry.
3070 | */ |
3071 | static void mvneta_set_special_mcast_addr(struct mvneta_port *pp, |
3072 | unsigned char last_byte, |
3073 | int queue) |
3074 | { |
3075 | unsigned int smc_table_reg; |
3076 | unsigned int tbl_offset; |
3077 | unsigned int reg_offset; |
3078 | |
3079 | /* Register offset from SMC table base */ |
3080 | tbl_offset = (last_byte / 4); |
3081 | /* Entry offset within the above reg */ |
3082 | reg_offset = last_byte % 4; |
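	/* e.g. last_byte = 0x2a: tbl_offset = 10 (the 11th table register)
	 * and reg_offset = 2 (the third byte within it)
	 */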
3083 | |
	smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
					+ tbl_offset * 4));
3086 | |
3087 | if (queue == -1) |
3088 | smc_table_reg &= ~(0xff << (8 * reg_offset)); |
3089 | else { |
3090 | smc_table_reg &= ~(0xff << (8 * reg_offset)); |
3091 | smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); |
3092 | } |
3093 | |
	mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
		    smc_table_reg);
3096 | } |
3097 | |
3098 | /* This method controls the network device Other MAC multicast support. |
3099 | * The Other Multicast Table is used for multicast of another type. |
3100 | * A CRC-8 is used as an index to the Other Multicast Table entries |
3101 | * in the DA-Filter table. |
3102 | * The method gets the CRC-8 value from the calling routine and |
 * sets the appropriate Other Multicast Table entry according to the
 * specified CRC-8.
3105 | */ |
3106 | static void mvneta_set_other_mcast_addr(struct mvneta_port *pp, |
3107 | unsigned char crc8, |
3108 | int queue) |
3109 | { |
3110 | unsigned int omc_table_reg; |
3111 | unsigned int tbl_offset; |
3112 | unsigned int reg_offset; |
3113 | |
3114 | tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */ |
3115 | reg_offset = crc8 % 4; /* Entry offset within the above reg */ |
3116 | |
3117 | omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset); |
3118 | |
3119 | if (queue == -1) { |
3120 | /* Clear accepts frame bit at specified Other DA table entry */ |
3121 | omc_table_reg &= ~(0xff << (8 * reg_offset)); |
3122 | } else { |
3123 | omc_table_reg &= ~(0xff << (8 * reg_offset)); |
3124 | omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); |
3125 | } |
3126 | |
	mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
3128 | } |
3129 | |
3130 | /* The network device supports multicast using two tables: |
3131 | * 1) Special Multicast Table for MAC addresses of the form |
3132 | * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF). |
3133 | * The MAC DA[7:0] bits are used as a pointer to the Special Multicast |
3134 | * Table entries in the DA-Filter table. |
3135 | * 2) Other Multicast Table for multicast of another type. A CRC-8 value |
3136 | * is used as an index to the Other Multicast Table entries in the |
3137 | * DA-Filter table. |
3138 | */ |
3139 | static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr, |
3140 | int queue) |
3141 | { |
3142 | unsigned char crc_result = 0; |
3143 | |
	if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
		mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
3146 | return 0; |
3147 | } |
3148 | |
	crc_result = mvneta_addr_crc(p_addr);
3150 | if (queue == -1) { |
3151 | if (pp->mcast_count[crc_result] == 0) { |
			netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
				    crc_result);
3154 | return -EINVAL; |
3155 | } |
3156 | |
3157 | pp->mcast_count[crc_result]--; |
3158 | if (pp->mcast_count[crc_result] != 0) { |
			netdev_info(pp->dev,
				    "After delete there are %d valid Mcast for crc8=0x%02x\n",
				    pp->mcast_count[crc_result], crc_result);
3162 | return -EINVAL; |
3163 | } |
3164 | } else |
3165 | pp->mcast_count[crc_result]++; |
3166 | |
	mvneta_set_other_mcast_addr(pp, crc_result, queue);
3168 | |
3169 | return 0; |
3170 | } |
3171 | |
/* Configure the filtering mode of the Ethernet port */
3173 | static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp, |
3174 | int is_promisc) |
3175 | { |
3176 | u32 port_cfg_reg, val; |
3177 | |
3178 | port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG); |
3179 | |
3180 | val = mvreg_read(pp, MVNETA_TYPE_PRIO); |
3181 | |
3182 | /* Set / Clear UPM bit in port configuration register */ |
3183 | if (is_promisc) { |
3184 | /* Accept all Unicast addresses */ |
3185 | port_cfg_reg |= MVNETA_UNI_PROMISC_MODE; |
3186 | val |= MVNETA_FORCE_UNI; |
		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
3189 | } else { |
3190 | /* Reject all Unicast addresses */ |
3191 | port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE; |
3192 | val &= ~MVNETA_FORCE_UNI; |
3193 | } |
3194 | |
	mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
	mvreg_write(pp, MVNETA_TYPE_PRIO, val);
3197 | } |
3198 | |
3199 | /* register unicast and multicast addresses */ |
3200 | static void mvneta_set_rx_mode(struct net_device *dev) |
3201 | { |
3202 | struct mvneta_port *pp = netdev_priv(dev); |
3203 | struct netdev_hw_addr *ha; |
3204 | |
3205 | if (dev->flags & IFF_PROMISC) { |
3206 | /* Accept all: Multicast + Unicast */ |
		mvneta_rx_unicast_promisc_set(pp, 1);
		mvneta_set_ucast_table(pp, pp->rxq_def);
		mvneta_set_special_mcast_table(pp, pp->rxq_def);
		mvneta_set_other_mcast_table(pp, pp->rxq_def);
3211 | } else { |
3212 | /* Accept single Unicast */ |
		mvneta_rx_unicast_promisc_set(pp, 0);
		mvneta_set_ucast_table(pp, -1);
		mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);
3216 | |
3217 | if (dev->flags & IFF_ALLMULTI) { |
3218 | /* Accept all multicast */ |
			mvneta_set_special_mcast_table(pp, pp->rxq_def);
			mvneta_set_other_mcast_table(pp, pp->rxq_def);
3221 | } else { |
3222 | /* Accept only initialized multicast */ |
			mvneta_set_special_mcast_table(pp, -1);
			mvneta_set_other_mcast_table(pp, -1);
3225 | |
3226 | if (!netdev_mc_empty(dev)) { |
3227 | netdev_for_each_mc_addr(ha, dev) { |
					mvneta_mcast_addr_set(pp, ha->addr,
							      pp->rxq_def);
3230 | } |
3231 | } |
3232 | } |
3233 | } |
3234 | } |
3235 | |
3236 | /* Interrupt handling - the callback for request_irq() */ |
3237 | static irqreturn_t mvneta_isr(int irq, void *dev_id) |
3238 | { |
3239 | struct mvneta_port *pp = (struct mvneta_port *)dev_id; |
3240 | |
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	napi_schedule(&pp->napi);
3243 | |
3244 | return IRQ_HANDLED; |
3245 | } |
3246 | |
3247 | /* Interrupt handling - the callback for request_percpu_irq() */ |
3248 | static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id) |
3249 | { |
3250 | struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id; |
3251 | |
	disable_percpu_irq(port->pp->dev->irq);
	napi_schedule(&port->napi);
3254 | |
3255 | return IRQ_HANDLED; |
3256 | } |
3257 | |
3258 | static void mvneta_link_change(struct mvneta_port *pp) |
3259 | { |
3260 | u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); |
3261 | |
	phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP));
3263 | } |
3264 | |
/* NAPI handler
 * Bits 0 - 7 of the causeRxTx register indicate that packets were
 * transmitted on the corresponding TXQ (Bit 0 is for TX queue 0).
 * Bits 8 - 15 of the causeRxTx register indicate that packets were
 * received on the corresponding RXQ (Bit 8 is for RX queue 0).
 * Each CPU has its own causeRxTx register.
 */
3272 | static int mvneta_poll(struct napi_struct *napi, int budget) |
3273 | { |
3274 | int rx_done = 0; |
3275 | u32 cause_rx_tx; |
3276 | int rx_queue; |
	struct mvneta_port *pp = netdev_priv(napi->dev);
3278 | struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); |
3279 | |
	if (!netif_running(pp->dev)) {
		napi_complete(napi);
3282 | return rx_done; |
3283 | } |
3284 | |
3285 | /* Read cause register */ |
3286 | cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE); |
3287 | if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) { |
3288 | u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE); |
3289 | |
		mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
3291 | |
3292 | if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE | |
3293 | MVNETA_CAUSE_LINK_CHANGE)) |
3294 | mvneta_link_change(pp); |
3295 | } |
3296 | |
3297 | /* Release Tx descriptors */ |
3298 | if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) { |
		mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
3300 | cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL; |
3301 | } |
3302 | |
3303 | /* For the case where the last mvneta_poll did not process all |
3304 | * RX packets |
3305 | */ |
3306 | cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx : |
3307 | port->cause_rx_tx; |
3308 | |
	rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
3310 | if (rx_queue) { |
3311 | rx_queue = rx_queue - 1; |
3312 | if (pp->bm_priv) |
			rx_done = mvneta_rx_hwbm(napi, pp, budget,
						 &pp->rxqs[rx_queue]);
		else
			rx_done = mvneta_rx_swbm(napi, pp, budget,
						 &pp->rxqs[rx_queue]);
3318 | } |
3319 | |
3320 | if (rx_done < budget) { |
3321 | cause_rx_tx = 0; |
		napi_complete_done(napi, rx_done);
3323 | |
3324 | if (pp->neta_armada3700) { |
3325 | unsigned long flags; |
3326 | |
3327 | local_irq_save(flags); |
3328 | mvreg_write(pp, MVNETA_INTR_NEW_MASK, |
3329 | MVNETA_RX_INTR_MASK(rxq_number) | |
3330 | MVNETA_TX_INTR_MASK(txq_number) | |
3331 | MVNETA_MISCINTR_INTR_MASK); |
3332 | local_irq_restore(flags); |
3333 | } else { |
			enable_percpu_irq(pp->dev->irq, 0);
3335 | } |
3336 | } |
3337 | |
3338 | if (pp->neta_armada3700) |
3339 | pp->cause_rx_tx = cause_rx_tx; |
3340 | else |
3341 | port->cause_rx_tx = cause_rx_tx; |
3342 | |
3343 | return rx_done; |
3344 | } |
3345 | |
3346 | static int mvneta_create_page_pool(struct mvneta_port *pp, |
3347 | struct mvneta_rx_queue *rxq, int size) |
3348 | { |
3349 | struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog); |
3350 | struct page_pool_params pp_params = { |
3351 | .order = 0, |
3352 | .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, |
3353 | .pool_size = size, |
3354 | .nid = NUMA_NO_NODE, |
3355 | .dev = pp->dev->dev.parent, |
3356 | .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE, |
3357 | .offset = pp->rx_offset_correction, |
3358 | .max_len = MVNETA_MAX_RX_BUF_SIZE, |
3359 | }; |
3360 | int err; |
3361 | |
	rxq->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rxq->page_pool)) {
		err = PTR_ERR(rxq->page_pool);
3365 | rxq->page_pool = NULL; |
3366 | return err; |
3367 | } |
3368 | |
	err = __xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0,
				 PAGE_SIZE);
3371 | if (err < 0) |
3372 | goto err_free_pp; |
3373 | |
	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
					 rxq->page_pool);
3376 | if (err) |
3377 | goto err_unregister_rxq; |
3378 | |
3379 | return 0; |
3380 | |
3381 | err_unregister_rxq: |
	xdp_rxq_info_unreg(&rxq->xdp_rxq);
3383 | err_free_pp: |
	page_pool_destroy(rxq->page_pool);
3385 | rxq->page_pool = NULL; |
3386 | return err; |
3387 | } |
3388 | |
3389 | /* Handle rxq fill: allocates rxq skbs; called when initializing a port */ |
3390 | static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, |
3391 | int num) |
3392 | { |
3393 | int i, err; |
3394 | |
	err = mvneta_create_page_pool(pp, rxq, num);
3396 | if (err < 0) |
3397 | return err; |
3398 | |
3399 | for (i = 0; i < num; i++) { |
3400 | memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc)); |
		if (mvneta_rx_refill(pp, rxq->descs + i, rxq,
				     GFP_KERNEL) != 0) {
			netdev_err(pp->dev,
				   "%s:rxq %d, %d of %d buffs filled\n",
				   __func__, rxq->id, i, num);
3406 | break; |
3407 | } |
3408 | } |
3409 | |
3410 | /* Add this number of RX descriptors as non occupied (ready to |
3411 | * get packets) |
3412 | */ |
	mvneta_rxq_non_occup_desc_add(pp, rxq, i);
3414 | |
3415 | return i; |
3416 | } |
3417 | |
3418 | /* Free all packets pending transmit from all TXQs and reset TX port */ |
3419 | static void mvneta_tx_reset(struct mvneta_port *pp) |
3420 | { |
3421 | int queue; |
3422 | |
3423 | /* free the skb's in the tx ring */ |
3424 | for (queue = 0; queue < txq_number; queue++) |
		mvneta_txq_done_force(pp, &pp->txqs[queue]);
3426 | |
3427 | mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); |
	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
3429 | } |
3430 | |
3431 | static void mvneta_rx_reset(struct mvneta_port *pp) |
3432 | { |
3433 | mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); |
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
3435 | } |
3436 | |
3437 | /* Rx/Tx queue initialization/cleanup methods */ |
3438 | |
3439 | static int mvneta_rxq_sw_init(struct mvneta_port *pp, |
3440 | struct mvneta_rx_queue *rxq) |
3441 | { |
3442 | rxq->size = pp->rx_ring_size; |
3443 | |
3444 | /* Allocate memory for RX descriptors */ |
	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					rxq->size * MVNETA_DESC_ALIGNED_SIZE,
					&rxq->descs_phys, GFP_KERNEL);
3448 | if (!rxq->descs) |
3449 | return -ENOMEM; |
3450 | |
3451 | rxq->last_desc = rxq->size - 1; |
3452 | |
3453 | return 0; |
3454 | } |
3455 | |
3456 | static void mvneta_rxq_hw_init(struct mvneta_port *pp, |
3457 | struct mvneta_rx_queue *rxq) |
3458 | { |
3459 | /* Set Rx descriptors queue starting address */ |
	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
3462 | |
3463 | /* Set coalescing pkts and time */ |
	mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
	mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
3466 | |
3467 | if (!pp->bm_priv) { |
3468 | /* Set Offset */ |
		mvneta_rxq_offset_set(pp, rxq, 0);
3470 | mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ? |
3471 | MVNETA_MAX_RX_BUF_SIZE : |
3472 | MVNETA_RX_BUF_SIZE(pp->pkt_size)); |
3473 | mvneta_rxq_bm_disable(pp, rxq); |
		mvneta_rxq_fill(pp, rxq, rxq->size);
3475 | } else { |
3476 | /* Set Offset */ |
3477 | mvneta_rxq_offset_set(pp, rxq, |
3478 | NET_SKB_PAD - pp->rx_offset_correction); |
3479 | |
3480 | mvneta_rxq_bm_enable(pp, rxq); |
3481 | /* Fill RXQ with buffers from RX pool */ |
3482 | mvneta_rxq_long_pool_set(pp, rxq); |
3483 | mvneta_rxq_short_pool_set(pp, rxq); |
		mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
3485 | } |
3486 | } |
3487 | |
3488 | /* Create a specified RX queue */ |
3489 | static int mvneta_rxq_init(struct mvneta_port *pp, |
3490 | struct mvneta_rx_queue *rxq) |
3491 | |
3492 | { |
3493 | int ret; |
3494 | |
3495 | ret = mvneta_rxq_sw_init(pp, rxq); |
3496 | if (ret < 0) |
3497 | return ret; |
3498 | |
3499 | mvneta_rxq_hw_init(pp, rxq); |
3500 | |
3501 | return 0; |
3502 | } |
3503 | |
3504 | /* Cleanup Rx queue */ |
3505 | static void mvneta_rxq_deinit(struct mvneta_port *pp, |
3506 | struct mvneta_rx_queue *rxq) |
3507 | { |
3508 | mvneta_rxq_drop_pkts(pp, rxq); |
3509 | |
3510 | if (rxq->descs) |
		dma_free_coherent(pp->dev->dev.parent,
				  rxq->size * MVNETA_DESC_ALIGNED_SIZE,
				  rxq->descs,
				  rxq->descs_phys);
3515 | |
3516 | rxq->descs = NULL; |
3517 | rxq->last_desc = 0; |
3518 | rxq->next_desc_to_proc = 0; |
3519 | rxq->descs_phys = 0; |
3520 | rxq->first_to_refill = 0; |
3521 | rxq->refill_num = 0; |
3522 | } |
3523 | |
3524 | static int mvneta_txq_sw_init(struct mvneta_port *pp, |
3525 | struct mvneta_tx_queue *txq) |
3526 | { |
3527 | int cpu, err; |
3528 | |
3529 | txq->size = pp->tx_ring_size; |
3530 | |
	/* A queue must always have room for at least one skb.
	 * Therefore, stop the queue when the number of free entries
	 * reaches the maximum number of descriptors per skb.
	 */
3535 | txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS; |
3536 | txq->tx_wake_threshold = txq->tx_stop_threshold / 2; |
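
	/* The thresholds track the in-flight descriptor count: the queue is
	 * stopped once fewer than MVNETA_MAX_SKB_DESCS entries remain free
	 * and is only woken again once the count drops to half the stop
	 * threshold, so the hysteresis avoids rapid stop/wake ping-pong.
	 */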
3537 | |
3538 | /* Allocate memory for TX descriptors */ |
	txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					txq->size * MVNETA_DESC_ALIGNED_SIZE,
					&txq->descs_phys, GFP_KERNEL);
3542 | if (!txq->descs) |
3543 | return -ENOMEM; |
3544 | |
3545 | txq->last_desc = txq->size - 1; |
3546 | |
	txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL);
3548 | if (!txq->buf) |
3549 | return -ENOMEM; |
3550 | |
3551 | /* Allocate DMA buffers for TSO MAC/IP/TCP headers */ |
3552 | err = mvneta_alloc_tso_hdrs(pp, txq); |
3553 | if (err) |
3554 | return err; |
3555 | |
3556 | /* Setup XPS mapping */ |
3557 | if (pp->neta_armada3700) |
3558 | cpu = 0; |
3559 | else if (txq_number > 1) |
3560 | cpu = txq->id % num_present_cpus(); |
3561 | else |
3562 | cpu = pp->rxq_def % num_present_cpus(); |
	cpumask_set_cpu(cpu, &txq->affinity_mask);
	netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
3565 | |
3566 | return 0; |
3567 | } |
3568 | |
3569 | static void mvneta_txq_hw_init(struct mvneta_port *pp, |
3570 | struct mvneta_tx_queue *txq) |
3571 | { |
3572 | /* Set maximum bandwidth for enabled TXQs */ |
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
3575 | |
3576 | /* Set Tx descriptors queue starting address */ |
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
3579 | |
	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
3581 | } |
3582 | |
3583 | /* Create and initialize a tx queue */ |
3584 | static int mvneta_txq_init(struct mvneta_port *pp, |
3585 | struct mvneta_tx_queue *txq) |
3586 | { |
3587 | int ret; |
3588 | |
3589 | ret = mvneta_txq_sw_init(pp, txq); |
3590 | if (ret < 0) |
3591 | return ret; |
3592 | |
3593 | mvneta_txq_hw_init(pp, txq); |
3594 | |
3595 | return 0; |
3596 | } |
3597 | |
/* Free allocated resources when mvneta_txq_init() fails to allocate memory */
3599 | static void mvneta_txq_sw_deinit(struct mvneta_port *pp, |
3600 | struct mvneta_tx_queue *txq) |
3601 | { |
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
3603 | |
	kfree(txq->buf);
3605 | |
3606 | mvneta_free_tso_hdrs(pp, txq); |
3607 | if (txq->descs) |
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);
3611 | |
	netdev_tx_reset_queue(nq);
3613 | |
3614 | txq->buf = NULL; |
3615 | txq->descs = NULL; |
3616 | txq->last_desc = 0; |
3617 | txq->next_desc_to_proc = 0; |
3618 | txq->descs_phys = 0; |
3619 | } |
3620 | |
3621 | static void mvneta_txq_hw_deinit(struct mvneta_port *pp, |
3622 | struct mvneta_tx_queue *txq) |
3623 | { |
3624 | /* Set minimum bandwidth for disabled TXQs */ |
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
3627 | |
3628 | /* Set Tx descriptors queue starting address and size */ |
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
3631 | } |
3632 | |
3633 | static void mvneta_txq_deinit(struct mvneta_port *pp, |
3634 | struct mvneta_tx_queue *txq) |
3635 | { |
3636 | mvneta_txq_sw_deinit(pp, txq); |
3637 | mvneta_txq_hw_deinit(pp, txq); |
3638 | } |
3639 | |
3640 | /* Cleanup all Tx queues */ |
3641 | static void mvneta_cleanup_txqs(struct mvneta_port *pp) |
3642 | { |
3643 | int queue; |
3644 | |
3645 | for (queue = 0; queue < txq_number; queue++) |
		mvneta_txq_deinit(pp, &pp->txqs[queue]);
3647 | } |
3648 | |
3649 | /* Cleanup all Rx queues */ |
3650 | static void mvneta_cleanup_rxqs(struct mvneta_port *pp) |
3651 | { |
3652 | int queue; |
3653 | |
3654 | for (queue = 0; queue < rxq_number; queue++) |
		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
3656 | } |
3657 | |
3658 | |
3659 | /* Init all Rx queues */ |
3660 | static int mvneta_setup_rxqs(struct mvneta_port *pp) |
3661 | { |
3662 | int queue; |
3663 | |
3664 | for (queue = 0; queue < rxq_number; queue++) { |
		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
3666 | |
3667 | if (err) { |
			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
				   __func__, queue);
3670 | mvneta_cleanup_rxqs(pp); |
3671 | return err; |
3672 | } |
3673 | } |
3674 | |
3675 | return 0; |
3676 | } |
3677 | |
3678 | /* Init all tx queues */ |
3679 | static int mvneta_setup_txqs(struct mvneta_port *pp) |
3680 | { |
3681 | int queue; |
3682 | |
3683 | for (queue = 0; queue < txq_number; queue++) { |
		int err = mvneta_txq_init(pp, &pp->txqs[queue]);
3685 | if (err) { |
			netdev_err(pp->dev, "%s: can't create txq=%d\n",
				   __func__, queue);
3688 | mvneta_cleanup_txqs(pp); |
3689 | return err; |
3690 | } |
3691 | } |
3692 | |
3693 | return 0; |
3694 | } |
3695 | |
3696 | static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface) |
3697 | { |
3698 | int ret; |
3699 | |
	ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface);
3701 | if (ret) |
3702 | return ret; |
3703 | |
	return phy_power_on(pp->comphy);
3705 | } |
3706 | |
3707 | static int mvneta_config_interface(struct mvneta_port *pp, |
3708 | phy_interface_t interface) |
3709 | { |
3710 | int ret = 0; |
3711 | |
3712 | if (pp->comphy) { |
3713 | if (interface == PHY_INTERFACE_MODE_SGMII || |
3714 | interface == PHY_INTERFACE_MODE_1000BASEX || |
3715 | interface == PHY_INTERFACE_MODE_2500BASEX) { |
3716 | ret = mvneta_comphy_init(pp, interface); |
3717 | } |
3718 | } else { |
3719 | switch (interface) { |
3720 | case PHY_INTERFACE_MODE_QSGMII: |
3721 | mvreg_write(pp, MVNETA_SERDES_CFG, |
3722 | MVNETA_QSGMII_SERDES_PROTO); |
3723 | break; |
3724 | |
3725 | case PHY_INTERFACE_MODE_SGMII: |
3726 | case PHY_INTERFACE_MODE_1000BASEX: |
3727 | mvreg_write(pp, MVNETA_SERDES_CFG, |
3728 | MVNETA_SGMII_SERDES_PROTO); |
3729 | break; |
3730 | |
3731 | case PHY_INTERFACE_MODE_2500BASEX: |
3732 | mvreg_write(pp, MVNETA_SERDES_CFG, |
3733 | MVNETA_HSGMII_SERDES_PROTO); |
3734 | break; |
3735 | default: |
3736 | break; |
3737 | } |
3738 | } |
3739 | |
3740 | pp->phy_interface = interface; |
3741 | |
3742 | return ret; |
3743 | } |
3744 | |
3745 | static void mvneta_start_dev(struct mvneta_port *pp) |
3746 | { |
3747 | int cpu; |
3748 | |
3749 | WARN_ON(mvneta_config_interface(pp, pp->phy_interface)); |
3750 | |
	mvneta_max_rx_size_set(pp, pp->pkt_size);
	mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
3753 | |
3754 | /* start the Rx/Tx activity */ |
3755 | mvneta_port_enable(pp); |
3756 | |
3757 | if (!pp->neta_armada3700) { |
3758 | /* Enable polling on the port */ |
3759 | for_each_online_cpu(cpu) { |
3760 | struct mvneta_pcpu_port *port = |
3761 | per_cpu_ptr(pp->ports, cpu); |
3762 | |
			napi_enable(&port->napi);
3764 | } |
3765 | } else { |
		napi_enable(&pp->napi);
3767 | } |
3768 | |
3769 | /* Unmask interrupts. It has to be done from each CPU */ |
	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3771 | |
3772 | mvreg_write(pp, MVNETA_INTR_MISC_MASK, |
3773 | MVNETA_CAUSE_PHY_STATUS_CHANGE | |
3774 | MVNETA_CAUSE_LINK_CHANGE); |
3775 | |
3776 | phylink_start(pp->phylink); |
3777 | |
3778 | /* We may have called phylink_speed_down before */ |
	phylink_speed_up(pp->phylink);
3780 | |
	netif_tx_start_all_queues(pp->dev);
3782 | |
	clear_bit(__MVNETA_DOWN, &pp->state);
3784 | } |
3785 | |
3786 | static void mvneta_stop_dev(struct mvneta_port *pp) |
3787 | { |
3788 | unsigned int cpu; |
3789 | |
	set_bit(__MVNETA_DOWN, &pp->state);
3791 | |
	if (device_may_wakeup(&pp->dev->dev))
		phylink_speed_down(pp->phylink, false);
3794 | |
3795 | phylink_stop(pp->phylink); |
3796 | |
3797 | if (!pp->neta_armada3700) { |
3798 | for_each_online_cpu(cpu) { |
3799 | struct mvneta_pcpu_port *port = |
3800 | per_cpu_ptr(pp->ports, cpu); |
3801 | |
			napi_disable(&port->napi);
3803 | } |
3804 | } else { |
		napi_disable(&pp->napi);
3806 | } |
3807 | |
	netif_carrier_off(pp->dev);
3809 | |
3810 | mvneta_port_down(pp); |
	netif_tx_stop_all_queues(pp->dev);
3812 | |
3813 | /* Stop the port activity */ |
3814 | mvneta_port_disable(pp); |
3815 | |
3816 | /* Clear all ethernet port interrupts */ |
	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
3818 | |
3819 | /* Mask all ethernet port interrupts */ |
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3821 | |
3822 | mvneta_tx_reset(pp); |
3823 | mvneta_rx_reset(pp); |
3824 | |
3825 | WARN_ON(phy_power_off(pp->comphy)); |
3826 | } |
3827 | |
3828 | static void mvneta_percpu_enable(void *arg) |
3829 | { |
3830 | struct mvneta_port *pp = arg; |
3831 | |
	enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
3833 | } |
3834 | |
3835 | static void mvneta_percpu_disable(void *arg) |
3836 | { |
3837 | struct mvneta_port *pp = arg; |
3838 | |
	disable_percpu_irq(pp->dev->irq);
3840 | } |
3841 | |
3842 | /* Change the device mtu */ |
3843 | static int mvneta_change_mtu(struct net_device *dev, int mtu) |
3844 | { |
3845 | struct mvneta_port *pp = netdev_priv(dev); |
3846 | struct bpf_prog *prog = pp->xdp_prog; |
3847 | int ret; |
3848 | |
3849 | if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) { |
		netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
3851 | mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8)); |
3852 | mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8); |
3853 | } |
3854 | |
3855 | if (prog && !prog->aux->xdp_has_frags && |
3856 | mtu > MVNETA_MAX_RX_BUF_SIZE) { |
		netdev_info(dev, "Illegal MTU %d for XDP prog without frags\n",
3858 | mtu); |
3859 | |
3860 | return -EINVAL; |
3861 | } |
3862 | |
3863 | dev->mtu = mtu; |
3864 | |
3865 | if (!netif_running(dev)) { |
3866 | if (pp->bm_priv) |
3867 | mvneta_bm_update_mtu(pp, mtu); |
3868 | |
3869 | netdev_update_features(dev); |
3870 | return 0; |
3871 | } |
3872 | |
3873 | /* The interface is running, so we have to force a |
3874 | * reallocation of the queues |
3875 | */ |
3876 | mvneta_stop_dev(pp); |
	on_each_cpu(mvneta_percpu_disable, pp, true);
3878 | |
3879 | mvneta_cleanup_txqs(pp); |
3880 | mvneta_cleanup_rxqs(pp); |
3881 | |
3882 | if (pp->bm_priv) |
3883 | mvneta_bm_update_mtu(pp, mtu); |
3884 | |
3885 | pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu); |
3886 | |
3887 | ret = mvneta_setup_rxqs(pp); |
3888 | if (ret) { |
		netdev_err(dev, "unable to setup rxqs after MTU change\n");
3890 | return ret; |
3891 | } |
3892 | |
3893 | ret = mvneta_setup_txqs(pp); |
3894 | if (ret) { |
		netdev_err(dev, "unable to setup txqs after MTU change\n");
3896 | return ret; |
3897 | } |
3898 | |
	on_each_cpu(mvneta_percpu_enable, pp, true);
3900 | mvneta_start_dev(pp); |
3901 | |
3902 | netdev_update_features(dev); |
3903 | |
3904 | return 0; |
3905 | } |
3906 | |
3907 | static netdev_features_t mvneta_fix_features(struct net_device *dev, |
3908 | netdev_features_t features) |
3909 | { |
3910 | struct mvneta_port *pp = netdev_priv(dev); |
3911 | |
3912 | if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) { |
3913 | features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO); |
		netdev_info(dev,
			    "Disable IP checksum for MTU greater than %dB\n",
3916 | pp->tx_csum_limit); |
3917 | } |
3918 | |
3919 | return features; |
3920 | } |
3921 | |
3922 | /* Get mac address */ |
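/* The hardware packs the station address across two registers:
 * MAC_ADDR_HIGH holds bytes 0-3 and the low 16 bits of MAC_ADDR_LOW
 * hold bytes 4-5. For example, 00:11:22:33:44:55 reads back as
 * mac_addr_h = 0x00112233 and mac_addr_l = 0x4455.
 */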
3923 | static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr) |
3924 | { |
3925 | u32 mac_addr_l, mac_addr_h; |
3926 | |
3927 | mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW); |
3928 | mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH); |
3929 | addr[0] = (mac_addr_h >> 24) & 0xFF; |
3930 | addr[1] = (mac_addr_h >> 16) & 0xFF; |
3931 | addr[2] = (mac_addr_h >> 8) & 0xFF; |
3932 | addr[3] = mac_addr_h & 0xFF; |
3933 | addr[4] = (mac_addr_l >> 8) & 0xFF; |
3934 | addr[5] = mac_addr_l & 0xFF; |
3935 | } |
3936 | |
3937 | /* Handle setting mac address */ |
3938 | static int mvneta_set_mac_addr(struct net_device *dev, void *addr) |
3939 | { |
3940 | struct mvneta_port *pp = netdev_priv(dev); |
3941 | struct sockaddr *sockaddr = addr; |
3942 | int ret; |
3943 | |
	ret = eth_prepare_mac_addr_change(dev, addr);
3945 | if (ret < 0) |
3946 | return ret; |
3947 | /* Remove previous address table entry */ |
	mvneta_mac_addr_set(pp, dev->dev_addr, -1);
3949 | |
3950 | /* Set new addr in hw */ |
	mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);
3952 | |
	eth_commit_mac_addr_change(dev, addr);
3954 | return 0; |
3955 | } |
3956 | |
3957 | static struct mvneta_port *mvneta_pcs_to_port(struct phylink_pcs *pcs) |
3958 | { |
3959 | return container_of(pcs, struct mvneta_port, phylink_pcs); |
3960 | } |
3961 | |
3962 | static int mvneta_pcs_validate(struct phylink_pcs *pcs, |
3963 | unsigned long *supported, |
3964 | const struct phylink_link_state *state) |
3965 | { |
3966 | /* We only support QSGMII, SGMII, 802.3z and RGMII modes. |
3967 | * When in 802.3z mode, we must have AN enabled: |
3968 | * "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... |
3969 | * When <PortType> = 1 (1000BASE-X) this field must be set to 1." |
3970 | */ |
	if (phy_interface_mode_is_8023z(state->interface) &&
3972 | !phylink_test(state->advertising, Autoneg)) |
3973 | return -EINVAL; |
3974 | |
3975 | return 0; |
3976 | } |
3977 | |
3978 | static void mvneta_pcs_get_state(struct phylink_pcs *pcs, |
3979 | struct phylink_link_state *state) |
3980 | { |
3981 | struct mvneta_port *pp = mvneta_pcs_to_port(pcs); |
3982 | u32 gmac_stat; |
3983 | |
3984 | gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); |
3985 | |
3986 | if (gmac_stat & MVNETA_GMAC_SPEED_1000) |
3987 | state->speed = |
3988 | state->interface == PHY_INTERFACE_MODE_2500BASEX ? |
3989 | SPEED_2500 : SPEED_1000; |
3990 | else if (gmac_stat & MVNETA_GMAC_SPEED_100) |
3991 | state->speed = SPEED_100; |
3992 | else |
3993 | state->speed = SPEED_10; |
3994 | |
3995 | state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE); |
3996 | state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP); |
3997 | state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX); |
3998 | |
3999 | if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE) |
4000 | state->pause |= MLO_PAUSE_RX; |
4001 | if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE) |
4002 | state->pause |= MLO_PAUSE_TX; |
4003 | } |
4004 | |
4005 | static int mvneta_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, |
4006 | phy_interface_t interface, |
4007 | const unsigned long *advertising, |
4008 | bool permit_pause_to_mac) |
4009 | { |
4010 | struct mvneta_port *pp = mvneta_pcs_to_port(pcs); |
4011 | u32 mask, val, an, old_an, changed; |
4012 | |
4013 | mask = MVNETA_GMAC_INBAND_AN_ENABLE | |
4014 | MVNETA_GMAC_INBAND_RESTART_AN | |
4015 | MVNETA_GMAC_AN_SPEED_EN | |
4016 | MVNETA_GMAC_AN_FLOW_CTRL_EN | |
4017 | MVNETA_GMAC_AN_DUPLEX_EN; |
4018 | |
4019 | if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) { |
4020 | mask |= MVNETA_GMAC_CONFIG_MII_SPEED | |
4021 | MVNETA_GMAC_CONFIG_GMII_SPEED | |
4022 | MVNETA_GMAC_CONFIG_FULL_DUPLEX; |
4023 | val = MVNETA_GMAC_INBAND_AN_ENABLE; |
4024 | |
4025 | if (interface == PHY_INTERFACE_MODE_SGMII) { |
4026 | /* SGMII mode receives the speed and duplex from PHY */ |
4027 | val |= MVNETA_GMAC_AN_SPEED_EN | |
4028 | MVNETA_GMAC_AN_DUPLEX_EN; |
4029 | } else { |
4030 | /* 802.3z mode has fixed speed and duplex */ |
4031 | val |= MVNETA_GMAC_CONFIG_GMII_SPEED | |
4032 | MVNETA_GMAC_CONFIG_FULL_DUPLEX; |
4033 | |
			/* The FLOW_CTRL_EN bit selects whether the GMAC
			 * pause mode is controlled automatically by the
			 * hardware or manually via CONFIG_FLOW_CTRL.
			 */
4038 | if (permit_pause_to_mac) |
4039 | val |= MVNETA_GMAC_AN_FLOW_CTRL_EN; |
4040 | |
4041 | /* Update the advertisement bits */ |
4042 | mask |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL; |
4043 | if (phylink_test(advertising, Pause)) |
4044 | val |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL; |
4045 | } |
4046 | } else { |
4047 | /* Phy or fixed speed - disable in-band AN modes */ |
4048 | val = 0; |
4049 | } |
4050 | |
4051 | old_an = an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); |
4052 | an = (an & ~mask) | val; |
4053 | changed = old_an ^ an; |
4054 | if (changed) |
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, an);
4056 | |
4057 | /* We are only interested in the advertisement bits changing */ |
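	/* Returning a positive value signals phylink that the advertisement
	 * changed, so it can restart in-band AN via pcs_an_restart().
	 */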
4058 | return !!(changed & MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL); |
4059 | } |
4060 | |
4061 | static void mvneta_pcs_an_restart(struct phylink_pcs *pcs) |
4062 | { |
4063 | struct mvneta_port *pp = mvneta_pcs_to_port(pcs); |
4064 | u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); |
4065 | |
4066 | mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, |
		    gmac_an | MVNETA_GMAC_INBAND_RESTART_AN);
4068 | mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, |
		    gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
4070 | } |
4071 | |
4072 | static const struct phylink_pcs_ops mvneta_phylink_pcs_ops = { |
4073 | .pcs_validate = mvneta_pcs_validate, |
4074 | .pcs_get_state = mvneta_pcs_get_state, |
4075 | .pcs_config = mvneta_pcs_config, |
4076 | .pcs_an_restart = mvneta_pcs_an_restart, |
4077 | }; |
4078 | |
4079 | static struct phylink_pcs *mvneta_mac_select_pcs(struct phylink_config *config, |
4080 | phy_interface_t interface) |
4081 | { |
4082 | struct net_device *ndev = to_net_dev(config->dev); |
	struct mvneta_port *pp = netdev_priv(ndev);
4084 | |
4085 | return &pp->phylink_pcs; |
4086 | } |
4087 | |
4088 | static int mvneta_mac_prepare(struct phylink_config *config, unsigned int mode, |
4089 | phy_interface_t interface) |
4090 | { |
4091 | struct net_device *ndev = to_net_dev(config->dev); |
	struct mvneta_port *pp = netdev_priv(ndev);
4093 | u32 val; |
4094 | |
4095 | if (pp->phy_interface != interface || |
4096 | phylink_autoneg_inband(mode)) { |
4097 | /* Force the link down when changing the interface or if in |
4098 | * in-band mode. According to Armada 370 documentation, we |
4099 | * can only change the port mode and in-band enable when the |
4100 | * link is down. |
4101 | */ |
4102 | val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); |
4103 | val &= ~MVNETA_GMAC_FORCE_LINK_PASS; |
4104 | val |= MVNETA_GMAC_FORCE_LINK_DOWN; |
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4106 | } |
4107 | |
4108 | if (pp->phy_interface != interface) |
4109 | WARN_ON(phy_power_off(pp->comphy)); |
4110 | |
4111 | /* Enable the 1ms clock */ |
4112 | if (phylink_autoneg_inband(mode)) { |
		unsigned long rate = clk_get_rate(pp->clk);
4114 | |
4115 | mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, |
4116 | MVNETA_GMAC_1MS_CLOCK_ENABLE | (rate / 1000)); |
4117 | } |
4118 | |
4119 | return 0; |
4120 | } |
4121 | |
4122 | static void mvneta_mac_config(struct phylink_config *config, unsigned int mode, |
4123 | const struct phylink_link_state *state) |
4124 | { |
4125 | struct net_device *ndev = to_net_dev(config->dev); |
	struct mvneta_port *pp = netdev_priv(ndev);
4127 | u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0); |
4128 | u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2); |
4129 | u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4); |
4130 | |
4131 | new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X; |
4132 | new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE | |
4133 | MVNETA_GMAC2_PORT_RESET); |
4134 | new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE); |
4135 | |
4136 | /* Even though it might look weird, when we're configured in |
4137 | * SGMII or QSGMII mode, the RGMII bit needs to be set. |
4138 | */ |
4139 | new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII; |
4140 | |
4141 | if (state->interface == PHY_INTERFACE_MODE_QSGMII || |
4142 | state->interface == PHY_INTERFACE_MODE_SGMII || |
	    phy_interface_mode_is_8023z(state->interface))
4144 | new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE; |
4145 | |
4146 | if (!phylink_autoneg_inband(mode)) { |
4147 | /* Phy or fixed speed - nothing to do, leave the |
4148 | * configured speed, duplex and flow control as-is. |
4149 | */ |
4150 | } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { |
4151 | /* SGMII mode receives the state from the PHY */ |
4152 | new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE; |
4153 | } else { |
4154 | /* 802.3z negotiation - only 1000base-X */ |
4155 | new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X; |
4156 | } |
4157 | |
4158 | /* When at 2.5G, the link partner can send frames with shortened |
4159 | * preambles. |
4160 | */ |
4161 | if (state->interface == PHY_INTERFACE_MODE_2500BASEX) |
4162 | new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE; |
4163 | |
	if (new_ctrl0 != gmac_ctrl0)
		mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
	if (new_ctrl2 != gmac_ctrl2)
		mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
	if (new_ctrl4 != gmac_ctrl4)
		mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4);
4170 | |
4171 | if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) { |
4172 | while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) & |
4173 | MVNETA_GMAC2_PORT_RESET) != 0) |
4174 | continue; |
4175 | } |
4176 | } |
4177 | |
4178 | static int mvneta_mac_finish(struct phylink_config *config, unsigned int mode, |
4179 | phy_interface_t interface) |
4180 | { |
4181 | struct net_device *ndev = to_net_dev(config->dev); |
	struct mvneta_port *pp = netdev_priv(ndev);
4183 | u32 val, clk; |
4184 | |
4185 | /* Disable 1ms clock if not in in-band mode */ |
4186 | if (!phylink_autoneg_inband(mode)) { |
4187 | clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); |
4188 | clk &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE; |
		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, clk);
4190 | } |
4191 | |
4192 | if (pp->phy_interface != interface) |
4193 | /* Enable the Serdes PHY */ |
4194 | WARN_ON(mvneta_config_interface(pp, interface)); |
4195 | |
4196 | /* Allow the link to come up if in in-band mode, otherwise the |
4197 | * link is forced via mac_link_down()/mac_link_up() |
4198 | */ |
4199 | if (phylink_autoneg_inband(mode)) { |
4200 | val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); |
4201 | val &= ~MVNETA_GMAC_FORCE_LINK_DOWN; |
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4203 | } |
4204 | |
4205 | return 0; |
4206 | } |
4207 | |
4208 | static void mvneta_set_eee(struct mvneta_port *pp, bool enable) |
4209 | { |
4210 | u32 lpi_ctl1; |
4211 | |
4212 | lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1); |
4213 | if (enable) |
4214 | lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE; |
4215 | else |
4216 | lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE; |
	mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
4218 | } |
4219 | |
4220 | static void mvneta_mac_link_down(struct phylink_config *config, |
4221 | unsigned int mode, phy_interface_t interface) |
4222 | { |
4223 | struct net_device *ndev = to_net_dev(config->dev); |
	struct mvneta_port *pp = netdev_priv(ndev);
4225 | u32 val; |
4226 | |
4227 | mvneta_port_down(pp); |
4228 | |
4229 | if (!phylink_autoneg_inband(mode)) { |
4230 | val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); |
4231 | val &= ~MVNETA_GMAC_FORCE_LINK_PASS; |
4232 | val |= MVNETA_GMAC_FORCE_LINK_DOWN; |
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4234 | } |
4235 | |
4236 | pp->eee_active = false; |
	mvneta_set_eee(pp, false);
4238 | } |
4239 | |
4240 | static void mvneta_mac_link_up(struct phylink_config *config, |
4241 | struct phy_device *phy, |
4242 | unsigned int mode, phy_interface_t interface, |
4243 | int speed, int duplex, |
4244 | bool tx_pause, bool rx_pause) |
4245 | { |
4246 | struct net_device *ndev = to_net_dev(config->dev); |
	struct mvneta_port *pp = netdev_priv(ndev);
4248 | u32 val; |
4249 | |
4250 | if (!phylink_autoneg_inband(mode)) { |
4251 | val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); |
4252 | val &= ~(MVNETA_GMAC_FORCE_LINK_DOWN | |
4253 | MVNETA_GMAC_CONFIG_MII_SPEED | |
4254 | MVNETA_GMAC_CONFIG_GMII_SPEED | |
4255 | MVNETA_GMAC_CONFIG_FLOW_CTRL | |
4256 | MVNETA_GMAC_CONFIG_FULL_DUPLEX); |
4257 | val |= MVNETA_GMAC_FORCE_LINK_PASS; |
4258 | |
4259 | if (speed == SPEED_1000 || speed == SPEED_2500) |
4260 | val |= MVNETA_GMAC_CONFIG_GMII_SPEED; |
4261 | else if (speed == SPEED_100) |
4262 | val |= MVNETA_GMAC_CONFIG_MII_SPEED; |
4263 | |
4264 | if (duplex == DUPLEX_FULL) |
4265 | val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; |
4266 | |
4267 | if (tx_pause || rx_pause) |
4268 | val |= MVNETA_GMAC_CONFIG_FLOW_CTRL; |
4269 | |
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4271 | } else { |
4272 | /* When inband doesn't cover flow control or flow control is |
4273 | * disabled, we need to manually configure it. This bit will |
4274 | * only have effect if MVNETA_GMAC_AN_FLOW_CTRL_EN is unset. |
4275 | */ |
4276 | val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); |
4277 | val &= ~MVNETA_GMAC_CONFIG_FLOW_CTRL; |
4278 | |
4279 | if (tx_pause || rx_pause) |
4280 | val |= MVNETA_GMAC_CONFIG_FLOW_CTRL; |
4281 | |
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4283 | } |
4284 | |
4285 | mvneta_port_up(pp); |
4286 | |
4287 | if (phy && pp->eee_enabled) { |
		pp->eee_active = phy_init_eee(phy, false) >= 0;
		mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
4290 | } |
4291 | } |
4292 | |
4293 | static const struct phylink_mac_ops mvneta_phylink_ops = { |
4294 | .mac_select_pcs = mvneta_mac_select_pcs, |
4295 | .mac_prepare = mvneta_mac_prepare, |
4296 | .mac_config = mvneta_mac_config, |
4297 | .mac_finish = mvneta_mac_finish, |
4298 | .mac_link_down = mvneta_mac_link_down, |
4299 | .mac_link_up = mvneta_mac_link_up, |
4300 | }; |
4301 | |
4302 | static int mvneta_mdio_probe(struct mvneta_port *pp) |
4303 | { |
4304 | struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; |
	int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);
4306 | |
4307 | if (err) |
		netdev_err(pp->dev, "could not attach PHY: %d\n", err);
4309 | |
4310 | phylink_ethtool_get_wol(pp->phylink, &wol); |
	device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
4312 | |
4313 | /* PHY WoL may be enabled but device wakeup disabled */ |
4314 | if (wol.supported) |
		device_set_wakeup_enable(&pp->dev->dev, !!wol.wolopts);
4316 | |
4317 | return err; |
4318 | } |
4319 | |
4320 | static void mvneta_mdio_remove(struct mvneta_port *pp) |
4321 | { |
4322 | phylink_disconnect_phy(pp->phylink); |
4323 | } |
4324 | |
4325 | /* Electing a CPU must be done in an atomic way: it should be done |
4326 | * after or before the removal/insertion of a CPU and this function is |
4327 | * not reentrant. |
4328 | */ |
4329 | static void mvneta_percpu_elect(struct mvneta_port *pp) |
4330 | { |
4331 | int elected_cpu = 0, max_cpu, cpu; |
4332 | |
4333 | /* Use the cpu associated to the rxq when it is online, in all |
4334 | * the other cases, use the cpu 0 which can't be offline. |
4335 | */ |
	if (pp->rxq_def < nr_cpu_ids && cpu_online(pp->rxq_def))
4337 | elected_cpu = pp->rxq_def; |
4338 | |
4339 | max_cpu = num_present_cpus(); |
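
	/* E.g. with 4 present CPUs and 8 RX queues, rxq % max_cpu spreads
	 * RXQs 0 and 4 onto CPU 0, RXQs 1 and 5 onto CPU 1, and so on,
	 * while the default RXQ is additionally mapped to the elected CPU.
	 */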
4340 | |
4341 | for_each_online_cpu(cpu) { |
4342 | int rxq_map = 0, txq_map = 0; |
4343 | int rxq; |
4344 | |
4345 | for (rxq = 0; rxq < rxq_number; rxq++) |
4346 | if ((rxq % max_cpu) == cpu) |
4347 | rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); |
4348 | |
4349 | if (cpu == elected_cpu) |
4350 | /* Map the default receive queue to the elected CPU */ |
4351 | rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def); |
4352 | |
4353 | /* We update the TX queue map only if we have one |
4354 | * queue. In this case we associate the TX queue to |
4355 | * the CPU bound to the default RX queue |
4356 | */ |
4357 | if (txq_number == 1) |
4358 | txq_map = (cpu == elected_cpu) ? |
4359 | MVNETA_CPU_TXQ_ACCESS(0) : 0; |
4360 | else |
4361 | txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) & |
4362 | MVNETA_CPU_TXQ_ACCESS_ALL_MASK; |
4363 | |
		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
4365 | |
		/* Update the interrupt mask on each CPU according to the
		 * new mapping
		 */
		smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
					 pp, true);
4371 | } |
4372 | }; |
4373 | |
4374 | static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node) |
4375 | { |
4376 | int other_cpu; |
4377 | struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, |
4378 | node_online); |
4379 | struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); |
4380 | |
4381 | /* Armada 3700's per-cpu interrupt for mvneta is broken, all interrupts |
4382 | * are routed to CPU 0, so we don't need all the cpu-hotplug support |
4383 | */ |
4384 | if (pp->neta_armada3700) |
4385 | return 0; |
4386 | |
	spin_lock(&pp->lock);
4388 | /* |
4389 | * Configuring the driver for a new CPU while the driver is |
4390 | * stopping is racy, so just avoid it. |
4391 | */ |
4392 | if (pp->is_stopped) { |
		spin_unlock(&pp->lock);
4394 | return 0; |
4395 | } |
	netif_tx_stop_all_queues(pp->dev);
4397 | |
4398 | /* |
4399 | * We have to synchronise on tha napi of each CPU except the one |
4400 | * just being woken up |
4401 | */ |
4402 | for_each_online_cpu(other_cpu) { |
4403 | if (other_cpu != cpu) { |
4404 | struct mvneta_pcpu_port *other_port = |
4405 | per_cpu_ptr(pp->ports, other_cpu); |
4406 | |
			napi_synchronize(&other_port->napi);
4408 | } |
4409 | } |
4410 | |
4411 | /* Mask all ethernet port interrupts */ |
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
	napi_enable(&port->napi);
4414 | |
4415 | /* |
4416 | * Enable per-CPU interrupts on the CPU that is |
4417 | * brought up. |
4418 | */ |
	mvneta_percpu_enable(pp);
4420 | |
4421 | /* |
4422 | * Enable per-CPU interrupt on the one CPU we care |
4423 | * about. |
4424 | */ |
4425 | mvneta_percpu_elect(pp); |
4426 | |
4427 | /* Unmask all ethernet port interrupts */ |
	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
4429 | mvreg_write(pp, MVNETA_INTR_MISC_MASK, |
4430 | MVNETA_CAUSE_PHY_STATUS_CHANGE | |
4431 | MVNETA_CAUSE_LINK_CHANGE); |
	netif_tx_start_all_queues(pp->dev);
	spin_unlock(&pp->lock);
4434 | return 0; |
4435 | } |
4436 | |
4437 | static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node) |
4438 | { |
4439 | struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, |
4440 | node_online); |
4441 | struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); |
4442 | |
4443 | /* |
4444 | * Thanks to this lock we are sure that any pending cpu election is |
4445 | * done. |
4446 | */ |
	spin_lock(&pp->lock);
4448 | /* Mask all ethernet port interrupts */ |
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
	spin_unlock(&pp->lock);
4451 | |
	napi_synchronize(&port->napi);
	napi_disable(&port->napi);
4454 | /* Disable per-CPU interrupts on the CPU that is brought down. */ |
	mvneta_percpu_disable(pp);
4456 | return 0; |
4457 | } |
4458 | |
4459 | static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node) |
4460 | { |
4461 | struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port, |
4462 | node_dead); |
4463 | |
	/* Check if a new CPU must be elected now this one is down */
	spin_lock(&pp->lock);
	mvneta_percpu_elect(pp);
	spin_unlock(&pp->lock);
4468 | /* Unmask all ethernet port interrupts */ |
	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
4470 | mvreg_write(pp, MVNETA_INTR_MISC_MASK, |
4471 | MVNETA_CAUSE_PHY_STATUS_CHANGE | |
4472 | MVNETA_CAUSE_LINK_CHANGE); |
	netif_tx_start_all_queues(pp->dev);
4474 | return 0; |
4475 | } |
4476 | |
4477 | static int mvneta_open(struct net_device *dev) |
4478 | { |
4479 | struct mvneta_port *pp = netdev_priv(dev); |
4480 | int ret; |
4481 | |
4482 | pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); |
4483 | |
4484 | ret = mvneta_setup_rxqs(pp); |
4485 | if (ret) |
4486 | return ret; |
4487 | |
4488 | ret = mvneta_setup_txqs(pp); |
4489 | if (ret) |
4490 | goto err_cleanup_rxqs; |
4491 | |
4492 | /* Connect to port interrupt line */ |
4493 | if (pp->neta_armada3700) |
		ret = request_irq(pp->dev->irq, mvneta_isr, 0,
				  dev->name, pp);
	else
		ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
					 dev->name, pp->ports);
4499 | if (ret) { |
		netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
4501 | goto err_cleanup_txqs; |
4502 | } |
4503 | |
4504 | if (!pp->neta_armada3700) { |
4505 | /* Enable per-CPU interrupt on all the CPU to handle our RX |
4506 | * queue interrupts |
4507 | */ |
		on_each_cpu(mvneta_percpu_enable, pp, true);
4509 | |
4510 | pp->is_stopped = false; |
4511 | /* Register a CPU notifier to handle the case where our CPU |
4512 | * might be taken offline. |
4513 | */ |
		ret = cpuhp_state_add_instance_nocalls(online_hpstate,
						       &pp->node_online);
4516 | if (ret) |
4517 | goto err_free_irq; |
4518 | |
		ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						       &pp->node_dead);
4521 | if (ret) |
4522 | goto err_free_online_hp; |
4523 | } |
4524 | |
4525 | ret = mvneta_mdio_probe(pp); |
4526 | if (ret < 0) { |
		netdev_err(dev, "cannot probe MDIO bus\n");
4528 | goto err_free_dead_hp; |
4529 | } |
4530 | |
4531 | mvneta_start_dev(pp); |
4532 | |
4533 | return 0; |
4534 | |
4535 | err_free_dead_hp: |
4536 | if (!pp->neta_armada3700) |
		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						    &pp->node_dead);
4539 | err_free_online_hp: |
4540 | if (!pp->neta_armada3700) |
		cpuhp_state_remove_instance_nocalls(online_hpstate,
						    &pp->node_online);
4543 | err_free_irq: |
4544 | if (pp->neta_armada3700) { |
4545 | free_irq(pp->dev->irq, pp); |
4546 | } else { |
		on_each_cpu(mvneta_percpu_disable, pp, true);
4548 | free_percpu_irq(pp->dev->irq, pp->ports); |
4549 | } |
4550 | err_cleanup_txqs: |
4551 | mvneta_cleanup_txqs(pp); |
4552 | err_cleanup_rxqs: |
4553 | mvneta_cleanup_rxqs(pp); |
4554 | return ret; |
4555 | } |
4556 | |
4557 | /* Stop the port, free port interrupt line */ |
4558 | static int mvneta_stop(struct net_device *dev) |
4559 | { |
4560 | struct mvneta_port *pp = netdev_priv(dev); |
4561 | |
4562 | if (!pp->neta_armada3700) { |
		/* Inform that we are stopping so we don't want to setup the
		 * driver for new CPUs in the notifiers. The code of the
		 * notifier for CPU online is protected by the same spinlock,
		 * so when we get the lock, the notifier work is done.
		 */
		spin_lock(&pp->lock);
		pp->is_stopped = true;
		spin_unlock(&pp->lock);
4571 | |
4572 | mvneta_stop_dev(pp); |
4573 | mvneta_mdio_remove(pp); |
4574 | |
		cpuhp_state_remove_instance_nocalls(online_hpstate,
						    &pp->node_online);
		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						    &pp->node_dead);
		on_each_cpu(mvneta_percpu_disable, pp, true);
4580 | free_percpu_irq(dev->irq, pp->ports); |
4581 | } else { |
4582 | mvneta_stop_dev(pp); |
4583 | mvneta_mdio_remove(pp); |
4584 | free_irq(dev->irq, pp); |
4585 | } |
4586 | |
4587 | mvneta_cleanup_rxqs(pp); |
4588 | mvneta_cleanup_txqs(pp); |
4589 | |
4590 | return 0; |
4591 | } |
4592 | |
4593 | static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
4594 | { |
4595 | struct mvneta_port *pp = netdev_priv(dev); |
4596 | |
4597 | return phylink_mii_ioctl(pp->phylink, ifr, cmd); |
4598 | } |
4599 | |
4600 | static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog, |
4601 | struct netlink_ext_ack *extack) |
4602 | { |
4603 | bool need_update, running = netif_running(dev); |
4604 | struct mvneta_port *pp = netdev_priv(dev); |
4605 | struct bpf_prog *old_prog; |
4606 | |
4607 | if (prog && !prog->aux->xdp_has_frags && |
4608 | dev->mtu > MVNETA_MAX_RX_BUF_SIZE) { |
4609 | NL_SET_ERR_MSG_MOD(extack, "prog does not support XDP frags" ); |
4610 | return -EOPNOTSUPP; |
4611 | } |
4612 | |
4613 | if (pp->bm_priv) { |
4614 | NL_SET_ERR_MSG_MOD(extack, |
4615 | "Hardware Buffer Management not supported on XDP" ); |
4616 | return -EOPNOTSUPP; |
4617 | } |
4618 | |
4619 | need_update = !!pp->xdp_prog != !!prog; |
4620 | if (running && need_update) |
4621 | mvneta_stop(dev); |
4622 | |
4623 | old_prog = xchg(&pp->xdp_prog, prog); |
4624 | if (old_prog) |
		bpf_prog_put(old_prog);
4626 | |
4627 | if (running && need_update) |
4628 | return mvneta_open(dev); |
4629 | |
4630 | return 0; |
4631 | } |
4632 | |
4633 | static int mvneta_xdp(struct net_device *dev, struct netdev_bpf *xdp) |
4634 | { |
4635 | switch (xdp->command) { |
4636 | case XDP_SETUP_PROG: |
		return mvneta_xdp_setup(dev, xdp->prog, xdp->extack);
4638 | default: |
4639 | return -EINVAL; |
4640 | } |
4641 | } |
4642 | |
4643 | /* Ethtool methods */ |
4644 | |
4645 | /* Set link ksettings (phy address, speed) for ethtools */ |
4646 | static int |
4647 | mvneta_ethtool_set_link_ksettings(struct net_device *ndev, |
4648 | const struct ethtool_link_ksettings *cmd) |
4649 | { |
	struct mvneta_port *pp = netdev_priv(ndev);
4651 | |
4652 | return phylink_ethtool_ksettings_set(pp->phylink, cmd); |
4653 | } |
4654 | |
4655 | /* Get link ksettings for ethtools */ |
4656 | static int |
4657 | mvneta_ethtool_get_link_ksettings(struct net_device *ndev, |
4658 | struct ethtool_link_ksettings *cmd) |
4659 | { |
	struct mvneta_port *pp = netdev_priv(ndev);
4661 | |
4662 | return phylink_ethtool_ksettings_get(pp->phylink, cmd); |
4663 | } |
4664 | |
4665 | static int mvneta_ethtool_nway_reset(struct net_device *dev) |
4666 | { |
4667 | struct mvneta_port *pp = netdev_priv(dev); |
4668 | |
4669 | return phylink_ethtool_nway_reset(pp->phylink); |
4670 | } |
4671 | |
4672 | /* Set interrupt coalescing for ethtools */ |
4673 | static int |
4674 | mvneta_ethtool_set_coalesce(struct net_device *dev, |
4675 | struct ethtool_coalesce *c, |
4676 | struct kernel_ethtool_coalesce *kernel_coal, |
4677 | struct netlink_ext_ack *extack) |
4678 | { |
4679 | struct mvneta_port *pp = netdev_priv(dev); |
4680 | int queue; |
4681 | |
4682 | for (queue = 0; queue < rxq_number; queue++) { |
4683 | struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; |
4684 | rxq->time_coal = c->rx_coalesce_usecs; |
4685 | rxq->pkts_coal = c->rx_max_coalesced_frames; |
		mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
		mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
4688 | } |
4689 | |
4690 | for (queue = 0; queue < txq_number; queue++) { |
4691 | struct mvneta_tx_queue *txq = &pp->txqs[queue]; |
4692 | txq->done_pkts_coal = c->tx_max_coalesced_frames; |
		mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
4694 | } |
4695 | |
4696 | return 0; |
4697 | } |
4698 | |
4699 | /* get coalescing for ethtools */ |
4700 | static int |
4701 | mvneta_ethtool_get_coalesce(struct net_device *dev, |
4702 | struct ethtool_coalesce *c, |
4703 | struct kernel_ethtool_coalesce *kernel_coal, |
4704 | struct netlink_ext_ack *extack) |
4705 | { |
4706 | struct mvneta_port *pp = netdev_priv(dev); |
4707 | |
4708 | c->rx_coalesce_usecs = pp->rxqs[0].time_coal; |
4709 | c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal; |
4710 | |
4711 | c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal; |
4712 | return 0; |
4713 | } |
4714 | |
4715 | |
4716 | static void mvneta_ethtool_get_drvinfo(struct net_device *dev, |
4717 | struct ethtool_drvinfo *drvinfo) |
4718 | { |
4719 | strscpy(drvinfo->driver, MVNETA_DRIVER_NAME, |
4720 | sizeof(drvinfo->driver)); |
4721 | strscpy(drvinfo->version, MVNETA_DRIVER_VERSION, |
4722 | sizeof(drvinfo->version)); |
4723 | strscpy(drvinfo->bus_info, dev_name(&dev->dev), |
4724 | sizeof(drvinfo->bus_info)); |
4725 | } |
4726 | |
4727 | |
4728 | static void |
4729 | mvneta_ethtool_get_ringparam(struct net_device *netdev, |
4730 | struct ethtool_ringparam *ring, |
4731 | struct kernel_ethtool_ringparam *kernel_ring, |
4732 | struct netlink_ext_ack *extack) |
4733 | { |
	struct mvneta_port *pp = netdev_priv(netdev);
4735 | |
4736 | ring->rx_max_pending = MVNETA_MAX_RXD; |
4737 | ring->tx_max_pending = MVNETA_MAX_TXD; |
4738 | ring->rx_pending = pp->rx_ring_size; |
4739 | ring->tx_pending = pp->tx_ring_size; |
4740 | } |
4741 | |
4742 | static int |
4743 | mvneta_ethtool_set_ringparam(struct net_device *dev, |
4744 | struct ethtool_ringparam *ring, |
4745 | struct kernel_ethtool_ringparam *kernel_ring, |
4746 | struct netlink_ext_ack *extack) |
4747 | { |
4748 | struct mvneta_port *pp = netdev_priv(dev); |
4749 | |
4750 | if ((ring->rx_pending == 0) || (ring->tx_pending == 0)) |
4751 | return -EINVAL; |
4752 | pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ? |
4753 | ring->rx_pending : MVNETA_MAX_RXD; |
4754 | |
4755 | pp->tx_ring_size = clamp_t(u16, ring->tx_pending, |
4756 | MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD); |
4757 | if (pp->tx_ring_size != ring->tx_pending) |
		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
			    pp->tx_ring_size, ring->tx_pending);
4760 | |
4761 | if (netif_running(dev)) { |
4762 | mvneta_stop(dev); |
4763 | if (mvneta_open(dev)) { |
			netdev_err(dev,
				   "error on opening device after ring param change\n");
4766 | return -ENOMEM; |
4767 | } |
4768 | } |
4769 | |
4770 | return 0; |
4771 | } |
4772 | |
4773 | static void mvneta_ethtool_get_pauseparam(struct net_device *dev, |
4774 | struct ethtool_pauseparam *pause) |
4775 | { |
4776 | struct mvneta_port *pp = netdev_priv(dev); |
4777 | |
4778 | phylink_ethtool_get_pauseparam(pp->phylink, pause); |
4779 | } |
4780 | |
4781 | static int mvneta_ethtool_set_pauseparam(struct net_device *dev, |
4782 | struct ethtool_pauseparam *pause) |
4783 | { |
4784 | struct mvneta_port *pp = netdev_priv(dev); |
4785 | |
4786 | return phylink_ethtool_set_pauseparam(pp->phylink, pause); |
4787 | } |
4788 | |
4789 | static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset, |
4790 | u8 *data) |
4791 | { |
4792 | if (sset == ETH_SS_STATS) { |
		struct mvneta_port *pp = netdev_priv(netdev);
4794 | int i; |
4795 | |
4796 | for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) |
4797 | memcpy(data + i * ETH_GSTRING_LEN, |
4798 | mvneta_statistics[i].name, ETH_GSTRING_LEN); |
4799 | |
4800 | if (!pp->bm_priv) { |
4801 | data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics); |
4802 | page_pool_ethtool_stats_get_strings(data); |
4803 | } |
4804 | } |
4805 | } |
4806 | |
4807 | static void |
4808 | mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp, |
4809 | struct mvneta_ethtool_stats *es) |
4810 | { |
4811 | unsigned int start; |
4812 | int cpu; |
4813 | |
4814 | for_each_possible_cpu(cpu) { |
4815 | struct mvneta_pcpu_stats *stats; |
4816 | u64 skb_alloc_error; |
4817 | u64 refill_error; |
4818 | u64 xdp_redirect; |
4819 | u64 xdp_xmit_err; |
4820 | u64 xdp_tx_err; |
4821 | u64 xdp_pass; |
4822 | u64 xdp_drop; |
4823 | u64 xdp_xmit; |
4824 | u64 xdp_tx; |
4825 | |
4826 | stats = per_cpu_ptr(pp->stats, cpu); |
4827 | do { |
			start = u64_stats_fetch_begin(&stats->syncp);
4829 | skb_alloc_error = stats->es.skb_alloc_error; |
4830 | refill_error = stats->es.refill_error; |
4831 | xdp_redirect = stats->es.ps.xdp_redirect; |
4832 | xdp_pass = stats->es.ps.xdp_pass; |
4833 | xdp_drop = stats->es.ps.xdp_drop; |
4834 | xdp_xmit = stats->es.ps.xdp_xmit; |
4835 | xdp_xmit_err = stats->es.ps.xdp_xmit_err; |
4836 | xdp_tx = stats->es.ps.xdp_tx; |
4837 | xdp_tx_err = stats->es.ps.xdp_tx_err; |
		} while (u64_stats_fetch_retry(&stats->syncp, start));
4839 | |
4840 | es->skb_alloc_error += skb_alloc_error; |
4841 | es->refill_error += refill_error; |
4842 | es->ps.xdp_redirect += xdp_redirect; |
4843 | es->ps.xdp_pass += xdp_pass; |
4844 | es->ps.xdp_drop += xdp_drop; |
4845 | es->ps.xdp_xmit += xdp_xmit; |
4846 | es->ps.xdp_xmit_err += xdp_xmit_err; |
4847 | es->ps.xdp_tx += xdp_tx; |
4848 | es->ps.xdp_tx_err += xdp_tx_err; |
4849 | } |
4850 | } |
4851 | |
4852 | static void mvneta_ethtool_update_stats(struct mvneta_port *pp) |
4853 | { |
4854 | struct mvneta_ethtool_stats stats = {}; |
4855 | const struct mvneta_statistic *s; |
4856 | void __iomem *base = pp->base; |
4857 | u32 high, low; |
4858 | u64 val; |
4859 | int i; |
4860 | |
	mvneta_ethtool_update_pcpu_stats(pp, &stats);
4862 | for (i = 0, s = mvneta_statistics; |
4863 | s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics); |
4864 | s++, i++) { |
4865 | switch (s->type) { |
4866 | case T_REG_32: |
4867 | val = readl_relaxed(base + s->offset); |
4868 | pp->ethtool_stats[i] += val; |
4869 | break; |
4870 | case T_REG_64: |
			/* Docs say to read the low 32 bits first, then the high */
4872 | low = readl_relaxed(base + s->offset); |
4873 | high = readl_relaxed(base + s->offset + 4); |
4874 | val = (u64)high << 32 | low; |
4875 | pp->ethtool_stats[i] += val; |
4876 | break; |
4877 | case T_SW: |
4878 | switch (s->offset) { |
4879 | case ETHTOOL_STAT_EEE_WAKEUP: |
4880 | val = phylink_get_eee_err(pp->phylink); |
4881 | pp->ethtool_stats[i] += val; |
4882 | break; |
4883 | case ETHTOOL_STAT_SKB_ALLOC_ERR: |
4884 | pp->ethtool_stats[i] = stats.skb_alloc_error; |
4885 | break; |
4886 | case ETHTOOL_STAT_REFILL_ERR: |
4887 | pp->ethtool_stats[i] = stats.refill_error; |
4888 | break; |
4889 | case ETHTOOL_XDP_REDIRECT: |
4890 | pp->ethtool_stats[i] = stats.ps.xdp_redirect; |
4891 | break; |
4892 | case ETHTOOL_XDP_PASS: |
4893 | pp->ethtool_stats[i] = stats.ps.xdp_pass; |
4894 | break; |
4895 | case ETHTOOL_XDP_DROP: |
4896 | pp->ethtool_stats[i] = stats.ps.xdp_drop; |
4897 | break; |
4898 | case ETHTOOL_XDP_TX: |
4899 | pp->ethtool_stats[i] = stats.ps.xdp_tx; |
4900 | break; |
4901 | case ETHTOOL_XDP_TX_ERR: |
4902 | pp->ethtool_stats[i] = stats.ps.xdp_tx_err; |
4903 | break; |
4904 | case ETHTOOL_XDP_XMIT: |
4905 | pp->ethtool_stats[i] = stats.ps.xdp_xmit; |
4906 | break; |
4907 | case ETHTOOL_XDP_XMIT_ERR: |
4908 | pp->ethtool_stats[i] = stats.ps.xdp_xmit_err; |
4909 | break; |
4910 | } |
4911 | break; |
4912 | } |
4913 | } |
4914 | } |
4915 | |
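/* Aggregate page_pool statistics over all RX queues. Only meaningful with
 * software buffer management; callers skip this when a hardware buffer
 * manager (pp->bm_priv) is in use.
 */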
4916 | static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data) |
4917 | { |
4918 | struct page_pool_stats stats = {}; |
4919 | int i; |
4920 | |
4921 | for (i = 0; i < rxq_number; i++) { |
4922 | if (pp->rxqs[i].page_pool) |
			page_pool_get_stats(pp->rxqs[i].page_pool, &stats);
4924 | } |
4925 | |
	page_pool_ethtool_stats_get(data, &stats);
4927 | } |
4928 | |
4929 | static void mvneta_ethtool_get_stats(struct net_device *dev, |
4930 | struct ethtool_stats *stats, u64 *data) |
4931 | { |
4932 | struct mvneta_port *pp = netdev_priv(dev); |
4933 | int i; |
4934 | |
4935 | mvneta_ethtool_update_stats(pp); |
4936 | |
4937 | for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++) |
4938 | *data++ = pp->ethtool_stats[i]; |
4939 | |
4940 | if (!pp->bm_priv) |
4941 | mvneta_ethtool_pp_stats(pp, data); |
4942 | } |
4943 | |
4944 | static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset) |
4945 | { |
4946 | if (sset == ETH_SS_STATS) { |
4947 | int count = ARRAY_SIZE(mvneta_statistics); |
4948 | struct mvneta_port *pp = netdev_priv(dev); |
4949 | |
4950 | if (!pp->bm_priv) |
4951 | count += page_pool_ethtool_stats_get_count(); |
4952 | |
4953 | return count; |
4954 | } |
4955 | |
4956 | return -EOPNOTSUPP; |
4957 | } |
4958 | |
4959 | static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev) |
4960 | { |
4961 | return MVNETA_RSS_LU_TABLE_SIZE; |
4962 | } |
4963 | |
4964 | static int mvneta_ethtool_get_rxnfc(struct net_device *dev, |
4965 | struct ethtool_rxnfc *info, |
4966 | u32 *rules __always_unused) |
4967 | { |
4968 | switch (info->cmd) { |
4969 | case ETHTOOL_GRXRINGS: |
4970 | info->data = rxq_number; |
4971 | return 0; |
4972 | case ETHTOOL_GRXFH: |
4973 | return -EOPNOTSUPP; |
4974 | default: |
4975 | return -EOPNOTSUPP; |
4976 | } |
4977 | } |
4978 | |
static int mvneta_config_rss(struct mvneta_port *pp)
4980 | { |
4981 | int cpu; |
4982 | u32 val; |
4983 | |
	netif_tx_stop_all_queues(pp->dev);

	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
4987 | |
4988 | if (!pp->neta_armada3700) { |
4989 | /* We have to synchronise on the napi of each CPU */ |
4990 | for_each_online_cpu(cpu) { |
4991 | struct mvneta_pcpu_port *pcpu_port = |
4992 | per_cpu_ptr(pp->ports, cpu); |
4993 | |
			napi_synchronize(&pcpu_port->napi);
			napi_disable(&pcpu_port->napi);
4996 | } |
4997 | } else { |
		napi_synchronize(&pp->napi);
		napi_disable(&pp->napi);
5000 | } |
5001 | |
5002 | pp->rxq_def = pp->indir[0]; |
5003 | |
5004 | /* Update unicast mapping */ |
	mvneta_set_rx_mode(pp->dev);
5006 | |
	/* Update the port config register to match the new default RXQ */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);
5010 | |
5011 | /* Update the elected CPU matching the new rxq_def */ |
	spin_lock(&pp->lock);
	mvneta_percpu_elect(pp);
	spin_unlock(&pp->lock);
5015 | |
5016 | if (!pp->neta_armada3700) { |
5017 | /* We have to synchronise on the napi of each CPU */ |
5018 | for_each_online_cpu(cpu) { |
5019 | struct mvneta_pcpu_port *pcpu_port = |
5020 | per_cpu_ptr(pp->ports, cpu); |
5021 | |
			napi_enable(&pcpu_port->napi);
5023 | } |
5024 | } else { |
		napi_enable(&pp->napi);
5026 | } |
5027 | |
	netif_tx_start_all_queues(pp->dev);
5029 | |
5030 | return 0; |
5031 | } |
5032 | |
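/* A usage sketch (hedged; "eth0" and the weights are illustrative): an
 * indirection table programmed via something like
 *
 *   ethtool -X eth0 weight 0 0 1
 *
 * is copied into pp->indir[], and mvneta_config_rss() then promotes
 * pp->indir[0] to the new default RX queue. Changing the hash key, or
 * selecting a hash function other than toeplitz, is rejected below.
 */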
5033 | static int mvneta_ethtool_set_rxfh(struct net_device *dev, |
5034 | struct ethtool_rxfh_param *rxfh, |
5035 | struct netlink_ext_ack *extack) |
5036 | { |
5037 | struct mvneta_port *pp = netdev_priv(dev); |
5038 | |
5039 | /* Current code for Armada 3700 doesn't support RSS features yet */ |
5040 | if (pp->neta_armada3700) |
5041 | return -EOPNOTSUPP; |
5042 | |
5043 | /* We require at least one supported parameter to be changed |
5044 | * and no change in any of the unsupported parameters |
5045 | */ |
5046 | if (rxfh->key || |
5047 | (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && |
5048 | rxfh->hfunc != ETH_RSS_HASH_TOP)) |
5049 | return -EOPNOTSUPP; |
5050 | |
5051 | if (!rxfh->indir) |
5052 | return 0; |
5053 | |
5054 | memcpy(pp->indir, rxfh->indir, MVNETA_RSS_LU_TABLE_SIZE); |
5055 | |
5056 | return mvneta_config_rss(pp); |
5057 | } |
5058 | |
5059 | static int mvneta_ethtool_get_rxfh(struct net_device *dev, |
5060 | struct ethtool_rxfh_param *rxfh) |
5061 | { |
5062 | struct mvneta_port *pp = netdev_priv(dev); |
5063 | |
5064 | /* Current code for Armada 3700 doesn't support RSS features yet */ |
5065 | if (pp->neta_armada3700) |
5066 | return -EOPNOTSUPP; |
5067 | |
5068 | rxfh->hfunc = ETH_RSS_HASH_TOP; |
5069 | |
5070 | if (!rxfh->indir) |
5071 | return 0; |
5072 | |
5073 | memcpy(rxfh->indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE); |
5074 | |
5075 | return 0; |
5076 | } |
5077 | |
5078 | static void mvneta_ethtool_get_wol(struct net_device *dev, |
5079 | struct ethtool_wolinfo *wol) |
5080 | { |
5081 | struct mvneta_port *pp = netdev_priv(dev); |
5082 | |
5083 | phylink_ethtool_get_wol(pp->phylink, wol); |
5084 | } |
5085 | |
5086 | static int mvneta_ethtool_set_wol(struct net_device *dev, |
5087 | struct ethtool_wolinfo *wol) |
5088 | { |
5089 | struct mvneta_port *pp = netdev_priv(dev); |
5090 | int ret; |
5091 | |
5092 | ret = phylink_ethtool_set_wol(pp->phylink, wol); |
5093 | if (!ret) |
		device_set_wakeup_enable(&dev->dev, !!wol->wolopts);
5095 | |
5096 | return ret; |
5097 | } |
5098 | |
5099 | static int mvneta_ethtool_get_eee(struct net_device *dev, |
5100 | struct ethtool_keee *eee) |
5101 | { |
5102 | struct mvneta_port *pp = netdev_priv(dev); |
5103 | u32 lpi_ctl0; |
5104 | |
5105 | lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); |
5106 | |
5107 | eee->eee_enabled = pp->eee_enabled; |
5108 | eee->eee_active = pp->eee_active; |
5109 | eee->tx_lpi_enabled = pp->tx_lpi_enabled; |
	/* The LPI timer field occupies bits 15:8 of LPI_CTRL_0 */
	eee->tx_lpi_timer = lpi_ctl0 >> 8;
5111 | |
	return phylink_ethtool_get_eee(pp->phylink, eee);
5113 | } |
5114 | |
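/* A usage sketch (hedged; "eth0" is illustrative):
 *
 *   ethtool --set-eee eth0 eee on tx-lpi on tx-timer 100
 *
 * The tx-timer value is written verbatim into bits 15:8 of LPI_CTRL_0,
 * hence the 255 upper bound enforced below.
 */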
5115 | static int mvneta_ethtool_set_eee(struct net_device *dev, |
5116 | struct ethtool_keee *eee) |
5117 | { |
5118 | struct mvneta_port *pp = netdev_priv(dev); |
5119 | u32 lpi_ctl0; |
5120 | |
5121 | /* The Armada 37x documents do not give limits for this other than |
5122 | * it being an 8-bit register. |
5123 | */ |
5124 | if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255) |
5125 | return -EINVAL; |
5126 | |
5127 | lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); |
5128 | lpi_ctl0 &= ~(0xff << 8); |
5129 | lpi_ctl0 |= eee->tx_lpi_timer << 8; |
	mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);
5131 | |
5132 | pp->eee_enabled = eee->eee_enabled; |
5133 | pp->tx_lpi_enabled = eee->tx_lpi_enabled; |
5134 | |
	mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);
5136 | |
	return phylink_ethtool_set_eee(pp->phylink, eee);
5138 | } |
5139 | |
5140 | static void mvneta_clear_rx_prio_map(struct mvneta_port *pp) |
5141 | { |
	mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, 0);
5143 | } |
5144 | |
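/* Steer VLAN priority "pri" to RX queue "rxq": each priority owns a 3-bit
 * field in the VLAN_PRIO_TO_RXQ register (hence the 0x7 mask), which is
 * cleared and rewritten read-modify-write style.
 */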
5145 | static void mvneta_map_vlan_prio_to_rxq(struct mvneta_port *pp, u8 pri, u8 rxq) |
5146 | { |
5147 | u32 val = mvreg_read(pp, MVNETA_VLAN_PRIO_TO_RXQ); |
5148 | |
5149 | val &= ~MVNETA_VLAN_PRIO_RXQ_MAP(pri, 0x7); |
5150 | val |= MVNETA_VLAN_PRIO_RXQ_MAP(pri, rxq); |
5151 | |
	mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val);
5153 | } |
5154 | |
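/* Switch the egress shaper to per-queue token buckets: the base refill
 * period (in ns) is converted into core-clock cycles, and clearing the
 * v1 selector/enable bits in TXQ_CMD1 selects bandwidth-limit algorithm
 * v3, as noted below.
 */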
5155 | static int mvneta_enable_per_queue_rate_limit(struct mvneta_port *pp) |
5156 | { |
5157 | unsigned long core_clk_rate; |
5158 | u32 refill_cycles; |
5159 | u32 val; |
5160 | |
	core_clk_rate = clk_get_rate(pp->clk);
5162 | if (!core_clk_rate) |
5163 | return -EINVAL; |
5164 | |
5165 | refill_cycles = MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS / |
5166 | (NSEC_PER_SEC / core_clk_rate); |
5167 | |
5168 | if (refill_cycles > MVNETA_REFILL_MAX_NUM_CLK) |
5169 | return -EINVAL; |
5170 | |
5171 | /* Enable bw limit algorithm version 3 */ |
5172 | val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG); |
5173 | val &= ~(MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN); |
	mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val);
5175 | |
5176 | /* Set the base refill rate */ |
	mvreg_write(pp, MVNETA_REFILL_NUM_CLK_REG, refill_cycles);
5178 | |
5179 | return 0; |
5180 | } |
5181 | |
5182 | static void mvneta_disable_per_queue_rate_limit(struct mvneta_port *pp) |
5183 | { |
5184 | u32 val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG); |
5185 | |
5186 | val |= (MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN); |
	mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val);
5188 | } |
5189 | |
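/* Worked example (hedged; the resolution is whatever
 * MVNETA_TXQ_RATE_LIMIT_RESOLUTION defines, not restated here): a request
 * of max_rate = 12500000 Bps becomes 100000000 bps after the *8 below;
 * div_u64_rem() must then divide it exactly by the resolution, otherwise
 * (or if the quotient is 0 or exceeds the bucket-refill maximum) the
 * request is rejected with -EINVAL. min_rate is not supported at all.
 */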
5190 | static int mvneta_setup_queue_rates(struct mvneta_port *pp, int queue, |
5191 | u64 min_rate, u64 max_rate) |
5192 | { |
5193 | u32 refill_val, rem; |
5194 | u32 val = 0; |
5195 | |
	/* Convert from Bps to bps */
5197 | max_rate *= 8; |
5198 | |
5199 | if (min_rate) |
5200 | return -EINVAL; |
5201 | |
	refill_val = div_u64_rem(max_rate, MVNETA_TXQ_RATE_LIMIT_RESOLUTION,
				 &rem);
5204 | |
5205 | if (rem || !refill_val || |
5206 | refill_val > MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX) |
5207 | return -EINVAL; |
5208 | |
5209 | val = refill_val; |
5210 | val |= (MVNETA_TXQ_BUCKET_REFILL_PERIOD << |
5211 | MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT); |
5212 | |
	mvreg_write(pp, MVNETA_TXQ_BUCKET_REFILL_REG(queue), val);
5214 | |
5215 | return 0; |
5216 | } |
5217 | |
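/* A usage sketch (hedged; "eth0" and all numbers are illustrative):
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 2 \
 *       map 0 0 0 0 1 1 1 1 queues 1@0 1@1 \
 *       hw 1 mode channel shaper bw_rlimit max_rate 100mbit 200mbit
 *
 * Each TC's VLAN priorities are steered to its RX queues via
 * mvneta_map_vlan_prio_to_rxq(), and with the bw_rlimit shaper every TX
 * queue in a TC gets that TC's max_rate; min_rate offload is rejected.
 */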
5218 | static int mvneta_setup_mqprio(struct net_device *dev, |
5219 | struct tc_mqprio_qopt_offload *mqprio) |
5220 | { |
5221 | struct mvneta_port *pp = netdev_priv(dev); |
5222 | int rxq, txq, tc, ret; |
5223 | u8 num_tc; |
5224 | |
5225 | if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS) |
5226 | return 0; |
5227 | |
5228 | num_tc = mqprio->qopt.num_tc; |
5229 | |
5230 | if (num_tc > rxq_number) |
5231 | return -EINVAL; |
5232 | |
5233 | mvneta_clear_rx_prio_map(pp); |
5234 | |
5235 | if (!num_tc) { |
5236 | mvneta_disable_per_queue_rate_limit(pp); |
5237 | netdev_reset_tc(dev); |
5238 | return 0; |
5239 | } |
5240 | |
	netdev_set_num_tc(dev, mqprio->qopt.num_tc);
5242 | |
5243 | for (tc = 0; tc < mqprio->qopt.num_tc; tc++) { |
		netdev_set_tc_queue(dev, tc, mqprio->qopt.count[tc],
				    mqprio->qopt.offset[tc]);
5246 | |
5247 | for (rxq = mqprio->qopt.offset[tc]; |
5248 | rxq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc]; |
5249 | rxq++) { |
5250 | if (rxq >= rxq_number) |
5251 | return -EINVAL; |
5252 | |
			mvneta_map_vlan_prio_to_rxq(pp, tc, rxq);
5254 | } |
5255 | } |
5256 | |
5257 | if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) { |
5258 | mvneta_disable_per_queue_rate_limit(pp); |
5259 | return 0; |
5260 | } |
5261 | |
5262 | if (mqprio->qopt.num_tc > txq_number) |
5263 | return -EINVAL; |
5264 | |
5265 | ret = mvneta_enable_per_queue_rate_limit(pp); |
5266 | if (ret) |
5267 | return ret; |
5268 | |
5269 | for (tc = 0; tc < mqprio->qopt.num_tc; tc++) { |
5270 | for (txq = mqprio->qopt.offset[tc]; |
5271 | txq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc]; |
5272 | txq++) { |
5273 | if (txq >= txq_number) |
5274 | return -EINVAL; |
5275 | |
			ret = mvneta_setup_queue_rates(pp, txq,
						       mqprio->min_rate[tc],
						       mqprio->max_rate[tc]);
5279 | if (ret) |
5280 | return ret; |
5281 | } |
5282 | } |
5283 | |
5284 | return 0; |
5285 | } |
5286 | |
5287 | static int mvneta_setup_tc(struct net_device *dev, enum tc_setup_type type, |
5288 | void *type_data) |
5289 | { |
5290 | switch (type) { |
5291 | case TC_SETUP_QDISC_MQPRIO: |
		return mvneta_setup_mqprio(dev, type_data);
5293 | default: |
5294 | return -EOPNOTSUPP; |
5295 | } |
5296 | } |
5297 | |
5298 | static const struct net_device_ops mvneta_netdev_ops = { |
5299 | .ndo_open = mvneta_open, |
5300 | .ndo_stop = mvneta_stop, |
5301 | .ndo_start_xmit = mvneta_tx, |
5302 | .ndo_set_rx_mode = mvneta_set_rx_mode, |
5303 | .ndo_set_mac_address = mvneta_set_mac_addr, |
5304 | .ndo_change_mtu = mvneta_change_mtu, |
5305 | .ndo_fix_features = mvneta_fix_features, |
5306 | .ndo_get_stats64 = mvneta_get_stats64, |
5307 | .ndo_eth_ioctl = mvneta_ioctl, |
5308 | .ndo_bpf = mvneta_xdp, |
5309 | .ndo_xdp_xmit = mvneta_xdp_xmit, |
5310 | .ndo_setup_tc = mvneta_setup_tc, |
5311 | }; |
5312 | |
5313 | static const struct ethtool_ops mvneta_eth_tool_ops = { |
5314 | .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | |
5315 | ETHTOOL_COALESCE_MAX_FRAMES, |
5316 | .nway_reset = mvneta_ethtool_nway_reset, |
5317 | .get_link = ethtool_op_get_link, |
5318 | .set_coalesce = mvneta_ethtool_set_coalesce, |
5319 | .get_coalesce = mvneta_ethtool_get_coalesce, |
5320 | .get_drvinfo = mvneta_ethtool_get_drvinfo, |
5321 | .get_ringparam = mvneta_ethtool_get_ringparam, |
5322 | .set_ringparam = mvneta_ethtool_set_ringparam, |
5323 | .get_pauseparam = mvneta_ethtool_get_pauseparam, |
5324 | .set_pauseparam = mvneta_ethtool_set_pauseparam, |
5325 | .get_strings = mvneta_ethtool_get_strings, |
5326 | .get_ethtool_stats = mvneta_ethtool_get_stats, |
5327 | .get_sset_count = mvneta_ethtool_get_sset_count, |
5328 | .get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size, |
5329 | .get_rxnfc = mvneta_ethtool_get_rxnfc, |
5330 | .get_rxfh = mvneta_ethtool_get_rxfh, |
5331 | .set_rxfh = mvneta_ethtool_set_rxfh, |
5332 | .get_link_ksettings = mvneta_ethtool_get_link_ksettings, |
5333 | .set_link_ksettings = mvneta_ethtool_set_link_ksettings, |
5334 | .get_wol = mvneta_ethtool_get_wol, |
5335 | .set_wol = mvneta_ethtool_set_wol, |
5336 | .get_eee = mvneta_ethtool_get_eee, |
5337 | .set_eee = mvneta_ethtool_set_eee, |
5338 | }; |
5339 | |
5340 | /* Initialize hw */ |
5341 | static int mvneta_init(struct device *dev, struct mvneta_port *pp) |
5342 | { |
5343 | int queue; |
5344 | |
5345 | /* Disable port */ |
5346 | mvneta_port_disable(pp); |
5347 | |
5348 | /* Set port default values */ |
5349 | mvneta_defaults_set(pp); |
5350 | |
	pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL);
5352 | if (!pp->txqs) |
5353 | return -ENOMEM; |
5354 | |
5355 | /* Initialize TX descriptor rings */ |
5356 | for (queue = 0; queue < txq_number; queue++) { |
5357 | struct mvneta_tx_queue *txq = &pp->txqs[queue]; |
5358 | txq->id = queue; |
5359 | txq->size = pp->tx_ring_size; |
5360 | txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS; |
5361 | } |
5362 | |
	pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL);
5364 | if (!pp->rxqs) |
5365 | return -ENOMEM; |
5366 | |
5367 | /* Create Rx descriptor rings */ |
5368 | for (queue = 0; queue < rxq_number; queue++) { |
5369 | struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; |
5370 | rxq->id = queue; |
5371 | rxq->size = pp->rx_ring_size; |
5372 | rxq->pkts_coal = MVNETA_RX_COAL_PKTS; |
5373 | rxq->time_coal = MVNETA_RX_COAL_USEC; |
		rxq->buf_virt_addr
			= devm_kmalloc_array(pp->dev->dev.parent,
					     rxq->size,
					     sizeof(*rxq->buf_virt_addr),
					     GFP_KERNEL);
5379 | if (!rxq->buf_virt_addr) |
5380 | return -ENOMEM; |
5381 | } |
5382 | |
5383 | return 0; |
5384 | } |
5385 | |
/* Platform glue: initialize decoding windows */
5387 | static void mvneta_conf_mbus_windows(struct mvneta_port *pp, |
5388 | const struct mbus_dram_target_info *dram) |
5389 | { |
5390 | u32 win_enable; |
5391 | u32 win_protect; |
5392 | int i; |
5393 | |
5394 | for (i = 0; i < 6; i++) { |
		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
5397 | |
5398 | if (i < 4) |
			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
5400 | } |
5401 | |
5402 | win_enable = 0x3f; |
5403 | win_protect = 0; |
5404 | |
5405 | if (dram) { |
5406 | for (i = 0; i < dram->num_cs; i++) { |
5407 | const struct mbus_dram_window *cs = dram->cs + i; |
5408 | |
			mvreg_write(pp, MVNETA_WIN_BASE(i),
				    (cs->base & 0xffff0000) |
				    (cs->mbus_attr << 8) |
				    dram->mbus_dram_target_id);

			mvreg_write(pp, MVNETA_WIN_SIZE(i),
				    (cs->size - 1) & 0xffff0000);
5416 | |
5417 | win_enable &= ~(1 << i); |
5418 | win_protect |= 3 << (2 * i); |
5419 | } |
5420 | } else { |
5421 | if (pp->neta_ac5) |
			mvreg_write(pp, MVNETA_WIN_BASE(0),
				    (MVNETA_AC5_CNM_DDR_ATTR << 8) |
				    MVNETA_AC5_CNM_DDR_TARGET);
5425 | /* For Armada3700 open default 4GB Mbus window, leaving |
5426 | * arbitration of target/attribute to a different layer |
5427 | * of configuration. |
5428 | */ |
		mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
5430 | win_enable &= ~BIT(0); |
5431 | win_protect = 3; |
5432 | } |
5433 | |
	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
5436 | } |
5437 | |
5438 | /* Power up the port */ |
5439 | static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) |
5440 | { |
5441 | /* MAC Cause register should be cleared */ |
	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
5443 | |
5444 | if (phy_mode != PHY_INTERFACE_MODE_QSGMII && |
5445 | phy_mode != PHY_INTERFACE_MODE_SGMII && |
	    !phy_interface_mode_is_8023z(phy_mode) &&
	    !phy_interface_mode_is_rgmii(phy_mode))
5448 | return -EINVAL; |
5449 | |
5450 | return 0; |
5451 | } |
5452 | |
5453 | /* Device initialization routine */ |
5454 | static int mvneta_probe(struct platform_device *pdev) |
5455 | { |
5456 | struct device_node *dn = pdev->dev.of_node; |
5457 | struct device_node *bm_node; |
5458 | struct mvneta_port *pp; |
5459 | struct net_device *dev; |
5460 | struct phylink *phylink; |
5461 | struct phy *comphy; |
5462 | char hw_mac_addr[ETH_ALEN]; |
5463 | phy_interface_t phy_mode; |
5464 | const char *mac_from; |
5465 | int tx_csum_limit; |
5466 | int err; |
5467 | int cpu; |
5468 | |
	dev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct mvneta_port),
				      txq_number, rxq_number);
5471 | if (!dev) |
5472 | return -ENOMEM; |
5473 | |
5474 | dev->tx_queue_len = MVNETA_MAX_TXD; |
5475 | dev->watchdog_timeo = 5 * HZ; |
5476 | dev->netdev_ops = &mvneta_netdev_ops; |
5477 | dev->ethtool_ops = &mvneta_eth_tool_ops; |
5478 | |
5479 | pp = netdev_priv(dev); |
5480 | spin_lock_init(&pp->lock); |
5481 | pp->dn = dn; |
5482 | |
5483 | pp->rxq_def = rxq_def; |
5484 | pp->indir[0] = rxq_def; |
5485 | |
	err = of_get_phy_mode(dn, &phy_mode);
	if (err) {
		dev_err(&pdev->dev, "incorrect phy-mode\n");
5489 | return err; |
5490 | } |
5491 | |
5492 | pp->phy_interface = phy_mode; |
5493 | |
	comphy = devm_of_phy_get(&pdev->dev, dn, NULL);
	if (comphy == ERR_PTR(-EPROBE_DEFER))
		return -EPROBE_DEFER;

	if (IS_ERR(comphy))
5499 | comphy = NULL; |
5500 | |
5501 | pp->comphy = comphy; |
5502 | |
	pp->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pp->base))
		return PTR_ERR(pp->base);
5506 | |
5507 | /* Get special SoC configurations */ |
	if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
5509 | pp->neta_armada3700 = true; |
	if (of_device_is_compatible(dn, "marvell,armada-ac5-neta")) {
5511 | pp->neta_armada3700 = true; |
5512 | pp->neta_ac5 = true; |
5513 | } |
5514 | |
	dev->irq = irq_of_parse_and_map(dn, 0);
5516 | if (dev->irq == 0) |
5517 | return -EINVAL; |
5518 | |
	pp->clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(pp->clk))
		pp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pp->clk)) {
		err = PTR_ERR(pp->clk);
5524 | goto err_free_irq; |
5525 | } |
5526 | |
	clk_prepare_enable(pp->clk);
5528 | |
	pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
	if (!IS_ERR(pp->clk_bus))
		clk_prepare_enable(pp->clk_bus);
5532 | |
5533 | pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops; |
5534 | pp->phylink_pcs.neg_mode = true; |
5535 | |
5536 | pp->phylink_config.dev = &dev->dev; |
5537 | pp->phylink_config.type = PHYLINK_NETDEV; |
5538 | pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | |
5539 | MAC_100 | MAC_1000FD | MAC_2500FD; |
5540 | |
	phy_interface_set_rgmii(pp->phylink_config.supported_interfaces);
5542 | __set_bit(PHY_INTERFACE_MODE_QSGMII, |
5543 | pp->phylink_config.supported_interfaces); |
5544 | if (comphy) { |
5545 | /* If a COMPHY is present, we can support any of the serdes |
5546 | * modes and switch between them. |
5547 | */ |
5548 | __set_bit(PHY_INTERFACE_MODE_SGMII, |
5549 | pp->phylink_config.supported_interfaces); |
5550 | __set_bit(PHY_INTERFACE_MODE_1000BASEX, |
5551 | pp->phylink_config.supported_interfaces); |
5552 | __set_bit(PHY_INTERFACE_MODE_2500BASEX, |
5553 | pp->phylink_config.supported_interfaces); |
5554 | } else if (phy_mode == PHY_INTERFACE_MODE_2500BASEX) { |
5555 | /* No COMPHY, with only 2500BASE-X mode supported */ |
5556 | __set_bit(PHY_INTERFACE_MODE_2500BASEX, |
5557 | pp->phylink_config.supported_interfaces); |
5558 | } else if (phy_mode == PHY_INTERFACE_MODE_1000BASEX || |
5559 | phy_mode == PHY_INTERFACE_MODE_SGMII) { |
5560 | /* No COMPHY, we can switch between 1000BASE-X and SGMII */ |
5561 | __set_bit(PHY_INTERFACE_MODE_1000BASEX, |
5562 | pp->phylink_config.supported_interfaces); |
5563 | __set_bit(PHY_INTERFACE_MODE_SGMII, |
5564 | pp->phylink_config.supported_interfaces); |
5565 | } |
5566 | |
5567 | phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode, |
5568 | phy_mode, &mvneta_phylink_ops); |
	if (IS_ERR(phylink)) {
		err = PTR_ERR(phylink);
5571 | goto err_clk; |
5572 | } |
5573 | |
5574 | pp->phylink = phylink; |
5575 | |
5576 | /* Alloc per-cpu port structure */ |
5577 | pp->ports = alloc_percpu(struct mvneta_pcpu_port); |
5578 | if (!pp->ports) { |
5579 | err = -ENOMEM; |
5580 | goto err_free_phylink; |
5581 | } |
5582 | |
5583 | /* Alloc per-cpu stats */ |
5584 | pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats); |
5585 | if (!pp->stats) { |
5586 | err = -ENOMEM; |
5587 | goto err_free_ports; |
5588 | } |
5589 | |
	err = of_get_ethdev_address(dn, dev);
	if (!err) {
		mac_from = "device tree";
	} else {
		mvneta_get_mac_addr(pp, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			mac_from = "hardware";
			eth_hw_addr_set(dev, hw_mac_addr);
		} else {
			mac_from = "random";
			eth_hw_addr_random(dev);
5601 | } |
5602 | } |
5603 | |
	if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
5605 | if (tx_csum_limit < 0 || |
5606 | tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) { |
5607 | tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE; |
5608 | dev_info(&pdev->dev, |
5609 | "Wrong TX csum limit in DT, set to %dB\n" , |
5610 | MVNETA_TX_CSUM_DEF_SIZE); |
5611 | } |
	} else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
5613 | tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE; |
5614 | } else { |
5615 | tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE; |
5616 | } |
5617 | |
5618 | pp->tx_csum_limit = tx_csum_limit; |
5619 | |
5620 | pp->dram_target_info = mv_mbus_dram_info(); |
	/* Armada3700 requires a default Mbus window configuration to be
	 * set, but without using a filled mbus_dram_target_info
	 * structure.
	 */
5625 | if (pp->dram_target_info || pp->neta_armada3700) |
		mvneta_conf_mbus_windows(pp, pp->dram_target_info);
5627 | |
5628 | pp->tx_ring_size = MVNETA_MAX_TXD; |
5629 | pp->rx_ring_size = MVNETA_MAX_RXD; |
5630 | |
5631 | pp->dev = dev; |
5632 | SET_NETDEV_DEV(dev, &pdev->dev); |
5633 | |
5634 | pp->id = global_port_id++; |
5635 | |
5636 | /* Obtain access to BM resources if enabled and already initialized */ |
	bm_node = of_parse_phandle(dn, "buffer-manager", 0);
	if (bm_node) {
		pp->bm_priv = mvneta_bm_get(bm_node);
		if (pp->bm_priv) {
			err = mvneta_bm_port_init(pdev, pp);
			if (err < 0) {
				dev_info(&pdev->dev,
					 "use SW buffer management\n");
				mvneta_bm_put(pp->bm_priv);
5646 | pp->bm_priv = NULL; |
5647 | } |
5648 | } |
		/* Set RX packet offset correction for platforms whose
		 * NET_SKB_PAD exceeds 64B. It should be 64B for 64-bit
		 * platforms and 0B for 32-bit ones.
		 */
5653 | pp->rx_offset_correction = max(0, |
5654 | NET_SKB_PAD - |
5655 | MVNETA_RX_PKT_OFFSET_CORRECTION); |
5656 | } |
	of_node_put(bm_node);
5658 | |
5659 | /* sw buffer management */ |
5660 | if (!pp->bm_priv) |
5661 | pp->rx_offset_correction = MVNETA_SKB_HEADROOM; |
5662 | |
	err = mvneta_init(&pdev->dev, pp);
5664 | if (err < 0) |
5665 | goto err_netdev; |
5666 | |
	err = mvneta_port_power_up(pp, pp->phy_interface);
	if (err < 0) {
		dev_err(&pdev->dev, "can't power up port\n");
5670 | goto err_netdev; |
5671 | } |
5672 | |
5673 | /* Armada3700 network controller does not support per-cpu |
5674 | * operation, so only single NAPI should be initialized. |
5675 | */ |
5676 | if (pp->neta_armada3700) { |
		netif_napi_add(dev, &pp->napi, mvneta_poll);
5678 | } else { |
5679 | for_each_present_cpu(cpu) { |
5680 | struct mvneta_pcpu_port *port = |
5681 | per_cpu_ptr(pp->ports, cpu); |
5682 | |
			netif_napi_add(dev, &port->napi, mvneta_poll);
5684 | port->pp = pp; |
5685 | } |
5686 | } |
5687 | |
5688 | dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
5689 | NETIF_F_TSO | NETIF_F_RXCSUM; |
5690 | dev->hw_features |= dev->features; |
5691 | dev->vlan_features |= dev->features; |
5692 | if (!pp->bm_priv) |
5693 | dev->xdp_features = NETDEV_XDP_ACT_BASIC | |
5694 | NETDEV_XDP_ACT_REDIRECT | |
5695 | NETDEV_XDP_ACT_NDO_XMIT | |
5696 | NETDEV_XDP_ACT_RX_SG | |
5697 | NETDEV_XDP_ACT_NDO_XMIT_SG; |
5698 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; |
5699 | netif_set_tso_max_segs(dev, MVNETA_MAX_TSO_SEGS); |
5700 | |
5701 | /* MTU range: 68 - 9676 */ |
5702 | dev->min_mtu = ETH_MIN_MTU; |
5703 | /* 9676 == 9700 - 20 and rounding to 8 */ |
5704 | dev->max_mtu = 9676; |
5705 | |
5706 | err = register_netdev(dev); |
5707 | if (err < 0) { |
		dev_err(&pdev->dev, "failed to register\n");
5709 | goto err_netdev; |
5710 | } |
5711 | |
	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
5713 | dev->dev_addr); |
5714 | |
	platform_set_drvdata(pdev, pp->dev);
5716 | |
5717 | return 0; |
5718 | |
5719 | err_netdev: |
5720 | if (pp->bm_priv) { |
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
				       1 << pp->id);
		mvneta_bm_put(pp->bm_priv);
5725 | } |
	free_percpu(pp->stats);
5727 | err_free_ports: |
	free_percpu(pp->ports);
5729 | err_free_phylink: |
5730 | if (pp->phylink) |
5731 | phylink_destroy(pp->phylink); |
5732 | err_clk: |
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
5735 | err_free_irq: |
	irq_dispose_mapping(dev->irq);
5737 | return err; |
5738 | } |
5739 | |
5740 | /* Device removal routine */ |
5741 | static void mvneta_remove(struct platform_device *pdev) |
5742 | { |
5743 | struct net_device *dev = platform_get_drvdata(pdev); |
5744 | struct mvneta_port *pp = netdev_priv(dev); |
5745 | |
5746 | unregister_netdev(dev); |
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
	free_percpu(pp->ports);
	free_percpu(pp->stats);
	irq_dispose_mapping(dev->irq);
5752 | phylink_destroy(pp->phylink); |
5753 | |
5754 | if (pp->bm_priv) { |
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
				       1 << pp->id);
		mvneta_bm_put(pp->bm_priv);
5759 | } |
5760 | } |
5761 | |
5762 | #ifdef CONFIG_PM_SLEEP |
5763 | static int mvneta_suspend(struct device *device) |
5764 | { |
5765 | int queue; |
	struct net_device *dev = dev_get_drvdata(device);
5767 | struct mvneta_port *pp = netdev_priv(dev); |
5768 | |
5769 | if (!netif_running(dev)) |
5770 | goto clean_exit; |
5771 | |
5772 | if (!pp->neta_armada3700) { |
		spin_lock(&pp->lock);
		pp->is_stopped = true;
		spin_unlock(&pp->lock);
5776 | |
		cpuhp_state_remove_instance_nocalls(online_hpstate,
						    &pp->node_online);
		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						    &pp->node_dead);
5781 | } |
5782 | |
5783 | rtnl_lock(); |
5784 | mvneta_stop_dev(pp); |
5785 | rtnl_unlock(); |
5786 | |
5787 | for (queue = 0; queue < rxq_number; queue++) { |
5788 | struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; |
5789 | |
5790 | mvneta_rxq_drop_pkts(pp, rxq); |
5791 | } |
5792 | |
5793 | for (queue = 0; queue < txq_number; queue++) { |
5794 | struct mvneta_tx_queue *txq = &pp->txqs[queue]; |
5795 | |
5796 | mvneta_txq_hw_deinit(pp, txq); |
5797 | } |
5798 | |
5799 | clean_exit: |
5800 | netif_device_detach(dev); |
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
5803 | |
5804 | return 0; |
5805 | } |
5806 | |
5807 | static int mvneta_resume(struct device *device) |
5808 | { |
5809 | struct platform_device *pdev = to_platform_device(device); |
	struct net_device *dev = dev_get_drvdata(device);
5811 | struct mvneta_port *pp = netdev_priv(dev); |
5812 | int err, queue; |
5813 | |
	clk_prepare_enable(pp->clk);
	if (!IS_ERR(pp->clk_bus))
		clk_prepare_enable(pp->clk_bus);
	if (pp->dram_target_info || pp->neta_armada3700)
		mvneta_conf_mbus_windows(pp, pp->dram_target_info);
5819 | if (pp->bm_priv) { |
5820 | err = mvneta_bm_port_init(pdev, pp); |
5821 | if (err < 0) { |
			dev_info(&pdev->dev, "use SW buffer management\n");
5823 | pp->rx_offset_correction = MVNETA_SKB_HEADROOM; |
5824 | pp->bm_priv = NULL; |
5825 | } |
5826 | } |
5827 | mvneta_defaults_set(pp); |
	err = mvneta_port_power_up(pp, pp->phy_interface);
	if (err < 0) {
		dev_err(device, "can't power up port\n");
5831 | return err; |
5832 | } |
5833 | |
5834 | netif_device_attach(dev); |
5835 | |
5836 | if (!netif_running(dev)) |
5837 | return 0; |
5838 | |
5839 | for (queue = 0; queue < rxq_number; queue++) { |
5840 | struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; |
5841 | |
5842 | rxq->next_desc_to_proc = 0; |
5843 | mvneta_rxq_hw_init(pp, rxq); |
5844 | } |
5845 | |
5846 | for (queue = 0; queue < txq_number; queue++) { |
5847 | struct mvneta_tx_queue *txq = &pp->txqs[queue]; |
5848 | |
5849 | txq->next_desc_to_proc = 0; |
5850 | mvneta_txq_hw_init(pp, txq); |
5851 | } |
5852 | |
5853 | if (!pp->neta_armada3700) { |
		spin_lock(&pp->lock);
		pp->is_stopped = false;
		spin_unlock(&pp->lock);
		cpuhp_state_add_instance_nocalls(online_hpstate,
						 &pp->node_online);
		cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						 &pp->node_dead);
5861 | } |
5862 | |
5863 | rtnl_lock(); |
5864 | mvneta_start_dev(pp); |
5865 | rtnl_unlock(); |
5866 | mvneta_set_rx_mode(dev); |
5867 | |
5868 | return 0; |
5869 | } |
5870 | #endif |
5871 | |
5872 | static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume); |
5873 | |
5874 | static const struct of_device_id mvneta_match[] = { |
5875 | { .compatible = "marvell,armada-370-neta" }, |
5876 | { .compatible = "marvell,armada-xp-neta" }, |
5877 | { .compatible = "marvell,armada-3700-neta" }, |
5878 | { .compatible = "marvell,armada-ac5-neta" }, |
5879 | { } |
5880 | }; |
5881 | MODULE_DEVICE_TABLE(of, mvneta_match); |
5882 | |
5883 | static struct platform_driver mvneta_driver = { |
5884 | .probe = mvneta_probe, |
5885 | .remove_new = mvneta_remove, |
5886 | .driver = { |
5887 | .name = MVNETA_DRIVER_NAME, |
5888 | .of_match_table = mvneta_match, |
5889 | .pm = &mvneta_pm_ops, |
5890 | }, |
5891 | }; |
5892 | |
5893 | static int __init mvneta_driver_init(void) |
5894 | { |
5895 | int ret; |
5896 | |
5897 | BUILD_BUG_ON_NOT_POWER_OF_2(MVNETA_TSO_PER_PAGE); |
5898 | |
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online",
				      mvneta_cpu_online,
				      mvneta_cpu_down_prepare);
5902 | if (ret < 0) |
5903 | goto out; |
5904 | online_hpstate = ret; |
	ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
				      NULL, mvneta_cpu_dead);
5907 | if (ret) |
5908 | goto err_dead; |
5909 | |
5910 | ret = platform_driver_register(&mvneta_driver); |
5911 | if (ret) |
5912 | goto err; |
5913 | return 0; |
5914 | |
5915 | err: |
	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
5917 | err_dead: |
	cpuhp_remove_multi_state(online_hpstate);
5919 | out: |
5920 | return ret; |
5921 | } |
5922 | module_init(mvneta_driver_init); |
5923 | |
5924 | static void __exit mvneta_driver_exit(void) |
5925 | { |
5926 | platform_driver_unregister(&mvneta_driver); |
	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
	cpuhp_remove_multi_state(online_hpstate);
5929 | } |
5930 | module_exit(mvneta_driver_exit); |
5931 | |
MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_LICENSE("GPL");
5935 | |
5936 | module_param(rxq_number, int, 0444); |
5937 | module_param(txq_number, int, 0444); |
5938 | |
5939 | module_param(rxq_def, int, 0444); |
5940 | module_param(rx_copybreak, int, 0644); |
5941 | |