// SPDX-License-Identifier: GPL-2.0+

#include <linux/types.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/acpi.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/netlink.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>

#include <net/tcp.h>
#include <net/page_pool/helpers.h>
#include <net/ip6_checksum.h>

#define NETSEC_REG_SOFT_RST			0x104
#define NETSEC_REG_COM_INIT			0x120

#define NETSEC_REG_TOP_STATUS			0x200
#define NETSEC_IRQ_RX				BIT(1)
#define NETSEC_IRQ_TX				BIT(0)

#define NETSEC_REG_TOP_INTEN			0x204
#define NETSEC_REG_INTEN_SET			0x234
#define NETSEC_REG_INTEN_CLR			0x238

#define NETSEC_REG_NRM_TX_STATUS		0x400
#define NETSEC_REG_NRM_TX_INTEN			0x404
#define NETSEC_REG_NRM_TX_INTEN_SET		0x428
#define NETSEC_REG_NRM_TX_INTEN_CLR		0x42c
#define NRM_TX_ST_NTOWNR			BIT(17)
#define NRM_TX_ST_TR_ERR			BIT(16)
#define NRM_TX_ST_TXDONE			BIT(15)
#define NRM_TX_ST_TMREXP			BIT(14)

#define NETSEC_REG_NRM_RX_STATUS		0x440
#define NETSEC_REG_NRM_RX_INTEN			0x444
#define NETSEC_REG_NRM_RX_INTEN_SET		0x468
#define NETSEC_REG_NRM_RX_INTEN_CLR		0x46c
#define NRM_RX_ST_RC_ERR			BIT(16)
#define NRM_RX_ST_PKTCNT			BIT(15)
#define NRM_RX_ST_TMREXP			BIT(14)

#define NETSEC_REG_PKT_CMD_BUF			0xd0

#define NETSEC_REG_CLK_EN			0x100

#define NETSEC_REG_PKT_CTRL			0x140

#define NETSEC_REG_DMA_TMR_CTRL			0x20c
#define NETSEC_REG_F_TAIKI_MC_VER		0x22c
#define NETSEC_REG_F_TAIKI_VER			0x230
#define NETSEC_REG_DMA_HM_CTRL			0x214
#define NETSEC_REG_DMA_MH_CTRL			0x220
#define NETSEC_REG_ADDR_DIS_CORE		0x218
#define NETSEC_REG_DMAC_HM_CMD_BUF		0x210
#define NETSEC_REG_DMAC_MH_CMD_BUF		0x21c

#define NETSEC_REG_NRM_TX_PKTCNT		0x410

#define NETSEC_REG_NRM_TX_DONE_PKTCNT		0x414
#define NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT	0x418

#define NETSEC_REG_NRM_TX_TMR			0x41c

#define NETSEC_REG_NRM_RX_PKTCNT		0x454
#define NETSEC_REG_NRM_RX_RXINT_PKTCNT		0x458
#define NETSEC_REG_NRM_TX_TXINT_TMR		0x420
#define NETSEC_REG_NRM_RX_RXINT_TMR		0x460

#define NETSEC_REG_NRM_RX_TMR			0x45c

#define NETSEC_REG_NRM_TX_DESC_START_UP		0x434
#define NETSEC_REG_NRM_TX_DESC_START_LW		0x408
#define NETSEC_REG_NRM_RX_DESC_START_UP		0x474
#define NETSEC_REG_NRM_RX_DESC_START_LW		0x448

#define NETSEC_REG_NRM_TX_CONFIG		0x430
#define NETSEC_REG_NRM_RX_CONFIG		0x470

#define MAC_REG_STATUS				0x1024
#define MAC_REG_DATA				0x11c0
#define MAC_REG_CMD				0x11c4
#define MAC_REG_FLOW_TH				0x11cc
#define MAC_REG_INTF_SEL			0x11d4
#define MAC_REG_DESC_INIT			0x11fc
#define MAC_REG_DESC_SOFT_RST			0x1204
#define NETSEC_REG_MODE_TRANS_COMP_STATUS	0x500

#define GMAC_REG_MCR				0x0000
#define GMAC_REG_MFFR				0x0004
#define GMAC_REG_GAR				0x0010
#define GMAC_REG_GDR				0x0014
#define GMAC_REG_FCR				0x0018
#define GMAC_REG_BMR				0x1000
#define GMAC_REG_RDLAR				0x100c
#define GMAC_REG_TDLAR				0x1010
#define GMAC_REG_OMR				0x1018

#define MHZ(n)					((n) * 1000 * 1000)

#define NETSEC_TX_SHIFT_OWN_FIELD		31
#define NETSEC_TX_SHIFT_LD_FIELD		30
#define NETSEC_TX_SHIFT_DRID_FIELD		24
#define NETSEC_TX_SHIFT_PT_FIELD		21
#define NETSEC_TX_SHIFT_TDRID_FIELD		16
#define NETSEC_TX_SHIFT_CC_FIELD		15
#define NETSEC_TX_SHIFT_FS_FIELD		9
#define NETSEC_TX_LAST				8
#define NETSEC_TX_SHIFT_CO			7
#define NETSEC_TX_SHIFT_SO			6
#define NETSEC_TX_SHIFT_TRS_FIELD		4

#define NETSEC_RX_PKT_OWN_FIELD			31
#define NETSEC_RX_PKT_LD_FIELD			30
#define NETSEC_RX_PKT_SDRID_FIELD		24
#define NETSEC_RX_PKT_FR_FIELD			23
#define NETSEC_RX_PKT_ER_FIELD			21
#define NETSEC_RX_PKT_ERR_FIELD			16
#define NETSEC_RX_PKT_TDRID_FIELD		12
#define NETSEC_RX_PKT_FS_FIELD			9
#define NETSEC_RX_PKT_LS_FIELD			8
#define NETSEC_RX_PKT_CO_FIELD			6

#define NETSEC_RX_PKT_ERR_MASK			3

#define NETSEC_MAX_TX_PKT_LEN			1518
#define NETSEC_MAX_TX_JUMBO_PKT_LEN		9018

#define NETSEC_RING_GMAC			15
#define NETSEC_RING_MAX				2

#define NETSEC_TCP_SEG_LEN_MAX			1460
#define NETSEC_TCP_JUMBO_SEG_LEN_MAX		8960

#define NETSEC_RX_CKSUM_NOTAVAIL		0
#define NETSEC_RX_CKSUM_OK			1
#define NETSEC_RX_CKSUM_NG			2

#define NETSEC_TOP_IRQ_REG_CODE_LOAD_END	BIT(20)
#define NETSEC_IRQ_TRANSITION_COMPLETE		BIT(4)

#define NETSEC_MODE_TRANS_COMP_IRQ_N2T		BIT(20)
#define NETSEC_MODE_TRANS_COMP_IRQ_T2N		BIT(19)

#define NETSEC_INT_PKTCNT_MAX			2047

#define NETSEC_FLOW_START_TH_MAX		95
#define NETSEC_FLOW_STOP_TH_MAX			95
#define NETSEC_FLOW_PAUSE_TIME_MIN		5

#define NETSEC_CLK_EN_REG_DOM_ALL		0x3f

#define NETSEC_PKT_CTRL_REG_MODE_NRM		BIT(28)
#define NETSEC_PKT_CTRL_REG_EN_JUMBO		BIT(27)
#define NETSEC_PKT_CTRL_REG_LOG_CHKSUM_ER	BIT(3)
#define NETSEC_PKT_CTRL_REG_LOG_HD_INCOMPLETE	BIT(2)
#define NETSEC_PKT_CTRL_REG_LOG_HD_ER		BIT(1)
#define NETSEC_PKT_CTRL_REG_DRP_NO_MATCH	BIT(0)

#define NETSEC_CLK_EN_REG_DOM_G			BIT(5)
#define NETSEC_CLK_EN_REG_DOM_C			BIT(1)
#define NETSEC_CLK_EN_REG_DOM_D			BIT(0)

#define NETSEC_COM_INIT_REG_DB			BIT(2)
#define NETSEC_COM_INIT_REG_CLS			BIT(1)
#define NETSEC_COM_INIT_REG_ALL			(NETSEC_COM_INIT_REG_CLS | \
						 NETSEC_COM_INIT_REG_DB)

#define NETSEC_SOFT_RST_REG_RESET		0
#define NETSEC_SOFT_RST_REG_RUN			BIT(31)

#define NETSEC_DMA_CTRL_REG_STOP		1
#define MH_CTRL__MODE_TRANS			BIT(20)

#define NETSEC_GMAC_CMD_ST_READ			0
#define NETSEC_GMAC_CMD_ST_WRITE		BIT(28)
#define NETSEC_GMAC_CMD_ST_BUSY			BIT(31)

#define NETSEC_GMAC_BMR_REG_COMMON		0x00412080
#define NETSEC_GMAC_BMR_REG_RESET		0x00020181
#define NETSEC_GMAC_BMR_REG_SWR			0x00000001

#define NETSEC_GMAC_OMR_REG_ST			BIT(13)
#define NETSEC_GMAC_OMR_REG_SR			BIT(1)

#define NETSEC_GMAC_MCR_REG_IBN			BIT(30)
#define NETSEC_GMAC_MCR_REG_CST			BIT(25)
#define NETSEC_GMAC_MCR_REG_JE			BIT(20)
#define NETSEC_MCR_PS				BIT(15)
#define NETSEC_GMAC_MCR_REG_FES			BIT(14)
#define NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON	0x0000280c
#define NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON	0x0001a00c

#define NETSEC_FCR_RFE				BIT(2)
#define NETSEC_FCR_TFE				BIT(1)

#define NETSEC_GMAC_GAR_REG_GW			BIT(1)
#define NETSEC_GMAC_GAR_REG_GB			BIT(0)

#define NETSEC_GMAC_GAR_REG_SHIFT_PA		11
#define NETSEC_GMAC_GAR_REG_SHIFT_GR		6
#define GMAC_REG_SHIFT_CR_GAR			2

#define NETSEC_GMAC_GAR_REG_CR_25_35_MHZ	2
#define NETSEC_GMAC_GAR_REG_CR_35_60_MHZ	3
#define NETSEC_GMAC_GAR_REG_CR_60_100_MHZ	0
#define NETSEC_GMAC_GAR_REG_CR_100_150_MHZ	1
#define NETSEC_GMAC_GAR_REG_CR_150_250_MHZ	4
#define NETSEC_GMAC_GAR_REG_CR_250_300_MHZ	5

#define NETSEC_GMAC_RDLAR_REG_COMMON		0x18000
#define NETSEC_GMAC_TDLAR_REG_COMMON		0x1c000

#define NETSEC_REG_NETSEC_VER_F_TAIKI		0x50000

#define NETSEC_REG_DESC_RING_CONFIG_CFG_UP	BIT(31)
#define NETSEC_REG_DESC_RING_CONFIG_CH_RST	BIT(30)
#define NETSEC_REG_DESC_TMR_MODE		4
#define NETSEC_REG_DESC_ENDIAN			0

#define NETSEC_MAC_DESC_SOFT_RST_SOFT_RST	1
#define NETSEC_MAC_DESC_INIT_REG_INIT		1

#define NETSEC_EEPROM_MAC_ADDRESS		0x00
#define NETSEC_EEPROM_HM_ME_ADDRESS_H		0x08
#define NETSEC_EEPROM_HM_ME_ADDRESS_L		0x0C
#define NETSEC_EEPROM_HM_ME_SIZE		0x10
#define NETSEC_EEPROM_MH_ME_ADDRESS_H		0x14
#define NETSEC_EEPROM_MH_ME_ADDRESS_L		0x18
#define NETSEC_EEPROM_MH_ME_SIZE		0x1C
#define NETSEC_EEPROM_PKT_ME_ADDRESS		0x20
#define NETSEC_EEPROM_PKT_ME_SIZE		0x24

#define DESC_NUM				256

#define NETSEC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#define NETSEC_RXBUF_HEADROOM (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \
			       NET_IP_ALIGN)
#define NETSEC_RX_BUF_NON_DATA (NETSEC_RXBUF_HEADROOM + \
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define NETSEC_RX_BUF_SIZE	(PAGE_SIZE - NETSEC_RX_BUF_NON_DATA)

#define DESC_SZ	sizeof(struct netsec_de)

#define NETSEC_F_NETSEC_VER_MAJOR_NUM(x)	((x) & 0xffff0000)

#define NETSEC_XDP_PASS		0
#define NETSEC_XDP_CONSUMED	BIT(0)
#define NETSEC_XDP_TX		BIT(1)
#define NETSEC_XDP_REDIR	BIT(2)

enum ring_id {
	NETSEC_RING_TX = 0,
	NETSEC_RING_RX
};

enum buf_type {
	TYPE_NETSEC_SKB = 0,
	TYPE_NETSEC_XDP_TX,
	TYPE_NETSEC_XDP_NDO,
};

struct netsec_desc {
	union {
		struct sk_buff *skb;
		struct xdp_frame *xdpf;
	};
	dma_addr_t dma_addr;
	void *addr;
	u16 len;
	u8 buf_type;
};

struct netsec_desc_ring {
	dma_addr_t desc_dma;
	struct netsec_desc *desc;
	void *vaddr;
	u16 head, tail;
	u16 xdp_xmit; /* netsec_xdp_xmit packets */
	struct page_pool *page_pool;
	struct xdp_rxq_info xdp_rxq;
	spinlock_t lock; /* XDP tx queue locking */
};

struct netsec_priv {
	struct netsec_desc_ring desc_ring[NETSEC_RING_MAX];
	struct ethtool_coalesce et_coalesce;
	struct bpf_prog *xdp_prog;
	spinlock_t reglock; /* protect reg access */
	struct napi_struct napi;
	phy_interface_t phy_interface;
	struct net_device *ndev;
	struct device_node *phy_np;
	struct phy_device *phydev;
	struct mii_bus *mii_bus;
	void __iomem *ioaddr;
	void __iomem *eeprom_base;
	struct device *dev;
	struct clk *clk;
	u32 msg_enable;
	u32 freq;
	u32 phy_addr;
	bool rx_cksum_offload_flag;
};

struct netsec_de { /* Netsec Descriptor layout */
	u32 attr;
	u32 data_buf_addr_up;
	u32 data_buf_addr_lw;
	u32 buf_len_info;
};

struct netsec_tx_pkt_ctrl {
	u16 tcp_seg_len;
	bool tcp_seg_offload_flag;
	bool cksum_offload_flag;
};

struct netsec_rx_pkt_info {
	int rx_cksum_result;
	int err_code;
	bool err_flag;
};

static void netsec_write(struct netsec_priv *priv, u32 reg_addr, u32 val)
{
	writel(val, priv->ioaddr + reg_addr);
}

static u32 netsec_read(struct netsec_priv *priv, u32 reg_addr)
{
	return readl(priv->ioaddr + reg_addr);
}

/************* MDIO BUS OPS FOLLOW *************/

#define TIMEOUT_SPINS_MAC		1000
#define TIMEOUT_SECONDARY_MS_MAC	100

static u32 netsec_clk_type(u32 freq)
{
	if (freq < MHZ(35))
		return NETSEC_GMAC_GAR_REG_CR_25_35_MHZ;
	if (freq < MHZ(60))
		return NETSEC_GMAC_GAR_REG_CR_35_60_MHZ;
	if (freq < MHZ(100))
		return NETSEC_GMAC_GAR_REG_CR_60_100_MHZ;
	if (freq < MHZ(150))
		return NETSEC_GMAC_GAR_REG_CR_100_150_MHZ;
	if (freq < MHZ(250))
		return NETSEC_GMAC_GAR_REG_CR_150_250_MHZ;

	return NETSEC_GMAC_GAR_REG_CR_250_300_MHZ;
}

static int netsec_wait_while_busy(struct netsec_priv *priv, u32 addr, u32 mask)
{
	u32 timeout = TIMEOUT_SPINS_MAC;

	while (--timeout && netsec_read(priv, addr) & mask)
		cpu_relax();
	if (timeout)
		return 0;

	timeout = TIMEOUT_SECONDARY_MS_MAC;
	while (--timeout && netsec_read(priv, addr) & mask)
		usleep_range(1000, 2000);

	if (timeout)
		return 0;

	netdev_WARN(priv->ndev, "%s: timeout\n", __func__);

	return -ETIMEDOUT;
}

static int netsec_mac_write(struct netsec_priv *priv, u32 addr, u32 value)
{
	netsec_write(priv, MAC_REG_DATA, value);
	netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_WRITE);
	return netsec_wait_while_busy(priv,
				      MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
}

static int netsec_mac_read(struct netsec_priv *priv, u32 addr, u32 *read)
{
	int ret;

	netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_READ);
	ret = netsec_wait_while_busy(priv,
				     MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
	if (ret)
		return ret;

	*read = netsec_read(priv, MAC_REG_DATA);

	return 0;
}

static int netsec_mac_wait_while_busy(struct netsec_priv *priv,
				      u32 addr, u32 mask)
{
	u32 timeout = TIMEOUT_SPINS_MAC;
	int ret, data;

	do {
		ret = netsec_mac_read(priv, addr, &data);
		if (ret)
			break;
		cpu_relax();
	} while (--timeout && (data & mask));

	if (timeout)
		return 0;

	timeout = TIMEOUT_SECONDARY_MS_MAC;
	do {
		usleep_range(1000, 2000);

		ret = netsec_mac_read(priv, addr, &data);
		if (ret)
			break;
		cpu_relax();
	} while (--timeout && (data & mask));

	if (timeout && !ret)
		return 0;

	netdev_WARN(priv->ndev, "%s: timeout\n", __func__);

	return -ETIMEDOUT;
}

static int netsec_mac_update_to_phy_state(struct netsec_priv *priv)
{
	struct phy_device *phydev = priv->ndev->phydev;
	u32 value = 0;

	value = phydev->duplex ? NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON :
				 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON;

	if (phydev->speed != SPEED_1000)
		value |= NETSEC_MCR_PS;

	if (priv->phy_interface != PHY_INTERFACE_MODE_GMII &&
	    phydev->speed == SPEED_100)
		value |= NETSEC_GMAC_MCR_REG_FES;

	value |= NETSEC_GMAC_MCR_REG_CST | NETSEC_GMAC_MCR_REG_JE;

	if (phy_interface_mode_is_rgmii(priv->phy_interface))
		value |= NETSEC_GMAC_MCR_REG_IBN;

	if (netsec_mac_write(priv, GMAC_REG_MCR, value))
		return -ETIMEDOUT;

	return 0;
}

static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr);

static int netsec_phy_write(struct mii_bus *bus,
			    int phy_addr, int reg, u16 val)
{
	int status;
	struct netsec_priv *priv = bus->priv;

	if (netsec_mac_write(priv, GMAC_REG_GDR, val))
		return -ETIMEDOUT;
	if (netsec_mac_write(priv, GMAC_REG_GAR,
			     phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
			     reg << NETSEC_GMAC_GAR_REG_SHIFT_GR |
			     NETSEC_GMAC_GAR_REG_GW | NETSEC_GMAC_GAR_REG_GB |
			     (netsec_clk_type(priv->freq) <<
			      GMAC_REG_SHIFT_CR_GAR)))
		return -ETIMEDOUT;

	status = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
					    NETSEC_GMAC_GAR_REG_GB);

	/* The Developerbox implements an RTL8211E PHY, which has a
	 * compatibility problem with F_GMAC4: the RTL8211E expects the MDC
	 * clock to keep toggling for several cycles with MDIO high before
	 * it enters the IDLE state. To meet this requirement, the driver
	 * issues a dummy read (e.g. of the PHYID1 register at offset 0x2)
	 * right after every write.
	 */
	netsec_phy_read(bus, phy_addr, MII_PHYSID1);

	return status;
}

static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr)
{
	struct netsec_priv *priv = bus->priv;
	u32 data;
	int ret;

	if (netsec_mac_write(priv, GMAC_REG_GAR, NETSEC_GMAC_GAR_REG_GB |
			     phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
			     reg_addr << NETSEC_GMAC_GAR_REG_SHIFT_GR |
			     (netsec_clk_type(priv->freq) <<
			      GMAC_REG_SHIFT_CR_GAR)))
		return -ETIMEDOUT;

	ret = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
					 NETSEC_GMAC_GAR_REG_GB);
	if (ret)
		return ret;

	ret = netsec_mac_read(priv, GMAC_REG_GDR, &data);
	if (ret)
		return ret;

	return data;
}

/************* ETHTOOL_OPS FOLLOW *************/

static void netsec_et_get_drvinfo(struct net_device *net_device,
				  struct ethtool_drvinfo *info)
{
	strscpy(info->driver, "netsec", sizeof(info->driver));
	strscpy(info->bus_info, dev_name(net_device->dev.parent),
		sizeof(info->bus_info));
}

static int netsec_et_get_coalesce(struct net_device *net_device,
				  struct ethtool_coalesce *et_coalesce,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	struct netsec_priv *priv = netdev_priv(net_device);

	*et_coalesce = priv->et_coalesce;

	return 0;
}

static int netsec_et_set_coalesce(struct net_device *net_device,
				  struct ethtool_coalesce *et_coalesce,
				  struct kernel_ethtool_coalesce *kernel_coal,
				  struct netlink_ext_ack *extack)
{
	struct netsec_priv *priv = netdev_priv(net_device);

	priv->et_coalesce = *et_coalesce;

	if (priv->et_coalesce.tx_coalesce_usecs < 50)
		priv->et_coalesce.tx_coalesce_usecs = 50;
	if (priv->et_coalesce.tx_max_coalesced_frames < 1)
		priv->et_coalesce.tx_max_coalesced_frames = 1;

	netsec_write(priv, NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT,
		     priv->et_coalesce.tx_max_coalesced_frames);
	netsec_write(priv, NETSEC_REG_NRM_TX_TXINT_TMR,
		     priv->et_coalesce.tx_coalesce_usecs);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TXDONE);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TMREXP);

	if (priv->et_coalesce.rx_coalesce_usecs < 50)
		priv->et_coalesce.rx_coalesce_usecs = 50;
	if (priv->et_coalesce.rx_max_coalesced_frames < 1)
		priv->et_coalesce.rx_max_coalesced_frames = 1;

	netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_PKTCNT,
		     priv->et_coalesce.rx_max_coalesced_frames);
	netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_TMR,
		     priv->et_coalesce.rx_coalesce_usecs);
	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_PKTCNT);
	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_TMREXP);

	return 0;
}

static u32 netsec_et_get_msglevel(struct net_device *dev)
{
	struct netsec_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void netsec_et_set_msglevel(struct net_device *dev, u32 datum)
{
	struct netsec_priv *priv = netdev_priv(dev);

	priv->msg_enable = datum;
}

static const struct ethtool_ops netsec_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.get_drvinfo		= netsec_et_get_drvinfo,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= netsec_et_get_coalesce,
	.set_coalesce		= netsec_et_set_coalesce,
	.get_msglevel		= netsec_et_get_msglevel,
	.set_msglevel		= netsec_et_set_msglevel,
};

/************* NETDEV_OPS FOLLOW *************/

static void netsec_set_rx_de(struct netsec_priv *priv,
			     struct netsec_desc_ring *dring, u16 idx,
			     const struct netsec_desc *desc)
{
	struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
	u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) |
		   (1 << NETSEC_RX_PKT_FS_FIELD) |
		   (1 << NETSEC_RX_PKT_LS_FIELD);

	if (idx == DESC_NUM - 1)
		attr |= (1 << NETSEC_RX_PKT_LD_FIELD);

	de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
	de->buf_len_info = desc->len;
	de->attr = attr;
	dma_wmb();

	dring->desc[idx].dma_addr = desc->dma_addr;
	dring->desc[idx].addr = desc->addr;
	dring->desc[idx].len = desc->len;
}

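/* Reclaim completed TX descriptors: walk the ring from ->tail until we hit
 * a descriptor the hardware still owns, unmap the buffers the driver mapped
 * (SKBs and ndo_xdp_xmit frames), free SKBs or return XDP frames, and report
 * the completed work to BQL via netdev_completed_queue().
 */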
static bool netsec_clean_tx_dring(struct netsec_priv *priv)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
	struct xdp_frame_bulk bq;
	struct netsec_de *entry;
	int tail = dring->tail;
	unsigned int bytes;
	int cnt = 0;

	spin_lock(&dring->lock);

	bytes = 0;
	xdp_frame_bulk_init(&bq);
	entry = dring->vaddr + DESC_SZ * tail;

	rcu_read_lock(); /* need for xdp_return_frame_bulk */

	while (!(entry->attr & (1U << NETSEC_TX_SHIFT_OWN_FIELD)) &&
	       cnt < DESC_NUM) {
		struct netsec_desc *desc;
		int eop;

		desc = &dring->desc[tail];
		eop = (entry->attr >> NETSEC_TX_LAST) & 1;
		dma_rmb();

		/* if buf_type is either TYPE_NETSEC_SKB or
		 * TYPE_NETSEC_XDP_NDO we mapped it
		 */
		if (desc->buf_type != TYPE_NETSEC_XDP_TX)
			dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
					 DMA_TO_DEVICE);

		if (!eop)
			goto next;

		if (desc->buf_type == TYPE_NETSEC_SKB) {
			bytes += desc->skb->len;
			dev_kfree_skb(desc->skb);
		} else {
			bytes += desc->xdpf->len;
			if (desc->buf_type == TYPE_NETSEC_XDP_TX)
				xdp_return_frame_rx_napi(desc->xdpf);
			else
				xdp_return_frame_bulk(desc->xdpf, &bq);
		}
next:
		/* clean up so netsec_uninit_pkt_dring() won't free the skb
		 * again
		 */
		*desc = (struct netsec_desc){};

		/* entry->attr is not going to be accessed by the NIC until
		 * netsec_set_tx_de() is called. No need for a dma_wmb() here
		 */
		entry->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
		/* move tail ahead */
		dring->tail = (tail + 1) % DESC_NUM;

		tail = dring->tail;
		entry = dring->vaddr + DESC_SZ * tail;
		cnt++;
	}
	xdp_flush_frame_bulk(&bq);

	rcu_read_unlock();

	spin_unlock(&dring->lock);

	if (!cnt)
		return false;

	/* reading the register clears the irq */
	netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT);

	priv->ndev->stats.tx_packets += cnt;
	priv->ndev->stats.tx_bytes += bytes;

	netdev_completed_queue(priv->ndev, cnt, bytes);

	return true;
}

static void netsec_process_tx(struct netsec_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	bool cleaned;

	cleaned = netsec_clean_tx_dring(priv);

	if (cleaned && netif_queue_stopped(ndev)) {
		/* Make sure we update the value, anyone stopping the queue
		 * after this will read the proper consumer idx
		 */
		smp_wmb();
		netif_wake_queue(ndev);
	}
}

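/* Allocate one RX buffer from the page pool. The pool has already DMA-mapped
 * the whole page; we only offset the address past the reserved headroom and
 * report the usable payload length for the descriptor.
 */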
static void *netsec_alloc_rx_data(struct netsec_priv *priv,
				  dma_addr_t *dma_handle, u16 *desc_len)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	struct page *page;

	page = page_pool_dev_alloc_pages(dring->page_pool);
	if (!page)
		return NULL;

	/* We allocate the same buffer length for XDP and non-XDP cases.
	 * page_pool API will map the whole page, skip what's needed for
	 * network payloads and/or XDP
	 */
	*dma_handle = page_pool_get_dma_addr(page) + NETSEC_RXBUF_HEADROOM;
	/* Make sure the incoming payload fits in the page for XDP and non-XDP
	 * cases and reserve enough space for headroom + skb_shared_info
	 */
	*desc_len = NETSEC_RX_BUF_SIZE;

	return page_address(page);
}

static void netsec_rx_fill(struct netsec_priv *priv, u16 from, u16 num)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	u16 idx = from;

	while (num) {
		netsec_set_rx_de(priv, dring, idx, &dring->desc[idx]);
		idx++;
		if (idx >= DESC_NUM)
			idx = 0;
		num--;
	}
}

static void netsec_xdp_ring_tx_db(struct netsec_priv *priv, u16 pkts)
{
	if (likely(pkts))
		netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, pkts);
}

static void netsec_finalize_xdp_rx(struct netsec_priv *priv, u32 xdp_res,
				   u16 pkts)
{
	if (xdp_res & NETSEC_XDP_REDIR)
		xdp_do_flush();

	if (xdp_res & NETSEC_XDP_TX)
		netsec_xdp_ring_tx_db(priv, pkts);
}

static void netsec_set_tx_de(struct netsec_priv *priv,
			     struct netsec_desc_ring *dring,
			     const struct netsec_tx_pkt_ctrl *tx_ctrl,
			     const struct netsec_desc *desc, void *buf)
{
	int idx = dring->head;
	struct netsec_de *de;
	u32 attr;

	de = dring->vaddr + (DESC_SZ * idx);

	attr = (1 << NETSEC_TX_SHIFT_OWN_FIELD) |
	       (1 << NETSEC_TX_SHIFT_PT_FIELD) |
	       (NETSEC_RING_GMAC << NETSEC_TX_SHIFT_TDRID_FIELD) |
	       (1 << NETSEC_TX_SHIFT_FS_FIELD) |
	       (1 << NETSEC_TX_LAST) |
	       (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) |
	       (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) |
	       (1 << NETSEC_TX_SHIFT_TRS_FIELD);
	if (idx == DESC_NUM - 1)
		attr |= (1 << NETSEC_TX_SHIFT_LD_FIELD);

	de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
	de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
	de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;
	de->attr = attr;

	dring->desc[idx] = *desc;
	if (desc->buf_type == TYPE_NETSEC_SKB)
		dring->desc[idx].skb = buf;
	else if (desc->buf_type == TYPE_NETSEC_XDP_TX ||
		 desc->buf_type == TYPE_NETSEC_XDP_NDO)
		dring->desc[idx].xdpf = buf;

	/* move head ahead */
	dring->head = (dring->head + 1) % DESC_NUM;
}

/* The current driver only supports 1 Txq, this should run under spin_lock() */
static u32 netsec_xdp_queue_one(struct netsec_priv *priv,
				struct xdp_frame *xdpf, bool is_ndo)
{
	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
	struct page *page = virt_to_page(xdpf->data);
	struct netsec_tx_pkt_ctrl tx_ctrl = {};
	struct netsec_desc tx_desc;
	dma_addr_t dma_handle;
	u16 filled;

	if (tx_ring->head >= tx_ring->tail)
		filled = tx_ring->head - tx_ring->tail;
	else
		filled = tx_ring->head + DESC_NUM - tx_ring->tail;

	if (DESC_NUM - filled <= 1)
		return NETSEC_XDP_CONSUMED;

	if (is_ndo) {
		/* this is for ndo_xdp_xmit, the buffer needs mapping before
		 * sending
		 */
		dma_handle = dma_map_single(priv->dev, xdpf->data, xdpf->len,
					    DMA_TO_DEVICE);
		if (dma_mapping_error(priv->dev, dma_handle))
			return NETSEC_XDP_CONSUMED;
		tx_desc.buf_type = TYPE_NETSEC_XDP_NDO;
	} else {
		/* This is the device Rx buffer from page_pool. No need to
		 * remap, just sync and send it
		 */
		struct netsec_desc_ring *rx_ring =
			&priv->desc_ring[NETSEC_RING_RX];
		enum dma_data_direction dma_dir =
			page_pool_get_dma_dir(rx_ring->page_pool);

		dma_handle = page_pool_get_dma_addr(page) + xdpf->headroom +
			sizeof(*xdpf);
		dma_sync_single_for_device(priv->dev, dma_handle, xdpf->len,
					   dma_dir);
		tx_desc.buf_type = TYPE_NETSEC_XDP_TX;
	}

	tx_desc.dma_addr = dma_handle;
	tx_desc.addr = xdpf->data;
	tx_desc.len = xdpf->len;

	netdev_sent_queue(priv->ndev, xdpf->len);
	netsec_set_tx_de(priv, tx_ring, &tx_ctrl, &tx_desc, xdpf);

	return NETSEC_XDP_TX;
}

static u32 netsec_xdp_xmit_back(struct netsec_priv *priv, struct xdp_buff *xdp)
{
	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
	u32 ret;

	if (unlikely(!xdpf))
		return NETSEC_XDP_CONSUMED;

	spin_lock(&tx_ring->lock);
	ret = netsec_xdp_queue_one(priv, xdpf, false);
	spin_unlock(&tx_ring->lock);

	return ret;
}

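/* Run the attached XDP program on one received buffer and map the verdict to
 * a driver-level NETSEC_XDP_* action. Buffers that end up consumed (DROP,
 * ABORTED, or a failed TX/REDIRECT) are returned to the page pool, synced
 * over the maximum length the CPU may have touched.
 */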
static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
			  struct xdp_buff *xdp)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	unsigned int sync, len = xdp->data_end - xdp->data;
	u32 ret = NETSEC_XDP_PASS;
	struct page *page;
	int err;
	u32 act;

	act = bpf_prog_run_xdp(prog, xdp);

	/* Due to xdp_adjust_tail, the DMA sync for_device must cover the
	 * maximum length the CPU may have touched
	 */
	sync = xdp->data_end - xdp->data_hard_start - NETSEC_RXBUF_HEADROOM;
	sync = max(sync, len);

	switch (act) {
	case XDP_PASS:
		ret = NETSEC_XDP_PASS;
		break;
	case XDP_TX:
		ret = netsec_xdp_xmit_back(priv, xdp);
		if (ret != NETSEC_XDP_TX) {
			page = virt_to_head_page(xdp->data);
			page_pool_put_page(dring->page_pool, page, sync, true);
		}
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(priv->ndev, xdp, prog);
		if (!err) {
			ret = NETSEC_XDP_REDIR;
		} else {
			ret = NETSEC_XDP_CONSUMED;
			page = virt_to_head_page(xdp->data);
			page_pool_put_page(dring->page_pool, page, sync, true);
		}
		break;
	default:
		bpf_warn_invalid_xdp_action(priv->ndev, prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(priv->ndev, prog, act);
		fallthrough;	/* handle aborts by dropping packet */
	case XDP_DROP:
		ret = NETSEC_XDP_CONSUMED;
		page = virt_to_head_page(xdp->data);
		page_pool_put_page(dring->page_pool, page, sync, true);
		break;
	}

	return ret;
}

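/* RX NAPI loop: consume up to @budget completed descriptors. Each frame is
 * handed to XDP first (if a program is attached); frames that pass are
 * wrapped in an skb built around the page-pool buffer, and every consumed
 * slot is refilled with a freshly allocated buffer.
 */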
static int netsec_process_rx(struct netsec_priv *priv, int budget)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	struct net_device *ndev = priv->ndev;
	struct netsec_rx_pkt_info rx_info;
	enum dma_data_direction dma_dir;
	struct bpf_prog *xdp_prog;
	struct xdp_buff xdp;
	u16 xdp_xmit = 0;
	u32 xdp_act = 0;
	int done = 0;

	xdp_init_buff(&xdp, PAGE_SIZE, &dring->xdp_rxq);

	xdp_prog = READ_ONCE(priv->xdp_prog);
	dma_dir = page_pool_get_dma_dir(dring->page_pool);

	while (done < budget) {
		u16 idx = dring->tail;
		struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
		struct netsec_desc *desc = &dring->desc[idx];
		struct page *page = virt_to_page(desc->addr);
		u32 xdp_result = NETSEC_XDP_PASS;
		struct sk_buff *skb = NULL;
		u16 pkt_len, desc_len;
		dma_addr_t dma_handle;
		void *buf_addr;

		if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
			/* reading the register clears the irq */
			netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
			break;
		}

		/* This barrier is needed to keep us from reading
		 * any other fields out of the netsec_de until we have
		 * verified the descriptor has been written back
		 */
		dma_rmb();
		done++;

		pkt_len = de->buf_len_info >> 16;
		rx_info.err_code = (de->attr >> NETSEC_RX_PKT_ERR_FIELD) &
			NETSEC_RX_PKT_ERR_MASK;
		rx_info.err_flag = (de->attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
		if (rx_info.err_flag) {
			netif_err(priv, drv, priv->ndev,
				  "%s: rx fail err(%d)\n", __func__,
				  rx_info.err_code);
			ndev->stats.rx_dropped++;
			dring->tail = (dring->tail + 1) % DESC_NUM;
			/* reuse buffer page frag */
			netsec_rx_fill(priv, idx, 1);
			continue;
		}
		rx_info.rx_cksum_result =
			(de->attr >> NETSEC_RX_PKT_CO_FIELD) & 3;

		/* allocate a fresh buffer and map it to the hardware.
		 * This will eventually replace the old buffer in the hardware
		 */
		buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len);

		if (unlikely(!buf_addr))
			break;

		dma_sync_single_for_cpu(priv->dev, desc->dma_addr, pkt_len,
					dma_dir);
		prefetch(desc->addr);

		xdp_prepare_buff(&xdp, desc->addr, NETSEC_RXBUF_HEADROOM,
				 pkt_len, false);

		if (xdp_prog) {
			xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);
			if (xdp_result != NETSEC_XDP_PASS) {
				xdp_act |= xdp_result;
				if (xdp_result == NETSEC_XDP_TX)
					xdp_xmit++;
				goto next;
			}
		}
		skb = build_skb(desc->addr, desc->len + NETSEC_RX_BUF_NON_DATA);

		if (unlikely(!skb)) {
			/* If skb fails recycle_direct will either unmap and
			 * free the page or refill the cache depending on the
			 * cache state. Since we paid the allocation cost if
			 * building an skb fails try to put the page into cache
			 */
			page_pool_put_page(dring->page_pool, page, pkt_len,
					   true);
			netif_err(priv, drv, priv->ndev,
				  "rx failed to build skb\n");
			break;
		}
		skb_mark_for_recycle(skb);

		skb_reserve(skb, xdp.data - xdp.data_hard_start);
		skb_put(skb, xdp.data_end - xdp.data);
		skb->protocol = eth_type_trans(skb, priv->ndev);

		if (priv->rx_cksum_offload_flag &&
		    rx_info.rx_cksum_result == NETSEC_RX_CKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

next:
		if (skb)
			napi_gro_receive(&priv->napi, skb);
		if (skb || xdp_result) {
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += xdp.data_end - xdp.data;
		}

		/* Update the descriptor with fresh buffers */
		desc->len = desc_len;
		desc->dma_addr = dma_handle;
		desc->addr = buf_addr;

		netsec_rx_fill(priv, idx, 1);
		dring->tail = (dring->tail + 1) % DESC_NUM;
	}
	netsec_finalize_xdp_rx(priv, xdp_act, xdp_xmit);

	return done;
}

static int netsec_napi_poll(struct napi_struct *napi, int budget)
{
	struct netsec_priv *priv;
	int done;

	priv = container_of(napi, struct netsec_priv, napi);

	netsec_process_tx(priv);
	done = netsec_process_rx(priv, budget);

	if (done < budget && napi_complete_done(napi, done)) {
		unsigned long flags;

		spin_lock_irqsave(&priv->reglock, flags);
		netsec_write(priv, NETSEC_REG_INTEN_SET,
			     NETSEC_IRQ_RX | NETSEC_IRQ_TX);
		spin_unlock_irqrestore(&priv->reglock, flags);
	}

	return done;
}

static int netsec_desc_used(struct netsec_desc_ring *dring)
{
	int used;

	if (dring->head >= dring->tail)
		used = dring->head - dring->tail;
	else
		used = dring->head + DESC_NUM - dring->tail;

	return used;
}

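/* Stop the TX queue when fewer than two descriptors remain, then re-check
 * after an smp_rmb() that pairs with the smp_wmb() in netsec_process_tx(),
 * so a completion racing with the stop cannot leave the queue stopped while
 * free descriptors are available.
 */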
static int netsec_check_stop_tx(struct netsec_priv *priv, int used)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];

	/* keep tail from touching the queue */
	if (DESC_NUM - used < 2) {
		netif_stop_queue(priv->ndev);

		/* Make sure we read the updated value in case
		 * descriptors got freed
		 */
		smp_rmb();

		used = netsec_desc_used(dring);
		if (DESC_NUM - used < 2)
			return NETDEV_TX_BUSY;

		netif_wake_queue(priv->ndev);
	}

	return 0;
}

static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
					    struct net_device *ndev)
{
	struct netsec_priv *priv = netdev_priv(ndev);
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
	struct netsec_tx_pkt_ctrl tx_ctrl = {};
	struct netsec_desc tx_desc;
	u16 tso_seg_len = 0;
	int filled;

	spin_lock_bh(&dring->lock);
	filled = netsec_desc_used(dring);
	if (netsec_check_stop_tx(priv, filled)) {
		spin_unlock_bh(&dring->lock);
		net_warn_ratelimited("%s %s Tx queue full\n",
				     dev_name(priv->dev), ndev->name);
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_ctrl.cksum_offload_flag = true;

	if (skb_is_gso(skb))
		tso_seg_len = skb_shinfo(skb)->gso_size;

	if (tso_seg_len > 0) {
		if (skb->protocol == htons(ETH_P_IP)) {
			ip_hdr(skb)->tot_len = 0;
			tcp_hdr(skb)->check =
				~tcp_v4_check(0, ip_hdr(skb)->saddr,
					      ip_hdr(skb)->daddr, 0);
		} else {
			tcp_v6_gso_csum_prep(skb);
		}

		tx_ctrl.tcp_seg_offload_flag = true;
		tx_ctrl.tcp_seg_len = tso_seg_len;
	}

	tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
					  skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
		spin_unlock_bh(&dring->lock);
		netif_err(priv, drv, priv->ndev,
			  "%s: DMA mapping failed\n", __func__);
		ndev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	tx_desc.addr = skb->data;
	tx_desc.len = skb_headlen(skb);
	tx_desc.buf_type = TYPE_NETSEC_SKB;

	skb_tx_timestamp(skb);
	netdev_sent_queue(priv->ndev, skb->len);

	netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
	spin_unlock_bh(&dring->lock);
	netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */

	return NETDEV_TX_OK;
}

static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[id];
	struct netsec_desc *desc;
	u16 idx;

	if (!dring->vaddr || !dring->desc)
		return;
	for (idx = 0; idx < DESC_NUM; idx++) {
		desc = &dring->desc[idx];
		if (!desc->addr)
			continue;

		if (id == NETSEC_RING_RX) {
			struct page *page = virt_to_page(desc->addr);

			page_pool_put_full_page(dring->page_pool, page, false);
		} else if (id == NETSEC_RING_TX) {
			dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
					 DMA_TO_DEVICE);
			dev_kfree_skb(desc->skb);
		}
	}

	/* Rx is currently using page_pool */
	if (id == NETSEC_RING_RX) {
		if (xdp_rxq_info_is_reg(&dring->xdp_rxq))
			xdp_rxq_info_unreg(&dring->xdp_rxq);
		page_pool_destroy(dring->page_pool);
	}

	memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
	memset(dring->vaddr, 0, DESC_SZ * DESC_NUM);

	dring->head = 0;
	dring->tail = 0;

	if (id == NETSEC_RING_TX)
		netdev_reset_queue(priv->ndev);
}

static void netsec_free_dring(struct netsec_priv *priv, int id)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[id];

	if (dring->vaddr) {
		dma_free_coherent(priv->dev, DESC_SZ * DESC_NUM,
				  dring->vaddr, dring->desc_dma);
		dring->vaddr = NULL;
	}

	kfree(dring->desc);
	dring->desc = NULL;
}

static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[id];

	dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
					  &dring->desc_dma, GFP_KERNEL);
	if (!dring->vaddr)
		goto err;

	dring->desc = kcalloc(DESC_NUM, sizeof(*dring->desc), GFP_KERNEL);
	if (!dring->desc)
		goto err;

	return 0;
err:
	netsec_free_dring(priv, id);

	return -ENOMEM;
}

static void netsec_setup_tx_dring(struct netsec_priv *priv)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
	int i;

	for (i = 0; i < DESC_NUM; i++) {
		struct netsec_de *de;

		de = dring->vaddr + (DESC_SZ * i);
		/* de->attr is not going to be accessed by the NIC
		 * until netsec_set_tx_de() is called.
		 * No need for a dma_wmb() here
		 */
		de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
	}
}

static int netsec_setup_rx_dring(struct netsec_priv *priv)
{
	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
	struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog);
	struct page_pool_params pp_params = {
		.order = 0,
		/* internal DMA mapping in page_pool */
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = DESC_NUM,
		.nid = NUMA_NO_NODE,
		.dev = priv->dev,
		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
		.offset = NETSEC_RXBUF_HEADROOM,
		.max_len = NETSEC_RX_BUF_SIZE,
		.napi = &priv->napi,
		.netdev = priv->ndev,
	};
	int i, err;

	dring->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(dring->page_pool)) {
		err = PTR_ERR(dring->page_pool);
		dring->page_pool = NULL;
		goto err_out;
	}

	err = xdp_rxq_info_reg(&dring->xdp_rxq, priv->ndev, 0,
			       priv->napi.napi_id);
	if (err)
		goto err_out;

	err = xdp_rxq_info_reg_mem_model(&dring->xdp_rxq, MEM_TYPE_PAGE_POOL,
					 dring->page_pool);
	if (err)
		goto err_out;

	for (i = 0; i < DESC_NUM; i++) {
		struct netsec_desc *desc = &dring->desc[i];
		dma_addr_t dma_handle;
		void *buf;
		u16 len;

		buf = netsec_alloc_rx_data(priv, &dma_handle, &len);

		if (!buf) {
			err = -ENOMEM;
			goto err_out;
		}
		desc->dma_addr = dma_handle;
		desc->addr = buf;
		desc->len = len;
	}

	netsec_rx_fill(priv, 0, DESC_NUM);

	return 0;

err_out:
	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
	return err;
}

static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg,
					   u32 addr_h, u32 addr_l, u32 size)
{
	u64 base = (u64)addr_h << 32 | addr_l;
	void __iomem *ucode;
	u32 i;

	ucode = ioremap(base, size * sizeof(u32));
	if (!ucode)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		netsec_write(priv, reg, readl(ucode + i * 4));

	iounmap(ucode);
	return 0;
}

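/* The EEPROM region holds the address and size of three microcode images:
 * one each for the HM and MH DMA engines and one for the packet engine.
 * Each image is streamed word by word into the corresponding command-buffer
 * register via netsec_netdev_load_ucode_region().
 */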
static int netsec_netdev_load_microcode(struct netsec_priv *priv)
{
	u32 addr_h, addr_l, size;
	int err;

	addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_H);
	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_L);
	size = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_SIZE);
	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_HM_CMD_BUF,
					      addr_h, addr_l, size);
	if (err)
		return err;

	addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_H);
	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_L);
	size = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_SIZE);
	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_MH_CMD_BUF,
					      addr_h, addr_l, size);
	if (err)
		return err;

	addr_h = 0;
	addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_ADDRESS);
	size = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_SIZE);
	err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_PKT_CMD_BUF,
					      addr_h, addr_l, size);
	if (err)
		return err;

	return 0;
}

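/* Full engine reset: stop both DMA engines, soft-reset the core, program the
 * descriptor ring base addresses, optionally reload the microcode from the
 * addresses stored in EEPROM, then switch the microengine to normal (NRM)
 * mode (waiting on the T2N transition) and leave all interrupts disabled.
 */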
static int netsec_reset_hardware(struct netsec_priv *priv,
				 bool load_ucode)
{
	u32 value;
	int err;

	/* stop DMA engines */
	if (!netsec_read(priv, NETSEC_REG_ADDR_DIS_CORE)) {
		netsec_write(priv, NETSEC_REG_DMA_HM_CTRL,
			     NETSEC_DMA_CTRL_REG_STOP);
		netsec_write(priv, NETSEC_REG_DMA_MH_CTRL,
			     NETSEC_DMA_CTRL_REG_STOP);

		while (netsec_read(priv, NETSEC_REG_DMA_HM_CTRL) &
		       NETSEC_DMA_CTRL_REG_STOP)
			cpu_relax();

		while (netsec_read(priv, NETSEC_REG_DMA_MH_CTRL) &
		       NETSEC_DMA_CTRL_REG_STOP)
			cpu_relax();
	}

	netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RESET);
	netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RUN);
	netsec_write(priv, NETSEC_REG_COM_INIT, NETSEC_COM_INIT_REG_ALL);

	while (netsec_read(priv, NETSEC_REG_COM_INIT) != 0)
		cpu_relax();

	/* set desc_start addr */
	netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_UP,
		     upper_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
	netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_LW,
		     lower_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));

	netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_UP,
		     upper_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
	netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_LW,
		     lower_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));

	/* set normal tx dring ring config */
	netsec_write(priv, NETSEC_REG_NRM_TX_CONFIG,
		     1 << NETSEC_REG_DESC_ENDIAN);
	netsec_write(priv, NETSEC_REG_NRM_RX_CONFIG,
		     1 << NETSEC_REG_DESC_ENDIAN);

	if (load_ucode) {
		err = netsec_netdev_load_microcode(priv);
		if (err) {
			netif_err(priv, probe, priv->ndev,
				  "%s: failed to load microcode (%d)\n",
				  __func__, err);
			return err;
		}
	}

	/* start DMA engines */
	netsec_write(priv, NETSEC_REG_DMA_TMR_CTRL, priv->freq / 1000000 - 1);
	netsec_write(priv, NETSEC_REG_ADDR_DIS_CORE, 0);

	usleep_range(1000, 2000);

	if (!(netsec_read(priv, NETSEC_REG_TOP_STATUS) &
	      NETSEC_TOP_IRQ_REG_CODE_LOAD_END)) {
		netif_err(priv, probe, priv->ndev,
			  "microengine start failed\n");
		return -ENXIO;
	}
	netsec_write(priv, NETSEC_REG_TOP_STATUS,
		     NETSEC_TOP_IRQ_REG_CODE_LOAD_END);

	value = NETSEC_PKT_CTRL_REG_MODE_NRM;
	if (priv->ndev->mtu > ETH_DATA_LEN)
		value |= NETSEC_PKT_CTRL_REG_EN_JUMBO;

	/* change to normal mode */
	netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, MH_CTRL__MODE_TRANS);
	netsec_write(priv, NETSEC_REG_PKT_CTRL, value);

	while ((netsec_read(priv, NETSEC_REG_MODE_TRANS_COMP_STATUS) &
		NETSEC_MODE_TRANS_COMP_IRQ_T2N) == 0)
		cpu_relax();

	/* clear any pending EMPTY/ERR irq status */
	netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, ~0);

	/* Disable TX & RX intr */
	netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);

	return 0;
}

static int netsec_start_gmac(struct netsec_priv *priv)
{
	struct phy_device *phydev = priv->ndev->phydev;
	u32 value = 0;
	int ret;

	if (phydev->speed != SPEED_1000)
		value = (NETSEC_GMAC_MCR_REG_CST |
			 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON);

	if (netsec_mac_write(priv, GMAC_REG_MCR, value))
		return -ETIMEDOUT;
	if (netsec_mac_write(priv, GMAC_REG_BMR,
			     NETSEC_GMAC_BMR_REG_RESET))
		return -ETIMEDOUT;

	/* Wait soft reset */
	usleep_range(1000, 5000);

	ret = netsec_mac_read(priv, GMAC_REG_BMR, &value);
	if (ret)
		return ret;
	if (value & NETSEC_GMAC_BMR_REG_SWR)
		return -EAGAIN;

	netsec_write(priv, MAC_REG_DESC_SOFT_RST, 1);
	if (netsec_wait_while_busy(priv, MAC_REG_DESC_SOFT_RST, 1))
		return -ETIMEDOUT;

	netsec_write(priv, MAC_REG_DESC_INIT, 1);
	if (netsec_wait_while_busy(priv, MAC_REG_DESC_INIT, 1))
		return -ETIMEDOUT;

	if (netsec_mac_write(priv, GMAC_REG_BMR,
			     NETSEC_GMAC_BMR_REG_COMMON))
		return -ETIMEDOUT;
	if (netsec_mac_write(priv, GMAC_REG_RDLAR,
			     NETSEC_GMAC_RDLAR_REG_COMMON))
		return -ETIMEDOUT;
	if (netsec_mac_write(priv, GMAC_REG_TDLAR,
			     NETSEC_GMAC_TDLAR_REG_COMMON))
		return -ETIMEDOUT;
	if (netsec_mac_write(priv, GMAC_REG_MFFR, 0x80000001))
		return -ETIMEDOUT;

	ret = netsec_mac_update_to_phy_state(priv);
	if (ret)
		return ret;

	ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
	if (ret)
		return ret;

	value |= NETSEC_GMAC_OMR_REG_SR;
	value |= NETSEC_GMAC_OMR_REG_ST;

	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);

	netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce, NULL, NULL);

	if (netsec_mac_write(priv, GMAC_REG_OMR, value))
		return -ETIMEDOUT;

	return 0;
}

static int netsec_stop_gmac(struct netsec_priv *priv)
{
	u32 value;
	int ret;

	ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
	if (ret)
		return ret;
	value &= ~NETSEC_GMAC_OMR_REG_SR;
	value &= ~NETSEC_GMAC_OMR_REG_ST;

	/* disable all interrupts */
	netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
	netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);

	return netsec_mac_write(priv, GMAC_REG_OMR, value);
}

static void netsec_phy_adjust_link(struct net_device *ndev)
{
	struct netsec_priv *priv = netdev_priv(ndev);

	if (ndev->phydev->link)
		netsec_start_gmac(priv);
	else
		netsec_stop_gmac(priv);

	phy_print_status(ndev->phydev);
}

static irqreturn_t netsec_irq_handler(int irq, void *dev_id)
{
	struct netsec_priv *priv = dev_id;
	u32 val, status = netsec_read(priv, NETSEC_REG_TOP_STATUS);
	unsigned long flags;

	/* Disable interrupts */
	if (status & NETSEC_IRQ_TX) {
		val = netsec_read(priv, NETSEC_REG_NRM_TX_STATUS);
		netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, val);
	}
	if (status & NETSEC_IRQ_RX) {
		val = netsec_read(priv, NETSEC_REG_NRM_RX_STATUS);
		netsec_write(priv, NETSEC_REG_NRM_RX_STATUS, val);
	}

	spin_lock_irqsave(&priv->reglock, flags);
	netsec_write(priv, NETSEC_REG_INTEN_CLR, NETSEC_IRQ_RX | NETSEC_IRQ_TX);
	spin_unlock_irqrestore(&priv->reglock, flags);

	napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}

static int netsec_netdev_open(struct net_device *ndev)
{
	struct netsec_priv *priv = netdev_priv(ndev);
	int ret;

	pm_runtime_get_sync(priv->dev);

	netsec_setup_tx_dring(priv);
	ret = netsec_setup_rx_dring(priv);
	if (ret) {
		netif_err(priv, probe, priv->ndev,
			  "%s: fail setup ring\n", __func__);
		goto err1;
	}

	ret = request_irq(priv->ndev->irq, netsec_irq_handler,
			  IRQF_SHARED, "netsec", priv);
	if (ret) {
		netif_err(priv, drv, priv->ndev, "request_irq failed\n");
		goto err2;
	}

	if (dev_of_node(priv->dev)) {
		if (!of_phy_connect(priv->ndev, priv->phy_np,
				    netsec_phy_adjust_link, 0,
				    priv->phy_interface)) {
			netif_err(priv, link, priv->ndev, "missing PHY\n");
			ret = -ENODEV;
			goto err3;
		}
	} else {
		ret = phy_connect_direct(priv->ndev, priv->phydev,
					 netsec_phy_adjust_link,
					 priv->phy_interface);
		if (ret) {
			netif_err(priv, link, priv->ndev,
				  "phy_connect_direct() failed (%d)\n", ret);
			goto err3;
		}
	}

	phy_start(ndev->phydev);

	netsec_start_gmac(priv);
	napi_enable(&priv->napi);
	netif_start_queue(ndev);

	/* Enable TX+RX intr. */
	netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX | NETSEC_IRQ_TX);

	return 0;
err3:
	free_irq(priv->ndev->irq, priv);
err2:
	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
err1:
	pm_runtime_put_sync(priv->dev);
	return ret;
}

static int netsec_netdev_stop(struct net_device *ndev)
{
	int ret;
	struct netsec_priv *priv = netdev_priv(ndev);

	netif_stop_queue(priv->ndev);
	dma_wmb();

	napi_disable(&priv->napi);

	netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
	netsec_stop_gmac(priv);

	free_irq(priv->ndev->irq, priv);

	netsec_uninit_pkt_dring(priv, NETSEC_RING_TX);
	netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);

	phy_stop(ndev->phydev);
	phy_disconnect(ndev->phydev);

	ret = netsec_reset_hardware(priv, false);

	pm_runtime_put_sync(priv->dev);

	return ret;
}

static int netsec_netdev_init(struct net_device *ndev)
{
	struct netsec_priv *priv = netdev_priv(ndev);
	int ret;
	u16 data;

	BUILD_BUG_ON_NOT_POWER_OF_2(DESC_NUM);

	ret = netsec_alloc_dring(priv, NETSEC_RING_TX);
	if (ret)
		return ret;

	ret = netsec_alloc_dring(priv, NETSEC_RING_RX);
	if (ret)
		goto err1;

	/* set phy power down */
	data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR);
	netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR,
			 data | BMCR_PDOWN);

	ret = netsec_reset_hardware(priv, true);
	if (ret)
		goto err2;

	/* Restore phy power state */
	netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data);

	spin_lock_init(&priv->desc_ring[NETSEC_RING_TX].lock);
	spin_lock_init(&priv->desc_ring[NETSEC_RING_RX].lock);

	return 0;
err2:
	netsec_free_dring(priv, NETSEC_RING_RX);
err1:
	netsec_free_dring(priv, NETSEC_RING_TX);
	return ret;
}

static void netsec_netdev_uninit(struct net_device *ndev)
{
	struct netsec_priv *priv = netdev_priv(ndev);

	netsec_free_dring(priv, NETSEC_RING_RX);
	netsec_free_dring(priv, NETSEC_RING_TX);
}

static int netsec_netdev_set_features(struct net_device *ndev,
				      netdev_features_t features)
{
	struct netsec_priv *priv = netdev_priv(ndev);

	priv->rx_cksum_offload_flag = !!(features & NETIF_F_RXCSUM);

	return 0;
}

static int netsec_xdp_xmit(struct net_device *ndev, int n,
			   struct xdp_frame **frames, u32 flags)
{
	struct netsec_priv *priv = netdev_priv(ndev);
	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
	int i, nxmit = 0;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	spin_lock(&tx_ring->lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = netsec_xdp_queue_one(priv, xdpf, true);
		if (err != NETSEC_XDP_TX)
			break;

		tx_ring->xdp_xmit++;
		nxmit++;
	}
	spin_unlock(&tx_ring->lock);

	if (unlikely(flags & XDP_XMIT_FLUSH)) {
		netsec_xdp_ring_tx_db(priv, tx_ring->xdp_xmit);
		tx_ring->xdp_xmit = 0;
	}

	return nxmit;
}

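/* Attaching or detaching an XDP program restarts a running interface so the
 * RX page pool is recreated with the DMA direction XDP_TX needs (see the
 * dma_dir selection in netsec_setup_rx_dring()).
 */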
1793static int netsec_xdp_setup(struct netsec_priv *priv, struct bpf_prog *prog,
1794 struct netlink_ext_ack *extack)
1795{
1796 struct net_device *dev = priv->ndev;
1797 struct bpf_prog *old_prog;
1798
1799 /* For now just support only the usual MTU sized frames */
1800 if (prog && dev->mtu > 1500) {
1801 NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");
1802 return -EOPNOTSUPP;
1803 }
1804
1805 if (netif_running(dev))
1806 netsec_netdev_stop(ndev: dev);
1807
1808 /* Detach old prog, if any */
1809 old_prog = xchg(&priv->xdp_prog, prog);
1810 if (old_prog)
1811 bpf_prog_put(prog: old_prog);
1812
1813 if (netif_running(dev))
1814 netsec_netdev_open(ndev: dev);
1815
1816 return 0;
1817}
1818
1819static int netsec_xdp(struct net_device *ndev, struct netdev_bpf *xdp)
1820{
1821 struct netsec_priv *priv = netdev_priv(dev: ndev);
1822
1823 switch (xdp->command) {
1824 case XDP_SETUP_PROG:
1825 return netsec_xdp_setup(priv, prog: xdp->prog, extack: xdp->extack);
1826 default:
1827 return -EINVAL;
1828 }
1829}
1830
1831static const struct net_device_ops netsec_netdev_ops = {
1832 .ndo_init = netsec_netdev_init,
1833 .ndo_uninit = netsec_netdev_uninit,
1834 .ndo_open = netsec_netdev_open,
1835 .ndo_stop = netsec_netdev_stop,
1836 .ndo_start_xmit = netsec_netdev_start_xmit,
1837 .ndo_set_features = netsec_netdev_set_features,
1838 .ndo_set_mac_address = eth_mac_addr,
1839 .ndo_validate_addr = eth_validate_addr,
1840 .ndo_eth_ioctl = phy_do_ioctl,
1841 .ndo_xdp_xmit = netsec_xdp_xmit,
1842 .ndo_bpf = netsec_xdp,
1843};
1844
1845static int netsec_of_probe(struct platform_device *pdev,
1846 struct netsec_priv *priv, u32 *phy_addr)
1847{
1848 int err;
1849
1850 err = of_get_phy_mode(np: pdev->dev.of_node, interface: &priv->phy_interface);
1851 if (err) {
1852 dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
1853 return err;
1854 }
1855
1856 /*
1857 * SynQuacer is physically configured with TX and RX delays
1858 * but the standard firmware claimed otherwise for a long
1859 * time, ignore it.
1860 */
1861 if (of_machine_is_compatible(compat: "socionext,developer-box") &&
1862 priv->phy_interface != PHY_INTERFACE_MODE_RGMII_ID) {
1863 dev_warn(&pdev->dev, "Outdated firmware reports incorrect PHY mode, overriding\n");
1864 priv->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
1865 }
1866
1867 priv->phy_np = of_parse_phandle(np: pdev->dev.of_node, phandle_name: "phy-handle", index: 0);
1868 if (!priv->phy_np) {
1869 dev_err(&pdev->dev, "missing required property 'phy-handle'\n");
1870 return -EINVAL;
1871 }
1872
1873 *phy_addr = of_mdio_parse_addr(dev: &pdev->dev, np: priv->phy_np);
1874
1875 priv->clk = devm_clk_get(dev: &pdev->dev, NULL); /* get by 'phy_ref_clk' */
1876 if (IS_ERR(ptr: priv->clk))
1877 return dev_err_probe(dev: &pdev->dev, err: PTR_ERR(ptr: priv->clk),
1878 fmt: "phy_ref_clk not found\n");
1879 priv->freq = clk_get_rate(clk: priv->clk);
1880
1881 return 0;
1882}
1883
static int netsec_acpi_probe(struct platform_device *pdev,
			     struct netsec_priv *priv, u32 *phy_addr)
{
	int ret;

	if (!IS_ENABLED(CONFIG_ACPI))
		return -ENODEV;

	/* ACPI systems are assumed to configure the PHY in firmware, so
	 * there is really no need to discover the PHY mode from the DSDT.
	 * Since firmware is known to exist in the field that configures the
	 * PHY correctly but passes the wrong mode string in the phy-mode
	 * device property, we have no choice but to ignore it.
	 */
	priv->phy_interface = PHY_INTERFACE_MODE_NA;

	ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
	if (ret)
		return dev_err_probe(&pdev->dev, ret,
				     "missing required property 'phy-channel'\n");

	ret = device_property_read_u32(&pdev->dev,
				       "socionext,phy-clock-frequency",
				       &priv->freq);
	if (ret)
		return dev_err_probe(&pdev->dev, ret,
				     "missing required property 'socionext,phy-clock-frequency'\n");
	return 0;
}

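/* Tear down the MDIO bus. On non-DT (ACPI) systems the PHY device was
 * created by hand in netsec_register_mdio(), so it must also be
 * removed and freed by hand before the bus itself is unregistered.
 */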
static void netsec_unregister_mdio(struct netsec_priv *priv)
{
	struct phy_device *phydev = priv->phydev;

	if (!dev_of_node(priv->dev) && phydev) {
		phy_device_remove(phydev);
		phy_device_free(phydev);
	}

	mdiobus_unregister(priv->mii_bus);
}

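/* Register the MDIO bus and locate the PHY.
 *
 * On DT systems the bus is populated from the "mdio" subnode (or, for
 * older firmware, from the device node itself) via
 * of_mdiobus_register(). On ACPI systems auto-probing is masked off
 * and the single PHY at phy_addr is instantiated and registered
 * manually.
 */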
static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
{
	struct mii_bus *bus;
	int ret;

	bus = devm_mdiobus_alloc(priv->dev);
	if (!bus)
		return -ENOMEM;

	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(priv->dev));
	bus->priv = priv;
	bus->name = "SNI NETSEC MDIO";
	bus->read = netsec_phy_read;
	bus->write = netsec_phy_write;
	bus->parent = priv->dev;
	priv->mii_bus = bus;

	if (dev_of_node(priv->dev)) {
		struct device_node *mdio_node, *parent = dev_of_node(priv->dev);

		mdio_node = of_get_child_by_name(parent, "mdio");
		if (mdio_node) {
			parent = mdio_node;
		} else {
			/* older f/w doesn't populate the mdio subnode,
			 * allow relaxed upgrade of f/w in due time.
			 */
			dev_info(priv->dev, "Upgrade f/w for mdio subnode!\n");
		}

		ret = of_mdiobus_register(bus, parent);
		of_node_put(mdio_node);

		if (ret) {
			dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
			return ret;
		}
	} else {
		/* Mask out all PHYs from auto probing. */
		bus->phy_mask = ~0;
		ret = mdiobus_register(bus);
		if (ret) {
			dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
			return ret;
		}

		priv->phydev = get_phy_device(bus, phy_addr, false);
		if (IS_ERR(priv->phydev)) {
			ret = PTR_ERR(priv->phydev);
			dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
			priv->phydev = NULL;
			mdiobus_unregister(bus);
			return -ENODEV;
		}

		ret = phy_device_register(priv->phydev);
		if (ret) {
			phy_device_free(priv->phydev);
			mdiobus_unregister(bus);
			dev_err(priv->dev,
				"phy_device_register err(%d)\n", ret);
		}
	}

	return ret;
}

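/* Probe: map the register and EEPROM regions, recover the MAC address,
 * run the firmware-interface specific setup (DT or ACPI), verify that
 * the hardware is an F_TAIKI-style NETSEC, then register NAPI, the
 * MDIO bus and the netdev. Runtime PM is held across probe only;
 * open/close take their own references.
 */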
static int netsec_probe(struct platform_device *pdev)
{
	struct resource *mmio_res, *eeprom_res;
	struct netsec_priv *priv;
	u32 hw_ver, phy_addr = 0;
	struct net_device *ndev;
	int ret;
	int irq;

	mmio_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mmio_res) {
		dev_err(&pdev->dev, "No MMIO resource found.\n");
		return -ENODEV;
	}

	eeprom_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!eeprom_res) {
		dev_info(&pdev->dev, "No EEPROM resource found.\n");
		return -ENODEV;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ndev = alloc_etherdev(sizeof(*priv));
	if (!ndev)
		return -ENOMEM;

	priv = netdev_priv(ndev);

	spin_lock_init(&priv->reglock);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	platform_set_drvdata(pdev, priv);
	ndev->irq = irq;
	priv->dev = &pdev->dev;
	priv->ndev = ndev;

	priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV |
			   NETIF_MSG_LINK | NETIF_MSG_PROBE;

	priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start,
				    resource_size(mmio_res));
	if (!priv->ioaddr) {
		dev_err(&pdev->dev, "devm_ioremap() failed\n");
		ret = -ENXIO;
		goto free_ndev;
	}

	priv->eeprom_base = devm_ioremap(&pdev->dev, eeprom_res->start,
					 resource_size(eeprom_res));
	if (!priv->eeprom_base) {
		dev_err(&pdev->dev, "devm_ioremap() failed for EEPROM\n");
		ret = -ENXIO;
		goto free_ndev;
	}

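	/* Prefer a MAC address from firmware (DT/ACPI properties); fall
	 * back to the EEPROM, where the address appears to be stored in
	 * two 32-bit little-endian words, hence the byte swizzle below.
	 * If neither yields a valid address, use a random one.
	 */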
	ret = device_get_ethdev_address(&pdev->dev, ndev);
	if (ret && priv->eeprom_base) {
		void __iomem *macp = priv->eeprom_base +
					NETSEC_EEPROM_MAC_ADDRESS;
		u8 addr[ETH_ALEN];

		addr[0] = readb(macp + 3);
		addr[1] = readb(macp + 2);
		addr[2] = readb(macp + 1);
		addr[3] = readb(macp + 0);
		addr[4] = readb(macp + 7);
		addr[5] = readb(macp + 6);
		eth_hw_addr_set(ndev, addr);
	}

	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev, "No MAC address found, using random\n");
		eth_hw_addr_random(ndev);
	}

	if (dev_of_node(&pdev->dev))
		ret = netsec_of_probe(pdev, priv, &phy_addr);
	else
		ret = netsec_acpi_probe(pdev, priv, &phy_addr);
	if (ret)
		goto free_ndev;

	priv->phy_addr = phy_addr;

	if (!priv->freq) {
		dev_err(&pdev->dev, "missing PHY reference clock frequency\n");
		ret = -ENODEV;
		goto free_ndev;
	}

	/* IRQ coalesce defaults favour throughput: interrupt after up to
	 * eight frames or 500 usecs on either ring.
	 */
	priv->et_coalesce.rx_coalesce_usecs = 500;
	priv->et_coalesce.rx_max_coalesced_frames = 8;
	priv->et_coalesce.tx_coalesce_usecs = 500;
	priv->et_coalesce.tx_max_coalesced_frames = 8;

	ret = device_property_read_u32(&pdev->dev, "max-frame-size",
				       &ndev->max_mtu);
	if (ret < 0)
		ndev->max_mtu = ETH_DATA_LEN;

	/* runtime_pm coverage just for probe, open/close also cover it */
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	hw_ver = netsec_read(priv, NETSEC_REG_F_TAIKI_VER);
	/* this driver only supports F_TAIKI style NETSEC */
	if (NETSEC_F_NETSEC_VER_MAJOR_NUM(hw_ver) !=
	    NETSEC_F_NETSEC_VER_MAJOR_NUM(NETSEC_REG_NETSEC_VER_F_TAIKI)) {
		ret = -ENODEV;
		goto pm_disable;
	}

	dev_info(&pdev->dev, "hardware revision %d.%d\n",
		 hw_ver >> 16, hw_ver & 0xffff);

	netif_napi_add(ndev, &priv->napi, netsec_napi_poll);

	ndev->netdev_ops = &netsec_netdev_ops;
	ndev->ethtool_ops = &netsec_ethtool_ops;

	ndev->features |= NETIF_F_HIGHDMA | NETIF_F_RXCSUM | NETIF_F_GSO |
			  NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	ndev->hw_features = ndev->features;

	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			     NETDEV_XDP_ACT_NDO_XMIT;

	priv->rx_cksum_offload_flag = true;

	ret = netsec_register_mdio(priv, phy_addr);
	if (ret)
		goto unreg_napi;

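	/* The DMA engine supports 40-bit addressing (DMA_BIT_MASK(40));
	 * if the mask cannot be set, continue with the platform default
	 * and only warn.
	 */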
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
		dev_warn(&pdev->dev, "Failed to set DMA mask\n");

	ret = register_netdev(ndev);
	if (ret) {
		netif_err(priv, probe, ndev, "register_netdev() failed\n");
		goto unreg_mii;
	}

	pm_runtime_put_sync(&pdev->dev);
	return 0;

unreg_mii:
	netsec_unregister_mdio(priv);
unreg_napi:
	netif_napi_del(&priv->napi);
pm_disable:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
free_ndev:
	free_netdev(ndev);
	dev_err(&pdev->dev, "init failed\n");

	return ret;
}

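/* Remove: unwind probe in reverse order. The probe-time runtime PM
 * reference was already dropped, so only pm_runtime_disable() is
 * needed here.
 */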
static void netsec_remove(struct platform_device *pdev)
{
	struct netsec_priv *priv = platform_get_drvdata(pdev);

	unregister_netdev(priv->ndev);

	netsec_unregister_mdio(priv);

	netif_napi_del(&priv->napi);

	pm_runtime_disable(&pdev->dev);
	free_netdev(priv->ndev);
}

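/* Runtime PM: suspend gates the D/C/G clock domains via
 * NETSEC_REG_CLK_EN and then stops the bus clock; resume does the
 * reverse. On ACPI systems priv->clk is never set, and the clk_* calls
 * are no-ops on a NULL clock.
 */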
#ifdef CONFIG_PM
static int netsec_runtime_suspend(struct device *dev)
{
	struct netsec_priv *priv = dev_get_drvdata(dev);

	netsec_write(priv, NETSEC_REG_CLK_EN, 0);

	clk_disable_unprepare(priv->clk);

	return 0;
}

static int netsec_runtime_resume(struct device *dev)
{
	struct netsec_priv *priv = dev_get_drvdata(dev);

	clk_prepare_enable(priv->clk);

	netsec_write(priv, NETSEC_REG_CLK_EN, NETSEC_CLK_EN_REG_DOM_D |
					      NETSEC_CLK_EN_REG_DOM_C |
					      NETSEC_CLK_EN_REG_DOM_G);
	return 0;
}
#endif

static const struct dev_pm_ops netsec_pm_ops = {
	SET_RUNTIME_PM_OPS(netsec_runtime_suspend, netsec_runtime_resume, NULL)
};

static const struct of_device_id netsec_dt_ids[] = {
	{ .compatible = "socionext,synquacer-netsec" },
	{ }
};
MODULE_DEVICE_TABLE(of, netsec_dt_ids);

#ifdef CONFIG_ACPI
static const struct acpi_device_id netsec_acpi_ids[] = {
	{ "SCX0001" },
	{ }
};
MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids);
#endif

static struct platform_driver netsec_driver = {
	.probe		= netsec_probe,
	.remove_new	= netsec_remove,
	.driver = {
		.name = "netsec",
		.pm = &netsec_pm_ops,
		.of_match_table = netsec_dt_ids,
		.acpi_match_table = ACPI_PTR(netsec_acpi_ids),
	},
};
module_platform_driver(netsec_driver);

MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_DESCRIPTION("NETSEC Ethernet driver");
MODULE_LICENSE("GPL");