// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt)			"bcmasp_intf: " fmt

#include <asm/byteorder.h>
#include <linux/brcmphy.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/ptp_classify.h>
#include <linux/platform_device.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmasp.h"
#include "bcmasp_intf_defs.h"

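/* The descriptor rings are tracked two ways: the CPU side walks them with a
 * plain array index, while the hardware read/valid pointers are DMA byte
 * addresses inside the ring. The helpers below advance one descriptor in
 * either representation and handle the wrap back to the start of the ring.
 */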
static int incr_ring(int index, int ring_count)
{
	index++;
	if (index == ring_count)
		return 0;

	return index;
}

/* Points to last byte of descriptor */
static dma_addr_t incr_last_byte(dma_addr_t addr, dma_addr_t beg,
				 int ring_count)
{
	dma_addr_t end = beg + (ring_count * DESC_SIZE);

	addr += DESC_SIZE;
	if (addr > end)
		return beg + DESC_SIZE - 1;

	return addr;
}

/* Points to first byte of descriptor */
static dma_addr_t incr_first_byte(dma_addr_t addr, dma_addr_t beg,
				  int ring_count)
{
	dma_addr_t end = beg + (ring_count * DESC_SIZE);

	addr += DESC_SIZE;
	if (addr >= end)
		return beg;

	return addr;
}

static void bcmasp_enable_tx(struct bcmasp_intf *intf, int en)
{
	if (en) {
		tx_spb_ctrl_wl(intf, TX_SPB_CTRL_ENABLE_EN, TX_SPB_CTRL_ENABLE);
		tx_epkt_core_wl(intf, (TX_EPKT_C_CFG_MISC_EN |
				TX_EPKT_C_CFG_MISC_PT |
				(intf->port << TX_EPKT_C_CFG_MISC_PS_SHIFT)),
				TX_EPKT_C_CFG_MISC);
	} else {
		tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
		tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);
	}
}

static void bcmasp_enable_rx(struct bcmasp_intf *intf, int en)
{
	if (en)
		rx_edpkt_cfg_wl(intf, RX_EDPKT_CFG_ENABLE_EN,
				RX_EDPKT_CFG_ENABLE);
	else
		rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE);
}

static void bcmasp_set_rx_mode(struct net_device *dev)
{
	unsigned char mask[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct bcmasp_intf *intf = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int ret;

	spin_lock_bh(&intf->parent->mda_lock);

	bcmasp_disable_all_filters(intf);

	if (dev->flags & IFF_PROMISC)
		goto set_promisc;

	bcmasp_set_promisc(intf, 0);

	bcmasp_set_broad(intf, 1);

	bcmasp_set_oaddr(intf, dev->dev_addr, 1);

	if (dev->flags & IFF_ALLMULTI) {
		bcmasp_set_allmulti(intf, 1);
	} else {
		bcmasp_set_allmulti(intf, 0);

		netdev_for_each_mc_addr(ha, dev) {
			ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask);
			if (ret) {
				intf->mib.mc_filters_full_cnt++;
				goto set_promisc;
			}
		}
	}

	netdev_for_each_uc_addr(ha, dev) {
		ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask);
		if (ret) {
			intf->mib.uc_filters_full_cnt++;
			goto set_promisc;
		}
	}

	spin_unlock_bh(&intf->parent->mda_lock);
	return;

set_promisc:
	bcmasp_set_promisc(intf, 1);
	intf->mib.promisc_filters_cnt++;

	/* disable all filters used by this port */
	bcmasp_disable_all_filters(intf);

	spin_unlock_bh(&intf->parent->mda_lock);
}

static void bcmasp_clean_txcb(struct bcmasp_intf *intf, int index)
{
	struct bcmasp_tx_cb *txcb = &intf->tx_cbs[index];

	txcb->skb = NULL;
	dma_unmap_addr_set(txcb, dma_addr, 0);
	dma_unmap_len_set(txcb, dma_len, 0);
	txcb->last = false;
}

static int tx_spb_ring_full(struct bcmasp_intf *intf, int cnt)
{
	int next_index, i;

	/* Check if we have enough room for cnt descriptors */
	for (i = 0; i < cnt; i++) {
		next_index = incr_ring(intf->tx_spb_index, DESC_RING_COUNT);
		if (next_index == intf->tx_spb_clean_index)
			return 1;
	}

	return 0;
}

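/* Prepend the hardware's per-packet offload block (a NOP, two header words,
 * an epkt command word and an end marker) so the engine can insert the
 * L3/L4 checksum. Anything that is not IPv4/IPv6 TCP or UDP, or an skb
 * whose headroom cannot be expanded, falls back to a software checksum via
 * skb_checksum_help().
 */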
static struct sk_buff *bcmasp_csum_offload(struct net_device *dev,
					   struct sk_buff *skb,
					   bool *csum_hw)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	u32 header = 0, header2 = 0, epkt = 0;
	struct bcmasp_pkt_offload *offload;
	unsigned int header_cnt = 0;
	u8 ip_proto;
	int ret;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return skb;

	ret = skb_cow_head(skb, sizeof(*offload));
	if (ret < 0) {
		intf->mib.tx_realloc_offload_failed++;
		goto help;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		header |= PKT_OFFLOAD_HDR_SIZE_2((ip_hdrlen(skb) >> 8) & 0xf);
		header2 |= PKT_OFFLOAD_HDR2_SIZE_2(ip_hdrlen(skb) & 0xff);
		epkt |= PKT_OFFLOAD_EPKT_IP(0) | PKT_OFFLOAD_EPKT_CSUM_L2;
		ip_proto = ip_hdr(skb)->protocol;
		header_cnt += 2;
		break;
	case htons(ETH_P_IPV6):
		header |= PKT_OFFLOAD_HDR_SIZE_2((IP6_HLEN >> 8) & 0xf);
		header2 |= PKT_OFFLOAD_HDR2_SIZE_2(IP6_HLEN & 0xff);
		epkt |= PKT_OFFLOAD_EPKT_IP(1) | PKT_OFFLOAD_EPKT_CSUM_L2;
		ip_proto = ipv6_hdr(skb)->nexthdr;
		header_cnt += 2;
		break;
	default:
		goto help;
	}

	switch (ip_proto) {
	case IPPROTO_TCP:
		header2 |= PKT_OFFLOAD_HDR2_SIZE_3(tcp_hdrlen(skb));
		epkt |= PKT_OFFLOAD_EPKT_TP(0) | PKT_OFFLOAD_EPKT_CSUM_L3;
		header_cnt++;
		break;
	case IPPROTO_UDP:
		header2 |= PKT_OFFLOAD_HDR2_SIZE_3(UDP_HLEN);
		epkt |= PKT_OFFLOAD_EPKT_TP(1) | PKT_OFFLOAD_EPKT_CSUM_L3;
		header_cnt++;
		break;
	default:
		goto help;
	}

	offload = (struct bcmasp_pkt_offload *)skb_push(skb, sizeof(*offload));

	header |= PKT_OFFLOAD_HDR_OP | PKT_OFFLOAD_HDR_COUNT(header_cnt) |
		  PKT_OFFLOAD_HDR_SIZE_1(ETH_HLEN);
	epkt |= PKT_OFFLOAD_EPKT_OP;

	offload->nop = htonl(PKT_OFFLOAD_NOP);
	offload->header = htonl(header);
	offload->header2 = htonl(header2);
	offload->epkt = htonl(epkt);
	offload->end = htonl(PKT_OFFLOAD_END_OP);
	*csum_hw = true;

	return skb;

help:
	skb_checksum_help(skb);

	return skb;
}

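/* Thin accessors for the ring read/write/valid pointer registers; they are
 * wired up behind bcmasp_intf_ops so the generic ring code reaches the
 * hardware through a single indirection.
 */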
static unsigned long bcmasp_rx_edpkt_dma_rq(struct bcmasp_intf *intf)
{
	return rx_edpkt_dma_rq(intf, RX_EDPKT_DMA_VALID);
}

static void bcmasp_rx_edpkt_cfg_wq(struct bcmasp_intf *intf, dma_addr_t addr)
{
	rx_edpkt_cfg_wq(intf, addr, RX_EDPKT_RING_BUFFER_READ);
}

static void bcmasp_rx_edpkt_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr)
{
	rx_edpkt_dma_wq(intf, addr, RX_EDPKT_DMA_READ);
}

static unsigned long bcmasp_tx_spb_dma_rq(struct bcmasp_intf *intf)
{
	return tx_spb_dma_rq(intf, TX_SPB_DMA_READ);
}

static void bcmasp_tx_spb_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr)
{
	tx_spb_dma_wq(intf, addr, TX_SPB_DMA_VALID);
}

static const struct bcmasp_intf_ops bcmasp_intf_ops = {
	.rx_desc_read = bcmasp_rx_edpkt_dma_rq,
	.rx_buffer_write = bcmasp_rx_edpkt_cfg_wq,
	.rx_desc_write = bcmasp_rx_edpkt_dma_wq,
	.tx_read = bcmasp_tx_spb_dma_rq,
	.tx_write = bcmasp_tx_spb_dma_wq,
};

static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	unsigned int total_bytes, size;
	int spb_index, nr_frags, i, j;
	struct bcmasp_tx_cb *txcb;
	dma_addr_t mapping, valid;
	struct bcmasp_desc *desc;
	bool csum_hw = false;
	struct device *kdev;
	skb_frag_t *frag;

	kdev = &intf->parent->pdev->dev;

	nr_frags = skb_shinfo(skb)->nr_frags;

	if (tx_spb_ring_full(intf, nr_frags + 1)) {
		netif_stop_queue(dev);
		if (net_ratelimit())
			netdev_err(dev, "Tx Ring Full!\n");
		return NETDEV_TX_BUSY;
	}

	/* Save skb len before adding csum offload header */
	total_bytes = skb->len;
	skb = bcmasp_csum_offload(dev, skb, &csum_hw);
	if (!skb)
		return NETDEV_TX_OK;

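	/* Map one descriptor per fragment: iteration 0 maps the linear head
	 * (padding runt frames to ETH_ZLEN + ETH_FCS_LEN), the remaining
	 * iterations map page fragments. On a mapping error the control
	 * blocks written so far are cleaned and the index rewound so no
	 * hole is left in the ring.
	 */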
	spb_index = intf->tx_spb_index;
	valid = intf->tx_spb_dma_valid;
	for (i = 0; i <= nr_frags; i++) {
		if (!i) {
			size = skb_headlen(skb);
			if (!nr_frags && size < (ETH_ZLEN + ETH_FCS_LEN)) {
				if (skb_put_padto(skb, ETH_ZLEN + ETH_FCS_LEN))
					return NETDEV_TX_OK;
				size = skb->len;
			}
			mapping = dma_map_single(kdev, skb->data, size,
						 DMA_TO_DEVICE);
		} else {
			frag = &skb_shinfo(skb)->frags[i - 1];
			size = skb_frag_size(frag);
			mapping = skb_frag_dma_map(kdev, frag, 0, size,
						   DMA_TO_DEVICE);
		}

		if (dma_mapping_error(kdev, mapping)) {
			intf->mib.tx_dma_failed++;
			spb_index = intf->tx_spb_index;
			for (j = 0; j < i; j++) {
				bcmasp_clean_txcb(intf, spb_index);
				spb_index = incr_ring(spb_index,
						      DESC_RING_COUNT);
			}
			/* Rewind so we do not have a hole */
			spb_index = intf->tx_spb_index;
			return NETDEV_TX_OK;
		}

		txcb = &intf->tx_cbs[spb_index];
		desc = &intf->tx_spb_cpu[spb_index];
		memset(desc, 0, sizeof(*desc));
		txcb->skb = skb;
		txcb->bytes_sent = total_bytes;
		dma_unmap_addr_set(txcb, dma_addr, mapping);
		dma_unmap_len_set(txcb, dma_len, size);
		if (!i) {
			desc->flags |= DESC_SOF;
			if (csum_hw)
				desc->flags |= DESC_EPKT_CMD;
		}

		if (i == nr_frags) {
			desc->flags |= DESC_EOF;
			txcb->last = true;
		}

		desc->buf = mapping;
		desc->size = size;
		desc->flags |= DESC_INT_EN;

		netif_dbg(intf, tx_queued, dev,
			  "%s dma_buf=%pad dma_len=0x%x flags=0x%x index=0x%x\n",
			  __func__, &mapping, desc->size, desc->flags,
			  spb_index);

		spb_index = incr_ring(spb_index, DESC_RING_COUNT);
		valid = incr_last_byte(valid, intf->tx_spb_dma_addr,
				       DESC_RING_COUNT);
	}

	/* Ensure all descriptors have been written to DRAM for the
	 * hardware to see up-to-date contents.
	 */
	wmb();

	intf->tx_spb_index = spb_index;
	intf->tx_spb_dma_valid = valid;
	bcmasp_intf_tx_write(intf, intf->tx_spb_dma_valid);

	if (tx_spb_ring_full(intf, MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	return NETDEV_TX_OK;
}

static void bcmasp_netif_start(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	bcmasp_set_rx_mode(dev);
	napi_enable(&intf->tx_napi);
	napi_enable(&intf->rx_napi);

	bcmasp_enable_rx_irq(intf, 1);
	bcmasp_enable_tx_irq(intf, 1);
	bcmasp_enable_phy_irq(intf, 1);

	phy_start(dev->phydev);
}

static void umac_reset(struct bcmasp_intf *intf)
{
	umac_wl(intf, 0x0, UMC_CMD);
	umac_wl(intf, UMC_CMD_SW_RESET, UMC_CMD);
	usleep_range(10, 100);
	/* We hold the umac in reset and bring it out of
	 * reset when phy link is up.
	 */
}

static void umac_set_hw_addr(struct bcmasp_intf *intf,
			     const unsigned char *addr)
{
	u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
		   addr[3];
	u32 mac1 = (addr[4] << 8) | addr[5];

	umac_wl(intf, mac0, UMC_MAC0);
	umac_wl(intf, mac1, UMC_MAC1);
}

static void umac_enable_set(struct bcmasp_intf *intf, u32 mask,
			    unsigned int enable)
{
	u32 reg;

	reg = umac_rl(intf, UMC_CMD);
	if (reg & UMC_CMD_SW_RESET)
		return;
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	umac_wl(intf, reg, UMC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed (1 msec).
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static void umac_init(struct bcmasp_intf *intf)
{
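	/* 0x800 = 2048-byte maximum frame and RX packet size,
	 * 0xffff = maximum pause quanta
	 */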
	umac_wl(intf, 0x800, UMC_FRM_LEN);
	umac_wl(intf, 0xffff, UMC_PAUSE_CNTRL);
	umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ);
}

static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcmasp_intf *intf =
		container_of(napi, struct bcmasp_intf, tx_napi);
	struct bcmasp_intf_stats64 *stats = &intf->stats64;
	struct device *kdev = &intf->parent->pdev->dev;
	unsigned long read, released = 0;
	struct bcmasp_tx_cb *txcb;
	struct bcmasp_desc *desc;
	dma_addr_t mapping;

	read = bcmasp_intf_tx_read(intf);
	while (intf->tx_spb_dma_read != read) {
		txcb = &intf->tx_cbs[intf->tx_spb_clean_index];
		mapping = dma_unmap_addr(txcb, dma_addr);

		dma_unmap_single(kdev, mapping,
				 dma_unmap_len(txcb, dma_len),
				 DMA_TO_DEVICE);

		if (txcb->last) {
			dev_consume_skb_any(txcb->skb);

			u64_stats_update_begin(&stats->syncp);
			u64_stats_inc(&stats->tx_packets);
			u64_stats_add(&stats->tx_bytes, txcb->bytes_sent);
			u64_stats_update_end(&stats->syncp);
		}

		desc = &intf->tx_spb_cpu[intf->tx_spb_clean_index];

		netif_dbg(intf, tx_done, intf->ndev,
			  "%s dma_buf=%pad dma_len=0x%x flags=0x%x c_index=0x%x\n",
			  __func__, &mapping, desc->size, desc->flags,
			  intf->tx_spb_clean_index);

		bcmasp_clean_txcb(intf, intf->tx_spb_clean_index);
		released++;

		intf->tx_spb_clean_index = incr_ring(intf->tx_spb_clean_index,
						     DESC_RING_COUNT);
		intf->tx_spb_dma_read = incr_first_byte(intf->tx_spb_dma_read,
							intf->tx_spb_dma_addr,
							DESC_RING_COUNT);
	}

	/* Ensure all descriptors have been written to DRAM for the hardware
	 * to see updated contents.
	 */
	wmb();

	napi_complete(&intf->tx_napi);

	bcmasp_enable_tx_irq(intf, 1);

	if (released)
		netif_wake_queue(intf->ndev);

	return 0;
}

static int bcmasp_rx_poll(struct napi_struct *napi, int budget)
{
	struct bcmasp_intf *intf =
		container_of(napi, struct bcmasp_intf, rx_napi);
	struct bcmasp_intf_stats64 *stats = &intf->stats64;
	struct device *kdev = &intf->parent->pdev->dev;
	unsigned long processed = 0;
	struct bcmasp_desc *desc;
	struct sk_buff *skb;
	dma_addr_t valid;
	void *data;
	u64 flags;
	u32 len;

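	/* The RX descriptor read pointer reports the last byte the hardware
	 * wrote; adding one yields the first byte past the newest descriptor,
	 * wrapping back to the ring base at the end of the ring.
	 */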
	valid = bcmasp_intf_rx_desc_read(intf) + 1;
	if (valid == intf->rx_edpkt_dma_addr + DESC_RING_SIZE)
		valid = intf->rx_edpkt_dma_addr;

	while ((processed < budget) && (valid != intf->rx_edpkt_dma_read)) {
		desc = &intf->rx_edpkt_cpu[intf->rx_edpkt_index];

		/* Ensure that descriptor has been fully written to DRAM by
		 * hardware before reading by the CPU
		 */
		rmb();

		/* Calculate virt addr by offsetting from physical addr */
		data = intf->rx_ring_cpu +
			(DESC_ADDR(desc->buf) - intf->rx_ring_dma);

		flags = DESC_FLAGS(desc->buf);
		if (unlikely(flags & (DESC_CRC_ERR | DESC_RX_SYM_ERR))) {
			if (net_ratelimit()) {
				netif_err(intf, rx_status, intf->ndev,
					  "flags=0x%llx\n", flags);
			}

			u64_stats_update_begin(&stats->syncp);
			if (flags & DESC_CRC_ERR)
				u64_stats_inc(&stats->rx_crc_errs);
			if (flags & DESC_RX_SYM_ERR)
				u64_stats_inc(&stats->rx_sym_errs);
			u64_stats_update_end(&stats->syncp);

			goto next;
		}

		dma_sync_single_for_cpu(kdev, DESC_ADDR(desc->buf), desc->size,
					DMA_FROM_DEVICE);

		len = desc->size;

		skb = napi_alloc_skb(napi, len);
		if (!skb) {
			u64_stats_update_begin(&stats->syncp);
			u64_stats_inc(&stats->rx_dropped);
			u64_stats_update_end(&stats->syncp);
			intf->mib.alloc_rx_skb_failed++;

			goto next;
		}

		skb_put(skb, len);
		memcpy(skb->data, data, len);

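		/* Strip the two bytes the hardware prepends ahead of the
		 * Ethernet header, then the trailing FCS when the MAC is
		 * configured to forward the CRC.
		 */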
		skb_pull(skb, 2);
		len -= 2;
		if (likely(intf->crc_fwd)) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		if ((intf->ndev->features & NETIF_F_RXCSUM) &&
		    (desc->buf & DESC_CHKSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		skb->protocol = eth_type_trans(skb, intf->ndev);

		napi_gro_receive(napi, skb);

		u64_stats_update_begin(&stats->syncp);
		u64_stats_inc(&stats->rx_packets);
		u64_stats_add(&stats->rx_bytes, len);
		u64_stats_update_end(&stats->syncp);

next:
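		/* Return the consumed buffer space to the hardware and
		 * advance both the CPU-side index and the DMA read pointer.
		 */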
		bcmasp_intf_rx_buffer_write(intf, (DESC_ADDR(desc->buf) +
						  desc->size));

		processed++;
		intf->rx_edpkt_dma_read =
			incr_first_byte(intf->rx_edpkt_dma_read,
					intf->rx_edpkt_dma_addr,
					DESC_RING_COUNT);
		intf->rx_edpkt_index = incr_ring(intf->rx_edpkt_index,
						 DESC_RING_COUNT);
	}

	bcmasp_intf_rx_desc_write(intf, intf->rx_edpkt_dma_read);

	if (processed < budget) {
		napi_complete_done(&intf->rx_napi, processed);
		bcmasp_enable_rx_irq(intf, 1);
	}

	return processed;
}

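/* PHY adjust_link callback, registered via of_phy_connect(): mirrors the
 * negotiated speed, duplex and pause settings into the UniMAC command
 * register and the RGMII out-of-band link indication.
 */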
static void bcmasp_adj_link(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	u32 cmd_bits = 0, reg;
	int changed = 0;
	bool active;

	if (intf->old_link != phydev->link) {
		changed = 1;
		intf->old_link = phydev->link;
	}

	if (intf->old_duplex != phydev->duplex) {
		changed = 1;
		intf->old_duplex = phydev->duplex;
	}

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = UMC_CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = UMC_CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = UMC_CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = UMC_CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= UMC_CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= UMC_CMD_HD_EN;

	if (intf->old_pause != phydev->pause) {
		changed = 1;
		intf->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= UMC_CMD_RX_PAUSE_IGNORE | UMC_CMD_TX_PAUSE_IGNORE;

	if (!changed)
		return;

	if (phydev->link) {
		reg = umac_rl(intf, UMC_CMD);
		reg &= ~((UMC_CMD_SPEED_MASK << UMC_CMD_SPEED_SHIFT) |
			 UMC_CMD_HD_EN | UMC_CMD_RX_PAUSE_IGNORE |
			 UMC_CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		if (reg & UMC_CMD_SW_RESET) {
			reg &= ~UMC_CMD_SW_RESET;
			umac_wl(intf, reg, UMC_CMD);
			udelay(2);
			reg |= UMC_CMD_TX_EN | UMC_CMD_RX_EN | UMC_CMD_PROMISC;
		}
		umac_wl(intf, reg, UMC_CMD);

		active = phy_init_eee(phydev, 0) >= 0;
		bcmasp_eee_enable_set(intf, active);
	}

	reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
	if (phydev->link)
		reg |= RGMII_LINK;
	else
		reg &= ~RGMII_LINK;
	rgmii_wl(intf, reg, RGMII_OOB_CNTRL);

	if (changed)
		phy_print_status(phydev);
}

static int bcmasp_alloc_buffers(struct bcmasp_intf *intf)
{
	struct device *kdev = &intf->parent->pdev->dev;
	struct page *buffer_pg;

	/* Alloc RX */
	intf->rx_buf_order = get_order(RING_BUFFER_SIZE);
	buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order);
	if (!buffer_pg)
		return -ENOMEM;

	intf->rx_ring_cpu = page_to_virt(buffer_pg);
	intf->rx_ring_dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, intf->rx_ring_dma))
		goto free_rx_buffer;

	intf->rx_edpkt_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
						&intf->rx_edpkt_dma_addr,
						GFP_KERNEL);
	if (!intf->rx_edpkt_cpu)
		goto free_rx_buffer_dma;

	/* Alloc TX */
	intf->tx_spb_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
					      &intf->tx_spb_dma_addr,
					      GFP_KERNEL);
	if (!intf->tx_spb_cpu)
		goto free_rx_edpkt_dma;

	intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb),
			       GFP_KERNEL);
	if (!intf->tx_cbs)
		goto free_tx_spb_dma;

	return 0;

free_tx_spb_dma:
	dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
			  intf->tx_spb_dma_addr);
free_rx_edpkt_dma:
	dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
			  intf->rx_edpkt_dma_addr);
free_rx_buffer_dma:
	dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
		       DMA_FROM_DEVICE);
free_rx_buffer:
	__free_pages(buffer_pg, intf->rx_buf_order);

	return -ENOMEM;
}

static void bcmasp_reclaim_free_buffers(struct bcmasp_intf *intf)
{
	struct device *kdev = &intf->parent->pdev->dev;

	/* RX buffers */
	dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
			  intf->rx_edpkt_dma_addr);
	dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
		       DMA_FROM_DEVICE);
	__free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);

	/* TX buffers */
	dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
			  intf->tx_spb_dma_addr);
	kfree(intf->tx_cbs);
}

static void bcmasp_init_rx(struct bcmasp_intf *intf)
{
	/* Restart from index 0 */
	intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1;
	intf->rx_edpkt_dma_valid = intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1);
	intf->rx_edpkt_dma_read = intf->rx_edpkt_dma_addr;
	intf->rx_edpkt_index = 0;

	/* Make sure channels are disabled */
	rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE);

	/* Rx SPB */
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_READ);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_WRITE);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_BASE);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid,
			RX_EDPKT_RING_BUFFER_END);
	rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid,
			RX_EDPKT_RING_BUFFER_VALID);

	/* EDPKT */
	rx_edpkt_cfg_wl(intf, (RX_EDPKT_CFG_CFG0_RBUF_4K <<
			RX_EDPKT_CFG_CFG0_DBUF_SHIFT) |
			(RX_EDPKT_CFG_CFG0_64_ALN <<
			RX_EDPKT_CFG_CFG0_BALN_SHIFT) |
			(RX_EDPKT_CFG_CFG0_EFRM_STUF),
			RX_EDPKT_CFG_CFG0);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_WRITE);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_READ);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_BASE);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_END);
	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_VALID);

	umac2fb_wl(intf, UMAC2FB_CFG_DEFAULT_EN | ((intf->channel + 11) <<
		   UMAC2FB_CFG_CHID_SHIFT) | (0xd << UMAC2FB_CFG_OK_SEND_SHIFT),
		   UMAC2FB_CFG);
}

static void bcmasp_init_tx(struct bcmasp_intf *intf)
{
	/* Restart from index 0 */
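	/* The hardware END/VALID pointers reference the last byte of the
	 * ring (see incr_last_byte()), hence the "- 1".
	 */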
	intf->tx_spb_dma_valid = intf->tx_spb_dma_addr + DESC_RING_SIZE - 1;
	intf->tx_spb_dma_read = intf->tx_spb_dma_addr;
	intf->tx_spb_index = 0;
	intf->tx_spb_clean_index = 0;

	/* Make sure channels are disabled */
	tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
	tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);

	/* Tx SPB */
	tx_spb_ctrl_wl(intf, ((intf->channel + 8) << TX_SPB_CTRL_XF_BID_SHIFT),
		       TX_SPB_CTRL_XF_CTRL2);
	tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR);
	tx_spb_top_wl(intf, 0x1e, TX_SPB_TOP_BLKOUT);
	tx_spb_top_wl(intf, 0x0, TX_SPB_TOP_SPRE_BW_CTRL);

	tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_READ);
	tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE);
	tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_END);
	tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_VALID);
}

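/* Power the internal EPHY up or down: powering up ungates the EPHY clock
 * (RGMII_EPHY_CK25_DIS), clears the IDDQ/power-down bits and pulses reset;
 * powering down reverses the sequence and re-gates the clock.
 */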
static void bcmasp_ephy_enable_set(struct bcmasp_intf *intf, bool enable)
{
	u32 mask = RGMII_EPHY_CFG_IDDQ_BIAS | RGMII_EPHY_CFG_EXT_PWRDOWN |
		   RGMII_EPHY_CFG_IDDQ_GLOBAL;
	u32 reg;

	reg = rgmii_rl(intf, RGMII_EPHY_CNTRL);
	if (enable) {
		reg &= ~RGMII_EPHY_CK25_DIS;
		rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
		mdelay(1);

		reg &= ~mask;
		reg |= RGMII_EPHY_RESET;
		rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
		mdelay(1);

		reg &= ~RGMII_EPHY_RESET;
	} else {
		reg |= mask | RGMII_EPHY_RESET;
		rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
		mdelay(1);
		reg |= RGMII_EPHY_CK25_DIS;
	}
	rgmii_wl(intf, reg, RGMII_EPHY_CNTRL);
	mdelay(1);

	/* Set or clear the LED control override to avoid lighting up LEDs
	 * while the EPHY is powered off and drawing unnecessary current.
	 */
	reg = rgmii_rl(intf, RGMII_SYS_LED_CNTRL);
	if (enable)
		reg &= ~RGMII_SYS_LED_CNTRL_LINK_OVRD;
	else
		reg |= RGMII_SYS_LED_CNTRL_LINK_OVRD;
	rgmii_wl(intf, reg, RGMII_SYS_LED_CNTRL);
}

static void bcmasp_rgmii_mode_en_set(struct bcmasp_intf *intf, bool enable)
{
	u32 reg;

	reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
	reg &= ~RGMII_OOB_DIS;
	if (enable)
		reg |= RGMII_MODE_EN;
	else
		reg &= ~RGMII_MODE_EN;
	rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
}

static void bcmasp_netif_deinit(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	u32 reg, timeout = 1000;

	napi_disable(&intf->tx_napi);

	bcmasp_enable_tx(intf, 0);

	/* Flush any TX packets in the pipe */
	tx_spb_dma_wl(intf, TX_SPB_DMA_FIFO_FLUSH, TX_SPB_DMA_FIFO_CTRL);
	do {
		reg = tx_spb_dma_rl(intf, TX_SPB_DMA_FIFO_STATUS);
		if (!(reg & TX_SPB_DMA_FIFO_FLUSH))
			break;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);
	tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL);

	umac_enable_set(intf, UMC_CMD_TX_EN, 0);

	phy_stop(dev->phydev);

	umac_enable_set(intf, UMC_CMD_RX_EN, 0);

	bcmasp_flush_rx_port(intf);
	usleep_range(1000, 2000);
	bcmasp_enable_rx(intf, 0);

	napi_disable(&intf->rx_napi);

	/* Disable interrupts */
	bcmasp_enable_tx_irq(intf, 0);
	bcmasp_enable_rx_irq(intf, 0);
	bcmasp_enable_phy_irq(intf, 0);

	netif_napi_del(&intf->tx_napi);
	netif_napi_del(&intf->rx_napi);
}

static int bcmasp_stop(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	netif_dbg(intf, ifdown, dev, "bcmasp stop\n");

	/* Stop tx from updating HW */
	netif_tx_disable(dev);

	bcmasp_netif_deinit(dev);

	bcmasp_reclaim_free_buffers(intf);

	phy_disconnect(dev->phydev);

	/* Disable internal EPHY or external PHY */
	if (intf->internal_phy)
		bcmasp_ephy_enable_set(intf, false);
	else
		bcmasp_rgmii_mode_en_set(intf, false);

	/* Disable the interface clocks */
	bcmasp_core_clock_set_intf(intf, false);

	clk_disable_unprepare(intf->parent->clk);

	return 0;
}

static void bcmasp_configure_port(struct bcmasp_intf *intf)
{
	u32 reg, id_mode_dis = 0;

	reg = rgmii_rl(intf, RGMII_PORT_CNTRL);
	reg &= ~RGMII_PORT_MODE_MASK;

	switch (intf->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
		/* RGMII_NO_ID: TXC transitions at the same time as TXD
		 * (requires PCB or receiver-side delay)
		 * RGMII: Add 2ns delay on TXC (90 degree shift)
		 *
		 * ID is implicitly disabled for 100Mbps (RG)MII operation.
		 */
		id_mode_dis = RGMII_ID_MODE_DIS;
		fallthrough;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		reg |= RGMII_PORT_MODE_EXT_GPHY;
		break;
	case PHY_INTERFACE_MODE_MII:
		reg |= RGMII_PORT_MODE_EXT_EPHY;
		break;
	default:
		break;
	}

	if (intf->internal_phy)
		reg |= RGMII_PORT_MODE_EPHY;

	rgmii_wl(intf, reg, RGMII_PORT_CNTRL);

	reg = rgmii_rl(intf, RGMII_OOB_CNTRL);
	reg &= ~RGMII_ID_MODE_DIS;
	reg |= id_mode_dis;
	rgmii_wl(intf, reg, RGMII_OOB_CNTRL);
}

static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	phy_interface_t phy_iface = intf->phy_interface;
	u32 phy_flags = PHY_BRCM_AUTO_PWRDWN_ENABLE |
			PHY_BRCM_DIS_TXCRXC_NOENRGY |
			PHY_BRCM_IDDQ_SUSPEND;
	struct phy_device *phydev = NULL;
	int ret;

	/* Always enable interface clocks */
	bcmasp_core_clock_set_intf(intf, true);

	/* Enable internal PHY or external PHY before any MAC activity */
	if (intf->internal_phy)
		bcmasp_ephy_enable_set(intf, true);
	else
		bcmasp_rgmii_mode_en_set(intf, true);
	bcmasp_configure_port(intf);

	/* This is an ugly quirk but we have not been correctly
	 * interpreting the phy_interface values and we have done that
	 * across different drivers, so at least we are consistent in
	 * our mistakes.
	 *
	 * When the Generic PHY driver is in use either the PHY has
	 * been strapped or programmed correctly by the boot loader so
	 * we should stick to our incorrect interpretation since we
	 * have validated it.
	 *
	 * Now when a dedicated PHY driver is in use, we need to
	 * reverse the meaning of the phy_interface_mode values to
	 * something that the PHY driver will interpret and act on such
	 * that we have two mistakes canceling themselves so to speak.
	 * We only do this for the two modes that GENET driver
	 * officially supports on Broadcom STB chips:
	 * PHY_INTERFACE_MODE_RGMII and PHY_INTERFACE_MODE_RGMII_TXID.
	 * Other modes are not *officially* supported with the boot
	 * loader and the scripted environment generating Device Tree
	 * blobs for those platforms.
	 *
	 * Note that internal PHY and fixed-link configurations are not
	 * affected because they use different phy_interface_t values
	 * or the Generic PHY driver.
	 */
	switch (phy_iface) {
	case PHY_INTERFACE_MODE_RGMII:
		phy_iface = PHY_INTERFACE_MODE_RGMII_ID;
		break;
	case PHY_INTERFACE_MODE_RGMII_TXID:
		phy_iface = PHY_INTERFACE_MODE_RGMII_RXID;
		break;
	default:
		break;
	}

	if (phy_connect) {
		phydev = of_phy_connect(dev, intf->phy_dn,
					bcmasp_adj_link, phy_flags,
					phy_iface);
		if (!phydev) {
			ret = -ENODEV;
			netdev_err(dev, "could not attach to PHY\n");
			goto err_phy_disable;
		}

		if (intf->internal_phy)
			dev->phydev->irq = PHY_MAC_INTERRUPT;

		/* Indicate that the MAC is responsible for PHY PM */
		phydev->mac_managed_pm = true;
	}

	umac_reset(intf);

	umac_init(intf);

	umac_set_hw_addr(intf, dev->dev_addr);

	intf->old_duplex = -1;
	intf->old_link = -1;
	intf->old_pause = -1;

	bcmasp_init_tx(intf);
	netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
	bcmasp_enable_tx(intf, 1);

	bcmasp_init_rx(intf);
	netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);
	bcmasp_enable_rx(intf, 1);

	intf->crc_fwd = !!(umac_rl(intf, UMC_CMD) & UMC_CMD_CRC_FWD);

	bcmasp_netif_start(dev);

	netif_start_queue(dev);

	return 0;

err_phy_disable:
	if (intf->internal_phy)
		bcmasp_ephy_enable_set(intf, false);
	else
		bcmasp_rgmii_mode_en_set(intf, false);
	return ret;
}

static int bcmasp_open(struct net_device *dev)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	int ret;

	netif_dbg(intf, ifup, dev, "bcmasp open\n");

	ret = bcmasp_alloc_buffers(intf);
	if (ret)
		return ret;

	ret = clk_prepare_enable(intf->parent->clk);
	if (ret)
		goto err_free_mem;

	ret = bcmasp_netif_init(dev, true);
	if (ret) {
		clk_disable_unprepare(intf->parent->clk);
		goto err_free_mem;
	}

	return ret;

err_free_mem:
	bcmasp_reclaim_free_buffers(intf);

	return ret;
}

static void bcmasp_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	netif_dbg(intf, tx_err, dev, "transmit timeout!\n");
	intf->mib.tx_timeout_cnt++;
}

static int bcmasp_get_phys_port_name(struct net_device *dev,
				     char *name, size_t len)
{
	struct bcmasp_intf *intf = netdev_priv(dev);

	if (snprintf(name, len, "p%d", intf->port) >= len)
		return -EINVAL;

	return 0;
}

static void bcmasp_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *stats)
{
	struct bcmasp_intf *intf = netdev_priv(dev);
	struct bcmasp_intf_stats64 *lstats;
	unsigned int start;

	lstats = &intf->stats64;

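	/* Pairs with u64_stats_update_begin()/end() in the datapath; retry
	 * the snapshot if a writer updated the counters while we read them.
	 */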
	do {
		start = u64_stats_fetch_begin(&lstats->syncp);
		stats->rx_packets = u64_stats_read(&lstats->rx_packets);
		stats->rx_bytes = u64_stats_read(&lstats->rx_bytes);
		stats->rx_dropped = u64_stats_read(&lstats->rx_dropped);
		stats->rx_crc_errors = u64_stats_read(&lstats->rx_crc_errs);
		stats->rx_frame_errors = u64_stats_read(&lstats->rx_sym_errs);
		stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;

		stats->tx_packets = u64_stats_read(&lstats->tx_packets);
		stats->tx_bytes = u64_stats_read(&lstats->tx_bytes);
	} while (u64_stats_fetch_retry(&lstats->syncp, start));
}

static const struct net_device_ops bcmasp_netdev_ops = {
	.ndo_open		= bcmasp_open,
	.ndo_stop		= bcmasp_stop,
	.ndo_start_xmit		= bcmasp_xmit,
	.ndo_tx_timeout		= bcmasp_tx_timeout,
	.ndo_set_rx_mode	= bcmasp_set_rx_mode,
	.ndo_get_phys_port_name	= bcmasp_get_phys_port_name,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_get_stats64	= bcmasp_get_stats64,
};

static void bcmasp_map_res(struct bcmasp_priv *priv, struct bcmasp_intf *intf)
{
	/* Per port */
	intf->res.umac = priv->base + UMC_OFFSET(intf);
	intf->res.umac2fb = priv->base + (priv->hw_info->umac2fb +
					  (intf->port * 0x4));
	intf->res.rgmii = priv->base + RGMII_OFFSET(intf);

	/* Per ch */
	intf->tx_spb_dma = priv->base + TX_SPB_DMA_OFFSET(intf);
	intf->res.tx_spb_ctrl = priv->base + TX_SPB_CTRL_OFFSET(intf);
	intf->res.tx_spb_top = priv->base + TX_SPB_TOP_OFFSET(intf);
	intf->res.tx_epkt_core = priv->base + TX_EPKT_C_OFFSET(intf);
	intf->res.tx_pause_ctrl = priv->base + TX_PAUSE_CTRL_OFFSET(intf);

	intf->rx_edpkt_dma = priv->base + RX_EDPKT_DMA_OFFSET(intf);
	intf->rx_edpkt_cfg = priv->base + RX_EDPKT_CFG_OFFSET(intf);
}

#define MAX_IRQ_STR_LEN		64
struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv,
					    struct device_node *ndev_dn, int i)
{
	struct device *dev = &priv->pdev->dev;
	struct bcmasp_intf *intf;
	struct net_device *ndev;
	int ch, port, ret;

	if (of_property_read_u32(ndev_dn, "reg", &port)) {
		dev_warn(dev, "%s: invalid port number\n", ndev_dn->name);
		goto err;
	}

	if (of_property_read_u32(ndev_dn, "brcm,channel", &ch)) {
		dev_warn(dev, "%s: invalid ch number\n", ndev_dn->name);
		goto err;
	}

	ndev = alloc_etherdev(sizeof(struct bcmasp_intf));
	if (!ndev) {
		dev_warn(dev, "%s: unable to alloc ndev\n", ndev_dn->name);
		goto err;
	}
	intf = netdev_priv(ndev);

	intf->parent = priv;
	intf->ndev = ndev;
	intf->channel = ch;
	intf->port = port;
	intf->ndev_dn = ndev_dn;
	intf->index = i;

	ret = of_get_phy_mode(ndev_dn, &intf->phy_interface);
	if (ret < 0) {
		dev_err(dev, "invalid PHY mode property\n");
		goto err_free_netdev;
	}

	if (intf->phy_interface == PHY_INTERFACE_MODE_INTERNAL)
		intf->internal_phy = true;

	intf->phy_dn = of_parse_phandle(ndev_dn, "phy-handle", 0);
	if (!intf->phy_dn && of_phy_is_fixed_link(ndev_dn)) {
		ret = of_phy_register_fixed_link(ndev_dn);
		if (ret) {
			dev_warn(dev, "%s: failed to register fixed PHY\n",
				 ndev_dn->name);
			goto err_free_netdev;
		}
		intf->phy_dn = ndev_dn;
	}

	/* Map resource */
	bcmasp_map_res(priv, intf);

	if ((!phy_interface_mode_is_rgmii(intf->phy_interface) &&
	     intf->phy_interface != PHY_INTERFACE_MODE_MII &&
	     intf->phy_interface != PHY_INTERFACE_MODE_INTERNAL) ||
	    (intf->port != 1 && intf->internal_phy)) {
		netdev_err(intf->ndev, "invalid PHY mode: %s for port %d\n",
			   phy_modes(intf->phy_interface), intf->port);
		ret = -EINVAL;
		goto err_free_netdev;
	}

	ret = of_get_ethdev_address(ndev_dn, ndev);
	if (ret) {
		netdev_warn(ndev, "using random Ethernet MAC\n");
		eth_hw_addr_random(ndev);
	}

	SET_NETDEV_DEV(ndev, dev);
	intf->ops = &bcmasp_intf_ops;
	ndev->netdev_ops = &bcmasp_netdev_ops;
	ndev->ethtool_ops = &bcmasp_ethtool_ops;
	intf->msg_enable = netif_msg_init(-1, NETIF_MSG_DRV |
					  NETIF_MSG_PROBE |
					  NETIF_MSG_LINK);
	ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			  NETIF_F_RXCSUM;
	ndev->hw_features |= ndev->features;
	ndev->needed_headroom += sizeof(struct bcmasp_pkt_offload);

	return intf;

err_free_netdev:
	free_netdev(ndev);
err:
	return NULL;
}

void bcmasp_interface_destroy(struct bcmasp_intf *intf)
{
	if (intf->ndev->reg_state == NETREG_REGISTERED)
		unregister_netdev(intf->ndev);
	if (of_phy_is_fixed_link(intf->ndev_dn))
		of_phy_deregister_fixed_link(intf->ndev_dn);
	free_netdev(intf->ndev);
}

static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf)
{
	struct net_device *ndev = intf->ndev;
	u32 reg;

	reg = umac_rl(intf, UMC_MPD_CTRL);
	if (intf->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
		reg |= UMC_MPD_CTRL_MPD_EN;
	reg &= ~UMC_MPD_CTRL_PSW_EN;
	if (intf->wolopts & WAKE_MAGICSECURE) {
		/* Program the SecureOn password */
		umac_wl(intf, get_unaligned_be16(&intf->sopass[0]),
			UMC_PSW_MS);
		umac_wl(intf, get_unaligned_be32(&intf->sopass[2]),
			UMC_PSW_LS);
		reg |= UMC_MPD_CTRL_PSW_EN;
	}
	umac_wl(intf, reg, UMC_MPD_CTRL);

	if (intf->wolopts & WAKE_FILTER)
		bcmasp_netfilt_suspend(intf);

	/* Bring UniMAC out of reset if needed and enable RX */
	reg = umac_rl(intf, UMC_CMD);
	if (reg & UMC_CMD_SW_RESET)
		reg &= ~UMC_CMD_SW_RESET;

	reg |= UMC_CMD_RX_EN | UMC_CMD_PROMISC;
	umac_wl(intf, reg, UMC_CMD);

	umac_enable_set(intf, UMC_CMD_RX_EN, 1);

	if (intf->parent->wol_irq > 0) {
		wakeup_intr2_core_wl(intf->parent, 0xffffffff,
				     ASP_WAKEUP_INTR2_MASK_CLEAR);
	}

	if (intf->eee.eee_enabled && intf->parent->eee_fixup)
		intf->parent->eee_fixup(intf, true);

	netif_dbg(intf, wol, ndev, "entered WOL mode\n");
}

int bcmasp_interface_suspend(struct bcmasp_intf *intf)
{
	struct device *kdev = &intf->parent->pdev->dev;
	struct net_device *dev = intf->ndev;

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);

	bcmasp_netif_deinit(dev);

	if (!intf->wolopts) {
		if (intf->internal_phy)
			bcmasp_ephy_enable_set(intf, false);
		else
			bcmasp_rgmii_mode_en_set(intf, false);

		/* If Wake-on-LAN is disabled, we can safely
		 * disable the network interface clocks.
		 */
		bcmasp_core_clock_set_intf(intf, false);
	}

	if (device_may_wakeup(kdev) && intf->wolopts)
		bcmasp_suspend_to_wol(intf);

	clk_disable_unprepare(intf->parent->clk);

	return 0;
}

static void bcmasp_resume_from_wol(struct bcmasp_intf *intf)
{
	u32 reg;

	if (intf->eee.eee_enabled && intf->parent->eee_fixup)
		intf->parent->eee_fixup(intf, false);

	reg = umac_rl(intf, UMC_MPD_CTRL);
	reg &= ~UMC_MPD_CTRL_MPD_EN;
	umac_wl(intf, reg, UMC_MPD_CTRL);

	if (intf->parent->wol_irq > 0) {
		wakeup_intr2_core_wl(intf->parent, 0xffffffff,
				     ASP_WAKEUP_INTR2_MASK_SET);
	}
}

int bcmasp_interface_resume(struct bcmasp_intf *intf)
{
	struct net_device *dev = intf->ndev;
	int ret;

	if (!netif_running(dev))
		return 0;

	ret = clk_prepare_enable(intf->parent->clk);
	if (ret)
		return ret;

	ret = bcmasp_netif_init(dev, false);
	if (ret)
		goto out;

	bcmasp_resume_from_wol(intf);

	if (intf->eee.eee_enabled)
		bcmasp_eee_enable_set(intf, true);

	netif_device_attach(dev);

	return 0;

out:
	clk_disable_unprepare(intf->parent->clk);
	return ret;
}

