// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause

/* Gigabit Ethernet driver for Mellanox BlueField SoC
 *
 * Copyright (C) 2020-2021 NVIDIA CORPORATION & AFFILIATES
 */
7
8#include <linux/acpi.h>
9#include <linux/device.h>
10#include <linux/dma-mapping.h>
11#include <linux/etherdevice.h>
12#include <linux/interrupt.h>
13#include <linux/iopoll.h>
14#include <linux/module.h>
15#include <linux/phy.h>
16#include <linux/platform_device.h>
17#include <linux/skbuff.h>
18
19#include "mlxbf_gige.h"
20#include "mlxbf_gige_regs.h"
21
22/* Allocate SKB whose payload pointer aligns with the Bluefield
23 * hardware DMA limitation, i.e. DMA operation can't cross
24 * a 4KB boundary. A maximum packet size of 2KB is assumed in the
25 * alignment formula. The alignment logic overallocates an SKB,
26 * and then adjusts the headroom so that the SKB data pointer is
27 * naturally aligned to a 2KB boundary.
28 */
29struct sk_buff *mlxbf_gige_alloc_skb(struct mlxbf_gige *priv,
30 unsigned int map_len,
31 dma_addr_t *buf_dma,
32 enum dma_data_direction dir)
33{
34 struct sk_buff *skb;
35 u64 addr, offset;
36
37 /* Overallocate the SKB so that any headroom adjustment (to
38 * provide 2KB natural alignment) does not exceed payload area
39 */
40 skb = netdev_alloc_skb(dev: priv->netdev, MLXBF_GIGE_DEFAULT_BUF_SZ * 2);
41 if (!skb)
42 return NULL;
43
44 /* Adjust the headroom so that skb->data is naturally aligned to
45 * a 2KB boundary, which is the maximum packet size supported.
46 */
47 addr = (long)skb->data;
48 offset = (addr + MLXBF_GIGE_DEFAULT_BUF_SZ - 1) &
49 ~(MLXBF_GIGE_DEFAULT_BUF_SZ - 1);
50 offset -= addr;
51 if (offset)
52 skb_reserve(skb, len: offset);
53
54 /* Return streaming DMA mapping to caller */
55 *buf_dma = dma_map_single(priv->dev, skb->data, map_len, dir);
56 if (dma_mapping_error(dev: priv->dev, dma_addr: *buf_dma)) {
57 dev_kfree_skb(skb);
58 *buf_dma = (dma_addr_t)0;
59 return NULL;
60 }
61
62 return skb;
63}
64
65static void mlxbf_gige_initial_mac(struct mlxbf_gige *priv)
66{
67 u8 mac[ETH_ALEN];
68 u64 local_mac;
69
70 eth_zero_addr(addr: mac);
71 mlxbf_gige_get_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX,
72 dmac: &local_mac);
73 u64_to_ether_addr(u: local_mac, addr: mac);
74
75 if (is_valid_ether_addr(addr: mac)) {
76 eth_hw_addr_set(dev: priv->netdev, addr: mac);
77 } else {
78 /* Provide a random MAC if for some reason the device has
79 * not been configured with a valid MAC address already.
80 */
81 eth_hw_addr_random(dev: priv->netdev);
82 }
83
84 local_mac = ether_addr_to_u64(addr: priv->netdev->dev_addr);
85 mlxbf_gige_set_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX,
86 dmac: local_mac);
87}
88
89static void mlxbf_gige_cache_stats(struct mlxbf_gige *priv)
90{
91 struct mlxbf_gige_stats *p;
92
93 /* Cache stats that will be cleared by clean port operation */
94 p = &priv->stats;
95 p->rx_din_dropped_pkts += readq(addr: priv->base +
96 MLXBF_GIGE_RX_DIN_DROP_COUNTER);
97 p->rx_filter_passed_pkts += readq(addr: priv->base +
98 MLXBF_GIGE_RX_PASS_COUNTER_ALL);
99 p->rx_filter_discard_pkts += readq(addr: priv->base +
100 MLXBF_GIGE_RX_DISC_COUNTER_ALL);
101}
102
103static int mlxbf_gige_clean_port(struct mlxbf_gige *priv)
104{
105 u64 control;
106 u64 temp;
107 int err;
108
109 /* Set the CLEAN_PORT_EN bit to trigger SW reset */
110 control = readq(addr: priv->base + MLXBF_GIGE_CONTROL);
111 control |= MLXBF_GIGE_CONTROL_CLEAN_PORT_EN;
112 writeq(val: control, addr: priv->base + MLXBF_GIGE_CONTROL);
113
114 /* Ensure completion of "clean port" write before polling status */
115 mb();
116
117 err = readq_poll_timeout_atomic(priv->base + MLXBF_GIGE_STATUS, temp,
118 (temp & MLXBF_GIGE_STATUS_READY),
119 100, 100000);
120
121 /* Clear the CLEAN_PORT_EN bit at end of this loop */
122 control = readq(addr: priv->base + MLXBF_GIGE_CONTROL);
123 control &= ~MLXBF_GIGE_CONTROL_CLEAN_PORT_EN;
124 writeq(val: control, addr: priv->base + MLXBF_GIGE_CONTROL);
125
126 return err;
127}
128
129static int mlxbf_gige_open(struct net_device *netdev)
130{
131 struct mlxbf_gige *priv = netdev_priv(dev: netdev);
132 struct phy_device *phydev = netdev->phydev;
133 u64 int_en;
134 int err;
135
136 err = mlxbf_gige_request_irqs(priv);
137 if (err)
138 return err;
139 mlxbf_gige_cache_stats(priv);
140 err = mlxbf_gige_clean_port(priv);
141 if (err)
142 goto free_irqs;
143
144 /* Clear driver's valid_polarity to match hardware,
145 * since the above call to clean_port() resets the
146 * receive polarity used by hardware.
147 */
148 priv->valid_polarity = 0;
149
150 err = mlxbf_gige_rx_init(priv);
151 if (err)
152 goto free_irqs;
153 err = mlxbf_gige_tx_init(priv);
154 if (err)
155 goto rx_deinit;
156
157 phy_start(phydev);
158
159 netif_napi_add(dev: netdev, napi: &priv->napi, poll: mlxbf_gige_poll);
160 napi_enable(n: &priv->napi);
161 netif_start_queue(dev: netdev);
162
163 /* Set bits in INT_EN that we care about */
164 int_en = MLXBF_GIGE_INT_EN_HW_ACCESS_ERROR |
165 MLXBF_GIGE_INT_EN_TX_CHECKSUM_INPUTS |
166 MLXBF_GIGE_INT_EN_TX_SMALL_FRAME_SIZE |
167 MLXBF_GIGE_INT_EN_TX_PI_CI_EXCEED_WQ_SIZE |
168 MLXBF_GIGE_INT_EN_SW_CONFIG_ERROR |
169 MLXBF_GIGE_INT_EN_SW_ACCESS_ERROR |
170 MLXBF_GIGE_INT_EN_RX_RECEIVE_PACKET;
171
172 /* Ensure completion of all initialization before enabling interrupts */
173 mb();
174
175 writeq(val: int_en, addr: priv->base + MLXBF_GIGE_INT_EN);
176
177 return 0;
178
179rx_deinit:
180 mlxbf_gige_rx_deinit(priv);
181
182free_irqs:
183 mlxbf_gige_free_irqs(priv);
184 return err;
185}
186
187static int mlxbf_gige_stop(struct net_device *netdev)
188{
189 struct mlxbf_gige *priv = netdev_priv(dev: netdev);
190
191 writeq(val: 0, addr: priv->base + MLXBF_GIGE_INT_EN);
192 netif_stop_queue(dev: netdev);
193 napi_disable(n: &priv->napi);
194 netif_napi_del(napi: &priv->napi);
195 mlxbf_gige_free_irqs(priv);
196
197 phy_stop(phydev: netdev->phydev);
198
199 mlxbf_gige_rx_deinit(priv);
200 mlxbf_gige_tx_deinit(priv);
201 mlxbf_gige_cache_stats(priv);
202 mlxbf_gige_clean_port(priv);
203
204 return 0;
205}
206
207static int mlxbf_gige_eth_ioctl(struct net_device *netdev,
208 struct ifreq *ifr, int cmd)
209{
210 if (!(netif_running(dev: netdev)))
211 return -EINVAL;
212
213 return phy_mii_ioctl(phydev: netdev->phydev, ifr, cmd);
214}
215
216static void mlxbf_gige_set_rx_mode(struct net_device *netdev)
217{
218 struct mlxbf_gige *priv = netdev_priv(dev: netdev);
219 bool new_promisc_enabled;
220
221 new_promisc_enabled = netdev->flags & IFF_PROMISC;
222
223 /* Only write to the hardware registers if the new setting
224 * of promiscuous mode is different from the current one.
225 */
226 if (new_promisc_enabled != priv->promisc_enabled) {
227 priv->promisc_enabled = new_promisc_enabled;
228
229 if (new_promisc_enabled)
230 mlxbf_gige_enable_promisc(priv);
231 else
232 mlxbf_gige_disable_promisc(priv);
233 }
234}
235
236static void mlxbf_gige_get_stats64(struct net_device *netdev,
237 struct rtnl_link_stats64 *stats)
238{
239 struct mlxbf_gige *priv = netdev_priv(dev: netdev);
240
241 netdev_stats_to_stats64(stats64: stats, netdev_stats: &netdev->stats);
242
243 stats->rx_length_errors = priv->stats.rx_truncate_errors;
244 stats->rx_fifo_errors = priv->stats.rx_din_dropped_pkts +
245 readq(addr: priv->base + MLXBF_GIGE_RX_DIN_DROP_COUNTER);
246 stats->rx_crc_errors = priv->stats.rx_mac_errors;
247 stats->rx_errors = stats->rx_length_errors +
248 stats->rx_fifo_errors +
249 stats->rx_crc_errors;
250
251 stats->tx_fifo_errors = priv->stats.tx_fifo_full;
252 stats->tx_errors = stats->tx_fifo_errors;
253}
254
255static const struct net_device_ops mlxbf_gige_netdev_ops = {
256 .ndo_open = mlxbf_gige_open,
257 .ndo_stop = mlxbf_gige_stop,
258 .ndo_start_xmit = mlxbf_gige_start_xmit,
259 .ndo_set_mac_address = eth_mac_addr,
260 .ndo_validate_addr = eth_validate_addr,
261 .ndo_eth_ioctl = mlxbf_gige_eth_ioctl,
262 .ndo_set_rx_mode = mlxbf_gige_set_rx_mode,
263 .ndo_get_stats64 = mlxbf_gige_get_stats64,
264};
265
266static void mlxbf_gige_bf2_adjust_link(struct net_device *netdev)
267{
268 struct phy_device *phydev = netdev->phydev;
269
270 phy_print_status(phydev);
271}
272
273static void mlxbf_gige_bf3_adjust_link(struct net_device *netdev)
274{
275 struct mlxbf_gige *priv = netdev_priv(dev: netdev);
276 struct phy_device *phydev = netdev->phydev;
277 u8 sgmii_mode;
278 u16 ipg_size;
279 u32 val;
280
281 if (phydev->link && phydev->speed != priv->prev_speed) {
282 switch (phydev->speed) {
283 case 1000:
284 ipg_size = MLXBF_GIGE_1G_IPG_SIZE;
285 sgmii_mode = MLXBF_GIGE_1G_SGMII_MODE;
286 break;
287 case 100:
288 ipg_size = MLXBF_GIGE_100M_IPG_SIZE;
289 sgmii_mode = MLXBF_GIGE_100M_SGMII_MODE;
290 break;
291 case 10:
292 ipg_size = MLXBF_GIGE_10M_IPG_SIZE;
293 sgmii_mode = MLXBF_GIGE_10M_SGMII_MODE;
294 break;
295 default:
296 return;
297 }
298
299 val = readl(addr: priv->plu_base + MLXBF_GIGE_PLU_TX_REG0);
300 val &= ~(MLXBF_GIGE_PLU_TX_IPG_SIZE_MASK | MLXBF_GIGE_PLU_TX_SGMII_MODE_MASK);
301 val |= FIELD_PREP(MLXBF_GIGE_PLU_TX_IPG_SIZE_MASK, ipg_size);
302 val |= FIELD_PREP(MLXBF_GIGE_PLU_TX_SGMII_MODE_MASK, sgmii_mode);
303 writel(val, addr: priv->plu_base + MLXBF_GIGE_PLU_TX_REG0);
304
305 val = readl(addr: priv->plu_base + MLXBF_GIGE_PLU_RX_REG0);
306 val &= ~MLXBF_GIGE_PLU_RX_SGMII_MODE_MASK;
307 val |= FIELD_PREP(MLXBF_GIGE_PLU_RX_SGMII_MODE_MASK, sgmii_mode);
308 writel(val, addr: priv->plu_base + MLXBF_GIGE_PLU_RX_REG0);
309
310 priv->prev_speed = phydev->speed;
311 }
312
313 phy_print_status(phydev);
314}
315
316static void mlxbf_gige_bf2_set_phy_link_mode(struct phy_device *phydev)
317{
318 /* MAC only supports 1000T full duplex mode */
319 phy_remove_link_mode(phydev, link_mode: ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
320 phy_remove_link_mode(phydev, link_mode: ETHTOOL_LINK_MODE_100baseT_Full_BIT);
321 phy_remove_link_mode(phydev, link_mode: ETHTOOL_LINK_MODE_100baseT_Half_BIT);
322 phy_remove_link_mode(phydev, link_mode: ETHTOOL_LINK_MODE_10baseT_Full_BIT);
323 phy_remove_link_mode(phydev, link_mode: ETHTOOL_LINK_MODE_10baseT_Half_BIT);
324
325 /* Only symmetric pause with flow control enabled is supported so no
326 * need to negotiate pause.
327 */
328 linkmode_clear_bit(nr: ETHTOOL_LINK_MODE_Pause_BIT, addr: phydev->advertising);
329 linkmode_clear_bit(nr: ETHTOOL_LINK_MODE_Asym_Pause_BIT, addr: phydev->advertising);
330}
331
332static void mlxbf_gige_bf3_set_phy_link_mode(struct phy_device *phydev)
333{
334 /* MAC only supports full duplex mode */
335 phy_remove_link_mode(phydev, link_mode: ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
336 phy_remove_link_mode(phydev, link_mode: ETHTOOL_LINK_MODE_100baseT_Half_BIT);
337 phy_remove_link_mode(phydev, link_mode: ETHTOOL_LINK_MODE_10baseT_Half_BIT);
338
339 /* Only symmetric pause with flow control enabled is supported so no
340 * need to negotiate pause.
341 */
342 linkmode_clear_bit(nr: ETHTOOL_LINK_MODE_Pause_BIT, addr: phydev->advertising);
343 linkmode_clear_bit(nr: ETHTOOL_LINK_MODE_Asym_Pause_BIT, addr: phydev->advertising);
344}
345
346static struct mlxbf_gige_link_cfg mlxbf_gige_link_cfgs[] = {
347 [MLXBF_GIGE_VERSION_BF2] = {
348 .set_phy_link_mode = mlxbf_gige_bf2_set_phy_link_mode,
349 .adjust_link = mlxbf_gige_bf2_adjust_link,
350 .phy_mode = PHY_INTERFACE_MODE_GMII
351 },
352 [MLXBF_GIGE_VERSION_BF3] = {
353 .set_phy_link_mode = mlxbf_gige_bf3_set_phy_link_mode,
354 .adjust_link = mlxbf_gige_bf3_adjust_link,
355 .phy_mode = PHY_INTERFACE_MODE_SGMII
356 }
357};
358
359static int mlxbf_gige_probe(struct platform_device *pdev)
360{
361 struct phy_device *phydev;
362 struct net_device *netdev;
363 struct mlxbf_gige *priv;
364 void __iomem *llu_base;
365 void __iomem *plu_base;
366 void __iomem *base;
367 int addr, phy_irq;
368 u64 control;
369 int err;
370
371 base = devm_platform_ioremap_resource(pdev, index: MLXBF_GIGE_RES_MAC);
372 if (IS_ERR(ptr: base))
373 return PTR_ERR(ptr: base);
374
375 llu_base = devm_platform_ioremap_resource(pdev, index: MLXBF_GIGE_RES_LLU);
376 if (IS_ERR(ptr: llu_base))
377 return PTR_ERR(ptr: llu_base);
378
379 plu_base = devm_platform_ioremap_resource(pdev, index: MLXBF_GIGE_RES_PLU);
380 if (IS_ERR(ptr: plu_base))
381 return PTR_ERR(ptr: plu_base);
382
383 /* Perform general init of GigE block */
384 control = readq(addr: base + MLXBF_GIGE_CONTROL);
385 control |= MLXBF_GIGE_CONTROL_PORT_EN;
386 writeq(val: control, addr: base + MLXBF_GIGE_CONTROL);
387
388 netdev = devm_alloc_etherdev(&pdev->dev, sizeof(*priv));
389 if (!netdev)
390 return -ENOMEM;
391
392 SET_NETDEV_DEV(netdev, &pdev->dev);
393 netdev->netdev_ops = &mlxbf_gige_netdev_ops;
394 netdev->ethtool_ops = &mlxbf_gige_ethtool_ops;
395 priv = netdev_priv(dev: netdev);
396 priv->netdev = netdev;
397
398 platform_set_drvdata(pdev, data: priv);
399 priv->dev = &pdev->dev;
400 priv->pdev = pdev;
401
402 spin_lock_init(&priv->lock);
403
404 priv->hw_version = readq(addr: base + MLXBF_GIGE_VERSION);
405
406 /* Attach MDIO device */
407 err = mlxbf_gige_mdio_probe(pdev, priv);
408 if (err)
409 return err;
410
411 priv->base = base;
412 priv->llu_base = llu_base;
413 priv->plu_base = plu_base;
414
415 priv->rx_q_entries = MLXBF_GIGE_DEFAULT_RXQ_SZ;
416 priv->tx_q_entries = MLXBF_GIGE_DEFAULT_TXQ_SZ;
417
418 /* Write initial MAC address to hardware */
419 mlxbf_gige_initial_mac(priv);
420
421 err = dma_set_mask_and_coherent(dev: &pdev->dev, DMA_BIT_MASK(64));
422 if (err) {
423 dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
424 goto out;
425 }
426
427 priv->error_irq = platform_get_irq(pdev, MLXBF_GIGE_ERROR_INTR_IDX);
428 priv->rx_irq = platform_get_irq(pdev, MLXBF_GIGE_RECEIVE_PKT_INTR_IDX);
429 priv->llu_plu_irq = platform_get_irq(pdev, MLXBF_GIGE_LLU_PLU_INTR_IDX);
430
431 phy_irq = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(&pdev->dev), name: "phy-gpios", index: 0);
432 if (phy_irq < 0) {
433 dev_err(&pdev->dev, "Error getting PHY irq. Use polling instead");
434 phy_irq = PHY_POLL;
435 }
436
437 phydev = phy_find_first(bus: priv->mdiobus);
438 if (!phydev) {
439 err = -ENODEV;
440 goto out;
441 }
442
443 addr = phydev->mdio.addr;
444 priv->mdiobus->irq[addr] = phy_irq;
445 phydev->irq = phy_irq;
446
447 err = phy_connect_direct(dev: netdev, phydev,
448 handler: mlxbf_gige_link_cfgs[priv->hw_version].adjust_link,
449 interface: mlxbf_gige_link_cfgs[priv->hw_version].phy_mode);
450 if (err) {
451 dev_err(&pdev->dev, "Could not attach to PHY\n");
452 goto out;
453 }
454
455 mlxbf_gige_link_cfgs[priv->hw_version].set_phy_link_mode(phydev);
456
457 /* Display information about attached PHY device */
458 phy_attached_info(phydev);
459
460 err = register_netdev(dev: netdev);
461 if (err) {
462 dev_err(&pdev->dev, "Failed to register netdev\n");
463 phy_disconnect(phydev);
464 goto out;
465 }
466
467 return 0;
468
469out:
470 mlxbf_gige_mdio_remove(priv);
471 return err;
472}
473
474static void mlxbf_gige_remove(struct platform_device *pdev)
475{
476 struct mlxbf_gige *priv = platform_get_drvdata(pdev);
477
478 unregister_netdev(dev: priv->netdev);
479 phy_disconnect(phydev: priv->netdev->phydev);
480 mlxbf_gige_mdio_remove(priv);
481 platform_set_drvdata(pdev, NULL);
482}
483
484static void mlxbf_gige_shutdown(struct platform_device *pdev)
485{
486 struct mlxbf_gige *priv = platform_get_drvdata(pdev);
487
488 writeq(val: 0, addr: priv->base + MLXBF_GIGE_INT_EN);
489 mlxbf_gige_clean_port(priv);
490}
491
492static const struct acpi_device_id __maybe_unused mlxbf_gige_acpi_match[] = {
493 { "MLNXBF17", 0 },
494 {},
495};
496MODULE_DEVICE_TABLE(acpi, mlxbf_gige_acpi_match);
497
498static struct platform_driver mlxbf_gige_driver = {
499 .probe = mlxbf_gige_probe,
500 .remove_new = mlxbf_gige_remove,
501 .shutdown = mlxbf_gige_shutdown,
502 .driver = {
503 .name = KBUILD_MODNAME,
504 .acpi_match_table = ACPI_PTR(mlxbf_gige_acpi_match),
505 },
506};
507
508module_platform_driver(mlxbf_gige_driver);
509
510MODULE_DESCRIPTION("Mellanox BlueField SoC Gigabit Ethernet Driver");
511MODULE_AUTHOR("David Thompson <davthompson@nvidia.com>");
512MODULE_AUTHOR("Asmaa Mnebhi <asmaa@nvidia.com>");
513MODULE_LICENSE("Dual BSD/GPL");
514

source code of linux/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c