1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Intel IXP4xx Ethernet driver for Linux |
4 | * |
5 | * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl> |
6 | * |
7 | * Ethernet port config (0x00 is not present on IXP42X): |
8 | * |
9 | * logical port 0x00 0x10 0x20 |
10 | * NPE 0 (NPE-A) 1 (NPE-B) 2 (NPE-C) |
11 | * physical PortId 2 0 1 |
12 | * TX queue 23 24 25 |
13 | * RX-free queue 26 27 28 |
14 | * TX-done queue is always 31, per-port RX and TX-ready queues are configurable |
15 | * |
16 | * Queue entries: |
17 | * bits 0 -> 1 - NPE ID (RX and TX-done) |
18 | * bits 0 -> 2 - priority (TX, per 802.1D) |
19 | * bits 3 -> 4 - port ID (user-set?) |
20 | * bits 5 -> 31 - physical descriptor address |
21 | */ |
22 | |
23 | #include <linux/delay.h> |
24 | #include <linux/dma-mapping.h> |
25 | #include <linux/dmapool.h> |
26 | #include <linux/etherdevice.h> |
27 | #include <linux/if_vlan.h> |
28 | #include <linux/io.h> |
29 | #include <linux/kernel.h> |
30 | #include <linux/net_tstamp.h> |
31 | #include <linux/of.h> |
32 | #include <linux/of_mdio.h> |
33 | #include <linux/of_net.h> |
34 | #include <linux/phy.h> |
35 | #include <linux/platform_device.h> |
36 | #include <linux/ptp_classify.h> |
37 | #include <linux/slab.h> |
38 | #include <linux/module.h> |
39 | #include <linux/soc/ixp4xx/npe.h> |
40 | #include <linux/soc/ixp4xx/qmgr.h> |
41 | #include <linux/soc/ixp4xx/cpu.h> |
42 | #include <linux/types.h> |
43 | |
44 | #define IXP4XX_ETH_NPEA 0x00 |
45 | #define IXP4XX_ETH_NPEB 0x10 |
46 | #define IXP4XX_ETH_NPEC 0x20 |
47 | |
48 | #include "ixp46x_ts.h" |
49 | |
50 | #define DEBUG_DESC 0 |
51 | #define DEBUG_RX 0 |
52 | #define DEBUG_TX 0 |
53 | #define DEBUG_PKT_BYTES 0 |
54 | #define DEBUG_MDIO 0 |
55 | #define DEBUG_CLOSE 0 |
56 | |
57 | #define DRV_NAME "ixp4xx_eth" |
58 | |
59 | #define MAX_NPES 3 |
60 | |
61 | #define RX_DESCS 64 /* also length of all RX queues */ |
62 | #define TX_DESCS 16 /* also length of all TX queues */ |
63 | #define TXDONE_QUEUE_LEN 64 /* dwords */ |
64 | |
65 | #define POOL_ALLOC_SIZE (sizeof(struct desc) * (RX_DESCS + TX_DESCS)) |
66 | #define REGS_SIZE 0x1000 |
67 | |
68 | /* MRU is said to be 14320 in a code dump, the SW manual says that |
69 | * MRU/MTU is 16320 and includes VLAN and ethernet headers. |
70 | * See "IXP400 Software Programmer's Guide" section 10.3.2, page 161. |
71 | * |
72 | * FIXME: we have chosen the safe default (14320) but if you can test |
73 | * jumboframes, experiment with 16320 and see what happens! |
74 | */ |
75 | #define MAX_MRU (14320 - VLAN_ETH_HLEN) |
76 | #define RX_BUFF_SIZE ALIGN((NET_IP_ALIGN) + MAX_MRU, 4) |
77 | |
78 | #define NAPI_WEIGHT 16 |
79 | #define MDIO_INTERVAL (3 * HZ) |
80 | #define MAX_MDIO_RETRIES 100 /* microseconds, typically 30 cycles */ |
81 | #define MAX_CLOSE_WAIT 1000 /* microseconds, typically 2-3 cycles */ |
82 | |
83 | #define NPE_ID(port_id) ((port_id) >> 4) |
84 | #define PHYSICAL_ID(port_id) ((NPE_ID(port_id) + 2) % 3) |
85 | #define TX_QUEUE(port_id) (NPE_ID(port_id) + 23) |
86 | #define RXFREE_QUEUE(port_id) (NPE_ID(port_id) + 26) |
87 | #define TXDONE_QUEUE 31 |
88 | |
89 | #define PTP_SLAVE_MODE 1 |
90 | #define PTP_MASTER_MODE 2 |
91 | #define PORT2CHANNEL(p) NPE_ID(p->id) |
92 | |
93 | /* TX Control Registers */ |
94 | #define TX_CNTRL0_TX_EN 0x01 |
95 | #define TX_CNTRL0_HALFDUPLEX 0x02 |
96 | #define TX_CNTRL0_RETRY 0x04 |
97 | #define TX_CNTRL0_PAD_EN 0x08 |
98 | #define TX_CNTRL0_APPEND_FCS 0x10 |
99 | #define TX_CNTRL0_2DEFER 0x20 |
100 | #define TX_CNTRL0_RMII 0x40 /* reduced MII */ |
101 | #define TX_CNTRL1_RETRIES 0x0F /* 4 bits */ |
102 | |
103 | /* RX Control Registers */ |
104 | #define RX_CNTRL0_RX_EN 0x01 |
105 | #define RX_CNTRL0_PADSTRIP_EN 0x02 |
106 | #define RX_CNTRL0_SEND_FCS 0x04 |
107 | #define RX_CNTRL0_PAUSE_EN 0x08 |
108 | #define RX_CNTRL0_LOOP_EN 0x10 |
109 | #define RX_CNTRL0_ADDR_FLTR_EN 0x20 |
110 | #define RX_CNTRL0_RX_RUNT_EN 0x40 |
111 | #define RX_CNTRL0_BCAST_DIS 0x80 |
112 | #define RX_CNTRL1_DEFER_EN 0x01 |
113 | |
114 | /* Core Control Register */ |
115 | #define CORE_RESET 0x01 |
116 | #define CORE_RX_FIFO_FLUSH 0x02 |
117 | #define CORE_TX_FIFO_FLUSH 0x04 |
118 | #define CORE_SEND_JAM 0x08 |
119 | #define CORE_MDC_EN 0x10 /* MDIO using NPE-B ETH-0 only */ |
120 | |
121 | #define DEFAULT_TX_CNTRL0 (TX_CNTRL0_TX_EN | TX_CNTRL0_RETRY | \ |
122 | TX_CNTRL0_PAD_EN | TX_CNTRL0_APPEND_FCS | \ |
123 | TX_CNTRL0_2DEFER) |
124 | #define DEFAULT_RX_CNTRL0 RX_CNTRL0_RX_EN |
125 | #define DEFAULT_CORE_CNTRL CORE_MDC_EN |
126 | |
127 | |
/* NPE message codes (command byte of struct msg; see npe_send_message()) */
#define NPE_GETSTATUS 0x00
#define NPE_EDB_SETPORTADDRESS 0x01
#define NPE_EDB_GETMACADDRESSDATABASE 0x02
#define NPE_EDB_SETMACADDRESSSDATABASE 0x03
#define NPE_GETSTATS 0x04
#define NPE_RESETSTATS 0x05
#define NPE_SETMAXFRAMELENGTHS 0x06
#define NPE_VLAN_SETRXTAGMODE 0x07
#define NPE_VLAN_SETDEFAULTRXVID 0x08
#define NPE_VLAN_SETPORTVLANTABLEENTRY 0x09
#define NPE_VLAN_SETPORTVLANTABLERANGE 0x0A
#define NPE_VLAN_SETRXQOSENTRY 0x0B
#define NPE_VLAN_SETPORTIDEXTRACTIONMODE 0x0C
#define NPE_STP_SETBLOCKINGSTATE 0x0D
#define NPE_FW_SETFIREWALLMODE 0x0E
#define NPE_PC_SETFRAMECONTROLDURATIONID 0x0F
#define NPE_PC_SETAPMACTABLE 0x11
#define NPE_SETLOOPBACK_MODE 0x12
#define NPE_PC_SETBSSIDTABLE 0x13
#define NPE_ADDRESS_FILTER_CONFIG 0x14
#define NPE_APPENDFCSCONFIG 0x15
#define NPE_NOTIFY_MAC_RECOVERY_DONE 0x16
#define NPE_MAC_RECOVERY_START 0x17
152 | |
153 | |
154 | #ifdef __ARMEB__ |
155 | typedef struct sk_buff buffer_t; |
156 | #define free_buffer dev_kfree_skb |
157 | #define free_buffer_irq dev_consume_skb_irq |
158 | #else |
159 | typedef void buffer_t; |
160 | #define free_buffer kfree |
161 | #define free_buffer_irq kfree |
162 | #endif |
163 | |
164 | /* Information about built-in Ethernet MAC interfaces */ |
/* Information about built-in Ethernet MAC interfaces */
struct eth_plat_info {
	u8 rxq;		/* configurable, currently 0 - 31 only */
	u8 txreadyq;	/* queue holding free TX descriptors for this port */
	u8 hwaddr[ETH_ALEN];	/* MAC address of the interface */
	u8 npe;		/* NPE instance used by this interface */
	bool has_mdio;	/* If this instance has an MDIO bus */
};
172 | |
/* MAC register block layout. The trailing comments give the byte offset
 * of each field from the register base; the __resN padding members exist
 * only to keep those offsets correct — do not reorder or resize fields.
 */
struct eth_regs {
	u32 tx_control[2], __res1[2];		/* 000 */
	u32 rx_control[2], __res2[2];		/* 010 */
	u32 random_seed, __res3[3];		/* 020 */
	u32 partial_empty_threshold, __res4;	/* 030 */
	u32 partial_full_threshold, __res5;	/* 038 */
	u32 tx_start_bytes, __res6[3];		/* 040 */
	u32 tx_deferral, rx_deferral, __res7[2];/* 050 */
	u32 tx_2part_deferral[2], __res8[2];	/* 060 */
	u32 slot_time, __res9[3];		/* 070 */
	u32 mdio_command[4];			/* 080 */
	u32 mdio_status[4];			/* 090 */
	u32 mcast_mask[6], __res10[2];		/* 0A0 */
	u32 mcast_addr[6], __res11[2];		/* 0C0 */
	u32 int_clock_threshold, __res12[3];	/* 0E0 */
	u32 hw_addr[6], __res13[61];		/* 0F0 */
	u32 core_control;			/* 1FC */
};
191 | |
/* Per-port driver state, stored as netdev_priv() of the net_device */
struct port {
	struct eth_regs __iomem *regs;		/* MAC MMIO registers */
	struct ixp46x_ts_regs __iomem *timesync_regs; /* IXP46x PTP block */
	int phc_index;		/* PTP hardware clock index, <0 if not found */
	struct npe *npe;	/* NPE handling this port's traffic */
	struct net_device *netdev;
	struct napi_struct napi;
	struct eth_plat_info *plat;	/* platform config (queues, MAC) */
	buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
	struct desc *desc_tab;	/* coherent */
	dma_addr_t desc_tab_phys;	/* bus address of desc_tab */
	int id;			/* logical port ID */
	int speed, duplex;	/* last link state seen by adjust_link */
	u8 firmware[4];		/* NPE firmware revision, for ethtool */
	int hwts_tx_en;		/* HW TX timestamping enabled */
	int hwts_rx_en;		/* 0, PTP_SLAVE_MODE or PTP_MASTER_MODE */
};
209 | |
/* NPE message structure. The byte order of the fields is swapped between
 * big- and little-endian builds so that the bytes land where the NPE
 * firmware expects them regardless of CPU endianness.
 */
struct msg {
#ifdef __ARMEB__
	u8 cmd, eth_id, byte2, byte3;
	u8 byte4, byte5, byte6, byte7;
#else
	u8 byte3, byte2, eth_id, cmd;
	u8 byte7, byte6, byte5, byte4;
#endif
};
220 | |
/* Ethernet packet descriptor, shared with the NPE. As with struct msg,
 * field order within each 32-bit word is reversed on little-endian CPUs
 * so the in-memory layout matches the NPE's big-endian view.
 */
struct desc {
	u32 next;		/* pointer to next buffer, unused */

#ifdef __ARMEB__
	u16 buf_len;		/* buffer length */
	u16 pkt_len;		/* packet length */
	u32 data;		/* pointer to data buffer in RAM */
	u8 dest_id;
	u8 src_id;
	u16 flags;
	u8 qos;
	u8 padlen;
	u16 vlan_tci;
#else
	u16 pkt_len;		/* packet length */
	u16 buf_len;		/* buffer length */
	u32 data;		/* pointer to data buffer in RAM */
	u16 flags;
	u8 src_id;
	u8 dest_id;
	u16 vlan_tci;
	u8 padlen;
	u8 qos;
#endif

#ifdef __ARMEB__
	u8 dst_mac_0, dst_mac_1, dst_mac_2, dst_mac_3;
	u8 dst_mac_4, dst_mac_5, src_mac_0, src_mac_1;
	u8 src_mac_2, src_mac_3, src_mac_4, src_mac_5;
#else
	u8 dst_mac_3, dst_mac_2, dst_mac_1, dst_mac_0;
	u8 src_mac_1, src_mac_0, dst_mac_5, dst_mac_4;
	u8 src_mac_5, src_mac_4, src_mac_3, src_mac_2;
#endif
};
257 | |
258 | |
259 | #define rx_desc_phys(port, n) ((port)->desc_tab_phys + \ |
260 | (n) * sizeof(struct desc)) |
261 | #define rx_desc_ptr(port, n) (&(port)->desc_tab[n]) |
262 | |
263 | #define tx_desc_phys(port, n) ((port)->desc_tab_phys + \ |
264 | ((n) + RX_DESCS) * sizeof(struct desc)) |
265 | #define tx_desc_ptr(port, n) (&(port)->desc_tab[(n) + RX_DESCS]) |
266 | |
267 | #ifndef __ARMEB__ |
268 | static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt) |
269 | { |
270 | int i; |
271 | for (i = 0; i < cnt; i++) |
272 | dest[i] = swab32(src[i]); |
273 | } |
274 | #endif |
275 | |
276 | static DEFINE_SPINLOCK(mdio_lock); |
277 | static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */ |
278 | static struct mii_bus *mdio_bus; |
279 | static struct device_node *mdio_bus_np; |
280 | static int ports_open; |
281 | static struct port *npe_port_tab[MAX_NPES]; |
282 | static struct dma_pool *dma_pool; |
283 | |
/* Return non-zero if @skb is a PTPv1-over-IPv4 event frame whose source
 * UUID matches @uid_hi/@uid_lo and whose sequence id matches @seqid;
 * 0 otherwise (also when the frame is not PTPv1/IPv4 or is too short).
 * Used to pair an RX frame with the timestamp latched by the hardware.
 */
static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
{
	u8 *data = skb->data;
	unsigned int offset;
	u16 *hi, *id;
	u32 lo;

	if (ptp_classify_raw(skb) != PTP_CLASS_V1_IPV4)
		return 0;

	/* PTP header starts after Ethernet + IPv4 + UDP headers */
	offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;

	if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
		return 0;

	hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
	id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);

	/* low 32 bits of the UUID follow the high 16; may be unaligned */
	memcpy(&lo, &hi[1], sizeof(lo));

	return (uid_hi == ntohs(*hi) &&
		uid_lo == ntohl(lo) &&
		seqid == ntohs(*id));
}
308 | |
/* If RX hardware timestamping is enabled and the timesync unit has a
 * snapshot locked for this channel that matches @skb, attach the
 * hardware timestamp to the skb. The locked snapshot is released
 * (RX_SNAPSHOT_LOCKED written back) whether or not the frame matched.
 */
static void ixp_rx_timestamp(struct port *port, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct ixp46x_ts_regs *regs;
	u64 ns;
	u32 ch, hi, lo, val;
	u16 uid, seq;

	if (!port->hwts_rx_en)
		return;

	ch = PORT2CHANNEL(port);

	regs = port->timesync_regs;

	val = __raw_readl(addr: &regs->channel[ch].ch_event);

	if (!(val & RX_SNAPSHOT_LOCKED))
		return;

	lo = __raw_readl(addr: &regs->channel[ch].src_uuid_lo);
	hi = __raw_readl(addr: &regs->channel[ch].src_uuid_hi);

	/* src_uuid_hi packs UUID high word (low 16) and sequence id (high 16) */
	uid = hi & 0xffff;
	seq = (hi >> 16) & 0xffff;

	if (!ixp_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
		goto out;

	/* 64-bit snapshot counter, converted to nanoseconds */
	lo = __raw_readl(addr: &regs->channel[ch].rx_snap_lo);
	hi = __raw_readl(addr: &regs->channel[ch].rx_snap_hi);
	ns = ((u64) hi) << 32;
	ns |= lo;
	ns <<= TICKS_NS_SHIFT;

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
out:
	/* clear the lock so the next frame can be snapshotted */
	__raw_writel(RX_SNAPSHOT_LOCKED, addr: &regs->channel[ch].ch_event);
}
350 | |
/* If the skb requested a hardware TX timestamp and TX timestamping is
 * enabled, busy-poll the timesync unit for the snapshot, report it via
 * skb_tstamp_tx() and release the lock. Gives up (and clears
 * SKBTX_IN_PROGRESS) if no snapshot appears within ~100us.
 */
static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct ixp46x_ts_regs *regs;
	struct skb_shared_info *shtx;
	u64 ns;
	u32 ch, cnt, hi, lo, val;

	shtx = skb_shinfo(skb);
	if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && port->hwts_tx_en))
		shtx->tx_flags |= SKBTX_IN_PROGRESS;
	else
		return;

	ch = PORT2CHANNEL(port);

	regs = port->timesync_regs;

	/*
	 * This really stinks, but we have to poll for the Tx time stamp.
	 * Usually, the time stamp is ready after 4 to 6 microseconds.
	 */
	for (cnt = 0; cnt < 100; cnt++) {
		val = __raw_readl(addr: &regs->channel[ch].ch_event);
		if (val & TX_SNAPSHOT_LOCKED)
			break;
		udelay(1);
	}
	if (!(val & TX_SNAPSHOT_LOCKED)) {
		/* timed out — withdraw the in-progress flag */
		shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
		return;
	}

	/* 64-bit snapshot counter, converted to nanoseconds */
	lo = __raw_readl(addr: &regs->channel[ch].tx_snap_lo);
	hi = __raw_readl(addr: &regs->channel[ch].tx_snap_hi);
	ns = ((u64) hi) << 32;
	ns |= lo;
	ns <<= TICKS_NS_SHIFT;

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(orig_skb: skb, hwtstamps: &shhwtstamps);

	/* release the snapshot lock for the next packet */
	__raw_writel(TX_SNAPSHOT_LOCKED, addr: &regs->channel[ch].ch_event);
}
396 | |
/* SIOCSHWTSTAMP handler: configure hardware timestamping from the
 * user-supplied hwtstamp_config. Only HWTSTAMP_TX_OFF/ON and the
 * PTPv1 L4 SYNC/DELAY_REQ RX filters are supported (-ERANGE otherwise).
 * Returns 0 on success or a negative errno.
 * NOTE(review): cfg.flags is not validated here — confirm whether
 * non-zero flags should be rejected.
 */
static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	struct ixp46x_ts_regs *regs;
	struct port *port = netdev_priv(dev: netdev);
	int ret;
	int ch;

	if (copy_from_user(to: &cfg, from: ifr->ifr_data, n: sizeof(cfg)))
		return -EFAULT;

	/* locate the IXP46x timesync unit and PHC index for this port */
	ret = ixp46x_ptp_find(regs: &port->timesync_regs, phc_index: &port->phc_index);
	if (ret)
		return ret;

	ch = PORT2CHANNEL(port);
	regs = port->timesync_regs;

	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		port->hwts_rx_en = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		port->hwts_rx_en = PTP_SLAVE_MODE;
		__raw_writel(val: 0, addr: &regs->channel[ch].ch_control);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		port->hwts_rx_en = PTP_MASTER_MODE;
		__raw_writel(MASTER_MODE, addr: &regs->channel[ch].ch_control);
		break;
	default:
		return -ERANGE;
	}

	port->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON;

	/* Clear out any old time stamps. */
	__raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED,
		     addr: &regs->channel[ch].ch_event);

	return copy_to_user(to: ifr->ifr_data, from: &cfg, n: sizeof(cfg)) ? -EFAULT : 0;
}
442 | |
/* SIOCGHWTSTAMP handler: report the current hardware timestamping
 * configuration back to userspace, reconstructed from the port's
 * hwts_tx_en/hwts_rx_en state. Returns 0 or a negative errno.
 */
static int hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	struct port *port = netdev_priv(dev: netdev);

	cfg.flags = 0;
	cfg.tx_type = port->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;

	switch (port->hwts_rx_en) {
	case 0:
		cfg.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	case PTP_SLAVE_MODE:
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
		break;
	case PTP_MASTER_MODE:
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
		break;
	default:
		/* hwts_rx_en is only ever set to the three values above */
		WARN_ON_ONCE(1);
		return -ERANGE;
	}

	return copy_to_user(to: ifr->ifr_data, from: &cfg, n: sizeof(cfg)) ? -EFAULT : 0;
}
468 | |
/* Issue one MDIO read or write over the shared mdio_regs command block.
 * For writes, returns 0 on success. For reads, returns the 16-bit value,
 * or 0xFFFF when the PHY did not respond (deliberately not an error).
 * Returns -1 if the bus is busy or the GO bit never clears.
 * Caller must hold mdio_lock.
 */
static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
			   int write, u16 cmd)
{
	int cycles = 0;

	/* bit 7 of command[3] is the GO/busy bit */
	if (__raw_readl(addr: &mdio_regs->mdio_command[3]) & 0x80) {
		printk(KERN_ERR "%s: MII not ready to transmit\n" , bus->name);
		return -1;
	}

	if (write) {
		__raw_writel(val: cmd & 0xFF, addr: &mdio_regs->mdio_command[0]);
		__raw_writel(val: cmd >> 8, addr: &mdio_regs->mdio_command[1]);
	}
	__raw_writel(val: ((phy_id << 5) | location) & 0xFF,
		     addr: &mdio_regs->mdio_command[2]);
	__raw_writel(val: (phy_id >> 3) | (write << 2) | 0x80 /* GO */,
		     addr: &mdio_regs->mdio_command[3]);

	/* busy-wait for the GO bit to clear */
	while ((cycles < MAX_MDIO_RETRIES) &&
	       (__raw_readl(addr: &mdio_regs->mdio_command[3]) & 0x80)) {
		udelay(1);
		cycles++;
	}

	if (cycles == MAX_MDIO_RETRIES) {
		/* note: this message also fires for timed-out reads */
		printk(KERN_ERR "%s #%i: MII write failed\n" , bus->name,
		       phy_id);
		return -1;
	}

#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: mdio_%s() took %i cycles\n" , bus->name,
	       phy_id, write ? "write" : "read" , cycles);
#endif

	if (write)
		return 0;

	/* bit 7 of status[3] indicates "read failed" (no PHY response) */
	if (__raw_readl(addr: &mdio_regs->mdio_status[3]) & 0x80) {
#if DEBUG_MDIO
		printk(KERN_DEBUG "%s #%i: MII read failed\n" , bus->name,
		       phy_id);
#endif
		return 0xFFFF; /* don't return error */
	}

	/* read data is split across status[0] (low) and status[1] (high) */
	return (__raw_readl(addr: &mdio_regs->mdio_status[0]) & 0xFF) |
	       ((__raw_readl(addr: &mdio_regs->mdio_status[1]) & 0xFF) << 8);
}
519 | |
/* mii_bus ->read callback: serialized wrapper around ixp4xx_mdio_cmd().
 * Returns the register value, 0xFFFF for a non-responding PHY, or -1.
 */
static int ixp4xx_mdio_read(struct mii_bus *bus, int phy_id, int location)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdio_lock, flags);
	ret = ixp4xx_mdio_cmd(bus, phy_id, location, write: 0, cmd: 0);
	spin_unlock_irqrestore(lock: &mdio_lock, flags);
#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: MII read [%i] -> 0x%X\n" , bus->name,
	       phy_id, location, ret);
#endif
	return ret;
}
534 | |
/* mii_bus ->write callback: serialized wrapper around ixp4xx_mdio_cmd().
 * Returns 0 on success or -1 on bus error/timeout.
 */
static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
			     u16 val)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdio_lock, flags);
	ret = ixp4xx_mdio_cmd(bus, phy_id, location, write: 1, cmd: val);
	spin_unlock_irqrestore(lock: &mdio_lock, flags);
#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: MII write [%i] <- 0x%X, err = %i\n" ,
	       bus->name, phy_id, location, val, ret);
#endif
	return ret;
}
550 | |
/* Allocate and register the single shared MDIO bus, backed by the MAC
 * register block @regs (command/status registers only). Also enables
 * MDC output via DEFAULT_CORE_CNTRL. Returns 0 or a negative errno;
 * the bus is freed on registration failure.
 */
static int ixp4xx_mdio_register(struct eth_regs __iomem *regs)
{
	int err;

	if (!(mdio_bus = mdiobus_alloc()))
		return -ENOMEM;

	mdio_regs = regs;
	__raw_writel(DEFAULT_CORE_CNTRL, addr: &mdio_regs->core_control);
	mdio_bus->name = "IXP4xx MII Bus" ;
	mdio_bus->read = &ixp4xx_mdio_read;
	mdio_bus->write = &ixp4xx_mdio_write;
	snprintf(buf: mdio_bus->id, MII_BUS_ID_SIZE, fmt: "ixp4xx-eth-0" );

	err = of_mdiobus_register(mdio: mdio_bus, np: mdio_bus_np);
	if (err)
		mdiobus_free(bus: mdio_bus);
	return err;
}
570 | |
/* Unregister and free the shared MDIO bus created by ixp4xx_mdio_register() */
static void ixp4xx_mdio_remove(void)
{
	mdiobus_unregister(bus: mdio_bus);
	mdiobus_free(bus: mdio_bus);
}
576 | |
577 | |
/* phylib link-change callback: cache the new speed/duplex in the port
 * and reprogram the half/full-duplex bit in TX control register 0.
 * Does nothing if the link state did not actually change.
 */
static void ixp4xx_adjust_link(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;

	if (!phydev->link) {
		/* report link-down only once (speed==0 marks "down") */
		if (port->speed) {
			port->speed = 0;
			printk(KERN_INFO "%s: link down\n" , dev->name);
		}
		return;
	}

	if (port->speed == phydev->speed && port->duplex == phydev->duplex)
		return;

	port->speed = phydev->speed;
	port->duplex = phydev->duplex;

	if (port->duplex)
		__raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
			     addr: &port->regs->tx_control[0]);
	else
		__raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
			     addr: &port->regs->tx_control[0]);

	netdev_info(dev, format: "%s: link up, speed %u Mb/s, %s duplex\n" ,
		    dev->name, port->speed, port->duplex ? "full" : "half" );
}
607 | |
608 | |
/* Dump up to DEBUG_PKT_BYTES bytes of a packet to the kernel log,
 * prefixed with @func and @len. Compiles to nothing when
 * DEBUG_PKT_BYTES is 0.
 */
static inline void debug_pkt(struct net_device *dev, const char *func,
			     u8 *data, int len)
{
#if DEBUG_PKT_BYTES
	int i;

	netdev_debug(dev, "%s(%i) " , func, len);
	for (i = 0; i < len; i++) {
		if (i >= DEBUG_PKT_BYTES)
			break;
		/* insert spaces after dst MAC (6), src MAC (12) and type (14) */
		printk("%s%02X" ,
		       ((i == 6) || (i == 12) || (i >= 14)) ? " " : "" ,
		       data[i]);
	}
	printk("\n" );
#endif
}
626 | |
627 | |
/* Dump one descriptor (at bus address @phys) to the kernel log.
 * Compiles to nothing unless DEBUG_DESC is set.
 */
static inline void debug_desc(u32 phys, struct desc *desc)
{
#if DEBUG_DESC
	printk(KERN_DEBUG "%X: %X %3X %3X %08X %2X < %2X %4X %X"
	       " %X %X %02X%02X%02X%02X%02X%02X < %02X%02X%02X%02X%02X%02X\n" ,
	       phys, desc->next, desc->buf_len, desc->pkt_len,
	       desc->data, desc->dest_id, desc->src_id, desc->flags,
	       desc->qos, desc->padlen, desc->vlan_tci,
	       desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2,
	       desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5,
	       desc->src_mac_0, desc->src_mac_1, desc->src_mac_2,
	       desc->src_mac_3, desc->src_mac_4, desc->src_mac_5);
#endif
}
642 | |
/* Pop one descriptor from queue manager @queue and translate its bus
 * address back to an index into the port's RX or TX descriptor table
 * (selected by @is_tx). Returns the descriptor index, or -1 if the
 * queue is empty. BUGs on a descriptor outside the expected table.
 */
static inline int queue_get_desc(unsigned int queue, struct port *port,
				 int is_tx)
{
	u32 phys, tab_phys, n_desc;
	struct desc *tab;

	if (!(phys = qmgr_get_entry(queue)))
		return -1;

	/* low 5 bits carry NPE/port/priority info, not address bits */
	phys &= ~0x1F; /* mask out non-address bits */
	tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
	tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
	n_desc = (phys - tab_phys) / sizeof(struct desc);
	BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
	debug_desc(phys, desc: &tab[n_desc]);
	/* "next" is unused and must stay zero (see struct desc) */
	BUG_ON(tab[n_desc].next);
	return n_desc;
}
661 | |
/* Push the descriptor at bus address @phys onto queue manager @queue.
 * @phys must be 32-byte aligned (the low 5 bits are reserved for tags).
 */
static inline void queue_put_desc(unsigned int queue, u32 phys,
				  struct desc *desc)
{
	debug_desc(phys, desc);
	BUG_ON(phys & 0x1F);
	qmgr_put_entry(queue, val: phys);
	/* Don't check for queue overflow here, we've allocated sufficient
	   length and queues >= 32 don't support this check anyway. */
}
671 | |
672 | |
/* Undo the DMA mapping of a TX descriptor's buffer. On little-endian
 * builds the mapping covers the word-aligned, word-padded region used
 * by the swab copy in eth_xmit(), so the same rounding is applied here.
 */
static inline void dma_unmap_tx(struct port *port, struct desc *desc)
{
#ifdef __ARMEB__
	dma_unmap_single(&port->netdev->dev, desc->data,
			 desc->buf_len, DMA_TO_DEVICE);
#else
	dma_unmap_single(&port->netdev->dev, desc->data & ~3,
			 ALIGN((desc->data & 3) + desc->buf_len, 4),
			 DMA_TO_DEVICE);
#endif
}
684 | |
685 | |
/* RX queue interrupt handler: mask the RX queue IRQ and hand further
 * processing to NAPI (eth_poll re-enables the IRQ when the queue drains).
 */
static void eth_rx_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = netdev_priv(dev);

#if DEBUG_RX
	printk(KERN_DEBUG "%s: eth_rx_irq\n" , dev->name);
#endif
	qmgr_disable_irq(queue: port->plat->rxq);
	napi_schedule(n: &port->napi);
}
697 | |
/* NAPI poll handler: drain up to @budget frames from the port's RX
 * queue, pass them up the stack, and put refreshed buffers back on the
 * RX-free queue. On big-endian builds received buffers are handed to
 * the stack directly and replaced with freshly mapped skbs; on
 * little-endian builds the data is byte-swab-copied into a new skb and
 * the original buffer is reused. Returns the number of frames received.
 */
static int eth_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id);
	int received = 0;

#if DEBUG_RX
	netdev_debug(dev, "eth_poll\n" );
#endif

	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(queue: rxq, port, is_tx: 0)) < 0) {
			/* RX queue empty: complete NAPI, then re-check to
			 * close the race with a frame arriving before the
			 * IRQ is re-enabled */
#if DEBUG_RX
			netdev_debug(dev, "eth_poll napi_complete\n" );
#endif
			napi_complete(n: napi);
			qmgr_enable_irq(queue: rxq);
			if (!qmgr_stat_below_low_watermark(queue: rxq) &&
			    napi_schedule(n: napi)) { /* not empty again */
#if DEBUG_RX
				netdev_debug(dev, "eth_poll napi_schedule succeeded\n" );
#endif
				qmgr_disable_irq(queue: rxq);
				continue;
			}
#if DEBUG_RX
			netdev_debug(dev, "eth_poll all done\n" );
#endif
			return received; /* all work done */
		}

		desc = rx_desc_ptr(port, n);

		/* allocate a replacement (BE) or destination (LE) skb */
#ifdef __ARMEB__
		if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
			phys = dma_map_single(&dev->dev, skb->data,
					      RX_BUFF_SIZE, DMA_FROM_DEVICE);
			if (dma_mapping_error(&dev->dev, phys)) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
#else
		skb = netdev_alloc_skb(dev,
				       ALIGN(NET_IP_ALIGN + desc->pkt_len, 4));
#endif

		if (!skb) {
			dev->stats.rx_dropped++;
			/* put the desc back on RX-ready queue */
			desc->buf_len = MAX_MRU;
			desc->pkt_len = 0;
			queue_put_desc(queue: rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
				 RX_BUFF_SIZE, DMA_FROM_DEVICE);
#else
		/* copy + byte-swap NPE data into the new skb */
		dma_sync_single_for_cpu(dev: &dev->dev, addr: desc->data - NET_IP_ALIGN,
					RX_BUFF_SIZE, dir: DMA_FROM_DEVICE);
		memcpy_swab32(dest: (u32 *)skb->data, src: (u32 *)port->rx_buff_tab[n],
			      ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
#endif
		skb_reserve(skb, NET_IP_ALIGN);
		skb_put(skb, len: desc->pkt_len);

		debug_pkt(dev, func: "eth_poll" , data: skb->data, len: skb->len);

		ixp_rx_timestamp(port, skb);
		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys + NET_IP_ALIGN;
#endif
		desc->buf_len = MAX_MRU;
		desc->pkt_len = 0;
		queue_put_desc(queue: rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}

#if DEBUG_RX
	netdev_debug(dev, "eth_poll(): end, not all work done\n" );
#endif
	return received; /* not all work done */
}
802 | |
803 | |
/* TX-done queue interrupt handler, shared by all ports (TXDONE_QUEUE is
 * global). For each completed descriptor: account stats, unmap and free
 * the transmitted buffer, return the descriptor to the owning port's
 * TX-ready queue, and wake the netdev queue if it had stalled.
 */
static void eth_txdone_irq(void *unused)
{
	u32 phys;

#if DEBUG_TX
	printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n" );
#endif
	while ((phys = qmgr_get_entry(TXDONE_QUEUE)) != 0) {
		u32 npe_id, n_desc;
		struct port *port;
		struct desc *desc;
		int start;

		/* low 2 bits of the entry identify the originating NPE */
		npe_id = phys & 3;
		BUG_ON(npe_id >= MAX_NPES);
		port = npe_port_tab[npe_id];
		BUG_ON(!port);
		phys &= ~0x1F; /* mask out non-address bits */
		n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);
		BUG_ON(n_desc >= TX_DESCS);
		desc = tx_desc_ptr(port, n_desc);
		debug_desc(phys, desc);

		if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
			port->netdev->stats.tx_packets++;
			port->netdev->stats.tx_bytes += desc->pkt_len;

			dma_unmap_tx(port, desc);
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_txdone_irq free %p\n" ,
			       port->netdev->name, port->tx_buff_tab[n_desc]);
#endif
			free_buffer_irq(objp: port->tx_buff_tab[n_desc]);
			port->tx_buff_tab[n_desc] = NULL;
		}

		/* sample before put: was the TX-ready queue empty? */
		start = qmgr_stat_below_low_watermark(queue: port->plat->txreadyq);
		queue_put_desc(queue: port->plat->txreadyq, phys, desc);
		if (start) { /* TX-ready queue was empty */
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n" ,
			       port->netdev->name);
#endif
			netif_wake_queue(dev: port->netdev);
		}
	}
}
851 | |
/* ndo_start_xmit: queue one frame for transmission by the NPE.
 * On big-endian builds the skb data is DMA-mapped directly and the skb
 * is kept until TX-done; on little-endian builds the data is byte-swab
 * copied into a kmalloc'd bounce buffer and the skb is freed here.
 * Always returns NETDEV_TX_OK (errors are counted, never requeued).
 */
static netdev_tx_t eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	unsigned int txreadyq = port->plat->txreadyq;
	int len, offset, bytes, n;
	void *mem;
	u32 phys;
	struct desc *desc;

#if DEBUG_TX
	netdev_debug(dev, "eth_xmit\n" );
#endif

	if (unlikely(skb->len > MAX_MRU)) {
		dev_kfree_skb(skb);
		dev->stats.tx_errors++;
		return NETDEV_TX_OK;
	}

	debug_pkt(dev, func: "eth_xmit" , data: skb->data, len: skb->len);

	len = skb->len;
#ifdef __ARMEB__
	offset = 0; /* no need to keep alignment */
	bytes = len;
	mem = skb->data;
#else
	offset = (uintptr_t)skb->data & 3; /* keep 32-bit alignment */
	bytes = ALIGN(offset + len, 4);
	if (!(mem = kmalloc(size: bytes, GFP_ATOMIC))) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	/* swab whole aligned words; NPE expects big-endian data */
	memcpy_swab32(dest: mem, src: (u32 *)((uintptr_t)skb->data & ~3), cnt: bytes / 4);
#endif

	phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev: &dev->dev, dma_addr: phys)) {
		dev_kfree_skb(skb);
#ifndef __ARMEB__
		kfree(objp: mem);
#endif
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* a descriptor is guaranteed here: the stack only calls us while
	 * the TX-ready queue is non-empty (queue stopped otherwise) */
	n = queue_get_desc(queue: txreadyq, port, is_tx: 1);
	BUG_ON(n < 0);
	desc = tx_desc_ptr(port, n);

#ifdef __ARMEB__
	port->tx_buff_tab[n] = skb;
#else
	port->tx_buff_tab[n] = mem;
#endif
	desc->data = phys + offset;
	desc->buf_len = desc->pkt_len = len;

	/* NPE firmware pads short frames with zeros internally */
	wmb();
	queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);

	if (qmgr_stat_below_low_watermark(queue: txreadyq)) { /* empty */
#if DEBUG_TX
		netdev_debug(dev, "eth_xmit queue full\n" );
#endif
		netif_stop_queue(dev);
		/* we could miss TX ready interrupt */
		/* really empty in fact */
		if (!qmgr_stat_below_low_watermark(queue: txreadyq)) {
#if DEBUG_TX
			netdev_debug(dev, "eth_xmit ready again\n" );
#endif
			netif_wake_queue(dev);
		}
	}

#if DEBUG_TX
	netdev_debug(dev, "eth_xmit end\n" );
#endif

	ixp_tx_timestamp(port, skb);
	skb_tx_timestamp(skb);

#ifndef __ARMEB__
	/* data was copied to the bounce buffer; skb no longer needed */
	dev_kfree_skb(skb);
#endif
	return NETDEV_TX_OK;
}
942 | |
943 | |
/* ndo_set_rx_mode: program the MAC's single address/mask multicast
 * filter. ALLMULTI maps to the multicast-group bit with a 1-bit mask;
 * PROMISC or an empty list disables filtering; otherwise the filter is
 * set to the bits common to all subscribed multicast addresses.
 */
static void eth_set_mcast_list(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u8 diffs[ETH_ALEN], *addr;
	int i;
	static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };

	if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) {
		for (i = 0; i < ETH_ALEN; i++) {
			__raw_writel(val: allmulti[i], addr: &port->regs->mcast_addr[i]);
			__raw_writel(val: allmulti[i], addr: &port->regs->mcast_mask[i]);
		}
		__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
			     addr: &port->regs->rx_control[0]);
		return;
	}

	if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) {
		__raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
			     addr: &port->regs->rx_control[0]);
		return;
	}

	/* diffs accumulates the bits that differ between the addresses;
	 * the mask is their complement (bits shared by every address) */
	eth_zero_addr(addr: diffs);

	addr = NULL;
	netdev_for_each_mc_addr(ha, dev) {
		if (!addr)
			addr = ha->addr; /* first MAC address */
		for (i = 0; i < ETH_ALEN; i++)
			diffs[i] |= addr[i] ^ ha->addr[i];
	}

	for (i = 0; i < ETH_ALEN; i++) {
		__raw_writel(val: addr[i], addr: &port->regs->mcast_addr[i]);
		__raw_writel(val: ~diffs[i], addr: &port->regs->mcast_mask[i]);
	}

	__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
		     addr: &port->regs->rx_control[0]);
}
986 | |
987 | |
988 | static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd) |
989 | { |
990 | if (!netif_running(dev)) |
991 | return -EINVAL; |
992 | |
993 | if (cpu_is_ixp46x()) { |
994 | if (cmd == SIOCSHWTSTAMP) |
995 | return hwtstamp_set(netdev: dev, ifr: req); |
996 | if (cmd == SIOCGHWTSTAMP) |
997 | return hwtstamp_get(netdev: dev, ifr: req); |
998 | } |
999 | |
1000 | return phy_mii_ioctl(phydev: dev->phydev, ifr: req, cmd); |
1001 | } |
1002 | |
1003 | /* ethtool support */ |
1004 | |
1005 | static void ixp4xx_get_drvinfo(struct net_device *dev, |
1006 | struct ethtool_drvinfo *info) |
1007 | { |
1008 | struct port *port = netdev_priv(dev); |
1009 | |
1010 | strscpy(info->driver, DRV_NAME, sizeof(info->driver)); |
1011 | snprintf(buf: info->fw_version, size: sizeof(info->fw_version), fmt: "%u:%u:%u:%u" , |
1012 | port->firmware[0], port->firmware[1], |
1013 | port->firmware[2], port->firmware[3]); |
1014 | strscpy(info->bus_info, "internal" , sizeof(info->bus_info)); |
1015 | } |
1016 | |
1017 | static int ixp4xx_get_ts_info(struct net_device *dev, |
1018 | struct ethtool_ts_info *info) |
1019 | { |
1020 | struct port *port = netdev_priv(dev); |
1021 | |
1022 | if (port->phc_index < 0) |
1023 | ixp46x_ptp_find(regs: &port->timesync_regs, phc_index: &port->phc_index); |
1024 | |
1025 | info->phc_index = port->phc_index; |
1026 | |
1027 | if (info->phc_index < 0) { |
1028 | info->so_timestamping = |
1029 | SOF_TIMESTAMPING_TX_SOFTWARE | |
1030 | SOF_TIMESTAMPING_RX_SOFTWARE | |
1031 | SOF_TIMESTAMPING_SOFTWARE; |
1032 | return 0; |
1033 | } |
1034 | info->so_timestamping = |
1035 | SOF_TIMESTAMPING_TX_HARDWARE | |
1036 | SOF_TIMESTAMPING_RX_HARDWARE | |
1037 | SOF_TIMESTAMPING_RAW_HARDWARE; |
1038 | info->tx_types = |
1039 | (1 << HWTSTAMP_TX_OFF) | |
1040 | (1 << HWTSTAMP_TX_ON); |
1041 | info->rx_filters = |
1042 | (1 << HWTSTAMP_FILTER_NONE) | |
1043 | (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | |
1044 | (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ); |
1045 | return 0; |
1046 | } |
1047 | |
/* ethtool operations; link control is delegated to phylib helpers. */
static const struct ethtool_ops ixp4xx_ethtool_ops = {
	.get_drvinfo = ixp4xx_get_drvinfo,
	.nway_reset = phy_ethtool_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_ts_info = ixp4xx_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
1056 | |
1057 | |
1058 | static int request_queues(struct port *port) |
1059 | { |
1060 | int err; |
1061 | |
1062 | err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0, |
1063 | "%s:RX-free" , port->netdev->name); |
1064 | if (err) |
1065 | return err; |
1066 | |
1067 | err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0, |
1068 | "%s:RX" , port->netdev->name); |
1069 | if (err) |
1070 | goto rel_rxfree; |
1071 | |
1072 | err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0, |
1073 | "%s:TX" , port->netdev->name); |
1074 | if (err) |
1075 | goto rel_rx; |
1076 | |
1077 | err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0, |
1078 | "%s:TX-ready" , port->netdev->name); |
1079 | if (err) |
1080 | goto rel_tx; |
1081 | |
1082 | /* TX-done queue handles skbs sent out by the NPEs */ |
1083 | if (!ports_open) { |
1084 | err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0, |
1085 | "%s:TX-done" , DRV_NAME); |
1086 | if (err) |
1087 | goto rel_txready; |
1088 | } |
1089 | return 0; |
1090 | |
1091 | rel_txready: |
1092 | qmgr_release_queue(queue: port->plat->txreadyq); |
1093 | rel_tx: |
1094 | qmgr_release_queue(TX_QUEUE(port->id)); |
1095 | rel_rx: |
1096 | qmgr_release_queue(queue: port->plat->rxq); |
1097 | rel_rxfree: |
1098 | qmgr_release_queue(RXFREE_QUEUE(port->id)); |
1099 | printk(KERN_DEBUG "%s: unable to request hardware queues\n" , |
1100 | port->netdev->name); |
1101 | return err; |
1102 | } |
1103 | |
/* Release every hardware queue claimed by request_queues().  The
 * shared TX-done queue is released only when the last open port is
 * being closed (ports_open already decremented by the caller).
 */
static void release_queues(struct port *port)
{
	qmgr_release_queue(RXFREE_QUEUE(port->id));
	qmgr_release_queue(queue: port->plat->rxq);
	qmgr_release_queue(TX_QUEUE(port->id));
	qmgr_release_queue(queue: port->plat->txreadyq);

	if (!ports_open)
		qmgr_release_queue(TXDONE_QUEUE);
}
1114 | |
/* Allocate the per-port descriptor table from the shared DMA pool
 * (creating the pool on the first open port) and populate the RX side
 * with freshly mapped receive buffers.  Returns 0 or a negative errno;
 * on failure the caller is expected to run destroy_queues() to undo
 * whatever was set up.
 */
static int init_queues(struct port *port)
{
	int i;

	if (!ports_open) {
		/* One DMA pool shared by all ports, created on demand */
		dma_pool = dma_pool_create(DRV_NAME, dev: &port->netdev->dev,
					   POOL_ALLOC_SIZE, align: 32, allocation: 0);
		if (!dma_pool)
			return -ENOMEM;
	}

	port->desc_tab = dma_pool_zalloc(pool: dma_pool, GFP_KERNEL, handle: &port->desc_tab_phys);
	if (!port->desc_tab)
		return -ENOMEM;
	memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
	memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));

	/* Setup RX buffers */
	for (i = 0; i < RX_DESCS; i++) {
		struct desc *desc = rx_desc_ptr(port, i);
		buffer_t *buff; /* skb or kmalloc()ated memory */
		void *data;
#ifdef __ARMEB__
		/* Big-endian: NPE can DMA straight into an skb */
		if (!(buff = netdev_alloc_skb(port->netdev, RX_BUFF_SIZE)))
			return -ENOMEM;
		data = buff->data;
#else
		/* Little-endian: use a raw buffer, copied to an skb on RX */
		if (!(buff = kmalloc(RX_BUFF_SIZE, GFP_KERNEL)))
			return -ENOMEM;
		data = buff;
#endif
		desc->buf_len = MAX_MRU;
		desc->data = dma_map_single(&port->netdev->dev, data,
					    RX_BUFF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev: &port->netdev->dev, dma_addr: desc->data)) {
			free_buffer(objp: buff);
			return -EIO;
		}
		/* Offset so the IP header ends up word-aligned */
		desc->data += NET_IP_ALIGN;
		port->rx_buff_tab[i] = buff;
	}

	return 0;
}
1159 | |
1160 | static void destroy_queues(struct port *port) |
1161 | { |
1162 | int i; |
1163 | |
1164 | if (port->desc_tab) { |
1165 | for (i = 0; i < RX_DESCS; i++) { |
1166 | struct desc *desc = rx_desc_ptr(port, i); |
1167 | buffer_t *buff = port->rx_buff_tab[i]; |
1168 | if (buff) { |
1169 | dma_unmap_single(&port->netdev->dev, |
1170 | desc->data - NET_IP_ALIGN, |
1171 | RX_BUFF_SIZE, DMA_FROM_DEVICE); |
1172 | free_buffer(objp: buff); |
1173 | } |
1174 | } |
1175 | for (i = 0; i < TX_DESCS; i++) { |
1176 | struct desc *desc = tx_desc_ptr(port, i); |
1177 | buffer_t *buff = port->tx_buff_tab[i]; |
1178 | if (buff) { |
1179 | dma_unmap_tx(port, desc); |
1180 | free_buffer(objp: buff); |
1181 | } |
1182 | } |
1183 | dma_pool_free(pool: dma_pool, vaddr: port->desc_tab, addr: port->desc_tab_phys); |
1184 | port->desc_tab = NULL; |
1185 | } |
1186 | |
1187 | if (!ports_open && dma_pool) { |
1188 | dma_pool_destroy(pool: dma_pool); |
1189 | dma_pool = NULL; |
1190 | } |
1191 | } |
1192 | |
1193 | static int ixp4xx_do_change_mtu(struct net_device *dev, int new_mtu) |
1194 | { |
1195 | struct port *port = netdev_priv(dev); |
1196 | struct npe *npe = port->npe; |
1197 | int framesize, chunks; |
1198 | struct msg msg = {}; |
1199 | |
1200 | /* adjust for ethernet headers */ |
1201 | framesize = new_mtu + VLAN_ETH_HLEN; |
1202 | /* max rx/tx 64 byte chunks */ |
1203 | chunks = DIV_ROUND_UP(framesize, 64); |
1204 | |
1205 | msg.cmd = NPE_SETMAXFRAMELENGTHS; |
1206 | msg.eth_id = port->id; |
1207 | |
1208 | /* Firmware wants to know buffer size in 64 byte chunks */ |
1209 | msg.byte2 = chunks << 8; |
1210 | msg.byte3 = chunks << 8; |
1211 | |
1212 | msg.byte4 = msg.byte6 = framesize >> 8; |
1213 | msg.byte5 = msg.byte7 = framesize & 0xff; |
1214 | |
1215 | if (npe_send_recv_message(npe, msg: &msg, what: "ETH_SET_MAX_FRAME_LENGTH" )) |
1216 | return -EIO; |
1217 | netdev_dbg(dev, "set MTU on NPE %s to %d bytes\n" , |
1218 | npe_name(npe), new_mtu); |
1219 | |
1220 | return 0; |
1221 | } |
1222 | |
1223 | static int ixp4xx_eth_change_mtu(struct net_device *dev, int new_mtu) |
1224 | { |
1225 | int ret; |
1226 | |
1227 | /* MTU can only be changed when the interface is up. We also |
1228 | * set the MTU from dev->mtu when opening the device. |
1229 | */ |
1230 | if (dev->flags & IFF_UP) { |
1231 | ret = ixp4xx_do_change_mtu(dev, new_mtu); |
1232 | if (ret < 0) |
1233 | return ret; |
1234 | } |
1235 | |
1236 | dev->mtu = new_mtu; |
1237 | |
1238 | return 0; |
1239 | } |
1240 | |
/* ndo_open handler: boot the NPE if needed, program it with this
 * port's configuration (RX queue routing, MAC address, firewall mode,
 * frame lengths), allocate and prime the hardware queues, set up the
 * MAC registers, then enable NAPI and the queue interrupts.
 * Returns 0 on success or a negative errno.
 */
static int eth_open(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct npe *npe = port->npe;
	struct msg msg;
	int i, err;

	/* Load firmware into the NPE on first use and read its version */
	if (!npe_running(npe)) {
		err = npe_load_firmware(npe, name: npe_name(npe), dev: &dev->dev);
		if (err)
			return err;

		if (npe_recv_message(npe, msg: &msg, what: "ETH_GET_STATUS" )) {
			netdev_err(dev, format: "%s not responding\n" , npe_name(npe));
			return -EIO;
		}
		/* Firmware version, reported via ethtool get_drvinfo */
		port->firmware[0] = msg.byte4;
		port->firmware[1] = msg.byte5;
		port->firmware[2] = msg.byte6;
		port->firmware[3] = msg.byte7;
	}

	/* Route received frames of all 8 QoS classes to our RX queue */
	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_VLAN_SETRXQOSENTRY;
	msg.eth_id = port->id;
	msg.byte5 = port->plat->rxq | 0x80;
	msg.byte7 = port->plat->rxq << 4;
	for (i = 0; i < 8; i++) {
		msg.byte3 = i;
		if (npe_send_recv_message(npe: port->npe, msg: &msg, what: "ETH_SET_RXQ" ))
			return -EIO;
	}

	/* Tell the NPE this port's MAC address */
	msg.cmd = NPE_EDB_SETPORTADDRESS;
	msg.eth_id = PHYSICAL_ID(port->id);
	msg.byte2 = dev->dev_addr[0];
	msg.byte3 = dev->dev_addr[1];
	msg.byte4 = dev->dev_addr[2];
	msg.byte5 = dev->dev_addr[3];
	msg.byte6 = dev->dev_addr[4];
	msg.byte7 = dev->dev_addr[5];
	if (npe_send_recv_message(npe: port->npe, msg: &msg, what: "ETH_SET_MAC" ))
		return -EIO;

	/* All-zero message selects the default firewall mode */
	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_FW_SETFIREWALLMODE;
	msg.eth_id = port->id;
	if (npe_send_recv_message(npe: port->npe, msg: &msg, what: "ETH_SET_FIREWALL_MODE" ))
		return -EIO;

	/* Re-apply the current MTU to the NPE */
	ixp4xx_do_change_mtu(dev, new_mtu: dev->mtu);

	if ((err = request_queues(port)) != 0)
		return err;

	if ((err = init_queues(port)) != 0) {
		destroy_queues(port);
		release_queues(port);
		return err;
	}

	port->speed = 0;	/* force "link up" message */
	phy_start(phydev: dev->phydev);

	/* Program MAC registers: address and timing/threshold values */
	for (i = 0; i < ETH_ALEN; i++)
		__raw_writel(val: dev->dev_addr[i], addr: &port->regs->hw_addr[i]);
	__raw_writel(val: 0x08, addr: &port->regs->random_seed);
	__raw_writel(val: 0x12, addr: &port->regs->partial_empty_threshold);
	__raw_writel(val: 0x30, addr: &port->regs->partial_full_threshold);
	__raw_writel(val: 0x08, addr: &port->regs->tx_start_bytes);
	__raw_writel(val: 0x15, addr: &port->regs->tx_deferral);
	__raw_writel(val: 0x08, addr: &port->regs->tx_2part_deferral[0]);
	__raw_writel(val: 0x07, addr: &port->regs->tx_2part_deferral[1]);
	__raw_writel(val: 0x80, addr: &port->regs->slot_time);
	__raw_writel(val: 0x01, addr: &port->regs->int_clock_threshold);

	/* Populate queues with buffers, no failure after this point */
	for (i = 0; i < TX_DESCS; i++)
		queue_put_desc(queue: port->plat->txreadyq,
			       tx_desc_phys(port, i), tx_desc_ptr(port, i));

	for (i = 0; i < RX_DESCS; i++)
		queue_put_desc(RXFREE_QUEUE(port->id),
			       rx_desc_phys(port, i), rx_desc_ptr(port, i));

	/* Enable RX and TX paths on the MAC */
	__raw_writel(TX_CNTRL1_RETRIES, addr: &port->regs->tx_control[1]);
	__raw_writel(DEFAULT_TX_CNTRL0, addr: &port->regs->tx_control[0]);
	__raw_writel(val: 0, addr: &port->regs->rx_control[1]);
	__raw_writel(DEFAULT_RX_CNTRL0, addr: &port->regs->rx_control[0]);

	napi_enable(n: &port->napi);
	eth_set_mcast_list(dev);
	netif_start_queue(dev);

	qmgr_set_irq(queue: port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
		     handler: eth_rx_irq, pdev: dev);
	/* The TX-done IRQ is shared; hook it up for the first open port */
	if (!ports_open) {
		qmgr_set_irq(TXDONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY,
			     handler: eth_txdone_irq, NULL);
		qmgr_enable_irq(TXDONE_QUEUE);
	}
	ports_open++;
	/* we may already have RX data, enables IRQ */
	napi_schedule(n: &port->napi);
	return 0;
}
1347 | |
/* ndo_stop handler: stop NAPI and the TX queue, then recover every
 * descriptor from the NPE by putting the port into loopback and, when
 * needed, injecting dummy frames so in-flight RX buffers cycle back.
 * Finally tears down queues and buffers.  Always returns 0.
 */
static int eth_close(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct msg msg;
	int buffs = RX_DESCS; /* allocated RX buffers */
	int i;

	ports_open--;
	qmgr_disable_irq(queue: port->plat->rxq);
	napi_disable(n: &port->napi);
	netif_stop_queue(dev);

	/* Reclaim RX descriptors still sitting in the RX-free queue */
	while (queue_get_desc(RXFREE_QUEUE(port->id), port, is_tx: 0) >= 0)
		buffs--;

	/* Loop TX back to RX so the NPE returns the buffers it holds */
	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_SETLOOPBACK_MODE;
	msg.eth_id = port->id;
	msg.byte3 = 1;
	if (npe_send_recv_message(npe: port->npe, msg: &msg, what: "ETH_ENABLE_LOOPBACK" ))
		netdev_crit(dev, format: "unable to enable loopback\n" );

	i = 0;
	do {			/* drain RX buffers */
		while (queue_get_desc(queue: port->plat->rxq, port, is_tx: 0) >= 0)
			buffs--;
		if (!buffs)
			break;
		if (qmgr_stat_empty(TX_QUEUE(port->id))) {
			/* we have to inject some packet */
			struct desc *desc;
			u32 phys;
			int n = queue_get_desc(queue: port->plat->txreadyq, port, is_tx: 1);
			BUG_ON(n < 0);
			desc = tx_desc_ptr(port, n);
			phys = tx_desc_phys(port, n);
			desc->buf_len = desc->pkt_len = 1;
			wmb();
			queue_put_desc(TX_QUEUE(port->id), phys, desc);
		}
		udelay(1);
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		netdev_crit(dev, format: "unable to drain RX queue, %i buffer(s)"
			    " left in NPE\n" , buffs);
#if DEBUG_CLOSE
	if (!buffs)
		netdev_debug(dev, "draining RX queue took %i cycles\n" , i);
#endif

	/* Now drain the TX side the same way */
	buffs = TX_DESCS;
	while (queue_get_desc(TX_QUEUE(port->id), port, is_tx: 1) >= 0)
		buffs--; /* cancel TX */

	i = 0;
	do {
		while (queue_get_desc(queue: port->plat->txreadyq, port, is_tx: 1) >= 0)
			buffs--;
		if (!buffs)
			break;
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		netdev_crit(dev, format: "unable to drain TX queue, %i buffer(s) "
			    "left in NPE\n" , buffs);
#if DEBUG_CLOSE
	if (!buffs)
		netdev_debug(dev, "draining TX queues took %i cycles\n" , i);
#endif

	/* msg still holds the loopback command; byte3 = 0 disables it */
	msg.byte3 = 0;
	if (npe_send_recv_message(npe: port->npe, msg: &msg, what: "ETH_DISABLE_LOOPBACK" ))
		netdev_crit(dev, format: "unable to disable loopback\n" );

	phy_stop(phydev: dev->phydev);

	/* Shared TX-done IRQ goes off with the last open port */
	if (!ports_open)
		qmgr_disable_irq(TXDONE_QUEUE);
	destroy_queues(port);
	release_queues(port);
	return 0;
}
1431 | |
/* Network device operations for an IXP4xx Ethernet port. */
static const struct net_device_ops ixp4xx_netdev_ops = {
	.ndo_open = eth_open,
	.ndo_stop = eth_close,
	.ndo_change_mtu = ixp4xx_eth_change_mtu,
	.ndo_start_xmit = eth_xmit,
	.ndo_set_rx_mode = eth_set_mcast_list,
	.ndo_eth_ioctl = eth_ioctl,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};
1442 | |
1443 | static struct eth_plat_info *ixp4xx_of_get_platdata(struct device *dev) |
1444 | { |
1445 | struct device_node *np = dev->of_node; |
1446 | struct of_phandle_args queue_spec; |
1447 | struct of_phandle_args npe_spec; |
1448 | struct device_node *mdio_np; |
1449 | struct eth_plat_info *plat; |
1450 | u8 mac[ETH_ALEN]; |
1451 | int ret; |
1452 | |
1453 | plat = devm_kzalloc(dev, size: sizeof(*plat), GFP_KERNEL); |
1454 | if (!plat) |
1455 | return NULL; |
1456 | |
1457 | ret = of_parse_phandle_with_fixed_args(np, list_name: "intel,npe-handle" , cell_count: 1, index: 0, |
1458 | out_args: &npe_spec); |
1459 | if (ret) { |
1460 | dev_err(dev, "no NPE engine specified\n" ); |
1461 | return NULL; |
1462 | } |
1463 | /* NPE ID 0x00, 0x10, 0x20... */ |
1464 | plat->npe = (npe_spec.args[0] << 4); |
1465 | |
1466 | /* Check if this device has an MDIO bus */ |
1467 | mdio_np = of_get_child_by_name(node: np, name: "mdio" ); |
1468 | if (mdio_np) { |
1469 | plat->has_mdio = true; |
1470 | mdio_bus_np = mdio_np; |
1471 | /* DO NOT put the mdio_np, it will be used */ |
1472 | } |
1473 | |
1474 | /* Get the rx queue as a resource from queue manager */ |
1475 | ret = of_parse_phandle_with_fixed_args(np, list_name: "queue-rx" , cell_count: 1, index: 0, |
1476 | out_args: &queue_spec); |
1477 | if (ret) { |
1478 | dev_err(dev, "no rx queue phandle\n" ); |
1479 | return NULL; |
1480 | } |
1481 | plat->rxq = queue_spec.args[0]; |
1482 | |
1483 | /* Get the txready queue as resource from queue manager */ |
1484 | ret = of_parse_phandle_with_fixed_args(np, list_name: "queue-txready" , cell_count: 1, index: 0, |
1485 | out_args: &queue_spec); |
1486 | if (ret) { |
1487 | dev_err(dev, "no txready queue phandle\n" ); |
1488 | return NULL; |
1489 | } |
1490 | plat->txreadyq = queue_spec.args[0]; |
1491 | |
1492 | ret = of_get_mac_address(np, mac); |
1493 | if (!ret) { |
1494 | dev_info(dev, "Setting macaddr from DT %pM\n" , mac); |
1495 | memcpy(plat->hwaddr, mac, ETH_ALEN); |
1496 | } |
1497 | |
1498 | return plat; |
1499 | } |
1500 | |
/* Probe one "intel,ixp4xx-ethernet" instance: parse DT platform data,
 * map the MAC registers, register the MDIO bus on the instance that
 * carries it (deferring others until it exists), request the NPE,
 * reset the MAC core, connect the PHY and register the net device.
 */
static int ixp4xx_eth_probe(struct platform_device *pdev)
{
	struct phy_device *phydev = NULL;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct eth_plat_info *plat;
	struct net_device *ndev;
	struct port *port;
	int err;

	plat = ixp4xx_of_get_platdata(dev);
	if (!plat)
		return -ENODEV;

	if (!(ndev = devm_alloc_etherdev(dev, sizeof(struct port))))
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, dev);
	port = netdev_priv(dev: ndev);
	port->netdev = ndev;
	port->id = plat->npe;
	/* PTP clock is looked up lazily in ixp4xx_get_ts_info() */
	port->phc_index = -1;

	/* Get the port resource and remap */
	port->regs = devm_platform_get_and_ioremap_resource(pdev, index: 0, NULL);
	if (IS_ERR(ptr: port->regs))
		return PTR_ERR(ptr: port->regs);

	/* Register the MDIO bus if we have it */
	if (plat->has_mdio) {
		err = ixp4xx_mdio_register(regs: port->regs);
		if (err) {
			dev_err(dev, "failed to register MDIO bus\n" );
			return err;
		}
	}
	/* If the instance with the MDIO bus has not yet appeared,
	 * defer probing until it gets probed.
	 */
	if (!mdio_bus)
		return -EPROBE_DEFER;

	ndev->netdev_ops = &ixp4xx_netdev_ops;
	ndev->ethtool_ops = &ixp4xx_ethtool_ops;
	ndev->tx_queue_len = 100;
	/* Inherit the DMA masks from the platform device */
	ndev->dev.dma_mask = dev->dma_mask;
	ndev->dev.coherent_dma_mask = dev->coherent_dma_mask;

	ndev->min_mtu = ETH_MIN_MTU;
	ndev->max_mtu = MAX_MRU;

	netif_napi_add_weight(dev: ndev, napi: &port->napi, poll: eth_poll, NAPI_WEIGHT);

	if (!(port->npe = npe_request(NPE_ID(port->id))))
		return -EIO;

	port->plat = plat;
	npe_port_tab[NPE_ID(port->id)] = port;
	/* Use the DT-provided MAC address, or fall back to a random one */
	if (is_valid_ether_addr(addr: plat->hwaddr))
		eth_hw_addr_set(dev: ndev, addr: plat->hwaddr);
	else
		eth_hw_addr_random(dev: ndev);

	platform_set_drvdata(pdev, data: ndev);

	/* Pulse the MAC core reset bit */
	__raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET,
		     addr: &port->regs->core_control);
	udelay(50);
	__raw_writel(DEFAULT_CORE_CNTRL, addr: &port->regs->core_control);
	udelay(50);

	phydev = of_phy_get_and_connect(dev: ndev, np, hndlr: ixp4xx_adjust_link);
	if (!phydev) {
		err = -ENODEV;
		dev_err(dev, "no phydev\n" );
		goto err_free_mem;
	}

	phydev->irq = PHY_POLL;

	if ((err = register_netdev(dev: ndev)))
		goto err_phy_dis;

	netdev_info(dev: ndev, format: "%s: MII PHY %s on %s\n" , ndev->name, phydev_name(phydev),
		    npe_name(npe: port->npe));

	return 0;

err_phy_dis:
	phy_disconnect(phydev);
err_free_mem:
	npe_port_tab[NPE_ID(port->id)] = NULL;
	npe_release(npe: port->npe);
	return err;
}
1597 | |
1598 | static void ixp4xx_eth_remove(struct platform_device *pdev) |
1599 | { |
1600 | struct net_device *ndev = platform_get_drvdata(pdev); |
1601 | struct phy_device *phydev = ndev->phydev; |
1602 | struct port *port = netdev_priv(dev: ndev); |
1603 | |
1604 | unregister_netdev(dev: ndev); |
1605 | phy_disconnect(phydev); |
1606 | ixp4xx_mdio_remove(); |
1607 | npe_port_tab[NPE_ID(port->id)] = NULL; |
1608 | npe_release(npe: port->npe); |
1609 | } |
1610 | |
/* Device-tree match table for this driver. */
static const struct of_device_id ixp4xx_eth_of_match[] = {
	{
		.compatible = "intel,ixp4xx-ethernet" ,
	},
	{ },
};
1617 | |
/* Platform driver glue and module metadata. */
static struct platform_driver ixp4xx_eth_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = of_match_ptr(ixp4xx_eth_of_match),
	},
	.probe = ixp4xx_eth_probe,
	.remove_new = ixp4xx_eth_remove,
};
module_platform_driver(ixp4xx_eth_driver);

MODULE_AUTHOR("Krzysztof Halasa" );
MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver" );
MODULE_LICENSE("GPL v2" );
MODULE_ALIAS("platform:ixp4xx_eth" );
1632 | |