1 | /* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */ |
2 | /* |
3 | Written 1999-2000 by Donald Becker. |
4 | |
5 | This software may be used and distributed according to the terms of |
6 | the GNU General Public License (GPL), incorporated herein by reference. |
7 | Drivers based on or derived from this code fall under the GPL and must |
8 | retain the authorship, copyright and license notice. This file is not |
9 | a complete program and may only be used when the entire operating |
10 | system is licensed under the GPL. |
11 | |
12 | The author may be reached as becker@scyld.com, or C/O |
13 | Scyld Computing Corporation |
14 | 410 Severn Ave., Suite 210 |
15 | Annapolis MD 21403 |
16 | |
17 | Support and updates available at |
18 | http://www.scyld.com/network/sundance.html |
19 | [link no longer provides useful info -jgarzik] |
20 | Archives of the mailing list are still available at |
21 | https://www.beowulf.org/pipermail/netdrivers/ |
22 | |
23 | */ |
24 | |
25 | #define DRV_NAME "sundance" |
26 | |
27 | /* The user-configurable values. |
28 | These may be modified when a driver module is loaded.*/ |
29 | static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */ |
30 | /* Maximum number of multicast addresses to filter (vs. rx-all-multicast). |
31 | Typical is a 64 element hash table based on the Ethernet CRC. */ |
32 | static const int multicast_filter_limit = 32; |
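/* A sketch of the hashing that set_rx_mode() below implements: six bits
   of ether_crc_le() over the address select one of 64 filter bits,
   spread across four 16-bit MulticastFilter registers:
	mc_filter[index / 16] |= 1 << (index % 16);
 */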
33 | |
34 | /* Set the copy breakpoint for the copy-only-tiny-frames scheme. |
35 | Setting to > 1518 effectively disables this feature. |
36 | This chip can receive into offset buffers, so the Alpha does not |
37 | need a copy-align. */ |
38 | static int rx_copybreak; |
39 | static int flowctrl=1; |
40 | |
41 | /* media[] specifies the media type the NIC operates at. |
42 | autosense Autosensing active media. |
43 | 10mbps_hd 10Mbps half duplex. |
44 | 10mbps_fd 10Mbps full duplex. |
45 | 100mbps_hd 100Mbps half duplex. |
46 | 100mbps_fd 100Mbps full duplex. |
47 | 0 Autosensing active media. |
48 | 1 10Mbps half duplex. |
49 | 2 10Mbps full duplex. |
50 | 3 100Mbps half duplex. |
51 | 4 100Mbps full duplex. |
52 | */ |
53 | #define MAX_UNITS 8 |
54 | static char *media[MAX_UNITS]; |
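
/* For example (a hypothetical invocation, assuming the usual module
   loader):
	modprobe sundance media=100mbps_fd,autosense
   forces the first card to 100Mbps full duplex and leaves the second
   autosensing; see the option parsing in sundance_probe1(). */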
55 | |
56 | |
57 | /* Operational parameters that are set at compile time. */ |
58 | |
59 | /* Keep the ring sizes a power of two for compile efficiency. |
60 | The compiler will convert <unsigned>'%'<2^N> into a bit mask. |
61 | Making the Tx ring too large decreases the effectiveness of channel |
62 | bonding and packet priority, and more than 128 requires modifying the |
63 | Tx error recovery. |
64 | Large receive rings merely waste memory. */ |
65 | #define TX_RING_SIZE 32 |
66 | #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */ |
67 | #define RX_RING_SIZE 64 |
68 | #define RX_BUDGET 32 |
69 | #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc) |
70 | #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc) |
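
/* With TX_RING_SIZE = 32, for instance, the compiler can reduce
	entry = np->cur_tx % TX_RING_SIZE;
   to the mask form np->cur_tx & 31, so no division is emitted. */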
71 | |
72 | /* Operational parameters that usually are not changed. */ |
73 | /* Time in jiffies before concluding the transmitter is hung. */ |
74 | #define TX_TIMEOUT (4*HZ) |
75 | #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ |
76 | |
77 | /* Include files, designed to support most kernel versions 2.0.0 and later. */ |
78 | #include <linux/module.h> |
79 | #include <linux/kernel.h> |
80 | #include <linux/string.h> |
81 | #include <linux/timer.h> |
82 | #include <linux/errno.h> |
83 | #include <linux/ioport.h> |
84 | #include <linux/interrupt.h> |
85 | #include <linux/pci.h> |
86 | #include <linux/netdevice.h> |
87 | #include <linux/etherdevice.h> |
88 | #include <linux/skbuff.h> |
89 | #include <linux/init.h> |
90 | #include <linux/bitops.h> |
91 | #include <linux/uaccess.h> |
92 | #include <asm/processor.h> /* Processor type for cache alignment. */ |
93 | #include <asm/io.h> |
94 | #include <linux/delay.h> |
95 | #include <linux/spinlock.h> |
96 | #include <linux/dma-mapping.h> |
97 | #include <linux/crc32.h> |
98 | #include <linux/ethtool.h> |
99 | #include <linux/mii.h> |
100 | |
101 | MODULE_AUTHOR("Donald Becker <becker@scyld.com>" ); |
102 | MODULE_DESCRIPTION("Sundance Alta Ethernet driver" ); |
103 | MODULE_LICENSE("GPL" ); |
104 | |
105 | module_param(debug, int, 0); |
106 | module_param(rx_copybreak, int, 0); |
107 | module_param_array(media, charp, NULL, 0); |
108 | module_param(flowctrl, int, 0); |
MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
112 | |
113 | /* |
114 | Theory of Operation |
115 | |
116 | I. Board Compatibility |
117 | |
118 | This driver is designed for the Sundance Technologies "Alta" ST201 chip. |
119 | |
120 | II. Board-specific settings |
121 | |
122 | III. Driver operation |
123 | |
124 | IIIa. Ring buffers |
125 | |
126 | This driver uses two statically allocated fixed-size descriptor lists |
127 | formed into rings by a branch from the final descriptor to the beginning of |
128 | the list. The ring sizes are set at compile time by RX/TX_RING_SIZE. |
129 | Some chips explicitly use only 2^N sized rings, while others use a |
130 | 'next descriptor' pointer that the driver forms into rings. |
131 | |
132 | IIIb/c. Transmit/Receive Structure |
133 | |
134 | This driver uses a zero-copy receive and transmit scheme. |
135 | The driver allocates full frame size skbuffs for the Rx ring buffers at |
136 | open() time and passes the skb->data field to the chip as receive data |
137 | buffers. When an incoming frame is less than RX_COPYBREAK bytes long, |
138 | a fresh skbuff is allocated and the frame is copied to the new skbuff. |
139 | When the incoming frame is larger, the skbuff is passed directly up the |
140 | protocol stack. Buffers consumed this way are replaced by newly allocated |
141 | skbuffs in a later phase of receives. |
142 | |
The RX_COPYBREAK value is chosen to trade off the memory wasted by
144 | using a full-sized skbuff for small frames vs. the copying costs of larger |
145 | frames. New boards are typically used in generously configured machines |
146 | and the underfilled buffers have negligible impact compared to the benefit of |
147 | a single allocation size, so the default value of zero results in never |
148 | copying packets. When copying is done, the cost is usually mitigated by using |
149 | a combined copy/checksum routine. Copying also preloads the cache, which is |
150 | most useful with small frames. |
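
For example, loading the module with a hypothetical "rx_copybreak=256"
copies received frames shorter than 256 bytes into freshly allocated
skbuffs while the original ring buffers are reused; see rx_poll() below.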
151 | |
152 | A subtle aspect of the operation is that the IP header at offset 14 in an |
153 | ethernet frame isn't longword aligned for further processing. |
154 | Unaligned buffers are permitted by the Sundance hardware, so |
155 | frames are received into the skbuff at an offset of "+2", 16-byte aligning |
156 | the IP header. |
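
For example, the Rx allocation in init_ring() below does
	skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
	skb_reserve(skb, 2);
so the 14-byte Ethernet header ends on a 16-byte boundary and the IP
header that follows is longword aligned.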
157 | |
158 | IIId. Synchronization |
159 | |
160 | The driver runs as two independent, single-threaded flows of control. One |
161 | is the send-packet routine, which enforces single-threaded use by the |
162 | dev->tbusy flag. The other thread is the interrupt handler, which is single |
163 | threaded by the hardware and interrupt handling software. |
164 | |
165 | The send packet thread has partial control over the Tx ring and 'dev->tbusy' |
flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'lp->tx_full' flag.
169 | |
170 | The interrupt handler has exclusive control over the Rx ring and records stats |
171 | from the Tx ring. After reaping the stats, it marks the Tx queue entry as |
172 | empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it |
173 | clears both the tx_full and tbusy flags. |
174 | |
175 | IV. Notes |
176 | |
177 | IVb. References |
178 | |
179 | The Sundance ST201 datasheet, preliminary version. |
180 | The Kendin KS8723 datasheet, preliminary version. |
181 | The ICplus IP100 datasheet, preliminary version. |
182 | http://www.scyld.com/expert/100mbps.html |
183 | http://www.scyld.com/expert/NWay.html |
184 | |
185 | IVc. Errata |
186 | |
187 | */ |
188 | |
189 | /* Work-around for Kendin chip bugs. */ |
190 | #ifndef CONFIG_SUNDANCE_MMIO |
191 | #define USE_IO_OPS 1 |
192 | #endif |
193 | |
194 | static const struct pci_device_id sundance_pci_tbl[] = { |
195 | { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 }, |
196 | { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 }, |
197 | { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 }, |
198 | { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 }, |
199 | { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, |
200 | { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 }, |
201 | { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 }, |
202 | { } |
203 | }; |
204 | MODULE_DEVICE_TABLE(pci, sundance_pci_tbl); |
205 | |
206 | enum { |
207 | netdev_io_size = 128 |
208 | }; |
209 | |
210 | struct pci_id_info { |
211 | const char *name; |
212 | }; |
213 | static const struct pci_id_info pci_id_tbl[] = { |
214 | {"D-Link DFE-550TX FAST Ethernet Adapter" }, |
215 | {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter" }, |
216 | {"D-Link DFE-580TX 4 port Server Adapter" }, |
217 | {"D-Link DFE-530TXS FAST Ethernet Adapter" }, |
218 | {"D-Link DL10050-based FAST Ethernet Adapter" }, |
219 | {"Sundance Technology Alta" }, |
220 | {"IC Plus Corporation IP100A FAST Ethernet Adapter" }, |
221 | { } /* terminate list. */ |
222 | }; |
223 | |
224 | /* This driver was written to use PCI memory space, however x86-oriented |
225 | hardware often uses I/O space accesses. */ |
226 | |
227 | /* Offsets to the device registers. |
228 | Unlike software-only systems, device drivers interact with complex hardware. |
229 | It's not useful to define symbolic names for every register bit in the |
230 | device. The name can only partially document the semantics and make |
231 | the driver longer and more difficult to read. |
232 | In general, only the important configuration values or bits changed |
233 | multiple times should be defined symbolically. |
234 | */ |
235 | enum alta_offsets { |
236 | DMACtrl = 0x00, |
237 | TxListPtr = 0x04, |
238 | TxDMABurstThresh = 0x08, |
239 | TxDMAUrgentThresh = 0x09, |
240 | TxDMAPollPeriod = 0x0a, |
241 | RxDMAStatus = 0x0c, |
242 | RxListPtr = 0x10, |
243 | DebugCtrl0 = 0x1a, |
244 | DebugCtrl1 = 0x1c, |
245 | RxDMABurstThresh = 0x14, |
246 | RxDMAUrgentThresh = 0x15, |
247 | RxDMAPollPeriod = 0x16, |
248 | LEDCtrl = 0x1a, |
249 | ASICCtrl = 0x30, |
250 | EEData = 0x34, |
251 | EECtrl = 0x36, |
252 | FlashAddr = 0x40, |
253 | FlashData = 0x44, |
254 | WakeEvent = 0x45, |
255 | TxStatus = 0x46, |
256 | TxFrameId = 0x47, |
257 | DownCounter = 0x18, |
258 | IntrClear = 0x4a, |
259 | IntrEnable = 0x4c, |
260 | IntrStatus = 0x4e, |
261 | MACCtrl0 = 0x50, |
262 | MACCtrl1 = 0x52, |
263 | StationAddr = 0x54, |
264 | MaxFrameSize = 0x5A, |
265 | RxMode = 0x5c, |
266 | MIICtrl = 0x5e, |
267 | MulticastFilter0 = 0x60, |
268 | MulticastFilter1 = 0x64, |
269 | RxOctetsLow = 0x68, |
270 | RxOctetsHigh = 0x6a, |
271 | TxOctetsLow = 0x6c, |
272 | TxOctetsHigh = 0x6e, |
273 | TxFramesOK = 0x70, |
274 | RxFramesOK = 0x72, |
275 | StatsCarrierError = 0x74, |
276 | StatsLateColl = 0x75, |
277 | StatsMultiColl = 0x76, |
278 | StatsOneColl = 0x77, |
279 | StatsTxDefer = 0x78, |
280 | RxMissed = 0x79, |
281 | StatsTxXSDefer = 0x7a, |
282 | StatsTxAbort = 0x7b, |
283 | StatsBcastTx = 0x7c, |
284 | StatsBcastRx = 0x7d, |
285 | StatsMcastTx = 0x7e, |
286 | StatsMcastRx = 0x7f, |
287 | /* Aliased and bogus values! */ |
288 | RxStatus = 0x0c, |
289 | }; |
290 | |
291 | #define ASIC_HI_WORD(x) ((x) + 2) |
292 | |
293 | enum ASICCtrl_HiWord_bit { |
294 | GlobalReset = 0x0001, |
295 | RxReset = 0x0002, |
296 | TxReset = 0x0004, |
297 | DMAReset = 0x0008, |
298 | FIFOReset = 0x0010, |
299 | NetworkReset = 0x0020, |
300 | HostReset = 0x0040, |
301 | ResetBusy = 0x0400, |
302 | }; |
303 | |
304 | /* Bits in the interrupt status/mask registers. */ |
305 | enum intr_status_bits { |
306 | IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008, |
307 | IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020, |
308 | IntrDrvRqst=0x0040, |
309 | StatsMax=0x0080, LinkChange=0x0100, |
310 | IntrTxDMADone=0x0200, IntrRxDMADone=0x0400, |
311 | }; |
312 | |
313 | /* Bits in the RxMode register. */ |
314 | enum rx_mode_bits { |
315 | AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08, |
316 | AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01, |
317 | }; |
318 | /* Bits in MACCtrl. */ |
319 | enum mac_ctrl0_bits { |
320 | EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40, |
321 | EnbFlowCtrl=0x100, EnbPassRxCRC=0x200, |
322 | }; |
323 | enum mac_ctrl1_bits { |
324 | StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080, |
325 | TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400, |
326 | RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000, |
327 | }; |
328 | |
329 | /* Bits in WakeEvent register. */ |
330 | enum wake_event_bits { |
331 | WakePktEnable = 0x01, |
332 | MagicPktEnable = 0x02, |
333 | LinkEventEnable = 0x04, |
334 | WolEnable = 0x80, |
335 | }; |
336 | |
337 | /* The Rx and Tx buffer descriptors. */ |
338 | /* Note that using only 32 bit fields simplifies conversion to big-endian |
339 | architectures. */ |
340 | struct netdev_desc { |
341 | __le32 next_desc; |
342 | __le32 status; |
343 | struct desc_frag { __le32 addr, length; } frag; |
344 | }; |
345 | |
346 | /* Bits in netdev_desc.status */ |
347 | enum desc_status_bits { |
348 | DescOwn=0x8000, |
349 | DescEndPacket=0x4000, |
350 | DescEndRing=0x2000, |
351 | LastFrag=0x80000000, |
352 | DescIntrOnTx=0x8000, |
353 | DescIntrOnDMADone=0x80000000, |
354 | DisableAlign = 0x00000001, |
355 | }; |
356 | |
357 | #define PRIV_ALIGN 15 /* Required alignment mask */ |
358 | /* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment |
359 | within the structure. */ |
360 | #define MII_CNT 4 |
361 | struct netdev_private { |
362 | /* Descriptor rings first for alignment. */ |
363 | struct netdev_desc *rx_ring; |
364 | struct netdev_desc *tx_ring; |
365 | struct sk_buff* rx_skbuff[RX_RING_SIZE]; |
366 | struct sk_buff* tx_skbuff[TX_RING_SIZE]; |
367 | dma_addr_t tx_ring_dma; |
368 | dma_addr_t rx_ring_dma; |
369 | struct timer_list timer; /* Media monitoring timer. */ |
370 | struct net_device *ndev; /* backpointer */ |
371 | /* ethtool extra stats */ |
372 | struct { |
373 | u64 tx_multiple_collisions; |
374 | u64 tx_single_collisions; |
375 | u64 tx_late_collisions; |
376 | u64 tx_deferred; |
377 | u64 tx_deferred_excessive; |
378 | u64 tx_aborted; |
379 | u64 tx_bcasts; |
380 | u64 rx_bcasts; |
381 | u64 tx_mcasts; |
382 | u64 rx_mcasts; |
383 | } xstats; |
384 | /* Frequently used values: keep some adjacent for cache effect. */ |
385 | spinlock_t lock; |
386 | int msg_enable; |
387 | int chip_id; |
388 | unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */ |
389 | unsigned int rx_buf_sz; /* Based on MTU+slack. */ |
390 | struct netdev_desc *last_tx; /* Last Tx descriptor used. */ |
391 | unsigned int cur_tx, dirty_tx; |
	/* These values keep track of the transceiver/media in use. */
393 | unsigned int flowctrl:1; |
394 | unsigned int default_port:4; /* Last dev->if_port value. */ |
395 | unsigned int an_enable:1; |
396 | unsigned int speed; |
397 | unsigned int wol_enabled:1; /* Wake on LAN enabled */ |
398 | struct tasklet_struct rx_tasklet; |
399 | struct tasklet_struct tx_tasklet; |
400 | int budget; |
401 | int cur_task; |
402 | /* Multicast and receive mode. */ |
403 | spinlock_t mcastlock; /* SMP lock multicast updates. */ |
404 | u16 mcast_filter[4]; |
405 | /* MII transceiver section. */ |
406 | struct mii_if_info mii_if; |
407 | int mii_preamble_required; |
408 | unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */ |
409 | struct pci_dev *pci_dev; |
410 | void __iomem *base; |
411 | spinlock_t statlock; |
412 | }; |
413 | |
414 | /* The station address location in the EEPROM. */ |
415 | #define EEPROM_SA_OFFSET 0x10 |
416 | #define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \ |
417 | IntrDrvRqst | IntrTxDone | StatsMax | \ |
418 | LinkChange) |
419 | |
420 | static int change_mtu(struct net_device *dev, int new_mtu); |
421 | static int eeprom_read(void __iomem *ioaddr, int location); |
422 | static int mdio_read(struct net_device *dev, int phy_id, int location); |
423 | static void mdio_write(struct net_device *dev, int phy_id, int location, int value); |
424 | static int mdio_wait_link(struct net_device *dev, int wait); |
425 | static int netdev_open(struct net_device *dev); |
426 | static void check_duplex(struct net_device *dev); |
427 | static void netdev_timer(struct timer_list *t); |
428 | static void tx_timeout(struct net_device *dev, unsigned int txqueue); |
429 | static void init_ring(struct net_device *dev); |
430 | static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev); |
431 | static int reset_tx (struct net_device *dev); |
432 | static irqreturn_t intr_handler(int irq, void *dev_instance); |
433 | static void rx_poll(struct tasklet_struct *t); |
434 | static void tx_poll(struct tasklet_struct *t); |
435 | static void refill_rx (struct net_device *dev); |
static void netdev_error(struct net_device *dev, int intr_status);
438 | static void set_rx_mode(struct net_device *dev); |
439 | static int __set_mac_addr(struct net_device *dev); |
440 | static int sundance_set_mac_addr(struct net_device *dev, void *data); |
441 | static struct net_device_stats *get_stats(struct net_device *dev); |
442 | static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
443 | static int netdev_close(struct net_device *dev); |
444 | static const struct ethtool_ops ethtool_ops; |
445 | |
446 | static void sundance_reset(struct net_device *dev, unsigned long reset_cmd) |
447 | { |
448 | struct netdev_private *np = netdev_priv(dev); |
449 | void __iomem *ioaddr = np->base + ASICCtrl; |
450 | int countdown; |
451 | |
452 | /* ST201 documentation states ASICCtrl is a 32bit register */ |
453 | iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr); |
454 | /* ST201 documentation states reset can take up to 1 ms */ |
455 | countdown = 10 + 1; |
456 | while (ioread32 (ioaddr) & (ResetBusy << 16)) { |
457 | if (--countdown == 0) { |
458 | printk(KERN_WARNING "%s : reset not completed !!\n" , dev->name); |
459 | break; |
460 | } |
461 | udelay(100); |
462 | } |
463 | } |
464 | |
465 | #ifdef CONFIG_NET_POLL_CONTROLLER |
466 | static void sundance_poll_controller(struct net_device *dev) |
467 | { |
468 | struct netdev_private *np = netdev_priv(dev); |
469 | |
	disable_irq(np->pci_dev->irq);
	intr_handler(np->pci_dev->irq, dev);
	enable_irq(np->pci_dev->irq);
473 | } |
474 | #endif |
475 | |
476 | static const struct net_device_ops netdev_ops = { |
477 | .ndo_open = netdev_open, |
478 | .ndo_stop = netdev_close, |
479 | .ndo_start_xmit = start_tx, |
480 | .ndo_get_stats = get_stats, |
481 | .ndo_set_rx_mode = set_rx_mode, |
482 | .ndo_eth_ioctl = netdev_ioctl, |
483 | .ndo_tx_timeout = tx_timeout, |
484 | .ndo_change_mtu = change_mtu, |
485 | .ndo_set_mac_address = sundance_set_mac_addr, |
486 | .ndo_validate_addr = eth_validate_addr, |
487 | #ifdef CONFIG_NET_POLL_CONTROLLER |
488 | .ndo_poll_controller = sundance_poll_controller, |
489 | #endif |
490 | }; |
491 | |
492 | static int sundance_probe1(struct pci_dev *pdev, |
493 | const struct pci_device_id *ent) |
494 | { |
495 | struct net_device *dev; |
496 | struct netdev_private *np; |
497 | static int card_idx; |
498 | int chip_idx = ent->driver_data; |
499 | int irq; |
500 | int i; |
501 | void __iomem *ioaddr; |
502 | u16 mii_ctl; |
503 | void *ring_space; |
504 | dma_addr_t ring_dma; |
505 | #ifdef USE_IO_OPS |
506 | int bar = 0; |
507 | #else |
508 | int bar = 1; |
509 | #endif |
510 | int phy, phy_end, phy_idx = 0; |
511 | __le16 addr[ETH_ALEN / 2]; |
512 | |
	if (pci_enable_device(pdev))
		return -EIO;
	pci_set_master(pdev);
516 | |
517 | irq = pdev->irq; |
518 | |
519 | dev = alloc_etherdev(sizeof(*np)); |
520 | if (!dev) |
521 | return -ENOMEM; |
522 | SET_NETDEV_DEV(dev, &pdev->dev); |
523 | |
524 | if (pci_request_regions(pdev, DRV_NAME)) |
525 | goto err_out_netdev; |
526 | |
	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
528 | if (!ioaddr) |
529 | goto err_out_res; |
530 | |
531 | for (i = 0; i < 3; i++) |
532 | addr[i] = |
533 | cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET)); |
	eth_hw_addr_set(dev, (u8 *)addr);
535 | |
536 | np = netdev_priv(dev); |
537 | np->ndev = dev; |
538 | np->base = ioaddr; |
539 | np->pci_dev = pdev; |
540 | np->chip_id = chip_idx; |
541 | np->msg_enable = (1 << debug) - 1; |
542 | spin_lock_init(&np->lock); |
543 | spin_lock_init(&np->statlock); |
544 | tasklet_setup(t: &np->rx_tasklet, callback: rx_poll); |
545 | tasklet_setup(t: &np->tx_tasklet, callback: tx_poll); |
546 | |
	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
			&ring_dma, GFP_KERNEL);
549 | if (!ring_space) |
550 | goto err_out_cleardev; |
551 | np->tx_ring = (struct netdev_desc *)ring_space; |
552 | np->tx_ring_dma = ring_dma; |
553 | |
	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE,
			&ring_dma, GFP_KERNEL);
556 | if (!ring_space) |
557 | goto err_out_unmap_tx; |
558 | np->rx_ring = (struct netdev_desc *)ring_space; |
559 | np->rx_ring_dma = ring_dma; |
560 | |
561 | np->mii_if.dev = dev; |
562 | np->mii_if.mdio_read = mdio_read; |
563 | np->mii_if.mdio_write = mdio_write; |
564 | np->mii_if.phy_id_mask = 0x1f; |
565 | np->mii_if.reg_num_mask = 0x1f; |
566 | |
567 | /* The chip-specific entries in the device structure. */ |
568 | dev->netdev_ops = &netdev_ops; |
569 | dev->ethtool_ops = ðtool_ops; |
570 | dev->watchdog_timeo = TX_TIMEOUT; |
571 | |
572 | /* MTU range: 68 - 8191 */ |
573 | dev->min_mtu = ETH_MIN_MTU; |
574 | dev->max_mtu = 8191; |
575 | |
	pci_set_drvdata(pdev, dev);
577 | |
578 | i = register_netdev(dev); |
579 | if (i) |
580 | goto err_out_unmap_rx; |
581 | |
582 | printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n" , |
583 | dev->name, pci_id_tbl[chip_idx].name, ioaddr, |
584 | dev->dev_addr, irq); |
585 | |
586 | np->phys[0] = 1; /* Default setting */ |
587 | np->mii_preamble_required++; |
588 | |
589 | /* |
590 | * It seems some phys doesn't deal well with address 0 being accessed |
591 | * first |
592 | */ |
593 | if (sundance_pci_tbl[np->chip_id].device == 0x0200) { |
594 | phy = 0; |
595 | phy_end = 31; |
596 | } else { |
597 | phy = 1; |
598 | phy_end = 32; /* wraps to zero, due to 'phy & 0x1f' */ |
599 | } |
600 | for (; phy <= phy_end && phy_idx < MII_CNT; phy++) { |
601 | int phyx = phy & 0x1f; |
		int mii_status = mdio_read(dev, phyx, MII_BMSR);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			np->phys[phy_idx++] = phyx;
			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
			if ((mii_status & 0x0040) == 0)
				np->mii_preamble_required++;
			printk(KERN_INFO "%s: MII PHY found at address %d, status "
			       "0x%4.4x advertising %4.4x.\n",
			       dev->name, phyx, mii_status, np->mii_if.advertising);
		}
611 | } |
612 | } |
613 | np->mii_preamble_required--; |
614 | |
615 | if (phy_idx == 0) { |
616 | printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n" , |
617 | dev->name, ioread32(ioaddr + ASICCtrl)); |
618 | goto err_out_unregister; |
619 | } |
620 | |
621 | np->mii_if.phy_id = np->phys[0]; |
622 | |
623 | /* Parse override configuration */ |
624 | np->an_enable = 1; |
625 | if (card_idx < MAX_UNITS) { |
626 | if (media[card_idx] != NULL) { |
627 | np->an_enable = 0; |
			if (strcmp(media[card_idx], "100mbps_fd") == 0 ||
			    strcmp(media[card_idx], "4") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 1;
			} else if (strcmp(media[card_idx], "100mbps_hd") == 0 ||
				   strcmp(media[card_idx], "3") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 0;
			} else if (strcmp(media[card_idx], "10mbps_fd") == 0 ||
				   strcmp(media[card_idx], "2") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 1;
			} else if (strcmp(media[card_idx], "10mbps_hd") == 0 ||
				   strcmp(media[card_idx], "1") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 0;
			} else {
				np->an_enable = 1;
			}
647 | } |
648 | if (flowctrl == 1) |
649 | np->flowctrl = 1; |
650 | } |
651 | |
652 | /* Fibre PHY? */ |
653 | if (ioread32 (ioaddr + ASICCtrl) & 0x80) { |
654 | /* Default 100Mbps Full */ |
655 | if (np->an_enable) { |
656 | np->speed = 100; |
657 | np->mii_if.full_duplex = 1; |
658 | np->an_enable = 0; |
659 | } |
660 | } |
661 | /* Reset PHY */ |
	mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
	mdelay(300);
	/* If flow control enabled, we need to advertise it.*/
	if (np->flowctrl)
		mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
	mdio_write(dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
668 | /* Force media type */ |
669 | if (!np->an_enable) { |
670 | mii_ctl = 0; |
671 | mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0; |
672 | mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0; |
		mdio_write(dev, np->phys[0], MII_BMCR, mii_ctl);
		printk(KERN_INFO "Override speed=%d, %s duplex\n",
		       np->speed, np->mii_if.full_duplex ? "Full" : "Half");
676 | |
677 | } |
678 | |
679 | /* Perhaps move the reset here? */ |
680 | /* Reset the chip to erase previous misconfiguration. */ |
681 | if (netif_msg_hw(np)) |
682 | printk("ASIC Control is %x.\n" , ioread32(ioaddr + ASICCtrl)); |
683 | sundance_reset(dev, reset_cmd: 0x00ff << 16); |
684 | if (netif_msg_hw(np)) |
685 | printk("ASIC Control is now %x.\n" , ioread32(ioaddr + ASICCtrl)); |
686 | |
687 | card_idx++; |
688 | return 0; |
689 | |
690 | err_out_unregister: |
691 | unregister_netdev(dev); |
692 | err_out_unmap_rx: |
693 | dma_free_coherent(dev: &pdev->dev, RX_TOTAL_SIZE, |
694 | cpu_addr: np->rx_ring, dma_handle: np->rx_ring_dma); |
695 | err_out_unmap_tx: |
696 | dma_free_coherent(dev: &pdev->dev, TX_TOTAL_SIZE, |
697 | cpu_addr: np->tx_ring, dma_handle: np->tx_ring_dma); |
698 | err_out_cleardev: |
699 | pci_iounmap(dev: pdev, ioaddr); |
700 | err_out_res: |
701 | pci_release_regions(pdev); |
702 | err_out_netdev: |
703 | free_netdev (dev); |
704 | return -ENODEV; |
705 | } |
706 | |
707 | static int change_mtu(struct net_device *dev, int new_mtu) |
708 | { |
709 | if (netif_running(dev)) |
710 | return -EBUSY; |
711 | dev->mtu = new_mtu; |
712 | return 0; |
713 | } |
714 | |
715 | #define eeprom_delay(ee_addr) ioread32(ee_addr) |
716 | /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */ |
717 | static int eeprom_read(void __iomem *ioaddr, int location) |
718 | { |
719 | int boguscnt = 10000; /* Typical 1900 ticks. */ |
720 | iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl); |
721 | do { |
722 | eeprom_delay(ioaddr + EECtrl); |
723 | if (! (ioread16(ioaddr + EECtrl) & 0x8000)) { |
724 | return ioread16(ioaddr + EEData); |
725 | } |
726 | } while (--boguscnt > 0); |
727 | return 0; |
728 | } |
729 | |
730 | /* MII transceiver control section. |
731 | Read and write the MII registers using software-generated serial |
732 | MDIO protocol. See the MII specifications or DP83840A data sheet |
733 | for details. |
734 | |
   The maximum data clock rate is 2.5 MHz. The minimum timing is usually
   met by back-to-back 33 MHz PCI cycles. */
737 | #define mdio_delay() ioread8(mdio_addr) |
738 | |
739 | enum mii_reg_bits { |
740 | MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004, |
741 | }; |
742 | #define MDIO_EnbIn (0) |
743 | #define MDIO_WRITE0 (MDIO_EnbOutput) |
744 | #define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput) |
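
/* For example, a BMSR read from PHY 1 shifts out the 16-bit command
   (0xf6 << 10) | (1 << 5) | MII_BMSR, then clocks in the turnaround
   and 16 data bits; see mdio_read() below. */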
745 | |
746 | /* Generate the preamble required for initial synchronization and |
747 | a few older transceivers. */ |
748 | static void mdio_sync(void __iomem *mdio_addr) |
749 | { |
750 | int bits = 32; |
751 | |
752 | /* Establish sync by sending at least 32 logic ones. */ |
753 | while (--bits >= 0) { |
754 | iowrite8(MDIO_WRITE1, mdio_addr); |
755 | mdio_delay(); |
756 | iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr); |
757 | mdio_delay(); |
758 | } |
759 | } |
760 | |
761 | static int mdio_read(struct net_device *dev, int phy_id, int location) |
762 | { |
763 | struct netdev_private *np = netdev_priv(dev); |
764 | void __iomem *mdio_addr = np->base + MIICtrl; |
765 | int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location; |
766 | int i, retval = 0; |
767 | |
768 | if (np->mii_preamble_required) |
769 | mdio_sync(mdio_addr); |
770 | |
771 | /* Shift the read command bits out. */ |
772 | for (i = 15; i >= 0; i--) { |
773 | int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0; |
774 | |
775 | iowrite8(dataval, mdio_addr); |
776 | mdio_delay(); |
777 | iowrite8(dataval | MDIO_ShiftClk, mdio_addr); |
778 | mdio_delay(); |
779 | } |
780 | /* Read the two transition, 16 data, and wire-idle bits. */ |
781 | for (i = 19; i > 0; i--) { |
782 | iowrite8(MDIO_EnbIn, mdio_addr); |
783 | mdio_delay(); |
784 | retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0); |
785 | iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr); |
786 | mdio_delay(); |
787 | } |
788 | return (retval>>1) & 0xffff; |
789 | } |
790 | |
791 | static void mdio_write(struct net_device *dev, int phy_id, int location, int value) |
792 | { |
793 | struct netdev_private *np = netdev_priv(dev); |
794 | void __iomem *mdio_addr = np->base + MIICtrl; |
795 | int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value; |
796 | int i; |
797 | |
798 | if (np->mii_preamble_required) |
799 | mdio_sync(mdio_addr); |
800 | |
801 | /* Shift the command bits out. */ |
802 | for (i = 31; i >= 0; i--) { |
803 | int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0; |
804 | |
805 | iowrite8(dataval, mdio_addr); |
806 | mdio_delay(); |
807 | iowrite8(dataval | MDIO_ShiftClk, mdio_addr); |
808 | mdio_delay(); |
809 | } |
810 | /* Clear out extra bits. */ |
811 | for (i = 2; i > 0; i--) { |
812 | iowrite8(MDIO_EnbIn, mdio_addr); |
813 | mdio_delay(); |
814 | iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr); |
815 | mdio_delay(); |
816 | } |
817 | } |
818 | |
819 | static int mdio_wait_link(struct net_device *dev, int wait) |
820 | { |
821 | int bmsr; |
822 | int phy_id; |
823 | struct netdev_private *np; |
824 | |
825 | np = netdev_priv(dev); |
826 | phy_id = np->phys[0]; |
827 | |
828 | do { |
829 | bmsr = mdio_read(dev, phy_id, MII_BMSR); |
830 | if (bmsr & 0x0004) |
831 | return 0; |
832 | mdelay(1); |
833 | } while (--wait > 0); |
834 | return -1; |
835 | } |
836 | |
837 | static int netdev_open(struct net_device *dev) |
838 | { |
839 | struct netdev_private *np = netdev_priv(dev); |
840 | void __iomem *ioaddr = np->base; |
841 | const int irq = np->pci_dev->irq; |
842 | unsigned long flags; |
843 | int i; |
844 | |
	sundance_reset(dev, 0x00ff << 16);
846 | |
	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
848 | if (i) |
849 | return i; |
850 | |
851 | if (netif_msg_ifup(np)) |
852 | printk(KERN_DEBUG "%s: netdev_open() irq %d\n" , dev->name, irq); |
853 | |
854 | init_ring(dev); |
855 | |
856 | iowrite32(np->rx_ring_dma, ioaddr + RxListPtr); |
857 | /* The Tx list pointer is written as packets are queued. */ |
858 | |
859 | /* Initialize other registers. */ |
860 | __set_mac_addr(dev); |
861 | #if IS_ENABLED(CONFIG_VLAN_8021Q) |
862 | iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize); |
863 | #else |
864 | iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize); |
865 | #endif |
866 | if (dev->mtu > 2047) |
867 | iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl); |
868 | |
869 | /* Configure the PCI bus bursts and FIFO thresholds. */ |
870 | |
871 | if (dev->if_port == 0) |
872 | dev->if_port = np->default_port; |
873 | |
874 | spin_lock_init(&np->mcastlock); |
875 | |
876 | set_rx_mode(dev); |
877 | iowrite16(0, ioaddr + IntrEnable); |
878 | iowrite16(0, ioaddr + DownCounter); |
879 | /* Set the chip to poll every N*320nsec. */ |
880 | iowrite8(100, ioaddr + RxDMAPollPeriod); |
881 | iowrite8(127, ioaddr + TxDMAPollPeriod); |
882 | /* Fix DFE-580TX packet drop issue */ |
883 | if (np->pci_dev->revision >= 0x14) |
884 | iowrite8(0x01, ioaddr + DebugCtrl1); |
885 | netif_start_queue(dev); |
886 | |
887 | spin_lock_irqsave(&np->lock, flags); |
888 | reset_tx(dev); |
	spin_unlock_irqrestore(&np->lock, flags);
890 | |
891 | iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1); |
892 | |
893 | /* Disable Wol */ |
894 | iowrite8(ioread8(ioaddr + WakeEvent) | 0x00, ioaddr + WakeEvent); |
895 | np->wol_enabled = 0; |
896 | |
897 | if (netif_msg_ifup(np)) |
898 | printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x " |
899 | "MAC Control %x, %4.4x %4.4x.\n" , |
900 | dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus), |
901 | ioread32(ioaddr + MACCtrl0), |
902 | ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0)); |
903 | |
904 | /* Set the timer to check for link beat. */ |
905 | timer_setup(&np->timer, netdev_timer, 0); |
906 | np->timer.expires = jiffies + 3*HZ; |
	add_timer(&np->timer);
908 | |
909 | /* Enable interrupts by setting the interrupt mask. */ |
910 | iowrite16(DEFAULT_INTR, ioaddr + IntrEnable); |
911 | |
912 | return 0; |
913 | } |
914 | |
915 | static void check_duplex(struct net_device *dev) |
916 | { |
917 | struct netdev_private *np = netdev_priv(dev); |
918 | void __iomem *ioaddr = np->base; |
	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
920 | int negotiated = mii_lpa & np->mii_if.advertising; |
921 | int duplex; |
922 | |
923 | /* Force media */ |
924 | if (!np->an_enable || mii_lpa == 0xffff) { |
925 | if (np->mii_if.full_duplex) |
926 | iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex, |
927 | ioaddr + MACCtrl0); |
928 | return; |
929 | } |
930 | |
931 | /* Autonegotiation */ |
932 | duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040; |
933 | if (np->mii_if.full_duplex != duplex) { |
934 | np->mii_if.full_duplex = duplex; |
935 | if (netif_msg_link(np)) |
936 | printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d " |
937 | "negotiated capability %4.4x.\n" , dev->name, |
938 | duplex ? "full" : "half" , np->phys[0], negotiated); |
939 | iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0); |
940 | } |
941 | } |
942 | |
943 | static void netdev_timer(struct timer_list *t) |
944 | { |
945 | struct netdev_private *np = from_timer(np, t, timer); |
946 | struct net_device *dev = np->mii_if.dev; |
947 | void __iomem *ioaddr = np->base; |
948 | int next_tick = 10*HZ; |
949 | |
950 | if (netif_msg_timer(np)) { |
951 | printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, " |
952 | "Tx %x Rx %x.\n" , |
953 | dev->name, ioread16(ioaddr + IntrEnable), |
954 | ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus)); |
955 | } |
956 | check_duplex(dev); |
957 | np->timer.expires = jiffies + next_tick; |
	add_timer(&np->timer);
959 | } |
960 | |
961 | static void tx_timeout(struct net_device *dev, unsigned int txqueue) |
962 | { |
963 | struct netdev_private *np = netdev_priv(dev); |
964 | void __iomem *ioaddr = np->base; |
965 | unsigned long flag; |
966 | |
967 | netif_stop_queue(dev); |
	tasklet_disable_in_atomic(&np->tx_tasklet);
969 | iowrite16(0, ioaddr + IntrEnable); |
970 | printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x " |
971 | "TxFrameId %2.2x," |
972 | " resetting...\n" , dev->name, ioread8(ioaddr + TxStatus), |
973 | ioread8(ioaddr + TxFrameId)); |
974 | |
975 | { |
976 | int i; |
977 | for (i=0; i<TX_RING_SIZE; i++) { |
978 | printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n" , i, |
979 | (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)), |
980 | le32_to_cpu(np->tx_ring[i].next_desc), |
981 | le32_to_cpu(np->tx_ring[i].status), |
982 | (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff, |
983 | le32_to_cpu(np->tx_ring[i].frag.addr), |
984 | le32_to_cpu(np->tx_ring[i].frag.length)); |
985 | } |
986 | printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n" , |
987 | ioread32(np->base + TxListPtr), |
988 | netif_queue_stopped(dev)); |
989 | printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n" , |
990 | np->cur_tx, np->cur_tx % TX_RING_SIZE, |
991 | np->dirty_tx, np->dirty_tx % TX_RING_SIZE); |
992 | printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n" , np->cur_rx, np->dirty_rx); |
993 | printk(KERN_DEBUG "cur_task=%d\n" , np->cur_task); |
994 | } |
995 | spin_lock_irqsave(&np->lock, flag); |
996 | |
	/* Stop and restart the chip's Tx processes. */
998 | reset_tx(dev); |
	spin_unlock_irqrestore(&np->lock, flag);
1000 | |
1001 | dev->if_port = 0; |
1002 | |
1003 | netif_trans_update(dev); /* prevent tx timeout */ |
1004 | dev->stats.tx_errors++; |
1005 | if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) { |
1006 | netif_wake_queue(dev); |
1007 | } |
1008 | iowrite16(DEFAULT_INTR, ioaddr + IntrEnable); |
	tasklet_enable(&np->tx_tasklet);
1010 | } |
1011 | |
1012 | |
1013 | /* Initialize the Rx and Tx rings, along with various 'dev' bits. */ |
1014 | static void init_ring(struct net_device *dev) |
1015 | { |
1016 | struct netdev_private *np = netdev_priv(dev); |
1017 | int i; |
1018 | |
1019 | np->cur_rx = np->cur_tx = 0; |
1020 | np->dirty_rx = np->dirty_tx = 0; |
1021 | np->cur_task = 0; |
1022 | |
1023 | np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16); |
1024 | |
1025 | /* Initialize all Rx descriptors. */ |
1026 | for (i = 0; i < RX_RING_SIZE; i++) { |
1027 | np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma + |
1028 | ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring)); |
1029 | np->rx_ring[i].status = 0; |
1030 | np->rx_ring[i].frag.length = 0; |
1031 | np->rx_skbuff[i] = NULL; |
1032 | } |
1033 | |
1034 | /* Fill in the Rx buffers. Handle allocation failure gracefully. */ |
1035 | for (i = 0; i < RX_RING_SIZE; i++) { |
1036 | struct sk_buff *skb = |
			netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1038 | np->rx_skbuff[i] = skb; |
1039 | if (skb == NULL) |
1040 | break; |
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
1042 | np->rx_ring[i].frag.addr = cpu_to_le32( |
1043 | dma_map_single(&np->pci_dev->dev, skb->data, |
1044 | np->rx_buf_sz, DMA_FROM_DEVICE)); |
		if (dma_mapping_error(&np->pci_dev->dev,
					np->rx_ring[i].frag.addr)) {
1047 | dev_kfree_skb(skb); |
1048 | np->rx_skbuff[i] = NULL; |
1049 | break; |
1050 | } |
1051 | np->rx_ring[i].frag.length = cpu_to_le32(np->rx_buf_sz | LastFrag); |
1052 | } |
1053 | np->dirty_rx = (unsigned int)(i - RX_RING_SIZE); |
1054 | |
1055 | for (i = 0; i < TX_RING_SIZE; i++) { |
1056 | np->tx_skbuff[i] = NULL; |
1057 | np->tx_ring[i].status = 0; |
1058 | } |
1059 | } |
1060 | |
1061 | static void tx_poll(struct tasklet_struct *t) |
1062 | { |
1063 | struct netdev_private *np = from_tasklet(np, t, tx_tasklet); |
1064 | unsigned head = np->cur_task % TX_RING_SIZE; |
1065 | struct netdev_desc *txdesc = |
1066 | &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE]; |
1067 | |
1068 | /* Chain the next pointer */ |
1069 | for (; np->cur_tx - np->cur_task > 0; np->cur_task++) { |
1070 | int entry = np->cur_task % TX_RING_SIZE; |
1071 | txdesc = &np->tx_ring[entry]; |
1072 | if (np->last_tx) { |
1073 | np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma + |
1074 | entry*sizeof(struct netdev_desc)); |
1075 | } |
1076 | np->last_tx = txdesc; |
1077 | } |
1078 | /* Indicate the latest descriptor of tx ring */ |
1079 | txdesc->status |= cpu_to_le32(DescIntrOnTx); |
1080 | |
1081 | if (ioread32 (np->base + TxListPtr) == 0) |
1082 | iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc), |
1083 | np->base + TxListPtr); |
1084 | } |
1085 | |
1086 | static netdev_tx_t |
1087 | start_tx (struct sk_buff *skb, struct net_device *dev) |
1088 | { |
1089 | struct netdev_private *np = netdev_priv(dev); |
1090 | struct netdev_desc *txdesc; |
1091 | unsigned entry; |
1092 | |
1093 | /* Calculate the next Tx descriptor entry. */ |
1094 | entry = np->cur_tx % TX_RING_SIZE; |
1095 | np->tx_skbuff[entry] = skb; |
1096 | txdesc = &np->tx_ring[entry]; |
1097 | |
1098 | txdesc->next_desc = 0; |
1099 | txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign); |
1100 | txdesc->frag.addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev, |
1101 | skb->data, skb->len, DMA_TO_DEVICE)); |
	if (dma_mapping_error(&np->pci_dev->dev,
				txdesc->frag.addr))
1104 | goto drop_frame; |
1105 | txdesc->frag.length = cpu_to_le32 (skb->len | LastFrag); |
1106 | |
1107 | /* Increment cur_tx before tasklet_schedule() */ |
1108 | np->cur_tx++; |
1109 | mb(); |
1110 | /* Schedule a tx_poll() task */ |
1111 | tasklet_schedule(t: &np->tx_tasklet); |
1112 | |
1113 | /* On some architectures: explicitly flush cache lines here. */ |
1114 | if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 && |
1115 | !netif_queue_stopped(dev)) { |
1116 | /* do nothing */ |
1117 | } else { |
1118 | netif_stop_queue (dev); |
1119 | } |
1120 | if (netif_msg_tx_queued(np)) { |
1121 | printk (KERN_DEBUG |
1122 | "%s: Transmit frame #%d queued in slot %d.\n" , |
1123 | dev->name, np->cur_tx, entry); |
1124 | } |
1125 | return NETDEV_TX_OK; |
1126 | |
1127 | drop_frame: |
1128 | dev_kfree_skb_any(skb); |
1129 | np->tx_skbuff[entry] = NULL; |
1130 | dev->stats.tx_dropped++; |
1131 | return NETDEV_TX_OK; |
1132 | } |
1133 | |
1134 | /* Reset hardware tx and free all of tx buffers */ |
1135 | static int |
1136 | reset_tx (struct net_device *dev) |
1137 | { |
1138 | struct netdev_private *np = netdev_priv(dev); |
1139 | void __iomem *ioaddr = np->base; |
1140 | struct sk_buff *skb; |
1141 | int i; |
1142 | |
1143 | /* Reset tx logic, TxListPtr will be cleaned */ |
1144 | iowrite16 (TxDisable, ioaddr + MACCtrl1); |
	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1146 | |
1147 | /* free all tx skbuff */ |
1148 | for (i = 0; i < TX_RING_SIZE; i++) { |
1149 | np->tx_ring[i].next_desc = 0; |
1150 | |
1151 | skb = np->tx_skbuff[i]; |
1152 | if (skb) { |
1153 | dma_unmap_single(&np->pci_dev->dev, |
1154 | le32_to_cpu(np->tx_ring[i].frag.addr), |
1155 | skb->len, DMA_TO_DEVICE); |
1156 | dev_kfree_skb_any(skb); |
1157 | np->tx_skbuff[i] = NULL; |
1158 | dev->stats.tx_dropped++; |
1159 | } |
1160 | } |
1161 | np->cur_tx = np->dirty_tx = 0; |
1162 | np->cur_task = 0; |
1163 | |
1164 | np->last_tx = NULL; |
1165 | iowrite8(127, ioaddr + TxDMAPollPeriod); |
1166 | |
1167 | iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1); |
1168 | return 0; |
1169 | } |
1170 | |
/* The interrupt handler cleans up after the Tx thread
   and schedules the Rx poll tasklet. */
1173 | static irqreturn_t intr_handler(int irq, void *dev_instance) |
1174 | { |
1175 | struct net_device *dev = (struct net_device *)dev_instance; |
1176 | struct netdev_private *np = netdev_priv(dev); |
1177 | void __iomem *ioaddr = np->base; |
1178 | int hw_frame_id; |
1179 | int tx_cnt; |
1180 | int tx_status; |
1181 | int handled = 0; |
1182 | int i; |
1183 | |
1184 | do { |
1185 | int intr_status = ioread16(ioaddr + IntrStatus); |
1186 | iowrite16(intr_status, ioaddr + IntrStatus); |
1187 | |
1188 | if (netif_msg_intr(np)) |
1189 | printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n" , |
1190 | dev->name, intr_status); |
1191 | |
1192 | if (!(intr_status & DEFAULT_INTR)) |
1193 | break; |
1194 | |
1195 | handled = 1; |
1196 | |
1197 | if (intr_status & (IntrRxDMADone)) { |
1198 | iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone), |
1199 | ioaddr + IntrEnable); |
1200 | if (np->budget < 0) |
1201 | np->budget = RX_BUDGET; |
1202 | tasklet_schedule(t: &np->rx_tasklet); |
1203 | } |
1204 | if (intr_status & (IntrTxDone | IntrDrvRqst)) { |
1205 | tx_status = ioread16 (ioaddr + TxStatus); |
1206 | for (tx_cnt=32; tx_status & 0x80; --tx_cnt) { |
1207 | if (netif_msg_tx_done(np)) |
1208 | printk |
1209 | ("%s: Transmit status is %2.2x.\n" , |
1210 | dev->name, tx_status); |
1211 | if (tx_status & 0x1e) { |
1212 | if (netif_msg_tx_err(np)) |
1213 | printk("%s: Transmit error status %4.4x.\n" , |
1214 | dev->name, tx_status); |
1215 | dev->stats.tx_errors++; |
1216 | if (tx_status & 0x10) |
1217 | dev->stats.tx_fifo_errors++; |
1218 | if (tx_status & 0x08) |
1219 | dev->stats.collisions++; |
1220 | if (tx_status & 0x04) |
1221 | dev->stats.tx_fifo_errors++; |
1222 | if (tx_status & 0x02) |
1223 | dev->stats.tx_window_errors++; |
1224 | |
1225 | /* |
1226 | ** This reset has been verified on |
1227 | ** DFE-580TX boards ! phdm@macqel.be. |
1228 | */ |
1229 | if (tx_status & 0x10) { /* TxUnderrun */ |
1230 | /* Restart Tx FIFO and transmitter */ |
						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1232 | /* No need to reset the Tx pointer here */ |
1233 | } |
1234 | /* Restart the Tx. Need to make sure tx enabled */ |
1235 | i = 10; |
1236 | do { |
1237 | iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1); |
1238 | if (ioread16(ioaddr + MACCtrl1) & TxEnabled) |
1239 | break; |
1240 | mdelay(1); |
1241 | } while (--i); |
1242 | } |
1243 | /* Yup, this is a documentation bug. It cost me *hours*. */ |
1244 | iowrite16 (0, ioaddr + TxStatus); |
1245 | if (tx_cnt < 0) { |
1246 | iowrite32(5000, ioaddr + DownCounter); |
1247 | break; |
1248 | } |
1249 | tx_status = ioread16 (ioaddr + TxStatus); |
1250 | } |
1251 | hw_frame_id = (tx_status >> 8) & 0xff; |
1252 | } else { |
1253 | hw_frame_id = ioread8(ioaddr + TxFrameId); |
1254 | } |
1255 | |
1256 | if (np->pci_dev->revision >= 0x14) { |
1257 | spin_lock(lock: &np->lock); |
1258 | for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { |
1259 | int entry = np->dirty_tx % TX_RING_SIZE; |
1260 | struct sk_buff *skb; |
1261 | int sw_frame_id; |
1262 | sw_frame_id = (le32_to_cpu( |
1263 | np->tx_ring[entry].status) >> 2) & 0xff; |
1264 | if (sw_frame_id == hw_frame_id && |
1265 | !(le32_to_cpu(np->tx_ring[entry].status) |
1266 | & 0x00010000)) |
1267 | break; |
1268 | if (sw_frame_id == (hw_frame_id + 1) % |
1269 | TX_RING_SIZE) |
1270 | break; |
1271 | skb = np->tx_skbuff[entry]; |
1272 | /* Free the original skb. */ |
1273 | dma_unmap_single(&np->pci_dev->dev, |
1274 | le32_to_cpu(np->tx_ring[entry].frag.addr), |
1275 | skb->len, DMA_TO_DEVICE); |
				dev_consume_skb_irq(np->tx_skbuff[entry]);
1277 | np->tx_skbuff[entry] = NULL; |
1278 | np->tx_ring[entry].frag.addr = 0; |
1279 | np->tx_ring[entry].frag.length = 0; |
1280 | } |
1281 | spin_unlock(lock: &np->lock); |
1282 | } else { |
1283 | spin_lock(lock: &np->lock); |
1284 | for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { |
1285 | int entry = np->dirty_tx % TX_RING_SIZE; |
1286 | struct sk_buff *skb; |
1287 | if (!(le32_to_cpu(np->tx_ring[entry].status) |
1288 | & 0x00010000)) |
1289 | break; |
1290 | skb = np->tx_skbuff[entry]; |
1291 | /* Free the original skb. */ |
1292 | dma_unmap_single(&np->pci_dev->dev, |
1293 | le32_to_cpu(np->tx_ring[entry].frag.addr), |
1294 | skb->len, DMA_TO_DEVICE); |
				dev_consume_skb_irq(np->tx_skbuff[entry]);
1296 | np->tx_skbuff[entry] = NULL; |
1297 | np->tx_ring[entry].frag.addr = 0; |
1298 | np->tx_ring[entry].frag.length = 0; |
1299 | } |
1300 | spin_unlock(lock: &np->lock); |
1301 | } |
1302 | |
1303 | if (netif_queue_stopped(dev) && |
1304 | np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) { |
1305 | /* The ring is no longer full, clear busy flag. */ |
1306 | netif_wake_queue (dev); |
1307 | } |
1308 | /* Abnormal error summary/uncommon events handlers. */ |
1309 | if (intr_status & (IntrPCIErr | LinkChange | StatsMax)) |
1310 | netdev_error(dev, intr_status); |
1311 | } while (0); |
1312 | if (netif_msg_intr(np)) |
1313 | printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n" , |
1314 | dev->name, ioread16(ioaddr + IntrStatus)); |
1315 | return IRQ_RETVAL(handled); |
1316 | } |
1317 | |
1318 | static void rx_poll(struct tasklet_struct *t) |
1319 | { |
1320 | struct netdev_private *np = from_tasklet(np, t, rx_tasklet); |
1321 | struct net_device *dev = np->ndev; |
1322 | int entry = np->cur_rx % RX_RING_SIZE; |
1323 | int boguscnt = np->budget; |
1324 | void __iomem *ioaddr = np->base; |
1325 | int received = 0; |
1326 | |
1327 | /* If EOP is set on the next entry, it's a new packet. Send it up. */ |
1328 | while (1) { |
1329 | struct netdev_desc *desc = &(np->rx_ring[entry]); |
1330 | u32 frame_status = le32_to_cpu(desc->status); |
1331 | int pkt_len; |
1332 | |
1333 | if (--boguscnt < 0) { |
1334 | goto not_done; |
1335 | } |
1336 | if (!(frame_status & DescOwn)) |
1337 | break; |
1338 | pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */ |
1339 | if (netif_msg_rx_status(np)) |
1340 | printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n" , |
1341 | frame_status); |
1342 | if (frame_status & 0x001f4000) { |
1343 | /* There was a error. */ |
1344 | if (netif_msg_rx_err(np)) |
1345 | printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n" , |
1346 | frame_status); |
1347 | dev->stats.rx_errors++; |
1348 | if (frame_status & 0x00100000) |
1349 | dev->stats.rx_length_errors++; |
1350 | if (frame_status & 0x00010000) |
1351 | dev->stats.rx_fifo_errors++; |
1352 | if (frame_status & 0x00060000) |
1353 | dev->stats.rx_frame_errors++; |
1354 | if (frame_status & 0x00080000) |
1355 | dev->stats.rx_crc_errors++; |
1356 | if (frame_status & 0x00100000) { |
1357 | printk(KERN_WARNING "%s: Oversized Ethernet frame," |
1358 | " status %8.8x.\n" , |
1359 | dev->name, frame_status); |
1360 | } |
1361 | } else { |
1362 | struct sk_buff *skb; |
1363 | #ifndef final_version |
1364 | if (netif_msg_rx_status(np)) |
1365 | printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d" |
1366 | ", bogus_cnt %d.\n" , |
1367 | pkt_len, boguscnt); |
1368 | #endif |
1369 | /* Check if the packet is long enough to accept without copying |
1370 | to a minimally-sized skbuff. */ |
1371 | if (pkt_len < rx_copybreak && |
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				dma_sync_single_for_cpu(&np->pci_dev->dev,
						le32_to_cpu(desc->frag.addr),
						np->rx_buf_sz, DMA_FROM_DEVICE);
				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
				dma_sync_single_for_device(&np->pci_dev->dev,
						le32_to_cpu(desc->frag.addr),
						np->rx_buf_sz, DMA_FROM_DEVICE);
				skb_put(skb, pkt_len);
1382 | } else { |
1383 | dma_unmap_single(&np->pci_dev->dev, |
1384 | le32_to_cpu(desc->frag.addr), |
1385 | np->rx_buf_sz, DMA_FROM_DEVICE); |
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
1387 | np->rx_skbuff[entry] = NULL; |
1388 | } |
1389 | skb->protocol = eth_type_trans(skb, dev); |
1390 | /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */ |
1391 | netif_rx(skb); |
1392 | } |
1393 | entry = (entry + 1) % RX_RING_SIZE; |
1394 | received++; |
1395 | } |
1396 | np->cur_rx = entry; |
1397 | refill_rx (dev); |
1398 | np->budget -= received; |
1399 | iowrite16(DEFAULT_INTR, ioaddr + IntrEnable); |
1400 | return; |
1401 | |
1402 | not_done: |
1403 | np->cur_rx = entry; |
1404 | refill_rx (dev); |
1405 | if (!received) |
1406 | received = 1; |
1407 | np->budget -= received; |
1408 | if (np->budget <= 0) |
1409 | np->budget = RX_BUDGET; |
1410 | tasklet_schedule(t: &np->rx_tasklet); |
1411 | } |
1412 | |
1413 | static void refill_rx (struct net_device *dev) |
1414 | { |
1415 | struct netdev_private *np = netdev_priv(dev); |
1416 | int entry; |
1417 | |
1418 | /* Refill the Rx ring buffers. */ |
1419 | for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0; |
1420 | np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) { |
1421 | struct sk_buff *skb; |
1422 | entry = np->dirty_rx % RX_RING_SIZE; |
1423 | if (np->rx_skbuff[entry] == NULL) { |
			skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2);
1425 | np->rx_skbuff[entry] = skb; |
1426 | if (skb == NULL) |
1427 | break; /* Better luck next round. */ |
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
1429 | np->rx_ring[entry].frag.addr = cpu_to_le32( |
1430 | dma_map_single(&np->pci_dev->dev, skb->data, |
1431 | np->rx_buf_sz, DMA_FROM_DEVICE)); |
			if (dma_mapping_error(&np->pci_dev->dev,
						np->rx_ring[entry].frag.addr)) {
1434 | dev_kfree_skb_irq(skb); |
1435 | np->rx_skbuff[entry] = NULL; |
1436 | break; |
1437 | } |
1438 | } |
1439 | /* Perhaps we need not reset this field. */ |
1440 | np->rx_ring[entry].frag.length = |
1441 | cpu_to_le32(np->rx_buf_sz | LastFrag); |
1442 | np->rx_ring[entry].status = 0; |
1443 | } |
1444 | } |
1445 | static void netdev_error(struct net_device *dev, int intr_status) |
1446 | { |
1447 | struct netdev_private *np = netdev_priv(dev); |
1448 | void __iomem *ioaddr = np->base; |
1449 | u16 mii_ctl, mii_advertise, mii_lpa; |
1450 | int speed; |
1451 | |
1452 | if (intr_status & LinkChange) { |
		if (mdio_wait_link(dev, 10) == 0) {
			printk(KERN_INFO "%s: Link up\n", dev->name);
1455 | if (np->an_enable) { |
				mii_advertise = mdio_read(dev, np->phys[0],
							  MII_ADVERTISE);
				mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1459 | mii_advertise &= mii_lpa; |
1460 | printk(KERN_INFO "%s: Link changed: " , |
1461 | dev->name); |
1462 | if (mii_advertise & ADVERTISE_100FULL) { |
1463 | np->speed = 100; |
1464 | printk("100Mbps, full duplex\n" ); |
1465 | } else if (mii_advertise & ADVERTISE_100HALF) { |
1466 | np->speed = 100; |
1467 | printk("100Mbps, half duplex\n" ); |
1468 | } else if (mii_advertise & ADVERTISE_10FULL) { |
1469 | np->speed = 10; |
1470 | printk("10Mbps, full duplex\n" ); |
1471 | } else if (mii_advertise & ADVERTISE_10HALF) { |
1472 | np->speed = 10; |
1473 | printk("10Mbps, half duplex\n" ); |
1474 | } else |
1475 | printk("\n" ); |
1476 | |
1477 | } else { |
				mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
				speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
				np->speed = speed;
				printk(KERN_INFO "%s: Link changed: %dMbps ,",
					dev->name, speed);
				printk("%s duplex.\n",
					(mii_ctl & BMCR_FULLDPLX) ?
					"full" : "half");
1486 | } |
1487 | check_duplex(dev); |
1488 | if (np->flowctrl && np->mii_if.full_duplex) { |
1489 | iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200, |
1490 | ioaddr + MulticastFilter1+2); |
1491 | iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl, |
1492 | ioaddr + MACCtrl0); |
1493 | } |
1494 | netif_carrier_on(dev); |
1495 | } else { |
1496 | printk(KERN_INFO "%s: Link down\n" , dev->name); |
1497 | netif_carrier_off(dev); |
1498 | } |
1499 | } |
1500 | if (intr_status & StatsMax) { |
1501 | get_stats(dev); |
1502 | } |
1503 | if (intr_status & IntrPCIErr) { |
1504 | printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n" , |
1505 | dev->name, intr_status); |
1506 | /* We must do a global reset of DMA to continue. */ |
1507 | } |
1508 | } |
1509 | |
1510 | static struct net_device_stats *get_stats(struct net_device *dev) |
1511 | { |
1512 | struct netdev_private *np = netdev_priv(dev); |
1513 | void __iomem *ioaddr = np->base; |
1514 | unsigned long flags; |
1515 | u8 late_coll, single_coll, mult_coll; |
1516 | |
1517 | spin_lock_irqsave(&np->statlock, flags); |
	/* The chip only needs to report frames silently dropped. */
1519 | dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed); |
1520 | dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK); |
1521 | dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK); |
1522 | dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError); |
1523 | |
1524 | mult_coll = ioread8(ioaddr + StatsMultiColl); |
1525 | np->xstats.tx_multiple_collisions += mult_coll; |
1526 | single_coll = ioread8(ioaddr + StatsOneColl); |
1527 | np->xstats.tx_single_collisions += single_coll; |
1528 | late_coll = ioread8(ioaddr + StatsLateColl); |
1529 | np->xstats.tx_late_collisions += late_coll; |
1530 | dev->stats.collisions += mult_coll |
1531 | + single_coll |
1532 | + late_coll; |
1533 | |
1534 | np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer); |
1535 | np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer); |
1536 | np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort); |
1537 | np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx); |
1538 | np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx); |
1539 | np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx); |
1540 | np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx); |
1541 | |
1542 | dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow); |
1543 | dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16; |
1544 | dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow); |
1545 | dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16; |
1546 | |
	spin_unlock_irqrestore(&np->statlock, flags);
1548 | |
1549 | return &dev->stats; |
1550 | } |
1551 | |
1552 | static void set_rx_mode(struct net_device *dev) |
1553 | { |
1554 | struct netdev_private *np = netdev_priv(dev); |
1555 | void __iomem *ioaddr = np->base; |
1556 | u16 mc_filter[4]; /* Multicast hash filter */ |
1557 | u32 rx_mode; |
1558 | int i; |
1559 | |
1560 | if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ |
1561 | memset(mc_filter, 0xff, sizeof(mc_filter)); |
1562 | rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys; |
1563 | } else if ((netdev_mc_count(dev) > multicast_filter_limit) || |
1564 | (dev->flags & IFF_ALLMULTI)) { |
1565 | /* Too many to match, or accept all multicasts. */ |
1566 | memset(mc_filter, 0xff, sizeof(mc_filter)); |
1567 | rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; |
1568 | } else if (!netdev_mc_empty(dev)) { |
1569 | struct netdev_hw_addr *ha; |
1570 | int bit; |
1571 | int index; |
1572 | int crc; |
1573 | memset (mc_filter, 0, sizeof (mc_filter)); |
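		/* Hash each address with the little-endian Ethernet CRC
		 * and use its six most significant bits (bit-reversed) to
		 * pick one of the 64 filter bits spread across the four
		 * 16-bit filter registers. */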
1574 | netdev_for_each_mc_addr(ha, dev) { |
1575 | crc = ether_crc_le(ETH_ALEN, ha->addr); |
			for (index = 0, bit = 0; bit < 6; bit++, crc <<= 1)
				if (crc & 0x80000000)
					index |= 1 << bit;
			mc_filter[index/16] |= (1 << (index % 16));
1579 | } |
1580 | rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys; |
1581 | } else { |
1582 | iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode); |
1583 | return; |
1584 | } |
1585 | if (np->mii_if.full_duplex && np->flowctrl) |
1586 | mc_filter[3] |= 0x0200; |
1587 | |
1588 | for (i = 0; i < 4; i++) |
1589 | iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2); |
1590 | iowrite8(rx_mode, ioaddr + RxMode); |
1591 | } |
1592 | |
1593 | static int __set_mac_addr(struct net_device *dev) |
1594 | { |
1595 | struct netdev_private *np = netdev_priv(dev); |
1596 | u16 addr16; |
1597 | |
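	/* The station address is programmed as three little-endian
	 * 16-bit words at StationAddr, StationAddr+2 and StationAddr+4. */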
1598 | addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8)); |
1599 | iowrite16(addr16, np->base + StationAddr); |
1600 | addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8)); |
1601 | iowrite16(addr16, np->base + StationAddr+2); |
1602 | addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8)); |
1603 | iowrite16(addr16, np->base + StationAddr+4); |
1604 | return 0; |
1605 | } |
1606 | |
1607 | /* Invoked with rtnl_lock held */ |
1608 | static int sundance_set_mac_addr(struct net_device *dev, void *data) |
1609 | { |
1610 | const struct sockaddr *addr = data; |
1611 | |
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	eth_hw_addr_set(dev, addr->sa_data);
1615 | __set_mac_addr(dev); |
1616 | |
1617 | return 0; |
1618 | } |
1619 | |
1620 | static const struct { |
1621 | const char name[ETH_GSTRING_LEN]; |
1622 | } sundance_stats[] = { |
1623 | { "tx_multiple_collisions" }, |
1624 | { "tx_single_collisions" }, |
1625 | { "tx_late_collisions" }, |
1626 | { "tx_deferred" }, |
1627 | { "tx_deferred_excessive" }, |
1628 | { "tx_aborted" }, |
1629 | { "tx_bcasts" }, |
1630 | { "rx_bcasts" }, |
1631 | { "tx_mcasts" }, |
1632 | { "rx_mcasts" }, |
1633 | }; |
1634 | |
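/* ethtool_ops .begin hook: reject ethtool calls while the interface is
 * down, since most of the handlers below touch live chip state. */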
1635 | static int check_if_running(struct net_device *dev) |
1636 | { |
1637 | if (!netif_running(dev)) |
1638 | return -EINVAL; |
1639 | return 0; |
1640 | } |
1641 | |
1642 | static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) |
1643 | { |
1644 | struct netdev_private *np = netdev_priv(dev); |
	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1647 | } |
1648 | |
1649 | static int get_link_ksettings(struct net_device *dev, |
1650 | struct ethtool_link_ksettings *cmd) |
1651 | { |
1652 | struct netdev_private *np = netdev_priv(dev); |
	spin_lock_irq(&np->lock);
	mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);
1656 | return 0; |
1657 | } |
1658 | |
1659 | static int set_link_ksettings(struct net_device *dev, |
1660 | const struct ethtool_link_ksettings *cmd) |
1661 | { |
1662 | struct netdev_private *np = netdev_priv(dev); |
1663 | int res; |
	spin_lock_irq(&np->lock);
	res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);
1667 | return res; |
1668 | } |
1669 | |
1670 | static int nway_reset(struct net_device *dev) |
1671 | { |
1672 | struct netdev_private *np = netdev_priv(dev); |
	return mii_nway_restart(&np->mii_if);
1674 | } |
1675 | |
1676 | static u32 get_link(struct net_device *dev) |
1677 | { |
1678 | struct netdev_private *np = netdev_priv(dev); |
	return mii_link_ok(&np->mii_if);
1680 | } |
1681 | |
1682 | static u32 get_msglevel(struct net_device *dev) |
1683 | { |
1684 | struct netdev_private *np = netdev_priv(dev); |
1685 | return np->msg_enable; |
1686 | } |
1687 | |
1688 | static void set_msglevel(struct net_device *dev, u32 val) |
1689 | { |
1690 | struct netdev_private *np = netdev_priv(dev); |
1691 | np->msg_enable = val; |
1692 | } |
1693 | |
1694 | static void get_strings(struct net_device *dev, u32 stringset, |
1695 | u8 *data) |
1696 | { |
1697 | if (stringset == ETH_SS_STATS) |
1698 | memcpy(data, sundance_stats, sizeof(sundance_stats)); |
1699 | } |
1700 | |
1701 | static int get_sset_count(struct net_device *dev, int sset) |
1702 | { |
1703 | switch (sset) { |
1704 | case ETH_SS_STATS: |
1705 | return ARRAY_SIZE(sundance_stats); |
1706 | default: |
1707 | return -EOPNOTSUPP; |
1708 | } |
1709 | } |
1710 | |
1711 | static void get_ethtool_stats(struct net_device *dev, |
1712 | struct ethtool_stats *stats, u64 *data) |
1713 | { |
1714 | struct netdev_private *np = netdev_priv(dev); |
1715 | int i = 0; |
1716 | |
1717 | get_stats(dev); |
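	/* The order of these entries must match sundance_stats[] above. */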
1718 | data[i++] = np->xstats.tx_multiple_collisions; |
1719 | data[i++] = np->xstats.tx_single_collisions; |
1720 | data[i++] = np->xstats.tx_late_collisions; |
1721 | data[i++] = np->xstats.tx_deferred; |
1722 | data[i++] = np->xstats.tx_deferred_excessive; |
1723 | data[i++] = np->xstats.tx_aborted; |
1724 | data[i++] = np->xstats.tx_bcasts; |
1725 | data[i++] = np->xstats.rx_bcasts; |
1726 | data[i++] = np->xstats.tx_mcasts; |
1727 | data[i++] = np->xstats.rx_mcasts; |
1728 | } |
1729 | |
1730 | #ifdef CONFIG_PM |
1731 | |
1732 | static void sundance_get_wol(struct net_device *dev, |
1733 | struct ethtool_wolinfo *wol) |
1734 | { |
1735 | struct netdev_private *np = netdev_priv(dev); |
1736 | void __iomem *ioaddr = np->base; |
1737 | u8 wol_bits; |
1738 | |
1739 | wol->wolopts = 0; |
1740 | |
1741 | wol->supported = (WAKE_PHY | WAKE_MAGIC); |
1742 | if (!np->wol_enabled) |
1743 | return; |
1744 | |
1745 | wol_bits = ioread8(ioaddr + WakeEvent); |
1746 | if (wol_bits & MagicPktEnable) |
1747 | wol->wolopts |= WAKE_MAGIC; |
1748 | if (wol_bits & LinkEventEnable) |
1749 | wol->wolopts |= WAKE_PHY; |
1750 | } |
1751 | |
1752 | static int sundance_set_wol(struct net_device *dev, |
1753 | struct ethtool_wolinfo *wol) |
1754 | { |
1755 | struct netdev_private *np = netdev_priv(dev); |
1756 | void __iomem *ioaddr = np->base; |
1757 | u8 wol_bits; |
1758 | |
	if (!device_can_wakeup(&np->pci_dev->dev))
1760 | return -EOPNOTSUPP; |
1761 | |
1762 | np->wol_enabled = !!(wol->wolopts); |
1763 | wol_bits = ioread8(ioaddr + WakeEvent); |
1764 | wol_bits &= ~(WakePktEnable | MagicPktEnable | |
1765 | LinkEventEnable | WolEnable); |
1766 | |
1767 | if (np->wol_enabled) { |
1768 | if (wol->wolopts & WAKE_MAGIC) |
1769 | wol_bits |= (MagicPktEnable | WolEnable); |
1770 | if (wol->wolopts & WAKE_PHY) |
1771 | wol_bits |= (LinkEventEnable | WolEnable); |
1772 | } |
1773 | iowrite8(wol_bits, ioaddr + WakeEvent); |
1774 | |
	device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled);
1776 | |
1777 | return 0; |
1778 | } |
1779 | #else |
1780 | #define sundance_get_wol NULL |
1781 | #define sundance_set_wol NULL |
1782 | #endif /* CONFIG_PM */ |
1783 | |
1784 | static const struct ethtool_ops ethtool_ops = { |
1785 | .begin = check_if_running, |
1786 | .get_drvinfo = get_drvinfo, |
1787 | .nway_reset = nway_reset, |
1788 | .get_link = get_link, |
1789 | .get_wol = sundance_get_wol, |
1790 | .set_wol = sundance_set_wol, |
1791 | .get_msglevel = get_msglevel, |
1792 | .set_msglevel = set_msglevel, |
1793 | .get_strings = get_strings, |
1794 | .get_sset_count = get_sset_count, |
1795 | .get_ethtool_stats = get_ethtool_stats, |
1796 | .get_link_ksettings = get_link_ksettings, |
1797 | .set_link_ksettings = set_link_ksettings, |
1798 | }; |
1799 | |
1800 | static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
1801 | { |
1802 | struct netdev_private *np = netdev_priv(dev); |
1803 | int rc; |
1804 | |
1805 | if (!netif_running(dev)) |
1806 | return -EINVAL; |
1807 | |
	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&np->lock);
1811 | |
1812 | return rc; |
1813 | } |
1814 | |
1815 | static int netdev_close(struct net_device *dev) |
1816 | { |
1817 | struct netdev_private *np = netdev_priv(dev); |
1818 | void __iomem *ioaddr = np->base; |
1819 | struct sk_buff *skb; |
1820 | int i; |
1821 | |
	/* Wait for any scheduled tasklets to finish, then kill them. */
	tasklet_kill(&np->rx_tasklet);
	tasklet_kill(&np->tx_tasklet);
1825 | np->cur_tx = 0; |
1826 | np->dirty_tx = 0; |
1827 | np->cur_task = 0; |
1828 | np->last_tx = NULL; |
1829 | |
1830 | netif_stop_queue(dev); |
1831 | |
1832 | if (netif_msg_ifdown(np)) { |
1833 | printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x " |
1834 | "Rx %4.4x Int %2.2x.\n" , |
1835 | dev->name, ioread8(ioaddr + TxStatus), |
1836 | ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus)); |
1837 | printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n" , |
1838 | dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx); |
1839 | } |
1840 | |
1841 | /* Disable interrupts by clearing the interrupt mask. */ |
1842 | iowrite16(0x0000, ioaddr + IntrEnable); |
1843 | |
	/* Disable Rx and Tx DMA so resources can be released safely. */
1845 | iowrite32(0x500, ioaddr + DMACtrl); |
1846 | |
1847 | /* Stop the chip's Tx and Rx processes. */ |
1848 | iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1); |
1849 | |
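	/* Poll for up to ~2 seconds (2000 x 1 ms) until the DMA engines
	 * report idle; the 0xc000 bits of DMACtrl appear to be the Tx/Rx
	 * DMA-in-progress flags. */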
1850 | for (i = 2000; i > 0; i--) { |
1851 | if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0) |
1852 | break; |
1853 | mdelay(1); |
1854 | } |
1855 | |
1856 | iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset, |
1857 | ioaddr + ASIC_HI_WORD(ASICCtrl)); |
1858 | |
1859 | for (i = 2000; i > 0; i--) { |
1860 | if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0) |
1861 | break; |
1862 | mdelay(1); |
1863 | } |
1864 | |
1865 | #ifdef __i386__ |
1866 | if (netif_msg_hw(np)) { |
1867 | printk(KERN_DEBUG " Tx ring at %8.8x:\n" , |
1868 | (int)(np->tx_ring_dma)); |
1869 | for (i = 0; i < TX_RING_SIZE; i++) |
1870 | printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n" , |
1871 | i, np->tx_ring[i].status, np->tx_ring[i].frag.addr, |
1872 | np->tx_ring[i].frag.length); |
1873 | printk(KERN_DEBUG " Rx ring %8.8x:\n" , |
1874 | (int)(np->rx_ring_dma)); |
1875 | for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) { |
1876 | printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n" , |
1877 | i, np->rx_ring[i].status, np->rx_ring[i].frag.addr, |
1878 | np->rx_ring[i].frag.length); |
1879 | } |
1880 | } |
1881 | #endif /* __i386__ debugging only */ |
1882 | |
1883 | free_irq(np->pci_dev->irq, dev); |
1884 | |
	del_timer_sync(&np->timer);
1886 | |
1887 | /* Free all the skbuffs in the Rx queue. */ |
1888 | for (i = 0; i < RX_RING_SIZE; i++) { |
1889 | np->rx_ring[i].status = 0; |
1890 | skb = np->rx_skbuff[i]; |
1891 | if (skb) { |
1892 | dma_unmap_single(&np->pci_dev->dev, |
1893 | le32_to_cpu(np->rx_ring[i].frag.addr), |
1894 | np->rx_buf_sz, DMA_FROM_DEVICE); |
1895 | dev_kfree_skb(skb); |
1896 | np->rx_skbuff[i] = NULL; |
1897 | } |
1898 | np->rx_ring[i].frag.addr = cpu_to_le32(0xBADF00D0); /* poison */ |
1899 | } |
1900 | for (i = 0; i < TX_RING_SIZE; i++) { |
1901 | np->tx_ring[i].next_desc = 0; |
1902 | skb = np->tx_skbuff[i]; |
1903 | if (skb) { |
1904 | dma_unmap_single(&np->pci_dev->dev, |
1905 | le32_to_cpu(np->tx_ring[i].frag.addr), |
1906 | skb->len, DMA_TO_DEVICE); |
1907 | dev_kfree_skb(skb); |
1908 | np->tx_skbuff[i] = NULL; |
1909 | } |
1910 | } |
1911 | |
1912 | return 0; |
1913 | } |
1914 | |
1915 | static void sundance_remove1(struct pci_dev *pdev) |
1916 | { |
1917 | struct net_device *dev = pci_get_drvdata(pdev); |
1918 | |
1919 | if (dev) { |
1920 | struct netdev_private *np = netdev_priv(dev); |
1921 | unregister_netdev(dev); |
1922 | dma_free_coherent(dev: &pdev->dev, RX_TOTAL_SIZE, |
1923 | cpu_addr: np->rx_ring, dma_handle: np->rx_ring_dma); |
1924 | dma_free_coherent(dev: &pdev->dev, TX_TOTAL_SIZE, |
1925 | cpu_addr: np->tx_ring, dma_handle: np->tx_ring_dma); |
1926 | pci_iounmap(dev: pdev, np->base); |
1927 | pci_release_regions(pdev); |
1928 | free_netdev(dev); |
1929 | } |
1930 | } |
1931 | |
1932 | static int __maybe_unused sundance_suspend(struct device *dev_d) |
1933 | { |
	struct net_device *dev = dev_get_drvdata(dev_d);
1935 | struct netdev_private *np = netdev_priv(dev); |
1936 | void __iomem *ioaddr = np->base; |
1937 | |
1938 | if (!netif_running(dev)) |
1939 | return 0; |
1940 | |
1941 | netdev_close(dev); |
1942 | netif_device_detach(dev); |
1943 | |
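	/* If Wake-on-LAN is armed, leave the receiver enabled with a
	 * restricted filter (broadcast + station address only) so wake
	 * events can still be detected while suspended. */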
1944 | if (np->wol_enabled) { |
1945 | iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode); |
1946 | iowrite16(RxEnable, ioaddr + MACCtrl1); |
1947 | } |
1948 | |
	device_set_wakeup_enable(dev_d, np->wol_enabled);
1950 | |
1951 | return 0; |
1952 | } |
1953 | |
1954 | static int __maybe_unused sundance_resume(struct device *dev_d) |
1955 | { |
	struct net_device *dev = dev_get_drvdata(dev_d);
1957 | int err = 0; |
1958 | |
1959 | if (!netif_running(dev)) |
1960 | return 0; |
1961 | |
1962 | err = netdev_open(dev); |
1963 | if (err) { |
1964 | printk(KERN_ERR "%s: Can't resume interface!\n" , |
1965 | dev->name); |
1966 | goto out; |
1967 | } |
1968 | |
1969 | netif_device_attach(dev); |
1970 | |
1971 | out: |
1972 | return err; |
1973 | } |
1974 | |
1975 | static SIMPLE_DEV_PM_OPS(sundance_pm_ops, sundance_suspend, sundance_resume); |
1976 | |
1977 | static struct pci_driver sundance_driver = { |
1978 | .name = DRV_NAME, |
1979 | .id_table = sundance_pci_tbl, |
1980 | .probe = sundance_probe1, |
1981 | .remove = sundance_remove1, |
1982 | .driver.pm = &sundance_pm_ops, |
1983 | }; |
1984 | |
1985 | module_pci_driver(sundance_driver); |
1986 | |