1 | /* tulip_core.c: A DEC 21x4x-family ethernet driver for Linux. |
2 | |
3 | Copyright 2000,2001 The Linux Kernel Team |
4 | Written/copyright 1994-2001 by Donald Becker. |
5 | |
6 | This software may be used and distributed according to the terms |
7 | of the GNU General Public License, incorporated herein by reference. |
8 | |
9 | Please submit bugs to http://bugzilla.kernel.org/ . |
10 | */ |
11 | |
12 | #define pr_fmt(fmt) "tulip: " fmt |
13 | |
14 | #define DRV_NAME "tulip" |
15 | |
16 | #include <linux/module.h> |
17 | #include <linux/pci.h> |
18 | #include <linux/slab.h> |
19 | #include "tulip.h" |
20 | #include <linux/init.h> |
21 | #include <linux/interrupt.h> |
22 | #include <linux/etherdevice.h> |
23 | #include <linux/delay.h> |
24 | #include <linux/mii.h> |
25 | #include <linux/crc32.h> |
26 | #include <asm/unaligned.h> |
27 | #include <linux/uaccess.h> |
28 | |
29 | #ifdef CONFIG_SPARC |
30 | #include <asm/prom.h> |
31 | #endif |
32 | |
/* A few user-configurable values. */

/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static unsigned int max_interrupt_work = 25;

#define MAX_UNITS 8
/* Used to pass the full-duplex flag, etc. */
static int full_duplex[MAX_UNITS];
static int options[MAX_UNITS];
static int mtu[MAX_UNITS]; /* Jumbo MTU for interfaces. */

/* The possible media types that can be set in options[] are: */
const char * const medianame[32] = {
	"10baseT" , "10base2" , "AUI" , "100baseTx" ,
	"10baseT-FDX" , "100baseTx-FDX" , "100baseT4" , "100baseFx" ,
	"100baseFx-FDX" , "MII 10baseT" , "MII 10baseT-FDX" , "MII" ,
	"10baseT(forced)" , "MII 100baseTx" , "MII 100baseTx-FDX" , "MII 100baseT4" ,
	"MII 100baseFx-HDX" , "MII 100baseFx-FDX" , "Home-PNA 1Mbps" , "Invalid-19" ,
	"" ,"" ,"" ,"" , "" ,"" ,"" ,"" , "" ,"" ,"" ,"Transceiver reset" ,
};

/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure.
   1518 copies every full-size frame; 100 copies only small packets. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
	defined(CONFIG_SPARC) || defined(__ia64__) || \
	defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak = 100;
#endif

/*
  Set the bus performance register.
	Typical: Set 16 longword cache alignment, no burst limit.
	Cache alignment bits 15:14	     Burst length 13:8
		0000	No alignment  0x00000000 unlimited		0800 8 longwords
		4000	8  longwords		0100 1 longword		1000 16 longwords
		8000	16 longwords		0200 2 longwords	2000 32 longwords
		C000	32  longwords		0400 4 longwords
	Warning: many older 486 systems are broken and require setting 0x00A04800
	   8 longword cache alignment, 8 longword burst.
	ToDo: Non-Intel setting could be better.
*/

#if defined(__alpha__) || defined(__ia64__)
static int csr0 = 0x01A00000 | 0xE000;
#elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
static int csr0 = 0x01A00000 | 0x8000;
#elif defined(CONFIG_SPARC) || defined(__hppa__)
/* The UltraSparc PCI controllers will disconnect at every 64-byte
 * crossing anyways so it makes no sense to tell Tulip to burst
 * any more than that.
 */
static int csr0 = 0x01A00000 | 0x9000;
#elif defined(__arm__) || defined(__sh__)
static int csr0 = 0x01A00000 | 0x4800;
#elif defined(__mips__)
static int csr0 = 0x00200000 | 0x4000;
#else
static int csr0;
#endif

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (4*HZ)
97 | |
98 | |
99 | MODULE_AUTHOR("The Linux Kernel Team" ); |
100 | MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver" ); |
101 | MODULE_LICENSE("GPL" ); |
102 | module_param(tulip_debug, int, 0); |
103 | module_param(max_interrupt_work, int, 0); |
104 | module_param(rx_copybreak, int, 0); |
105 | module_param(csr0, int, 0); |
106 | module_param_array(options, int, NULL, 0); |
107 | module_param_array(full_duplex, int, NULL, 0); |
108 | |
109 | #ifdef TULIP_DEBUG |
110 | int tulip_debug = TULIP_DEBUG; |
111 | #else |
112 | int tulip_debug = 1; |
113 | #endif |
114 | |
115 | static void tulip_timer(struct timer_list *t) |
116 | { |
117 | struct tulip_private *tp = from_timer(tp, t, timer); |
118 | struct net_device *dev = tp->dev; |
119 | |
120 | if (netif_running(dev)) |
121 | schedule_work(work: &tp->media_work); |
122 | } |
123 | |
124 | /* |
125 | * This table use during operation for capabilities and media timer. |
126 | * |
127 | * It is indexed via the values in 'enum chips' |
128 | */ |
129 | |
130 | const struct tulip_chip_table tulip_tbl[] = { |
131 | { }, /* placeholder for array, slot unused currently */ |
132 | { }, /* placeholder for array, slot unused currently */ |
133 | |
134 | /* DC21140 */ |
135 | { "Digital DS21140 Tulip" , 128, 0x0001ebef, |
136 | HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer, |
137 | tulip_media_task }, |
138 | |
139 | /* DC21142, DC21143 */ |
140 | { "Digital DS21142/43 Tulip" , 128, 0x0801fbff, |
141 | HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY |
142 | | HAS_INTR_MITIGATION | HAS_PCI_MWI, tulip_timer, t21142_media_task }, |
143 | |
144 | /* LC82C168 */ |
145 | { "Lite-On 82c168 PNIC" , 256, 0x0001fbef, |
146 | HAS_MII | HAS_PNICNWAY, pnic_timer, }, |
147 | |
148 | /* MX98713 */ |
149 | { "Macronix 98713 PMAC" , 128, 0x0001ebef, |
150 | HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, }, |
151 | |
152 | /* MX98715 */ |
153 | { "Macronix 98715 PMAC" , 256, 0x0001ebef, |
154 | HAS_MEDIA_TABLE, mxic_timer, }, |
155 | |
156 | /* MX98725 */ |
157 | { "Macronix 98725 PMAC" , 256, 0x0001ebef, |
158 | HAS_MEDIA_TABLE, mxic_timer, }, |
159 | |
160 | /* AX88140 */ |
161 | { "ASIX AX88140" , 128, 0x0001fbff, |
162 | HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY |
163 | | IS_ASIX, tulip_timer, tulip_media_task }, |
164 | |
165 | /* PNIC2 */ |
166 | { "Lite-On PNIC-II" , 256, 0x0801fbff, |
167 | HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer, }, |
168 | |
169 | /* COMET */ |
170 | { "ADMtek Comet" , 256, 0x0001abef, |
171 | HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer, }, |
172 | |
173 | /* COMPEX9881 */ |
174 | { "Compex 9881 PMAC" , 128, 0x0001ebef, |
175 | HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, }, |
176 | |
177 | /* I21145 */ |
178 | { "Intel DS21145 Tulip" , 128, 0x0801fbff, |
179 | HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI |
180 | | HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task }, |
181 | |
182 | /* DM910X */ |
183 | #ifdef CONFIG_TULIP_DM910X |
184 | { "Davicom DM9102/DM9102A" , 128, 0x0001ebef, |
185 | HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI, |
186 | tulip_timer, tulip_media_task }, |
187 | #else |
188 | { NULL }, |
189 | #endif |
190 | |
191 | /* RS7112 */ |
192 | { "Conexant LANfinity" , 256, 0x0001ebef, |
193 | HAS_MII | HAS_ACPI, tulip_timer, tulip_media_task }, |
194 | |
195 | }; |
196 | |
197 | |
/* PCI vendor/device IDs claimed by this driver; the last column selects
   the entry of tulip_tbl[] (enum chips) used for that device. */
static const struct pci_device_id tulip_pci_tbl[] = {
	{ 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
	{ 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
	{ 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
	{ 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
/*	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98725 },*/
	{ 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
	{ 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
	{ 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
	{ 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
#ifdef CONFIG_TULIP_DM910X
	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
#endif
	{ 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
	{ 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1186, 0x1541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1186, 0x1591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT },
	{ 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
	{ 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
	{ 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */
	{ 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
241 | |
242 | |
/* A full-duplex map for media types. */
const char tulip_media_cap[32] =
{0,0,0,16,  3,19,16,24,  27,4,7,5,  0,20,23,20,  28,31,0,0, };

/* Forward declarations for the netdev callbacks defined later in this file. */
static void tulip_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void tulip_init_ring(struct net_device *dev);
static void tulip_free_ring(struct net_device *dev);
static netdev_tx_t tulip_start_xmit(struct sk_buff *skb,
				    struct net_device *dev);
static int tulip_open(struct net_device *dev);
static int tulip_close(struct net_device *dev);
static void tulip_up(struct net_device *dev);
static void tulip_down(struct net_device *dev);
static struct net_device_stats *tulip_get_stats(struct net_device *dev);
static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void set_rx_mode(struct net_device *dev);
static void tulip_set_wolopts(struct pci_dev *pdev, u32 wolopts);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_tulip(struct net_device *dev);
#endif
263 | |
264 | static void tulip_set_power_state (struct tulip_private *tp, |
265 | int sleep, int snooze) |
266 | { |
267 | if (tp->flags & HAS_ACPI) { |
268 | u32 tmp, newtmp; |
269 | pci_read_config_dword (dev: tp->pdev, where: CFDD, val: &tmp); |
270 | newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze); |
271 | if (sleep) |
272 | newtmp |= CFDD_Sleep; |
273 | else if (snooze) |
274 | newtmp |= CFDD_Snooze; |
275 | if (tmp != newtmp) |
276 | pci_write_config_dword (dev: tp->pdev, where: CFDD, val: newtmp); |
277 | } |
278 | |
279 | } |
280 | |
281 | |
282 | static void tulip_up(struct net_device *dev) |
283 | { |
284 | struct tulip_private *tp = netdev_priv(dev); |
285 | void __iomem *ioaddr = tp->base_addr; |
286 | int next_tick = 3*HZ; |
287 | u32 reg; |
288 | int i; |
289 | |
290 | #ifdef CONFIG_TULIP_NAPI |
291 | napi_enable(n: &tp->napi); |
292 | #endif |
293 | |
294 | /* Wake the chip from sleep/snooze mode. */ |
295 | tulip_set_power_state (tp, sleep: 0, snooze: 0); |
296 | |
297 | /* Disable all WOL events */ |
298 | pci_enable_wake(dev: tp->pdev, PCI_D3hot, enable: 0); |
299 | pci_enable_wake(dev: tp->pdev, PCI_D3cold, enable: 0); |
300 | tulip_set_wolopts(pdev: tp->pdev, wolopts: 0); |
301 | |
302 | /* On some chip revs we must set the MII/SYM port before the reset!? */ |
303 | if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii)) |
304 | iowrite32(0x00040000, ioaddr + CSR6); |
305 | |
306 | /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */ |
307 | iowrite32(0x00000001, ioaddr + CSR0); |
308 | pci_read_config_dword(dev: tp->pdev, PCI_COMMAND, val: ®); /* flush write */ |
309 | udelay(100); |
310 | |
311 | /* Deassert reset. |
312 | Wait the specified 50 PCI cycles after a reset by initializing |
313 | Tx and Rx queues and the address filter list. */ |
314 | iowrite32(tp->csr0, ioaddr + CSR0); |
315 | pci_read_config_dword(dev: tp->pdev, PCI_COMMAND, val: ®); /* flush write */ |
316 | udelay(100); |
317 | |
318 | if (tulip_debug > 1) |
319 | netdev_dbg(dev, "tulip_up(), irq==%d\n" , tp->pdev->irq); |
320 | |
321 | iowrite32(tp->rx_ring_dma, ioaddr + CSR3); |
322 | iowrite32(tp->tx_ring_dma, ioaddr + CSR4); |
323 | tp->cur_rx = tp->cur_tx = 0; |
324 | tp->dirty_rx = tp->dirty_tx = 0; |
325 | |
326 | if (tp->flags & MC_HASH_ONLY) { |
327 | u32 addr_low = get_unaligned_le32(p: dev->dev_addr); |
328 | u32 addr_high = get_unaligned_le16(p: dev->dev_addr + 4); |
329 | if (tp->chip_id == AX88140) { |
330 | iowrite32(0, ioaddr + CSR13); |
331 | iowrite32(addr_low, ioaddr + CSR14); |
332 | iowrite32(1, ioaddr + CSR13); |
333 | iowrite32(addr_high, ioaddr + CSR14); |
334 | } else if (tp->flags & COMET_MAC_ADDR) { |
335 | iowrite32(addr_low, ioaddr + 0xA4); |
336 | iowrite32(addr_high, ioaddr + 0xA8); |
337 | iowrite32(0, ioaddr + CSR27); |
338 | iowrite32(0, ioaddr + CSR28); |
339 | } |
340 | } else { |
341 | /* This is set_rx_mode(), but without starting the transmitter. */ |
342 | const u16 *eaddrs = (const u16 *)dev->dev_addr; |
343 | u16 *setup_frm = &tp->setup_frame[15*6]; |
344 | dma_addr_t mapping; |
345 | |
346 | /* 21140 bug: you must add the broadcast address. */ |
347 | memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame)); |
348 | /* Fill the final entry of the table with our physical address. */ |
349 | *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0]; |
350 | *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1]; |
351 | *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2]; |
352 | |
353 | mapping = dma_map_single(&tp->pdev->dev, tp->setup_frame, |
354 | sizeof(tp->setup_frame), |
355 | DMA_TO_DEVICE); |
356 | tp->tx_buffers[tp->cur_tx].skb = NULL; |
357 | tp->tx_buffers[tp->cur_tx].mapping = mapping; |
358 | |
359 | /* Put the setup frame on the Tx list. */ |
360 | tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192); |
361 | tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping); |
362 | tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned); |
363 | |
364 | tp->cur_tx++; |
365 | } |
366 | |
367 | tp->saved_if_port = dev->if_port; |
368 | if (dev->if_port == 0) |
369 | dev->if_port = tp->default_port; |
370 | |
371 | /* Allow selecting a default media. */ |
372 | i = 0; |
373 | if (tp->mtable == NULL) |
374 | goto media_picked; |
375 | if (dev->if_port) { |
376 | int looking_for = tulip_media_cap[dev->if_port] & MediaIsMII ? 11 : |
377 | (dev->if_port == 12 ? 0 : dev->if_port); |
378 | for (i = 0; i < tp->mtable->leafcount; i++) |
379 | if (tp->mtable->mleaf[i].media == looking_for) { |
380 | dev_info(&dev->dev, |
381 | "Using user-specified media %s\n" , |
382 | medianame[dev->if_port]); |
383 | goto media_picked; |
384 | } |
385 | } |
386 | if ((tp->mtable->defaultmedia & 0x0800) == 0) { |
387 | int looking_for = tp->mtable->defaultmedia & MEDIA_MASK; |
388 | for (i = 0; i < tp->mtable->leafcount; i++) |
389 | if (tp->mtable->mleaf[i].media == looking_for) { |
390 | dev_info(&dev->dev, |
391 | "Using EEPROM-set media %s\n" , |
392 | medianame[looking_for]); |
393 | goto media_picked; |
394 | } |
395 | } |
396 | /* Start sensing first non-full-duplex media. */ |
397 | for (i = tp->mtable->leafcount - 1; |
398 | (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--) |
399 | ; |
400 | media_picked: |
401 | |
402 | tp->csr6 = 0; |
403 | tp->cur_index = i; |
404 | tp->nwayset = 0; |
405 | |
406 | if (dev->if_port) { |
407 | if (tp->chip_id == DC21143 && |
408 | (tulip_media_cap[dev->if_port] & MediaIsMII)) { |
409 | /* We must reset the media CSRs when we force-select MII mode. */ |
410 | iowrite32(0x0000, ioaddr + CSR13); |
411 | iowrite32(0x0000, ioaddr + CSR14); |
412 | iowrite32(0x0008, ioaddr + CSR15); |
413 | } |
414 | tulip_select_media(dev, startup: 1); |
415 | } else if (tp->chip_id == DC21142) { |
416 | if (tp->mii_cnt) { |
417 | tulip_select_media(dev, startup: 1); |
418 | if (tulip_debug > 1) |
419 | dev_info(&dev->dev, |
420 | "Using MII transceiver %d, status %04x\n" , |
421 | tp->phys[0], |
422 | tulip_mdio_read(dev, tp->phys[0], 1)); |
423 | iowrite32(csr6_mask_defstate, ioaddr + CSR6); |
424 | tp->csr6 = csr6_mask_hdcap; |
425 | dev->if_port = 11; |
426 | iowrite32(0x0000, ioaddr + CSR13); |
427 | iowrite32(0x0000, ioaddr + CSR14); |
428 | } else |
429 | t21142_start_nway(dev); |
430 | } else if (tp->chip_id == PNIC2) { |
431 | /* for initial startup advertise 10/100 Full and Half */ |
432 | tp->sym_advertise = 0x01E0; |
433 | /* enable autonegotiate end interrupt */ |
434 | iowrite32(ioread32(ioaddr+CSR5)| 0x00008010, ioaddr + CSR5); |
435 | iowrite32(ioread32(ioaddr+CSR7)| 0x00008010, ioaddr + CSR7); |
436 | pnic2_start_nway(dev); |
437 | } else if (tp->chip_id == LC82C168 && ! tp->medialock) { |
438 | if (tp->mii_cnt) { |
439 | dev->if_port = 11; |
440 | tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0); |
441 | iowrite32(0x0001, ioaddr + CSR15); |
442 | } else if (ioread32(ioaddr + CSR5) & TPLnkPass) |
443 | pnic_do_nway(dev); |
444 | else { |
445 | /* Start with 10mbps to do autonegotiation. */ |
446 | iowrite32(0x32, ioaddr + CSR12); |
447 | tp->csr6 = 0x00420000; |
448 | iowrite32(0x0001B078, ioaddr + 0xB8); |
449 | iowrite32(0x0201B078, ioaddr + 0xB8); |
450 | next_tick = 1*HZ; |
451 | } |
452 | } else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881) && |
453 | ! tp->medialock) { |
454 | dev->if_port = 0; |
455 | tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0); |
456 | iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80); |
457 | } else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) { |
458 | /* Provided by BOLO, Macronix - 12/10/1998. */ |
459 | dev->if_port = 0; |
460 | tp->csr6 = 0x01a80200; |
461 | iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80); |
462 | iowrite32(0x11000 | ioread16(ioaddr + 0xa0), ioaddr + 0xa0); |
463 | } else if (tp->chip_id == COMET || tp->chip_id == CONEXANT) { |
464 | /* Enable automatic Tx underrun recovery. */ |
465 | iowrite32(ioread32(ioaddr + 0x88) | 1, ioaddr + 0x88); |
466 | dev->if_port = tp->mii_cnt ? 11 : 0; |
467 | tp->csr6 = 0x00040000; |
468 | } else if (tp->chip_id == AX88140) { |
469 | tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100; |
470 | } else |
471 | tulip_select_media(dev, startup: 1); |
472 | |
473 | /* Start the chip's Tx to process setup frame. */ |
474 | tulip_stop_rxtx(tp); |
475 | barrier(); |
476 | udelay(5); |
477 | iowrite32(tp->csr6 | TxOn, ioaddr + CSR6); |
478 | |
479 | /* Enable interrupts by setting the interrupt mask. */ |
480 | iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5); |
481 | iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7); |
482 | tulip_start_rxtx(tp); |
483 | iowrite32(0, ioaddr + CSR2); /* Rx poll demand */ |
484 | |
485 | if (tulip_debug > 2) { |
486 | netdev_dbg(dev, "Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n" , |
487 | ioread32(ioaddr + CSR0), |
488 | ioread32(ioaddr + CSR5), |
489 | ioread32(ioaddr + CSR6)); |
490 | } |
491 | |
492 | /* Set the timer to switch to check for link beat and perhaps switch |
493 | to an alternate media type. */ |
494 | tp->timer.expires = RUN_AT(next_tick); |
495 | add_timer(timer: &tp->timer); |
496 | #ifdef CONFIG_TULIP_NAPI |
497 | timer_setup(&tp->oom_timer, oom_timer, 0); |
498 | #endif |
499 | } |
500 | |
501 | static int |
502 | tulip_open(struct net_device *dev) |
503 | { |
504 | struct tulip_private *tp = netdev_priv(dev); |
505 | int retval; |
506 | |
507 | tulip_init_ring (dev); |
508 | |
509 | retval = request_irq(irq: tp->pdev->irq, handler: tulip_interrupt, IRQF_SHARED, |
510 | name: dev->name, dev); |
511 | if (retval) |
512 | goto free_ring; |
513 | |
514 | tulip_up (dev); |
515 | |
516 | netif_start_queue (dev); |
517 | |
518 | return 0; |
519 | |
520 | free_ring: |
521 | tulip_free_ring (dev); |
522 | return retval; |
523 | } |
524 | |
525 | |
526 | static void tulip_tx_timeout(struct net_device *dev, unsigned int txqueue) |
527 | { |
528 | struct tulip_private *tp = netdev_priv(dev); |
529 | void __iomem *ioaddr = tp->base_addr; |
530 | unsigned long flags; |
531 | |
532 | spin_lock_irqsave (&tp->lock, flags); |
533 | |
534 | if (tulip_media_cap[dev->if_port] & MediaIsMII) { |
535 | /* Do nothing -- the media monitor should handle this. */ |
536 | if (tulip_debug > 1) |
537 | dev_warn(&dev->dev, |
538 | "Transmit timeout using MII device\n" ); |
539 | } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 || |
540 | tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 || |
541 | tp->chip_id == DM910X) { |
542 | dev_warn(&dev->dev, |
543 | "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n" , |
544 | ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12), |
545 | ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14), |
546 | ioread32(ioaddr + CSR15)); |
547 | tp->timeout_recovery = 1; |
548 | schedule_work(work: &tp->media_work); |
549 | goto out_unlock; |
550 | } else if (tp->chip_id == PNIC2) { |
551 | dev_warn(&dev->dev, |
552 | "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n" , |
553 | (int)ioread32(ioaddr + CSR5), |
554 | (int)ioread32(ioaddr + CSR6), |
555 | (int)ioread32(ioaddr + CSR7), |
556 | (int)ioread32(ioaddr + CSR12)); |
557 | } else { |
558 | dev_warn(&dev->dev, |
559 | "Transmit timed out, status %08x, CSR12 %08x, resetting...\n" , |
560 | ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12)); |
561 | dev->if_port = 0; |
562 | } |
563 | |
564 | #if defined(way_too_many_messages) |
565 | if (tulip_debug > 3) { |
566 | int i; |
567 | for (i = 0; i < RX_RING_SIZE; i++) { |
568 | u8 *buf = (u8 *)(tp->rx_ring[i].buffer1); |
569 | int j; |
570 | printk(KERN_DEBUG |
571 | "%2d: %08x %08x %08x %08x %02x %02x %02x\n" , |
572 | i, |
573 | (unsigned int)tp->rx_ring[i].status, |
574 | (unsigned int)tp->rx_ring[i].length, |
575 | (unsigned int)tp->rx_ring[i].buffer1, |
576 | (unsigned int)tp->rx_ring[i].buffer2, |
577 | buf[0], buf[1], buf[2]); |
578 | for (j = 0; ((j < 1600) && buf[j] != 0xee); j++) |
579 | if (j < 100) |
580 | pr_cont(" %02x" , buf[j]); |
581 | pr_cont(" j=%d\n" , j); |
582 | } |
583 | printk(KERN_DEBUG " Rx ring %p: " , tp->rx_ring); |
584 | for (i = 0; i < RX_RING_SIZE; i++) |
585 | pr_cont(" %08x" , (unsigned int)tp->rx_ring[i].status); |
586 | printk(KERN_DEBUG " Tx ring %p: " , tp->tx_ring); |
587 | for (i = 0; i < TX_RING_SIZE; i++) |
588 | pr_cont(" %08x" , (unsigned int)tp->tx_ring[i].status); |
589 | pr_cont("\n" ); |
590 | } |
591 | #endif |
592 | |
593 | tulip_tx_timeout_complete(tp, ioaddr); |
594 | |
595 | out_unlock: |
596 | spin_unlock_irqrestore (lock: &tp->lock, flags); |
597 | netif_trans_update(dev); /* prevent tx timeout */ |
598 | netif_wake_queue (dev); |
599 | } |
600 | |
601 | |
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void tulip_init_ring(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int i;

	tp->susp_rx = 0;
	tp->ttimer = 0;
	tp->nir = 0;

	/* First pass: chain the Rx descriptors together via buffer2
	   (no buffers attached yet, so the chip does not own them). */
	for (i = 0; i < RX_RING_SIZE; i++) {
		tp->rx_ring[i].status = 0x00000000;
		tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
		tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
		tp->rx_buffers[i].skb = NULL;
		tp->rx_buffers[i].mapping = 0;
	}
	/* Mark the last entry as wrapping the ring. */
	tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
	tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);

	/* Second pass: allocate and DMA-map a receive buffer for each
	   descriptor, then hand ownership to the chip. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		dma_addr_t mapping;

		/* Note the receive buffer must be longword aligned.
		   netdev_alloc_skb() provides 16 byte alignment.  But do *not*
		   use skb_reserve() to align the IP header! */
		struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
		tp->rx_buffers[i].skb = skb;
		if (skb == NULL)
			break;	/* partial ring is tolerated; see dirty_rx below */
		mapping = dma_map_single(&tp->pdev->dev, skb->data,
					 PKT_BUF_SZ, DMA_FROM_DEVICE);
		tp->rx_buffers[i].mapping = mapping;
		tp->rx_ring[i].status = cpu_to_le32(DescOwned);	/* Owned by Tulip chip */
		tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
	}
	/* Records how many buffers are missing if allocation stopped early. */
	tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* The Tx buffer descriptor is filled in as needed, but we
	   do need to clear the ownership bit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		tp->tx_buffers[i].skb = NULL;
		tp->tx_buffers[i].mapping = 0;
		tp->tx_ring[i].status = 0x00000000;
		tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
	}
	tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
}
651 | |
652 | static netdev_tx_t |
653 | tulip_start_xmit(struct sk_buff *skb, struct net_device *dev) |
654 | { |
655 | struct tulip_private *tp = netdev_priv(dev); |
656 | int entry; |
657 | u32 flag; |
658 | dma_addr_t mapping; |
659 | unsigned long flags; |
660 | |
661 | spin_lock_irqsave(&tp->lock, flags); |
662 | |
663 | /* Calculate the next Tx descriptor entry. */ |
664 | entry = tp->cur_tx % TX_RING_SIZE; |
665 | |
666 | tp->tx_buffers[entry].skb = skb; |
667 | mapping = dma_map_single(&tp->pdev->dev, skb->data, skb->len, |
668 | DMA_TO_DEVICE); |
669 | tp->tx_buffers[entry].mapping = mapping; |
670 | tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping); |
671 | |
672 | if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */ |
673 | flag = 0x60000000; /* No interrupt */ |
674 | } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) { |
675 | flag = 0xe0000000; /* Tx-done intr. */ |
676 | } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) { |
677 | flag = 0x60000000; /* No Tx-done intr. */ |
678 | } else { /* Leave room for set_rx_mode() to fill entries. */ |
679 | flag = 0xe0000000; /* Tx-done intr. */ |
680 | netif_stop_queue(dev); |
681 | } |
682 | if (entry == TX_RING_SIZE-1) |
683 | flag = 0xe0000000 | DESC_RING_WRAP; |
684 | |
685 | tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag); |
686 | /* if we were using Transmit Automatic Polling, we would need a |
687 | * wmb() here. */ |
688 | tp->tx_ring[entry].status = cpu_to_le32(DescOwned); |
689 | wmb(); |
690 | |
691 | tp->cur_tx++; |
692 | |
693 | /* Trigger an immediate transmit demand. */ |
694 | iowrite32(0, tp->base_addr + CSR1); |
695 | |
696 | spin_unlock_irqrestore(lock: &tp->lock, flags); |
697 | |
698 | return NETDEV_TX_OK; |
699 | } |
700 | |
701 | static void tulip_clean_tx_ring(struct tulip_private *tp) |
702 | { |
703 | unsigned int dirty_tx; |
704 | |
705 | for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0; |
706 | dirty_tx++) { |
707 | int entry = dirty_tx % TX_RING_SIZE; |
708 | int status = le32_to_cpu(tp->tx_ring[entry].status); |
709 | |
710 | if (status < 0) { |
711 | tp->dev->stats.tx_errors++; /* It wasn't Txed */ |
712 | tp->tx_ring[entry].status = 0; |
713 | } |
714 | |
715 | /* Check for Tx filter setup frames. */ |
716 | if (tp->tx_buffers[entry].skb == NULL) { |
717 | /* test because dummy frames not mapped */ |
718 | if (tp->tx_buffers[entry].mapping) |
719 | dma_unmap_single(&tp->pdev->dev, |
720 | tp->tx_buffers[entry].mapping, |
721 | sizeof(tp->setup_frame), |
722 | DMA_TO_DEVICE); |
723 | continue; |
724 | } |
725 | |
726 | dma_unmap_single(&tp->pdev->dev, |
727 | tp->tx_buffers[entry].mapping, |
728 | tp->tx_buffers[entry].skb->len, |
729 | DMA_TO_DEVICE); |
730 | |
731 | /* Free the original skb. */ |
732 | dev_kfree_skb_irq(skb: tp->tx_buffers[entry].skb); |
733 | tp->tx_buffers[entry].skb = NULL; |
734 | tp->tx_buffers[entry].mapping = 0; |
735 | } |
736 | } |
737 | |
738 | static void tulip_down (struct net_device *dev) |
739 | { |
740 | struct tulip_private *tp = netdev_priv(dev); |
741 | void __iomem *ioaddr = tp->base_addr; |
742 | unsigned long flags; |
743 | |
744 | cancel_work_sync(work: &tp->media_work); |
745 | |
746 | #ifdef CONFIG_TULIP_NAPI |
747 | napi_disable(n: &tp->napi); |
748 | #endif |
749 | |
750 | del_timer_sync (timer: &tp->timer); |
751 | #ifdef CONFIG_TULIP_NAPI |
752 | del_timer_sync (timer: &tp->oom_timer); |
753 | #endif |
754 | spin_lock_irqsave (&tp->lock, flags); |
755 | |
756 | /* Disable interrupts by clearing the interrupt mask. */ |
757 | iowrite32 (0x00000000, ioaddr + CSR7); |
758 | |
759 | /* Stop the Tx and Rx processes. */ |
760 | tulip_stop_rxtx(tp); |
761 | |
762 | /* prepare receive buffers */ |
763 | tulip_refill_rx(dev); |
764 | |
765 | /* release any unconsumed transmit buffers */ |
766 | tulip_clean_tx_ring(tp); |
767 | |
768 | if (ioread32(ioaddr + CSR6) != 0xffffffff) |
769 | dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff; |
770 | |
771 | spin_unlock_irqrestore (lock: &tp->lock, flags); |
772 | |
773 | timer_setup(&tp->timer, tulip_tbl[tp->chip_id].media_timer, 0); |
774 | |
775 | dev->if_port = tp->saved_if_port; |
776 | |
777 | /* Leave the driver in snooze, not sleep, mode. */ |
778 | tulip_set_power_state (tp, sleep: 0, snooze: 1); |
779 | } |
780 | |
781 | static void tulip_free_ring (struct net_device *dev) |
782 | { |
783 | struct tulip_private *tp = netdev_priv(dev); |
784 | int i; |
785 | |
786 | /* Free all the skbuffs in the Rx queue. */ |
787 | for (i = 0; i < RX_RING_SIZE; i++) { |
788 | struct sk_buff *skb = tp->rx_buffers[i].skb; |
789 | dma_addr_t mapping = tp->rx_buffers[i].mapping; |
790 | |
791 | tp->rx_buffers[i].skb = NULL; |
792 | tp->rx_buffers[i].mapping = 0; |
793 | |
794 | tp->rx_ring[i].status = 0; /* Not owned by Tulip chip. */ |
795 | tp->rx_ring[i].length = 0; |
796 | /* An invalid address. */ |
797 | tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0); |
798 | if (skb) { |
799 | dma_unmap_single(&tp->pdev->dev, mapping, PKT_BUF_SZ, |
800 | DMA_FROM_DEVICE); |
801 | dev_kfree_skb (skb); |
802 | } |
803 | } |
804 | |
805 | for (i = 0; i < TX_RING_SIZE; i++) { |
806 | struct sk_buff *skb = tp->tx_buffers[i].skb; |
807 | |
808 | if (skb != NULL) { |
809 | dma_unmap_single(&tp->pdev->dev, |
810 | tp->tx_buffers[i].mapping, skb->len, |
811 | DMA_TO_DEVICE); |
812 | dev_kfree_skb (skb); |
813 | } |
814 | tp->tx_buffers[i].skb = NULL; |
815 | tp->tx_buffers[i].mapping = 0; |
816 | } |
817 | } |
818 | |
819 | static int tulip_close (struct net_device *dev) |
820 | { |
821 | struct tulip_private *tp = netdev_priv(dev); |
822 | void __iomem *ioaddr = tp->base_addr; |
823 | |
824 | netif_stop_queue (dev); |
825 | |
826 | tulip_down (dev); |
827 | |
828 | if (tulip_debug > 1) |
829 | netdev_dbg(dev, "Shutting down ethercard, status was %02x\n" , |
830 | ioread32 (ioaddr + CSR5)); |
831 | |
832 | free_irq (tp->pdev->irq, dev); |
833 | |
834 | tulip_free_ring (dev); |
835 | |
836 | return 0; |
837 | } |
838 | |
839 | static struct net_device_stats *tulip_get_stats(struct net_device *dev) |
840 | { |
841 | struct tulip_private *tp = netdev_priv(dev); |
842 | void __iomem *ioaddr = tp->base_addr; |
843 | |
844 | if (netif_running(dev)) { |
845 | unsigned long flags; |
846 | |
847 | spin_lock_irqsave (&tp->lock, flags); |
848 | |
849 | dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff; |
850 | |
851 | spin_unlock_irqrestore(lock: &tp->lock, flags); |
852 | } |
853 | |
854 | return &dev->stats; |
855 | } |
856 | |
857 | |
858 | static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) |
859 | { |
860 | struct tulip_private *np = netdev_priv(dev); |
861 | strscpy(info->driver, DRV_NAME, sizeof(info->driver)); |
862 | strscpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info)); |
863 | } |
864 | |
865 | |
866 | static int tulip_ethtool_set_wol(struct net_device *dev, |
867 | struct ethtool_wolinfo *wolinfo) |
868 | { |
869 | struct tulip_private *tp = netdev_priv(dev); |
870 | |
871 | if (wolinfo->wolopts & (~tp->wolinfo.supported)) |
872 | return -EOPNOTSUPP; |
873 | |
874 | tp->wolinfo.wolopts = wolinfo->wolopts; |
875 | device_set_wakeup_enable(dev: &tp->pdev->dev, enable: tp->wolinfo.wolopts); |
876 | return 0; |
877 | } |
878 | |
879 | static void tulip_ethtool_get_wol(struct net_device *dev, |
880 | struct ethtool_wolinfo *wolinfo) |
881 | { |
882 | struct tulip_private *tp = netdev_priv(dev); |
883 | |
884 | wolinfo->supported = tp->wolinfo.supported; |
885 | wolinfo->wolopts = tp->wolinfo.wolopts; |
886 | return; |
887 | } |
888 | |
889 | |
/* ethtool hooks supported by this driver: driver identification and
 * Wake-on-LAN get/set. */
static const struct ethtool_ops ops = {
	.get_drvinfo = tulip_get_drvinfo,
	.set_wol = tulip_ethtool_set_wol,
	.get_wol = tulip_ethtool_get_wol,
};
895 | |
896 | /* Provide ioctl() calls to examine the MII xcvr state. */ |
897 | static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) |
898 | { |
899 | struct tulip_private *tp = netdev_priv(dev); |
900 | void __iomem *ioaddr = tp->base_addr; |
901 | struct mii_ioctl_data *data = if_mii(rq); |
902 | const unsigned int phy_idx = 0; |
903 | int phy = tp->phys[phy_idx] & 0x1f; |
904 | unsigned int regnum = data->reg_num; |
905 | |
906 | switch (cmd) { |
907 | case SIOCGMIIPHY: /* Get address of MII PHY in use. */ |
908 | if (tp->mii_cnt) |
909 | data->phy_id = phy; |
910 | else if (tp->flags & HAS_NWAY) |
911 | data->phy_id = 32; |
912 | else if (tp->chip_id == COMET) |
913 | data->phy_id = 1; |
914 | else |
915 | return -ENODEV; |
916 | fallthrough; |
917 | |
918 | case SIOCGMIIREG: /* Read MII PHY register. */ |
919 | if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) { |
920 | int csr12 = ioread32 (ioaddr + CSR12); |
921 | int csr14 = ioread32 (ioaddr + CSR14); |
922 | switch (regnum) { |
923 | case 0: |
924 | if (((csr14<<5) & 0x1000) || |
925 | (dev->if_port == 5 && tp->nwayset)) |
926 | data->val_out = 0x1000; |
927 | else |
928 | data->val_out = (tulip_media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0) |
929 | | (tulip_media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0); |
930 | break; |
931 | case 1: |
932 | data->val_out = |
933 | 0x1848 + |
934 | ((csr12&0x7000) == 0x5000 ? 0x20 : 0) + |
935 | ((csr12&0x06) == 6 ? 0 : 4); |
936 | data->val_out |= 0x6048; |
937 | break; |
938 | case 4: |
939 | /* Advertised value, bogus 10baseTx-FD value from CSR6. */ |
940 | data->val_out = |
941 | ((ioread32(ioaddr + CSR6) >> 3) & 0x0040) + |
942 | ((csr14 >> 1) & 0x20) + 1; |
943 | data->val_out |= ((csr14 >> 9) & 0x03C0); |
944 | break; |
945 | case 5: data->val_out = tp->lpar; break; |
946 | default: data->val_out = 0; break; |
947 | } |
948 | } else { |
949 | data->val_out = tulip_mdio_read (dev, phy_id: data->phy_id & 0x1f, location: regnum); |
950 | } |
951 | return 0; |
952 | |
953 | case SIOCSMIIREG: /* Write MII PHY register. */ |
954 | if (regnum & ~0x1f) |
955 | return -EINVAL; |
956 | if (data->phy_id == phy) { |
957 | u16 value = data->val_in; |
958 | switch (regnum) { |
959 | case 0: /* Check for autonegotiation on or reset. */ |
960 | tp->full_duplex_lock = (value & 0x9000) ? 0 : 1; |
961 | if (tp->full_duplex_lock) |
962 | tp->full_duplex = (value & 0x0100) ? 1 : 0; |
963 | break; |
964 | case 4: |
965 | tp->advertising[phy_idx] = |
966 | tp->mii_advertise = data->val_in; |
967 | break; |
968 | } |
969 | } |
970 | if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) { |
971 | u16 value = data->val_in; |
972 | if (regnum == 0) { |
973 | if ((value & 0x1200) == 0x1200) { |
974 | if (tp->chip_id == PNIC2) { |
975 | pnic2_start_nway (dev); |
976 | } else { |
977 | t21142_start_nway (dev); |
978 | } |
979 | } |
980 | } else if (regnum == 4) |
981 | tp->sym_advertise = value; |
982 | } else { |
983 | tulip_mdio_write (dev, phy_id: data->phy_id & 0x1f, location: regnum, value: data->val_in); |
984 | } |
985 | return 0; |
986 | default: |
987 | return -EOPNOTSUPP; |
988 | } |
989 | |
990 | return -EOPNOTSUPP; |
991 | } |
992 | |
993 | |
994 | /* Set or clear the multicast filter for this adaptor. |
995 | Note that we only use exclusion around actually queueing the |
996 | new frame, not around filling tp->setup_frame. This is non-deterministic |
997 | when re-entered but still correct. */ |
998 | |
999 | static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev) |
1000 | { |
1001 | struct tulip_private *tp = netdev_priv(dev); |
1002 | u16 hash_table[32]; |
1003 | struct netdev_hw_addr *ha; |
1004 | const u16 *eaddrs; |
1005 | int i; |
1006 | |
1007 | memset(hash_table, 0, sizeof(hash_table)); |
1008 | __set_bit_le(nr: 255, addr: hash_table); /* Broadcast entry */ |
1009 | /* This should work on big-endian machines as well. */ |
1010 | netdev_for_each_mc_addr(ha, dev) { |
1011 | int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff; |
1012 | |
1013 | __set_bit_le(nr: index, addr: hash_table); |
1014 | } |
1015 | for (i = 0; i < 32; i++) { |
1016 | *setup_frm++ = hash_table[i]; |
1017 | *setup_frm++ = hash_table[i]; |
1018 | } |
1019 | setup_frm = &tp->setup_frame[13*6]; |
1020 | |
1021 | /* Fill the final entry with our physical address. */ |
1022 | eaddrs = (const u16 *)dev->dev_addr; |
1023 | *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0]; |
1024 | *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1]; |
1025 | *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2]; |
1026 | } |
1027 | |
1028 | static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev) |
1029 | { |
1030 | struct tulip_private *tp = netdev_priv(dev); |
1031 | struct netdev_hw_addr *ha; |
1032 | const u16 *eaddrs; |
1033 | |
1034 | /* We have <= 14 addresses so we can use the wonderful |
1035 | 16 address perfect filtering of the Tulip. */ |
1036 | netdev_for_each_mc_addr(ha, dev) { |
1037 | eaddrs = (u16 *) ha->addr; |
1038 | *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; |
1039 | *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; |
1040 | *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; |
1041 | } |
1042 | /* Fill the unused entries with the broadcast address. */ |
1043 | memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12); |
1044 | setup_frm = &tp->setup_frame[15*6]; |
1045 | |
1046 | /* Fill the final entry with our physical address. */ |
1047 | eaddrs = (const u16 *)dev->dev_addr; |
1048 | *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0]; |
1049 | *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1]; |
1050 | *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2]; |
1051 | } |
1052 | |
1053 | |
1054 | static void set_rx_mode(struct net_device *dev) |
1055 | { |
1056 | struct tulip_private *tp = netdev_priv(dev); |
1057 | void __iomem *ioaddr = tp->base_addr; |
1058 | int csr6; |
1059 | |
1060 | csr6 = ioread32(ioaddr + CSR6) & ~0x00D5; |
1061 | |
1062 | tp->csr6 &= ~0x00D5; |
1063 | if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ |
1064 | tp->csr6 |= AcceptAllMulticast | AcceptAllPhys; |
1065 | csr6 |= AcceptAllMulticast | AcceptAllPhys; |
1066 | } else if ((netdev_mc_count(dev) > 1000) || |
1067 | (dev->flags & IFF_ALLMULTI)) { |
1068 | /* Too many to filter well -- accept all multicasts. */ |
1069 | tp->csr6 |= AcceptAllMulticast; |
1070 | csr6 |= AcceptAllMulticast; |
1071 | } else if (tp->flags & MC_HASH_ONLY) { |
1072 | /* Some work-alikes have only a 64-entry hash filter table. */ |
1073 | /* Should verify correctness on big-endian/__powerpc__ */ |
1074 | struct netdev_hw_addr *ha; |
1075 | if (netdev_mc_count(dev) > 64) { |
1076 | /* Arbitrary non-effective limit. */ |
1077 | tp->csr6 |= AcceptAllMulticast; |
1078 | csr6 |= AcceptAllMulticast; |
1079 | } else { |
1080 | u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */ |
1081 | int filterbit; |
1082 | netdev_for_each_mc_addr(ha, dev) { |
1083 | if (tp->flags & COMET_MAC_ADDR) |
1084 | filterbit = ether_crc_le(ETH_ALEN, |
1085 | ha->addr); |
1086 | else |
1087 | filterbit = ether_crc(ETH_ALEN, |
1088 | ha->addr) >> 26; |
1089 | filterbit &= 0x3f; |
1090 | mc_filter[filterbit >> 5] |= 1 << (filterbit & 31); |
1091 | if (tulip_debug > 2) |
1092 | dev_info(&dev->dev, |
1093 | "Added filter for %pM %08x bit %d\n" , |
1094 | ha->addr, |
1095 | ether_crc(ETH_ALEN, ha->addr), |
1096 | filterbit); |
1097 | } |
1098 | if (mc_filter[0] == tp->mc_filter[0] && |
1099 | mc_filter[1] == tp->mc_filter[1]) |
1100 | ; /* No change. */ |
1101 | else if (tp->flags & IS_ASIX) { |
1102 | iowrite32(2, ioaddr + CSR13); |
1103 | iowrite32(mc_filter[0], ioaddr + CSR14); |
1104 | iowrite32(3, ioaddr + CSR13); |
1105 | iowrite32(mc_filter[1], ioaddr + CSR14); |
1106 | } else if (tp->flags & COMET_MAC_ADDR) { |
1107 | iowrite32(mc_filter[0], ioaddr + CSR27); |
1108 | iowrite32(mc_filter[1], ioaddr + CSR28); |
1109 | } |
1110 | tp->mc_filter[0] = mc_filter[0]; |
1111 | tp->mc_filter[1] = mc_filter[1]; |
1112 | } |
1113 | } else { |
1114 | unsigned long flags; |
1115 | u32 tx_flags = 0x08000000 | 192; |
1116 | |
1117 | /* Note that only the low-address shortword of setup_frame is valid! |
1118 | The values are doubled for big-endian architectures. */ |
1119 | if (netdev_mc_count(dev) > 14) { |
1120 | /* Must use a multicast hash table. */ |
1121 | build_setup_frame_hash(setup_frm: tp->setup_frame, dev); |
1122 | tx_flags = 0x08400000 | 192; |
1123 | } else { |
1124 | build_setup_frame_perfect(setup_frm: tp->setup_frame, dev); |
1125 | } |
1126 | |
1127 | spin_lock_irqsave(&tp->lock, flags); |
1128 | |
1129 | if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) { |
1130 | /* Same setup recently queued, we need not add it. */ |
1131 | } else { |
1132 | unsigned int entry; |
1133 | int dummy = -1; |
1134 | |
1135 | /* Now add this frame to the Tx list. */ |
1136 | |
1137 | entry = tp->cur_tx++ % TX_RING_SIZE; |
1138 | |
1139 | if (entry != 0) { |
1140 | /* Avoid a chip errata by prefixing a dummy entry. */ |
1141 | tp->tx_buffers[entry].skb = NULL; |
1142 | tp->tx_buffers[entry].mapping = 0; |
1143 | tp->tx_ring[entry].length = |
1144 | (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0; |
1145 | tp->tx_ring[entry].buffer1 = 0; |
1146 | /* Must set DescOwned later to avoid race with chip */ |
1147 | dummy = entry; |
1148 | entry = tp->cur_tx++ % TX_RING_SIZE; |
1149 | |
1150 | } |
1151 | |
1152 | tp->tx_buffers[entry].skb = NULL; |
1153 | tp->tx_buffers[entry].mapping = |
1154 | dma_map_single(&tp->pdev->dev, |
1155 | tp->setup_frame, |
1156 | sizeof(tp->setup_frame), |
1157 | DMA_TO_DEVICE); |
1158 | /* Put the setup frame on the Tx list. */ |
1159 | if (entry == TX_RING_SIZE-1) |
1160 | tx_flags |= DESC_RING_WRAP; /* Wrap ring. */ |
1161 | tp->tx_ring[entry].length = cpu_to_le32(tx_flags); |
1162 | tp->tx_ring[entry].buffer1 = |
1163 | cpu_to_le32(tp->tx_buffers[entry].mapping); |
1164 | tp->tx_ring[entry].status = cpu_to_le32(DescOwned); |
1165 | if (dummy >= 0) |
1166 | tp->tx_ring[dummy].status = cpu_to_le32(DescOwned); |
1167 | if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2) |
1168 | netif_stop_queue(dev); |
1169 | |
1170 | /* Trigger an immediate transmit demand. */ |
1171 | iowrite32(0, ioaddr + CSR1); |
1172 | } |
1173 | |
1174 | spin_unlock_irqrestore(lock: &tp->lock, flags); |
1175 | } |
1176 | |
1177 | iowrite32(csr6, ioaddr + CSR6); |
1178 | } |
1179 | |
#ifdef CONFIG_TULIP_MWI
/* Configure CSR0 for Memory-Write-Invalidate/Memory-Read-Multiple:
 * enable MWI in the PCI command register, verify the hardware accepted
 * it, and derive cache-alignment/burst-length settings from the PCI
 * cache line size (falling back to sane de4x5-derived defaults).
 * Fix: removed invalid C++-style named-argument artifacts ("dev:",
 * "val:") from the pci_* helper calls.
 */
static void tulip_mwi_config(struct pci_dev *pdev, struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	u8 cache;
	u16 pci_command;
	u32 csr0;

	if (tulip_debug > 3)
		netdev_dbg(dev, "tulip_mwi_config()\n" );

	tp->csr0 = csr0 = 0;

	/* if we have any cache line size at all, we can do MRM and MWI */
	csr0 |= MRM | MWI;

	/* Enable MWI in the standard PCI command bit.
	 * Check for the case where MWI is desired but not available
	 */
	pci_try_set_mwi(pdev);

	/* read result from hardware (in case bit refused to enable) */
	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
	if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
		csr0 &= ~MWI;

	/* if cache line size hardwired to zero, no MWI */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
	if ((csr0 & MWI) && (cache == 0)) {
		csr0 &= ~MWI;
		pci_clear_mwi(pdev);
	}

	/* assign per-cacheline-size cache alignment and
	 * burst length values
	 */
	switch (cache) {
	case 8:
		csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
		break;
	case 16:
		csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
		break;
	case 32:
		csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
		break;
	default:
		cache = 0;
		break;
	}

	/* if we have a good cache line size, we by now have a good
	 * csr0, so save it and exit
	 */
	if (cache)
		goto out;

	/* we don't have a good csr0 or cache line size, disable MWI */
	if (csr0 & MWI) {
		pci_clear_mwi(pdev);
		csr0 &= ~MWI;
	}

	/* sane defaults for burst length and cache alignment
	 * originally from de4x5 driver
	 */
	csr0 |= (8 << BurstLenShift) | (1 << CALShift);

out:
	tp->csr0 = csr0;
	if (tulip_debug > 2)
		netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n" ,
			   cache, csr0);
}
#endif
1255 | |
1256 | /* |
1257 | * Chips that have the MRM/reserved bit quirk and the burst quirk. That |
1258 | * is the DM910X and the on chip ULi devices |
1259 | */ |
1260 | |
1261 | static int tulip_uli_dm_quirk(struct pci_dev *pdev) |
1262 | { |
1263 | if (pdev->vendor == 0x1282 && pdev->device == 0x9102) |
1264 | return 1; |
1265 | return 0; |
1266 | } |
1267 | |
/* Network-device operations wired up for every tulip interface. */
static const struct net_device_ops tulip_netdev_ops = {
	.ndo_open		= tulip_open,
	.ndo_start_xmit		= tulip_start_xmit,
	.ndo_tx_timeout		= tulip_tx_timeout,
	.ndo_stop		= tulip_close,
	.ndo_get_stats		= tulip_get_stats,
	.ndo_eth_ioctl		= private_ioctl,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	 = poll_tulip,
#endif
};
1282 | |
/* Early 486-era PCI chipsets (Intel Saturn, SiS 496) that need the
 * conservative CSR0 burst/cache-alignment workaround applied in
 * tulip_init_one(). */
static const struct pci_device_id early_486_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
	{ },
};
1288 | |
1289 | static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
1290 | { |
1291 | struct tulip_private *tp; |
1292 | /* See note below on the multiport cards. */ |
1293 | static unsigned char last_phys_addr[ETH_ALEN] = { |
1294 | 0x00, 'L', 'i', 'n', 'u', 'x' |
1295 | }; |
1296 | #if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */ |
1297 | static int last_irq; |
1298 | #endif |
1299 | int i, irq; |
1300 | unsigned short sum; |
1301 | unsigned char *ee_data; |
1302 | struct net_device *dev; |
1303 | void __iomem *ioaddr; |
1304 | static int board_idx = -1; |
1305 | int chip_idx = ent->driver_data; |
1306 | const char *chip_name = tulip_tbl[chip_idx].chip_name; |
1307 | unsigned int eeprom_missing = 0; |
1308 | u8 addr[ETH_ALEN] __aligned(2); |
1309 | unsigned int force_csr0 = 0; |
1310 | |
1311 | board_idx++; |
1312 | |
1313 | /* |
1314 | * Lan media wire a tulip chip to a wan interface. Needs a very |
1315 | * different driver (lmc driver) |
1316 | */ |
1317 | |
1318 | if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) { |
1319 | pr_err("skipping LMC card\n" ); |
1320 | return -ENODEV; |
1321 | } else if (pdev->subsystem_vendor == PCI_VENDOR_ID_SBE && |
1322 | (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_T3E3 || |
1323 | pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0 || |
1324 | pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1)) { |
1325 | pr_err("skipping SBE T3E3 port\n" ); |
1326 | return -ENODEV; |
1327 | } |
1328 | |
1329 | /* |
1330 | * DM910x chips should be handled by the dmfe driver, except |
1331 | * on-board chips on SPARC systems. Also, early DM9100s need |
1332 | * software CRC which only the dmfe driver supports. |
1333 | */ |
1334 | |
1335 | #ifdef CONFIG_TULIP_DM910X |
1336 | if (chip_idx == DM910X) { |
1337 | struct device_node *dp; |
1338 | |
1339 | if (pdev->vendor == 0x1282 && pdev->device == 0x9100 && |
1340 | pdev->revision < 0x30) { |
1341 | pr_info("skipping early DM9100 with Crc bug (use dmfe)\n" ); |
1342 | return -ENODEV; |
1343 | } |
1344 | |
1345 | dp = pci_device_to_OF_node(pdev); |
1346 | if (!(dp && of_get_property(dp, "local-mac-address" , NULL))) { |
1347 | pr_info("skipping DM910x expansion card (use dmfe)\n" ); |
1348 | return -ENODEV; |
1349 | } |
1350 | } |
1351 | #endif |
1352 | |
1353 | /* |
1354 | * Looks for early PCI chipsets where people report hangs |
1355 | * without the workarounds being on. |
1356 | */ |
1357 | |
1358 | /* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache |
1359 | aligned. Aries might need this too. The Saturn errata are not |
1360 | pretty reading but thankfully it's an old 486 chipset. |
1361 | |
1362 | 2. The dreaded SiS496 486 chipset. Same workaround as Intel |
1363 | Saturn. |
1364 | */ |
1365 | |
1366 | if (pci_dev_present(ids: early_486_chipsets)) { |
1367 | csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift); |
1368 | force_csr0 = 1; |
1369 | } |
1370 | |
1371 | /* bugfix: the ASIX must have a burst limit or horrible things happen. */ |
1372 | if (chip_idx == AX88140) { |
1373 | if ((csr0 & 0x3f00) == 0) |
1374 | csr0 |= 0x2000; |
1375 | } |
1376 | |
1377 | /* PNIC doesn't have MWI/MRL/MRM... */ |
1378 | if (chip_idx == LC82C168) |
1379 | csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */ |
1380 | |
1381 | /* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */ |
1382 | if (tulip_uli_dm_quirk(pdev)) { |
1383 | csr0 &= ~0x01f100ff; |
1384 | #if defined(CONFIG_SPARC) |
1385 | csr0 = (csr0 & ~0xff00) | 0xe000; |
1386 | #endif |
1387 | } |
1388 | /* |
1389 | * And back to business |
1390 | */ |
1391 | |
1392 | i = pcim_enable_device(pdev); |
1393 | if (i) { |
1394 | pr_err("Cannot enable tulip board #%d, aborting\n" , board_idx); |
1395 | return i; |
1396 | } |
1397 | |
1398 | irq = pdev->irq; |
1399 | |
1400 | /* alloc_etherdev ensures aligned and zeroed private structures */ |
1401 | dev = devm_alloc_etherdev(&pdev->dev, sizeof(*tp)); |
1402 | if (!dev) |
1403 | return -ENOMEM; |
1404 | |
1405 | SET_NETDEV_DEV(dev, &pdev->dev); |
1406 | if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) { |
1407 | pr_err("%s: I/O region (0x%llx@0x%llx) too small, aborting\n" , |
1408 | pci_name(pdev), |
1409 | (unsigned long long)pci_resource_len (pdev, 0), |
1410 | (unsigned long long)pci_resource_start (pdev, 0)); |
1411 | return -ENODEV; |
1412 | } |
1413 | |
1414 | /* grab all resources from both PIO and MMIO regions, as we |
1415 | * don't want anyone else messing around with our hardware */ |
1416 | if (pci_request_regions(pdev, DRV_NAME)) |
1417 | return -ENODEV; |
1418 | |
1419 | ioaddr = pcim_iomap(pdev, TULIP_BAR, maxlen: tulip_tbl[chip_idx].io_size); |
1420 | |
1421 | if (!ioaddr) |
1422 | return -ENODEV; |
1423 | |
1424 | /* |
1425 | * initialize private data structure 'tp' |
1426 | * it is zeroed and aligned in alloc_etherdev |
1427 | */ |
1428 | tp = netdev_priv(dev); |
1429 | tp->dev = dev; |
1430 | |
1431 | tp->rx_ring = dmam_alloc_coherent(dev: &pdev->dev, |
1432 | size: sizeof(struct tulip_rx_desc) * RX_RING_SIZE + |
1433 | sizeof(struct tulip_tx_desc) * TX_RING_SIZE, |
1434 | dma_handle: &tp->rx_ring_dma, GFP_KERNEL); |
1435 | if (!tp->rx_ring) |
1436 | return -ENODEV; |
1437 | tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE); |
1438 | tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE; |
1439 | |
1440 | tp->chip_id = chip_idx; |
1441 | tp->flags = tulip_tbl[chip_idx].flags; |
1442 | |
1443 | tp->wolinfo.supported = 0; |
1444 | tp->wolinfo.wolopts = 0; |
1445 | /* COMET: Enable power management only for AN983B */ |
1446 | if (chip_idx == COMET ) { |
1447 | u32 sig; |
1448 | pci_read_config_dword (dev: pdev, where: 0x80, val: &sig); |
1449 | if (sig == 0x09811317) { |
1450 | tp->flags |= COMET_PM; |
1451 | tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC; |
1452 | pr_info("%s: Enabled WOL support for AN983B\n" , |
1453 | __func__); |
1454 | } |
1455 | } |
1456 | tp->pdev = pdev; |
1457 | tp->base_addr = ioaddr; |
1458 | tp->revision = pdev->revision; |
1459 | tp->csr0 = csr0; |
1460 | spin_lock_init(&tp->lock); |
1461 | spin_lock_init(&tp->mii_lock); |
1462 | timer_setup(&tp->timer, tulip_tbl[tp->chip_id].media_timer, 0); |
1463 | |
1464 | INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task); |
1465 | |
1466 | #ifdef CONFIG_TULIP_MWI |
1467 | if (!force_csr0 && (tp->flags & HAS_PCI_MWI)) |
1468 | tulip_mwi_config (pdev, dev); |
1469 | #endif |
1470 | |
1471 | /* Stop the chip's Tx and Rx processes. */ |
1472 | tulip_stop_rxtx(tp); |
1473 | |
1474 | pci_set_master(dev: pdev); |
1475 | |
1476 | #ifdef CONFIG_GSC |
1477 | if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) { |
1478 | switch (pdev->subsystem_device) { |
1479 | default: |
1480 | break; |
1481 | case 0x1061: |
1482 | case 0x1062: |
1483 | case 0x1063: |
1484 | case 0x1098: |
1485 | case 0x1099: |
1486 | case 0x10EE: |
1487 | tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE; |
1488 | chip_name = "GSC DS21140 Tulip" ; |
1489 | } |
1490 | } |
1491 | #endif |
1492 | |
1493 | /* Clear the missed-packet counter. */ |
1494 | ioread32(ioaddr + CSR8); |
1495 | |
1496 | /* The station address ROM is read byte serially. The register must |
1497 | be polled, waiting for the value to be read bit serially from the |
1498 | EEPROM. |
1499 | */ |
1500 | ee_data = tp->eeprom; |
1501 | memset(ee_data, 0, sizeof(tp->eeprom)); |
1502 | sum = 0; |
1503 | if (chip_idx == LC82C168) { |
1504 | for (i = 0; i < 3; i++) { |
1505 | int value, boguscnt = 100000; |
1506 | iowrite32(0x600 | i, ioaddr + 0x98); |
1507 | do { |
1508 | value = ioread32(ioaddr + CSR9); |
1509 | } while (value < 0 && --boguscnt > 0); |
1510 | put_unaligned_le16(val: value, p: ((__le16 *)addr) + i); |
1511 | sum += value & 0xffff; |
1512 | } |
1513 | eth_hw_addr_set(dev, addr); |
1514 | } else if (chip_idx == COMET) { |
1515 | /* No need to read the EEPROM. */ |
1516 | put_unaligned_le32(val: ioread32(ioaddr + 0xA4), p: addr); |
1517 | put_unaligned_le16(val: ioread32(ioaddr + 0xA8), p: addr + 4); |
1518 | eth_hw_addr_set(dev, addr); |
1519 | for (i = 0; i < 6; i ++) |
1520 | sum += dev->dev_addr[i]; |
1521 | } else { |
1522 | /* A serial EEPROM interface, we read now and sort it out later. */ |
1523 | int sa_offset = 0; |
1524 | int ee_addr_size = tulip_read_eeprom(dev, location: 0xff, addr_len: 8) & 0x40000 ? 8 : 6; |
1525 | int ee_max_addr = ((1 << ee_addr_size) - 1) * sizeof(u16); |
1526 | |
1527 | if (ee_max_addr > sizeof(tp->eeprom)) |
1528 | ee_max_addr = sizeof(tp->eeprom); |
1529 | |
1530 | for (i = 0; i < ee_max_addr ; i += sizeof(u16)) { |
1531 | u16 data = tulip_read_eeprom(dev, location: i/2, addr_len: ee_addr_size); |
1532 | ee_data[i] = data & 0xff; |
1533 | ee_data[i + 1] = data >> 8; |
1534 | } |
1535 | |
1536 | /* DEC now has a specification (see Notes) but early board makers |
1537 | just put the address in the first EEPROM locations. */ |
1538 | /* This does memcmp(ee_data, ee_data+16, 8) */ |
1539 | for (i = 0; i < 8; i ++) |
1540 | if (ee_data[i] != ee_data[16+i]) |
1541 | sa_offset = 20; |
1542 | if (chip_idx == CONEXANT) { |
1543 | /* Check that the tuple type and length is correct. */ |
1544 | if (ee_data[0x198] == 0x04 && ee_data[0x199] == 6) |
1545 | sa_offset = 0x19A; |
1546 | } else if (ee_data[0] == 0xff && ee_data[1] == 0xff && |
1547 | ee_data[2] == 0) { |
1548 | sa_offset = 2; /* Grrr, damn Matrox boards. */ |
1549 | } |
1550 | #ifdef CONFIG_MIPS_COBALT |
1551 | if ((pdev->bus->number == 0) && |
1552 | ((PCI_SLOT(pdev->devfn) == 7) || |
1553 | (PCI_SLOT(pdev->devfn) == 12))) { |
1554 | /* Cobalt MAC address in first EEPROM locations. */ |
1555 | sa_offset = 0; |
1556 | /* Ensure our media table fixup get's applied */ |
1557 | memcpy(ee_data + 16, ee_data, 8); |
1558 | } |
1559 | #endif |
1560 | #ifdef CONFIG_GSC |
1561 | /* Check to see if we have a broken srom */ |
1562 | if (ee_data[0] == 0x61 && ee_data[1] == 0x10) { |
1563 | /* pci_vendor_id and subsystem_id are swapped */ |
1564 | ee_data[0] = ee_data[2]; |
1565 | ee_data[1] = ee_data[3]; |
1566 | ee_data[2] = 0x61; |
1567 | ee_data[3] = 0x10; |
1568 | |
1569 | /* HSC-PCI boards need to be byte-swaped and shifted |
1570 | * up 1 word. This shift needs to happen at the end |
1571 | * of the MAC first because of the 2 byte overlap. |
1572 | */ |
1573 | for (i = 4; i >= 0; i -= 2) { |
1574 | ee_data[17 + i + 3] = ee_data[17 + i]; |
1575 | ee_data[16 + i + 5] = ee_data[16 + i]; |
1576 | } |
1577 | } |
1578 | #endif |
1579 | |
1580 | for (i = 0; i < 6; i ++) { |
1581 | addr[i] = ee_data[i + sa_offset]; |
1582 | sum += ee_data[i + sa_offset]; |
1583 | } |
1584 | eth_hw_addr_set(dev, addr); |
1585 | } |
1586 | /* Lite-On boards have the address byte-swapped. */ |
1587 | if ((dev->dev_addr[0] == 0xA0 || |
1588 | dev->dev_addr[0] == 0xC0 || |
1589 | dev->dev_addr[0] == 0x02) && |
1590 | dev->dev_addr[1] == 0x00) { |
1591 | for (i = 0; i < 6; i+=2) { |
1592 | addr[i] = dev->dev_addr[i+1]; |
1593 | addr[i+1] = dev->dev_addr[i]; |
1594 | } |
1595 | eth_hw_addr_set(dev, addr); |
1596 | } |
1597 | |
1598 | /* On the Zynx 315 Etherarray and other multiport boards only the |
1599 | first Tulip has an EEPROM. |
1600 | On Sparc systems the mac address is held in the OBP property |
1601 | "local-mac-address". |
1602 | The addresses of the subsequent ports are derived from the first. |
1603 | Many PCI BIOSes also incorrectly report the IRQ line, so we correct |
1604 | that here as well. */ |
1605 | if (sum == 0 || sum == 6*0xff) { |
1606 | #if defined(CONFIG_SPARC) |
1607 | struct device_node *dp = pci_device_to_OF_node(pdev); |
1608 | const unsigned char *addr2; |
1609 | int len; |
1610 | #endif |
1611 | eeprom_missing = 1; |
1612 | for (i = 0; i < 5; i++) |
1613 | addr[i] = last_phys_addr[i]; |
1614 | addr[i] = last_phys_addr[i] + 1; |
1615 | eth_hw_addr_set(dev, addr); |
1616 | #if defined(CONFIG_SPARC) |
1617 | addr2 = of_get_property(dp, "local-mac-address" , &len); |
1618 | if (addr2 && len == ETH_ALEN) |
1619 | eth_hw_addr_set(dev, addr2); |
1620 | #endif |
1621 | #if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */ |
1622 | if (last_irq) |
1623 | irq = last_irq; |
1624 | #endif |
1625 | } |
1626 | |
1627 | for (i = 0; i < 6; i++) |
1628 | last_phys_addr[i] = dev->dev_addr[i]; |
1629 | #if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */ |
1630 | last_irq = irq; |
1631 | #endif |
1632 | |
1633 | /* The lower four bits are the media type. */ |
1634 | if (board_idx >= 0 && board_idx < MAX_UNITS) { |
1635 | if (options[board_idx] & MEDIA_MASK) |
1636 | tp->default_port = options[board_idx] & MEDIA_MASK; |
1637 | if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0) |
1638 | tp->full_duplex = 1; |
1639 | if (mtu[board_idx] > 0) |
1640 | dev->mtu = mtu[board_idx]; |
1641 | } |
1642 | if (dev->mem_start & MEDIA_MASK) |
1643 | tp->default_port = dev->mem_start & MEDIA_MASK; |
1644 | if (tp->default_port) { |
1645 | pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n" , |
1646 | board_idx, medianame[tp->default_port & MEDIA_MASK]); |
1647 | tp->medialock = 1; |
1648 | if (tulip_media_cap[tp->default_port] & MediaAlwaysFD) |
1649 | tp->full_duplex = 1; |
1650 | } |
1651 | if (tp->full_duplex) |
1652 | tp->full_duplex_lock = 1; |
1653 | |
1654 | if (tulip_media_cap[tp->default_port] & MediaIsMII) { |
1655 | static const u16 media2advert[] = { |
1656 | 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 |
1657 | }; |
1658 | tp->mii_advertise = media2advert[tp->default_port - 9]; |
1659 | tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */ |
1660 | } |
1661 | |
1662 | if (tp->flags & HAS_MEDIA_TABLE) { |
1663 | sprintf(buf: dev->name, DRV_NAME "%d" , board_idx); /* hack */ |
1664 | tulip_parse_eeprom(dev); |
1665 | strcpy(p: dev->name, q: "eth%d" ); /* un-hack */ |
1666 | } |
1667 | |
1668 | if ((tp->flags & ALWAYS_CHECK_MII) || |
1669 | (tp->mtable && tp->mtable->has_mii) || |
1670 | ( ! tp->mtable && (tp->flags & HAS_MII))) { |
1671 | if (tp->mtable && tp->mtable->has_mii) { |
1672 | for (i = 0; i < tp->mtable->leafcount; i++) |
1673 | if (tp->mtable->mleaf[i].media == 11) { |
1674 | tp->cur_index = i; |
1675 | tp->saved_if_port = dev->if_port; |
1676 | tulip_select_media(dev, startup: 2); |
1677 | dev->if_port = tp->saved_if_port; |
1678 | break; |
1679 | } |
1680 | } |
1681 | |
1682 | /* Find the connected MII xcvrs. |
1683 | Doing this in open() would allow detecting external xcvrs |
1684 | later, but takes much time. */ |
1685 | tulip_find_mii (dev, board_idx); |
1686 | } |
1687 | |
1688 | /* The Tulip-specific entries in the device structure. */ |
1689 | dev->netdev_ops = &tulip_netdev_ops; |
1690 | dev->watchdog_timeo = TX_TIMEOUT; |
1691 | #ifdef CONFIG_TULIP_NAPI |
1692 | netif_napi_add_weight(dev, napi: &tp->napi, poll: tulip_poll, weight: 16); |
1693 | #endif |
1694 | dev->ethtool_ops = &ops; |
1695 | |
1696 | i = register_netdev(dev); |
1697 | if (i) |
1698 | return i; |
1699 | |
1700 | pci_set_drvdata(pdev, data: dev); |
1701 | |
1702 | dev_info(&dev->dev, |
1703 | #ifdef CONFIG_TULIP_MMIO |
1704 | "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n" , |
1705 | #else |
1706 | "%s rev %d at Port %#llx,%s %pM, IRQ %d\n" , |
1707 | #endif |
1708 | chip_name, pdev->revision, |
1709 | (unsigned long long)pci_resource_start(pdev, TULIP_BAR), |
1710 | eeprom_missing ? " EEPROM not present," : "" , |
1711 | dev->dev_addr, irq); |
1712 | |
1713 | if (tp->chip_id == PNIC2) |
1714 | tp->link_change = pnic2_lnk_change; |
1715 | else if (tp->flags & HAS_NWAY) |
1716 | tp->link_change = t21142_lnk_change; |
1717 | else if (tp->flags & HAS_PNICNWAY) |
1718 | tp->link_change = pnic_lnk_change; |
1719 | |
1720 | /* Reset the xcvr interface and turn on heartbeat. */ |
1721 | switch (chip_idx) { |
1722 | case DC21140: |
1723 | case DM910X: |
1724 | default: |
1725 | if (tp->mtable) |
1726 | iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12); |
1727 | break; |
1728 | case DC21142: |
1729 | if (tp->mii_cnt || tulip_media_cap[dev->if_port] & MediaIsMII) { |
1730 | iowrite32(csr6_mask_defstate, ioaddr + CSR6); |
1731 | iowrite32(0x0000, ioaddr + CSR13); |
1732 | iowrite32(0x0000, ioaddr + CSR14); |
1733 | iowrite32(csr6_mask_hdcap, ioaddr + CSR6); |
1734 | } else |
1735 | t21142_start_nway(dev); |
1736 | break; |
1737 | case PNIC2: |
1738 | /* just do a reset for sanity sake */ |
1739 | iowrite32(0x0000, ioaddr + CSR13); |
1740 | iowrite32(0x0000, ioaddr + CSR14); |
1741 | break; |
1742 | case LC82C168: |
1743 | if ( ! tp->mii_cnt) { |
1744 | tp->nway = 1; |
1745 | tp->nwayset = 0; |
1746 | iowrite32(csr6_ttm | csr6_ca, ioaddr + CSR6); |
1747 | iowrite32(0x30, ioaddr + CSR12); |
1748 | iowrite32(0x0001F078, ioaddr + CSR6); |
1749 | iowrite32(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */ |
1750 | } |
1751 | break; |
1752 | case MX98713: |
1753 | case COMPEX9881: |
1754 | iowrite32(0x00000000, ioaddr + CSR6); |
1755 | iowrite32(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */ |
1756 | iowrite32(0x00000001, ioaddr + CSR13); |
1757 | break; |
1758 | case MX98715: |
1759 | case MX98725: |
1760 | iowrite32(0x01a80000, ioaddr + CSR6); |
1761 | iowrite32(0xFFFFFFFF, ioaddr + CSR14); |
1762 | iowrite32(0x00001000, ioaddr + CSR12); |
1763 | break; |
1764 | case COMET: |
1765 | /* No initialization necessary. */ |
1766 | break; |
1767 | } |
1768 | |
1769 | /* put the chip in snooze mode until opened */ |
1770 | tulip_set_power_state (tp, sleep: 0, snooze: 1); |
1771 | |
1772 | return 0; |
1773 | } |
1774 | |
1775 | |
1776 | /* set the registers according to the given wolopts */ |
1777 | static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts) |
1778 | { |
1779 | struct net_device *dev = pci_get_drvdata(pdev); |
1780 | struct tulip_private *tp = netdev_priv(dev); |
1781 | void __iomem *ioaddr = tp->base_addr; |
1782 | |
1783 | if (tp->flags & COMET_PM) { |
1784 | unsigned int tmp; |
1785 | |
1786 | tmp = ioread32(ioaddr + CSR18); |
1787 | tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode | comet_csr18_d3a); |
1788 | tmp |= comet_csr18_pm_mode; |
1789 | iowrite32(tmp, ioaddr + CSR18); |
1790 | |
1791 | /* Set the Wake-up Control/Status Register to the given WOL options*/ |
1792 | tmp = ioread32(ioaddr + CSR13); |
1793 | tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre); |
1794 | if (wolopts & WAKE_MAGIC) |
1795 | tmp |= comet_csr13_mpre; |
1796 | if (wolopts & WAKE_PHY) |
1797 | tmp |= comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_lsce; |
1798 | /* Clear the event flags */ |
1799 | tmp |= comet_csr13_wfr | comet_csr13_mpr | comet_csr13_lsc; |
1800 | iowrite32(tmp, ioaddr + CSR13); |
1801 | } |
1802 | } |
1803 | |
1804 | static int __maybe_unused tulip_suspend(struct device *dev_d) |
1805 | { |
1806 | struct net_device *dev = dev_get_drvdata(dev: dev_d); |
1807 | struct tulip_private *tp = netdev_priv(dev); |
1808 | |
1809 | if (!dev) |
1810 | return -EINVAL; |
1811 | |
1812 | if (!netif_running(dev)) |
1813 | goto save_state; |
1814 | |
1815 | tulip_down(dev); |
1816 | |
1817 | netif_device_detach(dev); |
1818 | /* FIXME: it needlessly adds an error path. */ |
1819 | free_irq(tp->pdev->irq, dev); |
1820 | |
1821 | save_state: |
1822 | tulip_set_wolopts(to_pci_dev(dev_d), wolopts: tp->wolinfo.wolopts); |
1823 | device_set_wakeup_enable(dev: dev_d, enable: !!tp->wolinfo.wolopts); |
1824 | |
1825 | return 0; |
1826 | } |
1827 | |
1828 | static int __maybe_unused tulip_resume(struct device *dev_d) |
1829 | { |
1830 | struct pci_dev *pdev = to_pci_dev(dev_d); |
1831 | struct net_device *dev = dev_get_drvdata(dev: dev_d); |
1832 | struct tulip_private *tp = netdev_priv(dev); |
1833 | void __iomem *ioaddr = tp->base_addr; |
1834 | unsigned int tmp; |
1835 | int retval = 0; |
1836 | |
1837 | if (!dev) |
1838 | return -EINVAL; |
1839 | |
1840 | if (!netif_running(dev)) |
1841 | return 0; |
1842 | |
1843 | retval = request_irq(irq: pdev->irq, handler: tulip_interrupt, IRQF_SHARED, |
1844 | name: dev->name, dev); |
1845 | if (retval) { |
1846 | pr_err("request_irq failed in resume\n" ); |
1847 | return retval; |
1848 | } |
1849 | |
1850 | if (tp->flags & COMET_PM) { |
1851 | device_set_wakeup_enable(dev: dev_d, enable: 0); |
1852 | |
1853 | /* Clear the PMES flag */ |
1854 | tmp = ioread32(ioaddr + CSR20); |
1855 | tmp |= comet_csr20_pmes; |
1856 | iowrite32(tmp, ioaddr + CSR20); |
1857 | |
1858 | /* Disable all wake-up events */ |
1859 | tulip_set_wolopts(pdev, wolopts: 0); |
1860 | } |
1861 | netif_device_attach(dev); |
1862 | |
1863 | if (netif_running(dev)) |
1864 | tulip_up(dev); |
1865 | |
1866 | return 0; |
1867 | } |
1868 | |
1869 | static void tulip_remove_one(struct pci_dev *pdev) |
1870 | { |
1871 | struct net_device *dev = pci_get_drvdata (pdev); |
1872 | |
1873 | if (!dev) |
1874 | return; |
1875 | |
1876 | unregister_netdev(dev); |
1877 | } |
1878 | |
1879 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1880 | /* |
1881 | * Polling 'interrupt' - used by things like netconsole to send skbs |
1882 | * without having to re-enable interrupts. It's not called while |
1883 | * the interrupt routine is executing. |
1884 | */ |
1885 | |
1886 | static void poll_tulip (struct net_device *dev) |
1887 | { |
1888 | struct tulip_private *tp = netdev_priv(dev); |
1889 | const int irq = tp->pdev->irq; |
1890 | |
1891 | /* disable_irq here is not very nice, but with the lockless |
1892 | interrupt handler we have no other choice. */ |
1893 | disable_irq(irq); |
1894 | tulip_interrupt (irq, dev_instance: dev); |
1895 | enable_irq(irq); |
1896 | } |
1897 | #endif |
1898 | |
/* Suspend/resume ops; compiled out (callbacks __maybe_unused) without PM. */
static SIMPLE_DEV_PM_OPS(tulip_pm_ops, tulip_suspend, tulip_resume);

/* PCI driver glue: matches tulip_pci_tbl IDs to probe/remove/PM hooks. */
static struct pci_driver tulip_driver = {
	.name		= DRV_NAME,
	.id_table	= tulip_pci_tbl,
	.probe		= tulip_init_one,
	.remove		= tulip_remove_one,
	.driver.pm	= &tulip_pm_ops,
};
1908 | |
1909 | |
1910 | static int __init tulip_init (void) |
1911 | { |
1912 | if (!csr0) { |
1913 | pr_warn("tulip: unknown CPU architecture, using default csr0\n" ); |
1914 | /* default to 8 longword cache line alignment */ |
1915 | csr0 = 0x00A00000 | 0x4800; |
1916 | } |
1917 | |
1918 | /* copy module parms into globals */ |
1919 | tulip_rx_copybreak = rx_copybreak; |
1920 | tulip_max_interrupt_work = max_interrupt_work; |
1921 | |
1922 | /* probe for and init boards */ |
1923 | return pci_register_driver(&tulip_driver); |
1924 | } |
1925 | |
1926 | |
1927 | static void __exit tulip_cleanup (void) |
1928 | { |
1929 | pci_unregister_driver (dev: &tulip_driver); |
1930 | } |
1931 | |
1932 | |
/* Module loader entry/exit points. */
module_init(tulip_init);
module_exit(tulip_cleanup);
1935 | |