1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Copyright(c) 1999 - 2006 Intel Corporation. */ |
3 | |
4 | /* |
5 | * e100.c: Intel(R) PRO/100 ethernet driver |
6 | * |
7 | * (Re)written 2003 by scott.feldman@intel.com. Based loosely on |
8 | * original e100 driver, but better described as a munging of |
9 | * e100, e1000, eepro100, tg3, 8139cp, and other drivers. |
10 | * |
11 | * References: |
12 | * Intel 8255x 10/100 Mbps Ethernet Controller Family, |
13 | * Open Source Software Developers Manual, |
14 | * http://sourceforge.net/projects/e1000 |
15 | * |
16 | * |
17 | * Theory of Operation |
18 | * |
19 | * I. General |
20 | * |
 * The driver supports the Intel(R) 10/100 Mbps PCI Fast Ethernet
 * controller family, which includes the 82557, 82558, 82559, 82550,
 * 82551, and 82562 devices. 82558 and later controllers
 * integrate the Intel 82555 PHY. The controllers are used in
 * server and client network interface cards, as well as in
 * LAN-On-Motherboard (LOM), CardBus, MiniPCI, and ICHx
 * configurations. The 8255x supports a 32-bit linear addressing
 * mode and operates at a 33 MHz PCI clock rate.
29 | * |
30 | * II. Driver Operation |
31 | * |
32 | * Memory-mapped mode is used exclusively to access the device's |
33 | * shared-memory structure, the Control/Status Registers (CSR). All |
34 | * setup, configuration, and control of the device, including queuing |
 * of Tx, Rx, and configuration commands, is done through the CSR.
36 | * cmd_lock serializes accesses to the CSR command register. cb_lock |
37 | * protects the shared Command Block List (CBL). |
38 | * |
 * The 8255x is highly MII-compliant and all access to the PHY goes
40 | * through the Management Data Interface (MDI). Consequently, the |
41 | * driver leverages the mii.c library shared with other MII-compliant |
42 | * devices. |
43 | * |
44 | * Big- and Little-Endian byte order as well as 32- and 64-bit |
45 | * archs are supported. Weak-ordered memory and non-cache-coherent |
46 | * archs are supported. |
47 | * |
48 | * III. Transmit |
49 | * |
50 | * A Tx skb is mapped and hangs off of a TCB. TCBs are linked |
51 | * together in a fixed-size ring (CBL) thus forming the flexible mode |
52 | * memory structure. A TCB marked with the suspend-bit indicates |
53 | * the end of the ring. The last TCB processed suspends the |
 * controller, and the controller can be restarted by issuing a CU
55 | * resume command to continue from the suspend point, or a CU start |
56 | * command to start at a given position in the ring. |
57 | * |
58 | * Non-Tx commands (config, multicast setup, etc) are linked |
59 | * into the CBL ring along with Tx commands. The common structure |
60 | * used for both Tx and non-Tx commands is the Command Block (CB). |
61 | * |
62 | * cb_to_use is the next CB to use for queuing a command; cb_to_clean |
63 | * is the next CB to check for completion; cb_to_send is the first |
64 | * CB to start on in case of a previous failure to resume. CB clean |
65 | * up happens in interrupt context in response to a CU interrupt. |
66 | * cbs_avail keeps track of number of free CB resources available. |
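 *
 * A rough sketch of the ring at runtime (entries advance to the right
 * and wrap around):
 *
 *      cb_to_clean            cb_to_send              cb_to_use
 *           |                      |                       |
 *      [ handed to CU ... ] [ queued, not yet sent ] [ free ... ]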
67 | * |
68 | * Hardware padding of short packets to minimum packet size is |
69 | * enabled. 82557 pads with 7Eh, while the later controllers pad |
70 | * with 00h. |
71 | * |
72 | * IV. Receive |
73 | * |
74 | * The Receive Frame Area (RFA) comprises a ring of Receive Frame |
75 | * Descriptors (RFD) + data buffer, thus forming the simplified mode |
76 | * memory structure. Rx skbs are allocated to contain both the RFD |
77 | * and the data buffer, but the RFD is pulled off before the skb is |
78 | * indicated. The data buffer is aligned such that encapsulated |
79 | * protocol headers are u32-aligned. Since the RFD is part of the |
80 | * mapped shared memory, and completion status is contained within |
81 | * the RFD, the RFD must be dma_sync'ed to maintain a consistent |
82 | * view from software and hardware. |
83 | * |
84 | * In order to keep updates to the RFD link field from colliding with |
85 | * hardware writes to mark packets complete, we use the feature that |
86 | * hardware will not write to a size 0 descriptor and mark the previous |
87 | * packet as end-of-list (EL). After updating the link, we remove EL |
88 | * and only then restore the size such that hardware may use the |
89 | * previous-to-end RFD. |
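 *
 * A minimal sketch of that ordering (the names below are illustrative
 * only, not the driver's actual variables):
 *
 *      guard->link = cpu_to_le32(new_rfd_dma);      1) link in the new RFD
 *      guard->command &= ~cpu_to_le16(cb_el);       2) then clear its EL bit
 *      guard->size = cpu_to_le16(full_buffer_len);  3) restore size last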
90 | * |
 * Under typical operation, the receive unit (RU) is started once,
92 | * and the controller happily fills RFDs as frames arrive. If |
93 | * replacement RFDs cannot be allocated, or the RU goes non-active, |
94 | * the RU must be restarted. Frame arrival generates an interrupt, |
95 | * and Rx indication and re-allocation happen in the same context, |
96 | * therefore no locking is required. A software-generated interrupt |
97 | * is generated from the watchdog to recover from a failed allocation |
98 | * scenario where all Rx resources have been indicated and none re- |
99 | * placed. |
100 | * |
101 | * V. Miscellaneous |
102 | * |
 * VLAN offloading of tagging, stripping and filtering is not
 * supported, but the driver will accommodate the extra 4-byte VLAN tag
 * for processing by upper layers. Tx/Rx checksum offloading is not
 * supported. Tx scatter/gather is not supported. Jumbo frames are
 * not supported (hardware limitation).
108 | * |
109 | * MagicPacket(tm) WoL support is enabled/disabled via ethtool. |
110 | * |
111 | * Thanks to JC (jchapman@katalix.com) for helping with |
112 | * testing/troubleshooting the development driver. |
113 | * |
114 | * TODO: |
115 | * o several entry points race with dev->close |
116 | * o check for tx-no-resources/stop Q races with tx clean/wake Q |
117 | * |
118 | * FIXES: |
119 | * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com> |
120 | * - Stratus87247: protect MDI control register manipulations |
121 | * 2009/06/01 - Andreas Mohr <andi at lisas dot de> |
122 | * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs |
123 | */ |
124 | |
125 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
126 | |
127 | #include <linux/hardirq.h> |
128 | #include <linux/interrupt.h> |
129 | #include <linux/module.h> |
130 | #include <linux/moduleparam.h> |
131 | #include <linux/kernel.h> |
132 | #include <linux/types.h> |
133 | #include <linux/sched.h> |
134 | #include <linux/slab.h> |
135 | #include <linux/delay.h> |
136 | #include <linux/init.h> |
137 | #include <linux/pci.h> |
138 | #include <linux/dma-mapping.h> |
139 | #include <linux/dmapool.h> |
140 | #include <linux/netdevice.h> |
141 | #include <linux/etherdevice.h> |
142 | #include <linux/mii.h> |
143 | #include <linux/if_vlan.h> |
144 | #include <linux/skbuff.h> |
145 | #include <linux/ethtool.h> |
146 | #include <linux/string.h> |
147 | #include <linux/firmware.h> |
148 | #include <linux/rtnetlink.h> |
149 | #include <asm/unaligned.h> |
150 | |
151 | |
152 | #define DRV_NAME "e100" |
153 | #define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver" |
154 | #define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation" |
155 | |
156 | #define E100_WATCHDOG_PERIOD (2 * HZ) |
157 | #define E100_NAPI_WEIGHT 16 |
158 | |
159 | #define FIRMWARE_D101M "e100/d101m_ucode.bin" |
160 | #define FIRMWARE_D101S "e100/d101s_ucode.bin" |
161 | #define FIRMWARE_D102E "e100/d102e_ucode.bin" |
162 | |
163 | MODULE_DESCRIPTION(DRV_DESCRIPTION); |
164 | MODULE_AUTHOR(DRV_COPYRIGHT); |
165 | MODULE_LICENSE("GPL v2" ); |
166 | MODULE_FIRMWARE(FIRMWARE_D101M); |
167 | MODULE_FIRMWARE(FIRMWARE_D101S); |
168 | MODULE_FIRMWARE(FIRMWARE_D102E); |
169 | |
170 | static int debug = 3; |
171 | static int eeprom_bad_csum_allow = 0; |
172 | static int use_io = 0; |
173 | module_param(debug, int, 0); |
174 | module_param(eeprom_bad_csum_allow, int, 0); |
175 | module_param(use_io, int, 0); |
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
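
/* Example (illustrative): the parameters above are set at module load
 * time, e.g.
 *
 *     modprobe e100 debug=16 eeprom_bad_csum_allow=1 use_io=1
 *
 * debug defaults to 3; the other two default to 0 (off).
 */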
179 | |
180 | #define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\ |
181 | PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \ |
182 | PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich } |
183 | static const struct pci_device_id e100_id_table[] = { |
184 | INTEL_8255X_ETHERNET_DEVICE(0x1029, 0), |
185 | INTEL_8255X_ETHERNET_DEVICE(0x1030, 0), |
186 | INTEL_8255X_ETHERNET_DEVICE(0x1031, 3), |
187 | INTEL_8255X_ETHERNET_DEVICE(0x1032, 3), |
188 | INTEL_8255X_ETHERNET_DEVICE(0x1033, 3), |
189 | INTEL_8255X_ETHERNET_DEVICE(0x1034, 3), |
190 | INTEL_8255X_ETHERNET_DEVICE(0x1038, 3), |
191 | INTEL_8255X_ETHERNET_DEVICE(0x1039, 4), |
192 | INTEL_8255X_ETHERNET_DEVICE(0x103A, 4), |
193 | INTEL_8255X_ETHERNET_DEVICE(0x103B, 4), |
194 | INTEL_8255X_ETHERNET_DEVICE(0x103C, 4), |
195 | INTEL_8255X_ETHERNET_DEVICE(0x103D, 4), |
196 | INTEL_8255X_ETHERNET_DEVICE(0x103E, 4), |
197 | INTEL_8255X_ETHERNET_DEVICE(0x1050, 5), |
198 | INTEL_8255X_ETHERNET_DEVICE(0x1051, 5), |
199 | INTEL_8255X_ETHERNET_DEVICE(0x1052, 5), |
200 | INTEL_8255X_ETHERNET_DEVICE(0x1053, 5), |
201 | INTEL_8255X_ETHERNET_DEVICE(0x1054, 5), |
202 | INTEL_8255X_ETHERNET_DEVICE(0x1055, 5), |
203 | INTEL_8255X_ETHERNET_DEVICE(0x1056, 5), |
204 | INTEL_8255X_ETHERNET_DEVICE(0x1057, 5), |
205 | INTEL_8255X_ETHERNET_DEVICE(0x1059, 0), |
206 | INTEL_8255X_ETHERNET_DEVICE(0x1064, 6), |
207 | INTEL_8255X_ETHERNET_DEVICE(0x1065, 6), |
208 | INTEL_8255X_ETHERNET_DEVICE(0x1066, 6), |
209 | INTEL_8255X_ETHERNET_DEVICE(0x1067, 6), |
210 | INTEL_8255X_ETHERNET_DEVICE(0x1068, 6), |
211 | INTEL_8255X_ETHERNET_DEVICE(0x1069, 6), |
212 | INTEL_8255X_ETHERNET_DEVICE(0x106A, 6), |
213 | INTEL_8255X_ETHERNET_DEVICE(0x106B, 6), |
214 | INTEL_8255X_ETHERNET_DEVICE(0x1091, 7), |
215 | INTEL_8255X_ETHERNET_DEVICE(0x1092, 7), |
216 | INTEL_8255X_ETHERNET_DEVICE(0x1093, 7), |
217 | INTEL_8255X_ETHERNET_DEVICE(0x1094, 7), |
218 | INTEL_8255X_ETHERNET_DEVICE(0x1095, 7), |
219 | INTEL_8255X_ETHERNET_DEVICE(0x10fe, 7), |
220 | INTEL_8255X_ETHERNET_DEVICE(0x1209, 0), |
221 | INTEL_8255X_ETHERNET_DEVICE(0x1229, 0), |
222 | INTEL_8255X_ETHERNET_DEVICE(0x2449, 2), |
223 | INTEL_8255X_ETHERNET_DEVICE(0x2459, 2), |
224 | INTEL_8255X_ETHERNET_DEVICE(0x245D, 2), |
225 | INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7), |
226 | { 0, } |
227 | }; |
228 | MODULE_DEVICE_TABLE(pci, e100_id_table); |
229 | |
230 | enum mac { |
231 | mac_82557_D100_A = 0, |
232 | mac_82557_D100_B = 1, |
233 | mac_82557_D100_C = 2, |
234 | mac_82558_D101_A4 = 4, |
235 | mac_82558_D101_B0 = 5, |
236 | mac_82559_D101M = 8, |
237 | mac_82559_D101S = 9, |
238 | mac_82550_D102 = 12, |
239 | mac_82550_D102_C = 13, |
240 | mac_82551_E = 14, |
241 | mac_82551_F = 15, |
242 | mac_82551_10 = 16, |
243 | mac_unknown = 0xFF, |
244 | }; |
245 | |
246 | enum phy { |
247 | phy_100a = 0x000003E0, |
248 | phy_100c = 0x035002A8, |
249 | phy_82555_tx = 0x015002A8, |
250 | phy_nsc_tx = 0x5C002000, |
251 | phy_82562_et = 0x033002A8, |
252 | phy_82562_em = 0x032002A8, |
253 | phy_82562_ek = 0x031002A8, |
254 | phy_82562_eh = 0x017002A8, |
255 | phy_82552_v = 0xd061004d, |
256 | phy_unknown = 0xFFFFFFFF, |
257 | }; |
258 | |
259 | /* CSR (Control/Status Registers) */ |
260 | struct csr { |
261 | struct { |
262 | u8 status; |
263 | u8 stat_ack; |
264 | u8 cmd_lo; |
265 | u8 cmd_hi; |
266 | u32 gen_ptr; |
267 | } scb; |
268 | u32 port; |
269 | u16 flash_ctrl; |
270 | u8 eeprom_ctrl_lo; |
271 | u8 eeprom_ctrl_hi; |
272 | u32 mdi_ctrl; |
273 | u32 rx_dma_count; |
274 | }; |
275 | |
276 | enum scb_status { |
277 | rus_no_res = 0x08, |
278 | rus_ready = 0x10, |
279 | rus_mask = 0x3C, |
280 | }; |
281 | |
282 | enum ru_state { |
283 | RU_SUSPENDED = 0, |
284 | RU_RUNNING = 1, |
285 | RU_UNINITIALIZED = -1, |
286 | }; |
287 | |
288 | enum scb_stat_ack { |
289 | stat_ack_not_ours = 0x00, |
290 | stat_ack_sw_gen = 0x04, |
291 | stat_ack_rnr = 0x10, |
292 | stat_ack_cu_idle = 0x20, |
293 | stat_ack_frame_rx = 0x40, |
294 | stat_ack_cu_cmd_done = 0x80, |
295 | stat_ack_not_present = 0xFF, |
296 | stat_ack_rx = (stat_ack_sw_gen | stat_ack_rnr | stat_ack_frame_rx), |
297 | stat_ack_tx = (stat_ack_cu_idle | stat_ack_cu_cmd_done), |
298 | }; |
299 | |
300 | enum scb_cmd_hi { |
301 | irq_mask_none = 0x00, |
302 | irq_mask_all = 0x01, |
303 | irq_sw_gen = 0x02, |
304 | }; |
305 | |
306 | enum scb_cmd_lo { |
307 | cuc_nop = 0x00, |
308 | ruc_start = 0x01, |
309 | ruc_load_base = 0x06, |
310 | cuc_start = 0x10, |
311 | cuc_resume = 0x20, |
312 | cuc_dump_addr = 0x40, |
313 | cuc_dump_stats = 0x50, |
314 | cuc_load_base = 0x60, |
315 | cuc_dump_reset = 0x70, |
316 | }; |
317 | |
318 | enum cuc_dump { |
319 | cuc_dump_complete = 0x0000A005, |
320 | cuc_dump_reset_complete = 0x0000A007, |
321 | }; |
322 | |
323 | enum port { |
324 | software_reset = 0x0000, |
325 | selftest = 0x0001, |
326 | selective_reset = 0x0002, |
327 | }; |
328 | |
329 | enum eeprom_ctrl_lo { |
330 | eesk = 0x01, |
331 | eecs = 0x02, |
332 | eedi = 0x04, |
333 | eedo = 0x08, |
334 | }; |
335 | |
336 | enum mdi_ctrl { |
337 | mdi_write = 0x04000000, |
338 | mdi_read = 0x08000000, |
339 | mdi_ready = 0x10000000, |
340 | }; |
341 | |
342 | enum eeprom_op { |
343 | op_write = 0x05, |
344 | op_read = 0x06, |
345 | op_ewds = 0x10, |
346 | op_ewen = 0x13, |
347 | }; |
348 | |
349 | enum eeprom_offsets { |
350 | eeprom_cnfg_mdix = 0x03, |
351 | eeprom_phy_iface = 0x06, |
352 | eeprom_id = 0x0A, |
353 | eeprom_config_asf = 0x0D, |
354 | eeprom_smbus_addr = 0x90, |
355 | }; |
356 | |
357 | enum eeprom_cnfg_mdix { |
358 | eeprom_mdix_enabled = 0x0080, |
359 | }; |
360 | |
361 | enum eeprom_phy_iface { |
362 | NoSuchPhy = 0, |
363 | I82553AB, |
364 | I82553C, |
365 | I82503, |
366 | DP83840, |
367 | S80C240, |
368 | S80C24, |
369 | I82555, |
370 | DP83840A = 10, |
371 | }; |
372 | |
373 | enum eeprom_id { |
374 | eeprom_id_wol = 0x0020, |
375 | }; |
376 | |
377 | enum eeprom_config_asf { |
378 | eeprom_asf = 0x8000, |
379 | eeprom_gcl = 0x4000, |
380 | }; |
381 | |
382 | enum cb_status { |
383 | cb_complete = 0x8000, |
384 | cb_ok = 0x2000, |
385 | }; |
386 | |
387 | /* |
388 | * cb_command - Command Block flags |
389 | * @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory |
390 | */ |
391 | enum cb_command { |
392 | cb_nop = 0x0000, |
393 | cb_iaaddr = 0x0001, |
394 | cb_config = 0x0002, |
395 | cb_multi = 0x0003, |
396 | cb_tx = 0x0004, |
397 | cb_ucode = 0x0005, |
398 | cb_dump = 0x0006, |
399 | cb_tx_sf = 0x0008, |
400 | cb_tx_nc = 0x0010, |
401 | cb_cid = 0x1f00, |
402 | cb_i = 0x2000, |
403 | cb_s = 0x4000, |
404 | cb_el = 0x8000, |
405 | }; |
406 | |
407 | struct rfd { |
408 | __le16 status; |
409 | __le16 command; |
410 | __le32 link; |
411 | __le32 rbd; |
412 | __le16 actual_size; |
413 | __le16 size; |
414 | }; |
415 | |
416 | struct rx { |
417 | struct rx *next, *prev; |
418 | struct sk_buff *skb; |
419 | dma_addr_t dma_addr; |
420 | }; |
421 | |
422 | #if defined(__BIG_ENDIAN_BITFIELD) |
423 | #define X(a,b) b,a |
424 | #else |
425 | #define X(a,b) a,b |
426 | #endif |
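/* On little-endian hosts X() emits two adjacent bitfields in declaration
 * order; on big-endian hosts it reverses them, so every byte of struct
 * config below has the bit layout the 8255x expects regardless of the
 * compiler's bitfield ordering. */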
427 | struct config { |
428 | /*0*/ u8 X(byte_count:6, pad0:2); |
429 | /*1*/ u8 X(X(rx_fifo_limit:4, tx_fifo_limit:3), pad1:1); |
430 | /*2*/ u8 adaptive_ifs; |
431 | /*3*/ u8 X(X(X(X(mwi_enable:1, type_enable:1), read_align_enable:1), |
432 | term_write_cache_line:1), pad3:4); |
433 | /*4*/ u8 X(rx_dma_max_count:7, pad4:1); |
434 | /*5*/ u8 X(tx_dma_max_count:7, dma_max_count_enable:1); |
435 | /*6*/ u8 X(X(X(X(X(X(X(late_scb_update:1, direct_rx_dma:1), |
436 | tno_intr:1), cna_intr:1), standard_tcb:1), standard_stat_counter:1), |
437 | rx_save_overruns : 1), rx_save_bad_frames : 1); |
438 | /*7*/ u8 X(X(X(X(X(rx_discard_short_frames:1, tx_underrun_retry:2), |
439 | pad7:2), rx_extended_rfd:1), tx_two_frames_in_fifo:1), |
440 | tx_dynamic_tbd:1); |
441 | /*8*/ u8 X(X(mii_mode:1, pad8:6), csma_disabled:1); |
442 | /*9*/ u8 X(X(X(X(X(rx_tcpudp_checksum:1, pad9:3), vlan_arp_tco:1), |
443 | link_status_wake:1), arp_wake:1), mcmatch_wake:1); |
444 | /*10*/ u8 X(X(X(pad10:3, no_source_addr_insertion:1), preamble_length:2), |
445 | loopback:2); |
446 | /*11*/ u8 X(linear_priority:3, pad11:5); |
447 | /*12*/ u8 X(X(linear_priority_mode:1, pad12:3), ifs:4); |
448 | /*13*/ u8 ip_addr_lo; |
449 | /*14*/ u8 ip_addr_hi; |
450 | /*15*/ u8 X(X(X(X(X(X(X(promiscuous_mode:1, broadcast_disabled:1), |
451 | wait_after_win:1), pad15_1:1), ignore_ul_bit:1), crc_16_bit:1), |
452 | pad15_2:1), crs_or_cdt:1); |
453 | /*16*/ u8 fc_delay_lo; |
454 | /*17*/ u8 fc_delay_hi; |
455 | /*18*/ u8 X(X(X(X(X(rx_stripping:1, tx_padding:1), rx_crc_transfer:1), |
456 | rx_long_ok:1), fc_priority_threshold:3), pad18:1); |
457 | /*19*/ u8 X(X(X(X(X(X(X(addr_wake:1, magic_packet_disable:1), |
458 | fc_disable:1), fc_restop:1), fc_restart:1), fc_reject:1), |
459 | full_duplex_force:1), full_duplex_pin:1); |
460 | /*20*/ u8 X(X(X(pad20_1:5, fc_priority_location:1), multi_ia:1), pad20_2:1); |
461 | /*21*/ u8 X(X(pad21_1:3, multicast_all:1), pad21_2:4); |
462 | /*22*/ u8 X(X(rx_d102_mode:1, rx_vlan_drop:1), pad22:6); |
463 | u8 pad_d102[9]; |
464 | }; |
465 | |
466 | #define E100_MAX_MULTICAST_ADDRS 64 |
467 | struct multi { |
468 | __le16 count; |
469 | u8 addr[E100_MAX_MULTICAST_ADDRS * ETH_ALEN + 2/*pad*/]; |
470 | }; |
471 | |
472 | /* Important: keep total struct u32-aligned */ |
473 | #define UCODE_SIZE 134 |
474 | struct cb { |
475 | __le16 status; |
476 | __le16 command; |
477 | __le32 link; |
478 | union { |
479 | u8 iaaddr[ETH_ALEN]; |
480 | __le32 ucode[UCODE_SIZE]; |
481 | struct config config; |
482 | struct multi multi; |
483 | struct { |
484 | u32 tbd_array; |
485 | u16 tcb_byte_count; |
486 | u8 threshold; |
487 | u8 tbd_count; |
488 | struct { |
489 | __le32 buf_addr; |
490 | __le16 size; |
491 | u16 eol; |
492 | } tbd; |
493 | } tcb; |
494 | __le32 dump_buffer_addr; |
495 | } u; |
496 | struct cb *next, *prev; |
497 | dma_addr_t dma_addr; |
498 | struct sk_buff *skb; |
499 | }; |
500 | |
501 | enum loopback { |
502 | lb_none = 0, lb_mac = 1, lb_phy = 3, |
503 | }; |
504 | |
505 | struct stats { |
506 | __le32 tx_good_frames, tx_max_collisions, tx_late_collisions, |
507 | tx_underruns, tx_lost_crs, tx_deferred, tx_single_collisions, |
508 | tx_multiple_collisions, tx_total_collisions; |
509 | __le32 rx_good_frames, rx_crc_errors, rx_alignment_errors, |
510 | rx_resource_errors, rx_overrun_errors, rx_cdt_errors, |
511 | rx_short_frame_errors; |
512 | __le32 fc_xmt_pause, fc_rcv_pause, fc_rcv_unsupported; |
513 | __le16 xmt_tco_frames, rcv_tco_frames; |
514 | __le32 complete; |
515 | }; |
516 | |
517 | struct mem { |
518 | struct { |
519 | u32 signature; |
520 | u32 result; |
521 | } selftest; |
522 | struct stats stats; |
523 | u8 dump_buf[596]; |
524 | }; |
525 | |
526 | struct param_range { |
527 | u32 min; |
528 | u32 max; |
529 | u32 count; |
530 | }; |
531 | |
532 | struct params { |
533 | struct param_range rfds; |
534 | struct param_range cbs; |
535 | }; |
536 | |
537 | struct nic { |
538 | /* Begin: frequently used values: keep adjacent for cache effect */ |
539 | u32 msg_enable ____cacheline_aligned; |
540 | struct net_device *netdev; |
541 | struct pci_dev *pdev; |
542 | u16 (*mdio_ctrl)(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data); |
543 | |
544 | struct rx *rxs ____cacheline_aligned; |
545 | struct rx *rx_to_use; |
546 | struct rx *rx_to_clean; |
547 | struct rfd blank_rfd; |
548 | enum ru_state ru_running; |
549 | |
550 | spinlock_t cb_lock ____cacheline_aligned; |
551 | spinlock_t cmd_lock; |
552 | struct csr __iomem *csr; |
553 | enum scb_cmd_lo cuc_cmd; |
554 | unsigned int cbs_avail; |
555 | struct napi_struct napi; |
556 | struct cb *cbs; |
557 | struct cb *cb_to_use; |
558 | struct cb *cb_to_send; |
559 | struct cb *cb_to_clean; |
560 | __le16 tx_command; |
561 | /* End: frequently used values: keep adjacent for cache effect */ |
562 | |
563 | enum { |
564 | ich = (1 << 0), |
565 | promiscuous = (1 << 1), |
566 | multicast_all = (1 << 2), |
567 | wol_magic = (1 << 3), |
568 | ich_10h_workaround = (1 << 4), |
569 | } flags ____cacheline_aligned; |
570 | |
571 | enum mac mac; |
572 | enum phy phy; |
573 | struct params params; |
574 | struct timer_list watchdog; |
575 | struct mii_if_info mii; |
576 | struct work_struct tx_timeout_task; |
577 | enum loopback loopback; |
578 | |
579 | struct mem *mem; |
580 | dma_addr_t dma_addr; |
581 | |
582 | struct dma_pool *cbs_pool; |
583 | dma_addr_t cbs_dma_addr; |
584 | u8 adaptive_ifs; |
585 | u8 tx_threshold; |
586 | u32 tx_frames; |
587 | u32 tx_collisions; |
588 | u32 tx_deferred; |
589 | u32 tx_single_collisions; |
590 | u32 tx_multiple_collisions; |
591 | u32 tx_fc_pause; |
592 | u32 tx_tco_frames; |
593 | |
594 | u32 rx_fc_pause; |
595 | u32 rx_fc_unsupported; |
596 | u32 rx_tco_frames; |
597 | u32 rx_short_frame_errors; |
598 | u32 rx_over_length_errors; |
599 | |
600 | u16 eeprom_wc; |
601 | __le16 eeprom[256]; |
602 | spinlock_t mdio_lock; |
603 | const struct firmware *fw; |
604 | }; |
605 | |
606 | static inline void e100_write_flush(struct nic *nic) |
607 | { |
608 | /* Flush previous PCI writes through intermediate bridges |
609 | * by doing a benign read */ |
610 | (void)ioread8(&nic->csr->scb.status); |
611 | } |
612 | |
613 | static void e100_enable_irq(struct nic *nic) |
614 | { |
615 | unsigned long flags; |
616 | |
617 | spin_lock_irqsave(&nic->cmd_lock, flags); |
618 | iowrite8(irq_mask_none, &nic->csr->scb.cmd_hi); |
619 | e100_write_flush(nic); |
spin_unlock_irqrestore(&nic->cmd_lock, flags);
621 | } |
622 | |
623 | static void e100_disable_irq(struct nic *nic) |
624 | { |
625 | unsigned long flags; |
626 | |
627 | spin_lock_irqsave(&nic->cmd_lock, flags); |
628 | iowrite8(irq_mask_all, &nic->csr->scb.cmd_hi); |
629 | e100_write_flush(nic); |
spin_unlock_irqrestore(&nic->cmd_lock, flags);
631 | } |
632 | |
633 | static void e100_hw_reset(struct nic *nic) |
634 | { |
635 | /* Put CU and RU into idle with a selective reset to get |
636 | * device off of PCI bus */ |
637 | iowrite32(selective_reset, &nic->csr->port); |
638 | e100_write_flush(nic); udelay(20); |
639 | |
640 | /* Now fully reset device */ |
641 | iowrite32(software_reset, &nic->csr->port); |
642 | e100_write_flush(nic); udelay(20); |
643 | |
644 | /* Mask off our interrupt line - it's unmasked after reset */ |
645 | e100_disable_irq(nic); |
646 | } |
647 | |
648 | static int e100_self_test(struct nic *nic) |
649 | { |
650 | u32 dma_addr = nic->dma_addr + offsetof(struct mem, selftest); |
651 | |
652 | /* Passing the self-test is a pretty good indication |
653 | * that the device can DMA to/from host memory */ |
654 | |
655 | nic->mem->selftest.signature = 0; |
656 | nic->mem->selftest.result = 0xFFFFFFFF; |
657 | |
658 | iowrite32(selftest | dma_addr, &nic->csr->port); |
659 | e100_write_flush(nic); |
660 | /* Wait 10 msec for self-test to complete */ |
msleep(10);
662 | |
663 | /* Interrupts are enabled after self-test */ |
664 | e100_disable_irq(nic); |
665 | |
666 | /* Check results of self-test */ |
667 | if (nic->mem->selftest.result != 0) { |
668 | netif_err(nic, hw, nic->netdev, |
669 | "Self-test failed: result=0x%08X\n" , |
670 | nic->mem->selftest.result); |
671 | return -ETIMEDOUT; |
672 | } |
673 | if (nic->mem->selftest.signature == 0) { |
netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
675 | return -ETIMEDOUT; |
676 | } |
677 | |
678 | return 0; |
679 | } |
680 | |
681 | static void e100_eeprom_write(struct nic *nic, u16 addr_len, u16 addr, __le16 data) |
682 | { |
683 | u32 cmd_addr_data[3]; |
684 | u8 ctrl; |
685 | int i, j; |
686 | |
687 | /* Three cmds: write/erase enable, write data, write/erase disable */ |
688 | cmd_addr_data[0] = op_ewen << (addr_len - 2); |
689 | cmd_addr_data[1] = (((op_write << addr_len) | addr) << 16) | |
690 | le16_to_cpu(data); |
691 | cmd_addr_data[2] = op_ewds << (addr_len - 2); |
692 | |
693 | /* Bit-bang cmds to write word to eeprom */ |
694 | for (j = 0; j < 3; j++) { |
695 | |
696 | /* Chip select */ |
697 | iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); |
698 | e100_write_flush(nic); udelay(4); |
699 | |
700 | for (i = 31; i >= 0; i--) { |
701 | ctrl = (cmd_addr_data[j] & (1 << i)) ? |
702 | eecs | eedi : eecs; |
703 | iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); |
704 | e100_write_flush(nic); udelay(4); |
705 | |
706 | iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); |
707 | e100_write_flush(nic); udelay(4); |
708 | } |
709 | /* Wait 10 msec for cmd to complete */ |
msleep(10);
711 | |
712 | /* Chip deselect */ |
713 | iowrite8(0, &nic->csr->eeprom_ctrl_lo); |
714 | e100_write_flush(nic); udelay(4); |
715 | } |
716 | }; |
717 | |
718 | /* General technique stolen from the eepro100 driver - very clever */ |
719 | static __le16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr) |
720 | { |
721 | u32 cmd_addr_data; |
722 | u16 data = 0; |
723 | u8 ctrl; |
724 | int i; |
725 | |
726 | cmd_addr_data = ((op_read << *addr_len) | addr) << 16; |
727 | |
728 | /* Chip select */ |
729 | iowrite8(eecs | eesk, &nic->csr->eeprom_ctrl_lo); |
730 | e100_write_flush(nic); udelay(4); |
731 | |
732 | /* Bit-bang to read word from eeprom */ |
733 | for (i = 31; i >= 0; i--) { |
734 | ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs; |
735 | iowrite8(ctrl, &nic->csr->eeprom_ctrl_lo); |
736 | e100_write_flush(nic); udelay(4); |
737 | |
738 | iowrite8(ctrl | eesk, &nic->csr->eeprom_ctrl_lo); |
739 | e100_write_flush(nic); udelay(4); |
740 | |
741 | /* Eeprom drives a dummy zero to EEDO after receiving |
742 | * complete address. Use this to adjust addr_len. */ |
743 | ctrl = ioread8(&nic->csr->eeprom_ctrl_lo); |
744 | if (!(ctrl & eedo) && i > 16) { |
745 | *addr_len -= (i - 16); |
746 | i = 17; |
747 | } |
748 | |
749 | data = (data << 1) | (ctrl & eedo ? 1 : 0); |
750 | } |
751 | |
752 | /* Chip deselect */ |
753 | iowrite8(0, &nic->csr->eeprom_ctrl_lo); |
754 | e100_write_flush(nic); udelay(4); |
755 | |
756 | return cpu_to_le16(data); |
757 | }; |
758 | |
759 | /* Load entire EEPROM image into driver cache and validate checksum */ |
760 | static int e100_eeprom_load(struct nic *nic) |
761 | { |
762 | u16 addr, addr_len = 8, checksum = 0; |
763 | |
764 | /* Try reading with an 8-bit addr len to discover actual addr len */ |
e100_eeprom_read(nic, &addr_len, 0);
766 | nic->eeprom_wc = 1 << addr_len; |
767 | |
768 | for (addr = 0; addr < nic->eeprom_wc; addr++) { |
nic->eeprom[addr] = e100_eeprom_read(nic, &addr_len, addr);
770 | if (addr < nic->eeprom_wc - 1) |
771 | checksum += le16_to_cpu(nic->eeprom[addr]); |
772 | } |
773 | |
774 | /* The checksum, stored in the last word, is calculated such that |
775 | * the sum of words should be 0xBABA */ |
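/* Worked example: if the first (eeprom_wc - 1) words sum to 0x1234, the
 * stored checksum word must read 0xBABA - 0x1234 = 0xA886 for the image
 * to be accepted. */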
776 | if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) { |
netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
778 | if (!eeprom_bad_csum_allow) |
779 | return -EAGAIN; |
780 | } |
781 | |
782 | return 0; |
783 | } |
784 | |
785 | /* Save (portion of) driver EEPROM cache to device and update checksum */ |
786 | static int e100_eeprom_save(struct nic *nic, u16 start, u16 count) |
787 | { |
788 | u16 addr, addr_len = 8, checksum = 0; |
789 | |
790 | /* Try reading with an 8-bit addr len to discover actual addr len */ |
e100_eeprom_read(nic, &addr_len, 0);
792 | nic->eeprom_wc = 1 << addr_len; |
793 | |
794 | if (start + count >= nic->eeprom_wc) |
795 | return -EINVAL; |
796 | |
797 | for (addr = start; addr < start + count; addr++) |
e100_eeprom_write(nic, addr_len, addr, nic->eeprom[addr]);
799 | |
800 | /* The checksum, stored in the last word, is calculated such that |
801 | * the sum of words should be 0xBABA */ |
802 | for (addr = 0; addr < nic->eeprom_wc - 1; addr++) |
803 | checksum += le16_to_cpu(nic->eeprom[addr]); |
804 | nic->eeprom[nic->eeprom_wc - 1] = cpu_to_le16(0xBABA - checksum); |
e100_eeprom_write(nic, addr_len, nic->eeprom_wc - 1,
nic->eeprom[nic->eeprom_wc - 1]);
807 | |
808 | return 0; |
809 | } |
810 | |
811 | #define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */ |
812 | #define E100_WAIT_SCB_FAST 20 /* delay like the old code */ |
813 | static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr) |
814 | { |
815 | unsigned long flags; |
816 | unsigned int i; |
817 | int err = 0; |
818 | |
819 | spin_lock_irqsave(&nic->cmd_lock, flags); |
820 | |
821 | /* Previous command is accepted when SCB clears */ |
822 | for (i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) { |
823 | if (likely(!ioread8(&nic->csr->scb.cmd_lo))) |
824 | break; |
825 | cpu_relax(); |
826 | if (unlikely(i > E100_WAIT_SCB_FAST)) |
827 | udelay(5); |
828 | } |
829 | if (unlikely(i == E100_WAIT_SCB_TIMEOUT)) { |
830 | err = -EAGAIN; |
831 | goto err_unlock; |
832 | } |
833 | |
834 | if (unlikely(cmd != cuc_resume)) |
835 | iowrite32(dma_addr, &nic->csr->scb.gen_ptr); |
836 | iowrite8(cmd, &nic->csr->scb.cmd_lo); |
837 | |
838 | err_unlock: |
spin_unlock_irqrestore(&nic->cmd_lock, flags);
840 | |
841 | return err; |
842 | } |
843 | |
844 | static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, |
845 | int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) |
846 | { |
847 | struct cb *cb; |
848 | unsigned long flags; |
849 | int err; |
850 | |
851 | spin_lock_irqsave(&nic->cb_lock, flags); |
852 | |
853 | if (unlikely(!nic->cbs_avail)) { |
854 | err = -ENOMEM; |
855 | goto err_unlock; |
856 | } |
857 | |
858 | cb = nic->cb_to_use; |
859 | nic->cb_to_use = cb->next; |
860 | nic->cbs_avail--; |
861 | cb->skb = skb; |
862 | |
863 | err = cb_prepare(nic, cb, skb); |
864 | if (err) |
865 | goto err_unlock; |
866 | |
867 | if (unlikely(!nic->cbs_avail)) |
868 | err = -ENOSPC; |
869 | |
870 | |
871 | /* Order is important otherwise we'll be in a race with h/w: |
872 | * set S-bit in current first, then clear S-bit in previous. */ |
873 | cb->command |= cpu_to_le16(cb_s); |
874 | dma_wmb(); |
875 | cb->prev->command &= cpu_to_le16(~cb_s); |
876 | |
877 | while (nic->cb_to_send != nic->cb_to_use) { |
878 | if (unlikely(e100_exec_cmd(nic, nic->cuc_cmd, |
879 | nic->cb_to_send->dma_addr))) { |
880 | /* Ok, here's where things get sticky. It's |
881 | * possible that we can't schedule the command |
882 | * because the controller is too busy, so |
883 | * let's just queue the command and try again |
884 | * when another command is scheduled. */ |
885 | if (err == -ENOSPC) { |
886 | //request a reset |
schedule_work(&nic->tx_timeout_task);
888 | } |
889 | break; |
890 | } else { |
891 | nic->cuc_cmd = cuc_resume; |
892 | nic->cb_to_send = nic->cb_to_send->next; |
893 | } |
894 | } |
895 | |
896 | err_unlock: |
spin_unlock_irqrestore(&nic->cb_lock, flags);
898 | |
899 | return err; |
900 | } |
901 | |
902 | static int mdio_read(struct net_device *netdev, int addr, int reg) |
903 | { |
struct nic *nic = netdev_priv(netdev);
905 | return nic->mdio_ctrl(nic, addr, mdi_read, reg, 0); |
906 | } |
907 | |
908 | static void mdio_write(struct net_device *netdev, int addr, int reg, int data) |
909 | { |
struct nic *nic = netdev_priv(netdev);
911 | |
912 | nic->mdio_ctrl(nic, addr, mdi_write, reg, data); |
913 | } |
914 | |
915 | /* the standard mdio_ctrl() function for usual MII-compliant hardware */ |
916 | static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data) |
917 | { |
918 | u32 data_out = 0; |
919 | unsigned int i; |
920 | unsigned long flags; |
921 | |
922 | |
923 | /* |
924 | * Stratus87247: we shouldn't be writing the MDI control |
925 | * register until the Ready bit shows True. Also, since |
926 | * manipulation of the MDI control registers is a multi-step |
927 | * procedure it should be done under lock. |
928 | */ |
929 | spin_lock_irqsave(&nic->mdio_lock, flags); |
930 | for (i = 100; i; --i) { |
931 | if (ioread32(&nic->csr->mdi_ctrl) & mdi_ready) |
932 | break; |
933 | udelay(20); |
934 | } |
935 | if (unlikely(!i)) { |
netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
spin_unlock_irqrestore(&nic->mdio_lock, flags);
938 | return 0; /* No way to indicate timeout error */ |
939 | } |
940 | iowrite32((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl); |
941 | |
942 | for (i = 0; i < 100; i++) { |
943 | udelay(20); |
944 | if ((data_out = ioread32(&nic->csr->mdi_ctrl)) & mdi_ready) |
945 | break; |
946 | } |
spin_unlock_irqrestore(&nic->mdio_lock, flags);
948 | netif_printk(nic, hw, KERN_DEBUG, nic->netdev, |
949 | "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n" , |
950 | dir == mdi_read ? "READ" : "WRITE" , |
951 | addr, reg, data, data_out); |
952 | return (u16)data_out; |
953 | } |
954 | |
955 | /* slightly tweaked mdio_ctrl() function for phy_82552_v specifics */ |
956 | static u16 mdio_ctrl_phy_82552_v(struct nic *nic, |
957 | u32 addr, |
958 | u32 dir, |
959 | u32 reg, |
960 | u16 data) |
961 | { |
962 | if ((reg == MII_BMCR) && (dir == mdi_write)) { |
963 | if (data & (BMCR_ANRESTART | BMCR_ANENABLE)) { |
u16 advert = mdio_read(nic->netdev, nic->mii.phy_id,
965 | MII_ADVERTISE); |
966 | |
967 | /* |
968 | * Workaround Si issue where sometimes the part will not |
969 | * autoneg to 100Mbps even when advertised. |
970 | */ |
971 | if (advert & ADVERTISE_100FULL) |
972 | data |= BMCR_SPEED100 | BMCR_FULLDPLX; |
973 | else if (advert & ADVERTISE_100HALF) |
974 | data |= BMCR_SPEED100; |
975 | } |
976 | } |
977 | return mdio_ctrl_hw(nic, addr, dir, reg, data); |
978 | } |
979 | |
980 | /* Fully software-emulated mdio_ctrl() function for cards without |
981 | * MII-compliant PHYs. |
982 | * For now, this is mainly geared towards 80c24 support; in case of further |
983 | * requirements for other types (i82503, ...?) either extend this mechanism |
984 | * or split it, whichever is cleaner. |
985 | */ |
986 | static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic, |
987 | u32 addr, |
988 | u32 dir, |
989 | u32 reg, |
990 | u16 data) |
991 | { |
992 | /* might need to allocate a netdev_priv'ed register array eventually |
993 | * to be able to record state changes, but for now |
994 | * some fully hardcoded register handling ought to be ok I guess. */ |
995 | |
996 | if (dir == mdi_read) { |
997 | switch (reg) { |
998 | case MII_BMCR: |
999 | /* Auto-negotiation, right? */ |
1000 | return BMCR_ANENABLE | |
1001 | BMCR_FULLDPLX; |
1002 | case MII_BMSR: |
1003 | return BMSR_LSTATUS /* for mii_link_ok() */ | |
1004 | BMSR_ANEGCAPABLE | |
1005 | BMSR_10FULL; |
1006 | case MII_ADVERTISE: |
1007 | /* 80c24 is a "combo card" PHY, right? */ |
1008 | return ADVERTISE_10HALF | |
1009 | ADVERTISE_10FULL; |
1010 | default: |
1011 | netif_printk(nic, hw, KERN_DEBUG, nic->netdev, |
1012 | "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n" , |
1013 | dir == mdi_read ? "READ" : "WRITE" , |
1014 | addr, reg, data); |
1015 | return 0xFFFF; |
1016 | } |
1017 | } else { |
1018 | switch (reg) { |
1019 | default: |
1020 | netif_printk(nic, hw, KERN_DEBUG, nic->netdev, |
1021 | "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n" , |
1022 | dir == mdi_read ? "READ" : "WRITE" , |
1023 | addr, reg, data); |
1024 | return 0xFFFF; |
1025 | } |
1026 | } |
1027 | } |
1028 | static inline int e100_phy_supports_mii(struct nic *nic) |
1029 | { |
1030 | /* for now, just check it by comparing whether we |
1031 | are using MII software emulation. |
1032 | */ |
1033 | return (nic->mdio_ctrl != mdio_ctrl_phy_mii_emulated); |
1034 | } |
1035 | |
1036 | static void e100_get_defaults(struct nic *nic) |
1037 | { |
1038 | struct param_range rfds = { .min = 16, .max = 256, .count = 256 }; |
1039 | struct param_range cbs = { .min = 64, .max = 256, .count = 128 }; |
1040 | |
1041 | /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */ |
1042 | nic->mac = (nic->flags & ich) ? mac_82559_D101M : nic->pdev->revision; |
1043 | if (nic->mac == mac_unknown) |
1044 | nic->mac = mac_82557_D100_A; |
1045 | |
1046 | nic->params.rfds = rfds; |
1047 | nic->params.cbs = cbs; |
1048 | |
1049 | /* Quadwords to DMA into FIFO before starting frame transmit */ |
1050 | nic->tx_threshold = 0xE0; |
1051 | |
1052 | /* no interrupt for every tx completion, delay = 256us if not 557 */ |
1053 | nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf | |
1054 | ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i)); |
1055 | |
1056 | /* Template for a freshly allocated RFD */ |
1057 | nic->blank_rfd.command = 0; |
1058 | nic->blank_rfd.rbd = cpu_to_le32(0xFFFFFFFF); |
1059 | nic->blank_rfd.size = cpu_to_le16(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN); |
1060 | |
1061 | /* MII setup */ |
1062 | nic->mii.phy_id_mask = 0x1F; |
1063 | nic->mii.reg_num_mask = 0x1F; |
1064 | nic->mii.dev = nic->netdev; |
1065 | nic->mii.mdio_read = mdio_read; |
1066 | nic->mii.mdio_write = mdio_write; |
1067 | } |
1068 | |
1069 | static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) |
1070 | { |
1071 | struct config *config = &cb->u.config; |
1072 | u8 *c = (u8 *)config; |
1073 | struct net_device *netdev = nic->netdev; |
1074 | |
1075 | cb->command = cpu_to_le16(cb_config); |
1076 | |
1077 | memset(config, 0, sizeof(struct config)); |
1078 | |
1079 | config->byte_count = 0x16; /* bytes in this struct */ |
1080 | config->rx_fifo_limit = 0x8; /* bytes in FIFO before DMA */ |
1081 | config->direct_rx_dma = 0x1; /* reserved */ |
1082 | config->standard_tcb = 0x1; /* 1=standard, 0=extended */ |
1083 | config->standard_stat_counter = 0x1; /* 1=standard, 0=extended */ |
1084 | config->rx_discard_short_frames = 0x1; /* 1=discard, 0=pass */ |
1085 | config->tx_underrun_retry = 0x3; /* # of underrun retries */ |
1086 | if (e100_phy_supports_mii(nic)) |
1087 | config->mii_mode = 1; /* 1=MII mode, 0=i82503 mode */ |
1088 | config->pad10 = 0x6; |
1089 | config->no_source_addr_insertion = 0x1; /* 1=no, 0=yes */ |
1090 | config->preamble_length = 0x2; /* 0=1, 1=3, 2=7, 3=15 bytes */ |
1091 | config->ifs = 0x6; /* x16 = inter frame spacing */ |
1092 | config->ip_addr_hi = 0xF2; /* ARP IP filter - not used */ |
1093 | config->pad15_1 = 0x1; |
1094 | config->pad15_2 = 0x1; |
1095 | config->crs_or_cdt = 0x0; /* 0=CRS only, 1=CRS or CDT */ |
1096 | config->fc_delay_hi = 0x40; /* time delay for fc frame */ |
1097 | config->tx_padding = 0x1; /* 1=pad short frames */ |
1098 | config->fc_priority_threshold = 0x7; /* 7=priority fc disabled */ |
1099 | config->pad18 = 0x1; |
1100 | config->full_duplex_pin = 0x1; /* 1=examine FDX# pin */ |
1101 | config->pad20_1 = 0x1F; |
1102 | config->fc_priority_location = 0x1; /* 1=byte#31, 0=byte#19 */ |
1103 | config->pad21_1 = 0x5; |
1104 | |
1105 | config->adaptive_ifs = nic->adaptive_ifs; |
1106 | config->loopback = nic->loopback; |
1107 | |
1108 | if (nic->mii.force_media && nic->mii.full_duplex) |
1109 | config->full_duplex_force = 0x1; /* 1=force, 0=auto */ |
1110 | |
1111 | if (nic->flags & promiscuous || nic->loopback) { |
1112 | config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ |
1113 | config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ |
1114 | config->promiscuous_mode = 0x1; /* 1=on, 0=off */ |
1115 | } |
1116 | |
1117 | if (unlikely(netdev->features & NETIF_F_RXFCS)) |
1118 | config->rx_crc_transfer = 0x1; /* 1=save, 0=discard */ |
1119 | |
1120 | if (nic->flags & multicast_all) |
1121 | config->multicast_all = 0x1; /* 1=accept, 0=no */ |
1122 | |
1123 | /* disable WoL when up */ |
if (netif_running(nic->netdev) || !(nic->flags & wol_magic))
1125 | config->magic_packet_disable = 0x1; /* 1=off, 0=on */ |
1126 | |
1127 | if (nic->mac >= mac_82558_D101_A4) { |
1128 | config->fc_disable = 0x1; /* 1=Tx fc off, 0=Tx fc on */ |
1129 | config->mwi_enable = 0x1; /* 1=enable, 0=disable */ |
1130 | config->standard_tcb = 0x0; /* 1=standard, 0=extended */ |
1131 | config->rx_long_ok = 0x1; /* 1=VLANs ok, 0=standard */ |
1132 | if (nic->mac >= mac_82559_D101M) { |
1133 | config->tno_intr = 0x1; /* TCO stats enable */ |
1134 | /* Enable TCO in extended config */ |
1135 | if (nic->mac >= mac_82551_10) { |
1136 | config->byte_count = 0x20; /* extended bytes */ |
1137 | config->rx_d102_mode = 0x1; /* GMRC for TCO */ |
1138 | } |
1139 | } else { |
1140 | config->standard_stat_counter = 0x0; |
1141 | } |
1142 | } |
1143 | |
1144 | if (netdev->features & NETIF_F_RXALL) { |
1145 | config->rx_save_overruns = 0x1; /* 1=save, 0=discard */ |
1146 | config->rx_save_bad_frames = 0x1; /* 1=save, 0=discard */ |
1147 | config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */ |
1148 | } |
1149 | |
netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n",
c + 0);
netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n",
c + 8);
netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n",
c + 16);
1156 | return 0; |
1157 | } |
1158 | |
1159 | /************************************************************************* |
1160 | * CPUSaver parameters |
1161 | * |
1162 | * All CPUSaver parameters are 16-bit literals that are part of a |
1163 | * "move immediate value" instruction. By changing the value of |
1164 | * the literal in the instruction before the code is loaded, the |
1165 | * driver can change the algorithm. |
1166 | * |
1167 | * INTDELAY - This loads the dead-man timer with its initial value. |
1168 | * When this timer expires the interrupt is asserted, and the |
1169 | * timer is reset each time a new packet is received. (see |
1170 | * BUNDLEMAX below to set the limit on number of chained packets) |
1171 | * The current default is 0x600 or 1536. Experiments show that |
 * the value should probably stay within the 0x200 - 0x1000 range.
1173 | * |
1174 | * BUNDLEMAX - |
1175 | * This sets the maximum number of frames that will be bundled. In |
1176 | * some situations, such as the TCP windowing algorithm, it may be |
1177 | * better to limit the growth of the bundle size than let it go as |
1178 | * high as it can, because that could cause too much added latency. |
1179 | * The default is six, because this is the number of packets in the |
1180 | * default TCP window size. A value of 1 would make CPUSaver indicate |
1181 | * an interrupt for every frame received. If you do not want to put |
1182 | * a limit on the bundle size, set this value to xFFFF. |
1183 | * |
1184 | * BUNDLESMALL - |
1185 | * This contains a bit-mask describing the minimum size frame that |
1186 | * will be bundled. The default masks the lower 7 bits, which means |
1187 | * that any frame less than 128 bytes in length will not be bundled, |
1188 | * but will instead immediately generate an interrupt. This does |
1189 | * not affect the current bundle in any way. Any frame that is 128 |
 * bytes or larger will be bundled normally. This feature is meant
1191 | * to provide immediate indication of ACK frames in a TCP environment. |
1192 | * Customers were seeing poor performance when a machine with CPUSaver |
1193 | * enabled was sending but not receiving. The delay introduced when |
1194 | * the ACKs were received was enough to reduce total throughput, because |
1195 | * the sender would sit idle until the ACK was finally seen. |
1196 | * |
1197 | * The current default is 0xFF80, which masks out the lower 7 bits. |
1198 | * This means that any frame which is x7F (127) bytes or smaller |
1199 | * will cause an immediate interrupt. Because this value must be a |
1200 | * bit mask, there are only a few valid values that can be used. To |
1201 | * turn this feature off, the driver can write the value xFFFF to the |
1202 | * lower word of this instruction (in the same way that the other |
1203 | * parameters are used). Likewise, a value of 0xF800 (2047) would |
1204 | * cause an interrupt to be generated for every frame, because all |
1205 | * standard Ethernet frames are <= 2047 bytes in length. |
1206 | *************************************************************************/ |
1207 | |
1208 | /* if you wish to disable the ucode functionality, while maintaining the |
1209 | * workarounds it provides, set the following defines to: |
1210 | * BUNDLESMALL 0 |
1211 | * BUNDLEMAX 1 |
1212 | * INTDELAY 1 |
1213 | */ |
1214 | #define BUNDLESMALL 1 |
1215 | #define BUNDLEMAX (u16)6 |
1216 | #define INTDELAY (u16)1536 /* 0x600 */ |
1217 | |
1218 | /* Initialize firmware */ |
1219 | static const struct firmware *e100_request_firmware(struct nic *nic) |
1220 | { |
1221 | const char *fw_name; |
1222 | const struct firmware *fw = nic->fw; |
1223 | u8 timer, bundle, min_size; |
1224 | int err = 0; |
1225 | bool required = false; |
1226 | |
1227 | /* do not load u-code for ICH devices */ |
1228 | if (nic->flags & ich) |
1229 | return NULL; |
1230 | |
1231 | /* Search for ucode match against h/w revision |
1232 | * |
1233 | * Based on comments in the source code for the FreeBSD fxp |
1234 | * driver, the FIRMWARE_D102E ucode includes both CPUSaver and |
1235 | * |
1236 | * "fixes for bugs in the B-step hardware (specifically, bugs |
1237 | * with Inline Receive)." |
1238 | * |
1239 | * So we must fail if it cannot be loaded. |
1240 | * |
1241 | * The other microcode files are only required for the optional |
1242 | * CPUSaver feature. Nice to have, but no reason to fail. |
1243 | */ |
1244 | if (nic->mac == mac_82559_D101M) { |
1245 | fw_name = FIRMWARE_D101M; |
1246 | } else if (nic->mac == mac_82559_D101S) { |
1247 | fw_name = FIRMWARE_D101S; |
1248 | } else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) { |
1249 | fw_name = FIRMWARE_D102E; |
1250 | required = true; |
1251 | } else { /* No ucode on other devices */ |
1252 | return NULL; |
1253 | } |
1254 | |
1255 | /* If the firmware has not previously been loaded, request a pointer |
1256 | * to it. If it was previously loaded, we are reinitializing the |
1257 | * adapter, possibly in a resume from hibernate, in which case |
1258 | * request_firmware() cannot be used. |
1259 | */ |
1260 | if (!fw) |
err = request_firmware(&fw, fw_name, &nic->pdev->dev);
1262 | |
1263 | if (err) { |
1264 | if (required) { |
1265 | netif_err(nic, probe, nic->netdev, |
1266 | "Failed to load firmware \"%s\": %d\n" , |
1267 | fw_name, err); |
1268 | return ERR_PTR(error: err); |
1269 | } else { |
1270 | netif_info(nic, probe, nic->netdev, |
1271 | "CPUSaver disabled. Needs \"%s\": %d\n" , |
1272 | fw_name, err); |
1273 | return NULL; |
1274 | } |
1275 | } |
1276 | |
1277 | /* Firmware should be precisely UCODE_SIZE (words) plus three bytes |
1278 | indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */ |
1279 | if (fw->size != UCODE_SIZE * 4 + 3) { |
1280 | netif_err(nic, probe, nic->netdev, |
1281 | "Firmware \"%s\" has wrong size %zu\n" , |
1282 | fw_name, fw->size); |
1283 | release_firmware(fw); |
1284 | return ERR_PTR(error: -EINVAL); |
1285 | } |
1286 | |
1287 | /* Read timer, bundle and min_size from end of firmware blob */ |
1288 | timer = fw->data[UCODE_SIZE * 4]; |
1289 | bundle = fw->data[UCODE_SIZE * 4 + 1]; |
1290 | min_size = fw->data[UCODE_SIZE * 4 + 2]; |
1291 | |
1292 | if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE || |
1293 | min_size >= UCODE_SIZE) { |
1294 | netif_err(nic, probe, nic->netdev, |
1295 | "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n" , |
1296 | fw_name, timer, bundle, min_size); |
1297 | release_firmware(fw); |
1298 | return ERR_PTR(error: -EINVAL); |
1299 | } |
1300 | |
1301 | /* OK, firmware is validated and ready to use. Save a pointer |
1302 | * to it in the nic */ |
1303 | nic->fw = fw; |
1304 | return fw; |
1305 | } |
1306 | |
1307 | static int e100_setup_ucode(struct nic *nic, struct cb *cb, |
1308 | struct sk_buff *skb) |
1309 | { |
1310 | const struct firmware *fw = (void *)skb; |
1311 | u8 timer, bundle, min_size; |
1312 | |
1313 | /* It's not a real skb; we just abused the fact that e100_exec_cb |
1314 | will pass it through to here... */ |
1315 | cb->skb = NULL; |
1316 | |
1317 | /* firmware is stored as little endian already */ |
1318 | memcpy(cb->u.ucode, fw->data, UCODE_SIZE * 4); |
1319 | |
1320 | /* Read timer, bundle and min_size from end of firmware blob */ |
1321 | timer = fw->data[UCODE_SIZE * 4]; |
1322 | bundle = fw->data[UCODE_SIZE * 4 + 1]; |
1323 | min_size = fw->data[UCODE_SIZE * 4 + 2]; |
1324 | |
1325 | /* Insert user-tunable settings in cb->u.ucode */ |
1326 | cb->u.ucode[timer] &= cpu_to_le32(0xFFFF0000); |
1327 | cb->u.ucode[timer] |= cpu_to_le32(INTDELAY); |
1328 | cb->u.ucode[bundle] &= cpu_to_le32(0xFFFF0000); |
1329 | cb->u.ucode[bundle] |= cpu_to_le32(BUNDLEMAX); |
1330 | cb->u.ucode[min_size] &= cpu_to_le32(0xFFFF0000); |
1331 | cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80); |
1332 | |
1333 | cb->command = cpu_to_le16(cb_ucode | cb_el); |
1334 | return 0; |
1335 | } |
1336 | |
1337 | static inline int e100_load_ucode_wait(struct nic *nic) |
1338 | { |
1339 | const struct firmware *fw; |
1340 | int err = 0, counter = 50; |
1341 | struct cb *cb = nic->cb_to_clean; |
1342 | |
1343 | fw = e100_request_firmware(nic); |
1344 | /* If it's NULL, then no ucode is required */ |
if (IS_ERR_OR_NULL(fw))
return PTR_ERR_OR_ZERO(fw);

if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
netif_err(nic, probe, nic->netdev,
"ucode cmd failed with error %d\n", err);
1351 | |
1352 | /* must restart cuc */ |
1353 | nic->cuc_cmd = cuc_start; |
1354 | |
1355 | /* wait for completion */ |
1356 | e100_write_flush(nic); |
1357 | udelay(10); |
1358 | |
1359 | /* wait for possibly (ouch) 500ms */ |
1360 | while (!(cb->status & cpu_to_le16(cb_complete))) { |
msleep(10);
1362 | if (!--counter) break; |
1363 | } |
1364 | |
1365 | /* ack any interrupts, something could have been set */ |
1366 | iowrite8(~0, &nic->csr->scb.stat_ack); |
1367 | |
1368 | /* if the command failed, or is not OK, notify and return */ |
1369 | if (!counter || !(cb->status & cpu_to_le16(cb_ok))) { |
netif_err(nic, probe, nic->netdev, "ucode load failed\n");
1371 | err = -EPERM; |
1372 | } |
1373 | |
1374 | return err; |
1375 | } |
1376 | |
1377 | static int e100_setup_iaaddr(struct nic *nic, struct cb *cb, |
1378 | struct sk_buff *skb) |
1379 | { |
1380 | cb->command = cpu_to_le16(cb_iaaddr); |
1381 | memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); |
1382 | return 0; |
1383 | } |
1384 | |
1385 | static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb) |
1386 | { |
1387 | cb->command = cpu_to_le16(cb_dump); |
1388 | cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr + |
1389 | offsetof(struct mem, dump_buf)); |
1390 | return 0; |
1391 | } |
1392 | |
1393 | static int e100_phy_check_without_mii(struct nic *nic) |
1394 | { |
1395 | u8 phy_type; |
1396 | int without_mii; |
1397 | |
1398 | phy_type = (le16_to_cpu(nic->eeprom[eeprom_phy_iface]) >> 8) & 0x0f; |
1399 | |
1400 | switch (phy_type) { |
1401 | case NoSuchPhy: /* Non-MII PHY; UNTESTED! */ |
1402 | case I82503: /* Non-MII PHY; UNTESTED! */ |
1403 | case S80C24: /* Non-MII PHY; tested and working */ |
1404 | /* paragraph from the FreeBSD driver, "FXP_PHY_80C24": |
1405 | * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter |
1406 | * doesn't have a programming interface of any sort. The |
1407 | * media is sensed automatically based on how the link partner |
1408 | * is configured. This is, in essence, manual configuration. |
1409 | */ |
1410 | netif_info(nic, probe, nic->netdev, |
1411 | "found MII-less i82503 or 80c24 or other PHY\n" ); |
1412 | |
1413 | nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated; |
1414 | nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */ |
1415 | |
1416 | /* these might be needed for certain MII-less cards... |
1417 | * nic->flags |= ich; |
1418 | * nic->flags |= ich_10h_workaround; */ |
1419 | |
1420 | without_mii = 1; |
1421 | break; |
1422 | default: |
1423 | without_mii = 0; |
1424 | break; |
1425 | } |
1426 | return without_mii; |
1427 | } |
1428 | |
1429 | #define NCONFIG_AUTO_SWITCH 0x0080 |
1430 | #define MII_NSC_CONG MII_RESV1 |
1431 | #define NSC_CONG_ENABLE 0x0100 |
1432 | #define NSC_CONG_TXREADY 0x0400 |
1433 | static int e100_phy_init(struct nic *nic) |
1434 | { |
1435 | struct net_device *netdev = nic->netdev; |
1436 | u32 addr; |
1437 | u16 bmcr, stat, id_lo, id_hi, cong; |
1438 | |
1439 | /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */ |
1440 | for (addr = 0; addr < 32; addr++) { |
1441 | nic->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr; |
bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
stat = mdio_read(netdev, nic->mii.phy_id, MII_BMSR);
1445 | if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0)))) |
1446 | break; |
1447 | } |
1448 | if (addr == 32) { |
1449 | /* uhoh, no PHY detected: check whether we seem to be some |
1450 | * weird, rare variant which is *known* to not have any MII. |
1451 | * But do this AFTER MII checking only, since this does |
1452 | * lookup of EEPROM values which may easily be unreliable. */ |
1453 | if (e100_phy_check_without_mii(nic)) |
1454 | return 0; /* simply return and hope for the best */ |
1455 | else { |
1456 | /* for unknown cases log a fatal error */ |
1457 | netif_err(nic, hw, nic->netdev, |
1458 | "Failed to locate any known PHY, aborting\n" ); |
1459 | return -EAGAIN; |
1460 | } |
1461 | } else |
1462 | netif_printk(nic, hw, KERN_DEBUG, nic->netdev, |
1463 | "phy_addr = %d\n" , nic->mii.phy_id); |
1464 | |
1465 | /* Get phy ID */ |
id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
nic->phy = (u32)id_hi << 16 | (u32)id_lo;
netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
"phy ID = 0x%08X\n", nic->phy);
1471 | |
1472 | /* Select the phy and isolate the rest */ |
1473 | for (addr = 0; addr < 32; addr++) { |
1474 | if (addr != nic->mii.phy_id) { |
1475 | mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE); |
1476 | } else if (nic->phy != phy_82552_v) { |
1477 | bmcr = mdio_read(netdev, addr, MII_BMCR); |
mdio_write(netdev, addr, MII_BMCR,
bmcr & ~BMCR_ISOLATE);
1480 | } |
1481 | } |
1482 | /* |
1483 | * Workaround for 82552: |
1484 | * Clear the ISOLATE bit on selected phy_id last (mirrored on all |
1485 | * other phy_id's) using bmcr value from addr discovery loop above. |
1486 | */ |
1487 | if (nic->phy == phy_82552_v) |
mdio_write(netdev, nic->mii.phy_id, MII_BMCR,
bmcr & ~BMCR_ISOLATE);
1490 | |
1491 | /* Handle National tx phys */ |
1492 | #define NCS_PHY_MODEL_MASK 0xFFF0FFFF |
1493 | if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) { |
1494 | /* Disable congestion control */ |
cong = mdio_read(netdev, nic->mii.phy_id, MII_NSC_CONG);
cong |= NSC_CONG_TXREADY;
cong &= ~NSC_CONG_ENABLE;
mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
1499 | } |
1500 | |
1501 | if (nic->phy == phy_82552_v) { |
u16 advert = mdio_read(netdev, nic->mii.phy_id, MII_ADVERTISE);
1503 | |
1504 | /* assign special tweaked mdio_ctrl() function */ |
1505 | nic->mdio_ctrl = mdio_ctrl_phy_82552_v; |
1506 | |
1507 | /* Workaround Si not advertising flow-control during autoneg */ |
1508 | advert |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; |
mdio_write(netdev, nic->mii.phy_id, MII_ADVERTISE, advert);
1510 | |
1511 | /* Reset for the above changes to take effect */ |
bmcr = mdio_read(netdev, nic->mii.phy_id, MII_BMCR);
bmcr |= BMCR_RESET;
mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
(mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
(le16_to_cpu(nic->eeprom[eeprom_cnfg_mdix]) & eeprom_mdix_enabled))) {
/* enable/disable MDI/MDI-X auto-switching. */
mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
1521 | } |
1522 | |
1523 | return 0; |
1524 | } |
1525 | |
1526 | static int e100_hw_init(struct nic *nic) |
1527 | { |
1528 | int err = 0; |
1529 | |
1530 | e100_hw_reset(nic); |
1531 | |
netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
1533 | if ((err = e100_self_test(nic))) |
1534 | return err; |
1535 | |
1536 | if ((err = e100_phy_init(nic))) |
1537 | return err; |
	if ((err = e100_exec_cmd(nic, cuc_load_base, 0)))
		return err;
	if ((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
	if ((err = e100_load_ucode_wait(nic)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
	if ((err = e100_exec_cb(nic, NULL, e100_setup_iaaddr)))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_dump_addr,
		nic->dma_addr + offsetof(struct mem, stats))))
		return err;
	if ((err = e100_exec_cmd(nic, cuc_dump_reset, 0)))
1552 | return err; |
1553 | |
1554 | e100_disable_irq(nic); |
1555 | |
1556 | return 0; |
1557 | } |
1558 | |
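/* Prepare a multicast setup command block: copy up to
 * E100_MAX_MULTICAST_ADDRS addresses from the netdev's multicast list
 * into the CB so the controller can filter them in hardware. */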
1559 | static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) |
1560 | { |
1561 | struct net_device *netdev = nic->netdev; |
1562 | struct netdev_hw_addr *ha; |
1563 | u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS); |
1564 | |
1565 | cb->command = cpu_to_le16(cb_multi); |
1566 | cb->u.multi.count = cpu_to_le16(count * ETH_ALEN); |
1567 | i = 0; |
1568 | netdev_for_each_mc_addr(ha, netdev) { |
1569 | if (i == count) |
1570 | break; |
1571 | memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, |
1572 | ETH_ALEN); |
1573 | } |
1574 | return 0; |
1575 | } |
1576 | |
1577 | static void e100_set_multicast_list(struct net_device *netdev) |
1578 | { |
	struct nic *nic = netdev_priv(netdev);
1580 | |
1581 | netif_printk(nic, hw, KERN_DEBUG, nic->netdev, |
1582 | "mc_count=%d, flags=0x%04X\n" , |
1583 | netdev_mc_count(netdev), netdev->flags); |
1584 | |
1585 | if (netdev->flags & IFF_PROMISC) |
1586 | nic->flags |= promiscuous; |
1587 | else |
1588 | nic->flags &= ~promiscuous; |
1589 | |
1590 | if (netdev->flags & IFF_ALLMULTI || |
1591 | netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS) |
1592 | nic->flags |= multicast_all; |
1593 | else |
1594 | nic->flags &= ~multicast_all; |
1595 | |
	e100_exec_cb(nic, NULL, e100_configure);
	e100_exec_cb(nic, NULL, e100_multi);
1598 | } |
1599 | |
1600 | static void e100_update_stats(struct nic *nic) |
1601 | { |
1602 | struct net_device *dev = nic->netdev; |
1603 | struct net_device_stats *ns = &dev->stats; |
1604 | struct stats *s = &nic->mem->stats; |
1605 | __le32 *complete = (nic->mac < mac_82558_D101_A4) ? &s->fc_xmt_pause : |
1606 | (nic->mac < mac_82559_D101M) ? (__le32 *)&s->xmt_tco_frames : |
1607 | &s->complete; |
1608 | |
1609 | /* Device's stats reporting may take several microseconds to |
1610 | * complete, so we're always waiting for results of the |
1611 | * previous command. */ |
1612 | |
1613 | if (*complete == cpu_to_le32(cuc_dump_reset_complete)) { |
1614 | *complete = 0; |
1615 | nic->tx_frames = le32_to_cpu(s->tx_good_frames); |
1616 | nic->tx_collisions = le32_to_cpu(s->tx_total_collisions); |
1617 | ns->tx_aborted_errors += le32_to_cpu(s->tx_max_collisions); |
1618 | ns->tx_window_errors += le32_to_cpu(s->tx_late_collisions); |
1619 | ns->tx_carrier_errors += le32_to_cpu(s->tx_lost_crs); |
1620 | ns->tx_fifo_errors += le32_to_cpu(s->tx_underruns); |
1621 | ns->collisions += nic->tx_collisions; |
1622 | ns->tx_errors += le32_to_cpu(s->tx_max_collisions) + |
1623 | le32_to_cpu(s->tx_lost_crs); |
1624 | nic->rx_short_frame_errors += |
1625 | le32_to_cpu(s->rx_short_frame_errors); |
1626 | ns->rx_length_errors = nic->rx_short_frame_errors + |
1627 | nic->rx_over_length_errors; |
1628 | ns->rx_crc_errors += le32_to_cpu(s->rx_crc_errors); |
1629 | ns->rx_frame_errors += le32_to_cpu(s->rx_alignment_errors); |
1630 | ns->rx_over_errors += le32_to_cpu(s->rx_overrun_errors); |
1631 | ns->rx_fifo_errors += le32_to_cpu(s->rx_overrun_errors); |
1632 | ns->rx_missed_errors += le32_to_cpu(s->rx_resource_errors); |
1633 | ns->rx_errors += le32_to_cpu(s->rx_crc_errors) + |
1634 | le32_to_cpu(s->rx_alignment_errors) + |
1635 | le32_to_cpu(s->rx_short_frame_errors) + |
1636 | le32_to_cpu(s->rx_cdt_errors); |
1637 | nic->tx_deferred += le32_to_cpu(s->tx_deferred); |
1638 | nic->tx_single_collisions += |
1639 | le32_to_cpu(s->tx_single_collisions); |
1640 | nic->tx_multiple_collisions += |
1641 | le32_to_cpu(s->tx_multiple_collisions); |
1642 | if (nic->mac >= mac_82558_D101_A4) { |
1643 | nic->tx_fc_pause += le32_to_cpu(s->fc_xmt_pause); |
1644 | nic->rx_fc_pause += le32_to_cpu(s->fc_rcv_pause); |
1645 | nic->rx_fc_unsupported += |
1646 | le32_to_cpu(s->fc_rcv_unsupported); |
1647 | if (nic->mac >= mac_82559_D101M) { |
1648 | nic->tx_tco_frames += |
1649 | le16_to_cpu(s->xmt_tco_frames); |
1650 | nic->rx_tco_frames += |
1651 | le16_to_cpu(s->rcv_tco_frames); |
1652 | } |
1653 | } |
1654 | } |
1655 | |
1656 | |
	if (e100_exec_cmd(nic, cuc_dump_reset, 0))
		netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
			     "exec cuc_dump_reset failed\n");
1660 | } |
1661 | |
1662 | static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex) |
1663 | { |
1664 | /* Adjust inter-frame-spacing (IFS) between two transmits if |
1665 | * we're getting collisions on a half-duplex connection. */ |
1666 | |
1667 | if (duplex == DUPLEX_HALF) { |
1668 | u32 prev = nic->adaptive_ifs; |
1669 | u32 min_frames = (speed == SPEED_100) ? 1000 : 100; |
1670 | |
1671 | if ((nic->tx_frames / 32 < nic->tx_collisions) && |
1672 | (nic->tx_frames > min_frames)) { |
1673 | if (nic->adaptive_ifs < 60) |
1674 | nic->adaptive_ifs += 5; |
1675 | } else if (nic->tx_frames < min_frames) { |
1676 | if (nic->adaptive_ifs >= 5) |
1677 | nic->adaptive_ifs -= 5; |
1678 | } |
1679 | if (nic->adaptive_ifs != prev) |
			e100_exec_cb(nic, NULL, e100_configure);
1681 | } |
1682 | } |
1683 | |
1684 | static void e100_watchdog(struct timer_list *t) |
1685 | { |
1686 | struct nic *nic = from_timer(nic, t, watchdog); |
1687 | struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET }; |
1688 | u32 speed; |
1689 | |
1690 | netif_printk(nic, timer, KERN_DEBUG, nic->netdev, |
1691 | "right now = %ld\n" , jiffies); |
1692 | |
1693 | /* mii library handles link maintenance tasks */ |
1694 | |
	mii_ethtool_gset(&nic->mii, &cmd);
	speed = ethtool_cmd_speed(&cmd);

	if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
		netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
			    speed == SPEED_100 ? 100 : 10,
			    cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
	} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
		netdev_info(nic->netdev, "NIC Link is Down\n");
	}

	mii_check_link(&nic->mii);
1707 | |
1708 | /* Software generated interrupt to recover from (rare) Rx |
1709 | * allocation failure. |
1710 | * Unfortunately have to use a spinlock to not re-enable interrupts |
1711 | * accidentally, due to hardware that shares a register between the |
1712 | * interrupt mask bit and the SW Interrupt generation bit */ |
	spin_lock_irq(&nic->cmd_lock);
	iowrite8(ioread8(&nic->csr->scb.cmd_hi) | irq_sw_gen, &nic->csr->scb.cmd_hi);
	e100_write_flush(nic);
	spin_unlock_irq(&nic->cmd_lock);
1717 | |
1718 | e100_update_stats(nic); |
	e100_adjust_adaptive_ifs(nic, speed, cmd.duplex);
1720 | |
1721 | if (nic->mac <= mac_82557_D100_C) |
1722 | /* Issue a multicast command to workaround a 557 lock up */ |
		e100_set_multicast_list(nic->netdev);
1724 | |
1725 | if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF) |
1726 | /* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */ |
1727 | nic->flags |= ich_10h_workaround; |
1728 | else |
1729 | nic->flags &= ~ich_10h_workaround; |
1730 | |
	mod_timer(&nic->watchdog,
		  round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
1733 | } |
1734 | |
1735 | static int e100_xmit_prepare(struct nic *nic, struct cb *cb, |
1736 | struct sk_buff *skb) |
1737 | { |
1738 | dma_addr_t dma_addr; |
1739 | cb->command = nic->tx_command; |
1740 | |
1741 | dma_addr = dma_map_single(&nic->pdev->dev, skb->data, skb->len, |
1742 | DMA_TO_DEVICE); |
1743 | /* If we can't map the skb, have the upper layer try later */ |
	if (dma_mapping_error(&nic->pdev->dev, dma_addr))
1745 | return -ENOMEM; |
1746 | |
1747 | /* |
1748 | * Use the last 4 bytes of the SKB payload packet as the CRC, used for |
1749 | * testing, ie sending frames with bad CRC. |
1750 | */ |
1751 | if (unlikely(skb->no_fcs)) |
1752 | cb->command |= cpu_to_le16(cb_tx_nc); |
1753 | else |
1754 | cb->command &= ~cpu_to_le16(cb_tx_nc); |
1755 | |
1756 | /* interrupt every 16 packets regardless of delay */ |
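	/* (cbs_avail with its low four bits masked equals itself only when
	 * cbs_avail is a multiple of 16, so this fires on every 16th CB) */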
1757 | if ((nic->cbs_avail & ~15) == nic->cbs_avail) |
1758 | cb->command |= cpu_to_le16(cb_i); |
1759 | cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd); |
1760 | cb->u.tcb.tcb_byte_count = 0; |
1761 | cb->u.tcb.threshold = nic->tx_threshold; |
1762 | cb->u.tcb.tbd_count = 1; |
1763 | cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr); |
1764 | cb->u.tcb.tbd.size = cpu_to_le16(skb->len); |
1765 | skb_tx_timestamp(skb); |
1766 | return 0; |
1767 | } |
1768 | |
1769 | static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, |
1770 | struct net_device *netdev) |
1771 | { |
	struct nic *nic = netdev_priv(netdev);
1773 | int err; |
1774 | |
1775 | if (nic->flags & ich_10h_workaround) { |
1776 | /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang. |
1777 | Issue a NOP command followed by a 1us delay before |
1778 | issuing the Tx command. */ |
		if (e100_exec_cmd(nic, cuc_nop, 0))
			netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
				     "exec cuc_nop failed\n");
1782 | udelay(1); |
1783 | } |
1784 | |
	err = e100_exec_cb(nic, skb, e100_xmit_prepare);
1786 | |
1787 | switch (err) { |
1788 | case -ENOSPC: |
1789 | /* We queued the skb, but now we're out of space. */ |
1790 | netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, |
1791 | "No space for CB\n" ); |
1792 | netif_stop_queue(dev: netdev); |
1793 | break; |
1794 | case -ENOMEM: |
1795 | /* This is a hard error - log it. */ |
1796 | netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, |
1797 | "Out of Tx resources, returning skb\n" ); |
1798 | netif_stop_queue(dev: netdev); |
1799 | return NETDEV_TX_BUSY; |
1800 | } |
1801 | |
1802 | return NETDEV_TX_OK; |
1803 | } |
1804 | |
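/* Walk the CBL from cb_to_clean, reclaiming commands the controller has
 * marked complete: unmap and free any Tx skbs, update stats, and return
 * the CBs to the free pool. */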
1805 | static int e100_tx_clean(struct nic *nic) |
1806 | { |
1807 | struct net_device *dev = nic->netdev; |
1808 | struct cb *cb; |
1809 | int tx_cleaned = 0; |
1810 | |
	spin_lock(&nic->cb_lock);
1812 | |
1813 | /* Clean CBs marked complete */ |
1814 | for (cb = nic->cb_to_clean; |
1815 | cb->status & cpu_to_le16(cb_complete); |
1816 | cb = nic->cb_to_clean = cb->next) { |
1817 | dma_rmb(); /* read skb after status */ |
1818 | netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev, |
1819 | "cb[%d]->status = 0x%04X\n" , |
1820 | (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)), |
1821 | cb->status); |
1822 | |
1823 | if (likely(cb->skb != NULL)) { |
1824 | dev->stats.tx_packets++; |
1825 | dev->stats.tx_bytes += cb->skb->len; |
1826 | |
1827 | dma_unmap_single(&nic->pdev->dev, |
1828 | le32_to_cpu(cb->u.tcb.tbd.buf_addr), |
1829 | le16_to_cpu(cb->u.tcb.tbd.size), |
1830 | DMA_TO_DEVICE); |
			dev_kfree_skb_any(cb->skb);
1832 | cb->skb = NULL; |
1833 | tx_cleaned = 1; |
1834 | } |
1835 | cb->status = 0; |
1836 | nic->cbs_avail++; |
1837 | } |
1838 | |
	spin_unlock(&nic->cb_lock);
1840 | |
1841 | /* Recover from running out of Tx resources in xmit_frame */ |
1842 | if (unlikely(tx_cleaned && netif_queue_stopped(nic->netdev))) |
		netif_wake_queue(nic->netdev);
1844 | |
1845 | return tx_cleaned; |
1846 | } |
1847 | |
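/* Release the Command Block List: free any Tx skbs still hanging off
 * unfinished CBs, then return the CB ring memory to the DMA pool. */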
1848 | static void e100_clean_cbs(struct nic *nic) |
1849 | { |
1850 | if (nic->cbs) { |
1851 | while (nic->cbs_avail != nic->params.cbs.count) { |
1852 | struct cb *cb = nic->cb_to_clean; |
1853 | if (cb->skb) { |
1854 | dma_unmap_single(&nic->pdev->dev, |
1855 | le32_to_cpu(cb->u.tcb.tbd.buf_addr), |
1856 | le16_to_cpu(cb->u.tcb.tbd.size), |
1857 | DMA_TO_DEVICE); |
1858 | dev_kfree_skb(cb->skb); |
1859 | } |
1860 | nic->cb_to_clean = nic->cb_to_clean->next; |
1861 | nic->cbs_avail++; |
1862 | } |
		dma_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
1864 | nic->cbs = NULL; |
1865 | nic->cbs_avail = 0; |
1866 | } |
1867 | nic->cuc_cmd = cuc_start; |
1868 | nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = |
1869 | nic->cbs; |
1870 | } |
1871 | |
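/* Allocate the CB ring from the DMA pool and link the CBs into a circle,
 * both via the CPU-side next/prev pointers and via the bus-address link
 * fields that the controller itself follows. */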
1872 | static int e100_alloc_cbs(struct nic *nic) |
1873 | { |
1874 | struct cb *cb; |
1875 | unsigned int i, count = nic->params.cbs.count; |
1876 | |
1877 | nic->cuc_cmd = cuc_start; |
1878 | nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL; |
1879 | nic->cbs_avail = 0; |
1880 | |
	nic->cbs = dma_pool_zalloc(nic->cbs_pool, GFP_KERNEL,
				   &nic->cbs_dma_addr);
1883 | if (!nic->cbs) |
1884 | return -ENOMEM; |
1885 | |
1886 | for (cb = nic->cbs, i = 0; i < count; cb++, i++) { |
1887 | cb->next = (i + 1 < count) ? cb + 1 : nic->cbs; |
1888 | cb->prev = (i == 0) ? nic->cbs + count - 1 : cb - 1; |
1889 | |
1890 | cb->dma_addr = nic->cbs_dma_addr + i * sizeof(struct cb); |
1891 | cb->link = cpu_to_le32(nic->cbs_dma_addr + |
1892 | ((i+1) % count) * sizeof(struct cb)); |
1893 | } |
1894 | |
1895 | nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = nic->cbs; |
1896 | nic->cbs_avail = count; |
1897 | |
1898 | return 0; |
1899 | } |
1900 | |
1901 | static inline void e100_start_receiver(struct nic *nic, struct rx *rx) |
1902 | { |
1903 | if (!nic->rxs) return; |
1904 | if (RU_SUSPENDED != nic->ru_running) return; |
1905 | |
1906 | /* handle init time starts */ |
1907 | if (!rx) rx = nic->rxs; |
1908 | |
1909 | /* (Re)start RU if suspended or idle and RFA is non-NULL */ |
1910 | if (rx->skb) { |
		e100_exec_cmd(nic, ruc_start, rx->dma_addr);
1912 | nic->ru_running = RU_RUNNING; |
1913 | } |
1914 | } |
1915 | |
1916 | #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) |
1917 | static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) |
1918 | { |
	if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
1920 | return -ENOMEM; |
1921 | |
1922 | /* Init, and map the RFD. */ |
	skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
1924 | rx->dma_addr = dma_map_single(&nic->pdev->dev, rx->skb->data, |
1925 | RFD_BUF_LEN, DMA_BIDIRECTIONAL); |
1926 | |
	if (dma_mapping_error(&nic->pdev->dev, rx->dma_addr)) {
		dev_kfree_skb_any(rx->skb);
1929 | rx->skb = NULL; |
1930 | rx->dma_addr = 0; |
1931 | return -ENOMEM; |
1932 | } |
1933 | |
1934 | /* Link the RFD to end of RFA by linking previous RFD to |
1935 | * this one. We are safe to touch the previous RFD because |
1936 | * it is protected by the before last buffer's el bit being set */ |
1937 | if (rx->prev->skb) { |
1938 | struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; |
		put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
		dma_sync_single_for_device(&nic->pdev->dev,
					   rx->prev->dma_addr,
					   sizeof(struct rfd),
					   DMA_BIDIRECTIONAL);
1944 | } |
1945 | |
1946 | return 0; |
1947 | } |
1948 | |
1949 | static int e100_rx_indicate(struct nic *nic, struct rx *rx, |
1950 | unsigned int *work_done, unsigned int work_to_do) |
1951 | { |
1952 | struct net_device *dev = nic->netdev; |
1953 | struct sk_buff *skb = rx->skb; |
1954 | struct rfd *rfd = (struct rfd *)skb->data; |
1955 | u16 rfd_status, actual_size; |
1956 | u16 fcs_pad = 0; |
1957 | |
1958 | if (unlikely(work_done && *work_done >= work_to_do)) |
1959 | return -EAGAIN; |
1960 | |
1961 | /* Need to sync before taking a peek at cb_complete bit */ |
	dma_sync_single_for_cpu(&nic->pdev->dev, rx->dma_addr,
				sizeof(struct rfd), DMA_BIDIRECTIONAL);
1964 | rfd_status = le16_to_cpu(rfd->status); |
1965 | |
1966 | netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev, |
1967 | "status=0x%04X\n" , rfd_status); |
1968 | dma_rmb(); /* read size after status bit */ |
1969 | |
1970 | /* If data isn't ready, nothing to indicate */ |
1971 | if (unlikely(!(rfd_status & cb_complete))) { |
1972 | /* If the next buffer has the el bit, but we think the receiver |
1973 | * is still running, check to see if it really stopped while |
1974 | * we had interrupts off. |
1975 | * This allows for a fast restart without re-enabling |
1976 | * interrupts */ |
1977 | if ((le16_to_cpu(rfd->command) & cb_el) && |
1978 | (RU_RUNNING == nic->ru_running)) |
1979 | |
1980 | if (ioread8(&nic->csr->scb.status) & rus_no_res) |
1981 | nic->ru_running = RU_SUSPENDED; |
		dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr,
					   sizeof(struct rfd),
					   DMA_FROM_DEVICE);
1985 | return -ENODATA; |
1986 | } |
1987 | |
1988 | /* Get actual data size */ |
1989 | if (unlikely(dev->features & NETIF_F_RXFCS)) |
1990 | fcs_pad = 4; |
1991 | actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF; |
1992 | if (unlikely(actual_size > RFD_BUF_LEN - sizeof(struct rfd))) |
1993 | actual_size = RFD_BUF_LEN - sizeof(struct rfd); |
1994 | |
1995 | /* Get data */ |
1996 | dma_unmap_single(&nic->pdev->dev, rx->dma_addr, RFD_BUF_LEN, |
1997 | DMA_BIDIRECTIONAL); |
1998 | |
1999 | /* If this buffer has the el bit, but we think the receiver |
2000 | * is still running, check to see if it really stopped while |
2001 | * we had interrupts off. |
2002 | * This allows for a fast restart without re-enabling interrupts. |
2003 | * This can happen when the RU sees the size change but also sees |
2004 | * the el bit set. */ |
2005 | if ((le16_to_cpu(rfd->command) & cb_el) && |
2006 | (RU_RUNNING == nic->ru_running)) { |
2007 | |
2008 | if (ioread8(&nic->csr->scb.status) & rus_no_res) |
2009 | nic->ru_running = RU_SUSPENDED; |
2010 | } |
2011 | |
2012 | /* Pull off the RFD and put the actual data (minus eth hdr) */ |
	skb_reserve(skb, sizeof(struct rfd));
	skb_put(skb, actual_size);
	skb->protocol = eth_type_trans(skb, nic->netdev);
2016 | |
2017 | /* If we are receiving all frames, then don't bother |
2018 | * checking for errors. |
2019 | */ |
2020 | if (unlikely(dev->features & NETIF_F_RXALL)) { |
2021 | if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) |
2022 | /* Received oversized frame, but keep it. */ |
2023 | nic->rx_over_length_errors++; |
2024 | goto process_skb; |
2025 | } |
2026 | |
2027 | if (unlikely(!(rfd_status & cb_ok))) { |
2028 | /* Don't indicate if hardware indicates errors */ |
2029 | dev_kfree_skb_any(skb); |
2030 | } else if (actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN + fcs_pad) { |
2031 | /* Don't indicate oversized frames */ |
2032 | nic->rx_over_length_errors++; |
2033 | dev_kfree_skb_any(skb); |
2034 | } else { |
2035 | process_skb: |
2036 | dev->stats.rx_packets++; |
2037 | dev->stats.rx_bytes += (actual_size - fcs_pad); |
2038 | netif_receive_skb(skb); |
2039 | if (work_done) |
2040 | (*work_done)++; |
2041 | } |
2042 | |
2043 | rx->skb = NULL; |
2044 | |
2045 | return 0; |
2046 | } |
2047 | |
2048 | static void e100_rx_clean(struct nic *nic, unsigned int *work_done, |
2049 | unsigned int work_to_do) |
2050 | { |
2051 | struct rx *rx; |
2052 | int restart_required = 0, err = 0; |
2053 | struct rx *old_before_last_rx, *new_before_last_rx; |
2054 | struct rfd *old_before_last_rfd, *new_before_last_rfd; |
2055 | |
2056 | /* Indicate newly arrived packets */ |
2057 | for (rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) { |
2058 | err = e100_rx_indicate(nic, rx, work_done, work_to_do); |
2059 | /* Hit quota or no more to clean */ |
2060 | if (-EAGAIN == err || -ENODATA == err) |
2061 | break; |
2062 | } |
2063 | |
2064 | |
2065 | /* On EAGAIN, hit quota so have more work to do, restart once |
2066 | * cleanup is complete. |
2067 | * Else, are we already rnr? then pay attention!!! this ensures that |
2068 | * the state machine progression never allows a start with a |
2069 | * partially cleaned list, avoiding a race between hardware |
2070 | * and rx_to_clean when in NAPI mode */ |
2071 | if (-EAGAIN != err && RU_SUSPENDED == nic->ru_running) |
2072 | restart_required = 1; |
2073 | |
2074 | old_before_last_rx = nic->rx_to_use->prev->prev; |
2075 | old_before_last_rfd = (struct rfd *)old_before_last_rx->skb->data; |
2076 | |
2077 | /* Alloc new skbs to refill list */ |
2078 | for (rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) { |
2079 | if (unlikely(e100_rx_alloc_skb(nic, rx))) |
2080 | break; /* Better luck next time (see watchdog) */ |
2081 | } |
2082 | |
2083 | new_before_last_rx = nic->rx_to_use->prev->prev; |
2084 | if (new_before_last_rx != old_before_last_rx) { |
2085 | /* Set the el-bit on the buffer that is before the last buffer. |
2086 | * This lets us update the next pointer on the last buffer |
2087 | * without worrying about hardware touching it. |
2088 | * We set the size to 0 to prevent hardware from touching this |
2089 | * buffer. |
2090 | * When the hardware hits the before last buffer with el-bit |
2091 | * and size of 0, it will RNR interrupt, the RUS will go into |
2092 | * the No Resources state. It will not complete nor write to |
2093 | * this buffer. */ |
2094 | new_before_last_rfd = |
2095 | (struct rfd *)new_before_last_rx->skb->data; |
2096 | new_before_last_rfd->size = 0; |
2097 | new_before_last_rfd->command |= cpu_to_le16(cb_el); |
		dma_sync_single_for_device(&nic->pdev->dev,
					   new_before_last_rx->dma_addr,
					   sizeof(struct rfd),
					   DMA_BIDIRECTIONAL);
2102 | |
2103 | /* Now that we have a new stopping point, we can clear the old |
2104 | * stopping point. We must sync twice to get the proper |
2105 | * ordering on the hardware side of things. */ |
2106 | old_before_last_rfd->command &= ~cpu_to_le16(cb_el); |
		dma_sync_single_for_device(&nic->pdev->dev,
					   old_before_last_rx->dma_addr,
					   sizeof(struct rfd),
					   DMA_BIDIRECTIONAL);
		old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN
							+ ETH_FCS_LEN);
		dma_sync_single_for_device(&nic->pdev->dev,
					   old_before_last_rx->dma_addr,
					   sizeof(struct rfd),
					   DMA_BIDIRECTIONAL);
2117 | } |
2118 | |
2119 | if (restart_required) { |
		/* Ack the RNR interrupt before restarting the receive unit */
		iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
		e100_start_receiver(nic, nic->rx_to_clean);
2123 | if (work_done) |
2124 | (*work_done)++; |
2125 | } |
2126 | } |
2127 | |
2128 | static void e100_rx_clean_list(struct nic *nic) |
2129 | { |
2130 | struct rx *rx; |
2131 | unsigned int i, count = nic->params.rfds.count; |
2132 | |
2133 | nic->ru_running = RU_UNINITIALIZED; |
2134 | |
2135 | if (nic->rxs) { |
2136 | for (rx = nic->rxs, i = 0; i < count; rx++, i++) { |
2137 | if (rx->skb) { |
2138 | dma_unmap_single(&nic->pdev->dev, |
2139 | rx->dma_addr, RFD_BUF_LEN, |
2140 | DMA_BIDIRECTIONAL); |
2141 | dev_kfree_skb(rx->skb); |
2142 | } |
2143 | } |
		kfree(nic->rxs);
2145 | nic->rxs = NULL; |
2146 | } |
2147 | |
2148 | nic->rx_to_use = nic->rx_to_clean = NULL; |
2149 | } |
2150 | |
2151 | static int e100_rx_alloc_list(struct nic *nic) |
2152 | { |
2153 | struct rx *rx; |
2154 | unsigned int i, count = nic->params.rfds.count; |
2155 | struct rfd *before_last; |
2156 | |
2157 | nic->rx_to_use = nic->rx_to_clean = NULL; |
2158 | nic->ru_running = RU_UNINITIALIZED; |
2159 | |
	if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_KERNEL)))
2161 | return -ENOMEM; |
2162 | |
2163 | for (rx = nic->rxs, i = 0; i < count; rx++, i++) { |
2164 | rx->next = (i + 1 < count) ? rx + 1 : nic->rxs; |
2165 | rx->prev = (i == 0) ? nic->rxs + count - 1 : rx - 1; |
2166 | if (e100_rx_alloc_skb(nic, rx)) { |
2167 | e100_rx_clean_list(nic); |
2168 | return -ENOMEM; |
2169 | } |
2170 | } |
2171 | /* Set the el-bit on the buffer that is before the last buffer. |
2172 | * This lets us update the next pointer on the last buffer without |
2173 | * worrying about hardware touching it. |
2174 | * We set the size to 0 to prevent hardware from touching this buffer. |
2175 | * When the hardware hits the before last buffer with el-bit and size |
2176 | * of 0, it will RNR interrupt, the RU will go into the No Resources |
2177 | * state. It will not complete nor write to this buffer. */ |
2178 | rx = nic->rxs->prev->prev; |
2179 | before_last = (struct rfd *)rx->skb->data; |
2180 | before_last->command |= cpu_to_le16(cb_el); |
2181 | before_last->size = 0; |
	dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr,
				   sizeof(struct rfd), DMA_BIDIRECTIONAL);
2184 | |
2185 | nic->rx_to_use = nic->rx_to_clean = nic->rxs; |
2186 | nic->ru_running = RU_SUSPENDED; |
2187 | |
2188 | return 0; |
2189 | } |
2190 | |
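/* Interrupt handler: acknowledge the asserted SCB status bits, note an
 * RNR (receive out of resources) condition for a later restart, and hand
 * the remaining work to NAPI with device interrupts masked. */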
2191 | static irqreturn_t e100_intr(int irq, void *dev_id) |
2192 | { |
2193 | struct net_device *netdev = dev_id; |
	struct nic *nic = netdev_priv(netdev);
2195 | u8 stat_ack = ioread8(&nic->csr->scb.stat_ack); |
2196 | |
2197 | netif_printk(nic, intr, KERN_DEBUG, nic->netdev, |
2198 | "stat_ack = 0x%02X\n" , stat_ack); |
2199 | |
2200 | if (stat_ack == stat_ack_not_ours || /* Not our interrupt */ |
2201 | stat_ack == stat_ack_not_present) /* Hardware is ejected */ |
2202 | return IRQ_NONE; |
2203 | |
2204 | /* Ack interrupt(s) */ |
2205 | iowrite8(stat_ack, &nic->csr->scb.stat_ack); |
2206 | |
2207 | /* We hit Receive No Resource (RNR); restart RU after cleaning */ |
2208 | if (stat_ack & stat_ack_rnr) |
2209 | nic->ru_running = RU_SUSPENDED; |
2210 | |
2211 | if (likely(napi_schedule_prep(&nic->napi))) { |
2212 | e100_disable_irq(nic); |
		__napi_schedule(&nic->napi);
2214 | } |
2215 | |
2216 | return IRQ_HANDLED; |
2217 | } |
2218 | |
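/* NAPI poll: clean received frames up to the budget and reclaim Tx CBs;
 * interrupts are re-enabled only once the stack agrees polling is done. */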
2219 | static int e100_poll(struct napi_struct *napi, int budget) |
2220 | { |
2221 | struct nic *nic = container_of(napi, struct nic, napi); |
2222 | unsigned int work_done = 0; |
2223 | |
	e100_rx_clean(nic, &work_done, budget);
2225 | e100_tx_clean(nic); |
2226 | |
2227 | /* If budget fully consumed, continue polling */ |
2228 | if (work_done == budget) |
2229 | return budget; |
2230 | |
2231 | /* only re-enable interrupt if stack agrees polling is really done */ |
2232 | if (likely(napi_complete_done(napi, work_done))) |
2233 | e100_enable_irq(nic); |
2234 | |
2235 | return work_done; |
2236 | } |
2237 | |
2238 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2239 | static void e100_netpoll(struct net_device *netdev) |
2240 | { |
	struct nic *nic = netdev_priv(netdev);
2242 | |
2243 | e100_disable_irq(nic); |
	e100_intr(nic->pdev->irq, netdev);
2245 | e100_tx_clean(nic); |
2246 | e100_enable_irq(nic); |
2247 | } |
2248 | #endif |
2249 | |
2250 | static int e100_set_mac_address(struct net_device *netdev, void *p) |
2251 | { |
	struct nic *nic = netdev_priv(netdev);
2253 | struct sockaddr *addr = p; |
2254 | |
	if (!is_valid_ether_addr(addr->sa_data))
2256 | return -EADDRNOTAVAIL; |
2257 | |
	eth_hw_addr_set(netdev, addr->sa_data);
	e100_exec_cb(nic, NULL, e100_setup_iaaddr);
2260 | |
2261 | return 0; |
2262 | } |
2263 | |
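/* ASF (Alert Standard Format) remote management is only possible on
 * certain device IDs and only when enabled in the EEPROM; the result is
 * used at shutdown/suspend time to decide whether to stay armed as a
 * wake source. */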
2264 | static int e100_asf(struct nic *nic) |
2265 | { |
2266 | /* ASF can be enabled from eeprom */ |
2267 | return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) && |
2268 | (le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_asf) && |
2269 | !(le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_gcl) && |
2270 | ((le16_to_cpu(nic->eeprom[eeprom_smbus_addr]) & 0xFF) != 0xFE); |
2271 | } |
2272 | |
2273 | static int e100_up(struct nic *nic) |
2274 | { |
2275 | int err; |
2276 | |
2277 | if ((err = e100_rx_alloc_list(nic))) |
2278 | return err; |
2279 | if ((err = e100_alloc_cbs(nic))) |
2280 | goto err_rx_clean_list; |
2281 | if ((err = e100_hw_init(nic))) |
2282 | goto err_clean_cbs; |
	e100_set_multicast_list(nic->netdev);
	e100_start_receiver(nic, NULL);
	mod_timer(&nic->watchdog, jiffies);
	if ((err = request_irq(nic->pdev->irq, e100_intr, IRQF_SHARED,
		nic->netdev->name, nic->netdev)))
		goto err_no_irq;
	netif_wake_queue(nic->netdev);
	napi_enable(&nic->napi);
2291 | /* enable ints _after_ enabling poll, preventing a race between |
2292 | * disable ints+schedule */ |
2293 | e100_enable_irq(nic); |
2294 | return 0; |
2295 | |
2296 | err_no_irq: |
	del_timer_sync(&nic->watchdog);
2298 | err_clean_cbs: |
2299 | e100_clean_cbs(nic); |
2300 | err_rx_clean_list: |
2301 | e100_rx_clean_list(nic); |
2302 | return err; |
2303 | } |
2304 | |
2305 | static void e100_down(struct nic *nic) |
2306 | { |
2307 | /* wait here for poll to complete */ |
	napi_disable(&nic->napi);
	netif_stop_queue(nic->netdev);
	e100_hw_reset(nic);
	free_irq(nic->pdev->irq, nic->netdev);
	del_timer_sync(&nic->watchdog);
	netif_carrier_off(nic->netdev);
2314 | e100_clean_cbs(nic); |
2315 | e100_rx_clean_list(nic); |
2316 | } |
2317 | |
2318 | static void e100_tx_timeout(struct net_device *netdev, unsigned int txqueue) |
2319 | { |
	struct nic *nic = netdev_priv(netdev);
2321 | |
2322 | /* Reset outside of interrupt context, to avoid request_irq |
2323 | * in interrupt context */ |
	schedule_work(&nic->tx_timeout_task);
2325 | } |
2326 | |
2327 | static void e100_tx_timeout_task(struct work_struct *work) |
2328 | { |
2329 | struct nic *nic = container_of(work, struct nic, tx_timeout_task); |
2330 | struct net_device *netdev = nic->netdev; |
2331 | |
2332 | netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev, |
2333 | "scb.status=0x%02X\n" , ioread8(&nic->csr->scb.status)); |
2334 | |
2335 | rtnl_lock(); |
	if (netif_running(netdev)) {
		e100_down(netdev_priv(netdev));
		e100_up(netdev_priv(netdev));
2339 | } |
2340 | rtnl_unlock(); |
2341 | } |
2342 | |
2343 | static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode) |
2344 | { |
2345 | int err; |
2346 | struct sk_buff *skb; |
2347 | |
2348 | /* Use driver resources to perform internal MAC or PHY |
2349 | * loopback test. A single packet is prepared and transmitted |
2350 | * in loopback mode, and the test passes if the received |
2351 | * packet compares byte-for-byte to the transmitted packet. */ |
2352 | |
2353 | if ((err = e100_rx_alloc_list(nic))) |
2354 | return err; |
2355 | if ((err = e100_alloc_cbs(nic))) |
2356 | goto err_clean_rx; |
2357 | |
2358 | /* ICH PHY loopback is broken so do MAC loopback instead */ |
2359 | if (nic->flags & ich && loopback_mode == lb_phy) |
2360 | loopback_mode = lb_mac; |
2361 | |
2362 | nic->loopback = loopback_mode; |
2363 | if ((err = e100_hw_init(nic))) |
2364 | goto err_loopback_none; |
2365 | |
2366 | if (loopback_mode == lb_phy) |
		mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
2368 | BMCR_LOOPBACK); |
2369 | |
2370 | e100_start_receiver(nic, NULL); |
2371 | |
	if (!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
2373 | err = -ENOMEM; |
2374 | goto err_loopback_none; |
2375 | } |
2376 | skb_put(skb, ETH_DATA_LEN); |
2377 | memset(skb->data, 0xFF, ETH_DATA_LEN); |
	e100_xmit_frame(skb, nic->netdev);

	msleep(10);

	dma_sync_single_for_cpu(&nic->pdev->dev, nic->rx_to_clean->dma_addr,
				RFD_BUF_LEN, DMA_BIDIRECTIONAL);

	if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
	   skb->data, ETH_DATA_LEN))
2387 | err = -EAGAIN; |
2388 | |
2389 | err_loopback_none: |
	mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR, 0);
2391 | nic->loopback = lb_none; |
2392 | e100_clean_cbs(nic); |
2393 | e100_hw_reset(nic); |
2394 | err_clean_rx: |
2395 | e100_rx_clean_list(nic); |
2396 | return err; |
2397 | } |
2398 | |
2399 | #define MII_LED_CONTROL 0x1B |
2400 | #define E100_82552_LED_OVERRIDE 0x19 |
2401 | #define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */ |
2402 | #define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */ |
2403 | |
2404 | static int e100_get_link_ksettings(struct net_device *netdev, |
2405 | struct ethtool_link_ksettings *cmd) |
2406 | { |
	struct nic *nic = netdev_priv(netdev);

	mii_ethtool_get_link_ksettings(&nic->mii, cmd);
2410 | |
2411 | return 0; |
2412 | } |
2413 | |
2414 | static int e100_set_link_ksettings(struct net_device *netdev, |
2415 | const struct ethtool_link_ksettings *cmd) |
2416 | { |
	struct nic *nic = netdev_priv(netdev);
	int err;

	mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
	err = mii_ethtool_set_link_ksettings(&nic->mii, cmd);
	e100_exec_cb(nic, NULL, e100_configure);
2423 | |
2424 | return err; |
2425 | } |
2426 | |
2427 | static void e100_get_drvinfo(struct net_device *netdev, |
2428 | struct ethtool_drvinfo *info) |
2429 | { |
	struct nic *nic = netdev_priv(netdev);
	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->bus_info, pci_name(nic->pdev),
		sizeof(info->bus_info));
2434 | } |
2435 | |
2436 | #define E100_PHY_REGS 0x1D |
2437 | static int e100_get_regs_len(struct net_device *netdev) |
2438 | { |
	struct nic *nic = netdev_priv(netdev);
2440 | |
2441 | /* We know the number of registers, and the size of the dump buffer. |
2442 | * Calculate the total size in bytes. |
2443 | */ |
2444 | return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf); |
2445 | } |
2446 | |
2447 | static void e100_get_regs(struct net_device *netdev, |
2448 | struct ethtool_regs *regs, void *p) |
2449 | { |
	struct nic *nic = netdev_priv(netdev);
2451 | u32 *buff = p; |
2452 | int i; |
2453 | |
2454 | regs->version = (1 << 24) | nic->pdev->revision; |
2455 | buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 | |
2456 | ioread8(&nic->csr->scb.cmd_lo) << 16 | |
2457 | ioread16(&nic->csr->scb.status); |
2458 | for (i = 0; i < E100_PHY_REGS; i++) |
2459 | /* Note that we read the registers in reverse order. This |
2460 | * ordering is the ABI apparently used by ethtool and other |
2461 | * applications. |
2462 | */ |
		buff[1 + i] = mdio_read(netdev, nic->mii.phy_id,
					E100_PHY_REGS - 1 - i);
2465 | memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf)); |
	e100_exec_cb(nic, NULL, e100_dump);
	msleep(10);
2468 | memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf, |
2469 | sizeof(nic->mem->dump_buf)); |
2470 | } |
2471 | |
2472 | static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) |
2473 | { |
	struct nic *nic = netdev_priv(netdev);
2475 | wol->supported = (nic->mac >= mac_82558_D101_A4) ? WAKE_MAGIC : 0; |
2476 | wol->wolopts = (nic->flags & wol_magic) ? WAKE_MAGIC : 0; |
2477 | } |
2478 | |
2479 | static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) |
2480 | { |
	struct nic *nic = netdev_priv(netdev);

	if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
	    !device_can_wakeup(&nic->pdev->dev))
2485 | return -EOPNOTSUPP; |
2486 | |
2487 | if (wol->wolopts) |
2488 | nic->flags |= wol_magic; |
2489 | else |
2490 | nic->flags &= ~wol_magic; |
2491 | |
	device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);

	e100_exec_cb(nic, NULL, e100_configure);
2495 | |
2496 | return 0; |
2497 | } |
2498 | |
2499 | static u32 e100_get_msglevel(struct net_device *netdev) |
2500 | { |
	struct nic *nic = netdev_priv(netdev);
2502 | return nic->msg_enable; |
2503 | } |
2504 | |
2505 | static void e100_set_msglevel(struct net_device *netdev, u32 value) |
2506 | { |
	struct nic *nic = netdev_priv(netdev);
2508 | nic->msg_enable = value; |
2509 | } |
2510 | |
2511 | static int e100_nway_reset(struct net_device *netdev) |
2512 | { |
	struct nic *nic = netdev_priv(netdev);
	return mii_nway_restart(&nic->mii);
2515 | } |
2516 | |
2517 | static u32 e100_get_link(struct net_device *netdev) |
2518 | { |
	struct nic *nic = netdev_priv(netdev);
	return mii_link_ok(&nic->mii);
2521 | } |
2522 | |
2523 | static int e100_get_eeprom_len(struct net_device *netdev) |
2524 | { |
	struct nic *nic = netdev_priv(netdev);
2526 | return nic->eeprom_wc << 1; |
2527 | } |
2528 | |
2529 | #define E100_EEPROM_MAGIC 0x1234 |
2530 | static int e100_get_eeprom(struct net_device *netdev, |
2531 | struct ethtool_eeprom *eeprom, u8 *bytes) |
2532 | { |
	struct nic *nic = netdev_priv(netdev);
2534 | |
2535 | eeprom->magic = E100_EEPROM_MAGIC; |
2536 | memcpy(bytes, &((u8 *)nic->eeprom)[eeprom->offset], eeprom->len); |
2537 | |
2538 | return 0; |
2539 | } |
2540 | |
2541 | static int e100_set_eeprom(struct net_device *netdev, |
2542 | struct ethtool_eeprom *eeprom, u8 *bytes) |
2543 | { |
	struct nic *nic = netdev_priv(netdev);
2545 | |
2546 | if (eeprom->magic != E100_EEPROM_MAGIC) |
2547 | return -EINVAL; |
2548 | |
2549 | memcpy(&((u8 *)nic->eeprom)[eeprom->offset], bytes, eeprom->len); |
2550 | |
	return e100_eeprom_save(nic, eeprom->offset >> 1,
				(eeprom->len >> 1) + 1);
2553 | } |
2554 | |
2555 | static void e100_get_ringparam(struct net_device *netdev, |
2556 | struct ethtool_ringparam *ring, |
2557 | struct kernel_ethtool_ringparam *kernel_ring, |
2558 | struct netlink_ext_ack *extack) |
2559 | { |
	struct nic *nic = netdev_priv(netdev);
2561 | struct param_range *rfds = &nic->params.rfds; |
2562 | struct param_range *cbs = &nic->params.cbs; |
2563 | |
2564 | ring->rx_max_pending = rfds->max; |
2565 | ring->tx_max_pending = cbs->max; |
2566 | ring->rx_pending = rfds->count; |
2567 | ring->tx_pending = cbs->count; |
2568 | } |
2569 | |
2570 | static int e100_set_ringparam(struct net_device *netdev, |
2571 | struct ethtool_ringparam *ring, |
2572 | struct kernel_ethtool_ringparam *kernel_ring, |
2573 | struct netlink_ext_ack *extack) |
2574 | { |
	struct nic *nic = netdev_priv(netdev);
2576 | struct param_range *rfds = &nic->params.rfds; |
2577 | struct param_range *cbs = &nic->params.cbs; |
2578 | |
2579 | if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) |
2580 | return -EINVAL; |
2581 | |
	if (netif_running(netdev))
		e100_down(nic);
	rfds->count = max(ring->rx_pending, rfds->min);
	rfds->count = min(rfds->count, rfds->max);
	cbs->count = max(ring->tx_pending, cbs->min);
	cbs->count = min(cbs->count, cbs->max);
	netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n",
		   rfds->count, cbs->count);
	if (netif_running(netdev))
2591 | e100_up(nic); |
2592 | |
2593 | return 0; |
2594 | } |
2595 | |
2596 | static const char e100_gstrings_test[][ETH_GSTRING_LEN] = { |
2597 | "Link test (on/offline)" , |
2598 | "Eeprom test (on/offline)" , |
2599 | "Self test (offline)" , |
2600 | "Mac loopback (offline)" , |
2601 | "Phy loopback (offline)" , |
2602 | }; |
2603 | #define E100_TEST_LEN ARRAY_SIZE(e100_gstrings_test) |
2604 | |
2605 | static void e100_diag_test(struct net_device *netdev, |
2606 | struct ethtool_test *test, u64 *data) |
2607 | { |
2608 | struct ethtool_cmd cmd; |
	struct nic *nic = netdev_priv(netdev);
	int i;

	memset(data, 0, E100_TEST_LEN * sizeof(u64));
	data[0] = !mii_link_ok(&nic->mii);
2614 | data[1] = e100_eeprom_load(nic); |
2615 | if (test->flags & ETH_TEST_FL_OFFLINE) { |
2616 | |
2617 | /* save speed, duplex & autoneg settings */ |
		mii_ethtool_gset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_down(nic);
		data[2] = e100_self_test(nic);
		data[3] = e100_loopback_test(nic, lb_mac);
		data[4] = e100_loopback_test(nic, lb_phy);

		/* restore speed, duplex & autoneg settings */
		mii_ethtool_sset(&nic->mii, &cmd);

		if (netif_running(netdev))
			e100_up(nic);
2631 | } |
2632 | for (i = 0; i < E100_TEST_LEN; i++) |
2633 | test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0; |
2634 | |
	msleep_interruptible(4 * 1000);
2636 | } |
2637 | |
2638 | static int e100_set_phys_id(struct net_device *netdev, |
2639 | enum ethtool_phys_id_state state) |
2640 | { |
	struct nic *nic = netdev_priv(netdev);
2642 | enum led_state { |
2643 | led_on = 0x01, |
2644 | led_off = 0x04, |
2645 | led_on_559 = 0x05, |
2646 | led_on_557 = 0x07, |
2647 | }; |
2648 | u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE : |
2649 | MII_LED_CONTROL; |
2650 | u16 leds = 0; |
2651 | |
2652 | switch (state) { |
2653 | case ETHTOOL_ID_ACTIVE: |
2654 | return 2; |
2655 | |
2656 | case ETHTOOL_ID_ON: |
2657 | leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON : |
2658 | (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559; |
2659 | break; |
2660 | |
2661 | case ETHTOOL_ID_OFF: |
2662 | leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off; |
2663 | break; |
2664 | |
2665 | case ETHTOOL_ID_INACTIVE: |
2666 | break; |
2667 | } |
2668 | |
	mdio_write(netdev, nic->mii.phy_id, led_reg, leds);
2670 | return 0; |
2671 | } |
2672 | |
2673 | static const char e100_gstrings_stats[][ETH_GSTRING_LEN] = { |
2674 | "rx_packets" , "tx_packets" , "rx_bytes" , "tx_bytes" , "rx_errors" , |
2675 | "tx_errors" , "rx_dropped" , "tx_dropped" , "multicast" , "collisions" , |
2676 | "rx_length_errors" , "rx_over_errors" , "rx_crc_errors" , |
2677 | "rx_frame_errors" , "rx_fifo_errors" , "rx_missed_errors" , |
2678 | "tx_aborted_errors" , "tx_carrier_errors" , "tx_fifo_errors" , |
2679 | "tx_heartbeat_errors" , "tx_window_errors" , |
2680 | /* device-specific stats */ |
2681 | "tx_deferred" , "tx_single_collisions" , "tx_multi_collisions" , |
2682 | "tx_flow_control_pause" , "rx_flow_control_pause" , |
2683 | "rx_flow_control_unsupported" , "tx_tco_packets" , "rx_tco_packets" , |
2684 | "rx_short_frame_errors" , "rx_over_length_errors" , |
2685 | }; |
2686 | #define E100_NET_STATS_LEN 21 |
2687 | #define E100_STATS_LEN ARRAY_SIZE(e100_gstrings_stats) |
2688 | |
2689 | static int e100_get_sset_count(struct net_device *netdev, int sset) |
2690 | { |
2691 | switch (sset) { |
2692 | case ETH_SS_TEST: |
2693 | return E100_TEST_LEN; |
2694 | case ETH_SS_STATS: |
2695 | return E100_STATS_LEN; |
2696 | default: |
2697 | return -EOPNOTSUPP; |
2698 | } |
2699 | } |
2700 | |
2701 | static void e100_get_ethtool_stats(struct net_device *netdev, |
2702 | struct ethtool_stats *stats, u64 *data) |
2703 | { |
	struct nic *nic = netdev_priv(netdev);
2705 | int i; |
2706 | |
2707 | for (i = 0; i < E100_NET_STATS_LEN; i++) |
2708 | data[i] = ((unsigned long *)&netdev->stats)[i]; |
2709 | |
2710 | data[i++] = nic->tx_deferred; |
2711 | data[i++] = nic->tx_single_collisions; |
2712 | data[i++] = nic->tx_multiple_collisions; |
2713 | data[i++] = nic->tx_fc_pause; |
2714 | data[i++] = nic->rx_fc_pause; |
2715 | data[i++] = nic->rx_fc_unsupported; |
2716 | data[i++] = nic->tx_tco_frames; |
2717 | data[i++] = nic->rx_tco_frames; |
2718 | data[i++] = nic->rx_short_frame_errors; |
2719 | data[i++] = nic->rx_over_length_errors; |
2720 | } |
2721 | |
2722 | static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data) |
2723 | { |
2724 | switch (stringset) { |
2725 | case ETH_SS_TEST: |
2726 | memcpy(data, e100_gstrings_test, sizeof(e100_gstrings_test)); |
2727 | break; |
2728 | case ETH_SS_STATS: |
2729 | memcpy(data, e100_gstrings_stats, sizeof(e100_gstrings_stats)); |
2730 | break; |
2731 | } |
2732 | } |
2733 | |
2734 | static const struct ethtool_ops e100_ethtool_ops = { |
2735 | .get_drvinfo = e100_get_drvinfo, |
2736 | .get_regs_len = e100_get_regs_len, |
2737 | .get_regs = e100_get_regs, |
2738 | .get_wol = e100_get_wol, |
2739 | .set_wol = e100_set_wol, |
2740 | .get_msglevel = e100_get_msglevel, |
2741 | .set_msglevel = e100_set_msglevel, |
2742 | .nway_reset = e100_nway_reset, |
2743 | .get_link = e100_get_link, |
2744 | .get_eeprom_len = e100_get_eeprom_len, |
2745 | .get_eeprom = e100_get_eeprom, |
2746 | .set_eeprom = e100_set_eeprom, |
2747 | .get_ringparam = e100_get_ringparam, |
2748 | .set_ringparam = e100_set_ringparam, |
2749 | .self_test = e100_diag_test, |
2750 | .get_strings = e100_get_strings, |
2751 | .set_phys_id = e100_set_phys_id, |
2752 | .get_ethtool_stats = e100_get_ethtool_stats, |
2753 | .get_sset_count = e100_get_sset_count, |
2754 | .get_ts_info = ethtool_op_get_ts_info, |
2755 | .get_link_ksettings = e100_get_link_ksettings, |
2756 | .set_link_ksettings = e100_set_link_ksettings, |
2757 | }; |
2758 | |
2759 | static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) |
2760 | { |
	struct nic *nic = netdev_priv(netdev);

	return generic_mii_ioctl(&nic->mii, if_mii(ifr), cmd, NULL);
2764 | } |
2765 | |
2766 | static int e100_alloc(struct nic *nic) |
2767 | { |
	nic->mem = dma_alloc_coherent(&nic->pdev->dev, sizeof(struct mem),
				      &nic->dma_addr, GFP_KERNEL);
2770 | return nic->mem ? 0 : -ENOMEM; |
2771 | } |
2772 | |
2773 | static void e100_free(struct nic *nic) |
2774 | { |
2775 | if (nic->mem) { |
		dma_free_coherent(&nic->pdev->dev, sizeof(struct mem),
				  nic->mem, nic->dma_addr);
2778 | nic->mem = NULL; |
2779 | } |
2780 | } |
2781 | |
2782 | static int e100_open(struct net_device *netdev) |
2783 | { |
	struct nic *nic = netdev_priv(netdev);
	int err = 0;

	netif_carrier_off(netdev);
	if ((err = e100_up(nic)))
		netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
2790 | return err; |
2791 | } |
2792 | |
2793 | static int e100_close(struct net_device *netdev) |
2794 | { |
	e100_down(netdev_priv(netdev));
2796 | return 0; |
2797 | } |
2798 | |
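/* Only RXFCS (keep the FCS on received frames) and RXALL (accept bad
 * frames) are handled here; a change to either requires re-issuing the
 * configure command so the controller picks up the new settings. */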
2799 | static int e100_set_features(struct net_device *netdev, |
2800 | netdev_features_t features) |
2801 | { |
	struct nic *nic = netdev_priv(netdev);
2803 | netdev_features_t changed = features ^ netdev->features; |
2804 | |
2805 | if (!(changed & (NETIF_F_RXFCS | NETIF_F_RXALL))) |
2806 | return 0; |
2807 | |
2808 | netdev->features = features; |
2809 | e100_exec_cb(nic, NULL, cb_prepare: e100_configure); |
2810 | return 1; |
2811 | } |
2812 | |
2813 | static const struct net_device_ops e100_netdev_ops = { |
2814 | .ndo_open = e100_open, |
2815 | .ndo_stop = e100_close, |
2816 | .ndo_start_xmit = e100_xmit_frame, |
2817 | .ndo_validate_addr = eth_validate_addr, |
2818 | .ndo_set_rx_mode = e100_set_multicast_list, |
2819 | .ndo_set_mac_address = e100_set_mac_address, |
2820 | .ndo_eth_ioctl = e100_do_ioctl, |
2821 | .ndo_tx_timeout = e100_tx_timeout, |
2822 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2823 | .ndo_poll_controller = e100_netpoll, |
2824 | #endif |
2825 | .ndo_set_features = e100_set_features, |
2826 | }; |
2827 | |
2828 | static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
2829 | { |
2830 | struct net_device *netdev; |
2831 | struct nic *nic; |
2832 | int err; |
2833 | |
2834 | if (!(netdev = alloc_etherdev(sizeof(struct nic)))) |
2835 | return -ENOMEM; |
2836 | |
2837 | netdev->hw_features |= NETIF_F_RXFCS; |
2838 | netdev->priv_flags |= IFF_SUPP_NOFCS; |
2839 | netdev->hw_features |= NETIF_F_RXALL; |
2840 | |
2841 | netdev->netdev_ops = &e100_netdev_ops; |
2842 | netdev->ethtool_ops = &e100_ethtool_ops; |
2843 | netdev->watchdog_timeo = E100_WATCHDOG_PERIOD; |
	strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));

	nic = netdev_priv(netdev);
	netif_napi_add_weight(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
	nic->netdev = netdev;
	nic->pdev = pdev;
	nic->msg_enable = (1 << debug) - 1;
	nic->mdio_ctrl = mdio_ctrl_hw;
	pci_set_drvdata(pdev, netdev);
2853 | |
	if ((err = pci_enable_device(pdev))) {
		netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
2856 | goto err_out_free_dev; |
2857 | } |
2858 | |
2859 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { |
2860 | netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n" ); |
2861 | err = -ENODEV; |
2862 | goto err_out_disable_pdev; |
2863 | } |
2864 | |
2865 | if ((err = pci_request_regions(pdev, DRV_NAME))) { |
2866 | netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n" ); |
2867 | goto err_out_disable_pdev; |
2868 | } |
2869 | |
	if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) {
		netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
2872 | goto err_out_free_res; |
2873 | } |
2874 | |
2875 | SET_NETDEV_DEV(netdev, &pdev->dev); |
2876 | |
2877 | if (use_io) |
2878 | netif_info(nic, probe, nic->netdev, "using i/o access mode\n" ); |
2879 | |
	nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
	if (!nic->csr) {
		netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
2883 | err = -ENOMEM; |
2884 | goto err_out_free_res; |
2885 | } |
2886 | |
2887 | if (ent->driver_data) |
2888 | nic->flags |= ich; |
2889 | else |
2890 | nic->flags &= ~ich; |
2891 | |
2892 | e100_get_defaults(nic); |
2893 | |
2894 | /* D100 MAC doesn't allow rx of vlan packets with normal MTU */ |
2895 | if (nic->mac < mac_82558_D101_A4) |
2896 | netdev->features |= NETIF_F_VLAN_CHALLENGED; |
2897 | |
2898 | /* locks must be initialized before calling hw_reset */ |
2899 | spin_lock_init(&nic->cb_lock); |
2900 | spin_lock_init(&nic->cmd_lock); |
2901 | spin_lock_init(&nic->mdio_lock); |
2902 | |
2903 | /* Reset the device before pci_set_master() in case device is in some |
2904 | * funky state and has an interrupt pending - hint: we don't have the |
2905 | * interrupt handler registered yet. */ |
2906 | e100_hw_reset(nic); |
2907 | |
	pci_set_master(pdev);
2909 | |
2910 | timer_setup(&nic->watchdog, e100_watchdog, 0); |
2911 | |
2912 | INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); |
2913 | |
2914 | if ((err = e100_alloc(nic))) { |
2915 | netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n" ); |
2916 | goto err_out_iounmap; |
2917 | } |
2918 | |
2919 | if ((err = e100_eeprom_load(nic))) |
2920 | goto err_out_free; |
2921 | |
2922 | e100_phy_init(nic); |
2923 | |
	eth_hw_addr_set(netdev, (u8 *)nic->eeprom);
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		if (!eeprom_bad_csum_allow) {
			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
			err = -EAGAIN;
			goto err_out_free;
		} else {
			netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
2932 | } |
2933 | } |
2934 | |
2935 | /* Wol magic packet can be enabled from eeprom */ |
2936 | if ((nic->mac >= mac_82558_D101_A4) && |
2937 | (le16_to_cpu(nic->eeprom[eeprom_id]) & eeprom_id_wol)) { |
2938 | nic->flags |= wol_magic; |
		device_set_wakeup_enable(&pdev->dev, true);
2940 | } |
2941 | |
2942 | /* ack any pending wake events, disable PME */ |
	pci_pme_active(pdev, false);

	strcpy(netdev->name, "eth%d");
	if ((err = register_netdev(netdev))) {
		netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n");
2948 | goto err_out_free; |
2949 | } |
	nic->cbs_pool = dma_pool_create(netdev->name,
			   &nic->pdev->dev,
			   nic->params.cbs.max * sizeof(struct cb),
			   sizeof(u32),
			   0);
	if (!nic->cbs_pool) {
		netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n");
2957 | err = -ENOMEM; |
2958 | goto err_out_pool; |
2959 | } |
2960 | netif_info(nic, probe, nic->netdev, |
2961 | "addr 0x%llx, irq %d, MAC addr %pM\n" , |
2962 | (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0), |
2963 | pdev->irq, netdev->dev_addr); |
2964 | |
2965 | return 0; |
2966 | |
2967 | err_out_pool: |
	unregister_netdev(netdev);
err_out_free:
	e100_free(nic);
err_out_iounmap:
	pci_iounmap(pdev, nic->csr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out_free_dev:
	free_netdev(netdev);
2979 | return err; |
2980 | } |
2981 | |
2982 | static void e100_remove(struct pci_dev *pdev) |
2983 | { |
2984 | struct net_device *netdev = pci_get_drvdata(pdev); |
2985 | |
2986 | if (netdev) { |
		struct nic *nic = netdev_priv(netdev);
		unregister_netdev(netdev);
		e100_free(nic);
		pci_iounmap(pdev, nic->csr);
		dma_pool_destroy(nic->cbs_pool);
		free_netdev(netdev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
2995 | } |
2996 | } |
2997 | |
2998 | #define E100_82552_SMARTSPEED 0x14 /* SmartSpeed Ctrl register */ |
2999 | #define E100_82552_REV_ANEG 0x0200 /* Reverse auto-negotiation */ |
3000 | #define E100_82552_ANEG_NOW 0x0400 /* Auto-negotiate now */ |
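/* Common shutdown/suspend path: detach and bring the interface down, then
 * report via *enable_wake whether Wake-on-LAN magic packets or ASF require
 * the device to remain armed as a wake source. */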
3001 | static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake) |
3002 | { |
3003 | struct net_device *netdev = pci_get_drvdata(pdev); |
	struct nic *nic = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev))
		e100_down(nic);
3010 | |
3011 | if ((nic->flags & wol_magic) | e100_asf(nic)) { |
3012 | /* enable reverse auto-negotiation */ |
3013 | if (nic->phy == phy_82552_v) { |
			u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
			                           E100_82552_SMARTSPEED);

			mdio_write(netdev, nic->mii.phy_id,
			           E100_82552_SMARTSPEED, smartspeed |
			           E100_82552_REV_ANEG | E100_82552_ANEG_NOW);
3020 | } |
3021 | *enable_wake = true; |
3022 | } else { |
3023 | *enable_wake = false; |
3024 | } |
3025 | |
	pci_disable_device(pdev);
3027 | } |
3028 | |
3029 | static int __e100_power_off(struct pci_dev *pdev, bool wake) |
3030 | { |
3031 | if (wake) |
		return pci_prepare_to_sleep(pdev);

	pci_wake_from_d3(pdev, false);
	pci_set_power_state(pdev, PCI_D3hot);
3036 | |
3037 | return 0; |
3038 | } |
3039 | |
3040 | static int __maybe_unused e100_suspend(struct device *dev_d) |
3041 | { |
3042 | bool wake; |
3043 | |
	__e100_shutdown(to_pci_dev(dev_d), &wake);
3045 | |
3046 | return 0; |
3047 | } |
3048 | |
3049 | static int __maybe_unused e100_resume(struct device *dev_d) |
3050 | { |
	struct net_device *netdev = dev_get_drvdata(dev_d);
	struct nic *nic = netdev_priv(netdev);
3053 | int err; |
3054 | |
3055 | err = pci_enable_device(to_pci_dev(dev_d)); |
3056 | if (err) { |
		netdev_err(netdev, "Resume cannot enable PCI device, aborting\n");
3058 | return err; |
3059 | } |
3060 | pci_set_master(to_pci_dev(dev_d)); |
3061 | |
3062 | /* disable reverse auto-negotiation */ |
3063 | if (nic->phy == phy_82552_v) { |
		u16 smartspeed = mdio_read(netdev, nic->mii.phy_id,
		                           E100_82552_SMARTSPEED);

		mdio_write(netdev, nic->mii.phy_id,
		           E100_82552_SMARTSPEED,
		           smartspeed & ~(E100_82552_REV_ANEG));
3070 | } |
3071 | |
	if (netif_running(netdev))
		e100_up(nic);

	netif_device_attach(netdev);
3076 | |
3077 | return 0; |
3078 | } |
3079 | |
3080 | static void e100_shutdown(struct pci_dev *pdev) |
3081 | { |
3082 | bool wake; |
	__e100_shutdown(pdev, &wake);
3084 | if (system_state == SYSTEM_POWER_OFF) |
3085 | __e100_power_off(pdev, wake); |
3086 | } |
3087 | |
3088 | /* ------------------ PCI Error Recovery infrastructure -------------- */ |
3089 | /** |
3090 | * e100_io_error_detected - called when PCI error is detected. |
3091 | * @pdev: Pointer to PCI device |
3092 | * @state: The current pci connection state |
3093 | */ |
3094 | static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) |
3095 | { |
3096 | struct net_device *netdev = pci_get_drvdata(pdev); |
	struct nic *nic = netdev_priv(netdev);

	netif_device_detach(netdev);
3100 | |
3101 | if (state == pci_channel_io_perm_failure) |
3102 | return PCI_ERS_RESULT_DISCONNECT; |
3103 | |
	if (netif_running(netdev))
		e100_down(nic);
	pci_disable_device(pdev);
3107 | |
3108 | /* Request a slot reset. */ |
3109 | return PCI_ERS_RESULT_NEED_RESET; |
3110 | } |
3111 | |
3112 | /** |
3113 | * e100_io_slot_reset - called after the pci bus has been reset. |
3114 | * @pdev: Pointer to PCI device |
3115 | * |
3116 | * Restart the card from scratch. |
3117 | */ |
3118 | static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev) |
3119 | { |
3120 | struct net_device *netdev = pci_get_drvdata(pdev); |
	struct nic *nic = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		pr_err("Cannot re-enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
3128 | |
3129 | /* Only one device per card can do a reset */ |
3130 | if (0 != PCI_FUNC(pdev->devfn)) |
3131 | return PCI_ERS_RESULT_RECOVERED; |
3132 | e100_hw_reset(nic); |
3133 | e100_phy_init(nic); |
3134 | |
3135 | return PCI_ERS_RESULT_RECOVERED; |
3136 | } |
3137 | |
3138 | /** |
3139 | * e100_io_resume - resume normal operations |
3140 | * @pdev: Pointer to PCI device |
3141 | * |
3142 | * Resume normal operations after an error recovery |
3143 | * sequence has been completed. |
3144 | */ |
3145 | static void e100_io_resume(struct pci_dev *pdev) |
3146 | { |
3147 | struct net_device *netdev = pci_get_drvdata(pdev); |
	struct nic *nic = netdev_priv(netdev);

	/* ack any pending wake events, disable PME */
	pci_enable_wake(pdev, PCI_D0, 0);

	netif_device_attach(netdev);
	if (netif_running(netdev)) {
		e100_open(netdev);
		mod_timer(&nic->watchdog, jiffies);
3157 | } |
3158 | } |
3159 | |
3160 | static const struct pci_error_handlers e100_err_handler = { |
3161 | .error_detected = e100_io_error_detected, |
3162 | .slot_reset = e100_io_slot_reset, |
3163 | .resume = e100_io_resume, |
3164 | }; |
3165 | |
3166 | static SIMPLE_DEV_PM_OPS(e100_pm_ops, e100_suspend, e100_resume); |
3167 | |
3168 | static struct pci_driver e100_driver = { |
3169 | .name = DRV_NAME, |
3170 | .id_table = e100_id_table, |
3171 | .probe = e100_probe, |
3172 | .remove = e100_remove, |
3173 | |
3174 | /* Power Management hooks */ |
3175 | .driver.pm = &e100_pm_ops, |
3176 | |
3177 | .shutdown = e100_shutdown, |
3178 | .err_handler = &e100_err_handler, |
3179 | }; |
3180 | |
3181 | static int __init e100_init_module(void) |
3182 | { |
3183 | if (((1 << debug) - 1) & NETIF_MSG_DRV) { |
3184 | pr_info("%s\n" , DRV_DESCRIPTION); |
3185 | pr_info("%s\n" , DRV_COPYRIGHT); |
3186 | } |
3187 | return pci_register_driver(&e100_driver); |
3188 | } |
3189 | |
3190 | static void __exit e100_cleanup_module(void) |
3191 | { |
	pci_unregister_driver(&e100_driver);
3193 | } |
3194 | |
3195 | module_init(e100_init_module); |
3196 | module_exit(e100_cleanup_module); |
3197 | |