// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2006 Intel Corporation. */

#include "e1000.h"
#include <net/ip6_checksum.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>

char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";

/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static const struct pci_device_id e1000_pci_tbl[] = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);

int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
int e1000_open(struct net_device *netdev);
int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info_task(struct work_struct *work);
static void e1000_watchdog(struct work_struct *work);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring);
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do);
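/* Deliberately empty: installed in place of adapter->alloc_rx_buf to keep
 * Rx buffers from being reallocated while the interface is being taken
 * down, so callers can invoke the allocator hook unconditionally.
 */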
static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count)
{
}
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb);

static bool e1000_vlan_used(struct e1000_adapter *adapter);
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features);
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on);
static int e1000_vlan_rx_add_vid(struct net_device *netdev,
				 __be16 proto, u16 vid);
static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
				  __be16 proto, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

static int __maybe_unused e1000_suspend(struct device *dev);
static int __maybe_unused e1000_resume(struct device *dev);
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll(struct net_device *netdev);
#endif

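/* Received frames at or below copybreak bytes are copied into a fresh,
 * minimally sized skb so the original full-sized Rx buffer can be reused;
 * setting the parameter to 0 disables the copy entirely.
 */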
#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static const struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};

static SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume);

static struct pci_driver e1000_driver = {
	.name     = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = e1000_remove,
	.driver = {
		.pm = &e1000_pm_ops,
	},
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL v2");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/**
 * e1000_get_hw_dev - helper function for getting netdev
 * @hw: pointer to HW struct
 *
 * return device used by hardware layer to print debugging information
 *
 **/
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
	int ret;
	pr_info("%s\n", e1000_driver_string);

	pr_info("%s\n", e1000_copyright);

	ret = pci_register_driver(&e1000_driver);
	if (copybreak != COPYBREAK_DEFAULT) {
		if (copybreak == 0)
			pr_info("copybreak disabled\n");
		else
			pr_info("copybreak enabled for "
				"packets <= %u bytes\n", copybreak);
	}
	return ret;
}

module_init(e1000_init_module);

/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);

static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irq_handler_t handler = e1000_intr;
	int irq_flags = IRQF_SHARED;
	int err;

	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
			  netdev);
	if (err) {
		e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
	}

	return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

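	/* Writing all 1s to the Interrupt Mask Clear register masks every
	 * interrupt cause; the flush forces the posted write out before we
	 * wait for any in-flight handler to finish.
	 */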
	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}

static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (!e1000_vlan_used(adapter))
		return;

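	/* The VLAN id carried in the manageability cookie must stay in the
	 * VLAN filter table so management traffic keeps flowing; when the id
	 * changes, the old one is removed unless the stack still uses it.
	 */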
	if (!test_bit(vid, adapter->active_vlans)) {
		if (hw->mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
			e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
			adapter->mng_vlan_id = vid;
		} else {
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
		    (vid != old_vid) &&
		    !test_bit(old_vid, adapter->active_vlans))
			e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
					       old_vid);
	} else {
		adapter->mng_vlan_id = vid;
	}
}

static void e1000_init_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		ew32(MANC, manc);
	}
}

static void e1000_release_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;

		ew32(MANC, manc);
	}
}

/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	e1000_set_rx_mode(netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
		adapter->alloc_rx_buf(adapter, ring,
				      E1000_DESC_UNUSED(ring));
	}
}

int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_wake_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);
	return 0;
}

/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000_reset ***
 **/
void e1000_power_up_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 mii_reg = 0;

	/* Just clear the power down bit to wake the phy back up */
	if (hw->media_type == e1000_media_type_copper) {
		/* according to the manual, the phy will retain its
		 * settings across a power-down/up cycle
		 */
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg &= ~MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
	}
}

static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Power down the PHY so no link is implied when interface is down *
	 * The PHY cannot be powered down if any of the following is true *
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active
	 */
	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
	    hw->media_type == e1000_media_type_copper) {
		u16 mii_reg = 0;

		switch (hw->mac_type) {
		case e1000_82540:
		case e1000_82545:
		case e1000_82545_rev_3:
		case e1000_82546:
		case e1000_ce4100:
		case e1000_82546_rev_3:
		case e1000_82541:
		case e1000_82541_rev_2:
		case e1000_82547:
		case e1000_82547_rev_2:
			if (er32(MANC) & E1000_MANC_SMBUS_EN)
				goto out;
			break;
		default:
			goto out;
		}
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
		msleep(1);
	}
out:
	return;
}

static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
	set_bit(__E1000_DOWN, &adapter->flags);

	cancel_delayed_work_sync(&adapter->watchdog_task);

	/*
	 * Since the watchdog task can reschedule other tasks, we should cancel
	 * it first, otherwise we can run into the situation when a work is
	 * still running after the adapter has been turned down.
	 */

	cancel_delayed_work_sync(&adapter->phy_info_task);
	cancel_delayed_work_sync(&adapter->fifo_stall_task);

	/* Only kill reset task if adapter is not resetting */
	if (!test_bit(__E1000_RESETTING, &adapter->flags))
		cancel_work_sync(&adapter->reset_task);
}

void e1000_down(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl, tctl;

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	E1000_WRITE_FLUSH();
	msleep(10);

	/* Set the carrier off after transmits have been disabled in the
	 * hardware, to avoid race conditions with e1000_watchdog() (which
	 * may be running concurrently to us, checking for the carrier
	 * bit to decide whether it should enable transmits again). Such
	 * a race condition would result into transmission being disabled
	 * in the hardware until the next IFF_DOWN+IFF_UP cycle.
	 */
	netif_carrier_off(netdev);

	napi_disable(&adapter->napi);

	e1000_irq_disable(adapter);

	/* Setting DOWN must be after irq_disable to prevent
	 * a screaming interrupt. Setting DOWN also prevents
	 * tasks from rescheduling.
	 */
	e1000_down_and_stop(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}

void e1000_reinit_locked(struct e1000_adapter *adapter)
{
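	/* sleeping spin: wait until we own the RESETTING bit so that only
	 * one reset runs at a time
	 */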
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);

	/* only run the task if not already down */
	if (!test_bit(__E1000_DOWN, &adapter->flags)) {
		e1000_down(adapter);
		e1000_up(adapter);
	}

	clear_bit(__E1000_RESETTING, &adapter->flags);
}

void e1000_reset(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	bool legacy_pba_adjust = false;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
	case e1000_82544:
	case e1000_82540:
	case e1000_82541:
	case e1000_82541_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_48K;
		break;
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_ce4100:
	case e1000_82546_rev_3:
		pba = E1000_PBA_48K;
		break;
	case e1000_82547:
	case e1000_82547_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_30K;
		break;
	case e1000_undefined:
	case e1000_num_macs:
		break;
	}

	if (legacy_pba_adjust) {
		if (hw->max_frame_size > E1000_RXBUFFER_8192)
			pba -= 8; /* allocate more FIFO for Tx */

		if (hw->mac_type == e1000_82547) {
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
		}
	} else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* adjust PBA for jumbo frames */
		ew32(PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB. Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB.
		 */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the Tx fifo also stores 16 bytes of information about the Tx
		 * but don't include ethernet FCS because hardware appends it
		 */
		min_tx_space = (hw->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = hw->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;
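		/* both minima are now in KB, the unit the PBA register uses */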

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation
		 */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (hw->mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on Rx space, Rx wins and must trump Tx
			 * adjustment or use Early Receive if available
			 */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
	}

	ew32(PBA, pba);

	/* flow control settings:
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - hw->max_frame_size));

	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
	hw->fc_low_water = hw->fc_high_water - 8;
	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
	hw->fc_send_xon = 1;
	hw->fc = hw->original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(hw);
	if (hw->mac_type >= e1000_82544)
		ew32(WUC, 0);

	if (e1000_init_hw(hw))
		e_dev_err("Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (hw->mac_type >= e1000_82544 &&
	    hw->autoneg == 1 &&
	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
		u32 ctrl = er32(CTRL);
		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload
		 */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		ew32(CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(hw);
	e1000_phy_get_info(hw, &adapter->phy_info);

	e1000_release_manageability(adapter);
}

/* Dump the eeprom for users having checksum issues */
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ethtool_eeprom eeprom;
	const struct ethtool_ops *ops = netdev->ethtool_ops;
	u8 *data;
	int i;
	u16 csum_old, csum_new = 0;

	eeprom.len = ops->get_eeprom_len(netdev);
	eeprom.offset = 0;

	data = kmalloc(eeprom.len, GFP_KERNEL);
	if (!data)
		return;

	ops->get_eeprom(netdev, &eeprom, data);

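	/* EEPROM words are stored little-endian; the checksum word is chosen
	 * so that all words up to and including it sum to EEPROM_SUM.
	 */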
	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
		csum_new += data[i] + (data[i + 1] << 8);
	csum_new = EEPROM_SUM - csum_new;

	pr_err("/*********************/\n");
	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
	pr_err("Calculated              : 0x%04x\n", csum_new);

	pr_err("Offset    Values\n");
	pr_err("========  ======\n");
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);

	pr_err("Include this output when contacting your support provider.\n");
	pr_err("This is not a software error! Something bad happened to\n");
	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
	pr_err("result in further problems, possibly loss of data,\n");
	pr_err("corruption or system hangs!\n");
	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
	pr_err("which is invalid and requires you to set the proper MAC\n");
	pr_err("address manually before continuing to enable this network\n");
	pr_err("device. Please inspect the EEPROM dump and report the\n");
	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
	pr_err("/*********************/\n");

	kfree(data);
}

/**
 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 * @pdev: PCI device information struct
 *
 * Return true if an adapter needs ioport resources
 **/
static int e1000_is_need_ioport(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case E1000_DEV_ID_82540EM:
	case E1000_DEV_ID_82540EM_LOM:
	case E1000_DEV_ID_82540EP:
	case E1000_DEV_ID_82540EP_LOM:
	case E1000_DEV_ID_82540EP_LP:
	case E1000_DEV_ID_82541EI:
	case E1000_DEV_ID_82541EI_MOBILE:
	case E1000_DEV_ID_82541ER:
	case E1000_DEV_ID_82541ER_LOM:
	case E1000_DEV_ID_82541GI:
	case E1000_DEV_ID_82541GI_LF:
	case E1000_DEV_ID_82541GI_MOBILE:
	case E1000_DEV_ID_82544EI_COPPER:
	case E1000_DEV_ID_82544EI_FIBER:
	case E1000_DEV_ID_82544GC_COPPER:
	case E1000_DEV_ID_82544GC_LOM:
	case E1000_DEV_ID_82545EM_COPPER:
	case E1000_DEV_ID_82545EM_FIBER:
	case E1000_DEV_ID_82546EB_COPPER:
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546EB_QUAD_COPPER:
		return true;
	default:
		return false;
	}
}

static netdev_features_t e1000_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	/* Since there is no support for separate Rx/Tx vlan accel
	 * enable/disable make sure Tx flag is always in same state as Rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int e1000_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		e1000_vlan_mode(netdev, features);

	if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
		return 0;

	netdev->features = features;
	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);

	if (netif_running(netdev))
		e1000_reinit_locked(adapter);
	else
		e1000_reset(adapter);

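	/* a positive return tells __netdev_update_features() that the driver
	 * has already applied the new feature set itself
	 */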
	return 1;
}

static const struct net_device_ops e1000_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_set_rx_mode	= e1000_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_eth_ioctl		= e1000_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
	.ndo_fix_features	= e1000_fix_features,
	.ndo_set_features	= e1000_set_features,
};

/**
 * e1000_init_hw_struct - initialize members of hw struct
 * @adapter: board private struct
 * @hw: structure used by e1000_hw.c
 *
 * Factors out initialization of the e1000_hw struct to its own function
 * that can be called very early at init (just after struct allocation).
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 * Returns negative error codes if MAC type setup fails.
 */
static int e1000_init_hw_struct(struct e1000_adapter *adapter,
				struct e1000_hw *hw)
{
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	hw->max_frame_size = adapter->netdev->mtu +
			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

	/* identify the MAC */
	if (e1000_set_mac_type(hw)) {
		e_err(probe, "Unknown MAC Type\n");
		return -EIO;
	}

	switch (hw->mac_type) {
	default:
		break;
	case e1000_82541:
	case e1000_82547:
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
		hw->phy_init_script = 1;
		break;
	}

	e1000_set_media_type(hw);
	e1000_get_bus_info(hw);

	hw->wait_autoneg_complete = false;
	hw->tbi_compatibility_en = true;
	hw->adaptive_ifs = true;

	/* Copper options */

	if (hw->media_type == e1000_media_type_copper) {
		hw->mdix = AUTO_ALL_MODES;
		hw->disable_polarity_correction = false;
		hw->master_slave = E1000_MASTER_SLAVE;
	}

	return 0;
}

/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter = NULL;
	struct e1000_hw *hw;

	static int cards_found;
	static int global_quad_port_a; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 tmp = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	int bars, need_ioport;
	bool disable_dev = false;

	/* do not allocate ioport bars when not needed */
	need_ioport = e1000_is_need_ioport(pdev);
	if (need_ioport) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
		err = pci_enable_device(pdev);
	} else {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		err = pci_enable_device_mem(pdev);
	}
	if (err)
		return err;

	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	adapter->bars = bars;
	adapter->need_ioport = need_ioport;

	hw = &adapter->hw;
	hw->back = adapter;

	err = -EIO;
	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!hw->hw_addr)
		goto err_ioremap;

	if (adapter->need_ioport) {
		for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) {
			if (pci_resource_len(pdev, i) == 0)
				continue;
			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
				hw->io_base = pci_resource_start(pdev, i);
				break;
			}
		}
	}

	/* make ready for any if (hw->...) below */
	err = e1000_init_hw_struct(adapter, hw);
	if (err)
		goto err_sw_init;

	/* there is a workaround being applied below that limits
	 * 64-bit DMA addresses to 64-bit hardware. There are some
	 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
	 */
	pci_using_dac = 0;
	if ((hw->bus_type == e1000_bus_type_pcix) &&
	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA config, aborting\n");
			goto err_dma;
		}
	}

	netdev->netdev_ops = &e1000_netdev_ops;
	e1000_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean);

	strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));

	adapter->bd_number = cards_found;

	/* setup the private structure */

	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;
	if (hw->mac_type == e1000_ce4100) {
		hw->ce4100_gbe_mdio_base_virt =
			ioremap(pci_resource_start(pdev, BAR_1),
				pci_resource_len(pdev, BAR_1));

		if (!hw->ce4100_gbe_mdio_base_virt)
			goto err_mdio_ioremap;
	}

	if (hw->mac_type >= e1000_82543) {
		netdev->hw_features = NETIF_F_SG |
				      NETIF_F_HW_CSUM |
				      NETIF_F_HW_VLAN_CTAG_RX;
		netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
				   NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	if ((hw->mac_type >= e1000_82544) &&
	    (hw->mac_type != e1000_82547))
		netdev->hw_features |= NETIF_F_TSO;

	netdev->priv_flags |= IFF_SUPP_NOFCS;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= (NETIF_F_RXCSUM |
				NETIF_F_RXALL |
				NETIF_F_RXFCS);

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	netdev->vlan_features |= (NETIF_F_TSO |
				  NETIF_F_HW_CSUM |
				  NETIF_F_SG);

	/* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
	if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
	    hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
		netdev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 46 - 16110 */
	netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
	netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);

	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);

	/* initialize eeprom parameters */
	if (e1000_init_eeprom_params(hw)) {
		e_err(probe, "EEPROM initialization failed\n");
		goto err_eeprom;
	}

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state
	 */

	e1000_reset_hw(hw);

	/* make sure the EEPROM is good */
	if (e1000_validate_eeprom_checksum(hw) < 0) {
		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
		e1000_dump_eeprom(adapter);
		/* set MAC address to all zeroes to invalidate and temporary
		 * disable this device for the user. This blocks regular
		 * traffic while still permitting ethtool ioctls from reaching
		 * the hardware as well as allowing the user to run the
		 * interface after manually setting a hw addr using
		 * `ip set address`
		 */
		memset(hw->mac_addr, 0, netdev->addr_len);
	} else {
		/* copy the MAC address out of the EEPROM */
		if (e1000_read_mac_addr(hw))
			e_err(probe, "EEPROM Read Error\n");
	}
	/* don't block initialization here due to bad MAC address */
	eth_hw_addr_set(netdev, hw->mac_addr);

	if (!is_valid_ether_addr(netdev->dev_addr))
		e_err(probe, "Invalid MAC Address\n");


	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
			  e1000_82547_tx_fifo_stall_task);
	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
	INIT_WORK(&adapter->reset_task, e1000_reset_task);

	e1000_check_options(adapter);

	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(hw,
				  EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
			e1000_read_eeprom(hw,
					  EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		}
		fallthrough;
	default:
		e1000_read_eeprom(hw,
				  EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port
	 */
	switch (pdev->device) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (er32(STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->quad_port_a = true;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* Auto detect PHY address */
	if (hw->mac_type == e1000_ce4100) {
		for (i = 0; i < 32; i++) {
			hw->phy_addr = i;
			e1000_read_phy_reg(hw, PHY_ID2, &tmp);

			if (tmp != 0 && tmp != 0xFF)
				break;
		}

		if (i >= 32)
			goto err_eeprom;
	}

	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	e1000_vlan_filter_on_off(adapter, false);

	/* print bus type/speed/width info */
	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
	       netdev->dev_addr);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");

	cards_found++;
	return 0;

err_register:
err_eeprom:
	e1000_phy_hw_reset(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_dma:
err_sw_init:
err_mdio_ioremap:
	iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
err_ioremap:
	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, bars);
err_pci_reg:
	if (!adapter || disable_dev)
		pci_disable_device(pdev);
	return err;
}

/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. That could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	bool disable_dev;

	e1000_down_and_stop(adapter);
	e1000_release_manageability(adapter);

	unregister_netdev(netdev);

	e1000_phy_hw_reset(hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	if (hw->mac_type == e1000_ce4100)
		iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, adapter->bars);

	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
	free_netdev(netdev);

	if (disable_dev)
		pci_disable_device(pdev);
}

/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * e1000_init_hw_struct MUST be called before this function
 **/
static int e1000_sw_init(struct e1000_adapter *adapter)
{
	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;

	if (e1000_alloc_queues(adapter)) {
		e_err(probe, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	e1000_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);

	set_bit(__E1000_DOWN, &adapter->flags);

	return 0;
}

/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int e1000_alloc_queues(struct e1000_adapter *adapter)
{
	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct e1000_tx_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct e1000_rx_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	return E1000_SUCCESS;
}

/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog task is started,
 * and the stack is notified that the interface is ready.
 **/
int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->flags))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = e1000_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	e1000_power_up_phy(adapter);

	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as e1000_up() */
	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_start_queue(netdev);

	/* fire a link status change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);

	return E1000_SUCCESS;

err_req_irq:
	e1000_power_down_phy(adapter);
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}

/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the drivers control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int count = E1000_CHECK_RESET_COUNT;

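	/* wait, up to E1000_CHECK_RESET_COUNT tries of 10-20 ms each, for an
	 * in-progress reset to release the RESETTING bit before tearing down
	 */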
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--)
		usleep_range(10000, 20000);

	WARN_ON(count < 0);

	/* signal that we're down so that the reset task will no longer run */
	set_bit(__E1000_DOWN, &adapter->flags);
	clear_bit(__E1000_RESETTING, &adapter->flags);

	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it)
	 */
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
		e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
				       adapter->mng_vlan_id);
	}

	return 0;
}

/**
 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 **/
static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
				  unsigned long len)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned long begin = (unsigned long)start;
	unsigned long end = begin + len;

	/* First rev 82545 and 82546 need to not allow any memory
	 * write location to cross 64k boundary due to errata 23
	 */
	if (hw->mac_type == e1000_82545 ||
	    hw->mac_type == e1000_ce4100 ||
	    hw->mac_type == e1000_82546) {
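		/* first and last byte share a 64 KB page iff their addresses
		 * agree in every bit above bit 15
		 */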
1485 | return ((begin ^ (end - 1)) >> 16) == 0; |
1486 | } |
1487 | |
1488 | return true; |
1489 | } |
1490 | |
1491 | /** |
1492 | * e1000_setup_tx_resources - allocate Tx resources (Descriptors) |
1493 | * @adapter: board private structure |
1494 | * @txdr: tx descriptor ring (for a specific queue) to setup |
1495 | * |
1496 | * Return 0 on success, negative on failure |
1497 | **/ |
1498 | static int e1000_setup_tx_resources(struct e1000_adapter *adapter, |
1499 | struct e1000_tx_ring *txdr) |
1500 | { |
1501 | struct pci_dev *pdev = adapter->pdev; |
1502 | int size; |
1503 | |
1504 | size = sizeof(struct e1000_tx_buffer) * txdr->count; |
1505 | txdr->buffer_info = vzalloc(size); |
1506 | if (!txdr->buffer_info) |
1507 | return -ENOMEM; |
1508 | |
1509 | /* round up to nearest 4K */ |
1510 | |
1511 | txdr->size = txdr->count * sizeof(struct e1000_tx_desc); |
1512 | txdr->size = ALIGN(txdr->size, 4096); |
1513 | |
1514 | txdr->desc = dma_alloc_coherent(dev: &pdev->dev, size: txdr->size, dma_handle: &txdr->dma, |
1515 | GFP_KERNEL); |
1516 | if (!txdr->desc) { |
1517 | setup_tx_desc_die: |
1518 | vfree(addr: txdr->buffer_info); |
1519 | return -ENOMEM; |
1520 | } |
1521 | |
1522 | /* Fix for errata 23, can't cross 64kB boundary */ |
1523 | if (!e1000_check_64k_bound(adapter, start: txdr->desc, len: txdr->size)) { |
1524 | void *olddesc = txdr->desc; |
1525 | dma_addr_t olddma = txdr->dma; |
1526 | e_err(tx_err, "txdr align check failed: %u bytes at %p\n" , |
1527 | txdr->size, txdr->desc); |
1528 | /* Try again, without freeing the previous */ |
1529 | txdr->desc = dma_alloc_coherent(dev: &pdev->dev, size: txdr->size, |
1530 | dma_handle: &txdr->dma, GFP_KERNEL); |
1531 | /* Failed allocation, critical failure */ |
1532 | if (!txdr->desc) { |
1533 | dma_free_coherent(dev: &pdev->dev, size: txdr->size, cpu_addr: olddesc, |
1534 | dma_handle: olddma); |
1535 | goto setup_tx_desc_die; |
1536 | } |
1537 | |
1538 | if (!e1000_check_64k_bound(adapter, start: txdr->desc, len: txdr->size)) { |
1539 | /* give up */ |
1540 | dma_free_coherent(dev: &pdev->dev, size: txdr->size, cpu_addr: txdr->desc, |
1541 | dma_handle: txdr->dma); |
1542 | dma_free_coherent(dev: &pdev->dev, size: txdr->size, cpu_addr: olddesc, |
1543 | dma_handle: olddma); |
1544 | e_err(probe, "Unable to allocate aligned memory " |
1545 | "for the transmit descriptor ring\n" ); |
1546 | vfree(addr: txdr->buffer_info); |
1547 | return -ENOMEM; |
1548 | } else { |
1549 | /* Free old allocation, new allocation was successful */ |
1550 | dma_free_coherent(dev: &pdev->dev, size: txdr->size, cpu_addr: olddesc, |
1551 | dma_handle: olddma); |
1552 | } |
1553 | } |
1554 | memset(txdr->desc, 0, txdr->size); |
1555 | |
1556 | txdr->next_to_use = 0; |
1557 | txdr->next_to_clean = 0; |
1558 | |
1559 | return 0; |
1560 | } |
1561 | |
1562 | /** |
1563 | * e1000_setup_all_tx_resources - wrapper to allocate Tx resources |
1564 | * (Descriptors) for all queues |
1565 | * @adapter: board private structure |
1566 | * |
1567 | * Return 0 on success, negative on failure |
1568 | **/ |
1569 | int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) |
1570 | { |
1571 | int i, err = 0; |
1572 | |
1573 | for (i = 0; i < adapter->num_tx_queues; i++) { |
		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				e1000_free_tx_resources(adapter,
							&adapter->tx_ring[i]);
1580 | break; |
1581 | } |
1582 | } |
1583 | |
1584 | return err; |
1585 | } |
1586 | |
1587 | /** |
1588 | * e1000_configure_tx - Configure 8254x Transmit Unit after Reset |
1589 | * @adapter: board private structure |
1590 | * |
1591 | * Configure the Tx unit of the MAC after a reset. |
1592 | **/ |
1593 | static void e1000_configure_tx(struct e1000_adapter *adapter) |
1594 | { |
1595 | u64 tdba; |
1596 | struct e1000_hw *hw = &adapter->hw; |
1597 | u32 tdlen, tctl, tipg; |
1598 | u32 ipgr1, ipgr2; |
1599 | |
1600 | /* Setup the HW Tx Head and Tail descriptor pointers */ |
1601 | |
1602 | switch (adapter->num_tx_queues) { |
1603 | case 1: |
1604 | default: |
1605 | tdba = adapter->tx_ring[0].dma; |
1606 | tdlen = adapter->tx_ring[0].count * |
1607 | sizeof(struct e1000_tx_desc); |
1608 | ew32(TDLEN, tdlen); |
1609 | ew32(TDBAH, (tdba >> 32)); |
1610 | ew32(TDBAL, (tdba & 0x00000000ffffffffULL)); |
1611 | ew32(TDT, 0); |
1612 | ew32(TDH, 0); |
1613 | adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? |
1614 | E1000_TDH : E1000_82542_TDH); |
1615 | adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? |
1616 | E1000_TDT : E1000_82542_TDT); |
1617 | break; |
1618 | } |
1619 | |
1620 | /* Set the default values for the Tx Inter Packet Gap timer */ |
1621 | if ((hw->media_type == e1000_media_type_fiber || |
1622 | hw->media_type == e1000_media_type_internal_serdes)) |
1623 | tipg = DEFAULT_82543_TIPG_IPGT_FIBER; |
1624 | else |
1625 | tipg = DEFAULT_82543_TIPG_IPGT_COPPER; |
1626 | |
1627 | switch (hw->mac_type) { |
1628 | case e1000_82542_rev2_0: |
1629 | case e1000_82542_rev2_1: |
1630 | tipg = DEFAULT_82542_TIPG_IPGT; |
1631 | ipgr1 = DEFAULT_82542_TIPG_IPGR1; |
1632 | ipgr2 = DEFAULT_82542_TIPG_IPGR2; |
1633 | break; |
1634 | default: |
1635 | ipgr1 = DEFAULT_82543_TIPG_IPGR1; |
1636 | ipgr2 = DEFAULT_82543_TIPG_IPGR2; |
1637 | break; |
1638 | } |
1639 | tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; |
1640 | tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; |
1641 | ew32(TIPG, tipg); |
1642 | |
1643 | /* Set the Tx Interrupt Delay register */ |
1644 | |
1645 | ew32(TIDV, adapter->tx_int_delay); |
1646 | if (hw->mac_type >= e1000_82540) |
1647 | ew32(TADV, adapter->tx_abs_int_delay); |
1648 | |
1649 | /* Program the Transmit Control Register */ |
1650 | |
1651 | tctl = er32(TCTL); |
1652 | tctl &= ~E1000_TCTL_CT; |
1653 | tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | |
1654 | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); |
1655 | |
1656 | e1000_config_collision_dist(hw); |
1657 | |
1658 | /* Setup Transmit Descriptor Settings for eop descriptor */ |
1659 | adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; |
1660 | |
1661 | /* only set IDE if we are delaying interrupts using the timers */ |
1662 | if (adapter->tx_int_delay) |
1663 | adapter->txd_cmd |= E1000_TXD_CMD_IDE; |
1664 | |
1665 | if (hw->mac_type < e1000_82543) |
1666 | adapter->txd_cmd |= E1000_TXD_CMD_RPS; |
1667 | else |
1668 | adapter->txd_cmd |= E1000_TXD_CMD_RS; |
1669 | |
1670 | /* Cache if we're 82544 running in PCI-X because we'll |
1671 | * need this to apply a workaround later in the send path. |
1672 | */ |
1673 | if (hw->mac_type == e1000_82544 && |
1674 | hw->bus_type == e1000_bus_type_pcix) |
1675 | adapter->pcix_82544 = true; |
1676 | |
1677 | ew32(TCTL, tctl); |
1678 | |
1679 | } |
1680 | |
1681 | /** |
1682 | * e1000_setup_rx_resources - allocate Rx resources (Descriptors) |
1683 | * @adapter: board private structure |
1684 | * @rxdr: rx descriptor ring (for a specific queue) to setup |
1685 | * |
1686 | * Returns 0 on success, negative on failure |
1687 | **/ |
1688 | static int e1000_setup_rx_resources(struct e1000_adapter *adapter, |
1689 | struct e1000_rx_ring *rxdr) |
1690 | { |
1691 | struct pci_dev *pdev = adapter->pdev; |
1692 | int size, desc_len; |
1693 | |
1694 | size = sizeof(struct e1000_rx_buffer) * rxdr->count; |
1695 | rxdr->buffer_info = vzalloc(size); |
1696 | if (!rxdr->buffer_info) |
1697 | return -ENOMEM; |
1698 | |
1699 | desc_len = sizeof(struct e1000_rx_desc); |
1700 | |
1701 | /* Round up to nearest 4K */ |
1702 | |
1703 | rxdr->size = rxdr->count * desc_len; |
1704 | rxdr->size = ALIGN(rxdr->size, 4096); |
1705 | |
	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					GFP_KERNEL);
	if (!rxdr->desc) {
setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;
		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
		      rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous */
		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
						&rxdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!rxdr->desc) {
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			goto setup_rx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
					  rxdr->dma);
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory for "
			      "the Rx descriptor ring\n");
			goto setup_rx_desc_die;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
		}
1743 | } |
1744 | } |
1745 | memset(rxdr->desc, 0, rxdr->size); |
1746 | |
1747 | rxdr->next_to_clean = 0; |
1748 | rxdr->next_to_use = 0; |
1749 | rxdr->rx_skb_top = NULL; |
1750 | |
1751 | return 0; |
1752 | } |
1753 | |
1754 | /** |
1755 | * e1000_setup_all_rx_resources - wrapper to allocate Rx resources |
1756 | * (Descriptors) for all queues |
1757 | * @adapter: board private structure |
1758 | * |
1759 | * Return 0 on success, negative on failure |
1760 | **/ |
1761 | int e1000_setup_all_rx_resources(struct e1000_adapter *adapter) |
1762 | { |
1763 | int i, err = 0; |
1764 | |
1765 | for (i = 0; i < adapter->num_rx_queues; i++) { |
		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (err) {
			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				e1000_free_rx_resources(adapter,
							&adapter->rx_ring[i]);
1772 | break; |
1773 | } |
1774 | } |
1775 | |
1776 | return err; |
1777 | } |
1778 | |
1779 | /** |
1780 | * e1000_setup_rctl - configure the receive control registers |
1781 | * @adapter: Board private structure |
1782 | **/ |
1783 | static void e1000_setup_rctl(struct e1000_adapter *adapter) |
1784 | { |
1785 | struct e1000_hw *hw = &adapter->hw; |
1786 | u32 rctl; |
1787 | |
1788 | rctl = er32(RCTL); |
1789 | |
1790 | rctl &= ~(3 << E1000_RCTL_MO_SHIFT); |
1791 | |
1792 | rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO | |
1793 | E1000_RCTL_RDMTS_HALF | |
1794 | (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); |
1795 | |
1796 | if (hw->tbi_compatibility_on == 1) |
1797 | rctl |= E1000_RCTL_SBP; |
1798 | else |
1799 | rctl &= ~E1000_RCTL_SBP; |
1800 | |
1801 | if (adapter->netdev->mtu <= ETH_DATA_LEN) |
1802 | rctl &= ~E1000_RCTL_LPE; |
1803 | else |
1804 | rctl |= E1000_RCTL_LPE; |
1805 | |
1806 | /* Setup buffer sizes */ |
1807 | rctl &= ~E1000_RCTL_SZ_4096; |
1808 | rctl |= E1000_RCTL_BSEX; |
1809 | switch (adapter->rx_buffer_len) { |
1810 | case E1000_RXBUFFER_2048: |
1811 | default: |
1812 | rctl |= E1000_RCTL_SZ_2048; |
1813 | rctl &= ~E1000_RCTL_BSEX; |
1814 | break; |
1815 | case E1000_RXBUFFER_4096: |
1816 | rctl |= E1000_RCTL_SZ_4096; |
1817 | break; |
1818 | case E1000_RXBUFFER_8192: |
1819 | rctl |= E1000_RCTL_SZ_8192; |
1820 | break; |
1821 | case E1000_RXBUFFER_16384: |
1822 | rctl |= E1000_RCTL_SZ_16384; |
1823 | break; |
1824 | } |
1825 | |
1826 | /* This is useful for sniffing bad packets. */ |
1827 | if (adapter->netdev->features & NETIF_F_RXALL) { |
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in e1000_set_rx_mode
		 */
1831 | rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ |
1832 | E1000_RCTL_BAM | /* RX All Bcast Pkts */ |
1833 | E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ |
1834 | |
1835 | rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ |
1836 | E1000_RCTL_DPF | /* Allow filtered pause */ |
1837 | E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ |
1838 | /* Do not mess with E1000_CTRL_VME, it affects transmit as well, |
1839 | * and that breaks VLANs. |
1840 | */ |
1841 | } |
1842 | |
1843 | ew32(RCTL, rctl); |
1844 | } |
1845 | |
1846 | /** |
1847 | * e1000_configure_rx - Configure 8254x Receive Unit after Reset |
1848 | * @adapter: board private structure |
1849 | * |
1850 | * Configure the Rx unit of the MAC after a reset. |
1851 | **/ |
1852 | static void e1000_configure_rx(struct e1000_adapter *adapter) |
1853 | { |
1854 | u64 rdba; |
1855 | struct e1000_hw *hw = &adapter->hw; |
1856 | u32 rdlen, rctl, rxcsum; |
1857 | |
1858 | if (adapter->netdev->mtu > ETH_DATA_LEN) { |
1859 | rdlen = adapter->rx_ring[0].count * |
1860 | sizeof(struct e1000_rx_desc); |
1861 | adapter->clean_rx = e1000_clean_jumbo_rx_irq; |
1862 | adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; |
1863 | } else { |
1864 | rdlen = adapter->rx_ring[0].count * |
1865 | sizeof(struct e1000_rx_desc); |
1866 | adapter->clean_rx = e1000_clean_rx_irq; |
1867 | adapter->alloc_rx_buf = e1000_alloc_rx_buffers; |
1868 | } |
1869 | |
1870 | /* disable receives while setting up the descriptors */ |
1871 | rctl = er32(RCTL); |
1872 | ew32(RCTL, rctl & ~E1000_RCTL_EN); |
1873 | |
1874 | /* set the Receive Delay Timer Register */ |
1875 | ew32(RDTR, adapter->rx_int_delay); |
1876 | |
1877 | if (hw->mac_type >= e1000_82540) { |
1878 | ew32(RADV, adapter->rx_abs_int_delay); |
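		/* the ITR register counts in 256 ns units, so
		 * 10^9 / (itr * 256) turns an interrupts/sec target into
		 * the programmed interval
		 */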
1879 | if (adapter->itr_setting != 0) |
1880 | ew32(ITR, 1000000000 / (adapter->itr * 256)); |
1881 | } |
1882 | |
1883 | /* Setup the HW Rx Head and Tail Descriptor Pointers and |
1884 | * the Base and Length of the Rx Descriptor Ring |
1885 | */ |
1886 | switch (adapter->num_rx_queues) { |
1887 | case 1: |
1888 | default: |
1889 | rdba = adapter->rx_ring[0].dma; |
1890 | ew32(RDLEN, rdlen); |
1891 | ew32(RDBAH, (rdba >> 32)); |
1892 | ew32(RDBAL, (rdba & 0x00000000ffffffffULL)); |
1893 | ew32(RDT, 0); |
1894 | ew32(RDH, 0); |
1895 | adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? |
1896 | E1000_RDH : E1000_82542_RDH); |
1897 | adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? |
1898 | E1000_RDT : E1000_82542_RDT); |
1899 | break; |
1900 | } |
1901 | |
1902 | /* Enable 82543 Receive Checksum Offload for TCP and UDP */ |
1903 | if (hw->mac_type >= e1000_82543) { |
1904 | rxcsum = er32(RXCSUM); |
1905 | if (adapter->rx_csum) |
1906 | rxcsum |= E1000_RXCSUM_TUOFL; |
1907 | else |
1908 | /* don't need to clear IPPCSE as it defaults to 0 */ |
1909 | rxcsum &= ~E1000_RXCSUM_TUOFL; |
1910 | ew32(RXCSUM, rxcsum); |
1911 | } |
1912 | |
1913 | /* Enable Receives */ |
1914 | ew32(RCTL, rctl | E1000_RCTL_EN); |
1915 | } |
1916 | |
1917 | /** |
1918 | * e1000_free_tx_resources - Free Tx Resources per Queue |
1919 | * @adapter: board private structure |
1920 | * @tx_ring: Tx descriptor ring for a specific queue |
1921 | * |
1922 | * Free all transmit software resources |
1923 | **/ |
1924 | static void e1000_free_tx_resources(struct e1000_adapter *adapter, |
1925 | struct e1000_tx_ring *tx_ring) |
1926 | { |
1927 | struct pci_dev *pdev = adapter->pdev; |
1928 | |
1929 | e1000_clean_tx_ring(adapter, tx_ring); |
1930 | |
	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);
1936 | |
1937 | tx_ring->desc = NULL; |
1938 | } |
1939 | |
1940 | /** |
1941 | * e1000_free_all_tx_resources - Free Tx Resources for All Queues |
1942 | * @adapter: board private structure |
1943 | * |
1944 | * Free all transmit software resources |
1945 | **/ |
1946 | void e1000_free_all_tx_resources(struct e1000_adapter *adapter) |
1947 | { |
1948 | int i; |
1949 | |
1950 | for (i = 0; i < adapter->num_tx_queues; i++) |
		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1952 | } |
1953 | |
1954 | static void |
1955 | e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, |
1956 | struct e1000_tx_buffer *buffer_info, |
1957 | int budget) |
1958 | { |
1959 | if (buffer_info->dma) { |
1960 | if (buffer_info->mapped_as_page) |
1961 | dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, |
1962 | buffer_info->length, DMA_TO_DEVICE); |
1963 | else |
1964 | dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, |
1965 | buffer_info->length, |
1966 | DMA_TO_DEVICE); |
1967 | buffer_info->dma = 0; |
1968 | } |
1969 | if (buffer_info->skb) { |
		napi_consume_skb(buffer_info->skb, budget);
1971 | buffer_info->skb = NULL; |
1972 | } |
1973 | buffer_info->time_stamp = 0; |
1974 | /* buffer_info must be completely set up in the transmit path */ |
1975 | } |
1976 | |
1977 | /** |
1978 | * e1000_clean_tx_ring - Free Tx Buffers |
1979 | * @adapter: board private structure |
1980 | * @tx_ring: ring to be cleaned |
1981 | **/ |
1982 | static void e1000_clean_tx_ring(struct e1000_adapter *adapter, |
1983 | struct e1000_tx_ring *tx_ring) |
1984 | { |
1985 | struct e1000_hw *hw = &adapter->hw; |
1986 | struct e1000_tx_buffer *buffer_info; |
1987 | unsigned long size; |
1988 | unsigned int i; |
1989 | |
1990 | /* Free all the Tx ring sk_buffs */ |
1991 | |
1992 | for (i = 0; i < tx_ring->count; i++) { |
1993 | buffer_info = &tx_ring->buffer_info[i]; |
		e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0);
	}

	netdev_reset_queue(adapter->netdev);
1998 | size = sizeof(struct e1000_tx_buffer) * tx_ring->count; |
1999 | memset(tx_ring->buffer_info, 0, size); |
2000 | |
2001 | /* Zero out the descriptor ring */ |
2002 | |
2003 | memset(tx_ring->desc, 0, tx_ring->size); |
2004 | |
2005 | tx_ring->next_to_use = 0; |
2006 | tx_ring->next_to_clean = 0; |
2007 | tx_ring->last_tx_tso = false; |
2008 | |
	writel(0, hw->hw_addr + tx_ring->tdh);
	writel(0, hw->hw_addr + tx_ring->tdt);
2011 | } |
2012 | |
2013 | /** |
2014 | * e1000_clean_all_tx_rings - Free Tx Buffers for all queues |
2015 | * @adapter: board private structure |
2016 | **/ |
2017 | static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) |
2018 | { |
2019 | int i; |
2020 | |
2021 | for (i = 0; i < adapter->num_tx_queues; i++) |
		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2023 | } |
2024 | |
2025 | /** |
2026 | * e1000_free_rx_resources - Free Rx Resources |
2027 | * @adapter: board private structure |
2028 | * @rx_ring: ring to clean the resources from |
2029 | * |
2030 | * Free all receive software resources |
2031 | **/ |
2032 | static void e1000_free_rx_resources(struct e1000_adapter *adapter, |
2033 | struct e1000_rx_ring *rx_ring) |
2034 | { |
2035 | struct pci_dev *pdev = adapter->pdev; |
2036 | |
2037 | e1000_clean_rx_ring(adapter, rx_ring); |
2038 | |
	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);
2044 | |
2045 | rx_ring->desc = NULL; |
2046 | } |
2047 | |
2048 | /** |
2049 | * e1000_free_all_rx_resources - Free Rx Resources for All Queues |
2050 | * @adapter: board private structure |
2051 | * |
2052 | * Free all receive software resources |
2053 | **/ |
2054 | void e1000_free_all_rx_resources(struct e1000_adapter *adapter) |
2055 | { |
2056 | int i; |
2057 | |
2058 | for (i = 0; i < adapter->num_rx_queues; i++) |
		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2060 | } |
2061 | |
2062 | #define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN) |
2063 | static unsigned int e1000_frag_len(const struct e1000_adapter *a) |
2064 | { |
2065 | return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) + |
2066 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); |
2067 | } |
2068 | |
2069 | static void *e1000_alloc_frag(const struct e1000_adapter *a) |
2070 | { |
2071 | unsigned int len = e1000_frag_len(a); |
	u8 *data = netdev_alloc_frag(len);
2073 | |
2074 | if (likely(data)) |
2075 | data += E1000_HEADROOM; |
2076 | return data; |
2077 | } |
2078 | |
2079 | /** |
2080 | * e1000_clean_rx_ring - Free Rx Buffers per Queue |
2081 | * @adapter: board private structure |
2082 | * @rx_ring: ring to free buffers from |
2083 | **/ |
2084 | static void e1000_clean_rx_ring(struct e1000_adapter *adapter, |
2085 | struct e1000_rx_ring *rx_ring) |
2086 | { |
2087 | struct e1000_hw *hw = &adapter->hw; |
2088 | struct e1000_rx_buffer *buffer_info; |
2089 | struct pci_dev *pdev = adapter->pdev; |
2090 | unsigned long size; |
2091 | unsigned int i; |
2092 | |
2093 | /* Free all the Rx netfrags */ |
2094 | for (i = 0; i < rx_ring->count; i++) { |
2095 | buffer_info = &rx_ring->buffer_info[i]; |
2096 | if (adapter->clean_rx == e1000_clean_rx_irq) { |
2097 | if (buffer_info->dma) |
2098 | dma_unmap_single(&pdev->dev, buffer_info->dma, |
2099 | adapter->rx_buffer_len, |
2100 | DMA_FROM_DEVICE); |
2101 | if (buffer_info->rxbuf.data) { |
				skb_free_frag(buffer_info->rxbuf.data);
2103 | buffer_info->rxbuf.data = NULL; |
2104 | } |
2105 | } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) { |
2106 | if (buffer_info->dma) |
2107 | dma_unmap_page(&pdev->dev, buffer_info->dma, |
2108 | adapter->rx_buffer_len, |
2109 | DMA_FROM_DEVICE); |
2110 | if (buffer_info->rxbuf.page) { |
				put_page(buffer_info->rxbuf.page);
2112 | buffer_info->rxbuf.page = NULL; |
2113 | } |
2114 | } |
2115 | |
2116 | buffer_info->dma = 0; |
2117 | } |
2118 | |
2119 | /* there also may be some cached data from a chained receive */ |
	napi_free_frags(&adapter->napi);
2121 | rx_ring->rx_skb_top = NULL; |
2122 | |
2123 | size = sizeof(struct e1000_rx_buffer) * rx_ring->count; |
2124 | memset(rx_ring->buffer_info, 0, size); |
2125 | |
2126 | /* Zero out the descriptor ring */ |
2127 | memset(rx_ring->desc, 0, rx_ring->size); |
2128 | |
2129 | rx_ring->next_to_clean = 0; |
2130 | rx_ring->next_to_use = 0; |
2131 | |
	writel(0, hw->hw_addr + rx_ring->rdh);
	writel(0, hw->hw_addr + rx_ring->rdt);
2134 | } |
2135 | |
2136 | /** |
2137 | * e1000_clean_all_rx_rings - Free Rx Buffers for all queues |
2138 | * @adapter: board private structure |
2139 | **/ |
2140 | static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) |
2141 | { |
2142 | int i; |
2143 | |
2144 | for (i = 0; i < adapter->num_rx_queues; i++) |
		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2146 | } |
2147 | |
2148 | /* The 82542 2.0 (revision 2) needs to have the receive unit in reset |
2149 | * and memory write and invalidate disabled for certain operations |
2150 | */ |
2151 | static void e1000_enter_82542_rst(struct e1000_adapter *adapter) |
2152 | { |
2153 | struct e1000_hw *hw = &adapter->hw; |
2154 | struct net_device *netdev = adapter->netdev; |
2155 | u32 rctl; |
2156 | |
2157 | e1000_pci_clear_mwi(hw); |
2158 | |
2159 | rctl = er32(RCTL); |
2160 | rctl |= E1000_RCTL_RST; |
2161 | ew32(RCTL, rctl); |
2162 | E1000_WRITE_FLUSH(); |
2163 | mdelay(5); |
2164 | |
	if (netif_running(netdev))
2166 | e1000_clean_all_rx_rings(adapter); |
2167 | } |
2168 | |
2169 | static void e1000_leave_82542_rst(struct e1000_adapter *adapter) |
2170 | { |
2171 | struct e1000_hw *hw = &adapter->hw; |
2172 | struct net_device *netdev = adapter->netdev; |
2173 | u32 rctl; |
2174 | |
2175 | rctl = er32(RCTL); |
2176 | rctl &= ~E1000_RCTL_RST; |
2177 | ew32(RCTL, rctl); |
2178 | E1000_WRITE_FLUSH(); |
2179 | mdelay(5); |
2180 | |
2181 | if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) |
2182 | e1000_pci_set_mwi(hw); |
2183 | |
	if (netif_running(netdev)) {
		/* No need to loop, because 82542 supports only 1 queue */
		struct e1000_rx_ring *ring = &adapter->rx_ring[0];

		e1000_configure_rx(adapter);
		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2189 | } |
2190 | } |
2191 | |
2192 | /** |
2193 | * e1000_set_mac - Change the Ethernet Address of the NIC |
2194 | * @netdev: network interface device structure |
2195 | * @p: pointer to an address structure |
2196 | * |
2197 | * Returns 0 on success, negative on failure |
2198 | **/ |
2199 | static int e1000_set_mac(struct net_device *netdev, void *p) |
2200 | { |
	struct e1000_adapter *adapter = netdev_priv(netdev);
2202 | struct e1000_hw *hw = &adapter->hw; |
2203 | struct sockaddr *addr = p; |
2204 | |
	if (!is_valid_ether_addr(addr->sa_data))
2206 | return -EADDRNOTAVAIL; |
2207 | |
2208 | /* 82542 2.0 needs to be in reset to write receive address registers */ |
2209 | |
2210 | if (hw->mac_type == e1000_82542_rev2_0) |
2211 | e1000_enter_82542_rst(adapter); |
2212 | |
	eth_hw_addr_set(netdev, addr->sa_data);
	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);

	e1000_rar_set(hw, hw->mac_addr, 0);
2217 | |
2218 | if (hw->mac_type == e1000_82542_rev2_0) |
2219 | e1000_leave_82542_rst(adapter); |
2220 | |
2221 | return 0; |
2222 | } |
2223 | |
2224 | /** |
2225 | * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set |
2226 | * @netdev: network interface device structure |
2227 | * |
2228 | * The set_rx_mode entry point is called whenever the unicast or multicast |
2229 | * address lists or the network interface flags are updated. This routine is |
2230 | * responsible for configuring the hardware for proper unicast, multicast, |
2231 | * promiscuous mode, and all-multi behavior. |
2232 | **/ |
2233 | static void e1000_set_rx_mode(struct net_device *netdev) |
2234 | { |
	struct e1000_adapter *adapter = netdev_priv(netdev);
2236 | struct e1000_hw *hw = &adapter->hw; |
2237 | struct netdev_hw_addr *ha; |
2238 | bool use_uc = false; |
2239 | u32 rctl; |
2240 | u32 hash_value; |
2241 | int i, rar_entries = E1000_RAR_ENTRIES; |
2242 | int mta_reg_count = E1000_NUM_MTA_REGISTERS; |
	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2244 | |
2245 | if (!mcarray) |
2246 | return; |
2247 | |
2248 | /* Check for Promiscuous and All Multicast modes */ |
2249 | |
2250 | rctl = er32(RCTL); |
2251 | |
2252 | if (netdev->flags & IFF_PROMISC) { |
2253 | rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); |
2254 | rctl &= ~E1000_RCTL_VFE; |
2255 | } else { |
2256 | if (netdev->flags & IFF_ALLMULTI) |
2257 | rctl |= E1000_RCTL_MPE; |
2258 | else |
2259 | rctl &= ~E1000_RCTL_MPE; |
2260 | /* Enable VLAN filter if there is a VLAN */ |
2261 | if (e1000_vlan_used(adapter)) |
2262 | rctl |= E1000_RCTL_VFE; |
2263 | } |
2264 | |
2265 | if (netdev_uc_count(netdev) > rar_entries - 1) { |
2266 | rctl |= E1000_RCTL_UPE; |
2267 | } else if (!(netdev->flags & IFF_PROMISC)) { |
2268 | rctl &= ~E1000_RCTL_UPE; |
2269 | use_uc = true; |
2270 | } |
2271 | |
2272 | ew32(RCTL, rctl); |
2273 | |
2274 | /* 82542 2.0 needs to be in reset to write receive address registers */ |
2275 | |
2276 | if (hw->mac_type == e1000_82542_rev2_0) |
2277 | e1000_enter_82542_rst(adapter); |
2278 | |
	/* load the first 14 addresses into the exact filters 1-14. Unicast
	 * addresses take precedence to avoid disabling unicast filtering
	 * when possible.
	 *
	 * RAR 0 is used for the station MAC address;
	 * if there are fewer than 14 addresses, clear the unused filters
	 */
2286 | i = 1; |
2287 | if (use_uc) |
2288 | netdev_for_each_uc_addr(ha, netdev) { |
2289 | if (i == rar_entries) |
2290 | break; |
		e1000_rar_set(hw, ha->addr, i++);
2292 | } |
2293 | |
2294 | netdev_for_each_mc_addr(ha, netdev) { |
2295 | if (i == rar_entries) { |
2296 | /* load any remaining addresses into the hash table */ |
2297 | u32 hash_reg, hash_bit, mta; |
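
			/* bits 5-11 of the hash select one of the 128 MTA
			 * registers; bits 0-4 select the bit within it
			 */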
			hash_value = e1000_hash_mc_addr(hw, ha->addr);
			hash_reg = (hash_value >> 5) & 0x7F;
			hash_bit = hash_value & 0x1F;
			mta = (1 << hash_bit);
			mcarray[hash_reg] |= mta;
		} else {
			e1000_rar_set(hw, ha->addr, i++);
2305 | } |
2306 | } |
2307 | |
2308 | for (; i < rar_entries; i++) { |
2309 | E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); |
2310 | E1000_WRITE_FLUSH(); |
2311 | E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); |
2312 | E1000_WRITE_FLUSH(); |
2313 | } |
2314 | |
2315 | /* write the hash table completely, write from bottom to avoid |
2316 | * both stupid write combining chipsets, and flushing each write |
2317 | */ |
	for (i = mta_reg_count - 1; i >= 0; i--) {
		/* The 82544 has an erratum where writing an odd offset
		 * overwrites the previous even offset; writing backwards
		 * over the range works around it by always writing the
		 * odd offset first
		 */
2324 | E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]); |
2325 | } |
2326 | E1000_WRITE_FLUSH(); |
2327 | |
2328 | if (hw->mac_type == e1000_82542_rev2_0) |
2329 | e1000_leave_82542_rst(adapter); |
2330 | |
	kfree(mcarray);
2332 | } |
2333 | |
2334 | /** |
2335 | * e1000_update_phy_info_task - get phy info |
2336 | * @work: work struct contained inside adapter struct |
2337 | * |
2338 | * Need to wait a few seconds after link up to get diagnostic information from |
2339 | * the phy |
2340 | */ |
2341 | static void e1000_update_phy_info_task(struct work_struct *work) |
2342 | { |
2343 | struct e1000_adapter *adapter = container_of(work, |
2344 | struct e1000_adapter, |
2345 | phy_info_task.work); |
2346 | |
	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2348 | } |
2349 | |
2350 | /** |
2351 | * e1000_82547_tx_fifo_stall_task - task to complete work |
2352 | * @work: work struct contained inside adapter struct |
2353 | **/ |
2354 | static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) |
2355 | { |
2356 | struct e1000_adapter *adapter = container_of(work, |
2357 | struct e1000_adapter, |
2358 | fifo_stall_task.work); |
2359 | struct e1000_hw *hw = &adapter->hw; |
2360 | struct net_device *netdev = adapter->netdev; |
2361 | u32 tctl; |
2362 | |
	if (atomic_read(&adapter->tx_fifo_stall)) {
2364 | if ((er32(TDT) == er32(TDH)) && |
2365 | (er32(TDFT) == er32(TDFH)) && |
2366 | (er32(TDFTS) == er32(TDFHS))) { |
2367 | tctl = er32(TCTL); |
2368 | ew32(TCTL, tctl & ~E1000_TCTL_EN); |
2369 | ew32(TDFT, adapter->tx_head_addr); |
2370 | ew32(TDFH, adapter->tx_head_addr); |
2371 | ew32(TDFTS, adapter->tx_head_addr); |
2372 | ew32(TDFHS, adapter->tx_head_addr); |
2373 | ew32(TCTL, tctl); |
2374 | E1000_WRITE_FLUSH(); |
2375 | |
2376 | adapter->tx_fifo_head = 0; |
			atomic_set(&adapter->tx_fifo_stall, 0);
			netif_wake_queue(netdev);
		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
			schedule_delayed_work(&adapter->fifo_stall_task, 1);
2381 | } |
2382 | } |
2383 | } |
2384 | |
2385 | bool e1000_has_link(struct e1000_adapter *adapter) |
2386 | { |
2387 | struct e1000_hw *hw = &adapter->hw; |
2388 | bool link_active = false; |
2389 | |
	/* get_link_status is set on LSC (link status) interrupt or rx
	 * sequence error interrupt (except on intel ce4100).
	 * link_active stays false until e1000_check_for_link
	 * establishes link, for copper adapters ONLY
	 */
2396 | switch (hw->media_type) { |
2397 | case e1000_media_type_copper: |
2398 | if (hw->mac_type == e1000_ce4100) |
2399 | hw->get_link_status = 1; |
2400 | if (hw->get_link_status) { |
2401 | e1000_check_for_link(hw); |
2402 | link_active = !hw->get_link_status; |
2403 | } else { |
2404 | link_active = true; |
2405 | } |
2406 | break; |
2407 | case e1000_media_type_fiber: |
2408 | e1000_check_for_link(hw); |
2409 | link_active = !!(er32(STATUS) & E1000_STATUS_LU); |
2410 | break; |
2411 | case e1000_media_type_internal_serdes: |
2412 | e1000_check_for_link(hw); |
2413 | link_active = hw->serdes_has_link; |
2414 | break; |
2415 | default: |
2416 | break; |
2417 | } |
2418 | |
2419 | return link_active; |
2420 | } |
2421 | |
2422 | /** |
2423 | * e1000_watchdog - work function |
2424 | * @work: work struct contained inside adapter struct |
2425 | **/ |
2426 | static void e1000_watchdog(struct work_struct *work) |
2427 | { |
2428 | struct e1000_adapter *adapter = container_of(work, |
2429 | struct e1000_adapter, |
2430 | watchdog_task.work); |
2431 | struct e1000_hw *hw = &adapter->hw; |
2432 | struct net_device *netdev = adapter->netdev; |
2433 | struct e1000_tx_ring *txdr = adapter->tx_ring; |
2434 | u32 link, tctl; |
2435 | |
2436 | link = e1000_has_link(adapter); |
	if (netif_carrier_ok(netdev) && link)
		goto link_up;

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;
			/* update snapshot of PHY registers on LSC */
			e1000_get_speed_and_duplex(hw,
						   &adapter->link_speed,
						   &adapter->link_duplex);
2447 | |
2448 | ctrl = er32(CTRL); |
2449 | pr_info("%s NIC Link is Up %d Mbps %s, " |
2450 | "Flow Control: %s\n" , |
2451 | netdev->name, |
2452 | adapter->link_speed, |
2453 | adapter->link_duplex == FULL_DUPLEX ? |
2454 | "Full Duplex" : "Half Duplex" , |
2455 | ((ctrl & E1000_CTRL_TFCE) && (ctrl & |
2456 | E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & |
2457 | E1000_CTRL_RFCE) ? "RX" : ((ctrl & |
2458 | E1000_CTRL_TFCE) ? "TX" : "None" ))); |
2459 | |
2460 | /* adjust timeout factor according to speed/duplex */ |
2461 | adapter->tx_timeout_factor = 1; |
2462 | switch (adapter->link_speed) { |
2463 | case SPEED_10: |
2464 | adapter->tx_timeout_factor = 16; |
2465 | break; |
2466 | case SPEED_100: |
				/* maybe add some timeout factor? */
2468 | break; |
2469 | } |
2470 | |
2471 | /* enable transmits in the hardware */ |
2472 | tctl = er32(TCTL); |
2473 | tctl |= E1000_TCTL_EN; |
2474 | ew32(TCTL, tctl); |
2475 | |
			netif_carrier_on(netdev);
			if (!test_bit(__E1000_DOWN, &adapter->flags))
				schedule_delayed_work(&adapter->phy_info_task,
						      2 * HZ);
2480 | adapter->smartspeed = 0; |
2481 | } |
2482 | } else { |
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			pr_info("%s NIC Link is Down\n",
				netdev->name);
			netif_carrier_off(netdev);

			if (!test_bit(__E1000_DOWN, &adapter->flags))
				schedule_delayed_work(&adapter->phy_info_task,
						      2 * HZ);
2493 | } |
2494 | |
2495 | e1000_smartspeed(adapter); |
2496 | } |
2497 | |
2498 | link_up: |
2499 | e1000_update_stats(adapter); |
2500 | |
2501 | hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; |
2502 | adapter->tpt_old = adapter->stats.tpt; |
2503 | hw->collision_delta = adapter->stats.colc - adapter->colc_old; |
2504 | adapter->colc_old = adapter->stats.colc; |
2505 | |
2506 | adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; |
2507 | adapter->gorcl_old = adapter->stats.gorcl; |
2508 | adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; |
2509 | adapter->gotcl_old = adapter->stats.gotcl; |
2510 | |
2511 | e1000_update_adaptive(hw); |
2512 | |
	if (!netif_carrier_ok(netdev)) {
		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
2522 | /* exit immediately since reset is imminent */ |
2523 | return; |
2524 | } |
2525 | } |
2526 | |
2527 | /* Simple mode for Interrupt Throttle Rate (ITR) */ |
2528 | if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) { |
2529 | /* Symmetric Tx/Rx gets a reduced ITR=2000; |
2530 | * Total asymmetrical Tx or Rx gets ITR=8000; |
2531 | * everyone else is between 2000-8000. |
2532 | */ |
2533 | u32 goc = (adapter->gotcl + adapter->gorcl) / 10000; |
2534 | u32 dif = (adapter->gotcl > adapter->gorcl ? |
2535 | adapter->gotcl - adapter->gorcl : |
2536 | adapter->gorcl - adapter->gotcl) / 10000; |
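		/* dif/goc is the tx/rx asymmetry fraction; scale it
		 * linearly into the 2000-8000 range
		 */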
2537 | u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; |
2538 | |
2539 | ew32(ITR, 1000000000 / (itr * 256)); |
2540 | } |
2541 | |
2542 | /* Cause software interrupt to ensure rx ring is cleaned */ |
2543 | ew32(ICS, E1000_ICS_RXDMT0); |
2544 | |
2545 | /* Force detection of hung controller every watchdog period */ |
2546 | adapter->detect_tx_hung = true; |
2547 | |
2548 | /* Reschedule the task */ |
2549 | if (!test_bit(__E1000_DOWN, &adapter->flags)) |
2550 | schedule_delayed_work(dwork: &adapter->watchdog_task, delay: 2 * HZ); |
2551 | } |
2552 | |
2553 | enum latency_range { |
2554 | lowest_latency = 0, |
2555 | low_latency = 1, |
2556 | bulk_latency = 2, |
2557 | latency_invalid = 255 |
2558 | }; |
2559 | |
2560 | /** |
2561 | * e1000_update_itr - update the dynamic ITR value based on statistics |
2562 | * @adapter: pointer to adapter |
2563 | * @itr_setting: current adapter->itr |
2564 | * @packets: the number of packets during this measurement interval |
2565 | * @bytes: the number of bytes during this measurement interval |
2566 | * |
2567 | * Stores a new ITR value based on packets and byte |
2568 | * counts during the last interrupt. The advantage of per interrupt |
2569 | * computation is faster updates and more accurate ITR for the current |
2570 | * traffic pattern. Constants in this function were computed |
2571 | * based on theoretical maximum wire speed and thresholds were set based |
2572 | * on testing data as well as attempting to minimize response time |
2573 | * while increasing bulk throughput. |
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see e1000_param.c)
2576 | **/ |
2577 | static unsigned int e1000_update_itr(struct e1000_adapter *adapter, |
2578 | u16 itr_setting, int packets, int bytes) |
2579 | { |
2580 | unsigned int retval = itr_setting; |
2581 | struct e1000_hw *hw = &adapter->hw; |
2582 | |
2583 | if (unlikely(hw->mac_type < e1000_82540)) |
2584 | goto update_itr_done; |
2585 | |
2586 | if (packets == 0) |
2587 | goto update_itr_done; |
2588 | |
2589 | switch (itr_setting) { |
2590 | case lowest_latency: |
		/* jumbo frames get bulk treatment */
2592 | if (bytes/packets > 8000) |
2593 | retval = bulk_latency; |
2594 | else if ((packets < 5) && (bytes > 512)) |
2595 | retval = low_latency; |
2596 | break; |
2597 | case low_latency: /* 50 usec aka 20000 ints/s */ |
2598 | if (bytes > 10000) { |
2599 | /* jumbo frames need bulk latency setting */ |
2600 | if (bytes/packets > 8000) |
2601 | retval = bulk_latency; |
2602 | else if ((packets < 10) || ((bytes/packets) > 1200)) |
2603 | retval = bulk_latency; |
2604 | else if ((packets > 35)) |
2605 | retval = lowest_latency; |
2606 | } else if (bytes/packets > 2000) |
2607 | retval = bulk_latency; |
2608 | else if (packets <= 2 && bytes < 512) |
2609 | retval = lowest_latency; |
2610 | break; |
2611 | case bulk_latency: /* 250 usec aka 4000 ints/s */ |
2612 | if (bytes > 25000) { |
2613 | if (packets > 35) |
2614 | retval = low_latency; |
2615 | } else if (bytes < 6000) { |
2616 | retval = low_latency; |
2617 | } |
2618 | break; |
2619 | } |
2620 | |
2621 | update_itr_done: |
2622 | return retval; |
2623 | } |
2624 | |
2625 | static void e1000_set_itr(struct e1000_adapter *adapter) |
2626 | { |
2627 | struct e1000_hw *hw = &adapter->hw; |
2628 | u16 current_itr; |
2629 | u32 new_itr = adapter->itr; |
2630 | |
2631 | if (unlikely(hw->mac_type < e1000_82540)) |
2632 | return; |
2633 | |
2634 | /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ |
2635 | if (unlikely(adapter->link_speed != SPEED_1000)) { |
2636 | new_itr = 4000; |
2637 | goto set_itr_now; |
2638 | } |
2639 | |
	adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
					   adapter->total_tx_packets,
					   adapter->total_tx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
		adapter->tx_itr = low_latency;

	adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
					   adapter->total_rx_packets,
					   adapter->total_rx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
		adapter->rx_itr = low_latency;
2653 | |
2654 | current_itr = max(adapter->rx_itr, adapter->tx_itr); |
2655 | |
2656 | switch (current_itr) { |
2657 | /* counts and packets in update_itr are dependent on these numbers */ |
2658 | case lowest_latency: |
2659 | new_itr = 70000; |
2660 | break; |
2661 | case low_latency: |
2662 | new_itr = 20000; /* aka hwitr = ~200 */ |
2663 | break; |
2664 | case bulk_latency: |
2665 | new_itr = 4000; |
2666 | break; |
2667 | default: |
2668 | break; |
2669 | } |
2670 | |
2671 | set_itr_now: |
2672 | if (new_itr != adapter->itr) { |
2673 | /* this attempts to bias the interrupt rate towards Bulk |
2674 | * by adding intermediate steps when interrupt rate is |
2675 | * increasing |
2676 | */ |
2677 | new_itr = new_itr > adapter->itr ? |
2678 | min(adapter->itr + (new_itr >> 2), new_itr) : |
2679 | new_itr; |
2680 | adapter->itr = new_itr; |
2681 | ew32(ITR, 1000000000 / (new_itr * 256)); |
2682 | } |
2683 | } |
2684 | |
2685 | #define E1000_TX_FLAGS_CSUM 0x00000001 |
2686 | #define E1000_TX_FLAGS_VLAN 0x00000002 |
2687 | #define E1000_TX_FLAGS_TSO 0x00000004 |
2688 | #define E1000_TX_FLAGS_IPV4 0x00000008 |
2689 | #define E1000_TX_FLAGS_NO_FCS 0x00000010 |
2690 | #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 |
2691 | #define E1000_TX_FLAGS_VLAN_SHIFT 16 |
2692 | |
2693 | static int e1000_tso(struct e1000_adapter *adapter, |
2694 | struct e1000_tx_ring *tx_ring, struct sk_buff *skb, |
2695 | __be16 protocol) |
2696 | { |
2697 | struct e1000_context_desc *context_desc; |
2698 | struct e1000_tx_buffer *buffer_info; |
2699 | unsigned int i; |
2700 | u32 cmd_length = 0; |
2701 | u16 ipcse = 0, tucse, mss; |
2702 | u8 ipcss, ipcso, tucss, tucso, hdr_len; |
2703 | |
2704 | if (skb_is_gso(skb)) { |
2705 | int err; |
2706 | |
		err = skb_cow_head(skb, 0);
2708 | if (err < 0) |
2709 | return err; |
2710 | |
2711 | hdr_len = skb_tcp_all_headers(skb); |
2712 | mss = skb_shinfo(skb)->gso_size; |
2713 | if (protocol == htons(ETH_P_IP)) { |
2714 | struct iphdr *iph = ip_hdr(skb); |
2715 | iph->tot_len = 0; |
2716 | iph->check = 0; |
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
2721 | cmd_length = E1000_TXD_CMD_IP; |
2722 | ipcse = skb_transport_offset(skb) - 1; |
2723 | } else if (skb_is_gso_v6(skb)) { |
2724 | tcp_v6_gso_csum_prep(skb); |
2725 | ipcse = 0; |
2726 | } |
2727 | ipcss = skb_network_offset(skb); |
2728 | ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; |
2729 | tucss = skb_transport_offset(skb); |
2730 | tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; |
2731 | tucse = 0; |
2732 | |
2733 | cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | |
2734 | E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); |
2735 | |
2736 | i = tx_ring->next_to_use; |
2737 | context_desc = E1000_CONTEXT_DESC(*tx_ring, i); |
2738 | buffer_info = &tx_ring->buffer_info[i]; |
2739 | |
2740 | context_desc->lower_setup.ip_fields.ipcss = ipcss; |
2741 | context_desc->lower_setup.ip_fields.ipcso = ipcso; |
2742 | context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); |
2743 | context_desc->upper_setup.tcp_fields.tucss = tucss; |
2744 | context_desc->upper_setup.tcp_fields.tucso = tucso; |
2745 | context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); |
2746 | context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); |
2747 | context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; |
2748 | context_desc->cmd_and_length = cpu_to_le32(cmd_length); |
2749 | |
2750 | buffer_info->time_stamp = jiffies; |
2751 | buffer_info->next_to_watch = i; |
2752 | |
2753 | if (++i == tx_ring->count) |
2754 | i = 0; |
2755 | |
2756 | tx_ring->next_to_use = i; |
2757 | |
2758 | return true; |
2759 | } |
2760 | return false; |
2761 | } |
2762 | |
2763 | static bool e1000_tx_csum(struct e1000_adapter *adapter, |
2764 | struct e1000_tx_ring *tx_ring, struct sk_buff *skb, |
2765 | __be16 protocol) |
2766 | { |
2767 | struct e1000_context_desc *context_desc; |
2768 | struct e1000_tx_buffer *buffer_info; |
2769 | unsigned int i; |
2770 | u8 css; |
2771 | u32 cmd_len = E1000_TXD_CMD_DEXT; |
2772 | |
2773 | if (skb->ip_summed != CHECKSUM_PARTIAL) |
2774 | return false; |
2775 | |
2776 | switch (protocol) { |
2777 | case cpu_to_be16(ETH_P_IP): |
2778 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) |
2779 | cmd_len |= E1000_TXD_CMD_TCP; |
2780 | break; |
2781 | case cpu_to_be16(ETH_P_IPV6): |
2782 | /* XXX not handling all IPV6 headers */ |
2783 | if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) |
2784 | cmd_len |= E1000_TXD_CMD_TCP; |
2785 | break; |
2786 | default: |
2787 | if (unlikely(net_ratelimit())) |
			e_warn(drv, "checksum_partial proto=%x!\n",
			       skb->protocol);
2790 | break; |
2791 | } |
2792 | |
2793 | css = skb_checksum_start_offset(skb); |
2794 | |
2795 | i = tx_ring->next_to_use; |
2796 | buffer_info = &tx_ring->buffer_info[i]; |
2797 | context_desc = E1000_CONTEXT_DESC(*tx_ring, i); |
2798 | |
2799 | context_desc->lower_setup.ip_config = 0; |
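	/* tucss: offset where checksumming starts; tucso: offset where
	 * the hardware writes the resulting checksum
	 */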
2800 | context_desc->upper_setup.tcp_fields.tucss = css; |
2801 | context_desc->upper_setup.tcp_fields.tucso = |
2802 | css + skb->csum_offset; |
2803 | context_desc->upper_setup.tcp_fields.tucse = 0; |
2804 | context_desc->tcp_seg_setup.data = 0; |
2805 | context_desc->cmd_and_length = cpu_to_le32(cmd_len); |
2806 | |
2807 | buffer_info->time_stamp = jiffies; |
2808 | buffer_info->next_to_watch = i; |
2809 | |
2810 | if (unlikely(++i == tx_ring->count)) |
2811 | i = 0; |
2812 | |
2813 | tx_ring->next_to_use = i; |
2814 | |
2815 | return true; |
2816 | } |
2817 | |
2818 | #define E1000_MAX_TXD_PWR 12 |
2819 | #define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR) |
2820 | |
2821 | static int e1000_tx_map(struct e1000_adapter *adapter, |
2822 | struct e1000_tx_ring *tx_ring, |
2823 | struct sk_buff *skb, unsigned int first, |
2824 | unsigned int max_per_txd, unsigned int nr_frags, |
2825 | unsigned int mss) |
2826 | { |
2827 | struct e1000_hw *hw = &adapter->hw; |
2828 | struct pci_dev *pdev = adapter->pdev; |
2829 | struct e1000_tx_buffer *buffer_info; |
2830 | unsigned int len = skb_headlen(skb); |
2831 | unsigned int offset = 0, size, count = 0, i; |
2832 | unsigned int f, bytecount, segs; |
2833 | |
2834 | i = tx_ring->next_to_use; |
2835 | |
2836 | while (len) { |
2837 | buffer_info = &tx_ring->buffer_info[i]; |
2838 | size = min(len, max_per_txd); |
2839 | /* Workaround for Controller erratum -- |
2840 | * descriptor for non-tso packet in a linear SKB that follows a |
2841 | * tso gets written back prematurely before the data is fully |
2842 | * DMA'd to the controller |
2843 | */ |
2844 | if (!skb->data_len && tx_ring->last_tx_tso && |
2845 | !skb_is_gso(skb)) { |
2846 | tx_ring->last_tx_tso = false; |
2847 | size -= 4; |
2848 | } |
2849 | |
2850 | /* Workaround for premature desc write-backs |
2851 | * in TSO mode. Append 4-byte sentinel desc |
2852 | */ |
2853 | if (unlikely(mss && !nr_frags && size == len && size > 8)) |
2854 | size -= 4; |
2855 | /* work-around for errata 10 and it applies |
2856 | * to all controllers in PCI-X mode |
2857 | * The fix is to make sure that the first descriptor of a |
2858 | * packet is smaller than 2048 - 16 - 16 (or 2016) bytes |
2859 | */ |
2860 | if (unlikely((hw->bus_type == e1000_bus_type_pcix) && |
2861 | (size > 2015) && count == 0)) |
2862 | size = 2015; |
2863 | |
2864 | /* Workaround for potential 82544 hang in PCI-X. Avoid |
2865 | * terminating buffers within evenly-aligned dwords. |
2866 | */ |
2867 | if (unlikely(adapter->pcix_82544 && |
2868 | !((unsigned long)(skb->data + offset + size - 1) & 4) && |
2869 | size > 4)) |
2870 | size -= 4; |
2871 | |
2872 | buffer_info->length = size; |
2873 | /* set time_stamp *before* dma to help avoid a possible race */ |
2874 | buffer_info->time_stamp = jiffies; |
2875 | buffer_info->mapped_as_page = false; |
2876 | buffer_info->dma = dma_map_single(&pdev->dev, |
2877 | skb->data + offset, |
2878 | size, DMA_TO_DEVICE); |
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2880 | goto dma_error; |
2881 | buffer_info->next_to_watch = i; |
2882 | |
2883 | len -= size; |
2884 | offset += size; |
2885 | count++; |
2886 | if (len) { |
2887 | i++; |
2888 | if (unlikely(i == tx_ring->count)) |
2889 | i = 0; |
2890 | } |
2891 | } |
2892 | |
2893 | for (f = 0; f < nr_frags; f++) { |
2894 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; |
2895 | |
2896 | len = skb_frag_size(frag); |
2897 | offset = 0; |
2898 | |
2899 | while (len) { |
2900 | unsigned long bufend; |
2901 | i++; |
2902 | if (unlikely(i == tx_ring->count)) |
2903 | i = 0; |
2904 | |
2905 | buffer_info = &tx_ring->buffer_info[i]; |
2906 | size = min(len, max_per_txd); |
2907 | /* Workaround for premature desc write-backs |
2908 | * in TSO mode. Append 4-byte sentinel desc |
2909 | */ |
2910 | if (unlikely(mss && f == (nr_frags-1) && |
2911 | size == len && size > 8)) |
2912 | size -= 4; |
2913 | /* Workaround for potential 82544 hang in PCI-X. |
2914 | * Avoid terminating buffers within evenly-aligned |
2915 | * dwords. |
2916 | */ |
2917 | bufend = (unsigned long) |
2918 | page_to_phys(skb_frag_page(frag)); |
2919 | bufend += offset + size - 1; |
2920 | if (unlikely(adapter->pcix_82544 && |
2921 | !(bufend & 4) && |
2922 | size > 4)) |
2923 | size -= 4; |
2924 | |
2925 | buffer_info->length = size; |
2926 | buffer_info->time_stamp = jiffies; |
2927 | buffer_info->mapped_as_page = true; |
			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
							    offset, size,
							    DMA_TO_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2931 | goto dma_error; |
2932 | buffer_info->next_to_watch = i; |
2933 | |
2934 | len -= size; |
2935 | offset += size; |
2936 | count++; |
2937 | } |
2938 | } |
2939 | |
2940 | segs = skb_shinfo(skb)->gso_segs ?: 1; |
	/* each TSO segment repeats the headers, so count the header bytes
	 * once per extra segment on top of skb->len
	 */
2942 | bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; |
2943 | |
2944 | tx_ring->buffer_info[i].skb = skb; |
2945 | tx_ring->buffer_info[i].segs = segs; |
2946 | tx_ring->buffer_info[i].bytecount = bytecount; |
2947 | tx_ring->buffer_info[first].next_to_watch = i; |
2948 | |
2949 | return count; |
2950 | |
2951 | dma_error: |
2952 | dev_err(&pdev->dev, "TX DMA map failed\n" ); |
2953 | buffer_info->dma = 0; |
2954 | if (count) |
2955 | count--; |
2956 | |
2957 | while (count--) { |
2958 | if (i == 0) |
2959 | i += tx_ring->count; |
2960 | i--; |
2961 | buffer_info = &tx_ring->buffer_info[i]; |
		e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0);
2963 | } |
2964 | |
2965 | return 0; |
2966 | } |
2967 | |
2968 | static void e1000_tx_queue(struct e1000_adapter *adapter, |
2969 | struct e1000_tx_ring *tx_ring, int tx_flags, |
2970 | int count) |
2971 | { |
2972 | struct e1000_tx_desc *tx_desc = NULL; |
2973 | struct e1000_tx_buffer *buffer_info; |
2974 | u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; |
2975 | unsigned int i; |
2976 | |
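	/* the lower dword of each data descriptor carries command bits and
	 * the buffer length; the upper dword carries the POPTS offload bits
	 */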
2977 | if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { |
2978 | txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | |
2979 | E1000_TXD_CMD_TSE; |
2980 | txd_upper |= E1000_TXD_POPTS_TXSM << 8; |
2981 | |
2982 | if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) |
2983 | txd_upper |= E1000_TXD_POPTS_IXSM << 8; |
2984 | } |
2985 | |
2986 | if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) { |
2987 | txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; |
2988 | txd_upper |= E1000_TXD_POPTS_TXSM << 8; |
2989 | } |
2990 | |
2991 | if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { |
2992 | txd_lower |= E1000_TXD_CMD_VLE; |
2993 | txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); |
2994 | } |
2995 | |
2996 | if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) |
2997 | txd_lower &= ~(E1000_TXD_CMD_IFCS); |
2998 | |
2999 | i = tx_ring->next_to_use; |
3000 | |
3001 | while (count--) { |
3002 | buffer_info = &tx_ring->buffer_info[i]; |
3003 | tx_desc = E1000_TX_DESC(*tx_ring, i); |
3004 | tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); |
3005 | tx_desc->lower.data = |
3006 | cpu_to_le32(txd_lower | buffer_info->length); |
3007 | tx_desc->upper.data = cpu_to_le32(txd_upper); |
3008 | if (unlikely(++i == tx_ring->count)) |
3009 | i = 0; |
3010 | } |
3011 | |
3012 | tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); |
3013 | |
3014 | /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ |
3015 | if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) |
3016 | tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); |
3017 | |
3018 | /* Force memory writes to complete before letting h/w |
3019 | * know there are new descriptors to fetch. (Only |
3020 | * applicable for weak-ordered memory model archs, |
3021 | * such as IA-64). |
3022 | */ |
3023 | dma_wmb(); |
3024 | |
3025 | tx_ring->next_to_use = i; |
3026 | } |
3027 | |
3028 | /* 82547 workaround to avoid controller hang in half-duplex environment. |
3029 | * The workaround is to avoid queuing a large packet that would span |
3030 | * the internal Tx FIFO ring boundary by notifying the stack to resend |
3031 | * the packet at a later time. This gives the Tx FIFO an opportunity to |
3032 | * flush all packets. When that occurs, we reset the Tx FIFO pointers |
3033 | * to the beginning of the Tx FIFO. |
3034 | */ |
3035 | |
3036 | #define E1000_FIFO_HDR 0x10 |
3037 | #define E1000_82547_PAD_LEN 0x3E0 |
3038 | |
3039 | static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, |
3040 | struct sk_buff *skb) |
3041 | { |
3042 | u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; |
3043 | u32 skb_fifo_len = skb->len + E1000_FIFO_HDR; |
3044 | |
3045 | skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR); |
3046 | |
3047 | if (adapter->link_duplex != HALF_DUPLEX) |
3048 | goto no_fifo_stall_required; |
3049 | |
	if (atomic_read(&adapter->tx_fifo_stall))
3051 | return 1; |
3052 | |
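	/* stall if the padded frame would not fit before the FIFO wraps */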
3053 | if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { |
		atomic_set(&adapter->tx_fifo_stall, 1);
3055 | return 1; |
3056 | } |
3057 | |
3058 | no_fifo_stall_required: |
3059 | adapter->tx_fifo_head += skb_fifo_len; |
3060 | if (adapter->tx_fifo_head >= adapter->tx_fifo_size) |
3061 | adapter->tx_fifo_head -= adapter->tx_fifo_size; |
3062 | return 0; |
3063 | } |
3064 | |
3065 | static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) |
3066 | { |
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_tx_ring *tx_ring = adapter->tx_ring;

	netif_stop_queue(netdev);
3071 | /* Herbert's original patch had: |
3072 | * smp_mb__after_netif_stop_queue(); |
3073 | * but since that doesn't exist yet, just open code it. |
3074 | */ |
3075 | smp_mb(); |
3076 | |
3077 | /* We need to check again in a case another CPU has just |
3078 | * made room available. |
3079 | */ |
3080 | if (likely(E1000_DESC_UNUSED(tx_ring) < size)) |
3081 | return -EBUSY; |
3082 | |
3083 | /* A reprieve! */ |
3084 | netif_start_queue(dev: netdev); |
3085 | ++adapter->restart_queue; |
3086 | return 0; |
3087 | } |
3088 | |
3089 | static int e1000_maybe_stop_tx(struct net_device *netdev, |
3090 | struct e1000_tx_ring *tx_ring, int size) |
3091 | { |
3092 | if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) |
3093 | return 0; |
3094 | return __e1000_maybe_stop_tx(netdev, size); |
3095 | } |
3096 | |
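/* number of descriptors needed to cover S bytes when each descriptor
 * holds at most 2^X bytes, i.e. ceil(S / 2^X)
 */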
3097 | #define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X)) |
3098 | static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, |
3099 | struct net_device *netdev) |
3100 | { |
	struct e1000_adapter *adapter = netdev_priv(netdev);
3102 | struct e1000_hw *hw = &adapter->hw; |
3103 | struct e1000_tx_ring *tx_ring; |
3104 | unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; |
3105 | unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; |
3106 | unsigned int tx_flags = 0; |
3107 | unsigned int len = skb_headlen(skb); |
3108 | unsigned int nr_frags; |
3109 | unsigned int mss; |
3110 | int count = 0; |
3111 | int tso; |
3112 | unsigned int f; |
3113 | __be16 protocol = vlan_get_protocol(skb); |
3114 | |
3115 | /* This goes back to the question of how to logically map a Tx queue |
3116 | * to a flow. Right now, performance is impacted slightly negatively |
3117 | * if using multiple Tx queues. If the stack breaks away from a |
3118 | * single qdisc implementation, we can look at this again. |
3119 | */ |
3120 | tx_ring = adapter->tx_ring; |
3121 | |
3122 | /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN, |
3123 | * packets may get corrupted during padding by HW. |
3124 | * To WA this issue, pad all small packets manually. |
3125 | */ |
3126 | if (eth_skb_pad(skb)) |
3127 | return NETDEV_TX_OK; |
3128 | |
3129 | mss = skb_shinfo(skb)->gso_size; |
3130 | /* The controller does a simple calculation to |
3131 | * make sure there is enough room in the FIFO before |
3132 | * initiating the DMA for each buffer. The calc is: |
3133 | * 4 = ceil(buffer len/mss). To make sure we don't |
3134 | * overrun the FIFO, adjust the max buffer len if mss |
3135 | * drops. |
3136 | */ |
3137 | if (mss) { |
3138 | u8 hdr_len; |
3139 | max_per_txd = min(mss << 2, max_per_txd); |
		max_txd_pwr = fls(max_per_txd) - 1;
3141 | |
3142 | hdr_len = skb_tcp_all_headers(skb); |
3143 | if (skb->data_len && hdr_len == len) { |
3144 | switch (hw->mac_type) { |
3145 | case e1000_82544: { |
3146 | unsigned int pull_size; |
3147 | |
3148 | /* Make sure we have room to chop off 4 bytes, |
3149 | * and that the end alignment will work out to |
3150 | * this hardware's requirements |
3151 | * NOTE: this is a TSO only workaround |
3152 | * if end byte alignment not correct move us |
3153 | * into the next dword |
3154 | */ |
3155 | if ((unsigned long)(skb_tail_pointer(skb) - 1) |
3156 | & 4) |
3157 | break; |
3158 | pull_size = min((unsigned int)4, skb->data_len); |
				if (!__pskb_pull_tail(skb, pull_size)) {
					e_err(drv, "__pskb_pull_tail "
					      "failed.\n");
3162 | dev_kfree_skb_any(skb); |
3163 | return NETDEV_TX_OK; |
3164 | } |
3165 | len = skb_headlen(skb); |
3166 | break; |
3167 | } |
3168 | default: |
3169 | /* do nothing */ |
3170 | break; |
3171 | } |
3172 | } |
3173 | } |
3174 | |
3175 | /* reserve a descriptor for the offload context */ |
3176 | if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) |
3177 | count++; |
3178 | count++; |
3179 | |
3180 | /* Controller Erratum workaround */ |
3181 | if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) |
3182 | count++; |
3183 | |
3184 | count += TXD_USE_COUNT(len, max_txd_pwr); |
3185 | |
3186 | if (adapter->pcix_82544) |
3187 | count++; |
3188 | |
3189 | /* work-around for errata 10 and it applies to all controllers |
3190 | * in PCI-X mode, so add one more descriptor to the count |
3191 | */ |
3192 | if (unlikely((hw->bus_type == e1000_bus_type_pcix) && |
3193 | (len > 2015))) |
3194 | count++; |
3195 | |
3196 | nr_frags = skb_shinfo(skb)->nr_frags; |
3197 | for (f = 0; f < nr_frags; f++) |
3198 | count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), |
3199 | max_txd_pwr); |
3200 | if (adapter->pcix_82544) |
3201 | count += nr_frags; |
3202 | |
3203 | /* need: count + 2 desc gap to keep tail from touching |
3204 | * head, otherwise try next time |
3205 | */ |
3206 | if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) |
3207 | return NETDEV_TX_BUSY; |
3208 | |
3209 | if (unlikely((hw->mac_type == e1000_82547) && |
3210 | (e1000_82547_fifo_workaround(adapter, skb)))) { |
3211 | netif_stop_queue(netdev);
3212 | if (!test_bit(__E1000_DOWN, &adapter->flags))
3213 | schedule_delayed_work(&adapter->fifo_stall_task, 1);
3214 | return NETDEV_TX_BUSY; |
3215 | } |
3216 | |
3217 | if (skb_vlan_tag_present(skb)) { |
3218 | tx_flags |= E1000_TX_FLAGS_VLAN; |
3219 | tx_flags |= (skb_vlan_tag_get(skb) << |
3220 | E1000_TX_FLAGS_VLAN_SHIFT); |
3221 | } |
3222 | |
3223 | first = tx_ring->next_to_use; |
3224 | |
3225 | tso = e1000_tso(adapter, tx_ring, skb, protocol); |
3226 | if (tso < 0) { |
3227 | dev_kfree_skb_any(skb); |
3228 | return NETDEV_TX_OK; |
3229 | } |
3230 | |
3231 | if (likely(tso)) { |
3232 | if (likely(hw->mac_type != e1000_82544)) |
3233 | tx_ring->last_tx_tso = true; |
3234 | tx_flags |= E1000_TX_FLAGS_TSO; |
3235 | } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol))) |
3236 | tx_flags |= E1000_TX_FLAGS_CSUM; |
3237 | |
3238 | if (protocol == htons(ETH_P_IP)) |
3239 | tx_flags |= E1000_TX_FLAGS_IPV4; |
3240 | |
3241 | if (unlikely(skb->no_fcs)) |
3242 | tx_flags |= E1000_TX_FLAGS_NO_FCS; |
3243 | |
3244 | count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, |
3245 | nr_frags, mss); |
3246 | |
3247 | if (count) { |
3248 | /* The descriptors needed is higher than other Intel drivers |
3249 | * due to a number of workarounds. The breakdown is below: |
3250 | * Data descriptors: MAX_SKB_FRAGS + 1 |
3251 | * Context Descriptor: 1 |
3252 | * Keep head from touching tail: 2 |
3253 | * Workarounds: 3 |
3254 | */ |
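/* Editor's note: summing the breakdown above gives
 * (MAX_SKB_FRAGS + 1) data + 1 context + 2 gap + 3 workarounds
 * = MAX_SKB_FRAGS + 7, the constant used below.
 */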
3255 | int desc_needed = MAX_SKB_FRAGS + 7; |
3256 | |
3257 | netdev_sent_queue(netdev, skb->len);
3258 | skb_tx_timestamp(skb); |
3259 | |
3260 | e1000_tx_queue(adapter, tx_ring, tx_flags, count); |
3261 | |
3262 | /* 82544 potentially requires twice as many data descriptors |
3263 | * in order to guarantee buffers don't end on evenly-aligned |
3264 | * dwords |
3265 | */ |
3266 | if (adapter->pcix_82544) |
3267 | desc_needed += MAX_SKB_FRAGS + 1; |
3268 | |
3269 | /* Make sure there is space in the ring for the next send. */ |
3270 | e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
3271 | |
3272 | if (!netdev_xmit_more() ||
3273 | netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
3274 | writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
3275 | } |
3276 | } else { |
3277 | dev_kfree_skb_any(skb); |
3278 | tx_ring->buffer_info[first].time_stamp = 0; |
3279 | tx_ring->next_to_use = first; |
3280 | } |
3281 | |
3282 | return NETDEV_TX_OK; |
3283 | } |
3284 | |
3285 | #define NUM_REGS 38 /* 1 based count */ |
3286 | static void e1000_regdump(struct e1000_adapter *adapter) |
3287 | { |
3288 | struct e1000_hw *hw = &adapter->hw; |
3289 | u32 regs[NUM_REGS]; |
3290 | u32 *regs_buff = regs; |
3291 | int i = 0; |
3292 | |
3293 | static const char * const reg_name[] = { |
3294 | "CTRL", "STATUS",
3295 | "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3296 | "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3297 | "TIDV", "TXDCTL", "TADV", "TARC0",
3298 | "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3299 | "TXDCTL1", "TARC1",
3300 | "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3301 | "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3302 | "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3303 | }; |
3304 | |
3305 | regs_buff[0] = er32(CTRL); |
3306 | regs_buff[1] = er32(STATUS); |
3307 | |
3308 | regs_buff[2] = er32(RCTL); |
3309 | regs_buff[3] = er32(RDLEN); |
3310 | regs_buff[4] = er32(RDH); |
3311 | regs_buff[5] = er32(RDT); |
3312 | regs_buff[6] = er32(RDTR); |
3313 | |
3314 | regs_buff[7] = er32(TCTL); |
3315 | regs_buff[8] = er32(TDBAL); |
3316 | regs_buff[9] = er32(TDBAH); |
3317 | regs_buff[10] = er32(TDLEN); |
3318 | regs_buff[11] = er32(TDH); |
3319 | regs_buff[12] = er32(TDT); |
3320 | regs_buff[13] = er32(TIDV); |
3321 | regs_buff[14] = er32(TXDCTL); |
3322 | regs_buff[15] = er32(TADV); |
3323 | regs_buff[16] = er32(TARC0); |
3324 | |
3325 | regs_buff[17] = er32(TDBAL1); |
3326 | regs_buff[18] = er32(TDBAH1); |
3327 | regs_buff[19] = er32(TDLEN1); |
3328 | regs_buff[20] = er32(TDH1); |
3329 | regs_buff[21] = er32(TDT1); |
3330 | regs_buff[22] = er32(TXDCTL1); |
3331 | regs_buff[23] = er32(TARC1); |
3332 | regs_buff[24] = er32(CTRL_EXT); |
3333 | regs_buff[25] = er32(ERT); |
3334 | regs_buff[26] = er32(RDBAL0); |
3335 | regs_buff[27] = er32(RDBAH0); |
3336 | regs_buff[28] = er32(TDFH); |
3337 | regs_buff[29] = er32(TDFT); |
3338 | regs_buff[30] = er32(TDFHS); |
3339 | regs_buff[31] = er32(TDFTS); |
3340 | regs_buff[32] = er32(TDFPC); |
3341 | regs_buff[33] = er32(RDFH); |
3342 | regs_buff[34] = er32(RDFT); |
3343 | regs_buff[35] = er32(RDFHS); |
3344 | regs_buff[36] = er32(RDFTS); |
3345 | regs_buff[37] = er32(RDFPC); |
3346 | |
3347 | pr_info("Register dump\n");
3348 | for (i = 0; i < NUM_REGS; i++)
3349 | pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]);
3350 | } |
3351 | |
3352 | /* |
3353 | * e1000_dump: Print registers, tx ring and rx ring |
3354 | */ |
3355 | static void e1000_dump(struct e1000_adapter *adapter) |
3356 | { |
3357 | /* this code doesn't handle multiple rings */ |
3358 | struct e1000_tx_ring *tx_ring = adapter->tx_ring; |
3359 | struct e1000_rx_ring *rx_ring = adapter->rx_ring; |
3360 | int i; |
3361 | |
3362 | if (!netif_msg_hw(adapter)) |
3363 | return; |
3364 | |
3365 | /* Print Registers */ |
3366 | e1000_regdump(adapter); |
3367 | |
3368 | /* transmit dump */ |
3369 | pr_info("TX Desc ring0 dump\n");
3370 | |
3371 | /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) |
3372 | * |
3373 | * Legacy Transmit Descriptor |
3374 | * +--------------------------------------------------------------+ |
3375 | * 0 | Buffer Address [63:0] (Reserved on Write Back) | |
3376 | * +--------------------------------------------------------------+ |
3377 | * 8 | Special | CSS | Status | CMD | CSO | Length | |
3378 | * +--------------------------------------------------------------+ |
3379 | * 63 48 47 36 35 32 31 24 23 16 15 0 |
3380 | * |
3381 | * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload |
3382 | * 63 48 47 40 39 32 31 16 15 8 7 0 |
3383 | * +----------------------------------------------------------------+ |
3384 | * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | |
3385 | * +----------------------------------------------------------------+ |
3386 | * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | |
3387 | * +----------------------------------------------------------------+ |
3388 | * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 |
3389 | * |
3390 | * Extended Data Descriptor (DTYP=0x1) |
3391 | * +----------------------------------------------------------------+ |
3392 | * 0 | Buffer Address [63:0] | |
3393 | * +----------------------------------------------------------------+ |
3394 | * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | |
3395 | * +----------------------------------------------------------------+ |
3396 | * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 |
3397 | */ |
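/* Editor's note: the dump loop below tells extended data descriptors
 * ('d') apart from context descriptors ('c') by testing bit 20 of the
 * second quadword, the low bit of the DTYP field in the layouts above.
 */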
3398 | pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n");
3399 | pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n");
3400 | |
3401 | if (!netif_msg_tx_done(adapter)) |
3402 | goto rx_ring_summary; |
3403 | |
3404 | for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { |
3405 | struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); |
3406 | struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i]; |
3407 | struct my_u { __le64 a; __le64 b; }; |
3408 | struct my_u *u = (struct my_u *)tx_desc; |
3409 | const char *type; |
3410 | |
3411 | if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) |
3412 | type = "NTC/U";
3413 | else if (i == tx_ring->next_to_use)
3414 | type = "NTU";
3415 | else if (i == tx_ring->next_to_clean)
3416 | type = "NTC";
3417 | else
3418 | type = "";
3419 | |
3420 | pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n",
3421 | ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i, |
3422 | le64_to_cpu(u->a), le64_to_cpu(u->b), |
3423 | (u64)buffer_info->dma, buffer_info->length, |
3424 | buffer_info->next_to_watch, |
3425 | (u64)buffer_info->time_stamp, buffer_info->skb, type); |
3426 | } |
3427 | |
3428 | rx_ring_summary: |
3429 | /* receive dump */ |
3430 | pr_info("\nRX Desc ring dump\n");
3431 | |
3432 | /* Legacy Receive Descriptor Format |
3433 | * |
3434 | * +-----------------------------------------------------+ |
3435 | * | Buffer Address [63:0] | |
3436 | * +-----------------------------------------------------+ |
3437 | * | VLAN Tag | Errors | Status 0 | Packet csum | Length | |
3438 | * +-----------------------------------------------------+ |
3439 | * 63 48 47 40 39 32 31 16 15 0 |
3440 | */ |
3441 | pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n");
3442 | |
3443 | if (!netif_msg_rx_status(adapter)) |
3444 | goto exit; |
3445 | |
3446 | for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { |
3447 | struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); |
3448 | struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i]; |
3449 | struct my_u { __le64 a; __le64 b; }; |
3450 | struct my_u *u = (struct my_u *)rx_desc; |
3451 | const char *type; |
3452 | |
3453 | if (i == rx_ring->next_to_use) |
3454 | type = "NTU";
3455 | else if (i == rx_ring->next_to_clean)
3456 | type = "NTC";
3457 | else
3458 | type = "";
3459 | |
3460 | pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n",
3461 | i, le64_to_cpu(u->a), le64_to_cpu(u->b), |
3462 | (u64)buffer_info->dma, buffer_info->rxbuf.data, type); |
3463 | } /* for */ |
3464 | |
3465 | /* dump the descriptor caches */ |
3466 | /* rx */ |
3467 | pr_info("Rx descriptor cache in 64bit format\n");
3468 | for (i = 0x6000; i <= 0x63FF; i += 0x10) {
3469 | pr_info("R%04X: %08X|%08X %08X|%08X\n",
3470 | i, |
3471 | readl(adapter->hw.hw_addr + i+4), |
3472 | readl(adapter->hw.hw_addr + i), |
3473 | readl(adapter->hw.hw_addr + i+12), |
3474 | readl(adapter->hw.hw_addr + i+8)); |
3475 | } |
3476 | /* tx */ |
3477 | pr_info("Tx descriptor cache in 64bit format\n");
3478 | for (i = 0x7000; i <= 0x73FF; i += 0x10) {
3479 | pr_info("T%04X: %08X|%08X %08X|%08X\n",
3480 | i, |
3481 | readl(adapter->hw.hw_addr + i+4), |
3482 | readl(adapter->hw.hw_addr + i), |
3483 | readl(adapter->hw.hw_addr + i+12), |
3484 | readl(adapter->hw.hw_addr + i+8)); |
3485 | } |
3486 | exit: |
3487 | return; |
3488 | } |
3489 | |
3490 | /** |
3491 | * e1000_tx_timeout - Respond to a Tx Hang |
3492 | * @netdev: network interface device structure |
3493 | * @txqueue: number of the Tx queue that hung (unused) |
3494 | **/ |
3495 | static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue) |
3496 | { |
3497 | struct e1000_adapter *adapter = netdev_priv(netdev);
3498 | |
3499 | /* Do the reset outside of interrupt context */ |
3500 | adapter->tx_timeout_count++; |
3501 | schedule_work(&adapter->reset_task);
3502 | } |
3503 | |
3504 | static void e1000_reset_task(struct work_struct *work) |
3505 | { |
3506 | struct e1000_adapter *adapter = |
3507 | container_of(work, struct e1000_adapter, reset_task); |
3508 | |
3509 | e_err(drv, "Reset adapter\n");
3510 | e1000_reinit_locked(adapter); |
3511 | } |
3512 | |
3513 | /** |
3514 | * e1000_change_mtu - Change the Maximum Transfer Unit |
3515 | * @netdev: network interface device structure |
3516 | * @new_mtu: new value for maximum frame size |
3517 | * |
3518 | * Returns 0 on success, negative on failure |
3519 | **/ |
3520 | static int e1000_change_mtu(struct net_device *netdev, int new_mtu) |
3521 | { |
3522 | struct e1000_adapter *adapter = netdev_priv(netdev);
3523 | struct e1000_hw *hw = &adapter->hw; |
3524 | int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; |
3525 | |
3526 | /* Adapter-specific max frame size limits. */ |
3527 | switch (hw->mac_type) { |
3528 | case e1000_undefined ... e1000_82542_rev2_1: |
3529 | if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { |
3530 | e_err(probe, "Jumbo Frames not supported.\n");
3531 | return -EINVAL; |
3532 | } |
3533 | break; |
3534 | default: |
3535 | /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */ |
3536 | break; |
3537 | } |
3538 | |
3539 | while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3540 | msleep(1);
3541 | /* e1000_down has a dependency on max_frame_size */ |
3542 | hw->max_frame_size = max_frame; |
3543 | if (netif_running(netdev)) {
3544 | /* prevent buffers from being reallocated */ |
3545 | adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers; |
3546 | e1000_down(adapter); |
3547 | } |
3548 | |
3549 | /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN |
3550 | * means we reserve 2 more, this pushes us to allocate from the next |
3551 | * larger slab size. |
3552 | * i.e. RXBUFFER_2048 --> size-4096 slab |
3553 | * however with the new *_jumbo_rx* routines, jumbo receives will use |
3554 | * fragmented skbs |
3555 | */ |
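/* Editor's note: an illustrative walk through the selection below,
 * assuming 4096-byte pages: a standard 1500-byte MTU gives max_frame =
 * 1518, which fits E1000_RXBUFFER_2048; a 9000-byte jumbo MTU gives
 * max_frame = 9018, which falls through to the #elif branch, sets
 * rx_buffer_len = PAGE_SIZE, and relies on the jumbo (page-based)
 * receive routines to chain buffers.
 */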
3556 | |
3557 | if (max_frame <= E1000_RXBUFFER_2048) |
3558 | adapter->rx_buffer_len = E1000_RXBUFFER_2048; |
3559 | else |
3560 | #if (PAGE_SIZE >= E1000_RXBUFFER_16384) |
3561 | adapter->rx_buffer_len = E1000_RXBUFFER_16384; |
3562 | #elif (PAGE_SIZE >= E1000_RXBUFFER_4096) |
3563 | adapter->rx_buffer_len = PAGE_SIZE; |
3564 | #endif |
3565 | |
3566 | /* adjust allocation if LPE protects us, and we aren't using SBP */ |
3567 | if (!hw->tbi_compatibility_on && |
3568 | ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) || |
3569 | (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) |
3570 | adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; |
3571 | |
3572 | netdev_dbg(netdev, "changing MTU from %d to %d\n",
3573 | netdev->mtu, new_mtu); |
3574 | netdev->mtu = new_mtu; |
3575 | |
3576 | if (netif_running(netdev))
3577 | e1000_up(adapter); |
3578 | else |
3579 | e1000_reset(adapter); |
3580 | |
3581 | clear_bit(__E1000_RESETTING, &adapter->flags);
3582 | |
3583 | return 0; |
3584 | } |
3585 | |
3586 | /** |
3587 | * e1000_update_stats - Update the board statistics counters |
3588 | * @adapter: board private structure |
3589 | **/ |
3590 | void e1000_update_stats(struct e1000_adapter *adapter) |
3591 | { |
3592 | struct net_device *netdev = adapter->netdev; |
3593 | struct e1000_hw *hw = &adapter->hw; |
3594 | struct pci_dev *pdev = adapter->pdev; |
3595 | unsigned long flags; |
3596 | u16 phy_tmp; |
3597 | |
3598 | #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF |
3599 | |
3600 | /* Prevent stats update while adapter is being reset, or if the pci |
3601 | * connection is down. |
3602 | */ |
3603 | if (adapter->link_speed == 0) |
3604 | return; |
3605 | if (pci_channel_offline(pdev)) |
3606 | return; |
3607 | |
3608 | spin_lock_irqsave(&adapter->stats_lock, flags); |
3609 | |
3610 | /* these counters are modified from e1000_tbi_adjust_stats, |
3611 | * called from the interrupt context, so they must only |
3612 | * be written while holding adapter->stats_lock |
3613 | */ |
3614 | |
3615 | adapter->stats.crcerrs += er32(CRCERRS); |
3616 | adapter->stats.gprc += er32(GPRC); |
3617 | adapter->stats.gorcl += er32(GORCL); |
3618 | adapter->stats.gorch += er32(GORCH); |
3619 | adapter->stats.bprc += er32(BPRC); |
3620 | adapter->stats.mprc += er32(MPRC); |
3621 | adapter->stats.roc += er32(ROC); |
3622 | |
3623 | adapter->stats.prc64 += er32(PRC64); |
3624 | adapter->stats.prc127 += er32(PRC127); |
3625 | adapter->stats.prc255 += er32(PRC255); |
3626 | adapter->stats.prc511 += er32(PRC511); |
3627 | adapter->stats.prc1023 += er32(PRC1023); |
3628 | adapter->stats.prc1522 += er32(PRC1522); |
3629 | |
3630 | adapter->stats.symerrs += er32(SYMERRS); |
3631 | adapter->stats.mpc += er32(MPC); |
3632 | adapter->stats.scc += er32(SCC); |
3633 | adapter->stats.ecol += er32(ECOL); |
3634 | adapter->stats.mcc += er32(MCC); |
3635 | adapter->stats.latecol += er32(LATECOL); |
3636 | adapter->stats.dc += er32(DC); |
3637 | adapter->stats.sec += er32(SEC); |
3638 | adapter->stats.rlec += er32(RLEC); |
3639 | adapter->stats.xonrxc += er32(XONRXC); |
3640 | adapter->stats.xontxc += er32(XONTXC); |
3641 | adapter->stats.xoffrxc += er32(XOFFRXC); |
3642 | adapter->stats.xofftxc += er32(XOFFTXC); |
3643 | adapter->stats.fcruc += er32(FCRUC); |
3644 | adapter->stats.gptc += er32(GPTC); |
3645 | adapter->stats.gotcl += er32(GOTCL); |
3646 | adapter->stats.gotch += er32(GOTCH); |
3647 | adapter->stats.rnbc += er32(RNBC); |
3648 | adapter->stats.ruc += er32(RUC); |
3649 | adapter->stats.rfc += er32(RFC); |
3650 | adapter->stats.rjc += er32(RJC); |
3651 | adapter->stats.torl += er32(TORL); |
3652 | adapter->stats.torh += er32(TORH); |
3653 | adapter->stats.totl += er32(TOTL); |
3654 | adapter->stats.toth += er32(TOTH); |
3655 | adapter->stats.tpr += er32(TPR); |
3656 | |
3657 | adapter->stats.ptc64 += er32(PTC64); |
3658 | adapter->stats.ptc127 += er32(PTC127); |
3659 | adapter->stats.ptc255 += er32(PTC255); |
3660 | adapter->stats.ptc511 += er32(PTC511); |
3661 | adapter->stats.ptc1023 += er32(PTC1023); |
3662 | adapter->stats.ptc1522 += er32(PTC1522); |
3663 | |
3664 | adapter->stats.mptc += er32(MPTC); |
3665 | adapter->stats.bptc += er32(BPTC); |
3666 | |
3667 | /* used for adaptive IFS */ |
3668 | |
3669 | hw->tx_packet_delta = er32(TPT); |
3670 | adapter->stats.tpt += hw->tx_packet_delta; |
3671 | hw->collision_delta = er32(COLC); |
3672 | adapter->stats.colc += hw->collision_delta; |
3673 | |
3674 | if (hw->mac_type >= e1000_82543) { |
3675 | adapter->stats.algnerrc += er32(ALGNERRC); |
3676 | adapter->stats.rxerrc += er32(RXERRC); |
3677 | adapter->stats.tncrs += er32(TNCRS); |
3678 | adapter->stats.cexterr += er32(CEXTERR); |
3679 | adapter->stats.tsctc += er32(TSCTC); |
3680 | adapter->stats.tsctfc += er32(TSCTFC); |
3681 | } |
3682 | |
3683 | /* Fill out the OS statistics structure */ |
3684 | netdev->stats.multicast = adapter->stats.mprc; |
3685 | netdev->stats.collisions = adapter->stats.colc; |
3686 | |
3687 | /* Rx Errors */ |
3688 | |
3689 | /* RLEC on some newer hardware can be incorrect so build |
3690 | * our own version based on RUC and ROC |
3691 | */ |
3692 | netdev->stats.rx_errors = adapter->stats.rxerrc + |
3693 | adapter->stats.crcerrs + adapter->stats.algnerrc + |
3694 | adapter->stats.ruc + adapter->stats.roc + |
3695 | adapter->stats.cexterr; |
3696 | adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc; |
3697 | netdev->stats.rx_length_errors = adapter->stats.rlerrc; |
3698 | netdev->stats.rx_crc_errors = adapter->stats.crcerrs; |
3699 | netdev->stats.rx_frame_errors = adapter->stats.algnerrc; |
3700 | netdev->stats.rx_missed_errors = adapter->stats.mpc; |
3701 | |
3702 | /* Tx Errors */ |
3703 | adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol; |
3704 | netdev->stats.tx_errors = adapter->stats.txerrc; |
3705 | netdev->stats.tx_aborted_errors = adapter->stats.ecol; |
3706 | netdev->stats.tx_window_errors = adapter->stats.latecol; |
3707 | netdev->stats.tx_carrier_errors = adapter->stats.tncrs; |
3708 | if (hw->bad_tx_carr_stats_fd && |
3709 | adapter->link_duplex == FULL_DUPLEX) { |
3710 | netdev->stats.tx_carrier_errors = 0; |
3711 | adapter->stats.tncrs = 0; |
3712 | } |
3713 | |
3714 | /* Tx Dropped needs to be maintained elsewhere */ |
3715 | |
3716 | /* Phy Stats */ |
3717 | if (hw->media_type == e1000_media_type_copper) { |
3718 | if ((adapter->link_speed == SPEED_1000) && |
3719 | (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3720 | phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; |
3721 | adapter->phy_stats.idle_errors += phy_tmp; |
3722 | } |
3723 | |
3724 | if ((hw->mac_type <= e1000_82546) && |
3725 | (hw->phy_type == e1000_phy_m88) && |
3726 | !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3727 | adapter->phy_stats.receive_errors += phy_tmp; |
3728 | } |
3729 | |
3730 | /* Management Stats */ |
3731 | if (hw->has_smbus) { |
3732 | adapter->stats.mgptc += er32(MGTPTC); |
3733 | adapter->stats.mgprc += er32(MGTPRC); |
3734 | adapter->stats.mgpdc += er32(MGTPDC); |
3735 | } |
3736 | |
3737 | spin_unlock_irqrestore(&adapter->stats_lock, flags);
3738 | } |
3739 | |
3740 | /** |
3741 | * e1000_intr - Interrupt Handler |
3742 | * @irq: interrupt number |
3743 | * @data: pointer to a network interface device structure |
3744 | **/ |
3745 | static irqreturn_t e1000_intr(int irq, void *data) |
3746 | { |
3747 | struct net_device *netdev = data; |
3748 | struct e1000_adapter *adapter = netdev_priv(netdev);
3749 | struct e1000_hw *hw = &adapter->hw; |
3750 | u32 icr = er32(ICR); |
3751 | |
3752 | if (unlikely((!icr))) |
3753 | return IRQ_NONE; /* Not our interrupt */ |
3754 | |
3755 | /* we might have caused the interrupt, but the above |
3756 | * read cleared it, and just in case the driver is |
3757 | * down there is nothing to do so return handled |
3758 | */ |
3759 | if (unlikely(test_bit(__E1000_DOWN, &adapter->flags))) |
3760 | return IRQ_HANDLED; |
3761 | |
3762 | if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { |
3763 | hw->get_link_status = 1; |
3764 | /* guard against interrupt when we're going down */ |
3765 | if (!test_bit(__E1000_DOWN, &adapter->flags)) |
3766 | schedule_delayed_work(&adapter->watchdog_task, 1);
3767 | } |
3768 | |
3769 | /* disable interrupts, without the synchronize_irq bit */ |
3770 | ew32(IMC, ~0); |
3771 | E1000_WRITE_FLUSH(); |
3772 | |
3773 | if (likely(napi_schedule_prep(&adapter->napi))) { |
3774 | adapter->total_tx_bytes = 0; |
3775 | adapter->total_tx_packets = 0; |
3776 | adapter->total_rx_bytes = 0; |
3777 | adapter->total_rx_packets = 0; |
3778 | __napi_schedule(&adapter->napi);
3779 | } else { |
3780 | /* this really should not happen! if it does it is basically a |
3781 | * bug, but not a hard error, so enable ints and continue |
3782 | */ |
3783 | if (!test_bit(__E1000_DOWN, &adapter->flags)) |
3784 | e1000_irq_enable(adapter); |
3785 | } |
3786 | |
3787 | return IRQ_HANDLED; |
3788 | } |
3789 | |
3790 | /** |
3791 | * e1000_clean - NAPI Rx polling callback |
3792 | * @napi: napi struct containing references to driver info |
3793 | * @budget: budget given to driver for receive packets |
3794 | **/ |
3795 | static int e1000_clean(struct napi_struct *napi, int budget) |
3796 | { |
3797 | struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, |
3798 | napi); |
3799 | int tx_clean_complete = 0, work_done = 0; |
3800 | |
3801 | tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3802 | |
3803 | adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget); |
3804 | |
3805 | if (!tx_clean_complete || work_done == budget) |
3806 | return budget; |
3807 | |
3808 | /* Exit the polling mode, but don't re-enable interrupts if stack might |
3809 | * poll us due to busy-polling |
3810 | */ |
3811 | if (likely(napi_complete_done(napi, work_done))) { |
3812 | if (likely(adapter->itr_setting & 3)) |
3813 | e1000_set_itr(adapter); |
3814 | if (!test_bit(__E1000_DOWN, &adapter->flags)) |
3815 | e1000_irq_enable(adapter); |
3816 | } |
3817 | |
3818 | return work_done; |
3819 | } |
3820 | |
3821 | /** |
3822 | * e1000_clean_tx_irq - Reclaim resources after transmit completes |
3823 | * @adapter: board private structure |
3824 | * @tx_ring: ring to clean |
3825 | **/ |
3826 | static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, |
3827 | struct e1000_tx_ring *tx_ring) |
3828 | { |
3829 | struct e1000_hw *hw = &adapter->hw; |
3830 | struct net_device *netdev = adapter->netdev; |
3831 | struct e1000_tx_desc *tx_desc, *eop_desc; |
3832 | struct e1000_tx_buffer *buffer_info; |
3833 | unsigned int i, eop; |
3834 | unsigned int count = 0; |
3835 | unsigned int total_tx_bytes = 0, total_tx_packets = 0; |
3836 | unsigned int bytes_compl = 0, pkts_compl = 0; |
3837 | |
3838 | i = tx_ring->next_to_clean; |
3839 | eop = tx_ring->buffer_info[i].next_to_watch; |
3840 | eop_desc = E1000_TX_DESC(*tx_ring, eop); |
3841 | |
3842 | while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && |
3843 | (count < tx_ring->count)) { |
3844 | bool cleaned = false; |
3845 | dma_rmb(); /* read buffer_info after eop_desc */ |
3846 | for ( ; !cleaned; count++) { |
3847 | tx_desc = E1000_TX_DESC(*tx_ring, i); |
3848 | buffer_info = &tx_ring->buffer_info[i]; |
3849 | cleaned = (i == eop); |
3850 | |
3851 | if (cleaned) { |
3852 | total_tx_packets += buffer_info->segs; |
3853 | total_tx_bytes += buffer_info->bytecount; |
3854 | if (buffer_info->skb) { |
3855 | bytes_compl += buffer_info->skb->len; |
3856 | pkts_compl++; |
3857 | } |
3858 | |
3859 | } |
3860 | e1000_unmap_and_free_tx_resource(adapter, buffer_info, |
3861 | 64);
3862 | tx_desc->upper.data = 0; |
3863 | |
3864 | if (unlikely(++i == tx_ring->count)) |
3865 | i = 0; |
3866 | } |
3867 | |
3868 | eop = tx_ring->buffer_info[i].next_to_watch; |
3869 | eop_desc = E1000_TX_DESC(*tx_ring, eop); |
3870 | } |
3871 | |
3872 | /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame, |
3873 | * which will reuse the cleaned buffers. |
3874 | */ |
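/* Editor's note: the release store below is assumed to pair with an
 * acquire load of next_to_clean inside E1000_DESC_UNUSED() on the
 * transmit side, so a sender that sees the new index also sees the
 * buffer_info entries cleaned above.
 */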
3875 | smp_store_release(&tx_ring->next_to_clean, i); |
3876 | |
3877 | netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3878 | |
3879 | #define TX_WAKE_THRESHOLD 32 |
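/* Editor's note: E1000_DESC_UNUSED() (from e1000.h) reports free ring
 * slots, roughly (clean > use ? 0 : count) + clean - use - 1; waking
 * only once at least 32 slots are free avoids thrashing the queue
 * between stopped and running on every reclaimed descriptor.
 */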
3880 | if (unlikely(count && netif_carrier_ok(netdev) && |
3881 | E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { |
3882 | /* Make sure that anybody stopping the queue after this |
3883 | * sees the new next_to_clean. |
3884 | */ |
3885 | smp_mb(); |
3886 | |
3887 | if (netif_queue_stopped(netdev) &&
3888 | !(test_bit(__E1000_DOWN, &adapter->flags))) {
3889 | netif_wake_queue(netdev);
3890 | ++adapter->restart_queue; |
3891 | } |
3892 | } |
3893 | |
3894 | if (adapter->detect_tx_hung) { |
3895 | /* Detect a transmit hang in hardware, this serializes the |
3896 | * check with the clearing of time_stamp and movement of i |
3897 | */ |
3898 | adapter->detect_tx_hung = false; |
3899 | if (tx_ring->buffer_info[eop].time_stamp && |
3900 | time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + |
3901 | (adapter->tx_timeout_factor * HZ)) && |
3902 | !(er32(STATUS) & E1000_STATUS_TXOFF)) { |
3903 | |
3904 | /* detected Tx unit hang */ |
3905 | e_err(drv, "Detected Tx Unit Hang\n" |
3906 | " Tx Queue <%lu>\n" |
3907 | " TDH <%x>\n" |
3908 | " TDT <%x>\n" |
3909 | " next_to_use <%x>\n" |
3910 | " next_to_clean <%x>\n" |
3911 | "buffer_info[next_to_clean]\n" |
3912 | " time_stamp <%lx>\n" |
3913 | " next_to_watch <%x>\n" |
3914 | " jiffies <%lx>\n" |
3915 | " next_to_watch.status <%x>\n",
3916 | (unsigned long)(tx_ring - adapter->tx_ring), |
3917 | readl(hw->hw_addr + tx_ring->tdh), |
3918 | readl(hw->hw_addr + tx_ring->tdt), |
3919 | tx_ring->next_to_use, |
3920 | tx_ring->next_to_clean, |
3921 | tx_ring->buffer_info[eop].time_stamp, |
3922 | eop, |
3923 | jiffies, |
3924 | eop_desc->upper.fields.status); |
3925 | e1000_dump(adapter); |
3926 | netif_stop_queue(netdev);
3927 | } |
3928 | } |
3929 | adapter->total_tx_bytes += total_tx_bytes; |
3930 | adapter->total_tx_packets += total_tx_packets; |
3931 | netdev->stats.tx_bytes += total_tx_bytes; |
3932 | netdev->stats.tx_packets += total_tx_packets; |
3933 | return count < tx_ring->count; |
3934 | } |
3935 | |
3936 | /** |
3937 | * e1000_rx_checksum - Receive Checksum Offload for 82543 |
3938 | * @adapter: board private structure |
3939 | * @status_err: receive descriptor status and error fields |
3940 | * @csum: receive descriptor csum field |
3941 | * @skb: socket buffer with received data |
3942 | **/ |
3943 | static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, |
3944 | u32 csum, struct sk_buff *skb) |
3945 | { |
3946 | struct e1000_hw *hw = &adapter->hw; |
3947 | u16 status = (u16)status_err; |
3948 | u8 errors = (u8)(status_err >> 24); |
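/* Editor's note: callers pack the descriptor's 8-bit error byte into
 * bits 31:24 of status_err (status | errors << 24); the two casts
 * above unpack that same layout.
 */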
3949 | |
3950 | skb_checksum_none_assert(skb); |
3951 | |
3952 | /* 82543 or newer only */ |
3953 | if (unlikely(hw->mac_type < e1000_82543)) |
3954 | return; |
3955 | /* Ignore Checksum bit is set */ |
3956 | if (unlikely(status & E1000_RXD_STAT_IXSM)) |
3957 | return; |
3958 | /* TCP/UDP checksum error bit is set */ |
3959 | if (unlikely(errors & E1000_RXD_ERR_TCPE)) { |
3960 | /* let the stack verify checksum errors */ |
3961 | adapter->hw_csum_err++; |
3962 | return; |
3963 | } |
3964 | /* TCP/UDP Checksum has not been calculated */ |
3965 | if (!(status & E1000_RXD_STAT_TCPCS)) |
3966 | return; |
3967 | |
3968 | /* It must be a TCP or UDP packet with a valid checksum,
3969 | * since the TCPCS-clear case already returned above
3970 | */
3971 | skb->ip_summed = CHECKSUM_UNNECESSARY;
3972 | adapter->hw_csum_good++;
3974 | } |
3975 | |
3976 | /** |
3977 | * e1000_consume_page - helper function for jumbo Rx path |
3978 | * @bi: software descriptor shadow data |
3979 | * @skb: skb being modified |
3980 | * @length: length of data being added |
3981 | **/ |
3982 | static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb, |
3983 | u16 length) |
3984 | { |
3985 | bi->rxbuf.page = NULL; |
3986 | skb->len += length; |
3987 | skb->data_len += length; |
3988 | skb->truesize += PAGE_SIZE; |
3989 | } |
3990 | |
3991 | /** |
3992 | * e1000_receive_skb - helper function to handle rx indications |
3993 | * @adapter: board private structure |
3994 | * @status: descriptor status field as written by hardware |
3995 | * @vlan: descriptor vlan field as written by hardware (no le/be conversion) |
3996 | * @skb: pointer to sk_buff to be indicated to stack |
3997 | */ |
3998 | static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status, |
3999 | __le16 vlan, struct sk_buff *skb) |
4000 | { |
4001 | skb->protocol = eth_type_trans(skb, adapter->netdev);
4002 | |
4003 | if (status & E1000_RXD_STAT_VP) { |
4004 | u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; |
4005 | |
4006 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4007 | } |
4008 | napi_gro_receive(&adapter->napi, skb);
4009 | } |
4010 | |
4011 | /** |
4012 | * e1000_tbi_adjust_stats |
4013 | * @hw: Struct containing variables accessed by shared code |
4014 | * @stats: point to stats struct |
4015 | * @frame_len: The length of the frame in question |
4016 | * @mac_addr: The Ethernet destination address of the frame in question |
4017 | * |
4018 | * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT |
4019 | */ |
4020 | static void e1000_tbi_adjust_stats(struct e1000_hw *hw, |
4021 | struct e1000_hw_stats *stats, |
4022 | u32 frame_len, const u8 *mac_addr) |
4023 | { |
4024 | u64 carry_bit; |
4025 | |
4026 | /* First adjust the frame length. */ |
4027 | frame_len--; |
4028 | /* We need to adjust the statistics counters, since the hardware |
4029 | * counters overcount this packet as a CRC error and undercount |
4030 | * the packet as a good packet |
4031 | */ |
4032 | /* This packet should not be counted as a CRC error. */ |
4033 | stats->crcerrs--; |
4034 | /* This packet does count as a Good Packet Received. */ |
4035 | stats->gprc++; |
4036 | |
4037 | /* Adjust the Good Octets received counters */ |
4038 | carry_bit = 0x80000000 & stats->gorcl; |
4039 | stats->gorcl += frame_len; |
4040 | /* If the high bit of Gorcl (the low 32 bits of the Good Octets |
4041 | * Received Count) was one before the addition, |
4042 | * AND it is zero after, then we lost the carry out, |
4043 | * need to add one to Gorch (Good Octets Received Count High). |
4044 | * This could be simplified if all environments supported |
4045 | * 64-bit integers. |
4046 | */ |
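/* Editor's note: a worked example of the carry check (values assumed):
 * gorcl = 0xFFFFFFF0 has bit 31 set, so carry_bit != 0; adding
 * frame_len = 0x20 wraps the low word to 0x00000010, whose bit 31 is
 * clear, so the lost carry is restored by incrementing gorch below.
 */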
4047 | if (carry_bit && ((stats->gorcl & 0x80000000) == 0)) |
4048 | stats->gorch++; |
4049 | /* Is this a broadcast or multicast? Check broadcast first, |
4050 | * since the test for a multicast frame will test positive on |
4051 | * a broadcast frame. |
4052 | */ |
4053 | if (is_broadcast_ether_addr(mac_addr))
4054 | stats->bprc++;
4055 | else if (is_multicast_ether_addr(mac_addr))
4056 | stats->mprc++; |
4057 | |
4058 | if (frame_len == hw->max_frame_size) { |
4059 | /* In this case, the hardware has overcounted the number of |
4060 | * oversize frames. |
4061 | */ |
4062 | if (stats->roc > 0) |
4063 | stats->roc--; |
4064 | } |
4065 | |
4066 | /* Adjust the bin counters when the extra byte put the frame in the |
4067 | * wrong bin. Remember that the frame_len was adjusted above. |
4068 | */ |
4069 | if (frame_len == 64) { |
4070 | stats->prc64++; |
4071 | stats->prc127--; |
4072 | } else if (frame_len == 127) { |
4073 | stats->prc127++; |
4074 | stats->prc255--; |
4075 | } else if (frame_len == 255) { |
4076 | stats->prc255++; |
4077 | stats->prc511--; |
4078 | } else if (frame_len == 511) { |
4079 | stats->prc511++; |
4080 | stats->prc1023--; |
4081 | } else if (frame_len == 1023) { |
4082 | stats->prc1023++; |
4083 | stats->prc1522--; |
4084 | } else if (frame_len == 1522) { |
4085 | stats->prc1522++; |
4086 | } |
4087 | } |
4088 | |
4089 | static bool e1000_tbi_should_accept(struct e1000_adapter *adapter, |
4090 | u8 status, u8 errors, |
4091 | u32 length, const u8 *data) |
4092 | { |
4093 | struct e1000_hw *hw = &adapter->hw; |
4094 | u8 last_byte = *(data + length - 1); |
4095 | |
4096 | if (TBI_ACCEPT(hw, status, errors, length, last_byte)) { |
4097 | unsigned long irq_flags; |
4098 | |
4099 | spin_lock_irqsave(&adapter->stats_lock, irq_flags); |
4100 | e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
4101 | spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
4102 | |
4103 | return true; |
4104 | } |
4105 | |
4106 | return false; |
4107 | } |
4108 | |
4109 | static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter, |
4110 | unsigned int bufsz) |
4111 | { |
4112 | struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
4113 | |
4114 | if (unlikely(!skb)) |
4115 | adapter->alloc_rx_buff_failed++; |
4116 | return skb; |
4117 | } |
4118 | |
4119 | /** |
4120 | * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy |
4121 | * @adapter: board private structure |
4122 | * @rx_ring: ring to clean |
4123 | * @work_done: amount of napi work completed this call |
4124 | * @work_to_do: max amount of work allowed for this call to do |
4125 | * |
4126 | * the return value indicates whether actual cleaning was done, there |
4127 | * is no guarantee that everything was cleaned |
4128 | */ |
4129 | static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, |
4130 | struct e1000_rx_ring *rx_ring, |
4131 | int *work_done, int work_to_do) |
4132 | { |
4133 | struct net_device *netdev = adapter->netdev; |
4134 | struct pci_dev *pdev = adapter->pdev; |
4135 | struct e1000_rx_desc *rx_desc, *next_rxd; |
4136 | struct e1000_rx_buffer *buffer_info, *next_buffer; |
4137 | u32 length; |
4138 | unsigned int i; |
4139 | int cleaned_count = 0; |
4140 | bool cleaned = false; |
4141 | unsigned int total_rx_bytes = 0, total_rx_packets = 0; |
4142 | |
4143 | i = rx_ring->next_to_clean; |
4144 | rx_desc = E1000_RX_DESC(*rx_ring, i); |
4145 | buffer_info = &rx_ring->buffer_info[i]; |
4146 | |
4147 | while (rx_desc->status & E1000_RXD_STAT_DD) { |
4148 | struct sk_buff *skb; |
4149 | u8 status; |
4150 | |
4151 | if (*work_done >= work_to_do) |
4152 | break; |
4153 | (*work_done)++; |
4154 | dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ |
4155 | |
4156 | status = rx_desc->status; |
4157 | |
4158 | if (++i == rx_ring->count) |
4159 | i = 0; |
4160 | |
4161 | next_rxd = E1000_RX_DESC(*rx_ring, i); |
4162 | prefetch(next_rxd); |
4163 | |
4164 | next_buffer = &rx_ring->buffer_info[i]; |
4165 | |
4166 | cleaned = true; |
4167 | cleaned_count++; |
4168 | dma_unmap_page(&pdev->dev, buffer_info->dma, |
4169 | adapter->rx_buffer_len, DMA_FROM_DEVICE); |
4170 | buffer_info->dma = 0; |
4171 | |
4172 | length = le16_to_cpu(rx_desc->length); |
4173 | |
4174 | /* errors is only valid for DD + EOP descriptors */ |
4175 | if (unlikely((status & E1000_RXD_STAT_EOP) && |
4176 | (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { |
4177 | u8 *mapped = page_address(buffer_info->rxbuf.page); |
4178 | |
4179 | if (e1000_tbi_should_accept(adapter, status, |
4180 | rx_desc->errors,
4181 | length, mapped)) {
4182 | length--; |
4183 | } else if (netdev->features & NETIF_F_RXALL) { |
4184 | goto process_skb; |
4185 | } else { |
4186 | /* an error means any chain goes out the window |
4187 | * too |
4188 | */ |
4189 | dev_kfree_skb(rx_ring->rx_skb_top); |
4190 | rx_ring->rx_skb_top = NULL; |
4191 | goto next_desc; |
4192 | } |
4193 | } |
4194 | |
4195 | #define rxtop rx_ring->rx_skb_top |
4196 | process_skb: |
4197 | if (!(status & E1000_RXD_STAT_EOP)) { |
4198 | /* this descriptor is only the beginning (or middle) */ |
4199 | if (!rxtop) { |
4200 | /* this is the beginning of a chain */ |
4201 | rxtop = napi_get_frags(&adapter->napi);
4202 | if (!rxtop)
4203 | break;
4204 |
4205 | skb_fill_page_desc(rxtop, 0,
4206 | buffer_info->rxbuf.page,
4207 | 0, length);
4208 | } else {
4209 | /* this is the middle of a chain */
4210 | skb_fill_page_desc(rxtop,
4211 | skb_shinfo(rxtop)->nr_frags,
4212 | buffer_info->rxbuf.page, 0, length);
4213 | }
4214 | e1000_consume_page(buffer_info, rxtop, length);
4215 | goto next_desc; |
4216 | } else { |
4217 | if (rxtop) { |
4218 | /* end of the chain */ |
4219 | skb_fill_page_desc(rxtop, |
4220 | skb_shinfo(rxtop)->nr_frags, |
4221 | buffer_info->rxbuf.page, 0, length);
4222 | skb = rxtop;
4223 | rxtop = NULL;
4224 | e1000_consume_page(buffer_info, skb, length);
4225 | } else { |
4226 | struct page *p; |
4227 | /* no chain, got EOP, this buf is the packet |
4228 | * copybreak to save the put_page/alloc_page |
4229 | */ |
4230 | p = buffer_info->rxbuf.page; |
4231 | if (length <= copybreak) { |
4232 | if (likely(!(netdev->features & NETIF_F_RXFCS))) |
4233 | length -= 4; |
4234 | skb = e1000_alloc_rx_skb(adapter, |
4235 | length);
4236 | if (!skb) |
4237 | break; |
4238 | |
4239 | memcpy(skb_tail_pointer(skb), |
4240 | page_address(p), length); |
4241 | |
4242 | /* re-use the page, so don't erase |
4243 | * buffer_info->rxbuf.page |
4244 | */ |
4245 | skb_put(skb, length);
4246 | e1000_rx_checksum(adapter,
4247 | status | rx_desc->errors << 24,
4248 | le16_to_cpu(rx_desc->csum), skb); |
4249 | |
4250 | total_rx_bytes += skb->len; |
4251 | total_rx_packets++; |
4252 | |
4253 | e1000_receive_skb(adapter, status, |
4254 | rx_desc->special, skb);
4255 | goto next_desc; |
4256 | } else { |
4257 | skb = napi_get_frags(&adapter->napi);
4258 | if (!skb) { |
4259 | adapter->alloc_rx_buff_failed++; |
4260 | break; |
4261 | } |
4262 | skb_fill_page_desc(skb, 0, p, 0,
4263 | length);
4264 | e1000_consume_page(buffer_info, skb,
4265 | length);
4266 | } |
4267 | } |
4268 | } |
4269 | |
4270 | /* Receive Checksum Offload XXX recompute due to CRC strip? */ |
4271 | e1000_rx_checksum(adapter, |
4272 | (u32)(status) |
4273 | ((u32)(rx_desc->errors) << 24), |
4274 | le16_to_cpu(rx_desc->csum), skb); |
4275 | |
4276 | total_rx_bytes += (skb->len - 4); /* don't count FCS */ |
4277 | if (likely(!(netdev->features & NETIF_F_RXFCS))) |
4278 | pskb_trim(skb, skb->len - 4);
4279 | total_rx_packets++; |
4280 | |
4281 | if (status & E1000_RXD_STAT_VP) { |
4282 | __le16 vlan = rx_desc->special; |
4283 | u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; |
4284 | |
4285 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4286 | } |
4287 | |
4288 | napi_gro_frags(&adapter->napi);
4289 | |
4290 | next_desc: |
4291 | rx_desc->status = 0; |
4292 | |
4293 | /* return some buffers to hardware, one at a time is too slow */ |
4294 | if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { |
4295 | adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); |
4296 | cleaned_count = 0; |
4297 | } |
4298 | |
4299 | /* use prefetched values */ |
4300 | rx_desc = next_rxd; |
4301 | buffer_info = next_buffer; |
4302 | } |
4303 | rx_ring->next_to_clean = i; |
4304 | |
4305 | cleaned_count = E1000_DESC_UNUSED(rx_ring); |
4306 | if (cleaned_count) |
4307 | adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); |
4308 | |
4309 | adapter->total_rx_packets += total_rx_packets; |
4310 | adapter->total_rx_bytes += total_rx_bytes; |
4311 | netdev->stats.rx_bytes += total_rx_bytes; |
4312 | netdev->stats.rx_packets += total_rx_packets; |
4313 | return cleaned; |
4314 | } |
4315 | |
4316 | /* this should improve performance for small packets with large amounts |
4317 | * of reassembly being done in the stack |
4318 | */ |
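/* Editor's note: copybreak is a module parameter (the 256-byte default
 * is stated here as an assumption); frames at or below it are copied
 * into a small fresh skb so the original DMA buffer can stay mapped and
 * be handed straight back to the hardware.
 */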
4319 | static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter, |
4320 | struct e1000_rx_buffer *buffer_info, |
4321 | u32 length, const void *data) |
4322 | { |
4323 | struct sk_buff *skb; |
4324 | |
4325 | if (length > copybreak) |
4326 | return NULL; |
4327 | |
4328 | skb = e1000_alloc_rx_skb(adapter, length);
4329 | if (!skb) |
4330 | return NULL; |
4331 | |
4332 | dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
4333 | length, DMA_FROM_DEVICE);
4334 |
4335 | skb_put_data(skb, data, length);
4336 | |
4337 | return skb; |
4338 | } |
4339 | |
4340 | /** |
4341 | * e1000_clean_rx_irq - Send received data up the network stack; legacy |
4342 | * @adapter: board private structure |
4343 | * @rx_ring: ring to clean |
4344 | * @work_done: amount of napi work completed this call |
4345 | * @work_to_do: max amount of work allowed for this call to do |
4346 | */ |
4347 | static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, |
4348 | struct e1000_rx_ring *rx_ring, |
4349 | int *work_done, int work_to_do) |
4350 | { |
4351 | struct net_device *netdev = adapter->netdev; |
4352 | struct pci_dev *pdev = adapter->pdev; |
4353 | struct e1000_rx_desc *rx_desc, *next_rxd; |
4354 | struct e1000_rx_buffer *buffer_info, *next_buffer; |
4355 | u32 length; |
4356 | unsigned int i; |
4357 | int cleaned_count = 0; |
4358 | bool cleaned = false; |
4359 | unsigned int total_rx_bytes = 0, total_rx_packets = 0; |
4360 | |
4361 | i = rx_ring->next_to_clean; |
4362 | rx_desc = E1000_RX_DESC(*rx_ring, i); |
4363 | buffer_info = &rx_ring->buffer_info[i]; |
4364 | |
4365 | while (rx_desc->status & E1000_RXD_STAT_DD) { |
4366 | struct sk_buff *skb; |
4367 | u8 *data; |
4368 | u8 status; |
4369 | |
4370 | if (*work_done >= work_to_do) |
4371 | break; |
4372 | (*work_done)++; |
4373 | dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ |
4374 | |
4375 | status = rx_desc->status; |
4376 | length = le16_to_cpu(rx_desc->length); |
4377 | |
4378 | data = buffer_info->rxbuf.data; |
4379 | prefetch(data); |
4380 | skb = e1000_copybreak(adapter, buffer_info, length, data); |
4381 | if (!skb) { |
4382 | unsigned int frag_len = e1000_frag_len(adapter);
4383 |
4384 | skb = napi_build_skb(data - E1000_HEADROOM, frag_len);
4385 | if (!skb) { |
4386 | adapter->alloc_rx_buff_failed++; |
4387 | break; |
4388 | } |
4389 | |
4390 | skb_reserve(skb, E1000_HEADROOM); |
4391 | dma_unmap_single(&pdev->dev, buffer_info->dma, |
4392 | adapter->rx_buffer_len, |
4393 | DMA_FROM_DEVICE); |
4394 | buffer_info->dma = 0; |
4395 | buffer_info->rxbuf.data = NULL; |
4396 | } |
4397 | |
4398 | if (++i == rx_ring->count) |
4399 | i = 0; |
4400 | |
4401 | next_rxd = E1000_RX_DESC(*rx_ring, i); |
4402 | prefetch(next_rxd); |
4403 | |
4404 | next_buffer = &rx_ring->buffer_info[i]; |
4405 | |
4406 | cleaned = true; |
4407 | cleaned_count++; |
4408 | |
4409 | /* !EOP means multiple descriptors were used to store a single |
4410 | * packet; if that's the case we need to toss it. In fact, we need
4411 | * to toss every packet with the EOP bit clear and the next
4412 | * frame that _does_ have the EOP bit set, as it is by |
4413 | * definition only a frame fragment |
4414 | */ |
4415 | if (unlikely(!(status & E1000_RXD_STAT_EOP))) |
4416 | adapter->discarding = true; |
4417 | |
4418 | if (adapter->discarding) { |
4419 | /* All receives must fit into a single buffer */ |
4420 | netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
4421 | dev_kfree_skb(skb); |
4422 | if (status & E1000_RXD_STAT_EOP) |
4423 | adapter->discarding = false; |
4424 | goto next_desc; |
4425 | } |
4426 | |
4427 | if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { |
4428 | if (e1000_tbi_should_accept(adapter, status, |
4429 | rx_desc->errors,
4430 | length, data)) { |
4431 | length--; |
4432 | } else if (netdev->features & NETIF_F_RXALL) { |
4433 | goto process_skb; |
4434 | } else { |
4435 | dev_kfree_skb(skb); |
4436 | goto next_desc; |
4437 | } |
4438 | } |
4439 | |
4440 | process_skb: |
4441 | total_rx_bytes += (length - 4); /* don't count FCS */ |
4442 | total_rx_packets++; |
4443 | |
4444 | if (likely(!(netdev->features & NETIF_F_RXFCS))) |
4445 | /* adjust length to remove Ethernet CRC, this must be |
4446 | * done after the TBI_ACCEPT workaround above |
4447 | */ |
4448 | length -= 4; |
4449 | |
4450 | if (buffer_info->rxbuf.data == NULL) |
4451 | skb_put(skb, length);
4452 | else /* copybreak skb */
4453 | skb_trim(skb, length);
4454 | |
4455 | /* Receive Checksum Offload */ |
4456 | e1000_rx_checksum(adapter, |
4457 | (u32)(status) |
4458 | ((u32)(rx_desc->errors) << 24), |
4459 | le16_to_cpu(rx_desc->csum), skb); |
4460 | |
4461 | e1000_receive_skb(adapter, status, rx_desc->special, skb);
4462 | |
4463 | next_desc: |
4464 | rx_desc->status = 0; |
4465 | |
4466 | /* return some buffers to hardware, one at a time is too slow */ |
4467 | if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { |
4468 | adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); |
4469 | cleaned_count = 0; |
4470 | } |
4471 | |
4472 | /* use prefetched values */ |
4473 | rx_desc = next_rxd; |
4474 | buffer_info = next_buffer; |
4475 | } |
4476 | rx_ring->next_to_clean = i; |
4477 | |
4478 | cleaned_count = E1000_DESC_UNUSED(rx_ring); |
4479 | if (cleaned_count) |
4480 | adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); |
4481 | |
4482 | adapter->total_rx_packets += total_rx_packets; |
4483 | adapter->total_rx_bytes += total_rx_bytes; |
4484 | netdev->stats.rx_bytes += total_rx_bytes; |
4485 | netdev->stats.rx_packets += total_rx_packets; |
4486 | return cleaned; |
4487 | } |
4488 | |
4489 | /** |
4490 | * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers |
4491 | * @adapter: address of board private structure |
4492 | * @rx_ring: pointer to receive ring structure |
4493 | * @cleaned_count: number of buffers to allocate this pass |
4494 | **/ |
4495 | static void |
4496 | e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, |
4497 | struct e1000_rx_ring *rx_ring, int cleaned_count) |
4498 | { |
4499 | struct pci_dev *pdev = adapter->pdev; |
4500 | struct e1000_rx_desc *rx_desc; |
4501 | struct e1000_rx_buffer *buffer_info; |
4502 | unsigned int i; |
4503 | |
4504 | i = rx_ring->next_to_use; |
4505 | buffer_info = &rx_ring->buffer_info[i]; |
4506 | |
4507 | while (cleaned_count--) { |
4508 | /* allocate a new page if necessary */ |
4509 | if (!buffer_info->rxbuf.page) { |
4510 | buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC); |
4511 | if (unlikely(!buffer_info->rxbuf.page)) { |
4512 | adapter->alloc_rx_buff_failed++; |
4513 | break; |
4514 | } |
4515 | } |
4516 | |
4517 | if (!buffer_info->dma) { |
4518 | buffer_info->dma = dma_map_page(&pdev->dev, |
4519 | buffer_info->rxbuf.page, 0, |
4520 | adapter->rx_buffer_len, |
4521 | DMA_FROM_DEVICE); |
4522 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4523 | put_page(buffer_info->rxbuf.page);
4524 | buffer_info->rxbuf.page = NULL; |
4525 | buffer_info->dma = 0; |
4526 | adapter->alloc_rx_buff_failed++; |
4527 | break; |
4528 | } |
4529 | } |
4530 | |
4531 | rx_desc = E1000_RX_DESC(*rx_ring, i); |
4532 | rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); |
4533 | |
4534 | if (unlikely(++i == rx_ring->count)) |
4535 | i = 0; |
4536 | buffer_info = &rx_ring->buffer_info[i]; |
4537 | } |
4538 | |
4539 | if (likely(rx_ring->next_to_use != i)) { |
4540 | rx_ring->next_to_use = i; |
4541 | if (unlikely(i-- == 0)) |
4542 | i = (rx_ring->count - 1); |
4543 | |
4544 | /* Force memory writes to complete before letting h/w |
4545 | * know there are new descriptors to fetch. (Only |
4546 | * applicable for weak-ordered memory model archs, |
4547 | * such as IA-64). |
4548 | */ |
4549 | dma_wmb(); |
4550 | writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4551 | } |
4552 | } |
4553 | |
4554 | /** |
4555 | * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended |
4556 | * @adapter: address of board private structure |
4557 | * @rx_ring: pointer to ring struct |
4558 | * @cleaned_count: number of new Rx buffers to try to allocate |
4559 | **/ |
4560 | static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, |
4561 | struct e1000_rx_ring *rx_ring, |
4562 | int cleaned_count) |
4563 | { |
4564 | struct e1000_hw *hw = &adapter->hw; |
4565 | struct pci_dev *pdev = adapter->pdev; |
4566 | struct e1000_rx_desc *rx_desc; |
4567 | struct e1000_rx_buffer *buffer_info; |
4568 | unsigned int i; |
4569 | unsigned int bufsz = adapter->rx_buffer_len; |
4570 | |
4571 | i = rx_ring->next_to_use; |
4572 | buffer_info = &rx_ring->buffer_info[i]; |
4573 | |
4574 | while (cleaned_count--) { |
4575 | void *data; |
4576 | |
4577 | if (buffer_info->rxbuf.data) |
4578 | goto skip; |
4579 | |
4580 | data = e1000_alloc_frag(adapter);
4581 | if (!data) { |
4582 | /* Better luck next round */ |
4583 | adapter->alloc_rx_buff_failed++; |
4584 | break; |
4585 | } |
4586 | |
4587 | /* Fix for errata 23, can't cross 64kB boundary */ |
4588 | if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4589 | void *olddata = data;
4590 | e_err(rx_err, "skb align check failed: %u bytes at %p\n",
4591 | bufsz, data);
4592 | /* Try again, without freeing the previous */ |
4593 | data = e1000_alloc_frag(adapter);
4594 | /* Failed allocation, critical failure */ |
4595 | if (!data) { |
4596 | skb_free_frag(olddata);
4597 | adapter->alloc_rx_buff_failed++; |
4598 | break; |
4599 | } |
4600 | |
4601 | if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4602 | /* give up */ |
4603 | skb_free_frag(data);
4604 | skb_free_frag(olddata);
4605 | adapter->alloc_rx_buff_failed++; |
4606 | break; |
4607 | } |
4608 | |
4609 | /* Use new allocation */ |
4610 | skb_free_frag(olddata);
4611 | } |
4612 | buffer_info->dma = dma_map_single(&pdev->dev, |
4613 | data, |
4614 | adapter->rx_buffer_len, |
4615 | DMA_FROM_DEVICE); |
4616 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4617 | skb_free_frag(data);
4618 | buffer_info->dma = 0; |
4619 | adapter->alloc_rx_buff_failed++; |
4620 | break; |
4621 | } |
4622 | |
4623 | /* XXX if it was allocated cleanly it will never map to a |
4624 | * boundary crossing |
4625 | */ |
4626 | |
4627 | /* Fix for errata 23, can't cross 64kB boundary */ |
4628 | if (!e1000_check_64k_bound(adapter, |
4629 | (void *)(unsigned long)buffer_info->dma,
4630 | adapter->rx_buffer_len)) {
4631 | e_err(rx_err, "dma align check failed: %u bytes at %p\n",
4632 | adapter->rx_buffer_len,
4633 | (void *)(unsigned long)buffer_info->dma);
4634 | |
4635 | dma_unmap_single(&pdev->dev, buffer_info->dma, |
4636 | adapter->rx_buffer_len, |
4637 | DMA_FROM_DEVICE); |
4638 | |
4639 | skb_free_frag(data);
4640 | buffer_info->rxbuf.data = NULL; |
4641 | buffer_info->dma = 0; |
4642 | |
4643 | adapter->alloc_rx_buff_failed++; |
4644 | break; |
4645 | } |
4646 | buffer_info->rxbuf.data = data; |
4647 | skip: |
4648 | rx_desc = E1000_RX_DESC(*rx_ring, i); |
4649 | rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); |
4650 | |
4651 | if (unlikely(++i == rx_ring->count)) |
4652 | i = 0; |
4653 | buffer_info = &rx_ring->buffer_info[i]; |
4654 | } |
4655 | |
4656 | if (likely(rx_ring->next_to_use != i)) { |
4657 | rx_ring->next_to_use = i; |
4658 | if (unlikely(i-- == 0)) |
4659 | i = (rx_ring->count - 1); |
4660 | |
4661 | /* Force memory writes to complete before letting h/w |
4662 | * know there are new descriptors to fetch. (Only |
4663 | * applicable for weak-ordered memory model archs, |
4664 | * such as IA-64). |
4665 | */ |
4666 | dma_wmb(); |
4667 | writel(i, hw->hw_addr + rx_ring->rdt);
4668 | } |
4669 | } |
4670 | |
4671 | /** |
4672 | * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. |
4673 | * @adapter: address of board private structure |
4674 | **/ |
4675 | static void e1000_smartspeed(struct e1000_adapter *adapter) |
4676 | { |
4677 | struct e1000_hw *hw = &adapter->hw; |
4678 | u16 phy_status; |
4679 | u16 phy_ctrl; |
4680 | |
4681 | if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg || |
4682 | !(hw->autoneg_advertised & ADVERTISE_1000_FULL)) |
4683 | return; |
4684 | |
4685 | if (adapter->smartspeed == 0) { |
4686 | /* If Master/Slave config fault is asserted twice, |
4687 | * we assume back-to-back |
4688 | */ |
4689 | e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4690 | if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4691 | return;
4692 | e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4693 | if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4694 | return;
4695 | e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4696 | if (phy_ctrl & CR_1000T_MS_ENABLE) {
4697 | phy_ctrl &= ~CR_1000T_MS_ENABLE;
4698 | e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4699 | phy_ctrl);
4700 | adapter->smartspeed++;
4701 | if (!e1000_phy_setup_autoneg(hw) &&
4702 | !e1000_read_phy_reg(hw, PHY_CTRL,
4703 | &phy_ctrl)) {
4704 | phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4705 | MII_CR_RESTART_AUTO_NEG);
4706 | e1000_write_phy_reg(hw, PHY_CTRL,
4707 | phy_ctrl);
4708 | } |
4709 | } |
4710 | return; |
4711 | } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { |
4712 | /* If still no link, perhaps using 2/3 pair cable */ |
4713 | e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4714 | phy_ctrl |= CR_1000T_MS_ENABLE;
4715 | e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4716 | if (!e1000_phy_setup_autoneg(hw) &&
4717 | !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4718 | phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4719 | MII_CR_RESTART_AUTO_NEG);
4720 | e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4721 | } |
4722 | } |
4723 | /* Restart process after E1000_SMARTSPEED_MAX iterations */ |
4724 | if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX) |
4725 | adapter->smartspeed = 0; |
4726 | } |
4727 | |
4728 | /** |
4729 | * e1000_ioctl - handle ioctl calls |
4730 | * @netdev: pointer to our netdev |
4731 | * @ifr: pointer to interface request structure |
4732 | * @cmd: ioctl data |
4733 | **/ |
4734 | static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) |
4735 | { |
4736 | switch (cmd) { |
4737 | case SIOCGMIIPHY: |
4738 | case SIOCGMIIREG: |
4739 | case SIOCSMIIREG: |
4740 | return e1000_mii_ioctl(netdev, ifr, cmd); |
4741 | default: |
4742 | return -EOPNOTSUPP; |
4743 | } |
4744 | } |
4745 | |
4746 | /** |
4747 | * e1000_mii_ioctl - |
4748 | * @netdev: pointer to our netdev |
4749 | * @ifr: pointer to interface request structure |
4750 | * @cmd: ioctl data |
4751 | **/ |
4752 | static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, |
4753 | int cmd) |
4754 | { |
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct mii_ioctl_data *data = if_mii(ifr);
4758 | int retval; |
4759 | u16 mii_reg; |
4760 | unsigned long flags; |
4761 | |
4762 | if (hw->media_type != e1000_media_type_copper) |
4763 | return -EOPNOTSUPP; |
4764 | |
4765 | switch (cmd) { |
4766 | case SIOCGMIIPHY: |
4767 | data->phy_id = hw->phy_addr; |
4768 | break; |
4769 | case SIOCGMIIREG: |
4770 | spin_lock_irqsave(&adapter->stats_lock, flags); |
		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
				       &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
4777 | break; |
4778 | case SIOCSMIIREG: |
4779 | if (data->reg_num & ~(0x1F)) |
4780 | return -EFAULT; |
4781 | mii_reg = data->val_in; |
4782 | spin_lock_irqsave(&adapter->stats_lock, flags); |
		if (e1000_write_phy_reg(hw, data->reg_num, mii_reg)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
4789 | if (hw->media_type == e1000_media_type_copper) { |
4790 | switch (data->reg_num) { |
4791 | case PHY_CTRL: |
4792 | if (mii_reg & MII_CR_POWER_DOWN) |
4793 | break; |
4794 | if (mii_reg & MII_CR_AUTO_NEG_EN) { |
4795 | hw->autoneg = 1; |
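				/* 0x2F = ADVERTISE_10_HALF/FULL,
				 * ADVERTISE_100_HALF/FULL and
				 * ADVERTISE_1000_FULL
				 */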
4796 | hw->autoneg_advertised = 0x2F; |
4797 | } else { |
4798 | u32 speed; |
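					/* decode the forced speed from the
					 * standard MII BMCR bits: 0x40 =
					 * 1000, 0x2000 = 100, and 0x100
					 * below = full duplex
					 */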
4799 | if (mii_reg & 0x40) |
4800 | speed = SPEED_1000; |
4801 | else if (mii_reg & 0x2000) |
4802 | speed = SPEED_100; |
4803 | else |
4804 | speed = SPEED_10; |
					retval = e1000_set_spd_dplx(adapter,
								    speed,
								    (mii_reg & 0x100)
								    ? DUPLEX_FULL
								    : DUPLEX_HALF);
					if (retval)
						return retval;
				}
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
4817 | break; |
4818 | case M88E1000_PHY_SPEC_CTRL: |
4819 | case M88E1000_EXT_PHY_SPEC_CTRL: |
4820 | if (e1000_phy_reset(hw)) |
4821 | return -EIO; |
4822 | break; |
4823 | } |
4824 | } else { |
4825 | switch (data->reg_num) { |
4826 | case PHY_CTRL: |
4827 | if (mii_reg & MII_CR_POWER_DOWN) |
4828 | break; |
			if (netif_running(adapter->netdev))
4830 | e1000_reinit_locked(adapter); |
4831 | else |
4832 | e1000_reset(adapter); |
4833 | break; |
4834 | } |
4835 | } |
4836 | break; |
4837 | default: |
4838 | return -EOPNOTSUPP; |
4839 | } |
4840 | return E1000_SUCCESS; |
4841 | } |
4842 | |
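/* Thin PCI wrappers used by the chip-independent e1000_hw code. */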
4843 | void e1000_pci_set_mwi(struct e1000_hw *hw) |
4844 | { |
4845 | struct e1000_adapter *adapter = hw->back; |
	int ret_val = pci_set_mwi(adapter->pdev);

	if (ret_val)
		e_err(probe, "Error in setting MWI\n");
4850 | } |
4851 | |
4852 | void e1000_pci_clear_mwi(struct e1000_hw *hw) |
4853 | { |
4854 | struct e1000_adapter *adapter = hw->back; |
4855 | |
	pci_clear_mwi(adapter->pdev);
4857 | } |
4858 | |
4859 | int e1000_pcix_get_mmrbc(struct e1000_hw *hw) |
4860 | { |
4861 | struct e1000_adapter *adapter = hw->back; |
	return pcix_get_mmrbc(adapter->pdev);
4863 | } |
4864 | |
4865 | void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc) |
4866 | { |
4867 | struct e1000_adapter *adapter = hw->back; |
	pcix_set_mmrbc(adapter->pdev, mmrbc);
4869 | } |
4870 | |
4871 | void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value) |
4872 | { |
4873 | outl(value, port); |
4874 | } |
4875 | |
4876 | static bool e1000_vlan_used(struct e1000_adapter *adapter) |
4877 | { |
4878 | u16 vid; |
4879 | |
4880 | for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) |
4881 | return true; |
4882 | return false; |
4883 | } |
4884 | |
4885 | static void __e1000_vlan_mode(struct e1000_adapter *adapter, |
4886 | netdev_features_t features) |
4887 | { |
4888 | struct e1000_hw *hw = &adapter->hw; |
4889 | u32 ctrl; |
4890 | |
4891 | ctrl = er32(CTRL); |
4892 | if (features & NETIF_F_HW_VLAN_CTAG_RX) { |
4893 | /* enable VLAN tag insert/strip */ |
4894 | ctrl |= E1000_CTRL_VME; |
4895 | } else { |
4896 | /* disable VLAN tag insert/strip */ |
4897 | ctrl &= ~E1000_CTRL_VME; |
4898 | } |
4899 | ew32(CTRL, ctrl); |
4900 | } |

static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4902 | bool filter_on) |
4903 | { |
4904 | struct e1000_hw *hw = &adapter->hw; |
4905 | u32 rctl; |
4906 | |
4907 | if (!test_bit(__E1000_DOWN, &adapter->flags)) |
4908 | e1000_irq_disable(adapter); |
4909 | |
	__e1000_vlan_mode(adapter, adapter->netdev->features);
4911 | if (filter_on) { |
4912 | /* enable VLAN receive filtering */ |
4913 | rctl = er32(RCTL); |
4914 | rctl &= ~E1000_RCTL_CFIEN; |
4915 | if (!(adapter->netdev->flags & IFF_PROMISC)) |
4916 | rctl |= E1000_RCTL_VFE; |
4917 | ew32(RCTL, rctl); |
4918 | e1000_update_mng_vlan(adapter); |
4919 | } else { |
4920 | /* disable VLAN receive filtering */ |
4921 | rctl = er32(RCTL); |
4922 | rctl &= ~E1000_RCTL_VFE; |
4923 | ew32(RCTL, rctl); |
4924 | } |
4925 | |
4926 | if (!test_bit(__E1000_DOWN, &adapter->flags)) |
4927 | e1000_irq_enable(adapter); |
4928 | } |
4929 | |
4930 | static void e1000_vlan_mode(struct net_device *netdev, |
4931 | netdev_features_t features) |
4932 | { |
	struct e1000_adapter *adapter = netdev_priv(netdev);
4934 | |
4935 | if (!test_bit(__E1000_DOWN, &adapter->flags)) |
4936 | e1000_irq_disable(adapter); |
4937 | |
4938 | __e1000_vlan_mode(adapter, features); |
4939 | |
4940 | if (!test_bit(__E1000_DOWN, &adapter->flags)) |
4941 | e1000_irq_enable(adapter); |
4942 | } |
4943 | |
4944 | static int e1000_vlan_rx_add_vid(struct net_device *netdev, |
4945 | __be16 proto, u16 vid) |
4946 | { |
	struct e1000_adapter *adapter = netdev_priv(netdev);
4948 | struct e1000_hw *hw = &adapter->hw; |
4949 | u32 vfta, index; |
4950 | |
4951 | if ((hw->mng_cookie.status & |
4952 | E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && |
4953 | (vid == adapter->mng_vlan_id)) |
4954 | return 0; |
4955 | |
4956 | if (!e1000_vlan_used(adapter)) |
		e1000_vlan_filter_on_off(adapter, true);
4958 | |
4959 | /* add VID to filter table */ |
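	/* The VFTA is an array of 128 32-bit registers: VID bits [11:5]
	 * select the register and bits [4:0] the bit within it, e.g.
	 * VID 0x123 lands in register 9, bit 3.
	 */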
4960 | index = (vid >> 5) & 0x7F; |
4961 | vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); |
4962 | vfta |= (1 << (vid & 0x1F)); |
	e1000_write_vfta(hw, index, vfta);
4964 | |
	set_bit(vid, adapter->active_vlans);
4966 | |
4967 | return 0; |
4968 | } |
4969 | |
4970 | static int e1000_vlan_rx_kill_vid(struct net_device *netdev, |
4971 | __be16 proto, u16 vid) |
4972 | { |
	struct e1000_adapter *adapter = netdev_priv(netdev);
4974 | struct e1000_hw *hw = &adapter->hw; |
4975 | u32 vfta, index; |
4976 | |
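	/* The immediate disable/re-enable pair below looks redundant; its
	 * apparent effect is to synchronize with any interrupt handler
	 * still running before the filter table is modified.
	 */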
4977 | if (!test_bit(__E1000_DOWN, &adapter->flags)) |
4978 | e1000_irq_disable(adapter); |
4979 | if (!test_bit(__E1000_DOWN, &adapter->flags)) |
4980 | e1000_irq_enable(adapter); |
4981 | |
4982 | /* remove VID from filter table */ |
4983 | index = (vid >> 5) & 0x7F; |
4984 | vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); |
4985 | vfta &= ~(1 << (vid & 0x1F)); |
	e1000_write_vfta(hw, index, vfta);
4987 | |
	clear_bit(vid, adapter->active_vlans);
4989 | |
4990 | if (!e1000_vlan_used(adapter)) |
		e1000_vlan_filter_on_off(adapter, false);
4992 | |
4993 | return 0; |
4994 | } |
4995 | |
4996 | static void e1000_restore_vlan(struct e1000_adapter *adapter) |
4997 | { |
4998 | u16 vid; |
4999 | |
5000 | if (!e1000_vlan_used(adapter)) |
5001 | return; |
5002 | |
	e1000_vlan_filter_on_off(adapter, true);
	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
5006 | } |
5007 | |
5008 | int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) |
5009 | { |
5010 | struct e1000_hw *hw = &adapter->hw; |
5011 | |
5012 | hw->autoneg = 0; |
5013 | |
5014 | /* Make sure dplx is at most 1 bit and lsb of speed is not set |
5015 | * for the switch() below to work |
5016 | */ |
5017 | if ((spd & 1) || (dplx & ~1)) |
5018 | goto err_inval; |
5019 | |
	/* Fiber NICs only allow 1000 Mbps full duplex */
5021 | if ((hw->media_type == e1000_media_type_fiber) && |
5022 | spd != SPEED_1000 && |
5023 | dplx != DUPLEX_FULL) |
5024 | goto err_inval; |
5025 | |
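	/* SPEED_10/100/1000 are even and dplx is 0 or 1 (enforced above),
	 * so spd + dplx uniquely encodes the pair, e.g.
	 * SPEED_100 + DUPLEX_FULL = 101.
	 */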
5026 | switch (spd + dplx) { |
5027 | case SPEED_10 + DUPLEX_HALF: |
5028 | hw->forced_speed_duplex = e1000_10_half; |
5029 | break; |
5030 | case SPEED_10 + DUPLEX_FULL: |
5031 | hw->forced_speed_duplex = e1000_10_full; |
5032 | break; |
5033 | case SPEED_100 + DUPLEX_HALF: |
5034 | hw->forced_speed_duplex = e1000_100_half; |
5035 | break; |
5036 | case SPEED_100 + DUPLEX_FULL: |
5037 | hw->forced_speed_duplex = e1000_100_full; |
5038 | break; |
5039 | case SPEED_1000 + DUPLEX_FULL: |
5040 | hw->autoneg = 1; |
5041 | hw->autoneg_advertised = ADVERTISE_1000_FULL; |
5042 | break; |
5043 | case SPEED_1000 + DUPLEX_HALF: /* not supported */ |
5044 | default: |
5045 | goto err_inval; |
5046 | } |
5047 | |
5048 | /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ |
5049 | hw->mdix = AUTO_ALL_MODES; |
5050 | |
5051 | return 0; |
5052 | |
5053 | err_inval: |
5054 | e_err(probe, "Unsupported Speed/Duplex configuration\n" ); |
5055 | return -EINVAL; |
5056 | } |
5057 | |
5058 | static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) |
5059 | { |
5060 | struct net_device *netdev = pci_get_drvdata(pdev); |
	struct e1000_adapter *adapter = netdev_priv(netdev);
5062 | struct e1000_hw *hw = &adapter->hw; |
5063 | u32 ctrl, ctrl_ext, rctl, status; |
5064 | u32 wufc = adapter->wol; |
5065 | |
	netif_device_detach(netdev);
5067 | |
	if (netif_running(netdev)) {
5069 | int count = E1000_CHECK_RESET_COUNT; |
5070 | |
5071 | while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) |
			usleep_range(10000, 20000);
5073 | |
5074 | WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); |
5075 | e1000_down(adapter); |
5076 | } |
5077 | |
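	/* if the link is already up at suspend time, don't arm wake on
	 * link status change
	 */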
5078 | status = er32(STATUS); |
5079 | if (status & E1000_STATUS_LU) |
5080 | wufc &= ~E1000_WUFC_LNKC; |
5081 | |
5082 | if (wufc) { |
5083 | e1000_setup_rctl(adapter); |
5084 | e1000_set_rx_mode(netdev); |
5085 | |
5086 | rctl = er32(RCTL); |
5087 | |
5088 | /* turn on all-multi mode if wake on multicast is enabled */ |
5089 | if (wufc & E1000_WUFC_MC) |
5090 | rctl |= E1000_RCTL_MPE; |
5091 | |
5092 | /* enable receives in the hardware */ |
5093 | ew32(RCTL, rctl | E1000_RCTL_EN); |
5094 | |
5095 | if (hw->mac_type >= e1000_82540) { |
5096 | ctrl = er32(CTRL); |
5097 | /* advertise wake from D3Cold */ |
5098 | #define E1000_CTRL_ADVD3WUC 0x00100000 |
5099 | /* phy power management enable */ |
5100 | #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 |
5101 | ctrl |= E1000_CTRL_ADVD3WUC | |
5102 | E1000_CTRL_EN_PHY_PWR_MGMT; |
5103 | ew32(CTRL, ctrl); |
5104 | } |
5105 | |
5106 | if (hw->media_type == e1000_media_type_fiber || |
5107 | hw->media_type == e1000_media_type_internal_serdes) { |
5108 | /* keep the laser running in D3 */ |
5109 | ctrl_ext = er32(CTRL_EXT); |
5110 | ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; |
5111 | ew32(CTRL_EXT, ctrl_ext); |
5112 | } |
5113 | |
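		/* arm PME assertion and program which events may wake us */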
5114 | ew32(WUC, E1000_WUC_PME_EN); |
5115 | ew32(WUFC, wufc); |
5116 | } else { |
5117 | ew32(WUC, 0); |
5118 | ew32(WUFC, 0); |
5119 | } |
5120 | |
5121 | e1000_release_manageability(adapter); |
5122 | |
5123 | *enable_wake = !!wufc; |
5124 | |
5125 | /* make sure adapter isn't asleep if manageability is enabled */ |
5126 | if (adapter->en_mng_pt) |
5127 | *enable_wake = true; |
5128 | |
	if (netif_running(netdev))
		e1000_free_irq(adapter);

	if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
		pci_disable_device(pdev);
5134 | |
5135 | return 0; |
5136 | } |
5137 | |
5138 | static int __maybe_unused e1000_suspend(struct device *dev) |
5139 | { |
5140 | int retval; |
5141 | struct pci_dev *pdev = to_pci_dev(dev); |
5142 | bool wake; |
5143 | |
	retval = __e1000_shutdown(pdev, &wake);
	device_set_wakeup_enable(dev, wake);
5146 | |
5147 | return retval; |
5148 | } |
5149 | |
5150 | static int __maybe_unused e1000_resume(struct device *dev) |
5151 | { |
5152 | struct pci_dev *pdev = to_pci_dev(dev); |
5153 | struct net_device *netdev = pci_get_drvdata(pdev); |
	struct e1000_adapter *adapter = netdev_priv(netdev);
5155 | struct e1000_hw *hw = &adapter->hw; |
	int err;
5157 | |
5158 | if (adapter->need_ioport) |
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot enable PCI device from suspend\n");
5164 | return err; |
5165 | } |
5166 | |
5167 | /* flush memory to make sure state is correct */ |
5168 | smp_mb__before_atomic(); |
	clear_bit(__E1000_DISABLED, &adapter->flags);
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);
5174 | |
	if (netif_running(netdev)) {
5176 | err = e1000_request_irq(adapter); |
5177 | if (err) |
5178 | return err; |
5179 | } |
5180 | |
5181 | e1000_power_up_phy(adapter); |
5182 | e1000_reset(adapter); |
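	/* clear any pending wake-up status */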
5183 | ew32(WUS, ~0); |
5184 | |
5185 | e1000_init_manageability(adapter); |
5186 | |
	if (netif_running(netdev))
5188 | e1000_up(adapter); |
5189 | |
	netif_device_attach(netdev);
5191 | |
5192 | return 0; |
5193 | } |
5194 | |
5195 | static void e1000_shutdown(struct pci_dev *pdev) |
5196 | { |
5197 | bool wake; |
5198 | |
	__e1000_shutdown(pdev, &wake);
5200 | |
5201 | if (system_state == SYSTEM_POWER_OFF) { |
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
5204 | } |
5205 | } |
5206 | |
5207 | #ifdef CONFIG_NET_POLL_CONTROLLER |
5208 | /* Polling 'interrupt' - used by things like netconsole to send skbs |
5209 | * without having to re-enable interrupts. It's not called while |
5210 | * the interrupt routine is executing. |
5211 | */ |
5212 | static void e1000_netpoll(struct net_device *netdev) |
5213 | { |
	struct e1000_adapter *adapter = netdev_priv(netdev);
5215 | |
	if (disable_hardirq(adapter->pdev->irq))
		e1000_intr(adapter->pdev->irq, netdev);
	enable_irq(adapter->pdev->irq);
5219 | } |
5220 | #endif |
5221 | |
5222 | /** |
5223 | * e1000_io_error_detected - called when PCI error is detected |
5224 | * @pdev: Pointer to PCI device |
5225 | * @state: The current pci connection state |
5226 | * |
5227 | * This function is called after a PCI bus error affecting |
5228 | * this device has been detected. |
5229 | */ |
5230 | static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, |
5231 | pci_channel_state_t state) |
5232 | { |
5233 | struct net_device *netdev = pci_get_drvdata(pdev); |
	struct e1000_adapter *adapter = netdev_priv(netdev);
5235 | |
	netif_device_detach(netdev);
5237 | |
5238 | if (state == pci_channel_io_perm_failure) |
5239 | return PCI_ERS_RESULT_DISCONNECT; |
5240 | |
	if (netif_running(netdev))
5242 | e1000_down(adapter); |
5243 | |
	if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
		pci_disable_device(pdev);
5246 | |
5247 | /* Request a slot reset. */ |
5248 | return PCI_ERS_RESULT_NEED_RESET; |
5249 | } |
5250 | |
5251 | /** |
5252 | * e1000_io_slot_reset - called after the pci bus has been reset. |
5253 | * @pdev: Pointer to PCI device |
5254 | * |
5255 | * Restart the card from scratch, as if from a cold-boot. Implementation |
5256 | * resembles the first-half of the e1000_resume routine. |
5257 | */ |
5258 | static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) |
5259 | { |
5260 | struct net_device *netdev = pci_get_drvdata(pdev); |
	struct e1000_adapter *adapter = netdev_priv(netdev);
5262 | struct e1000_hw *hw = &adapter->hw; |
5263 | int err; |
5264 | |
5265 | if (adapter->need_ioport) |
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot re-enable PCI device after reset.\n");
5271 | return PCI_ERS_RESULT_DISCONNECT; |
5272 | } |
5273 | |
5274 | /* flush memory to make sure state is correct */ |
5275 | smp_mb__before_atomic(); |
	clear_bit(__E1000_DISABLED, &adapter->flags);
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);
5281 | |
5282 | e1000_reset(adapter); |
5283 | ew32(WUS, ~0); |
5284 | |
5285 | return PCI_ERS_RESULT_RECOVERED; |
5286 | } |
5287 | |
5288 | /** |
5289 | * e1000_io_resume - called when traffic can start flowing again. |
5290 | * @pdev: Pointer to PCI device |
5291 | * |
5292 | * This callback is called when the error recovery driver tells us that |
 * it's OK to resume normal operation. Implementation resembles the
5294 | * second-half of the e1000_resume routine. |
5295 | */ |
5296 | static void e1000_io_resume(struct pci_dev *pdev) |
5297 | { |
5298 | struct net_device *netdev = pci_get_drvdata(pdev); |
	struct e1000_adapter *adapter = netdev_priv(netdev);
5300 | |
5301 | e1000_init_manageability(adapter); |
5302 | |
	if (netif_running(netdev)) {
		if (e1000_up(adapter)) {
			pr_info("can't bring device back up after reset\n");
5306 | return; |
5307 | } |
5308 | } |
5309 | |
	netif_device_attach(netdev);
5311 | } |
5312 | |
5313 | /* e1000_main.c */ |
5314 | |