1 | // SPDX-License-Identifier: GPL-2.0 |
---|---|
2 | /* Copyright (c) 2018 Intel Corporation */ |
3 | |
4 | #include <linux/module.h> |
5 | #include <linux/types.h> |
6 | #include <linux/if_vlan.h> |
7 | #include <linux/tcp.h> |
8 | #include <linux/udp.h> |
9 | #include <linux/ip.h> |
10 | #include <linux/pm_runtime.h> |
11 | #include <net/pkt_sched.h> |
12 | #include <linux/bpf_trace.h> |
13 | #include <net/xdp_sock_drv.h> |
14 | #include <linux/pci.h> |
15 | |
16 | #include <net/ipv6.h> |
17 | |
18 | #include "igc.h" |
19 | #include "igc_hw.h" |
20 | #include "igc_tsn.h" |
21 | #include "igc_xdp.h" |
22 | |
23 | #define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver" |
24 | |
25 | #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) |
26 | |
27 | #define IGC_XDP_PASS 0 |
28 | #define IGC_XDP_CONSUMED BIT(0) |
29 | #define IGC_XDP_TX BIT(1) |
30 | #define IGC_XDP_REDIRECT BIT(2) |
31 | |
32 | static int debug = -1; |
33 | |
34 | MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); |
35 | MODULE_DESCRIPTION(DRV_SUMMARY); |
36 | MODULE_LICENSE("GPL v2"); |
37 | module_param(debug, int, 0); |
38 | MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); |
39 | |
40 | char igc_driver_name[] = "igc"; |
41 | static const char igc_driver_string[] = DRV_SUMMARY; |
42 | static const char igc_copyright[] = |
43 | "Copyright(c) 2018 Intel Corporation."; |
44 | |
45 | static const struct igc_info *igc_info_tbl[] = { |
46 | [board_base] = &igc_base_info, |
47 | }; |
48 | |
49 | static const struct pci_device_id igc_pci_tbl[] = { |
50 | { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base }, |
51 | { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base }, |
52 | { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base }, |
53 | { PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base }, |
54 | { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base }, |
55 | { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base }, |
56 | { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base }, |
57 | { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base }, |
58 | { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LMVP), board_base }, |
59 | { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base }, |
60 | { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base }, |
61 | { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base }, |
62 | { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base }, |
63 | { PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base }, |
64 | { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base }, |
65 | { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base }, |
66 | /* required last entry */ |
67 | {0, } |
68 | }; |
69 | |
70 | MODULE_DEVICE_TABLE(pci, igc_pci_tbl); |
71 | |
72 | enum latency_range { |
73 | lowest_latency = 0, |
74 | low_latency = 1, |
75 | bulk_latency = 2, |
76 | latency_invalid = 255 |
77 | }; |
78 | |
79 | void igc_reset(struct igc_adapter *adapter) |
80 | { |
81 | struct net_device *dev = adapter->netdev; |
82 | struct igc_hw *hw = &adapter->hw; |
83 | struct igc_fc_info *fc = &hw->fc; |
84 | u32 pba, hwm; |
85 | |
86 | /* Repartition PBA for greater than 9k MTU if required */ |
87 | pba = IGC_PBA_34K; |
88 | |
89 | /* flow control settings |
90 | * The high water mark must be low enough to fit one full frame |
91 | * after transmitting the pause frame. As such we must have enough |
92 | * space to allow for us to complete our current transmit and then |
93 | * receive the frame that is in progress from the link partner. |
94 | * Set it to: |
95 | * - the full Rx FIFO size minus one full Tx plus one full Rx frame |
96 | */ |
97 | hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE); |
98 | |
99 | fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ |
100 | fc->low_water = fc->high_water - 16; |
101 | fc->pause_time = 0xFFFF; |
102 | fc->send_xon = 1; |
103 | fc->current_mode = fc->requested_mode; |
104 | |
105 | hw->mac.ops.reset_hw(hw); |
106 | |
107 | if (hw->mac.ops.init_hw(hw)) |
108 | netdev_err(dev, format: "Error on hardware initialization\n"); |
109 | |
110 | /* Re-establish EEE setting */ |
111 | igc_set_eee_i225(hw, adv2p5G: true, adv1G: true, adv100M: true); |
112 | |
113 | if (!netif_running(dev: adapter->netdev)) |
114 | igc_power_down_phy_copper_base(hw: &adapter->hw); |
115 | |
116 | /* Enable HW to recognize an 802.1Q VLAN Ethernet packet */ |
117 | wr32(IGC_VET, ETH_P_8021Q); |
118 | |
119 | /* Re-enable PTP, where applicable. */ |
120 | igc_ptp_reset(adapter); |
121 | |
122 | /* Re-enable TSN offloading, where applicable. */ |
123 | igc_tsn_reset(adapter); |
124 | |
125 | igc_get_phy_info(hw); |
126 | } |
127 | |
128 | /** |
129 | * igc_power_up_link - Power up the phy link |
130 | * @adapter: address of board private structure |
131 | */ |
132 | static void igc_power_up_link(struct igc_adapter *adapter) |
133 | { |
134 | igc_reset_phy(hw: &adapter->hw); |
135 | |
136 | igc_power_up_phy_copper(hw: &adapter->hw); |
137 | |
138 | igc_setup_link(hw: &adapter->hw); |
139 | } |
140 | |
141 | /** |
142 | * igc_release_hw_control - release control of the h/w to f/w |
143 | * @adapter: address of board private structure |
144 | * |
145 | * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit. |
146 | * For ASF and Pass Through versions of f/w this means that the |
147 | * driver is no longer loaded. |
148 | */ |
149 | static void igc_release_hw_control(struct igc_adapter *adapter) |
150 | { |
151 | struct igc_hw *hw = &adapter->hw; |
152 | u32 ctrl_ext; |
153 | |
154 | if (!pci_device_is_present(pdev: adapter->pdev)) |
155 | return; |
156 | |
157 | /* Let firmware take over control of h/w */ |
158 | ctrl_ext = rd32(IGC_CTRL_EXT); |
159 | wr32(IGC_CTRL_EXT, |
160 | ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD); |
161 | } |
162 | |
163 | /** |
164 | * igc_get_hw_control - get control of the h/w from f/w |
165 | * @adapter: address of board private structure |
166 | * |
167 | * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit. |
168 | * For ASF and Pass Through versions of f/w this means that |
169 | * the driver is loaded. |
170 | */ |
171 | static void igc_get_hw_control(struct igc_adapter *adapter) |
172 | { |
173 | struct igc_hw *hw = &adapter->hw; |
174 | u32 ctrl_ext; |
175 | |
176 | /* Let firmware know the driver has taken over */ |
177 | ctrl_ext = rd32(IGC_CTRL_EXT); |
178 | wr32(IGC_CTRL_EXT, |
179 | ctrl_ext | IGC_CTRL_EXT_DRV_LOAD); |
180 | } |
181 | |
182 | static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf) |
183 | { |
184 | dma_unmap_single(dev, dma_unmap_addr(buf, dma), |
185 | dma_unmap_len(buf, len), DMA_TO_DEVICE); |
186 | |
187 | dma_unmap_len_set(buf, len, 0); |
188 | } |
189 | |
190 | /** |
191 | * igc_clean_tx_ring - Free Tx Buffers |
192 | * @tx_ring: ring to be cleaned |
193 | */ |
194 | static void igc_clean_tx_ring(struct igc_ring *tx_ring) |
195 | { |
196 | u16 i = tx_ring->next_to_clean; |
197 | struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; |
198 | u32 xsk_frames = 0; |
199 | |
200 | while (i != tx_ring->next_to_use) { |
201 | union igc_adv_tx_desc *eop_desc, *tx_desc; |
202 | |
203 | switch (tx_buffer->type) { |
204 | case IGC_TX_BUFFER_TYPE_XSK: |
205 | xsk_frames++; |
206 | break; |
207 | case IGC_TX_BUFFER_TYPE_XDP: |
208 | xdp_return_frame(xdpf: tx_buffer->xdpf); |
209 | igc_unmap_tx_buffer(dev: tx_ring->dev, buf: tx_buffer); |
210 | break; |
211 | case IGC_TX_BUFFER_TYPE_SKB: |
212 | dev_kfree_skb_any(skb: tx_buffer->skb); |
213 | igc_unmap_tx_buffer(dev: tx_ring->dev, buf: tx_buffer); |
214 | break; |
215 | default: |
216 | netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); |
217 | break; |
218 | } |
219 | |
220 | /* check for eop_desc to determine the end of the packet */ |
221 | eop_desc = tx_buffer->next_to_watch; |
222 | tx_desc = IGC_TX_DESC(tx_ring, i); |
223 | |
224 | /* unmap remaining buffers */ |
225 | while (tx_desc != eop_desc) { |
226 | tx_buffer++; |
227 | tx_desc++; |
228 | i++; |
229 | if (unlikely(i == tx_ring->count)) { |
230 | i = 0; |
231 | tx_buffer = tx_ring->tx_buffer_info; |
232 | tx_desc = IGC_TX_DESC(tx_ring, 0); |
233 | } |
234 | |
235 | /* unmap any remaining paged data */ |
236 | if (dma_unmap_len(tx_buffer, len)) |
237 | igc_unmap_tx_buffer(dev: tx_ring->dev, buf: tx_buffer); |
238 | } |
239 | |
240 | tx_buffer->next_to_watch = NULL; |
241 | |
242 | /* move us one more past the eop_desc for start of next pkt */ |
243 | tx_buffer++; |
244 | i++; |
245 | if (unlikely(i == tx_ring->count)) { |
246 | i = 0; |
247 | tx_buffer = tx_ring->tx_buffer_info; |
248 | } |
249 | } |
250 | |
251 | if (tx_ring->xsk_pool && xsk_frames) |
252 | xsk_tx_completed(pool: tx_ring->xsk_pool, nb_entries: xsk_frames); |
253 | |
254 | /* reset BQL for queue */ |
255 | netdev_tx_reset_queue(q: txring_txq(tx_ring)); |
256 | |
257 | /* Zero out the buffer ring */ |
258 | memset(tx_ring->tx_buffer_info, 0, |
259 | sizeof(*tx_ring->tx_buffer_info) * tx_ring->count); |
260 | |
261 | /* Zero out the descriptor ring */ |
262 | memset(tx_ring->desc, 0, tx_ring->size); |
263 | |
264 | /* reset next_to_use and next_to_clean */ |
265 | tx_ring->next_to_use = 0; |
266 | tx_ring->next_to_clean = 0; |
267 | } |
268 | |
269 | /** |
270 | * igc_free_tx_resources - Free Tx Resources per Queue |
271 | * @tx_ring: Tx descriptor ring for a specific queue |
272 | * |
273 | * Free all transmit software resources |
274 | */ |
275 | void igc_free_tx_resources(struct igc_ring *tx_ring) |
276 | { |
277 | igc_disable_tx_ring(ring: tx_ring); |
278 | |
279 | vfree(addr: tx_ring->tx_buffer_info); |
280 | tx_ring->tx_buffer_info = NULL; |
281 | |
282 | /* if not set, then don't free */ |
283 | if (!tx_ring->desc) |
284 | return; |
285 | |
286 | dma_free_coherent(dev: tx_ring->dev, size: tx_ring->size, |
287 | cpu_addr: tx_ring->desc, dma_handle: tx_ring->dma); |
288 | |
289 | tx_ring->desc = NULL; |
290 | } |
291 | |
292 | /** |
293 | * igc_free_all_tx_resources - Free Tx Resources for All Queues |
294 | * @adapter: board private structure |
295 | * |
296 | * Free all transmit software resources |
297 | */ |
298 | static void igc_free_all_tx_resources(struct igc_adapter *adapter) |
299 | { |
300 | int i; |
301 | |
302 | for (i = 0; i < adapter->num_tx_queues; i++) |
303 | igc_free_tx_resources(tx_ring: adapter->tx_ring[i]); |
304 | } |
305 | |
306 | /** |
307 | * igc_clean_all_tx_rings - Free Tx Buffers for all queues |
308 | * @adapter: board private structure |
309 | */ |
310 | static void igc_clean_all_tx_rings(struct igc_adapter *adapter) |
311 | { |
312 | int i; |
313 | |
314 | for (i = 0; i < adapter->num_tx_queues; i++) |
315 | if (adapter->tx_ring[i]) |
316 | igc_clean_tx_ring(tx_ring: adapter->tx_ring[i]); |
317 | } |
318 | |
319 | static void igc_disable_tx_ring_hw(struct igc_ring *ring) |
320 | { |
321 | struct igc_hw *hw = &ring->q_vector->adapter->hw; |
322 | u8 idx = ring->reg_idx; |
323 | u32 txdctl; |
324 | |
325 | txdctl = rd32(IGC_TXDCTL(idx)); |
326 | txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE; |
327 | txdctl |= IGC_TXDCTL_SWFLUSH; |
328 | wr32(IGC_TXDCTL(idx), txdctl); |
329 | } |
330 | |
331 | /** |
332 | * igc_disable_all_tx_rings_hw - Disable all transmit queue operation |
333 | * @adapter: board private structure |
334 | */ |
335 | static void igc_disable_all_tx_rings_hw(struct igc_adapter *adapter) |
336 | { |
337 | int i; |
338 | |
339 | for (i = 0; i < adapter->num_tx_queues; i++) { |
340 | struct igc_ring *tx_ring = adapter->tx_ring[i]; |
341 | |
342 | igc_disable_tx_ring_hw(ring: tx_ring); |
343 | } |
344 | } |
345 | |
346 | /** |
347 | * igc_setup_tx_resources - allocate Tx resources (Descriptors) |
348 | * @tx_ring: tx descriptor ring (for a specific queue) to setup |
349 | * |
350 | * Return 0 on success, negative on failure |
351 | */ |
352 | int igc_setup_tx_resources(struct igc_ring *tx_ring) |
353 | { |
354 | struct net_device *ndev = tx_ring->netdev; |
355 | struct device *dev = tx_ring->dev; |
356 | int size = 0; |
357 | |
358 | size = sizeof(struct igc_tx_buffer) * tx_ring->count; |
359 | tx_ring->tx_buffer_info = vzalloc(size); |
360 | if (!tx_ring->tx_buffer_info) |
361 | goto err; |
362 | |
363 | /* round up to nearest 4K */ |
364 | tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc); |
365 | tx_ring->size = ALIGN(tx_ring->size, 4096); |
366 | |
367 | tx_ring->desc = dma_alloc_coherent(dev, size: tx_ring->size, |
368 | dma_handle: &tx_ring->dma, GFP_KERNEL); |
369 | |
370 | if (!tx_ring->desc) |
371 | goto err; |
372 | |
373 | tx_ring->next_to_use = 0; |
374 | tx_ring->next_to_clean = 0; |
375 | |
376 | return 0; |
377 | |
378 | err: |
379 | vfree(addr: tx_ring->tx_buffer_info); |
380 | netdev_err(dev: ndev, format: "Unable to allocate memory for Tx descriptor ring\n"); |
381 | return -ENOMEM; |
382 | } |
383 | |
384 | /** |
385 | * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues |
386 | * @adapter: board private structure |
387 | * |
388 | * Return 0 on success, negative on failure |
389 | */ |
390 | static int igc_setup_all_tx_resources(struct igc_adapter *adapter) |
391 | { |
392 | struct net_device *dev = adapter->netdev; |
393 | int i, err = 0; |
394 | |
395 | for (i = 0; i < adapter->num_tx_queues; i++) { |
396 | err = igc_setup_tx_resources(tx_ring: adapter->tx_ring[i]); |
397 | if (err) { |
398 | netdev_err(dev, format: "Error on Tx queue %u setup\n", i); |
399 | for (i--; i >= 0; i--) |
400 | igc_free_tx_resources(tx_ring: adapter->tx_ring[i]); |
401 | break; |
402 | } |
403 | } |
404 | |
405 | return err; |
406 | } |
407 | |
408 | static void igc_clean_rx_ring_page_shared(struct igc_ring *rx_ring) |
409 | { |
410 | u16 i = rx_ring->next_to_clean; |
411 | |
412 | dev_kfree_skb(rx_ring->skb); |
413 | rx_ring->skb = NULL; |
414 | |
415 | /* Free all the Rx ring sk_buffs */ |
416 | while (i != rx_ring->next_to_alloc) { |
417 | struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; |
418 | |
419 | /* Invalidate cache lines that may have been written to by |
420 | * device so that we avoid corrupting memory. |
421 | */ |
422 | dma_sync_single_range_for_cpu(dev: rx_ring->dev, |
423 | addr: buffer_info->dma, |
424 | offset: buffer_info->page_offset, |
425 | size: igc_rx_bufsz(ring: rx_ring), |
426 | dir: DMA_FROM_DEVICE); |
427 | |
428 | /* free resources associated with mapping */ |
429 | dma_unmap_page_attrs(dev: rx_ring->dev, |
430 | addr: buffer_info->dma, |
431 | igc_rx_pg_size(rx_ring), |
432 | dir: DMA_FROM_DEVICE, |
433 | IGC_RX_DMA_ATTR); |
434 | __page_frag_cache_drain(page: buffer_info->page, |
435 | count: buffer_info->pagecnt_bias); |
436 | |
437 | i++; |
438 | if (i == rx_ring->count) |
439 | i = 0; |
440 | } |
441 | } |
442 | |
443 | static void igc_clean_rx_ring_xsk_pool(struct igc_ring *ring) |
444 | { |
445 | struct igc_rx_buffer *bi; |
446 | u16 i; |
447 | |
448 | for (i = 0; i < ring->count; i++) { |
449 | bi = &ring->rx_buffer_info[i]; |
450 | if (!bi->xdp) |
451 | continue; |
452 | |
453 | xsk_buff_free(xdp: bi->xdp); |
454 | bi->xdp = NULL; |
455 | } |
456 | } |
457 | |
458 | /** |
459 | * igc_clean_rx_ring - Free Rx Buffers per Queue |
460 | * @ring: ring to free buffers from |
461 | */ |
462 | static void igc_clean_rx_ring(struct igc_ring *ring) |
463 | { |
464 | if (ring->xsk_pool) |
465 | igc_clean_rx_ring_xsk_pool(ring); |
466 | else |
467 | igc_clean_rx_ring_page_shared(rx_ring: ring); |
468 | |
469 | clear_ring_uses_large_buffer(ring); |
470 | |
471 | ring->next_to_alloc = 0; |
472 | ring->next_to_clean = 0; |
473 | ring->next_to_use = 0; |
474 | } |
475 | |
476 | /** |
477 | * igc_clean_all_rx_rings - Free Rx Buffers for all queues |
478 | * @adapter: board private structure |
479 | */ |
480 | static void igc_clean_all_rx_rings(struct igc_adapter *adapter) |
481 | { |
482 | int i; |
483 | |
484 | for (i = 0; i < adapter->num_rx_queues; i++) |
485 | if (adapter->rx_ring[i]) |
486 | igc_clean_rx_ring(ring: adapter->rx_ring[i]); |
487 | } |
488 | |
489 | /** |
490 | * igc_free_rx_resources - Free Rx Resources |
491 | * @rx_ring: ring to clean the resources from |
492 | * |
493 | * Free all receive software resources |
494 | */ |
495 | void igc_free_rx_resources(struct igc_ring *rx_ring) |
496 | { |
497 | igc_clean_rx_ring(ring: rx_ring); |
498 | |
499 | xdp_rxq_info_unreg(xdp_rxq: &rx_ring->xdp_rxq); |
500 | |
501 | vfree(addr: rx_ring->rx_buffer_info); |
502 | rx_ring->rx_buffer_info = NULL; |
503 | |
504 | /* if not set, then don't free */ |
505 | if (!rx_ring->desc) |
506 | return; |
507 | |
508 | dma_free_coherent(dev: rx_ring->dev, size: rx_ring->size, |
509 | cpu_addr: rx_ring->desc, dma_handle: rx_ring->dma); |
510 | |
511 | rx_ring->desc = NULL; |
512 | } |
513 | |
514 | /** |
515 | * igc_free_all_rx_resources - Free Rx Resources for All Queues |
516 | * @adapter: board private structure |
517 | * |
518 | * Free all receive software resources |
519 | */ |
520 | static void igc_free_all_rx_resources(struct igc_adapter *adapter) |
521 | { |
522 | int i; |
523 | |
524 | for (i = 0; i < adapter->num_rx_queues; i++) |
525 | igc_free_rx_resources(rx_ring: adapter->rx_ring[i]); |
526 | } |
527 | |
528 | /** |
529 | * igc_setup_rx_resources - allocate Rx resources (Descriptors) |
530 | * @rx_ring: rx descriptor ring (for a specific queue) to setup |
531 | * |
532 | * Returns 0 on success, negative on failure |
533 | */ |
534 | int igc_setup_rx_resources(struct igc_ring *rx_ring) |
535 | { |
536 | struct net_device *ndev = rx_ring->netdev; |
537 | struct device *dev = rx_ring->dev; |
538 | u8 index = rx_ring->queue_index; |
539 | int size, desc_len, res; |
540 | |
541 | /* XDP RX-queue info */ |
542 | if (xdp_rxq_info_is_reg(xdp_rxq: &rx_ring->xdp_rxq)) |
543 | xdp_rxq_info_unreg(xdp_rxq: &rx_ring->xdp_rxq); |
544 | res = xdp_rxq_info_reg(xdp_rxq: &rx_ring->xdp_rxq, dev: ndev, queue_index: index, |
545 | napi_id: rx_ring->q_vector->napi.napi_id); |
546 | if (res < 0) { |
547 | netdev_err(dev: ndev, format: "Failed to register xdp_rxq index %u\n", |
548 | index); |
549 | return res; |
550 | } |
551 | |
552 | size = sizeof(struct igc_rx_buffer) * rx_ring->count; |
553 | rx_ring->rx_buffer_info = vzalloc(size); |
554 | if (!rx_ring->rx_buffer_info) |
555 | goto err; |
556 | |
557 | desc_len = sizeof(union igc_adv_rx_desc); |
558 | |
559 | /* Round up to nearest 4K */ |
560 | rx_ring->size = rx_ring->count * desc_len; |
561 | rx_ring->size = ALIGN(rx_ring->size, 4096); |
562 | |
563 | rx_ring->desc = dma_alloc_coherent(dev, size: rx_ring->size, |
564 | dma_handle: &rx_ring->dma, GFP_KERNEL); |
565 | |
566 | if (!rx_ring->desc) |
567 | goto err; |
568 | |
569 | rx_ring->next_to_alloc = 0; |
570 | rx_ring->next_to_clean = 0; |
571 | rx_ring->next_to_use = 0; |
572 | |
573 | return 0; |
574 | |
575 | err: |
576 | xdp_rxq_info_unreg(xdp_rxq: &rx_ring->xdp_rxq); |
577 | vfree(addr: rx_ring->rx_buffer_info); |
578 | rx_ring->rx_buffer_info = NULL; |
579 | netdev_err(dev: ndev, format: "Unable to allocate memory for Rx descriptor ring\n"); |
580 | return -ENOMEM; |
581 | } |
582 | |
583 | /** |
584 | * igc_setup_all_rx_resources - wrapper to allocate Rx resources |
585 | * (Descriptors) for all queues |
586 | * @adapter: board private structure |
587 | * |
588 | * Return 0 on success, negative on failure |
589 | */ |
590 | static int igc_setup_all_rx_resources(struct igc_adapter *adapter) |
591 | { |
592 | struct net_device *dev = adapter->netdev; |
593 | int i, err = 0; |
594 | |
595 | for (i = 0; i < adapter->num_rx_queues; i++) { |
596 | err = igc_setup_rx_resources(rx_ring: adapter->rx_ring[i]); |
597 | if (err) { |
598 | netdev_err(dev, format: "Error on Rx queue %u setup\n", i); |
599 | for (i--; i >= 0; i--) |
600 | igc_free_rx_resources(rx_ring: adapter->rx_ring[i]); |
601 | break; |
602 | } |
603 | } |
604 | |
605 | return err; |
606 | } |
607 | |
608 | static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter, |
609 | struct igc_ring *ring) |
610 | { |
611 | if (!igc_xdp_is_enabled(adapter) || |
612 | !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags)) |
613 | return NULL; |
614 | |
615 | return xsk_get_pool_from_qid(dev: ring->netdev, queue_id: ring->queue_index); |
616 | } |
617 | |
618 | /** |
619 | * igc_configure_rx_ring - Configure a receive ring after Reset |
620 | * @adapter: board private structure |
621 | * @ring: receive ring to be configured |
622 | * |
623 | * Configure the Rx unit of the MAC after a reset. |
624 | */ |
625 | static void igc_configure_rx_ring(struct igc_adapter *adapter, |
626 | struct igc_ring *ring) |
627 | { |
628 | struct igc_hw *hw = &adapter->hw; |
629 | union igc_adv_rx_desc *rx_desc; |
630 | int reg_idx = ring->reg_idx; |
631 | u32 srrctl = 0, rxdctl = 0; |
632 | u64 rdba = ring->dma; |
633 | u32 buf_size; |
634 | |
635 | xdp_rxq_info_unreg_mem_model(xdp_rxq: &ring->xdp_rxq); |
636 | ring->xsk_pool = igc_get_xsk_pool(adapter, ring); |
637 | if (ring->xsk_pool) { |
638 | WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, |
639 | MEM_TYPE_XSK_BUFF_POOL, |
640 | NULL)); |
641 | xsk_pool_set_rxq_info(pool: ring->xsk_pool, rxq: &ring->xdp_rxq); |
642 | } else { |
643 | WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, |
644 | MEM_TYPE_PAGE_SHARED, |
645 | NULL)); |
646 | } |
647 | |
648 | if (igc_xdp_is_enabled(adapter)) |
649 | set_ring_uses_large_buffer(ring); |
650 | |
651 | /* disable the queue */ |
652 | wr32(IGC_RXDCTL(reg_idx), 0); |
653 | |
654 | /* Set DMA base address registers */ |
655 | wr32(IGC_RDBAL(reg_idx), |
656 | rdba & 0x00000000ffffffffULL); |
657 | wr32(IGC_RDBAH(reg_idx), rdba >> 32); |
658 | wr32(IGC_RDLEN(reg_idx), |
659 | ring->count * sizeof(union igc_adv_rx_desc)); |
660 | |
661 | /* initialize head and tail */ |
662 | ring->tail = adapter->io_addr + IGC_RDT(reg_idx); |
663 | wr32(IGC_RDH(reg_idx), 0); |
664 | writel(val: 0, addr: ring->tail); |
665 | |
666 | /* reset next-to- use/clean to place SW in sync with hardware */ |
667 | ring->next_to_clean = 0; |
668 | ring->next_to_use = 0; |
669 | |
670 | if (ring->xsk_pool) |
671 | buf_size = xsk_pool_get_rx_frame_size(pool: ring->xsk_pool); |
672 | else if (ring_uses_large_buffer(ring)) |
673 | buf_size = IGC_RXBUFFER_3072; |
674 | else |
675 | buf_size = IGC_RXBUFFER_2048; |
676 | |
677 | srrctl = rd32(IGC_SRRCTL(reg_idx)); |
678 | srrctl &= ~(IGC_SRRCTL_BSIZEPKT_MASK | IGC_SRRCTL_BSIZEHDR_MASK | |
679 | IGC_SRRCTL_DESCTYPE_MASK); |
680 | srrctl |= IGC_SRRCTL_BSIZEHDR(IGC_RX_HDR_LEN); |
681 | srrctl |= IGC_SRRCTL_BSIZEPKT(buf_size); |
682 | srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF; |
683 | |
684 | wr32(IGC_SRRCTL(reg_idx), srrctl); |
685 | |
686 | rxdctl |= IGC_RX_PTHRESH; |
687 | rxdctl |= IGC_RX_HTHRESH << 8; |
688 | rxdctl |= IGC_RX_WTHRESH << 16; |
689 | |
690 | /* initialize rx_buffer_info */ |
691 | memset(ring->rx_buffer_info, 0, |
692 | sizeof(struct igc_rx_buffer) * ring->count); |
693 | |
694 | /* initialize Rx descriptor 0 */ |
695 | rx_desc = IGC_RX_DESC(ring, 0); |
696 | rx_desc->wb.upper.length = 0; |
697 | |
698 | /* enable receive descriptor fetching */ |
699 | rxdctl |= IGC_RXDCTL_QUEUE_ENABLE; |
700 | |
701 | wr32(IGC_RXDCTL(reg_idx), rxdctl); |
702 | } |
703 | |
704 | /** |
705 | * igc_configure_rx - Configure receive Unit after Reset |
706 | * @adapter: board private structure |
707 | * |
708 | * Configure the Rx unit of the MAC after a reset. |
709 | */ |
710 | static void igc_configure_rx(struct igc_adapter *adapter) |
711 | { |
712 | int i; |
713 | |
714 | /* Setup the HW Rx Head and Tail Descriptor Pointers and |
715 | * the Base and Length of the Rx Descriptor Ring |
716 | */ |
717 | for (i = 0; i < adapter->num_rx_queues; i++) |
718 | igc_configure_rx_ring(adapter, ring: adapter->rx_ring[i]); |
719 | } |
720 | |
721 | /** |
722 | * igc_configure_tx_ring - Configure transmit ring after Reset |
723 | * @adapter: board private structure |
724 | * @ring: tx ring to configure |
725 | * |
726 | * Configure a transmit ring after a reset. |
727 | */ |
728 | static void igc_configure_tx_ring(struct igc_adapter *adapter, |
729 | struct igc_ring *ring) |
730 | { |
731 | struct igc_hw *hw = &adapter->hw; |
732 | int reg_idx = ring->reg_idx; |
733 | u64 tdba = ring->dma; |
734 | u32 txdctl = 0; |
735 | |
736 | ring->xsk_pool = igc_get_xsk_pool(adapter, ring); |
737 | |
738 | /* disable the queue */ |
739 | wr32(IGC_TXDCTL(reg_idx), 0); |
740 | wrfl(); |
741 | |
742 | wr32(IGC_TDLEN(reg_idx), |
743 | ring->count * sizeof(union igc_adv_tx_desc)); |
744 | wr32(IGC_TDBAL(reg_idx), |
745 | tdba & 0x00000000ffffffffULL); |
746 | wr32(IGC_TDBAH(reg_idx), tdba >> 32); |
747 | |
748 | ring->tail = adapter->io_addr + IGC_TDT(reg_idx); |
749 | wr32(IGC_TDH(reg_idx), 0); |
750 | writel(val: 0, addr: ring->tail); |
751 | |
752 | txdctl |= IGC_TX_PTHRESH; |
753 | txdctl |= IGC_TX_HTHRESH << 8; |
754 | txdctl |= IGC_TX_WTHRESH << 16; |
755 | |
756 | txdctl |= IGC_TXDCTL_QUEUE_ENABLE; |
757 | wr32(IGC_TXDCTL(reg_idx), txdctl); |
758 | } |
759 | |
760 | /** |
761 | * igc_configure_tx - Configure transmit Unit after Reset |
762 | * @adapter: board private structure |
763 | * |
764 | * Configure the Tx unit of the MAC after a reset. |
765 | */ |
766 | static void igc_configure_tx(struct igc_adapter *adapter) |
767 | { |
768 | int i; |
769 | |
770 | for (i = 0; i < adapter->num_tx_queues; i++) |
771 | igc_configure_tx_ring(adapter, ring: adapter->tx_ring[i]); |
772 | } |
773 | |
774 | /** |
775 | * igc_setup_mrqc - configure the multiple receive queue control registers |
776 | * @adapter: Board private structure |
777 | */ |
778 | static void igc_setup_mrqc(struct igc_adapter *adapter) |
779 | { |
780 | struct igc_hw *hw = &adapter->hw; |
781 | u32 j, num_rx_queues; |
782 | u32 mrqc, rxcsum; |
783 | u32 rss_key[10]; |
784 | |
785 | netdev_rss_key_fill(buffer: rss_key, len: sizeof(rss_key)); |
786 | for (j = 0; j < 10; j++) |
787 | wr32(IGC_RSSRK(j), rss_key[j]); |
788 | |
789 | num_rx_queues = adapter->rss_queues; |
790 | |
791 | if (adapter->rss_indir_tbl_init != num_rx_queues) { |
792 | for (j = 0; j < IGC_RETA_SIZE; j++) |
793 | adapter->rss_indir_tbl[j] = |
794 | (j * num_rx_queues) / IGC_RETA_SIZE; |
795 | adapter->rss_indir_tbl_init = num_rx_queues; |
796 | } |
797 | igc_write_rss_indir_tbl(adapter); |
798 | |
799 | /* Disable raw packet checksumming so that RSS hash is placed in |
800 | * descriptor on writeback. No need to enable TCP/UDP/IP checksum |
801 | * offloads as they are enabled by default |
802 | */ |
803 | rxcsum = rd32(IGC_RXCSUM); |
804 | rxcsum |= IGC_RXCSUM_PCSD; |
805 | |
806 | /* Enable Receive Checksum Offload for SCTP */ |
807 | rxcsum |= IGC_RXCSUM_CRCOFL; |
808 | |
809 | /* Don't need to set TUOFL or IPOFL, they default to 1 */ |
810 | wr32(IGC_RXCSUM, rxcsum); |
811 | |
812 | /* Generate RSS hash based on packet types, TCP/UDP |
813 | * port numbers and/or IPv4/v6 src and dst addresses |
814 | */ |
815 | mrqc = IGC_MRQC_RSS_FIELD_IPV4 | |
816 | IGC_MRQC_RSS_FIELD_IPV4_TCP | |
817 | IGC_MRQC_RSS_FIELD_IPV6 | |
818 | IGC_MRQC_RSS_FIELD_IPV6_TCP | |
819 | IGC_MRQC_RSS_FIELD_IPV6_TCP_EX; |
820 | |
821 | if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) |
822 | mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP; |
823 | if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) |
824 | mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP; |
825 | |
826 | mrqc |= IGC_MRQC_ENABLE_RSS_MQ; |
827 | |
828 | wr32(IGC_MRQC, mrqc); |
829 | } |
830 | |
831 | /** |
832 | * igc_setup_rctl - configure the receive control registers |
833 | * @adapter: Board private structure |
834 | */ |
835 | static void igc_setup_rctl(struct igc_adapter *adapter) |
836 | { |
837 | struct igc_hw *hw = &adapter->hw; |
838 | u32 rctl; |
839 | |
840 | rctl = rd32(IGC_RCTL); |
841 | |
842 | rctl &= ~(3 << IGC_RCTL_MO_SHIFT); |
843 | rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC); |
844 | |
845 | rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF | |
846 | (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT); |
847 | |
848 | /* enable stripping of CRC. Newer features require |
849 | * that the HW strips the CRC. |
850 | */ |
851 | rctl |= IGC_RCTL_SECRC; |
852 | |
853 | /* disable store bad packets and clear size bits. */ |
854 | rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256); |
855 | |
856 | /* enable LPE to allow for reception of jumbo frames */ |
857 | rctl |= IGC_RCTL_LPE; |
858 | |
859 | /* disable queue 0 to prevent tail write w/o re-config */ |
860 | wr32(IGC_RXDCTL(0), 0); |
861 | |
862 | /* This is useful for sniffing bad packets. */ |
863 | if (adapter->netdev->features & NETIF_F_RXALL) { |
864 | /* UPE and MPE will be handled by normal PROMISC logic |
865 | * in set_rx_mode |
866 | */ |
867 | rctl |= (IGC_RCTL_SBP | /* Receive bad packets */ |
868 | IGC_RCTL_BAM | /* RX All Bcast Pkts */ |
869 | IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ |
870 | |
871 | rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */ |
872 | IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */ |
873 | } |
874 | |
875 | wr32(IGC_RCTL, rctl); |
876 | } |
877 | |
878 | /** |
879 | * igc_setup_tctl - configure the transmit control registers |
880 | * @adapter: Board private structure |
881 | */ |
882 | static void igc_setup_tctl(struct igc_adapter *adapter) |
883 | { |
884 | struct igc_hw *hw = &adapter->hw; |
885 | u32 tctl; |
886 | |
887 | /* disable queue 0 which icould be enabled by default */ |
888 | wr32(IGC_TXDCTL(0), 0); |
889 | |
890 | /* Program the Transmit Control Register */ |
891 | tctl = rd32(IGC_TCTL); |
892 | tctl &= ~IGC_TCTL_CT; |
893 | tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC | |
894 | (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT); |
895 | |
896 | /* Enable transmits */ |
897 | tctl |= IGC_TCTL_EN; |
898 | |
899 | wr32(IGC_TCTL, tctl); |
900 | } |
901 | |
902 | /** |
903 | * igc_set_mac_filter_hw() - Set MAC address filter in hardware |
904 | * @adapter: Pointer to adapter where the filter should be set |
905 | * @index: Filter index |
906 | * @type: MAC address filter type (source or destination) |
907 | * @addr: MAC address |
908 | * @queue: If non-negative, queue assignment feature is enabled and frames |
909 | * matching the filter are enqueued onto 'queue'. Otherwise, queue |
910 | * assignment is disabled. |
911 | */ |
912 | static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index, |
913 | enum igc_mac_filter_type type, |
914 | const u8 *addr, int queue) |
915 | { |
916 | struct net_device *dev = adapter->netdev; |
917 | struct igc_hw *hw = &adapter->hw; |
918 | u32 ral, rah; |
919 | |
920 | if (WARN_ON(index >= hw->mac.rar_entry_count)) |
921 | return; |
922 | |
923 | ral = le32_to_cpup(p: (__le32 *)(addr)); |
924 | rah = le16_to_cpup(p: (__le16 *)(addr + 4)); |
925 | |
926 | if (type == IGC_MAC_FILTER_TYPE_SRC) { |
927 | rah &= ~IGC_RAH_ASEL_MASK; |
928 | rah |= IGC_RAH_ASEL_SRC_ADDR; |
929 | } |
930 | |
931 | if (queue >= 0) { |
932 | rah &= ~IGC_RAH_QSEL_MASK; |
933 | rah |= (queue << IGC_RAH_QSEL_SHIFT); |
934 | rah |= IGC_RAH_QSEL_ENABLE; |
935 | } |
936 | |
937 | rah |= IGC_RAH_AV; |
938 | |
939 | wr32(IGC_RAL(index), ral); |
940 | wr32(IGC_RAH(index), rah); |
941 | |
942 | netdev_dbg(dev, "MAC address filter set in HW: index %d", index); |
943 | } |
944 | |
945 | /** |
946 | * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware |
947 | * @adapter: Pointer to adapter where the filter should be cleared |
948 | * @index: Filter index |
949 | */ |
950 | static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index) |
951 | { |
952 | struct net_device *dev = adapter->netdev; |
953 | struct igc_hw *hw = &adapter->hw; |
954 | |
955 | if (WARN_ON(index >= hw->mac.rar_entry_count)) |
956 | return; |
957 | |
958 | wr32(IGC_RAL(index), 0); |
959 | wr32(IGC_RAH(index), 0); |
960 | |
961 | netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index); |
962 | } |
963 | |
964 | /* Set default MAC address for the PF in the first RAR entry */ |
965 | static void igc_set_default_mac_filter(struct igc_adapter *adapter) |
966 | { |
967 | struct net_device *dev = adapter->netdev; |
968 | u8 *addr = adapter->hw.mac.addr; |
969 | |
970 | netdev_dbg(dev, "Set default MAC address filter: address %pM", addr); |
971 | |
972 | igc_set_mac_filter_hw(adapter, index: 0, type: IGC_MAC_FILTER_TYPE_DST, addr, queue: -1); |
973 | } |
974 | |
975 | /** |
976 | * igc_set_mac - Change the Ethernet Address of the NIC |
977 | * @netdev: network interface device structure |
978 | * @p: pointer to an address structure |
979 | * |
980 | * Returns 0 on success, negative on failure |
981 | */ |
982 | static int igc_set_mac(struct net_device *netdev, void *p) |
983 | { |
984 | struct igc_adapter *adapter = netdev_priv(dev: netdev); |
985 | struct igc_hw *hw = &adapter->hw; |
986 | struct sockaddr *addr = p; |
987 | |
988 | if (!is_valid_ether_addr(addr: addr->sa_data)) |
989 | return -EADDRNOTAVAIL; |
990 | |
991 | eth_hw_addr_set(dev: netdev, addr: addr->sa_data); |
992 | memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); |
993 | |
994 | /* set the correct pool for the new PF MAC address in entry 0 */ |
995 | igc_set_default_mac_filter(adapter); |
996 | |
997 | return 0; |
998 | } |
999 | |
1000 | /** |
1001 | * igc_write_mc_addr_list - write multicast addresses to MTA |
1002 | * @netdev: network interface device structure |
1003 | * |
1004 | * Writes multicast address list to the MTA hash table. |
1005 | * Returns: -ENOMEM on failure |
1006 | * 0 on no addresses written |
1007 | * X on writing X addresses to MTA |
1008 | **/ |
1009 | static int igc_write_mc_addr_list(struct net_device *netdev) |
1010 | { |
1011 | struct igc_adapter *adapter = netdev_priv(dev: netdev); |
1012 | struct igc_hw *hw = &adapter->hw; |
1013 | struct netdev_hw_addr *ha; |
1014 | u8 *mta_list; |
1015 | int i; |
1016 | |
1017 | if (netdev_mc_empty(netdev)) { |
1018 | /* nothing to program, so clear mc list */ |
1019 | igc_update_mc_addr_list(hw, NULL, mc_addr_count: 0); |
1020 | return 0; |
1021 | } |
1022 | |
1023 | mta_list = kcalloc(netdev_mc_count(netdev), size: 6, GFP_ATOMIC); |
1024 | if (!mta_list) |
1025 | return -ENOMEM; |
1026 | |
1027 | /* The shared function expects a packed array of only addresses. */ |
1028 | i = 0; |
1029 | netdev_for_each_mc_addr(ha, netdev) |
1030 | memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); |
1031 | |
1032 | igc_update_mc_addr_list(hw, mc_addr_list: mta_list, mc_addr_count: i); |
1033 | kfree(objp: mta_list); |
1034 | |
1035 | return netdev_mc_count(netdev); |
1036 | } |
1037 | |
1038 | static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime, |
1039 | bool *first_flag, bool *insert_empty) |
1040 | { |
1041 | struct igc_adapter *adapter = netdev_priv(dev: ring->netdev); |
1042 | ktime_t cycle_time = adapter->cycle_time; |
1043 | ktime_t base_time = adapter->base_time; |
1044 | ktime_t now = ktime_get_clocktai(); |
1045 | ktime_t baset_est, end_of_cycle; |
1046 | s32 launchtime; |
1047 | s64 n; |
1048 | |
1049 | n = div64_s64(ktime_sub_ns(now, base_time), divisor: cycle_time); |
1050 | |
1051 | baset_est = ktime_add_ns(base_time, cycle_time * (n)); |
1052 | end_of_cycle = ktime_add_ns(baset_est, cycle_time); |
1053 | |
1054 | if (ktime_compare(cmp1: txtime, cmp2: end_of_cycle) >= 0) { |
1055 | if (baset_est != ring->last_ff_cycle) { |
1056 | *first_flag = true; |
1057 | ring->last_ff_cycle = baset_est; |
1058 | |
1059 | if (ktime_compare(cmp1: end_of_cycle, cmp2: ring->last_tx_cycle) > 0) |
1060 | *insert_empty = true; |
1061 | } |
1062 | } |
1063 | |
1064 | /* Introducing a window at end of cycle on which packets |
1065 | * potentially not honor launchtime. Window of 5us chosen |
1066 | * considering software update the tail pointer and packets |
1067 | * are dma'ed to packet buffer. |
1068 | */ |
1069 | if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC)) |
1070 | netdev_warn(dev: ring->netdev, format: "Packet with txtime=%llu may not be honoured\n", |
1071 | txtime); |
1072 | |
1073 | ring->last_tx_cycle = end_of_cycle; |
1074 | |
1075 | launchtime = ktime_sub_ns(txtime, baset_est); |
1076 | if (launchtime > 0) |
1077 | div_s64_rem(dividend: launchtime, divisor: cycle_time, remainder: &launchtime); |
1078 | else |
1079 | launchtime = 0; |
1080 | |
1081 | return cpu_to_le32(launchtime); |
1082 | } |
1083 | |
1084 | static int igc_init_empty_frame(struct igc_ring *ring, |
1085 | struct igc_tx_buffer *buffer, |
1086 | struct sk_buff *skb) |
1087 | { |
1088 | unsigned int size; |
1089 | dma_addr_t dma; |
1090 | |
1091 | size = skb_headlen(skb); |
1092 | |
1093 | dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE); |
1094 | if (dma_mapping_error(dev: ring->dev, dma_addr: dma)) { |
1095 | netdev_err_once(ring->netdev, "Failed to map DMA for TX\n"); |
1096 | return -ENOMEM; |
1097 | } |
1098 | |
1099 | buffer->skb = skb; |
1100 | buffer->protocol = 0; |
1101 | buffer->bytecount = skb->len; |
1102 | buffer->gso_segs = 1; |
1103 | buffer->time_stamp = jiffies; |
1104 | dma_unmap_len_set(buffer, len, skb->len); |
1105 | dma_unmap_addr_set(buffer, dma, dma); |
1106 | |
1107 | return 0; |
1108 | } |
1109 | |
1110 | static int igc_init_tx_empty_descriptor(struct igc_ring *ring, |
1111 | struct sk_buff *skb, |
1112 | struct igc_tx_buffer *first) |
1113 | { |
1114 | union igc_adv_tx_desc *desc; |
1115 | u32 cmd_type, olinfo_status; |
1116 | int err; |
1117 | |
1118 | if (!igc_desc_unused(ring)) |
1119 | return -EBUSY; |
1120 | |
1121 | err = igc_init_empty_frame(ring, buffer: first, skb); |
1122 | if (err) |
1123 | return err; |
1124 | |
1125 | cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | |
1126 | IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | |
1127 | first->bytecount; |
1128 | olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; |
1129 | |
1130 | desc = IGC_TX_DESC(ring, ring->next_to_use); |
1131 | desc->read.cmd_type_len = cpu_to_le32(cmd_type); |
1132 | desc->read.olinfo_status = cpu_to_le32(olinfo_status); |
1133 | desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma)); |
1134 | |
1135 | netdev_tx_sent_queue(dev_queue: txring_txq(tx_ring: ring), bytes: skb->len); |
1136 | |
1137 | first->next_to_watch = desc; |
1138 | |
1139 | ring->next_to_use++; |
1140 | if (ring->next_to_use == ring->count) |
1141 | ring->next_to_use = 0; |
1142 | |
1143 | return 0; |
1144 | } |
1145 | |
1146 | #define IGC_EMPTY_FRAME_SIZE 60 |
1147 | |
1148 | static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, |
1149 | __le32 launch_time, bool first_flag, |
1150 | u32 vlan_macip_lens, u32 type_tucmd, |
1151 | u32 mss_l4len_idx) |
1152 | { |
1153 | struct igc_adv_tx_context_desc *context_desc; |
1154 | u16 i = tx_ring->next_to_use; |
1155 | |
1156 | context_desc = IGC_TX_CTXTDESC(tx_ring, i); |
1157 | |
1158 | i++; |
1159 | tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; |
1160 | |
1161 | /* set bits to identify this as an advanced context descriptor */ |
1162 | type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT; |
1163 | |
1164 | /* For i225, context index must be unique per ring. */ |
1165 | if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) |
1166 | mss_l4len_idx |= tx_ring->reg_idx << 4; |
1167 | |
1168 | if (first_flag) |
1169 | mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST; |
1170 | |
1171 | context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); |
1172 | context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); |
1173 | context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); |
1174 | context_desc->launch_time = launch_time; |
1175 | } |
1176 | |
1177 | static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first, |
1178 | __le32 launch_time, bool first_flag) |
1179 | { |
1180 | struct sk_buff *skb = first->skb; |
1181 | u32 vlan_macip_lens = 0; |
1182 | u32 type_tucmd = 0; |
1183 | |
1184 | if (skb->ip_summed != CHECKSUM_PARTIAL) { |
1185 | csum_failed: |
1186 | if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) && |
1187 | !tx_ring->launchtime_enable) |
1188 | return; |
1189 | goto no_csum; |
1190 | } |
1191 | |
1192 | switch (skb->csum_offset) { |
1193 | case offsetof(struct tcphdr, check): |
1194 | type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; |
1195 | fallthrough; |
1196 | case offsetof(struct udphdr, check): |
1197 | break; |
1198 | case offsetof(struct sctphdr, checksum): |
1199 | /* validate that this is actually an SCTP request */ |
1200 | if (skb_csum_is_sctp(skb)) { |
1201 | type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP; |
1202 | break; |
1203 | } |
1204 | fallthrough; |
1205 | default: |
1206 | skb_checksum_help(skb); |
1207 | goto csum_failed; |
1208 | } |
1209 | |
1210 | /* update TX checksum flag */ |
1211 | first->tx_flags |= IGC_TX_FLAGS_CSUM; |
1212 | vlan_macip_lens = skb_checksum_start_offset(skb) - |
1213 | skb_network_offset(skb); |
1214 | no_csum: |
1215 | vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT; |
1216 | vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; |
1217 | |
1218 | igc_tx_ctxtdesc(tx_ring, launch_time, first_flag, |
1219 | vlan_macip_lens, type_tucmd, mss_l4len_idx: 0); |
1220 | } |
1221 | |
1222 | static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) |
1223 | { |
1224 | struct net_device *netdev = tx_ring->netdev; |
1225 | |
1226 | netif_stop_subqueue(dev: netdev, queue_index: tx_ring->queue_index); |
1227 | |
1228 | /* memory barriier comment */ |
1229 | smp_mb(); |
1230 | |
1231 | /* We need to check again in a case another CPU has just |
1232 | * made room available. |
1233 | */ |
1234 | if (igc_desc_unused(ring: tx_ring) < size) |
1235 | return -EBUSY; |
1236 | |
1237 | /* A reprieve! */ |
1238 | netif_wake_subqueue(dev: netdev, queue_index: tx_ring->queue_index); |
1239 | |
1240 | u64_stats_update_begin(syncp: &tx_ring->tx_syncp2); |
1241 | tx_ring->tx_stats.restart_queue2++; |
1242 | u64_stats_update_end(syncp: &tx_ring->tx_syncp2); |
1243 | |
1244 | return 0; |
1245 | } |
1246 | |
1247 | static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) |
1248 | { |
1249 | if (igc_desc_unused(ring: tx_ring) >= size) |
1250 | return 0; |
1251 | return __igc_maybe_stop_tx(tx_ring, size); |
1252 | } |
1253 | |
1254 | #define IGC_SET_FLAG(_input, _flag, _result) \ |
1255 | (((_flag) <= (_result)) ? \ |
1256 | ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \ |
1257 | ((u32)((_input) & (_flag)) / ((_flag) / (_result)))) |
1258 | |
1259 | static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) |
1260 | { |
1261 | /* set type for advanced descriptor with frame checksum insertion */ |
1262 | u32 cmd_type = IGC_ADVTXD_DTYP_DATA | |
1263 | IGC_ADVTXD_DCMD_DEXT | |
1264 | IGC_ADVTXD_DCMD_IFCS; |
1265 | |
1266 | /* set HW vlan bit if vlan is present */ |
1267 | cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN, |
1268 | IGC_ADVTXD_DCMD_VLE); |
1269 | |
1270 | /* set segmentation bits for TSO */ |
1271 | cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO, |
1272 | (IGC_ADVTXD_DCMD_TSE)); |
1273 | |
1274 | /* set timestamp bit if present, will select the register set |
1275 | * based on the _TSTAMP(_X) bit. |
1276 | */ |
1277 | cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP, |
1278 | (IGC_ADVTXD_MAC_TSTAMP)); |
1279 | |
1280 | cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_1, |
1281 | (IGC_ADVTXD_TSTAMP_REG_1)); |
1282 | |
1283 | cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_2, |
1284 | (IGC_ADVTXD_TSTAMP_REG_2)); |
1285 | |
1286 | cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_3, |
1287 | (IGC_ADVTXD_TSTAMP_REG_3)); |
1288 | |
1289 | /* insert frame checksum */ |
1290 | cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS); |
1291 | |
1292 | return cmd_type; |
1293 | } |
1294 | |
1295 | static void igc_tx_olinfo_status(struct igc_ring *tx_ring, |
1296 | union igc_adv_tx_desc *tx_desc, |
1297 | u32 tx_flags, unsigned int paylen) |
1298 | { |
1299 | u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT; |
1300 | |
1301 | /* insert L4 checksum */ |
1302 | olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_CSUM, |
1303 | (IGC_TXD_POPTS_TXSM << 8)); |
1304 | |
1305 | /* insert IPv4 checksum */ |
1306 | olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_IPV4, |
1307 | (IGC_TXD_POPTS_IXSM << 8)); |
1308 | |
1309 | /* Use the second timer (free running, in general) for the timestamp */ |
1310 | olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_TIMER_1, |
1311 | IGC_TXD_PTP2_TIMER_1); |
1312 | |
1313 | tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); |
1314 | } |
1315 | |
1316 | static int igc_tx_map(struct igc_ring *tx_ring, |
1317 | struct igc_tx_buffer *first, |
1318 | const u8 hdr_len) |
1319 | { |
1320 | struct sk_buff *skb = first->skb; |
1321 | struct igc_tx_buffer *tx_buffer; |
1322 | union igc_adv_tx_desc *tx_desc; |
1323 | u32 tx_flags = first->tx_flags; |
1324 | skb_frag_t *frag; |
1325 | u16 i = tx_ring->next_to_use; |
1326 | unsigned int data_len, size; |
1327 | dma_addr_t dma; |
1328 | u32 cmd_type; |
1329 | |
1330 | cmd_type = igc_tx_cmd_type(skb, tx_flags); |
1331 | tx_desc = IGC_TX_DESC(tx_ring, i); |
1332 | |
1333 | igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, paylen: skb->len - hdr_len); |
1334 | |
1335 | size = skb_headlen(skb); |
1336 | data_len = skb->data_len; |
1337 | |
1338 | dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); |
1339 | |
1340 | tx_buffer = first; |
1341 | |
1342 | for (frag = &skb_shinfo(skb)->frags[0];; frag++) { |
1343 | if (dma_mapping_error(dev: tx_ring->dev, dma_addr: dma)) |
1344 | goto dma_error; |
1345 | |
1346 | /* record length, and DMA address */ |
1347 | dma_unmap_len_set(tx_buffer, len, size); |
1348 | dma_unmap_addr_set(tx_buffer, dma, dma); |
1349 | |
1350 | tx_desc->read.buffer_addr = cpu_to_le64(dma); |
1351 | |
1352 | while (unlikely(size > IGC_MAX_DATA_PER_TXD)) { |
1353 | tx_desc->read.cmd_type_len = |
1354 | cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD); |
1355 | |
1356 | i++; |
1357 | tx_desc++; |
1358 | if (i == tx_ring->count) { |
1359 | tx_desc = IGC_TX_DESC(tx_ring, 0); |
1360 | i = 0; |
1361 | } |
1362 | tx_desc->read.olinfo_status = 0; |
1363 | |
1364 | dma += IGC_MAX_DATA_PER_TXD; |
1365 | size -= IGC_MAX_DATA_PER_TXD; |
1366 | |
1367 | tx_desc->read.buffer_addr = cpu_to_le64(dma); |
1368 | } |
1369 | |
1370 | if (likely(!data_len)) |
1371 | break; |
1372 | |
1373 | tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); |
1374 | |
1375 | i++; |
1376 | tx_desc++; |
1377 | if (i == tx_ring->count) { |
1378 | tx_desc = IGC_TX_DESC(tx_ring, 0); |
1379 | i = 0; |
1380 | } |
1381 | tx_desc->read.olinfo_status = 0; |
1382 | |
1383 | size = skb_frag_size(frag); |
1384 | data_len -= size; |
1385 | |
1386 | dma = skb_frag_dma_map(dev: tx_ring->dev, frag, offset: 0, |
1387 | size, dir: DMA_TO_DEVICE); |
1388 | |
1389 | tx_buffer = &tx_ring->tx_buffer_info[i]; |
1390 | } |
1391 | |
1392 | /* write last descriptor with RS and EOP bits */ |
1393 | cmd_type |= size | IGC_TXD_DCMD; |
1394 | tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); |
1395 | |
1396 | netdev_tx_sent_queue(dev_queue: txring_txq(tx_ring), bytes: first->bytecount); |
1397 | |
1398 | /* set the timestamp */ |
1399 | first->time_stamp = jiffies; |
1400 | |
1401 | skb_tx_timestamp(skb); |
1402 | |
1403 | /* Force memory writes to complete before letting h/w know there |
1404 | * are new descriptors to fetch. (Only applicable for weak-ordered |
1405 | * memory model archs, such as IA-64). |
1406 | * |
1407 | * We also need this memory barrier to make certain all of the |
1408 | * status bits have been updated before next_to_watch is written. |
1409 | */ |
1410 | wmb(); |
1411 | |
1412 | /* set next_to_watch value indicating a packet is present */ |
1413 | first->next_to_watch = tx_desc; |
1414 | |
1415 | i++; |
1416 | if (i == tx_ring->count) |
1417 | i = 0; |
1418 | |
1419 | tx_ring->next_to_use = i; |
1420 | |
1421 | /* Make sure there is space in the ring for the next send. */ |
1422 | igc_maybe_stop_tx(tx_ring, DESC_NEEDED); |
1423 | |
1424 | if (netif_xmit_stopped(dev_queue: txring_txq(tx_ring)) || !netdev_xmit_more()) { |
1425 | writel(val: i, addr: tx_ring->tail); |
1426 | } |
1427 | |
1428 | return 0; |
1429 | dma_error: |
1430 | netdev_err(dev: tx_ring->netdev, format: "TX DMA map failed\n"); |
1431 | tx_buffer = &tx_ring->tx_buffer_info[i]; |
1432 | |
1433 | /* clear dma mappings for failed tx_buffer_info map */ |
1434 | while (tx_buffer != first) { |
1435 | if (dma_unmap_len(tx_buffer, len)) |
1436 | igc_unmap_tx_buffer(dev: tx_ring->dev, buf: tx_buffer); |
1437 | |
1438 | if (i-- == 0) |
1439 | i += tx_ring->count; |
1440 | tx_buffer = &tx_ring->tx_buffer_info[i]; |
1441 | } |
1442 | |
1443 | if (dma_unmap_len(tx_buffer, len)) |
1444 | igc_unmap_tx_buffer(dev: tx_ring->dev, buf: tx_buffer); |
1445 | |
1446 | dev_kfree_skb_any(skb: tx_buffer->skb); |
1447 | tx_buffer->skb = NULL; |
1448 | |
1449 | tx_ring->next_to_use = i; |
1450 | |
1451 | return -1; |
1452 | } |
1453 | |
1454 | static int igc_tso(struct igc_ring *tx_ring, |
1455 | struct igc_tx_buffer *first, |
1456 | __le32 launch_time, bool first_flag, |
1457 | u8 *hdr_len) |
1458 | { |
1459 | u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; |
1460 | struct sk_buff *skb = first->skb; |
1461 | union { |
1462 | struct iphdr *v4; |
1463 | struct ipv6hdr *v6; |
1464 | unsigned char *hdr; |
1465 | } ip; |
1466 | union { |
1467 | struct tcphdr *tcp; |
1468 | struct udphdr *udp; |
1469 | unsigned char *hdr; |
1470 | } l4; |
1471 | u32 paylen, l4_offset; |
1472 | int err; |
1473 | |
1474 | if (skb->ip_summed != CHECKSUM_PARTIAL) |
1475 | return 0; |
1476 | |
1477 | if (!skb_is_gso(skb)) |
1478 | return 0; |
1479 | |
1480 | err = skb_cow_head(skb, headroom: 0); |
1481 | if (err < 0) |
1482 | return err; |
1483 | |
1484 | ip.hdr = skb_network_header(skb); |
1485 | l4.hdr = skb_checksum_start(skb); |
1486 | |
1487 | /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ |
1488 | type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP; |
1489 | |
1490 | /* initialize outer IP header fields */ |
1491 | if (ip.v4->version == 4) { |
1492 | unsigned char *csum_start = skb_checksum_start(skb); |
1493 | unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); |
1494 | |
1495 | /* IP header will have to cancel out any data that |
1496 | * is not a part of the outer IP header |
1497 | */ |
1498 | ip.v4->check = csum_fold(sum: csum_partial(buff: trans_start, |
1499 | len: csum_start - trans_start, |
1500 | sum: 0)); |
1501 | type_tucmd |= IGC_ADVTXD_TUCMD_IPV4; |
1502 | |
1503 | ip.v4->tot_len = 0; |
1504 | first->tx_flags |= IGC_TX_FLAGS_TSO | |
1505 | IGC_TX_FLAGS_CSUM | |
1506 | IGC_TX_FLAGS_IPV4; |
1507 | } else { |
1508 | ip.v6->payload_len = 0; |
1509 | first->tx_flags |= IGC_TX_FLAGS_TSO | |
1510 | IGC_TX_FLAGS_CSUM; |
1511 | } |
1512 | |
1513 | /* determine offset of inner transport header */ |
1514 | l4_offset = l4.hdr - skb->data; |
1515 | |
1516 | /* remove payload length from inner checksum */ |
1517 | paylen = skb->len - l4_offset; |
1518 | if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) { |
1519 | /* compute length of segmentation header */ |
1520 | *hdr_len = (l4.tcp->doff * 4) + l4_offset; |
1521 | csum_replace_by_diff(sum: &l4.tcp->check, |
1522 | diff: (__force __wsum)htonl(paylen)); |
1523 | } else { |
1524 | /* compute length of segmentation header */ |
1525 | *hdr_len = sizeof(*l4.udp) + l4_offset; |
1526 | csum_replace_by_diff(sum: &l4.udp->check, |
1527 | diff: (__force __wsum)htonl(paylen)); |
1528 | } |
1529 | |
1530 | /* update gso size and bytecount with header size */ |
1531 | first->gso_segs = skb_shinfo(skb)->gso_segs; |
1532 | first->bytecount += (first->gso_segs - 1) * *hdr_len; |
1533 | |
1534 | /* MSS L4LEN IDX */ |
1535 | mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT; |
1536 | mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT; |
1537 | |
1538 | /* VLAN MACLEN IPLEN */ |
1539 | vlan_macip_lens = l4.hdr - ip.hdr; |
1540 | vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT; |
1541 | vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; |
1542 | |
1543 | igc_tx_ctxtdesc(tx_ring, launch_time, first_flag, |
1544 | vlan_macip_lens, type_tucmd, mss_l4len_idx); |
1545 | |
1546 | return 1; |
1547 | } |
1548 | |
1549 | static bool igc_request_tx_tstamp(struct igc_adapter *adapter, struct sk_buff *skb, u32 *flags) |
1550 | { |
1551 | int i; |
1552 | |
1553 | for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) { |
1554 | struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i]; |
1555 | |
1556 | if (tstamp->skb) |
1557 | continue; |
1558 | |
1559 | tstamp->skb = skb_get(skb); |
1560 | tstamp->start = jiffies; |
1561 | *flags = tstamp->flags; |
1562 | |
1563 | return true; |
1564 | } |
1565 | |
1566 | return false; |
1567 | } |
1568 | |
1569 | static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, |
1570 | struct igc_ring *tx_ring) |
1571 | { |
1572 | struct igc_adapter *adapter = netdev_priv(dev: tx_ring->netdev); |
1573 | bool first_flag = false, insert_empty = false; |
1574 | u16 count = TXD_USE_COUNT(skb_headlen(skb)); |
1575 | __be16 protocol = vlan_get_protocol(skb); |
1576 | struct igc_tx_buffer *first; |
1577 | __le32 launch_time = 0; |
1578 | u32 tx_flags = 0; |
1579 | unsigned short f; |
1580 | ktime_t txtime; |
1581 | u8 hdr_len = 0; |
1582 | int tso = 0; |
1583 | |
1584 | /* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD, |
1585 | * + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD, |
1586 | * + 2 desc gap to keep tail from touching head, |
1587 | * + 1 desc for context descriptor, |
1588 | * otherwise try next time |
1589 | */ |
1590 | for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) |
1591 | count += TXD_USE_COUNT(skb_frag_size( |
1592 | &skb_shinfo(skb)->frags[f])); |
1593 | |
1594 | if (igc_maybe_stop_tx(tx_ring, size: count + 5)) { |
1595 | /* this is a hard error */ |
1596 | return NETDEV_TX_BUSY; |
1597 | } |
1598 | |
1599 | if (!tx_ring->launchtime_enable) |
1600 | goto done; |
1601 | |
1602 | txtime = skb->tstamp; |
1603 | skb->tstamp = ktime_set(secs: 0, nsecs: 0); |
1604 | launch_time = igc_tx_launchtime(ring: tx_ring, txtime, first_flag: &first_flag, insert_empty: &insert_empty); |
1605 | |
1606 | if (insert_empty) { |
1607 | struct igc_tx_buffer *empty_info; |
1608 | struct sk_buff *empty; |
1609 | void *data; |
1610 | |
1611 | empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; |
1612 | empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC); |
1613 | if (!empty) |
1614 | goto done; |
1615 | |
1616 | data = skb_put(skb: empty, IGC_EMPTY_FRAME_SIZE); |
1617 | memset(data, 0, IGC_EMPTY_FRAME_SIZE); |
1618 | |
1619 | igc_tx_ctxtdesc(tx_ring, launch_time: 0, first_flag: false, vlan_macip_lens: 0, type_tucmd: 0, mss_l4len_idx: 0); |
1620 | |
1621 | if (igc_init_tx_empty_descriptor(ring: tx_ring, |
1622 | skb: empty, |
1623 | first: empty_info) < 0) |
1624 | dev_kfree_skb_any(skb: empty); |
1625 | } |
1626 | |
1627 | done: |
1628 | /* record the location of the first descriptor for this packet */ |
1629 | first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; |
1630 | first->type = IGC_TX_BUFFER_TYPE_SKB; |
1631 | first->skb = skb; |
1632 | first->bytecount = skb->len; |
1633 | first->gso_segs = 1; |
1634 | |
1635 | if (adapter->qbv_transition || tx_ring->oper_gate_closed) |
1636 | goto out_drop; |
1637 | |
1638 | if (tx_ring->max_sdu > 0 && first->bytecount > tx_ring->max_sdu) { |
1639 | adapter->stats.txdrop++; |
1640 | goto out_drop; |
1641 | } |
1642 | |
1643 | if (unlikely(test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) && |
1644 | skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { |
1645 | unsigned long flags; |
1646 | u32 tstamp_flags; |
1647 | |
1648 | spin_lock_irqsave(&adapter->ptp_tx_lock, flags); |
1649 | if (igc_request_tx_tstamp(adapter, skb, flags: &tstamp_flags)) { |
1650 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
1651 | tx_flags |= IGC_TX_FLAGS_TSTAMP | tstamp_flags; |
1652 | if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_USE_CYCLES) |
1653 | tx_flags |= IGC_TX_FLAGS_TSTAMP_TIMER_1; |
1654 | } else { |
1655 | adapter->tx_hwtstamp_skipped++; |
1656 | } |
1657 | |
1658 | spin_unlock_irqrestore(lock: &adapter->ptp_tx_lock, flags); |
1659 | } |
1660 | |
1661 | if (skb_vlan_tag_present(skb)) { |
1662 | tx_flags |= IGC_TX_FLAGS_VLAN; |
1663 | tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT); |
1664 | } |
1665 | |
1666 | /* record initial flags and protocol */ |
1667 | first->tx_flags = tx_flags; |
1668 | first->protocol = protocol; |
1669 | |
1670 | tso = igc_tso(tx_ring, first, launch_time, first_flag, hdr_len: &hdr_len); |
1671 | if (tso < 0) |
1672 | goto out_drop; |
1673 | else if (!tso) |
1674 | igc_tx_csum(tx_ring, first, launch_time, first_flag); |
1675 | |
1676 | igc_tx_map(tx_ring, first, hdr_len); |
1677 | |
1678 | return NETDEV_TX_OK; |
1679 | |
1680 | out_drop: |
1681 | dev_kfree_skb_any(skb: first->skb); |
1682 | first->skb = NULL; |
1683 | |
1684 | return NETDEV_TX_OK; |
1685 | } |
1686 | |
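/* Map the skb queue mapping onto one of the adapter's Tx rings, wrapping
 * indices that exceed the number of configured Tx queues.
 */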
1687 | static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter, |
1688 | struct sk_buff *skb) |
1689 | { |
1690 | unsigned int r_idx = skb->queue_mapping; |
1691 | |
1692 | if (r_idx >= adapter->num_tx_queues) |
1693 | r_idx = r_idx % adapter->num_tx_queues; |
1694 | |
1695 | return adapter->tx_ring[r_idx]; |
1696 | } |
1697 | |
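/* Main transmit entry point (.ndo_start_xmit): select the Tx ring from the
 * skb queue mapping and hand the frame off to igc_xmit_frame_ring().
 */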
1698 | static netdev_tx_t igc_xmit_frame(struct sk_buff *skb, |
1699 | struct net_device *netdev) |
1700 | { |
1701 | struct igc_adapter *adapter = netdev_priv(netdev); |
1702 | |
1703 | /* The minimum packet size with TCTL.PSP set is 17 so pad the skb |
1704 | * in order to meet this minimum size requirement. |
1705 | */ |
1706 | if (skb->len < 17) { |
1707 | if (skb_padto(skb, 17)) |
1708 | return NETDEV_TX_OK; |
1709 | skb->len = 17; |
1710 | } |
1711 | |
1712 | return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb)); |
1713 | } |
1714 | |
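/* Evaluate the Rx descriptor checksum status bits and mark the skb as
 * CHECKSUM_UNNECESSARY when hardware reports a valid TCP/UDP checksum,
 * so the stack can skip software verification.
 */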
1715 | static void igc_rx_checksum(struct igc_ring *ring, |
1716 | union igc_adv_rx_desc *rx_desc, |
1717 | struct sk_buff *skb) |
1718 | { |
1719 | skb_checksum_none_assert(skb); |
1720 | |
1721 | /* Ignore Checksum bit is set */ |
1722 | if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM)) |
1723 | return; |
1724 | |
1725 | /* Rx checksum disabled via ethtool */ |
1726 | if (!(ring->netdev->features & NETIF_F_RXCSUM)) |
1727 | return; |
1728 | |
1729 | /* TCP/UDP checksum error bit is set */ |
1730 | if (igc_test_staterr(rx_desc, |
1731 | IGC_RXDEXT_STATERR_L4E | |
1732 | IGC_RXDEXT_STATERR_IPE)) { |
1733 | /* work around errata with sctp packets where the TCPE aka |
1734 | * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) |
1735 | * packets (aka let the stack check the crc32c) |
1736 | */ |
1737 | if (!(skb->len == 60 && |
1738 | test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { |
1739 | u64_stats_update_begin(&ring->rx_syncp); |
1740 | ring->rx_stats.csum_err++; |
1741 | u64_stats_update_end(&ring->rx_syncp); |
1742 | } |
1743 | /* let the stack verify checksum errors */ |
1744 | return; |
1745 | } |
1746 | /* It must be a TCP or UDP packet with a valid checksum */ |
1747 | if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS | |
1748 | IGC_RXD_STAT_UDPCS)) |
1749 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1750 | |
1751 | netdev_dbg(ring->netdev, "cksum success: bits %08X\n", |
1752 | le32_to_cpu(rx_desc->wb.upper.status_error)); |
1753 | } |
1754 | |
1755 | /* Mapping HW RSS Type to enum pkt_hash_types */ |
1756 | static const enum pkt_hash_types igc_rss_type_table[IGC_RSS_TYPE_MAX_TABLE] = { |
1757 | [IGC_RSS_TYPE_NO_HASH] = PKT_HASH_TYPE_L2, |
1758 | [IGC_RSS_TYPE_HASH_TCP_IPV4] = PKT_HASH_TYPE_L4, |
1759 | [IGC_RSS_TYPE_HASH_IPV4] = PKT_HASH_TYPE_L3, |
1760 | [IGC_RSS_TYPE_HASH_TCP_IPV6] = PKT_HASH_TYPE_L4, |
1761 | [IGC_RSS_TYPE_HASH_IPV6_EX] = PKT_HASH_TYPE_L3, |
1762 | [IGC_RSS_TYPE_HASH_IPV6] = PKT_HASH_TYPE_L3, |
1763 | [IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = PKT_HASH_TYPE_L4, |
1764 | [IGC_RSS_TYPE_HASH_UDP_IPV4] = PKT_HASH_TYPE_L4, |
1765 | [IGC_RSS_TYPE_HASH_UDP_IPV6] = PKT_HASH_TYPE_L4, |
1766 | [IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = PKT_HASH_TYPE_L4, |
1767 | [10] = PKT_HASH_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW */ |
1768 | [11] = PKT_HASH_TYPE_NONE, /* keep array sized for SW bit-mask */ |
1769 | [12] = PKT_HASH_TYPE_NONE, /* to handle future HW revisions */ |
1770 | [13] = PKT_HASH_TYPE_NONE, |
1771 | [14] = PKT_HASH_TYPE_NONE, |
1772 | [15] = PKT_HASH_TYPE_NONE, |
1773 | }; |
1774 | |
1775 | static inline void igc_rx_hash(struct igc_ring *ring, |
1776 | union igc_adv_rx_desc *rx_desc, |
1777 | struct sk_buff *skb) |
1778 | { |
1779 | if (ring->netdev->features & NETIF_F_RXHASH) { |
1780 | u32 rss_hash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); |
1781 | u32 rss_type = igc_rss_type(rx_desc); |
1782 | |
1783 | skb_set_hash(skb, rss_hash, igc_rss_type_table[rss_type]); |
1784 | } |
1785 | } |
1786 | |
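/* If VLAN stripping is enabled and the descriptor carries a VLAN tag,
 * extract it (honouring the loopback byte-swap quirk) and attach it to
 * the skb.
 */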
1787 | static void igc_rx_vlan(struct igc_ring *rx_ring, |
1788 | union igc_adv_rx_desc *rx_desc, |
1789 | struct sk_buff *skb) |
1790 | { |
1791 | struct net_device *dev = rx_ring->netdev; |
1792 | u16 vid; |
1793 | |
1794 | if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && |
1795 | igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) { |
1796 | if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) && |
1797 | test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) |
1798 | vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan); |
1799 | else |
1800 | vid = le16_to_cpu(rx_desc->wb.upper.vlan); |
1801 | |
1802 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); |
1803 | } |
1804 | } |
1805 | |
1806 | /** |
1807 | * igc_process_skb_fields - Populate skb header fields from Rx descriptor |
1808 | * @rx_ring: rx descriptor ring packet is being transacted on |
1809 | * @rx_desc: pointer to the EOP Rx descriptor |
1810 | * @skb: pointer to current skb being populated |
1811 | * |
1812 | * This function checks the ring, descriptor, and packet information in order |
1813 | * to populate the hash, checksum, VLAN, protocol, and other fields within the |
1814 | * skb. |
1815 | */ |
1816 | static void igc_process_skb_fields(struct igc_ring *rx_ring, |
1817 | union igc_adv_rx_desc *rx_desc, |
1818 | struct sk_buff *skb) |
1819 | { |
1820 | igc_rx_hash(rx_ring, rx_desc, skb); |
1821 |  |
1822 | igc_rx_checksum(rx_ring, rx_desc, skb); |
1823 |  |
1824 | igc_rx_vlan(rx_ring, rx_desc, skb); |
1825 |  |
1826 | skb_record_rx_queue(skb, rx_ring->queue_index); |
1827 |  |
1828 | skb->protocol = eth_type_trans(skb, rx_ring->netdev); |
1829 | } |
1830 | |
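/* Enable or disable hardware VLAN tag insertion/stripping by toggling the
 * VME bit in the CTRL register according to the requested netdev features.
 */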
1831 | static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features) |
1832 | { |
1833 | bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); |
1834 | struct igc_adapter *adapter = netdev_priv(dev: netdev); |
1835 | struct igc_hw *hw = &adapter->hw; |
1836 | u32 ctrl; |
1837 | |
1838 | ctrl = rd32(IGC_CTRL); |
1839 | |
1840 | if (enable) { |
1841 | /* enable VLAN tag insert/strip */ |
1842 | ctrl |= IGC_CTRL_VME; |
1843 | } else { |
1844 | /* disable VLAN tag insert/strip */ |
1845 | ctrl &= ~IGC_CTRL_VME; |
1846 | } |
1847 | wr32(IGC_CTRL, ctrl); |
1848 | } |
1849 | |
1850 | static void igc_restore_vlan(struct igc_adapter *adapter) |
1851 | { |
1852 | igc_vlan_mode(netdev: adapter->netdev, features: adapter->netdev->features); |
1853 | } |
1854 | |
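/* Fetch the Rx buffer at next_to_clean, snapshot its page refcount for the
 * reuse heuristic, sync the received region for CPU access and consume one
 * pagecnt_bias reference.
 */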
1855 | static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring, |
1856 | const unsigned int size, |
1857 | int *rx_buffer_pgcnt) |
1858 | { |
1859 | struct igc_rx_buffer *rx_buffer; |
1860 | |
1861 | rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; |
1862 | *rx_buffer_pgcnt = |
1863 | #if (PAGE_SIZE < 8192) |
1864 | page_count(page: rx_buffer->page); |
1865 | #else |
1866 | 0; |
1867 | #endif |
1868 | prefetchw(x: rx_buffer->page); |
1869 | |
1870 | /* we are reusing so sync this buffer for CPU use */ |
1871 | dma_sync_single_range_for_cpu(dev: rx_ring->dev, |
1872 | addr: rx_buffer->dma, |
1873 | offset: rx_buffer->page_offset, |
1874 | size, |
1875 | dir: DMA_FROM_DEVICE); |
1876 | |
1877 | rx_buffer->pagecnt_bias--; |
1878 | |
1879 | return rx_buffer; |
1880 | } |
1881 | |
1882 | static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer, |
1883 | unsigned int truesize) |
1884 | { |
1885 | #if (PAGE_SIZE < 8192) |
1886 | buffer->page_offset ^= truesize; |
1887 | #else |
1888 | buffer->page_offset += truesize; |
1889 | #endif |
1890 | } |
1891 | |
1892 | static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring, |
1893 | unsigned int size) |
1894 | { |
1895 | unsigned int truesize; |
1896 | |
1897 | #if (PAGE_SIZE < 8192) |
1898 | truesize = igc_rx_pg_size(ring) / 2; |
1899 | #else |
1900 | truesize = ring_uses_build_skb(ring) ? |
1901 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + |
1902 | SKB_DATA_ALIGN(IGC_SKB_PAD + size) : |
1903 | SKB_DATA_ALIGN(size); |
1904 | #endif |
1905 | return truesize; |
1906 | } |
1907 | |
1908 | /** |
1909 | * igc_add_rx_frag - Add contents of Rx buffer to sk_buff |
1910 | * @rx_ring: rx descriptor ring to transact packets on |
1911 | * @rx_buffer: buffer containing page to add |
1912 | * @skb: sk_buff to place the data into |
1913 | * @size: size of buffer to be added |
1914 | * |
1915 | * This function will add the data contained in rx_buffer->page to the skb. |
1916 | */ |
1917 | static void igc_add_rx_frag(struct igc_ring *rx_ring, |
1918 | struct igc_rx_buffer *rx_buffer, |
1919 | struct sk_buff *skb, |
1920 | unsigned int size) |
1921 | { |
1922 | unsigned int truesize; |
1923 | |
1924 | #if (PAGE_SIZE < 8192) |
1925 | truesize = igc_rx_pg_size(rx_ring) / 2; |
1926 | #else |
1927 | truesize = ring_uses_build_skb(rx_ring) ? |
1928 | SKB_DATA_ALIGN(IGC_SKB_PAD + size) : |
1929 | SKB_DATA_ALIGN(size); |
1930 | #endif |
1931 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page: rx_buffer->page, |
1932 | off: rx_buffer->page_offset, size, truesize); |
1933 | |
1934 | igc_rx_buffer_flip(buffer: rx_buffer, truesize); |
1935 | } |
1936 | |
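/* Build an skb directly around the existing page buffer described by the
 * xdp_buff, avoiding a copy of the packet data.
 */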
1937 | static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring, |
1938 | struct igc_rx_buffer *rx_buffer, |
1939 | struct xdp_buff *xdp) |
1940 | { |
1941 | unsigned int size = xdp->data_end - xdp->data; |
1942 | unsigned int truesize = igc_get_rx_frame_truesize(ring: rx_ring, size); |
1943 | unsigned int metasize = xdp->data - xdp->data_meta; |
1944 | struct sk_buff *skb; |
1945 | |
1946 | /* prefetch first cache line of first page */ |
1947 | net_prefetch(p: xdp->data_meta); |
1948 | |
1949 | /* build an skb around the page buffer */ |
1950 | skb = napi_build_skb(data: xdp->data_hard_start, frag_size: truesize); |
1951 | if (unlikely(!skb)) |
1952 | return NULL; |
1953 | |
1954 | /* update pointers within the skb to store the data */ |
1955 | skb_reserve(skb, len: xdp->data - xdp->data_hard_start); |
1956 | __skb_put(skb, len: size); |
1957 | if (metasize) |
1958 | skb_metadata_set(skb, meta_len: metasize); |
1959 | |
1960 | igc_rx_buffer_flip(buffer: rx_buffer, truesize); |
1961 | return skb; |
1962 | } |
1963 | |
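/* Allocate a new skb and copy the packet headers (up to IGC_RX_HDR_LEN plus
 * any XDP metadata) out of the page buffer; the remaining payload, if any,
 * is attached as a page fragment.
 */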
1964 | static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, |
1965 | struct igc_rx_buffer *rx_buffer, |
1966 | struct igc_xdp_buff *ctx) |
1967 | { |
1968 | struct xdp_buff *xdp = &ctx->xdp; |
1969 | unsigned int metasize = xdp->data - xdp->data_meta; |
1970 | unsigned int size = xdp->data_end - xdp->data; |
1971 | unsigned int truesize = igc_get_rx_frame_truesize(ring: rx_ring, size); |
1972 | void *va = xdp->data; |
1973 | unsigned int headlen; |
1974 | struct sk_buff *skb; |
1975 | |
1976 | /* prefetch first cache line of first page */ |
1977 | net_prefetch(p: xdp->data_meta); |
1978 | |
1979 | /* allocate an skb to store the frags */ |
1980 | skb = napi_alloc_skb(&rx_ring->q_vector->napi, |
1981 | IGC_RX_HDR_LEN + metasize); |
1982 | if (unlikely(!skb)) |
1983 | return NULL; |
1984 | |
1985 | if (ctx->rx_ts) { |
1986 | skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV; |
1987 | skb_hwtstamps(skb)->netdev_data = ctx->rx_ts; |
1988 | } |
1989 | |
1990 | /* Determine available headroom for copy */ |
1991 | headlen = size; |
1992 | if (headlen > IGC_RX_HDR_LEN) |
1993 | headlen = eth_get_headlen(dev: skb->dev, data: va, IGC_RX_HDR_LEN); |
1994 | |
1995 | /* align pull length to size of long to optimize memcpy performance */ |
1996 | memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta, |
1997 | ALIGN(headlen + metasize, sizeof(long))); |
1998 | |
1999 | if (metasize) { |
2000 | skb_metadata_set(skb, meta_len: metasize); |
2001 | __skb_pull(skb, len: metasize); |
2002 | } |
2003 | |
2004 | /* update all of the pointers */ |
2005 | size -= headlen; |
2006 | if (size) { |
2007 | skb_add_rx_frag(skb, i: 0, page: rx_buffer->page, |
2008 | off: (va + headlen) - page_address(rx_buffer->page), |
2009 | size, truesize); |
2010 | igc_rx_buffer_flip(buffer: rx_buffer, truesize); |
2011 | } else { |
2012 | rx_buffer->pagecnt_bias++; |
2013 | } |
2014 | |
2015 | return skb; |
2016 | } |
2017 | |
2018 | /** |
2019 | * igc_reuse_rx_page - page flip buffer and store it back on the ring |
2020 | * @rx_ring: rx descriptor ring to store buffers on |
2021 | * @old_buff: donor buffer to have page reused |
2022 | * |
2023 | * Synchronizes page for reuse by the adapter |
2024 | */ |
2025 | static void igc_reuse_rx_page(struct igc_ring *rx_ring, |
2026 | struct igc_rx_buffer *old_buff) |
2027 | { |
2028 | u16 nta = rx_ring->next_to_alloc; |
2029 | struct igc_rx_buffer *new_buff; |
2030 | |
2031 | new_buff = &rx_ring->rx_buffer_info[nta]; |
2032 | |
2033 | /* update, and store next to alloc */ |
2034 | nta++; |
2035 | rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; |
2036 | |
2037 | /* Transfer page from old buffer to new buffer. |
2038 | * Move each member individually to avoid possible store |
2039 | * forwarding stalls. |
2040 | */ |
2041 | new_buff->dma = old_buff->dma; |
2042 | new_buff->page = old_buff->page; |
2043 | new_buff->page_offset = old_buff->page_offset; |
2044 | new_buff->pagecnt_bias = old_buff->pagecnt_bias; |
2045 | } |
2046 | |
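/* Decide whether the Rx page can be recycled: it must be local and not
 * pfmemalloc and, depending on page size, either have no other users or
 * still have room for another buffer. Top up the page reference count when
 * the bias is close to exhausted.
 */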
2047 | static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer, |
2048 | int rx_buffer_pgcnt) |
2049 | { |
2050 | unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; |
2051 | struct page *page = rx_buffer->page; |
2052 | |
2053 | /* avoid re-using remote and pfmemalloc pages */ |
2054 | if (!dev_page_is_reusable(page)) |
2055 | return false; |
2056 | |
2057 | #if (PAGE_SIZE < 8192) |
2058 | /* if we are only owner of page we can reuse it */ |
2059 | if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) |
2060 | return false; |
2061 | #else |
2062 | #define IGC_LAST_OFFSET \ |
2063 | (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048) |
2064 | |
2065 | if (rx_buffer->page_offset > IGC_LAST_OFFSET) |
2066 | return false; |
2067 | #endif |
2068 | |
2069 | /* If we have drained the page fragment pool we need to update |
2070 | * the pagecnt_bias and page count so that we fully restock the |
2071 | * number of references the driver holds. |
2072 | */ |
2073 | if (unlikely(pagecnt_bias == 1)) { |
2074 | page_ref_add(page, USHRT_MAX - 1); |
2075 | rx_buffer->pagecnt_bias = USHRT_MAX; |
2076 | } |
2077 | |
2078 | return true; |
2079 | } |
2080 | |
2081 | /** |
2082 | * igc_is_non_eop - process handling of non-EOP buffers |
2083 | * @rx_ring: Rx ring being processed |
2084 | * @rx_desc: Rx descriptor for current buffer |
2085 | * |
2086 | * This function updates next to clean. If the buffer is an EOP buffer |
2087 | * this function exits returning false, otherwise it will place the |
2088 | * sk_buff in the next buffer to be chained and return true indicating |
2089 | * that this is in fact a non-EOP buffer. |
2090 | */ |
2091 | static bool igc_is_non_eop(struct igc_ring *rx_ring, |
2092 | union igc_adv_rx_desc *rx_desc) |
2093 | { |
2094 | u32 ntc = rx_ring->next_to_clean + 1; |
2095 | |
2096 | /* fetch, update, and store next to clean */ |
2097 | ntc = (ntc < rx_ring->count) ? ntc : 0; |
2098 | rx_ring->next_to_clean = ntc; |
2099 | |
2100 | prefetch(IGC_RX_DESC(rx_ring, ntc)); |
2101 | |
2102 | if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP))) |
2103 | return false; |
2104 | |
2105 | return true; |
2106 | } |
2107 | |
2108 | /** |
2109 | * igc_cleanup_headers - Correct corrupted or empty headers |
2110 | * @rx_ring: rx descriptor ring packet is being transacted on |
2111 | * @rx_desc: pointer to the EOP Rx descriptor |
2112 | * @skb: pointer to current skb being fixed |
2113 | * |
2114 | * Address the case where we are pulling data in on pages only |
2115 | * and as such no data is present in the skb header. |
2116 | * |
2117 | * In addition if skb is not at least 60 bytes we need to pad it so that |
2118 | * it is large enough to qualify as a valid Ethernet frame. |
2119 | * |
2120 | * Returns true if an error was encountered and skb was freed. |
2121 | */ |
2122 | static bool igc_cleanup_headers(struct igc_ring *rx_ring, |
2123 | union igc_adv_rx_desc *rx_desc, |
2124 | struct sk_buff *skb) |
2125 | { |
2126 | /* XDP packets use error pointer so abort at this point */ |
2127 | if (IS_ERR(ptr: skb)) |
2128 | return true; |
2129 | |
2130 | if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) { |
2131 | struct net_device *netdev = rx_ring->netdev; |
2132 | |
2133 | if (!(netdev->features & NETIF_F_RXALL)) { |
2134 | dev_kfree_skb_any(skb); |
2135 | return true; |
2136 | } |
2137 | } |
2138 | |
2139 | /* if eth_skb_pad returns an error the skb was freed */ |
2140 | if (eth_skb_pad(skb)) |
2141 | return true; |
2142 | |
2143 | return false; |
2144 | } |
2145 | |
2146 | static void igc_put_rx_buffer(struct igc_ring *rx_ring, |
2147 | struct igc_rx_buffer *rx_buffer, |
2148 | int rx_buffer_pgcnt) |
2149 | { |
2150 | if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) { |
2151 | /* hand second half of page back to the ring */ |
2152 | igc_reuse_rx_page(rx_ring, old_buff: rx_buffer); |
2153 | } else { |
2154 | /* We are not reusing the buffer so unmap it and free |
2155 | * any references we are holding to it |
2156 | */ |
2157 | dma_unmap_page_attrs(dev: rx_ring->dev, addr: rx_buffer->dma, |
2158 | igc_rx_pg_size(rx_ring), dir: DMA_FROM_DEVICE, |
2159 | IGC_RX_DMA_ATTR); |
2160 | __page_frag_cache_drain(page: rx_buffer->page, |
2161 | count: rx_buffer->pagecnt_bias); |
2162 | } |
2163 | |
2164 | /* clear contents of rx_buffer */ |
2165 | rx_buffer->page = NULL; |
2166 | } |
2167 | |
2168 | static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring) |
2169 | { |
2170 | struct igc_adapter *adapter = rx_ring->q_vector->adapter; |
2171 | |
2172 | if (ring_uses_build_skb(rx_ring)) |
2173 | return IGC_SKB_PAD; |
2174 | if (igc_xdp_is_enabled(adapter)) |
2175 | return XDP_PACKET_HEADROOM; |
2176 | |
2177 | return 0; |
2178 | } |
2179 | |
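/* Allocate and DMA-map a fresh page for an Rx buffer when no recycled page
 * is available, seeding pagecnt_bias with a large bias so page references
 * are amortized across many received frames.
 */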
2180 | static bool igc_alloc_mapped_page(struct igc_ring *rx_ring, |
2181 | struct igc_rx_buffer *bi) |
2182 | { |
2183 | struct page *page = bi->page; |
2184 | dma_addr_t dma; |
2185 | |
2186 | /* since we are recycling buffers we should seldom need to alloc */ |
2187 | if (likely(page)) |
2188 | return true; |
2189 | |
2190 | /* alloc new page for storage */ |
2191 | page = dev_alloc_pages(order: igc_rx_pg_order(ring: rx_ring)); |
2192 | if (unlikely(!page)) { |
2193 | rx_ring->rx_stats.alloc_failed++; |
2194 | return false; |
2195 | } |
2196 | |
2197 | /* map page for use */ |
2198 | dma = dma_map_page_attrs(dev: rx_ring->dev, page, offset: 0, |
2199 | igc_rx_pg_size(rx_ring), |
2200 | dir: DMA_FROM_DEVICE, |
2201 | IGC_RX_DMA_ATTR); |
2202 | |
2203 | /* if mapping failed free memory back to system since |
2204 | * there isn't much point in holding memory we can't use |
2205 | */ |
2206 | if (dma_mapping_error(dev: rx_ring->dev, dma_addr: dma)) { |
2207 | __free_page(page); |
2208 | |
2209 | rx_ring->rx_stats.alloc_failed++; |
2210 | return false; |
2211 | } |
2212 | |
2213 | bi->dma = dma; |
2214 | bi->page = page; |
2215 | bi->page_offset = igc_rx_offset(rx_ring); |
2216 | page_ref_add(page, USHRT_MAX - 1); |
2217 | bi->pagecnt_bias = USHRT_MAX; |
2218 | |
2219 | return true; |
2220 | } |
2221 | |
2222 | /** |
2223 | * igc_alloc_rx_buffers - Replace used receive buffers; packet split |
2224 | * @rx_ring: rx descriptor ring |
2225 | * @cleaned_count: number of buffers to clean |
2226 | */ |
2227 | static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count) |
2228 | { |
2229 | union igc_adv_rx_desc *rx_desc; |
2230 | u16 i = rx_ring->next_to_use; |
2231 | struct igc_rx_buffer *bi; |
2232 | u16 bufsz; |
2233 | |
2234 | /* nothing to do */ |
2235 | if (!cleaned_count) |
2236 | return; |
2237 | |
2238 | rx_desc = IGC_RX_DESC(rx_ring, i); |
2239 | bi = &rx_ring->rx_buffer_info[i]; |
2240 | i -= rx_ring->count; |
2241 | |
2242 | bufsz = igc_rx_bufsz(ring: rx_ring); |
2243 | |
2244 | do { |
2245 | if (!igc_alloc_mapped_page(rx_ring, bi)) |
2246 | break; |
2247 | |
2248 | /* sync the buffer for use by the device */ |
2249 | dma_sync_single_range_for_device(dev: rx_ring->dev, addr: bi->dma, |
2250 | offset: bi->page_offset, size: bufsz, |
2251 | dir: DMA_FROM_DEVICE); |
2252 | |
2253 | /* Refresh the desc even if buffer_addrs didn't change |
2254 | * because each write-back erases this info. |
2255 | */ |
2256 | rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); |
2257 | |
2258 | rx_desc++; |
2259 | bi++; |
2260 | i++; |
2261 | if (unlikely(!i)) { |
2262 | rx_desc = IGC_RX_DESC(rx_ring, 0); |
2263 | bi = rx_ring->rx_buffer_info; |
2264 | i -= rx_ring->count; |
2265 | } |
2266 | |
2267 | /* clear the length for the next_to_use descriptor */ |
2268 | rx_desc->wb.upper.length = 0; |
2269 | |
2270 | cleaned_count--; |
2271 | } while (cleaned_count); |
2272 | |
2273 | i += rx_ring->count; |
2274 | |
2275 | if (rx_ring->next_to_use != i) { |
2276 | /* record the next descriptor to use */ |
2277 | rx_ring->next_to_use = i; |
2278 | |
2279 | /* update next to alloc since we have filled the ring */ |
2280 | rx_ring->next_to_alloc = i; |
2281 | |
2282 | /* Force memory writes to complete before letting h/w |
2283 | * know there are new descriptors to fetch. (Only |
2284 | * applicable for weak-ordered memory model archs, |
2285 | * such as IA-64). |
2286 | */ |
2287 | wmb(); |
2288 | writel(val: i, addr: rx_ring->tail); |
2289 | } |
2290 | } |
2291 | |
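/* AF_XDP zero-copy counterpart of igc_alloc_rx_buffers(): pull buffers from
 * the XSK pool, program their DMA addresses into the Rx descriptors and
 * bump the tail pointer. Returns false if the pool ran out of buffers.
 */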
2292 | static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count) |
2293 | { |
2294 | union igc_adv_rx_desc *desc; |
2295 | u16 i = ring->next_to_use; |
2296 | struct igc_rx_buffer *bi; |
2297 | dma_addr_t dma; |
2298 | bool ok = true; |
2299 | |
2300 | if (!count) |
2301 | return ok; |
2302 | |
2303 | XSK_CHECK_PRIV_TYPE(struct igc_xdp_buff); |
2304 | |
2305 | desc = IGC_RX_DESC(ring, i); |
2306 | bi = &ring->rx_buffer_info[i]; |
2307 | i -= ring->count; |
2308 | |
2309 | do { |
2310 | bi->xdp = xsk_buff_alloc(pool: ring->xsk_pool); |
2311 | if (!bi->xdp) { |
2312 | ok = false; |
2313 | break; |
2314 | } |
2315 | |
2316 | dma = xsk_buff_xdp_get_dma(xdp: bi->xdp); |
2317 | desc->read.pkt_addr = cpu_to_le64(dma); |
2318 | |
2319 | desc++; |
2320 | bi++; |
2321 | i++; |
2322 | if (unlikely(!i)) { |
2323 | desc = IGC_RX_DESC(ring, 0); |
2324 | bi = ring->rx_buffer_info; |
2325 | i -= ring->count; |
2326 | } |
2327 | |
2328 | /* Clear the length for the next_to_use descriptor. */ |
2329 | desc->wb.upper.length = 0; |
2330 | |
2331 | count--; |
2332 | } while (count); |
2333 | |
2334 | i += ring->count; |
2335 | |
2336 | if (ring->next_to_use != i) { |
2337 | ring->next_to_use = i; |
2338 | |
2339 | /* Force memory writes to complete before letting h/w |
2340 | * know there are new descriptors to fetch. (Only |
2341 | * applicable for weak-ordered memory model archs, |
2342 | * such as IA-64). |
2343 | */ |
2344 | wmb(); |
2345 | writel(val: i, addr: ring->tail); |
2346 | } |
2347 | |
2348 | return ok; |
2349 | } |
2350 | |
2351 | /* This function requires __netif_tx_lock is held by the caller. */ |
2352 | static int igc_xdp_init_tx_descriptor(struct igc_ring *ring, |
2353 | struct xdp_frame *xdpf) |
2354 | { |
2355 | struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(frame: xdpf); |
2356 | u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0; |
2357 | u16 count, index = ring->next_to_use; |
2358 | struct igc_tx_buffer *head = &ring->tx_buffer_info[index]; |
2359 | struct igc_tx_buffer *buffer = head; |
2360 | union igc_adv_tx_desc *desc = IGC_TX_DESC(ring, index); |
2361 | u32 olinfo_status, len = xdpf->len, cmd_type; |
2362 | void *data = xdpf->data; |
2363 | u16 i; |
2364 | |
2365 | count = TXD_USE_COUNT(len); |
2366 | for (i = 0; i < nr_frags; i++) |
2367 | count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i])); |
2368 | |
2369 | if (igc_maybe_stop_tx(tx_ring: ring, size: count + 3)) { |
2370 | /* this is a hard error */ |
2371 | return -EBUSY; |
2372 | } |
2373 | |
2374 | i = 0; |
2375 | head->bytecount = xdp_get_frame_len(xdpf); |
2376 | head->type = IGC_TX_BUFFER_TYPE_XDP; |
2377 | head->gso_segs = 1; |
2378 | head->xdpf = xdpf; |
2379 | |
2380 | olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; |
2381 | desc->read.olinfo_status = cpu_to_le32(olinfo_status); |
2382 | |
2383 | for (;;) { |
2384 | dma_addr_t dma; |
2385 | |
2386 | dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE); |
2387 | if (dma_mapping_error(dev: ring->dev, dma_addr: dma)) { |
2388 | netdev_err_once(ring->netdev, |
2389 | "Failed to map DMA for TX\n"); |
2390 | goto unmap; |
2391 | } |
2392 | |
2393 | dma_unmap_len_set(buffer, len, len); |
2394 | dma_unmap_addr_set(buffer, dma, dma); |
2395 | |
2396 | cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | |
2397 | IGC_ADVTXD_DCMD_IFCS | len; |
2398 | |
2399 | desc->read.cmd_type_len = cpu_to_le32(cmd_type); |
2400 | desc->read.buffer_addr = cpu_to_le64(dma); |
2401 | |
2402 | buffer->protocol = 0; |
2403 | |
2404 | if (++index == ring->count) |
2405 | index = 0; |
2406 | |
2407 | if (i == nr_frags) |
2408 | break; |
2409 | |
2410 | buffer = &ring->tx_buffer_info[index]; |
2411 | desc = IGC_TX_DESC(ring, index); |
2412 | desc->read.olinfo_status = 0; |
2413 | |
2414 | data = skb_frag_address(frag: &sinfo->frags[i]); |
2415 | len = skb_frag_size(frag: &sinfo->frags[i]); |
2416 | i++; |
2417 | } |
2418 | desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD); |
2419 | |
2420 | netdev_tx_sent_queue(dev_queue: txring_txq(tx_ring: ring), bytes: head->bytecount); |
2421 | /* set the timestamp */ |
2422 | head->time_stamp = jiffies; |
2423 | /* set next_to_watch value indicating a packet is present */ |
2424 | head->next_to_watch = desc; |
2425 | ring->next_to_use = index; |
2426 | |
2427 | return 0; |
2428 | |
2429 | unmap: |
2430 | for (;;) { |
2431 | buffer = &ring->tx_buffer_info[index]; |
2432 | if (dma_unmap_len(buffer, len)) |
2433 | dma_unmap_page(ring->dev, |
2434 | dma_unmap_addr(buffer, dma), |
2435 | dma_unmap_len(buffer, len), |
2436 | DMA_TO_DEVICE); |
2437 | dma_unmap_len_set(buffer, len, 0); |
2438 | if (buffer == head) |
2439 | break; |
2440 | |
2441 | if (!index) |
2442 | index += ring->count; |
2443 | index--; |
2444 | } |
2445 | |
2446 | return -ENOMEM; |
2447 | } |
2448 | |
2449 | static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter, |
2450 | int cpu) |
2451 | { |
2452 | int index = cpu; |
2453 | |
2454 | if (unlikely(index < 0)) |
2455 | index = 0; |
2456 | |
2457 | while (index >= adapter->num_tx_queues) |
2458 | index -= adapter->num_tx_queues; |
2459 | |
2460 | return adapter->tx_ring[index]; |
2461 | } |
2462 | |
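/* XDP_TX path: convert the xdp_buff to an xdp_frame and transmit it on the
 * Tx ring associated with the current CPU, under the netdev Tx queue lock.
 */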
2463 | static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp) |
2464 | { |
2465 | struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); |
2466 | int cpu = smp_processor_id(); |
2467 | struct netdev_queue *nq; |
2468 | struct igc_ring *ring; |
2469 | int res; |
2470 | |
2471 | if (unlikely(!xdpf)) |
2472 | return -EFAULT; |
2473 | |
2474 | ring = igc_xdp_get_tx_ring(adapter, cpu); |
2475 | nq = txring_txq(tx_ring: ring); |
2476 | |
2477 | __netif_tx_lock(txq: nq, cpu); |
2478 | /* Avoid transmit queue timeout since we share it with the slow path */ |
2479 | txq_trans_cond_update(txq: nq); |
2480 | res = igc_xdp_init_tx_descriptor(ring, xdpf); |
2481 | __netif_tx_unlock(txq: nq); |
2482 | return res; |
2483 | } |
2484 | |
2485 | /* This function assumes rcu_read_lock() is held by the caller. */ |
2486 | static int __igc_xdp_run_prog(struct igc_adapter *adapter, |
2487 | struct bpf_prog *prog, |
2488 | struct xdp_buff *xdp) |
2489 | { |
2490 | u32 act = bpf_prog_run_xdp(prog, xdp); |
2491 | |
2492 | switch (act) { |
2493 | case XDP_PASS: |
2494 | return IGC_XDP_PASS; |
2495 | case XDP_TX: |
2496 | if (igc_xdp_xmit_back(adapter, xdp) < 0) |
2497 | goto out_failure; |
2498 | return IGC_XDP_TX; |
2499 | case XDP_REDIRECT: |
2500 | if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0) |
2501 | goto out_failure; |
2502 | return IGC_XDP_REDIRECT; |
2504 | default: |
2505 | bpf_warn_invalid_xdp_action(dev: adapter->netdev, prog, act); |
2506 | fallthrough; |
2507 | case XDP_ABORTED: |
2508 | out_failure: |
2509 | trace_xdp_exception(dev: adapter->netdev, xdp: prog, act); |
2510 | fallthrough; |
2511 | case XDP_DROP: |
2512 | return IGC_XDP_CONSUMED; |
2513 | } |
2514 | } |
2515 | |
2516 | static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter, |
2517 | struct xdp_buff *xdp) |
2518 | { |
2519 | struct bpf_prog *prog; |
2520 | int res; |
2521 | |
2522 | prog = READ_ONCE(adapter->xdp_prog); |
2523 | if (!prog) { |
2524 | res = IGC_XDP_PASS; |
2525 | goto out; |
2526 | } |
2527 | |
2528 | res = __igc_xdp_run_prog(adapter, prog, xdp); |
2529 | |
2530 | out: |
2531 | return ERR_PTR(-res); |
2532 | } |
2533 | |
2534 | /* This function assumes __netif_tx_lock is held by the caller. */ |
2535 | static void igc_flush_tx_descriptors(struct igc_ring *ring) |
2536 | { |
2537 | /* Once tail pointer is updated, hardware can fetch the descriptors |
2538 | * any time so we issue a write membar here to ensure all memory |
2539 | * writes are complete before the tail pointer is updated. |
2540 | */ |
2541 | wmb(); |
2542 | writel(ring->next_to_use, ring->tail); |
2543 | } |
2544 | |
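/* After an Rx polling pass, flush any work deferred by the XDP verdicts:
 * bump the Tx tail for XDP_TX frames and flush redirect maps for
 * XDP_REDIRECT.
 */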
2545 | static void igc_finalize_xdp(struct igc_adapter *adapter, int status) |
2546 | { |
2547 | int cpu = smp_processor_id(); |
2548 | struct netdev_queue *nq; |
2549 | struct igc_ring *ring; |
2550 | |
2551 | if (status & IGC_XDP_TX) { |
2552 | ring = igc_xdp_get_tx_ring(adapter, cpu); |
2553 | nq = txring_txq(tx_ring: ring); |
2554 | |
2555 | __netif_tx_lock(txq: nq, cpu); |
2556 | igc_flush_tx_descriptors(ring); |
2557 | __netif_tx_unlock(txq: nq); |
2558 | } |
2559 | |
2560 | if (status & IGC_XDP_REDIRECT) |
2561 | xdp_do_flush(); |
2562 | } |
2563 | |
2564 | static void igc_update_rx_stats(struct igc_q_vector *q_vector, |
2565 | unsigned int packets, unsigned int bytes) |
2566 | { |
2567 | struct igc_ring *ring = q_vector->rx.ring; |
2568 | |
2569 | u64_stats_update_begin(syncp: &ring->rx_syncp); |
2570 | ring->rx_stats.packets += packets; |
2571 | ring->rx_stats.bytes += bytes; |
2572 | u64_stats_update_end(syncp: &ring->rx_syncp); |
2573 | |
2574 | q_vector->rx.total_packets += packets; |
2575 | q_vector->rx.total_bytes += bytes; |
2576 | } |
2577 | |
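/* Main NAPI Rx poll routine: refill descriptors, run the XDP program on
 * each completed buffer, build or construct skbs for passed frames, handle
 * multi-buffer (non-EOP) descriptors and hand finished packets to GRO.
 * Returns the number of packets processed.
 */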
2578 | static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) |
2579 | { |
2580 | unsigned int total_bytes = 0, total_packets = 0; |
2581 | struct igc_adapter *adapter = q_vector->adapter; |
2582 | struct igc_ring *rx_ring = q_vector->rx.ring; |
2583 | struct sk_buff *skb = rx_ring->skb; |
2584 | u16 cleaned_count = igc_desc_unused(ring: rx_ring); |
2585 | int xdp_status = 0, rx_buffer_pgcnt; |
2586 | |
2587 | while (likely(total_packets < budget)) { |
2588 | struct igc_xdp_buff ctx = { .rx_ts = NULL }; |
2589 | struct igc_rx_buffer *rx_buffer; |
2590 | union igc_adv_rx_desc *rx_desc; |
2591 | unsigned int size, truesize; |
2592 | int pkt_offset = 0; |
2593 | void *pktbuf; |
2594 | |
2595 | /* return some buffers to hardware, one at a time is too slow */ |
2596 | if (cleaned_count >= IGC_RX_BUFFER_WRITE) { |
2597 | igc_alloc_rx_buffers(rx_ring, cleaned_count); |
2598 | cleaned_count = 0; |
2599 | } |
2600 | |
2601 | rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean); |
2602 | size = le16_to_cpu(rx_desc->wb.upper.length); |
2603 | if (!size) |
2604 | break; |
2605 | |
2606 | /* This memory barrier is needed to keep us from reading |
2607 | * any other fields out of the rx_desc until we know the |
2608 | * descriptor has been written back |
2609 | */ |
2610 | dma_rmb(); |
2611 | |
2612 | rx_buffer = igc_get_rx_buffer(rx_ring, size, rx_buffer_pgcnt: &rx_buffer_pgcnt); |
2613 | truesize = igc_get_rx_frame_truesize(ring: rx_ring, size); |
2614 | |
2615 | pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; |
2616 | |
2617 | if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) { |
2618 | ctx.rx_ts = pktbuf; |
2619 | pkt_offset = IGC_TS_HDR_LEN; |
2620 | size -= IGC_TS_HDR_LEN; |
2621 | } |
2622 | |
2623 | if (!skb) { |
2624 | xdp_init_buff(xdp: &ctx.xdp, frame_sz: truesize, rxq: &rx_ring->xdp_rxq); |
2625 | xdp_prepare_buff(xdp: &ctx.xdp, hard_start: pktbuf - igc_rx_offset(rx_ring), |
2626 | headroom: igc_rx_offset(rx_ring) + pkt_offset, |
2627 | data_len: size, meta_valid: true); |
2628 | xdp_buff_clear_frags_flag(xdp: &ctx.xdp); |
2629 | ctx.rx_desc = rx_desc; |
2630 | |
2631 | skb = igc_xdp_run_prog(adapter, xdp: &ctx.xdp); |
2632 | } |
2633 | |
2634 | if (IS_ERR(ptr: skb)) { |
2635 | unsigned int xdp_res = -PTR_ERR(ptr: skb); |
2636 | |
2637 | switch (xdp_res) { |
2638 | case IGC_XDP_CONSUMED: |
2639 | rx_buffer->pagecnt_bias++; |
2640 | break; |
2641 | case IGC_XDP_TX: |
2642 | case IGC_XDP_REDIRECT: |
2643 | igc_rx_buffer_flip(buffer: rx_buffer, truesize); |
2644 | xdp_status |= xdp_res; |
2645 | break; |
2646 | } |
2647 | |
2648 | total_packets++; |
2649 | total_bytes += size; |
2650 | } else if (skb) |
2651 | igc_add_rx_frag(rx_ring, rx_buffer, skb, size); |
2652 | else if (ring_uses_build_skb(rx_ring)) |
2653 | skb = igc_build_skb(rx_ring, rx_buffer, xdp: &ctx.xdp); |
2654 | else |
2655 | skb = igc_construct_skb(rx_ring, rx_buffer, ctx: &ctx); |
2656 | |
2657 | /* exit if we failed to retrieve a buffer */ |
2658 | if (!skb) { |
2659 | rx_ring->rx_stats.alloc_failed++; |
2660 | rx_buffer->pagecnt_bias++; |
2661 | break; |
2662 | } |
2663 | |
2664 | igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt); |
2665 | cleaned_count++; |
2666 | |
2667 | /* fetch next buffer in frame if non-eop */ |
2668 | if (igc_is_non_eop(rx_ring, rx_desc)) |
2669 | continue; |
2670 | |
2671 | /* verify the packet layout is correct */ |
2672 | if (igc_cleanup_headers(rx_ring, rx_desc, skb)) { |
2673 | skb = NULL; |
2674 | continue; |
2675 | } |
2676 | |
2677 | /* probably a little skewed due to removing CRC */ |
2678 | total_bytes += skb->len; |
2679 | |
2680 | /* populate checksum, VLAN, and protocol */ |
2681 | igc_process_skb_fields(rx_ring, rx_desc, skb); |
2682 | |
2683 | napi_gro_receive(napi: &q_vector->napi, skb); |
2684 | |
2685 | /* reset skb pointer */ |
2686 | skb = NULL; |
2687 | |
2688 | /* update budget accounting */ |
2689 | total_packets++; |
2690 | } |
2691 | |
2692 | if (xdp_status) |
2693 | igc_finalize_xdp(adapter, status: xdp_status); |
2694 | |
2695 | /* place incomplete frames back on ring for completion */ |
2696 | rx_ring->skb = skb; |
2697 | |
2698 | igc_update_rx_stats(q_vector, packets: total_packets, bytes: total_bytes); |
2699 | |
2700 | if (cleaned_count) |
2701 | igc_alloc_rx_buffers(rx_ring, cleaned_count); |
2702 | |
2703 | return total_packets; |
2704 | } |
2705 | |
2706 | static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring, |
2707 | struct xdp_buff *xdp) |
2708 | { |
2709 | unsigned int totalsize = xdp->data_end - xdp->data_meta; |
2710 | unsigned int metasize = xdp->data - xdp->data_meta; |
2711 | struct sk_buff *skb; |
2712 | |
2713 | net_prefetch(p: xdp->data_meta); |
2714 | |
2715 | skb = __napi_alloc_skb(napi: &ring->q_vector->napi, length: totalsize, |
2716 | GFP_ATOMIC | __GFP_NOWARN); |
2717 | if (unlikely(!skb)) |
2718 | return NULL; |
2719 | |
2720 | memcpy(__skb_put(skb, totalsize), xdp->data_meta, |
2721 | ALIGN(totalsize, sizeof(long))); |
2722 | |
2723 | if (metasize) { |
2724 | skb_metadata_set(skb, meta_len: metasize); |
2725 | __skb_pull(skb, len: metasize); |
2726 | } |
2727 | |
2728 | return skb; |
2729 | } |
2730 | |
2731 | static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector, |
2732 | union igc_adv_rx_desc *desc, |
2733 | struct xdp_buff *xdp, |
2734 | ktime_t timestamp) |
2735 | { |
2736 | struct igc_ring *ring = q_vector->rx.ring; |
2737 | struct sk_buff *skb; |
2738 | |
2739 | skb = igc_construct_skb_zc(ring, xdp); |
2740 | if (!skb) { |
2741 | ring->rx_stats.alloc_failed++; |
2742 | return; |
2743 | } |
2744 | |
2745 | if (timestamp) |
2746 | skb_hwtstamps(skb)->hwtstamp = timestamp; |
2747 | |
2748 | if (igc_cleanup_headers(rx_ring: ring, rx_desc: desc, skb)) |
2749 | return; |
2750 | |
2751 | igc_process_skb_fields(rx_ring: ring, rx_desc: desc, skb); |
2752 | napi_gro_receive(napi: &q_vector->napi, skb); |
2753 | } |
2754 | |
2755 | static struct igc_xdp_buff *xsk_buff_to_igc_ctx(struct xdp_buff *xdp) |
2756 | { |
2757 | /* xdp_buff pointer used by ZC code path is alloc as xdp_buff_xsk. The |
2758 | * igc_xdp_buff shares its layout with xdp_buff_xsk and private |
2759 | * igc_xdp_buff fields fall into xdp_buff_xsk->cb |
2760 | */ |
2761 | return (struct igc_xdp_buff *)xdp; |
2762 | } |
2763 | |
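/* AF_XDP zero-copy Rx poll routine: XDP verdicts operate directly on XSK
 * pool buffers, and XDP_PASS frames are copied into freshly allocated skbs
 * before being handed to the stack.
 */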
2764 | static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget) |
2765 | { |
2766 | struct igc_adapter *adapter = q_vector->adapter; |
2767 | struct igc_ring *ring = q_vector->rx.ring; |
2768 | u16 cleaned_count = igc_desc_unused(ring); |
2769 | int total_bytes = 0, total_packets = 0; |
2770 | u16 ntc = ring->next_to_clean; |
2771 | struct bpf_prog *prog; |
2772 | bool failure = false; |
2773 | int xdp_status = 0; |
2774 | |
2775 | rcu_read_lock(); |
2776 | |
2777 | prog = READ_ONCE(adapter->xdp_prog); |
2778 | |
2779 | while (likely(total_packets < budget)) { |
2780 | union igc_adv_rx_desc *desc; |
2781 | struct igc_rx_buffer *bi; |
2782 | struct igc_xdp_buff *ctx; |
2783 | ktime_t timestamp = 0; |
2784 | unsigned int size; |
2785 | int res; |
2786 | |
2787 | desc = IGC_RX_DESC(ring, ntc); |
2788 | size = le16_to_cpu(desc->wb.upper.length); |
2789 | if (!size) |
2790 | break; |
2791 | |
2792 | /* This memory barrier is needed to keep us from reading |
2793 | * any other fields out of the rx_desc until we know the |
2794 | * descriptor has been written back |
2795 | */ |
2796 | dma_rmb(); |
2797 | |
2798 | bi = &ring->rx_buffer_info[ntc]; |
2799 | |
2800 | ctx = xsk_buff_to_igc_ctx(xdp: bi->xdp); |
2801 | ctx->rx_desc = desc; |
2802 | |
2803 | if (igc_test_staterr(rx_desc: desc, IGC_RXDADV_STAT_TSIP)) { |
2804 | ctx->rx_ts = bi->xdp->data; |
2805 | |
2806 | bi->xdp->data += IGC_TS_HDR_LEN; |
2807 | |
2808 | /* HW timestamp has been copied into local variable. Metadata |
2809 | * length when XDP program is called should be 0. |
2810 | */ |
2811 | bi->xdp->data_meta += IGC_TS_HDR_LEN; |
2812 | size -= IGC_TS_HDR_LEN; |
2813 | } |
2814 | |
2815 | bi->xdp->data_end = bi->xdp->data + size; |
2816 | xsk_buff_dma_sync_for_cpu(xdp: bi->xdp, pool: ring->xsk_pool); |
2817 | |
2818 | res = __igc_xdp_run_prog(adapter, prog, xdp: bi->xdp); |
2819 | switch (res) { |
2820 | case IGC_XDP_PASS: |
2821 | igc_dispatch_skb_zc(q_vector, desc, xdp: bi->xdp, timestamp); |
2822 | fallthrough; |
2823 | case IGC_XDP_CONSUMED: |
2824 | xsk_buff_free(xdp: bi->xdp); |
2825 | break; |
2826 | case IGC_XDP_TX: |
2827 | case IGC_XDP_REDIRECT: |
2828 | xdp_status |= res; |
2829 | break; |
2830 | } |
2831 | |
2832 | bi->xdp = NULL; |
2833 | total_bytes += size; |
2834 | total_packets++; |
2835 | cleaned_count++; |
2836 | ntc++; |
2837 | if (ntc == ring->count) |
2838 | ntc = 0; |
2839 | } |
2840 | |
2841 | ring->next_to_clean = ntc; |
2842 | rcu_read_unlock(); |
2843 | |
2844 | if (cleaned_count >= IGC_RX_BUFFER_WRITE) |
2845 | failure = !igc_alloc_rx_buffers_zc(ring, count: cleaned_count); |
2846 | |
2847 | if (xdp_status) |
2848 | igc_finalize_xdp(adapter, status: xdp_status); |
2849 | |
2850 | igc_update_rx_stats(q_vector, packets: total_packets, bytes: total_bytes); |
2851 | |
2852 | if (xsk_uses_need_wakeup(pool: ring->xsk_pool)) { |
2853 | if (failure || ring->next_to_clean == ring->next_to_use) |
2854 | xsk_set_rx_need_wakeup(pool: ring->xsk_pool); |
2855 | else |
2856 | xsk_clear_rx_need_wakeup(pool: ring->xsk_pool); |
2857 | return total_packets; |
2858 | } |
2859 | |
2860 | return failure ? budget : total_packets; |
2861 | } |
2862 | |
2863 | static void igc_update_tx_stats(struct igc_q_vector *q_vector, |
2864 | unsigned int packets, unsigned int bytes) |
2865 | { |
2866 | struct igc_ring *ring = q_vector->tx.ring; |
2867 | |
2868 | u64_stats_update_begin(syncp: &ring->tx_syncp); |
2869 | ring->tx_stats.bytes += bytes; |
2870 | ring->tx_stats.packets += packets; |
2871 | u64_stats_update_end(syncp: &ring->tx_syncp); |
2872 | |
2873 | q_vector->tx.total_bytes += bytes; |
2874 | q_vector->tx.total_packets += packets; |
2875 | } |
2876 | |
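/* AF_XDP zero-copy transmit: drain descriptors from the XSK Tx ring and
 * post them to the hardware ring under the netdev Tx queue lock, limited by
 * the number of unused descriptors.
 */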
2877 | static void igc_xdp_xmit_zc(struct igc_ring *ring) |
2878 | { |
2879 | struct xsk_buff_pool *pool = ring->xsk_pool; |
2880 | struct netdev_queue *nq = txring_txq(tx_ring: ring); |
2881 | union igc_adv_tx_desc *tx_desc = NULL; |
2882 | int cpu = smp_processor_id(); |
2883 | struct xdp_desc xdp_desc; |
2884 | u16 budget, ntu; |
2885 | |
2886 | if (!netif_carrier_ok(dev: ring->netdev)) |
2887 | return; |
2888 | |
2889 | __netif_tx_lock(txq: nq, cpu); |
2890 | |
2891 | /* Avoid transmit queue timeout since we share it with the slow path */ |
2892 | txq_trans_cond_update(txq: nq); |
2893 | |
2894 | ntu = ring->next_to_use; |
2895 | budget = igc_desc_unused(ring); |
2896 | |
2897 | while (xsk_tx_peek_desc(pool, desc: &xdp_desc) && budget--) { |
2898 | u32 cmd_type, olinfo_status; |
2899 | struct igc_tx_buffer *bi; |
2900 | dma_addr_t dma; |
2901 | |
2902 | cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | |
2903 | IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | |
2904 | xdp_desc.len; |
2905 | olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT; |
2906 | |
2907 | dma = xsk_buff_raw_get_dma(pool, addr: xdp_desc.addr); |
2908 | xsk_buff_raw_dma_sync_for_device(pool, dma, size: xdp_desc.len); |
2909 | |
2910 | tx_desc = IGC_TX_DESC(ring, ntu); |
2911 | tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); |
2912 | tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); |
2913 | tx_desc->read.buffer_addr = cpu_to_le64(dma); |
2914 | |
2915 | bi = &ring->tx_buffer_info[ntu]; |
2916 | bi->type = IGC_TX_BUFFER_TYPE_XSK; |
2917 | bi->protocol = 0; |
2918 | bi->bytecount = xdp_desc.len; |
2919 | bi->gso_segs = 1; |
2920 | bi->time_stamp = jiffies; |
2921 | bi->next_to_watch = tx_desc; |
2922 | |
2923 | netdev_tx_sent_queue(dev_queue: txring_txq(tx_ring: ring), bytes: xdp_desc.len); |
2924 | |
2925 | ntu++; |
2926 | if (ntu == ring->count) |
2927 | ntu = 0; |
2928 | } |
2929 | |
2930 | ring->next_to_use = ntu; |
2931 | if (tx_desc) { |
2932 | igc_flush_tx_descriptors(ring); |
2933 | xsk_tx_release(pool); |
2934 | } |
2935 | |
2936 | __netif_tx_unlock(txq: nq); |
2937 | } |
2938 | |
2939 | /** |
2940 | * igc_clean_tx_irq - Reclaim resources after transmit completes |
2941 | * @q_vector: pointer to q_vector containing needed info |
2942 | * @napi_budget: Used to determine if we are in netpoll |
2943 | * |
2944 | * returns true if ring is completely cleaned |
2945 | */ |
2946 | static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) |
2947 | { |
2948 | struct igc_adapter *adapter = q_vector->adapter; |
2949 | unsigned int total_bytes = 0, total_packets = 0; |
2950 | unsigned int budget = q_vector->tx.work_limit; |
2951 | struct igc_ring *tx_ring = q_vector->tx.ring; |
2952 | unsigned int i = tx_ring->next_to_clean; |
2953 | struct igc_tx_buffer *tx_buffer; |
2954 | union igc_adv_tx_desc *tx_desc; |
2955 | u32 xsk_frames = 0; |
2956 | |
2957 | if (test_bit(__IGC_DOWN, &adapter->state)) |
2958 | return true; |
2959 | |
2960 | tx_buffer = &tx_ring->tx_buffer_info[i]; |
2961 | tx_desc = IGC_TX_DESC(tx_ring, i); |
2962 | i -= tx_ring->count; |
2963 | |
2964 | do { |
2965 | union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; |
2966 | |
2967 | /* if next_to_watch is not set then there is no work pending */ |
2968 | if (!eop_desc) |
2969 | break; |
2970 | |
2971 | /* prevent any other reads prior to eop_desc */ |
2972 | smp_rmb(); |
2973 | |
2974 | /* if DD is not set pending work has not been completed */ |
2975 | if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD))) |
2976 | break; |
2977 | |
2978 | /* clear next_to_watch to prevent false hangs */ |
2979 | tx_buffer->next_to_watch = NULL; |
2980 | |
2981 | /* update the statistics for this packet */ |
2982 | total_bytes += tx_buffer->bytecount; |
2983 | total_packets += tx_buffer->gso_segs; |
2984 | |
2985 | switch (tx_buffer->type) { |
2986 | case IGC_TX_BUFFER_TYPE_XSK: |
2987 | xsk_frames++; |
2988 | break; |
2989 | case IGC_TX_BUFFER_TYPE_XDP: |
2990 | xdp_return_frame(xdpf: tx_buffer->xdpf); |
2991 | igc_unmap_tx_buffer(dev: tx_ring->dev, buf: tx_buffer); |
2992 | break; |
2993 | case IGC_TX_BUFFER_TYPE_SKB: |
2994 | napi_consume_skb(skb: tx_buffer->skb, budget: napi_budget); |
2995 | igc_unmap_tx_buffer(dev: tx_ring->dev, buf: tx_buffer); |
2996 | break; |
2997 | default: |
2998 | netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); |
2999 | break; |
3000 | } |
3001 | |
3002 | /* clear last DMA location and unmap remaining buffers */ |
3003 | while (tx_desc != eop_desc) { |
3004 | tx_buffer++; |
3005 | tx_desc++; |
3006 | i++; |
3007 | if (unlikely(!i)) { |
3008 | i -= tx_ring->count; |
3009 | tx_buffer = tx_ring->tx_buffer_info; |
3010 | tx_desc = IGC_TX_DESC(tx_ring, 0); |
3011 | } |
3012 | |
3013 | /* unmap any remaining paged data */ |
3014 | if (dma_unmap_len(tx_buffer, len)) |
3015 | igc_unmap_tx_buffer(dev: tx_ring->dev, buf: tx_buffer); |
3016 | } |
3017 | |
3018 | /* move us one more past the eop_desc for start of next pkt */ |
3019 | tx_buffer++; |
3020 | tx_desc++; |
3021 | i++; |
3022 | if (unlikely(!i)) { |
3023 | i -= tx_ring->count; |
3024 | tx_buffer = tx_ring->tx_buffer_info; |
3025 | tx_desc = IGC_TX_DESC(tx_ring, 0); |
3026 | } |
3027 | |
3028 | /* issue prefetch for next Tx descriptor */ |
3029 | prefetch(tx_desc); |
3030 | |
3031 | /* update budget accounting */ |
3032 | budget--; |
3033 | } while (likely(budget)); |
3034 | |
3035 | netdev_tx_completed_queue(dev_queue: txring_txq(tx_ring), |
3036 | pkts: total_packets, bytes: total_bytes); |
3037 | |
3038 | i += tx_ring->count; |
3039 | tx_ring->next_to_clean = i; |
3040 | |
3041 | igc_update_tx_stats(q_vector, packets: total_packets, bytes: total_bytes); |
3042 | |
3043 | if (tx_ring->xsk_pool) { |
3044 | if (xsk_frames) |
3045 | xsk_tx_completed(pool: tx_ring->xsk_pool, nb_entries: xsk_frames); |
3046 | if (xsk_uses_need_wakeup(pool: tx_ring->xsk_pool)) |
3047 | xsk_set_tx_need_wakeup(pool: tx_ring->xsk_pool); |
3048 | igc_xdp_xmit_zc(ring: tx_ring); |
3049 | } |
3050 | |
3051 | if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { |
3052 | struct igc_hw *hw = &adapter->hw; |
3053 | |
3054 | /* Detect a transmit hang in hardware, this serializes the |
3055 | * check with the clearing of time_stamp and movement of i |
3056 | */ |
3057 | clear_bit(nr: IGC_RING_FLAG_TX_DETECT_HANG, addr: &tx_ring->flags); |
3058 | if (tx_buffer->next_to_watch && |
3059 | time_after(jiffies, tx_buffer->time_stamp + |
3060 | (adapter->tx_timeout_factor * HZ)) && |
3061 | !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) && |
3062 | (rd32(IGC_TDH(tx_ring->reg_idx)) != readl(addr: tx_ring->tail)) && |
3063 | !tx_ring->oper_gate_closed) { |
3064 | /* detected Tx unit hang */ |
3065 | netdev_err(dev: tx_ring->netdev, |
3066 | format: "Detected Tx Unit Hang\n" |
3067 | " Tx Queue <%d>\n" |
3068 | " TDH <%x>\n" |
3069 | " TDT <%x>\n" |
3070 | " next_to_use <%x>\n" |
3071 | " next_to_clean <%x>\n" |
3072 | "buffer_info[next_to_clean]\n" |
3073 | " time_stamp <%lx>\n" |
3074 | " next_to_watch <%p>\n" |
3075 | " jiffies <%lx>\n" |
3076 | " desc.status <%x>\n", |
3077 | tx_ring->queue_index, |
3078 | rd32(IGC_TDH(tx_ring->reg_idx)), |
3079 | readl(addr: tx_ring->tail), |
3080 | tx_ring->next_to_use, |
3081 | tx_ring->next_to_clean, |
3082 | tx_buffer->time_stamp, |
3083 | tx_buffer->next_to_watch, |
3084 | jiffies, |
3085 | tx_buffer->next_to_watch->wb.status); |
3086 | netif_stop_subqueue(dev: tx_ring->netdev, |
3087 | queue_index: tx_ring->queue_index); |
3088 | |
3089 | /* we are about to reset, no point in enabling stuff */ |
3090 | return true; |
3091 | } |
3092 | } |
3093 | |
3094 | #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) |
3095 | if (unlikely(total_packets && |
3096 | netif_carrier_ok(tx_ring->netdev) && |
3097 | igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { |
3098 | /* Make sure that anybody stopping the queue after this |
3099 | * sees the new next_to_clean. |
3100 | */ |
3101 | smp_mb(); |
3102 | if (__netif_subqueue_stopped(dev: tx_ring->netdev, |
3103 | queue_index: tx_ring->queue_index) && |
3104 | !(test_bit(__IGC_DOWN, &adapter->state))) { |
3105 | netif_wake_subqueue(dev: tx_ring->netdev, |
3106 | queue_index: tx_ring->queue_index); |
3107 | |
3108 | u64_stats_update_begin(syncp: &tx_ring->tx_syncp); |
3109 | tx_ring->tx_stats.restart_queue++; |
3110 | u64_stats_update_end(syncp: &tx_ring->tx_syncp); |
3111 | } |
3112 | } |
3113 | |
3114 | return !!budget; |
3115 | } |
3116 | |
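/* Scan the RAL/RAH receive address registers for an enabled entry matching
 * the given address and filter type. Returns the entry index, or -1 if no
 * match is found.
 */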
3117 | static int igc_find_mac_filter(struct igc_adapter *adapter, |
3118 | enum igc_mac_filter_type type, const u8 *addr) |
3119 | { |
3120 | struct igc_hw *hw = &adapter->hw; |
3121 | int max_entries = hw->mac.rar_entry_count; |
3122 | u32 ral, rah; |
3123 | int i; |
3124 | |
3125 | for (i = 0; i < max_entries; i++) { |
3126 | ral = rd32(IGC_RAL(i)); |
3127 | rah = rd32(IGC_RAH(i)); |
3128 | |
3129 | if (!(rah & IGC_RAH_AV)) |
3130 | continue; |
3131 | if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type) |
3132 | continue; |
3133 | if ((rah & IGC_RAH_RAH_MASK) != |
3134 | le16_to_cpup((__le16 *)(addr + 4))) |
3135 | continue; |
3136 | if (ral != le32_to_cpup((__le32 *)(addr))) |
3137 | continue; |
3138 | |
3139 | return i; |
3140 | } |
3141 | |
3142 | return -1; |
3143 | } |
3144 | |
3145 | static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter) |
3146 | { |
3147 | struct igc_hw *hw = &adapter->hw; |
3148 | int max_entries = hw->mac.rar_entry_count; |
3149 | u32 rah; |
3150 | int i; |
3151 | |
3152 | for (i = 0; i < max_entries; i++) { |
3153 | rah = rd32(IGC_RAH(i)); |
3154 | |
3155 | if (!(rah & IGC_RAH_AV)) |
3156 | return i; |
3157 | } |
3158 | |
3159 | return -1; |
3160 | } |
3161 | |
3162 | /** |
3163 | * igc_add_mac_filter() - Add MAC address filter |
3164 | * @adapter: Pointer to adapter where the filter should be added |
3165 | * @type: MAC address filter type (source or destination) |
3166 | * @addr: MAC address |
3167 | * @queue: If non-negative, queue assignment feature is enabled and frames |
3168 | * matching the filter are enqueued onto 'queue'. Otherwise, queue |
3169 | * assignment is disabled. |
3170 | * |
3171 | * Return: 0 in case of success, negative errno code otherwise. |
3172 | */ |
3173 | static int igc_add_mac_filter(struct igc_adapter *adapter, |
3174 | enum igc_mac_filter_type type, const u8 *addr, |
3175 | int queue) |
3176 | { |
3177 | struct net_device *dev = adapter->netdev; |
3178 | int index; |
3179 | |
3180 | index = igc_find_mac_filter(adapter, type, addr); |
3181 | if (index >= 0) |
3182 | goto update_filter; |
3183 | |
3184 | index = igc_get_avail_mac_filter_slot(adapter); |
3185 | if (index < 0) |
3186 | return -ENOSPC; |
3187 | |
3188 | netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n", |
3189 | index, type == IGC_MAC_FILTER_TYPE_DST ? "dst": "src", |
3190 | addr, queue); |
3191 | |
3192 | update_filter: |
3193 | igc_set_mac_filter_hw(adapter, index, type, addr, queue); |
3194 | return 0; |
3195 | } |
3196 | |
3197 | /** |
3198 | * igc_del_mac_filter() - Delete MAC address filter |
3199 | * @adapter: Pointer to adapter where the filter should be deleted from |
3200 | * @type: MAC address filter type (source or destination) |
3201 | * @addr: MAC address |
3202 | */ |
3203 | static void igc_del_mac_filter(struct igc_adapter *adapter, |
3204 | enum igc_mac_filter_type type, const u8 *addr) |
3205 | { |
3206 | struct net_device *dev = adapter->netdev; |
3207 | int index; |
3208 | |
3209 | index = igc_find_mac_filter(adapter, type, addr); |
3210 | if (index < 0) |
3211 | return; |
3212 | |
3213 | if (index == 0) { |
3214 | /* If this is the default filter, we don't actually delete it. |
3215 | * We just reset to its default value i.e. disable queue |
3216 | * assignment. |
3217 | */ |
3218 | netdev_dbg(dev, "Disable default MAC filter queue assignment"); |
3219 | |
3220 | igc_set_mac_filter_hw(adapter, index: 0, type, addr, queue: -1); |
3221 | } else { |
3222 | netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n", |
3223 | index, |
3224 | type == IGC_MAC_FILTER_TYPE_DST ? "dst": "src", |
3225 | addr); |
3226 | |
3227 | igc_clear_mac_filter_hw(adapter, index); |
3228 | } |
3229 | } |
3230 | |
3231 | /** |
3232 | * igc_add_vlan_prio_filter() - Add VLAN priority filter |
3233 | * @adapter: Pointer to adapter where the filter should be added |
3234 | * @prio: VLAN priority value |
3235 | * @queue: Queue number which matching frames are assigned to |
3236 | * |
3237 | * Return: 0 in case of success, negative errno code otherwise. |
3238 | */ |
3239 | static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio, |
3240 | int queue) |
3241 | { |
3242 | struct net_device *dev = adapter->netdev; |
3243 | struct igc_hw *hw = &adapter->hw; |
3244 | u32 vlanpqf; |
3245 | |
3246 | vlanpqf = rd32(IGC_VLANPQF); |
3247 | |
3248 | if (vlanpqf & IGC_VLANPQF_VALID(prio)) { |
3249 | netdev_dbg(dev, "VLAN priority filter already in use\n"); |
3250 | return -EEXIST; |
3251 | } |
3252 | |
3253 | vlanpqf |= IGC_VLANPQF_QSEL(prio, queue); |
3254 | vlanpqf |= IGC_VLANPQF_VALID(prio); |
3255 | |
3256 | wr32(IGC_VLANPQF, vlanpqf); |
3257 | |
3258 | netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n", |
3259 | prio, queue); |
3260 | return 0; |
3261 | } |
3262 | |
3263 | /** |
3264 | * igc_del_vlan_prio_filter() - Delete VLAN priority filter |
3265 | * @adapter: Pointer to adapter where the filter should be deleted from |
3266 | * @prio: VLAN priority value |
3267 | */ |
3268 | static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio) |
3269 | { |
3270 | struct igc_hw *hw = &adapter->hw; |
3271 | u32 vlanpqf; |
3272 | |
3273 | vlanpqf = rd32(IGC_VLANPQF); |
3274 | |
3275 | vlanpqf &= ~IGC_VLANPQF_VALID(prio); |
3276 | vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK); |
3277 | |
3278 | wr32(IGC_VLANPQF, vlanpqf); |
3279 | |
3280 | netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n", |
3281 | prio); |
3282 | } |
3283 | |
3284 | static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter) |
3285 | { |
3286 | struct igc_hw *hw = &adapter->hw; |
3287 | int i; |
3288 | |
3289 | for (i = 0; i < MAX_ETYPE_FILTER; i++) { |
3290 | u32 etqf = rd32(IGC_ETQF(i)); |
3291 | |
3292 | if (!(etqf & IGC_ETQF_FILTER_ENABLE)) |
3293 | return i; |
3294 | } |
3295 | |
3296 | return -1; |
3297 | } |
3298 | |
3299 | /** |
3300 | * igc_add_etype_filter() - Add ethertype filter |
3301 | * @adapter: Pointer to adapter where the filter should be added |
3302 | * @etype: Ethertype value |
3303 | * @queue: If non-negative, queue assignment feature is enabled and frames |
3304 | * matching the filter are enqueued onto 'queue'. Otherwise, queue |
3305 | * assignment is disabled. |
3306 | * |
3307 | * Return: 0 in case of success, negative errno code otherwise. |
3308 | */ |
3309 | static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype, |
3310 | int queue) |
3311 | { |
3312 | struct igc_hw *hw = &adapter->hw; |
3313 | int index; |
3314 | u32 etqf; |
3315 | |
3316 | index = igc_get_avail_etype_filter_slot(adapter); |
3317 | if (index < 0) |
3318 | return -ENOSPC; |
3319 | |
3320 | etqf = rd32(IGC_ETQF(index)); |
3321 | |
3322 | etqf &= ~IGC_ETQF_ETYPE_MASK; |
3323 | etqf |= etype; |
3324 | |
3325 | if (queue >= 0) { |
3326 | etqf &= ~IGC_ETQF_QUEUE_MASK; |
3327 | etqf |= (queue << IGC_ETQF_QUEUE_SHIFT); |
3328 | etqf |= IGC_ETQF_QUEUE_ENABLE; |
3329 | } |
3330 | |
3331 | etqf |= IGC_ETQF_FILTER_ENABLE; |
3332 | |
3333 | wr32(IGC_ETQF(index), etqf); |
3334 | |
3335 | netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n", |
3336 | etype, queue); |
3337 | return 0; |
3338 | } |
3339 | |
3340 | static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype) |
3341 | { |
3342 | struct igc_hw *hw = &adapter->hw; |
3343 | int i; |
3344 | |
3345 | for (i = 0; i < MAX_ETYPE_FILTER; i++) { |
3346 | u32 etqf = rd32(IGC_ETQF(i)); |
3347 | |
3348 | if ((etqf & IGC_ETQF_ETYPE_MASK) == etype) |
3349 | return i; |
3350 | } |
3351 | |
3352 | return -1; |
3353 | } |
3354 | |
3355 | /** |
3356 | * igc_del_etype_filter() - Delete ethertype filter |
3357 | * @adapter: Pointer to adapter where the filter should be deleted from |
3358 | * @etype: Ethertype value |
3359 | */ |
3360 | static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype) |
3361 | { |
3362 | struct igc_hw *hw = &adapter->hw; |
3363 | int index; |
3364 | |
3365 | index = igc_find_etype_filter(adapter, etype); |
3366 | if (index < 0) |
3367 | return; |
3368 | |
3369 | wr32(IGC_ETQF(index), 0); |
3370 | |
3371 | netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n", |
3372 | etype); |
3373 | } |
3374 | |
3375 | static int igc_flex_filter_select(struct igc_adapter *adapter, |
3376 | struct igc_flex_filter *input, |
3377 | u32 *fhft) |
3378 | { |
3379 | struct igc_hw *hw = &adapter->hw; |
3380 | u8 fhft_index; |
3381 | u32 fhftsl; |
3382 | |
3383 | if (input->index >= MAX_FLEX_FILTER) { |
3384 | netdev_err(adapter->netdev, "Wrong Flex Filter index selected!\n"); |
3385 | return -EINVAL; |
3386 | } |
3387 | |
3388 | /* Indirect table select register */ |
3389 | fhftsl = rd32(IGC_FHFTSL); |
3390 | fhftsl &= ~IGC_FHFTSL_FTSL_MASK; |
3391 | switch (input->index) { |
3392 | case 0 ... 7: |
3393 | fhftsl |= 0x00; |
3394 | break; |
3395 | case 8 ... 15: |
3396 | fhftsl |= 0x01; |
3397 | break; |
3398 | case 16 ... 23: |
3399 | fhftsl |= 0x02; |
3400 | break; |
3401 | case 24 ... 31: |
3402 | fhftsl |= 0x03; |
3403 | break; |
3404 | } |
3405 | wr32(IGC_FHFTSL, fhftsl); |
3406 | |
3407 | /* Normalize index down to host table register */ |
3408 | fhft_index = input->index % 8; |
3409 | |
3410 | *fhft = (fhft_index < 4) ? IGC_FHFT(fhft_index) : |
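| /* Each bank selected above holds 8 filters; within a bank the first |
| * four map to IGC_FHFT and the remaining four to IGC_FHFT_EXT. |
| */ |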
3411 | IGC_FHFT_EXT(fhft_index - 4); |
3412 | |
3413 | return 0; |
3414 | } |
3415 | |
3416 | static int igc_write_flex_filter_ll(struct igc_adapter *adapter, |
3417 | struct igc_flex_filter *input) |
3418 | { |
3419 | struct igc_hw *hw = &adapter->hw; |
3420 | u8 *data = input->data; |
3421 | u8 *mask = input->mask; |
3422 | u32 queuing; |
3423 | u32 fhft; |
3424 | u32 wufc; |
3425 | int ret; |
3426 | int i; |
3427 | |
3428 | /* Length has to be aligned to 8. Otherwise the filter will fail. Bail |
3429 | * out early to avoid surprises later. |
3430 | */ |
3431 | if (input->length % 8 != 0) { |
3432 | netdev_err(adapter->netdev, "The length of a flex filter has to be 8 byte aligned!\n"); |
3433 | return -EINVAL; |
3434 | } |
3435 | |
3436 | /* Select corresponding flex filter register and get base for host table. */ |
3437 | ret = igc_flex_filter_select(adapter, input, &fhft); |
3438 | if (ret) |
3439 | return ret; |
3440 | |
3441 | /* When adding a filter, globally disable the flex filter feature first, |
3442 | * as recommended by the datasheet. |
3443 | */ |
3444 | wufc = rd32(IGC_WUFC); |
3445 | wufc &= ~IGC_WUFC_FLEX_HQ; |
3446 | wr32(IGC_WUFC, wufc); |
3447 | |
3448 | /* Configure filter */ |
3449 | queuing = input->length & IGC_FHFT_LENGTH_MASK; |
3450 | queuing |= FIELD_PREP(IGC_FHFT_QUEUE_MASK, input->rx_queue); |
3451 | queuing |= FIELD_PREP(IGC_FHFT_PRIO_MASK, input->prio); |
3452 | |
3453 | if (input->immediate_irq) |
3454 | queuing |= IGC_FHFT_IMM_INT; |
3455 | |
3456 | if (input->drop) |
3457 | queuing |= IGC_FHFT_DROP; |
3458 | |
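| /* The length/queueing dword lives at fixed offset 0xFC of the selected |
| * filter's host table block. |
| */ |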
3459 | wr32(fhft + 0xFC, queuing); |
3460 | |
3461 | /* Write data (128 byte) and mask (128 bit) */ |
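| /* Each of the 16 rows below carries 8 data bytes (two dwords) plus one |
| * mask byte, i.e. one mask bit per data byte. |
| */ |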
3462 | for (i = 0; i < 16; ++i) { |
3463 | const size_t data_idx = i * 8; |
3464 | const size_t row_idx = i * 16; |
3465 | u32 dw0 = |
3466 | (data[data_idx + 0] << 0) | |
3467 | (data[data_idx + 1] << 8) | |
3468 | (data[data_idx + 2] << 16) | |
3469 | (data[data_idx + 3] << 24); |
3470 | u32 dw1 = |
3471 | (data[data_idx + 4] << 0) | |
3472 | (data[data_idx + 5] << 8) | |
3473 | (data[data_idx + 6] << 16) | |
3474 | (data[data_idx + 7] << 24); |
3475 | u32 tmp; |
3476 | |
3477 | /* Write row: dw0, dw1 and mask */ |
3478 | wr32(fhft + row_idx, dw0); |
3479 | wr32(fhft + row_idx + 4, dw1); |
3480 | |
3481 | /* mask is only valid for MASK(7, 0) */ |
3482 | tmp = rd32(fhft + row_idx + 8); |
3483 | tmp &= ~GENMASK(7, 0); |
3484 | tmp |= mask[i]; |
3485 | wr32(fhft + row_idx + 8, tmp); |
3486 | } |
3487 | |
3488 | /* Enable filter. */ |
3489 | wufc |= IGC_WUFC_FLEX_HQ; |
3490 | if (input->index >= 8) { |
3491 | /* Filters 0-7 are enabled via WUFC; filters 8-31 via WUFC_EXT. */ |
3492 | u32 wufc_ext = rd32(IGC_WUFC_EXT); |
3493 | |
3494 | wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8)); |
3495 | |
3496 | wr32(IGC_WUFC_EXT, wufc_ext); |
3497 | } else { |
3498 | wufc |= (IGC_WUFC_FLX0 << input->index); |
3499 | } |
3500 | wr32(IGC_WUFC, wufc); |
3501 | |
3502 | netdev_dbg(adapter->netdev, "Added flex filter %u to HW.\n", |
3503 | input->index); |
3504 | |
3505 | return 0; |
3506 | } |
3507 | |
3508 | static void igc_flex_filter_add_field(struct igc_flex_filter *flex, |
3509 | const void *src, unsigned int offset, |
3510 | size_t len, const void *mask) |
3511 | { |
3512 | int i; |
3513 | |
3514 | /* data */ |
3515 | memcpy(&flex->data[offset], src, len); |
3516 | |
3517 | /* mask */ |
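| /* A nonzero mask byte marks the corresponding data byte as relevant; |
| * without a mask, every byte of the field is matched. |
| */ |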
3518 | for (i = 0; i < len; ++i) { |
3519 | const unsigned int idx = i + offset; |
3520 | const u8 *ptr = mask; |
3521 | |
3522 | if (mask) { |
3523 | if (ptr[i] & 0xff) |
3524 | flex->mask[idx / 8] |= BIT(idx % 8); |
3525 | |
3526 | continue; |
3527 | } |
3528 | |
3529 | flex->mask[idx / 8] |= BIT(idx % 8); |
3530 | } |
3531 | } |
3532 | |
3533 | static int igc_find_avail_flex_filter_slot(struct igc_adapter *adapter) |
3534 | { |
3535 | struct igc_hw *hw = &adapter->hw; |
3536 | u32 wufc, wufc_ext; |
3537 | int i; |
3538 | |
3539 | wufc = rd32(IGC_WUFC); |
3540 | wufc_ext = rd32(IGC_WUFC_EXT); |
3541 | |
3542 | for (i = 0; i < MAX_FLEX_FILTER; i++) { |
3543 | if (i < 8) { |
3544 | if (!(wufc & (IGC_WUFC_FLX0 << i))) |
3545 | return i; |
3546 | } else { |
3547 | if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8)))) |
3548 | return i; |
3549 | } |
3550 | } |
3551 | |
3552 | return -ENOSPC; |
3553 | } |
3554 | |
3555 | static bool igc_flex_filter_in_use(struct igc_adapter *adapter) |
3556 | { |
3557 | struct igc_hw *hw = &adapter->hw; |
3558 | u32 wufc, wufc_ext; |
3559 | |
3560 | wufc = rd32(IGC_WUFC); |
3561 | wufc_ext = rd32(IGC_WUFC_EXT); |
3562 | |
3563 | if (wufc & IGC_WUFC_FILTER_MASK) |
3564 | return true; |
3565 | |
3566 | if (wufc_ext & IGC_WUFC_EXT_FILTER_MASK) |
3567 | return true; |
3568 | |
3569 | return false; |
3570 | } |
3571 | |
3572 | static int igc_add_flex_filter(struct igc_adapter *adapter, |
3573 | struct igc_nfc_rule *rule) |
3574 | { |
3575 | struct igc_nfc_filter *filter = &rule->filter; |
3576 | unsigned int eth_offset, user_offset; |
3577 | struct igc_flex_filter flex = { }; |
3578 | int ret, index; |
3579 | bool vlan; |
3580 | |
3581 | index = igc_find_avail_flex_filter_slot(adapter); |
3582 | if (index < 0) |
3583 | return -ENOSPC; |
3584 | |
3585 | /* Construct the flex filter: |
3586 | * -> dest_mac [6] |
3587 | * -> src_mac [6] |
3588 | * -> tpid [2] |
3589 | * -> vlan tci [2] |
3590 | * -> ether type [2] |
3591 | * -> user data [8] |
3592 | * -> = 26 bytes => padded to a length of 32 (next multiple of 8) |
3593 | */ |
3594 | flex.index = index; |
3595 | flex.length = 32; |
3596 | flex.rx_queue = rule->action; |
3597 | |
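| /* Matching on a VLAN tag shifts the EtherType and user data fields by |
| * 4 bytes to make room for the 802.1Q header. |
| */ |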
3598 | vlan = rule->filter.vlan_tci || rule->filter.vlan_etype; |
3599 | eth_offset = vlan ? 16 : 12; |
3600 | user_offset = vlan ? 18 : 14; |
3601 | |
3602 | /* Add destination MAC */ |
3603 | if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) |
3604 | igc_flex_filter_add_field(&flex, &filter->dst_addr, 0, |
3605 | ETH_ALEN, NULL); |
3606 | |
3607 | /* Add source MAC */ |
3608 | if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) |
3609 | igc_flex_filter_add_field(&flex, &filter->src_addr, 6, |
3610 | ETH_ALEN, NULL); |
3611 | |
3612 | /* Add VLAN etype */ |
3613 | if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) { |
3614 | __be16 vlan_etype = cpu_to_be16(filter->vlan_etype); |
3615 | |
3616 | igc_flex_filter_add_field(&flex, &vlan_etype, 12, |
3617 | sizeof(vlan_etype), NULL); |
3618 | } |
3619 | |
3620 | /* Add VLAN TCI */ |
3621 | if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) |
3622 | igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14, |
3623 | sizeof(filter->vlan_tci), NULL); |
3624 | |
3625 | /* Add Ether type */ |
3626 | if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { |
3627 | __be16 etype = cpu_to_be16(filter->etype); |
3628 | |
3629 | igc_flex_filter_add_field(&flex, &etype, eth_offset, |
3630 | sizeof(etype), NULL); |
3631 | } |
3632 | |
3633 | /* Add user data */ |
3634 | if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) |
3635 | igc_flex_filter_add_field(&flex, &filter->user_data, |
3636 | user_offset, |
3637 | sizeof(filter->user_data), |
3638 | filter->user_mask); |
3639 | |
3640 | /* Add it down to the hardware and enable it. */ |
3641 | ret = igc_write_flex_filter_ll(adapter, &flex); |
3642 | if (ret) |
3643 | return ret; |
3644 | |
3645 | filter->flex_index = index; |
3646 | |
3647 | return 0; |
3648 | } |
3649 | |
3650 | static void igc_del_flex_filter(struct igc_adapter *adapter, |
3651 | u16 reg_index) |
3652 | { |
3653 | struct igc_hw *hw = &adapter->hw; |
3654 | u32 wufc; |
3655 | |
3656 | /* Just disable the filter. The filter table itself is kept |
3657 | * intact. A subsequent flex filter add will simply overwrite the |
3658 | * "old" data. |
3659 | */ |
3660 | if (reg_index >= 8) { |
3661 | u32 wufc_ext = rd32(IGC_WUFC_EXT); |
3662 | |
3663 | wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8)); |
3664 | wr32(IGC_WUFC_EXT, wufc_ext); |
3665 | } else { |
3666 | wufc = rd32(IGC_WUFC); |
3667 | |
3668 | wufc &= ~(IGC_WUFC_FLX0 << reg_index); |
3669 | wr32(IGC_WUFC, wufc); |
3670 | } |
3671 | |
3672 | if (igc_flex_filter_in_use(adapter)) |
3673 | return; |
3674 | |
3675 | /* No filters are in use, we may disable flex filters */ |
3676 | wufc = rd32(IGC_WUFC); |
3677 | wufc &= ~IGC_WUFC_FLEX_HQ; |
3678 | wr32(IGC_WUFC, wufc); |
3679 | } |
3680 | |
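| /* NFC rules typically originate from ethtool's ntuple interface; e.g. |
| * (assuming the usual ethtool syntax) |
| * ethtool -N eth0 flow-type ether proto 0x88f7 action 1 |
| * arrives here as an ether-type rule steering matching frames (here the |
| * PTP ethertype) to Rx queue 1. |
| */ |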
3681 | static int igc_enable_nfc_rule(struct igc_adapter *adapter, |
3682 | struct igc_nfc_rule *rule) |
3683 | { |
3684 | int err; |
3685 | |
3686 | if (rule->flex) { |
3687 | return igc_add_flex_filter(adapter, rule); |
3688 | } |
3689 | |
3690 | if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { |
3691 | err = igc_add_etype_filter(adapter, rule->filter.etype, |
3692 | rule->action); |
3693 | if (err) |
3694 | return err; |
3695 | } |
3696 | |
3697 | if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { |
3698 | err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, |
3699 | rule->filter.src_addr, rule->action); |
3700 | if (err) |
3701 | return err; |
3702 | } |
3703 | |
3704 | if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { |
3705 | err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, |
3706 | rule->filter.dst_addr, rule->action); |
3707 | if (err) |
3708 | return err; |
3709 | } |
3710 | |
3711 | if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { |
3712 | int prio = FIELD_GET(VLAN_PRIO_MASK, rule->filter.vlan_tci); |
3713 | |
3714 | err = igc_add_vlan_prio_filter(adapter, prio, rule->action); |
3715 | if (err) |
3716 | return err; |
3717 | } |
3718 | |
3719 | return 0; |
3720 | } |
3721 | |
3722 | static void igc_disable_nfc_rule(struct igc_adapter *adapter, |
3723 | const struct igc_nfc_rule *rule) |
3724 | { |
3725 | if (rule->flex) { |
3726 | igc_del_flex_filter(adapter, rule->filter.flex_index); |
3727 | return; |
3728 | } |
3729 | |
3730 | if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) |
3731 | igc_del_etype_filter(adapter, rule->filter.etype); |
3732 | |
3733 | if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { |
3734 | int prio = FIELD_GET(VLAN_PRIO_MASK, rule->filter.vlan_tci); |
3735 | |
3736 | igc_del_vlan_prio_filter(adapter, prio); |
3737 | } |
3738 | |
3739 | if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) |
3740 | igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, |
3741 | rule->filter.src_addr); |
3742 | |
3743 | if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) |
3744 | igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, |
3745 | rule->filter.dst_addr); |
3746 | } |
3747 | |
3748 | /** |
3749 | * igc_get_nfc_rule() - Get NFC rule |
3750 | * @adapter: Pointer to adapter |
3751 | * @location: Rule location |
3752 | * |
3753 | * Context: Expects adapter->nfc_rule_lock to be held by caller. |
3754 | * |
3755 | * Return: Pointer to NFC rule at @location. If not found, NULL. |
3756 | */ |
3757 | struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter, |
3758 | u32 location) |
3759 | { |
3760 | struct igc_nfc_rule *rule; |
3761 | |
3762 | list_for_each_entry(rule, &adapter->nfc_rule_list, list) { |
3763 | if (rule->location == location) |
3764 | return rule; |
3765 | if (rule->location > location) |
3766 | break; |
3767 | } |
3768 | |
3769 | return NULL; |
3770 | } |
3771 | |
3772 | /** |
3773 | * igc_del_nfc_rule() - Delete NFC rule |
3774 | * @adapter: Pointer to adapter |
3775 | * @rule: Pointer to rule to be deleted |
3776 | * |
3777 | * Disable NFC rule in hardware and delete it from adapter. |
3778 | * |
3779 | * Context: Expects adapter->nfc_rule_lock to be held by caller. |
3780 | */ |
3781 | void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) |
3782 | { |
3783 | igc_disable_nfc_rule(adapter, rule); |
3784 | |
3785 | list_del(&rule->list); |
3786 | adapter->nfc_rule_count--; |
3787 | |
3788 | kfree(rule); |
3789 | } |
3790 | |
3791 | static void igc_flush_nfc_rules(struct igc_adapter *adapter) |
3792 | { |
3793 | struct igc_nfc_rule *rule, *tmp; |
3794 | |
3795 | mutex_lock(&adapter->nfc_rule_lock); |
3796 | |
3797 | list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list) |
3798 | igc_del_nfc_rule(adapter, rule); |
3799 | |
3800 | mutex_unlock(&adapter->nfc_rule_lock); |
3801 | } |
3802 | |
3803 | /** |
3804 | * igc_add_nfc_rule() - Add NFC rule |
3805 | * @adapter: Pointer to adapter |
3806 | * @rule: Pointer to rule to be added |
3807 | * |
3808 | * Enable NFC rule in hardware and add it to adapter. |
3809 | * |
3810 | * Context: Expects adapter->nfc_rule_lock to be held by caller. |
3811 | * |
3812 | * Return: 0 on success, negative errno on failure. |
3813 | */ |
3814 | int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule) |
3815 | { |
3816 | struct igc_nfc_rule *pred, *cur; |
3817 | int err; |
3818 | |
3819 | err = igc_enable_nfc_rule(adapter, rule); |
3820 | if (err) |
3821 | return err; |
3822 | |
3823 | pred = NULL; |
3824 | list_for_each_entry(cur, &adapter->nfc_rule_list, list) { |
3825 | if (cur->location >= rule->location) |
3826 | break; |
3827 | pred = cur; |
3828 | } |
3829 | |
3830 | list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list); |
3831 | adapter->nfc_rule_count++; |
3832 | return 0; |
3833 | } |
3834 | |
3835 | static void igc_restore_nfc_rules(struct igc_adapter *adapter) |
3836 | { |
3837 | struct igc_nfc_rule *rule; |
3838 | |
3839 | mutex_lock(&adapter->nfc_rule_lock); |
3840 | |
3841 | list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list) |
3842 | igc_enable_nfc_rule(adapter, rule); |
3843 | |
3844 | mutex_unlock(&adapter->nfc_rule_lock); |
3845 | } |
3846 | |
3847 | static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr) |
3848 | { |
3849 | struct igc_adapter *adapter = netdev_priv(netdev); |
3850 | |
3851 | return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1); |
3852 | } |
3853 | |
3854 | static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr) |
3855 | { |
3856 | struct igc_adapter *adapter = netdev_priv(netdev); |
3857 | |
3858 | igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr); |
3859 | return 0; |
3860 | } |
3861 | |
3862 | /** |
3863 | * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set |
3864 | * @netdev: network interface device structure |
3865 | * |
3866 | * The set_rx_mode entry point is called whenever the unicast or multicast |
3867 | * address lists or the network interface flags are updated. This routine is |
3868 | * responsible for configuring the hardware for proper unicast, multicast, |
3869 | * promiscuous mode, and all-multi behavior. |
3870 | */ |
3871 | static void igc_set_rx_mode(struct net_device *netdev) |
3872 | { |
3873 | struct igc_adapter *adapter = netdev_priv(netdev); |
3874 | struct igc_hw *hw = &adapter->hw; |
3875 | u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE; |
3876 | int count; |
3877 | |
3878 | /* Check for Promiscuous and All Multicast modes */ |
3879 | if (netdev->flags & IFF_PROMISC) { |
3880 | rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE; |
3881 | } else { |
3882 | if (netdev->flags & IFF_ALLMULTI) { |
3883 | rctl |= IGC_RCTL_MPE; |
3884 | } else { |
3885 | /* Write addresses to the MTA, if the attempt fails |
3886 | * then we should just turn on promiscuous mode so |
3887 | * that we can at least receive multicast traffic |
3888 | */ |
3889 | count = igc_write_mc_addr_list(netdev); |
3890 | if (count < 0) |
3891 | rctl |= IGC_RCTL_MPE; |
3892 | } |
3893 | } |
3894 | |
3895 | /* Write addresses to available RAR registers, if there is not |
3896 | * sufficient space to store all the addresses then enable |
3897 | * unicast promiscuous mode |
3898 | */ |
3899 | if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync)) |
3900 | rctl |= IGC_RCTL_UPE; |
3901 | |
3902 | /* update state of unicast and multicast */ |
3903 | rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE); |
3904 | wr32(IGC_RCTL, rctl); |
3905 | |
3906 | #if (PAGE_SIZE < 8192) |
3907 | if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB) |
3908 | rlpml = IGC_MAX_FRAME_BUILD_SKB; |
3909 | #endif |
3910 | wr32(IGC_RLPML, rlpml); |
3911 | } |
3912 | |
3913 | /** |
3914 | * igc_configure - configure the hardware for RX and TX |
3915 | * @adapter: private board structure |
3916 | */ |
3917 | static void igc_configure(struct igc_adapter *adapter) |
3918 | { |
3919 | struct net_device *netdev = adapter->netdev; |
3920 | int i = 0; |
3921 | |
3922 | igc_get_hw_control(adapter); |
3923 | igc_set_rx_mode(netdev); |
3924 | |
3925 | igc_restore_vlan(adapter); |
3926 | |
3927 | igc_setup_tctl(adapter); |
3928 | igc_setup_mrqc(adapter); |
3929 | igc_setup_rctl(adapter); |
3930 | |
3931 | igc_set_default_mac_filter(adapter); |
3932 | igc_restore_nfc_rules(adapter); |
3933 | |
3934 | igc_configure_tx(adapter); |
3935 | igc_configure_rx(adapter); |
3936 | |
3937 | igc_rx_fifo_flush_base(&adapter->hw); |
3938 | |
3939 | /* call igc_desc_unused which always leaves |
3940 | * at least 1 descriptor unused to make sure |
3941 | * next_to_use != next_to_clean |
3942 | */ |
3943 | for (i = 0; i < adapter->num_rx_queues; i++) { |
3944 | struct igc_ring *ring = adapter->rx_ring[i]; |
3945 | |
3946 | if (ring->xsk_pool) |
3947 | igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring)); |
3948 | else |
3949 | igc_alloc_rx_buffers(ring, igc_desc_unused(ring)); |
3950 | } |
3951 | } |
3952 | |
3953 | /** |
3954 | * igc_write_ivar - configure ivar for given MSI-X vector |
3955 | * @hw: pointer to the HW structure |
3956 | * @msix_vector: vector number we are allocating to a given ring |
3957 | * @index: row index of IVAR register to write within IVAR table |
3958 | * @offset: column offset in IVAR, should be a multiple of 8 |
3959 | * |
3960 | * The IVAR table consists of 2 columns, |
3961 | * each containing a cause allocation for an Rx and a Tx ring, and a |
3962 | * variable number of rows depending on the number of queues supported. |
3963 | */ |
3964 | static void igc_write_ivar(struct igc_hw *hw, int msix_vector, |
3965 | int index, int offset) |
3966 | { |
3967 | u32 ivar = array_rd32(IGC_IVAR0, index); |
3968 | |
3969 | /* clear any bits that are currently set */ |
3970 | ivar &= ~((u32)0xFF << offset); |
3971 | |
3972 | /* write vector and valid bit */ |
3973 | ivar |= (msix_vector | IGC_IVAR_VALID) << offset; |
3974 | |
3975 | array_wr32(IGC_IVAR0, index, ivar); |
3976 | } |
3977 | |
3978 | static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector) |
3979 | { |
3980 | struct igc_adapter *adapter = q_vector->adapter; |
3981 | struct igc_hw *hw = &adapter->hw; |
3982 | int rx_queue = IGC_N0_QUEUE; |
3983 | int tx_queue = IGC_N0_QUEUE; |
3984 | |
3985 | if (q_vector->rx.ring) |
3986 | rx_queue = q_vector->rx.ring->reg_idx; |
3987 | if (q_vector->tx.ring) |
3988 | tx_queue = q_vector->tx.ring->reg_idx; |
3989 | |
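| /* Queue N is programmed into IVAR row N / 2; the Rx cause byte is |
| * selected by (N & 1) << 4 and the matching Tx cause sits 8 bits above |
| * it, so each 32-bit IVAR register holds the causes for two queues. |
| */ |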
3990 | switch (hw->mac.type) { |
3991 | case igc_i225: |
3992 | if (rx_queue > IGC_N0_QUEUE) |
3993 | igc_write_ivar(hw, msix_vector, |
3994 | rx_queue >> 1, |
3995 | (rx_queue & 0x1) << 4); |
3996 | if (tx_queue > IGC_N0_QUEUE) |
3997 | igc_write_ivar(hw, msix_vector, |
3998 | tx_queue >> 1, |
3999 | ((tx_queue & 0x1) << 4) + 8); |
4000 | q_vector->eims_value = BIT(msix_vector); |
4001 | break; |
4002 | default: |
4003 | WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n"); |
4004 | break; |
4005 | } |
4006 | |
4007 | /* add q_vector eims value to global eims_enable_mask */ |
4008 | adapter->eims_enable_mask |= q_vector->eims_value; |
4009 | |
4010 | /* configure q_vector to set itr on first interrupt */ |
4011 | q_vector->set_itr = 1; |
4012 | } |
4013 | |
4014 | /** |
4015 | * igc_configure_msix - Configure MSI-X hardware |
4016 | * @adapter: Pointer to adapter structure |
4017 | * |
4018 | * igc_configure_msix sets up the hardware to properly |
4019 | * generate MSI-X interrupts. |
4020 | */ |
4021 | static void igc_configure_msix(struct igc_adapter *adapter) |
4022 | { |
4023 | struct igc_hw *hw = &adapter->hw; |
4024 | int i, vector = 0; |
4025 | u32 tmp; |
4026 | |
4027 | adapter->eims_enable_mask = 0; |
4028 | |
4029 | /* set vector for other causes, i.e. link changes */ |
4030 | switch (hw->mac.type) { |
4031 | case igc_i225: |
4032 | /* Turn on MSI-X capability first, or our settings |
4033 | * won't stick. And it will take days to debug. |
4034 | */ |
4035 | wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE | |
4036 | IGC_GPIE_PBA | IGC_GPIE_EIAME | |
4037 | IGC_GPIE_NSICR); |
4038 | |
4039 | /* enable msix_other interrupt */ |
4040 | adapter->eims_other = BIT(vector); |
4041 | tmp = (vector++ | IGC_IVAR_VALID) << 8; |
4042 | |
4043 | wr32(IGC_IVAR_MISC, tmp); |
4044 | break; |
4045 | default: |
4046 | /* do nothing, since nothing else supports MSI-X */ |
4047 | break; |
4048 | } /* switch (hw->mac.type) */ |
4049 | |
4050 | adapter->eims_enable_mask |= adapter->eims_other; |
4051 | |
4052 | for (i = 0; i < adapter->num_q_vectors; i++) |
4053 | igc_assign_vector(adapter->q_vector[i], vector++); |
4054 | |
4055 | wrfl(); |
4056 | } |
4057 | |
4058 | /** |
4059 | * igc_irq_enable - Enable default interrupt generation settings |
4060 | * @adapter: board private structure |
4061 | */ |
4062 | static void igc_irq_enable(struct igc_adapter *adapter) |
4063 | { |
4064 | struct igc_hw *hw = &adapter->hw; |
4065 | |
4066 | if (adapter->msix_entries) { |
4067 | u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA; |
4068 | u32 regval = rd32(IGC_EIAC); |
4069 | |
4070 | wr32(IGC_EIAC, regval | adapter->eims_enable_mask); |
4071 | regval = rd32(IGC_EIAM); |
4072 | wr32(IGC_EIAM, regval | adapter->eims_enable_mask); |
4073 | wr32(IGC_EIMS, adapter->eims_enable_mask); |
4074 | wr32(IGC_IMS, ims); |
4075 | } else { |
4076 | wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA); |
4077 | wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA); |
4078 | } |
4079 | } |
4080 | |
4081 | /** |
4082 | * igc_irq_disable - Mask off interrupt generation on the NIC |
4083 | * @adapter: board private structure |
4084 | */ |
4085 | static void igc_irq_disable(struct igc_adapter *adapter) |
4086 | { |
4087 | struct igc_hw *hw = &adapter->hw; |
4088 | |
4089 | if (adapter->msix_entries) { |
4090 | u32 regval = rd32(IGC_EIAM); |
4091 | |
4092 | wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask); |
4093 | wr32(IGC_EIMC, adapter->eims_enable_mask); |
4094 | regval = rd32(IGC_EIAC); |
4095 | wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask); |
4096 | } |
4097 | |
4098 | wr32(IGC_IAM, 0); |
4099 | wr32(IGC_IMC, ~0); |
4100 | wrfl(); |
4101 | |
4102 | if (adapter->msix_entries) { |
4103 | int vector = 0, i; |
4104 | |
4105 | synchronize_irq(adapter->msix_entries[vector++].vector); |
4106 | |
4107 | for (i = 0; i < adapter->num_q_vectors; i++) |
4108 | synchronize_irq(adapter->msix_entries[vector++].vector); |
4109 | } else { |
4110 | synchronize_irq(adapter->pdev->irq); |
4111 | } |
4112 | } |
4113 | |
4114 | void igc_set_flag_queue_pairs(struct igc_adapter *adapter, |
4115 | const u32 max_rss_queues) |
4116 | { |
4117 | /* Determine if we need to pair queues. */ |
4118 | /* If rss_queues > half of max_rss_queues, pair the queues in |
4119 | * order to conserve interrupts due to limited supply. |
4120 | */ |
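| /* Example (assuming IGC_MAX_RX_QUEUES is 4): requesting 3 or 4 RSS |
| * queues enables pairing; 1 or 2 leaves Tx and Rx on separate vectors. |
| */ |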
4121 | if (adapter->rss_queues > (max_rss_queues / 2)) |
4122 | adapter->flags |= IGC_FLAG_QUEUE_PAIRS; |
4123 | else |
4124 | adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS; |
4125 | } |
4126 | |
4127 | unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter) |
4128 | { |
4129 | return IGC_MAX_RX_QUEUES; |
4130 | } |
4131 | |
4132 | static void igc_init_queue_configuration(struct igc_adapter *adapter) |
4133 | { |
4134 | u32 max_rss_queues; |
4135 | |
4136 | max_rss_queues = igc_get_max_rss_queues(adapter); |
4137 | adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); |
4138 | |
4139 | igc_set_flag_queue_pairs(adapter, max_rss_queues); |
4140 | } |
4141 | |
4142 | /** |
4143 | * igc_reset_q_vector - Reset config for interrupt vector |
4144 | * @adapter: board private structure to initialize |
4145 | * @v_idx: Index of vector to be reset |
4146 | * |
4147 | * If NAPI is enabled it will delete any references to the |
4148 | * NAPI struct. This is preparation for igc_free_q_vector. |
4149 | */ |
4150 | static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx) |
4151 | { |
4152 | struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; |
4153 | |
4154 | /* if we're coming from igc_set_interrupt_capability, the vectors are |
4155 | * not yet allocated |
4156 | */ |
4157 | if (!q_vector) |
4158 | return; |
4159 | |
4160 | if (q_vector->tx.ring) |
4161 | adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; |
4162 | |
4163 | if (q_vector->rx.ring) |
4164 | adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; |
4165 | |
4166 | netif_napi_del(&q_vector->napi); |
4167 | } |
4168 | |
4169 | /** |
4170 | * igc_free_q_vector - Free memory allocated for specific interrupt vector |
4171 | * @adapter: board private structure to initialize |
4172 | * @v_idx: Index of vector to be freed |
4173 | * |
4174 | * This function frees the memory allocated to the q_vector. |
4175 | */ |
4176 | static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx) |
4177 | { |
4178 | struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; |
4179 | |
4180 | adapter->q_vector[v_idx] = NULL; |
4181 | |
4182 | /* igc_get_stats64() might access the rings on this vector, so |
4183 | * we must wait a grace period before freeing it. |
4184 | */ |
4185 | if (q_vector) |
4186 | kfree_rcu(q_vector, rcu); |
4187 | } |
4188 | |
4189 | /** |
4190 | * igc_free_q_vectors - Free memory allocated for interrupt vectors |
4191 | * @adapter: board private structure to initialize |
4192 | * |
4193 | * This function frees the memory allocated to the q_vectors. In addition if |
4194 | * NAPI is enabled it will delete any references to the NAPI struct prior |
4195 | * to freeing the q_vector. |
4196 | */ |
4197 | static void igc_free_q_vectors(struct igc_adapter *adapter) |
4198 | { |
4199 | int v_idx = adapter->num_q_vectors; |
4200 | |
4201 | adapter->num_tx_queues = 0; |
4202 | adapter->num_rx_queues = 0; |
4203 | adapter->num_q_vectors = 0; |
4204 | |
4205 | while (v_idx--) { |
4206 | igc_reset_q_vector(adapter, v_idx); |
4207 | igc_free_q_vector(adapter, v_idx); |
4208 | } |
4209 | } |
4210 | |
4211 | /** |
4212 | * igc_update_itr - update the dynamic ITR value based on statistics |
4213 | * @q_vector: pointer to q_vector |
4214 | * @ring_container: ring info to update the itr for |
4215 | * |
4216 | * Stores a new ITR value based on packets and byte |
4217 | * counts during the last interrupt. The advantage of per interrupt |
4218 | * computation is faster updates and more accurate ITR for the current |
4219 | * traffic pattern. Constants in this function were computed |
4220 | * based on theoretical maximum wire speed and thresholds were set based |
4221 | * on testing data as well as attempting to minimize response time |
4222 | * while increasing bulk throughput. |
4223 | * NOTE: These calculations are only valid when operating in a single- |
4224 | * queue environment. |
4225 | */ |
4226 | static void igc_update_itr(struct igc_q_vector *q_vector, |
4227 | struct igc_ring_container *ring_container) |
4228 | { |
4229 | unsigned int packets = ring_container->total_packets; |
4230 | unsigned int bytes = ring_container->total_bytes; |
4231 | u8 itrval = ring_container->itr; |
4232 | |
4233 | /* no packets, exit with status unchanged */ |
4234 | if (packets == 0) |
4235 | return; |
4236 | |
4237 | switch (itrval) { |
4238 | case lowest_latency: |
4239 | /* handle TSO and jumbo frames */ |
4240 | if (bytes / packets > 8000) |
4241 | itrval = bulk_latency; |
4242 | else if ((packets < 5) && (bytes > 512)) |
4243 | itrval = low_latency; |
4244 | break; |
4245 | case low_latency: /* 50 usec aka 20000 ints/s */ |
4246 | if (bytes > 10000) { |
4247 | /* this if handles the TSO accounting */ |
4248 | if (bytes / packets > 8000) |
4249 | itrval = bulk_latency; |
4250 | else if ((packets < 10) || ((bytes / packets) > 1200)) |
4251 | itrval = bulk_latency; |
4252 | else if ((packets > 35)) |
4253 | itrval = lowest_latency; |
4254 | } else if (bytes / packets > 2000) { |
4255 | itrval = bulk_latency; |
4256 | } else if (packets <= 2 && bytes < 512) { |
4257 | itrval = lowest_latency; |
4258 | } |
4259 | break; |
4260 | case bulk_latency: /* 250 usec aka 4000 ints/s */ |
4261 | if (bytes > 25000) { |
4262 | if (packets > 35) |
4263 | itrval = low_latency; |
4264 | } else if (bytes < 1500) { |
4265 | itrval = low_latency; |
4266 | } |
4267 | break; |
4268 | } |
4269 | |
4270 | /* clear work counters since we have the values we need */ |
4271 | ring_container->total_bytes = 0; |
4272 | ring_container->total_packets = 0; |
4273 | |
4274 | /* write updated itr to ring container */ |
4275 | ring_container->itr = itrval; |
4276 | } |
4277 | |
4278 | static void igc_set_itr(struct igc_q_vector *q_vector) |
4279 | { |
4280 | struct igc_adapter *adapter = q_vector->adapter; |
4281 | u32 new_itr = q_vector->itr_val; |
4282 | u8 current_itr = 0; |
4283 | |
4284 | /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ |
4285 | switch (adapter->link_speed) { |
4286 | case SPEED_10: |
4287 | case SPEED_100: |
4288 | current_itr = 0; |
4289 | new_itr = IGC_4K_ITR; |
4290 | goto set_itr_now; |
4291 | default: |
4292 | break; |
4293 | } |
4294 | |
4295 | igc_update_itr(q_vector, &q_vector->tx); |
4296 | igc_update_itr(q_vector, &q_vector->rx); |
4297 | |
4298 | current_itr = max(q_vector->rx.itr, q_vector->tx.itr); |
4299 | |
4300 | /* conservative mode (itr 3) eliminates the lowest_latency setting */ |
4301 | if (current_itr == lowest_latency && |
4302 | ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || |
4303 | (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) |
4304 | current_itr = low_latency; |
4305 | |
4306 | switch (current_itr) { |
4307 | /* counts and packets in update_itr are dependent on these numbers */ |
4308 | case lowest_latency: |
4309 | new_itr = IGC_70K_ITR; /* 70,000 ints/sec */ |
4310 | break; |
4311 | case low_latency: |
4312 | new_itr = IGC_20K_ITR; /* 20,000 ints/sec */ |
4313 | break; |
4314 | case bulk_latency: |
4315 | new_itr = IGC_4K_ITR; /* 4,000 ints/sec */ |
4316 | break; |
4317 | default: |
4318 | break; |
4319 | } |
4320 | |
4321 | set_itr_now: |
4322 | if (new_itr != q_vector->itr_val) { |
4323 | /* this attempts to bias the interrupt rate towards Bulk |
4324 | * by adding intermediate steps when interrupt rate is |
4325 | * increasing |
4326 | */ |
4327 | new_itr = new_itr > q_vector->itr_val ? |
4328 | max((new_itr * q_vector->itr_val) / |
4329 | (new_itr + (q_vector->itr_val >> 2)), |
4330 | new_itr) : new_itr; |
4331 | /* Don't write the value here; it resets the adapter's |
4332 | * internal timer, and causes us to delay far longer than |
4333 | * we should between interrupts. Instead, we write the ITR |
4334 | * value at the beginning of the next interrupt so the timing |
4335 | * ends up being correct. |
4336 | */ |
4337 | q_vector->itr_val = new_itr; |
4338 | q_vector->set_itr = 1; |
4339 | } |
4340 | } |
4341 | |
4342 | static void igc_reset_interrupt_capability(struct igc_adapter *adapter) |
4343 | { |
4344 | int v_idx = adapter->num_q_vectors; |
4345 | |
4346 | if (adapter->msix_entries) { |
4347 | pci_disable_msix(adapter->pdev); |
4348 | kfree(adapter->msix_entries); |
4349 | adapter->msix_entries = NULL; |
4350 | } else if (adapter->flags & IGC_FLAG_HAS_MSI) { |
4351 | pci_disable_msi(adapter->pdev); |
4352 | } |
4353 | |
4354 | while (v_idx--) |
4355 | igc_reset_q_vector(adapter, v_idx); |
4356 | } |
4357 | |
4358 | /** |
4359 | * igc_set_interrupt_capability - set MSI or MSI-X if supported |
4360 | * @adapter: Pointer to adapter structure |
4361 | * @msix: boolean value for MSI-X capability |
4362 | * |
4363 | * Attempt to configure interrupts using the best available |
4364 | * capabilities of the hardware and kernel. |
4365 | */ |
4366 | static void igc_set_interrupt_capability(struct igc_adapter *adapter, |
4367 | bool msix) |
4368 | { |
4369 | int numvecs, i; |
4370 | int err; |
4371 | |
4372 | if (!msix) |
4373 | goto msi_only; |
4374 | adapter->flags |= IGC_FLAG_HAS_MSIX; |
4375 | |
4376 | /* Number of supported queues. */ |
4377 | adapter->num_rx_queues = adapter->rss_queues; |
4378 | |
4379 | adapter->num_tx_queues = adapter->rss_queues; |
4380 | |
4381 | /* start with one vector for every Rx queue */ |
4382 | numvecs = adapter->num_rx_queues; |
4383 | |
4384 | /* if Tx handler is separate add 1 for every Tx queue */ |
4385 | if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) |
4386 | numvecs += adapter->num_tx_queues; |
4387 | |
4388 | /* store the number of vectors reserved for queues */ |
4389 | adapter->num_q_vectors = numvecs; |
4390 | |
4391 | /* add 1 vector for link status interrupts */ |
4392 | numvecs++; |
4393 | |
4394 | adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), |
4395 | GFP_KERNEL); |
4396 | |
4397 | if (!adapter->msix_entries) |
4398 | return; |
4399 | |
4400 | /* populate entry values */ |
4401 | for (i = 0; i < numvecs; i++) |
4402 | adapter->msix_entries[i].entry = i; |
4403 | |
4404 | err = pci_enable_msix_range(adapter->pdev, |
4405 | adapter->msix_entries, |
4406 | numvecs, |
4407 | numvecs); |
4408 | if (err > 0) |
4409 | return; |
4410 | |
4411 | kfree(adapter->msix_entries); |
4412 | adapter->msix_entries = NULL; |
4413 | |
4414 | igc_reset_interrupt_capability(adapter); |
4415 | |
4416 | msi_only: |
4417 | adapter->flags &= ~IGC_FLAG_HAS_MSIX; |
4418 | |
4419 | adapter->rss_queues = 1; |
4420 | adapter->flags |= IGC_FLAG_QUEUE_PAIRS; |
4421 | adapter->num_rx_queues = 1; |
4422 | adapter->num_tx_queues = 1; |
4423 | adapter->num_q_vectors = 1; |
4424 | if (!pci_enable_msi(adapter->pdev)) |
4425 | adapter->flags |= IGC_FLAG_HAS_MSI; |
4426 | } |
4427 | |
4428 | /** |
4429 | * igc_update_ring_itr - update the dynamic ITR value based on packet size |
4430 | * @q_vector: pointer to q_vector |
4431 | * |
4432 | * Stores a new ITR value based strictly on packet size. This |
4433 | * algorithm is less sophisticated than that used in igc_update_itr, |
4434 | * due to the difficulty of synchronizing statistics across multiple |
4435 | * receive rings. The divisors and thresholds used by this function |
4436 | * were determined based on theoretical maximum wire speed and testing |
4437 | * data, in order to minimize response time while increasing bulk |
4438 | * throughput. |
4439 | * NOTE: This function is called only when operating in a multiqueue |
4440 | * receive environment. |
4441 | */ |
4442 | static void igc_update_ring_itr(struct igc_q_vector *q_vector) |
4443 | { |
4444 | struct igc_adapter *adapter = q_vector->adapter; |
4445 | int new_val = q_vector->itr_val; |
4446 | int avg_wire_size = 0; |
4447 | unsigned int packets; |
4448 | |
4449 | /* For non-gigabit speeds, just fix the interrupt rate at 4000 |
4450 | * ints/sec - ITR timer value of 120 ticks. |
4451 | */ |
4452 | switch (adapter->link_speed) { |
4453 | case SPEED_10: |
4454 | case SPEED_100: |
4455 | new_val = IGC_4K_ITR; |
4456 | goto set_itr_val; |
4457 | default: |
4458 | break; |
4459 | } |
4460 | |
4461 | packets = q_vector->rx.total_packets; |
4462 | if (packets) |
4463 | avg_wire_size = q_vector->rx.total_bytes / packets; |
4464 | |
4465 | packets = q_vector->tx.total_packets; |
4466 | if (packets) |
4467 | avg_wire_size = max_t(u32, avg_wire_size, |
4468 | q_vector->tx.total_bytes / packets); |
4469 | |
4470 | /* if avg_wire_size isn't set no work was done */ |
4471 | if (!avg_wire_size) |
4472 | goto clear_counts; |
4473 | |
4474 | /* Add 24 bytes to size to account for CRC, preamble, and gap */ |
4475 | avg_wire_size += 24; |
4476 | |
4477 | /* Don't starve jumbo frames */ |
4478 | avg_wire_size = min(avg_wire_size, 3000); |
4479 | |
4480 | /* Give a little boost to mid-size frames */ |
4481 | if (avg_wire_size > 300 && avg_wire_size < 1200) |
4482 | new_val = avg_wire_size / 3; |
4483 | else |
4484 | new_val = avg_wire_size / 2; |
4485 | |
4486 | /* conservative mode (itr 3) eliminates the lowest_latency setting */ |
4487 | if (new_val < IGC_20K_ITR && |
4488 | ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || |
4489 | (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) |
4490 | new_val = IGC_20K_ITR; |
4491 | |
4492 | set_itr_val: |
4493 | if (new_val != q_vector->itr_val) { |
4494 | q_vector->itr_val = new_val; |
4495 | q_vector->set_itr = 1; |
4496 | } |
4497 | clear_counts: |
4498 | q_vector->rx.total_bytes = 0; |
4499 | q_vector->rx.total_packets = 0; |
4500 | q_vector->tx.total_bytes = 0; |
4501 | q_vector->tx.total_packets = 0; |
4502 | } |
4503 | |
4504 | static void igc_ring_irq_enable(struct igc_q_vector *q_vector) |
4505 | { |
4506 | struct igc_adapter *adapter = q_vector->adapter; |
4507 | struct igc_hw *hw = &adapter->hw; |
4508 | |
4509 | if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || |
4510 | (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { |
4511 | if (adapter->num_q_vectors == 1) |
4512 | igc_set_itr(q_vector); |
4513 | else |
4514 | igc_update_ring_itr(q_vector); |
4515 | } |
4516 | |
4517 | if (!test_bit(__IGC_DOWN, &adapter->state)) { |
4518 | if (adapter->msix_entries) |
4519 | wr32(IGC_EIMS, q_vector->eims_value); |
4520 | else |
4521 | igc_irq_enable(adapter); |
4522 | } |
4523 | } |
4524 | |
4525 | static void igc_add_ring(struct igc_ring *ring, |
4526 | struct igc_ring_container *head) |
4527 | { |
4528 | head->ring = ring; |
4529 | head->count++; |
4530 | } |
4531 | |
4532 | /** |
4533 | * igc_cache_ring_register - Descriptor ring to register mapping |
4534 | * @adapter: board private structure to initialize |
4535 | * |
4536 | * Once we know the feature-set enabled for the device, we'll cache |
4537 | * the register offset the descriptor ring is assigned to. |
4538 | */ |
4539 | static void igc_cache_ring_register(struct igc_adapter *adapter) |
4540 | { |
4541 | int i = 0, j = 0; |
4542 | |
4543 | switch (adapter->hw.mac.type) { |
4544 | case igc_i225: |
4545 | default: |
4546 | for (; i < adapter->num_rx_queues; i++) |
4547 | adapter->rx_ring[i]->reg_idx = i; |
4548 | for (; j < adapter->num_tx_queues; j++) |
4549 | adapter->tx_ring[j]->reg_idx = j; |
4550 | break; |
4551 | } |
4552 | } |
4553 | |
4554 | /** |
4555 | * igc_poll - NAPI Rx polling callback |
4556 | * @napi: napi polling structure |
4557 | * @budget: count of how many packets we should handle |
4558 | */ |
4559 | static int igc_poll(struct napi_struct *napi, int budget) |
4560 | { |
4561 | struct igc_q_vector *q_vector = container_of(napi, |
4562 | struct igc_q_vector, |
4563 | napi); |
4564 | struct igc_ring *rx_ring = q_vector->rx.ring; |
4565 | bool clean_complete = true; |
4566 | int work_done = 0; |
4567 | |
4568 | if (q_vector->tx.ring) |
4569 | clean_complete = igc_clean_tx_irq(q_vector, budget); |
4570 | |
4571 | if (rx_ring) { |
4572 | int cleaned = rx_ring->xsk_pool ? |
4573 | igc_clean_rx_irq_zc(q_vector, budget) : |
4574 | igc_clean_rx_irq(q_vector, budget); |
4575 | |
4576 | work_done += cleaned; |
4577 | if (cleaned >= budget) |
4578 | clean_complete = false; |
4579 | } |
4580 | |
4581 | /* If all work not completed, return budget and keep polling */ |
4582 | if (!clean_complete) |
4583 | return budget; |
4584 | |
4585 | /* Exit the polling mode, but don't re-enable interrupts if stack might |
4586 | * poll us due to busy-polling |
4587 | */ |
4588 | if (likely(napi_complete_done(napi, work_done))) |
4589 | igc_ring_irq_enable(q_vector); |
4590 | |
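| /* NAPI requires a poll that called napi_complete_done() to return |
| * strictly less than the budget, hence the clamp to budget - 1. |
| */ |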
4591 | return min(work_done, budget - 1); |
4592 | } |
4593 | |
4594 | /** |
4595 | * igc_alloc_q_vector - Allocate memory for a single interrupt vector |
4596 | * @adapter: board private structure to initialize |
4597 | * @v_count: q_vectors allocated on adapter, used for ring interleaving |
4598 | * @v_idx: index of vector in adapter struct |
4599 | * @txr_count: total number of Tx rings to allocate |
4600 | * @txr_idx: index of first Tx ring to allocate |
4601 | * @rxr_count: total number of Rx rings to allocate |
4602 | * @rxr_idx: index of first Rx ring to allocate |
4603 | * |
4604 | * We allocate one q_vector. If allocation fails we return -ENOMEM. |
4605 | */ |
4606 | static int igc_alloc_q_vector(struct igc_adapter *adapter, |
4607 | unsigned int v_count, unsigned int v_idx, |
4608 | unsigned int txr_count, unsigned int txr_idx, |
4609 | unsigned int rxr_count, unsigned int rxr_idx) |
4610 | { |
4611 | struct igc_q_vector *q_vector; |
4612 | struct igc_ring *ring; |
4613 | int ring_count; |
4614 | |
4615 | /* igc only supports 1 Tx and/or 1 Rx queue per vector */ |
4616 | if (txr_count > 1 || rxr_count > 1) |
4617 | return -ENOMEM; |
4618 | |
4619 | ring_count = txr_count + rxr_count; |
4620 | |
4621 | /* allocate q_vector and rings */ |
4622 | q_vector = adapter->q_vector[v_idx]; |
4623 | if (!q_vector) |
4624 | q_vector = kzalloc(struct_size(q_vector, ring, ring_count), |
4625 | GFP_KERNEL); |
4626 | else |
4627 | memset(q_vector, 0, struct_size(q_vector, ring, ring_count)); |
4628 | if (!q_vector) |
4629 | return -ENOMEM; |
4630 | |
4631 | /* initialize NAPI */ |
4632 | netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll); |
4633 | |
4634 | /* tie q_vector and adapter together */ |
4635 | adapter->q_vector[v_idx] = q_vector; |
4636 | q_vector->adapter = adapter; |
4637 | |
4638 | /* initialize work limits */ |
4639 | q_vector->tx.work_limit = adapter->tx_work_limit; |
4640 | |
4641 | /* initialize ITR configuration */ |
4642 | q_vector->itr_register = adapter->io_addr + IGC_EITR(0); |
4643 | q_vector->itr_val = IGC_START_ITR; |
4644 | |
4645 | /* initialize pointer to rings */ |
4646 | ring = q_vector->ring; |
4647 | |
4648 | /* initialize ITR */ |
4649 | if (rxr_count) { |
4650 | /* rx or rx/tx vector */ |
4651 | if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) |
4652 | q_vector->itr_val = adapter->rx_itr_setting; |
4653 | } else { |
4654 | /* tx only vector */ |
4655 | if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) |
4656 | q_vector->itr_val = adapter->tx_itr_setting; |
4657 | } |
4658 | |
4659 | if (txr_count) { |
4660 | /* assign generic ring traits */ |
4661 | ring->dev = &adapter->pdev->dev; |
4662 | ring->netdev = adapter->netdev; |
4663 | |
4664 | /* configure backlink on ring */ |
4665 | ring->q_vector = q_vector; |
4666 | |
4667 | /* update q_vector Tx values */ |
4668 | igc_add_ring(ring, &q_vector->tx); |
4669 | |
4670 | /* apply Tx specific ring traits */ |
4671 | ring->count = adapter->tx_ring_count; |
4672 | ring->queue_index = txr_idx; |
4673 | |
4674 | /* assign ring to adapter */ |
4675 | adapter->tx_ring[txr_idx] = ring; |
4676 | |
4677 | /* push pointer to next ring */ |
4678 | ring++; |
4679 | } |
4680 | |
4681 | if (rxr_count) { |
4682 | /* assign generic ring traits */ |
4683 | ring->dev = &adapter->pdev->dev; |
4684 | ring->netdev = adapter->netdev; |
4685 | |
4686 | /* configure backlink on ring */ |
4687 | ring->q_vector = q_vector; |
4688 | |
4689 | /* update q_vector Rx values */ |
4690 | igc_add_ring(ring, &q_vector->rx); |
4691 | |
4692 | /* apply Rx specific ring traits */ |
4693 | ring->count = adapter->rx_ring_count; |
4694 | ring->queue_index = rxr_idx; |
4695 | |
4696 | /* assign ring to adapter */ |
4697 | adapter->rx_ring[rxr_idx] = ring; |
4698 | } |
4699 | |
4700 | return 0; |
4701 | } |
4702 | |
4703 | /** |
4704 | * igc_alloc_q_vectors - Allocate memory for interrupt vectors |
4705 | * @adapter: board private structure to initialize |
4706 | * |
4707 | * We allocate one q_vector per queue interrupt. If allocation fails we |
4708 | * return -ENOMEM. |
4709 | */ |
4710 | static int igc_alloc_q_vectors(struct igc_adapter *adapter) |
4711 | { |
4712 | int rxr_remaining = adapter->num_rx_queues; |
4713 | int txr_remaining = adapter->num_tx_queues; |
4714 | int rxr_idx = 0, txr_idx = 0, v_idx = 0; |
4715 | int q_vectors = adapter->num_q_vectors; |
4716 | int err; |
4717 | |
4718 | if (q_vectors >= (rxr_remaining + txr_remaining)) { |
4719 | for (; rxr_remaining; v_idx++) { |
4720 | err = igc_alloc_q_vector(adapter, q_vectors, v_idx, |
4721 | 0, 0, 1, rxr_idx); |
4722 | |
4723 | if (err) |
4724 | goto err_out; |
4725 | |
4726 | /* update counts and index */ |
4727 | rxr_remaining--; |
4728 | rxr_idx++; |
4729 | } |
4730 | } |
4731 | |
4732 | for (; v_idx < q_vectors; v_idx++) { |
4733 | int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); |
4734 | int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); |
4735 | |
4736 | err = igc_alloc_q_vector(adapter, q_vectors, v_idx, |
4737 | tqpv, txr_idx, rqpv, rxr_idx); |
4738 | |
4739 | if (err) |
4740 | goto err_out; |
4741 | |
4742 | /* update counts and index */ |
4743 | rxr_remaining -= rqpv; |
4744 | txr_remaining -= tqpv; |
4745 | rxr_idx++; |
4746 | txr_idx++; |
4747 | } |
4748 | |
4749 | return 0; |
4750 | |
4751 | err_out: |
4752 | adapter->num_tx_queues = 0; |
4753 | adapter->num_rx_queues = 0; |
4754 | adapter->num_q_vectors = 0; |
4755 | |
4756 | while (v_idx--) |
4757 | igc_free_q_vector(adapter, v_idx); |
4758 | |
4759 | return -ENOMEM; |
4760 | } |
4761 | |
4762 | /** |
4763 | * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors |
4764 | * @adapter: Pointer to adapter structure |
4765 | * @msix: boolean for MSI-X capability |
4766 | * |
4767 | * This function initializes the interrupts and allocates all of the queues. |
4768 | */ |
4769 | static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix) |
4770 | { |
4771 | struct net_device *dev = adapter->netdev; |
4772 | int err = 0; |
4773 | |
4774 | igc_set_interrupt_capability(adapter, msix); |
4775 | |
4776 | err = igc_alloc_q_vectors(adapter); |
4777 | if (err) { |
4778 | netdev_err(dev, "Unable to allocate memory for vectors\n"); |
4779 | goto err_alloc_q_vectors; |
4780 | } |
4781 | |
4782 | igc_cache_ring_register(adapter); |
4783 | |
4784 | return 0; |
4785 | |
4786 | err_alloc_q_vectors: |
4787 | igc_reset_interrupt_capability(adapter); |
4788 | return err; |
4789 | } |
4790 | |
4791 | /** |
4792 | * igc_sw_init - Initialize general software structures (struct igc_adapter) |
4793 | * @adapter: board private structure to initialize |
4794 | * |
4795 | * igc_sw_init initializes the Adapter private data structure. |
4796 | * Fields are initialized based on PCI device information and |
4797 | * OS network device settings (MTU size). |
4798 | */ |
4799 | static int igc_sw_init(struct igc_adapter *adapter) |
4800 | { |
4801 | struct net_device *netdev = adapter->netdev; |
4802 | struct pci_dev *pdev = adapter->pdev; |
4803 | struct igc_hw *hw = &adapter->hw; |
4804 | |
4805 | pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); |
4806 | |
4807 | /* set default ring sizes */ |
4808 | adapter->tx_ring_count = IGC_DEFAULT_TXD; |
4809 | adapter->rx_ring_count = IGC_DEFAULT_RXD; |
4810 | |
4811 | /* set default ITR values */ |
4812 | adapter->rx_itr_setting = IGC_DEFAULT_ITR; |
4813 | adapter->tx_itr_setting = IGC_DEFAULT_ITR; |
4814 | |
4815 | /* set default work limits */ |
4816 | adapter->tx_work_limit = IGC_DEFAULT_TX_WORK; |
4817 | |
4818 | /* adjust max frame to be at least the size of a standard frame */ |
4819 | adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + |
4820 | VLAN_HLEN; |
4821 | adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; |
4822 | |
4823 | mutex_init(&adapter->nfc_rule_lock); |
4824 | INIT_LIST_HEAD(&adapter->nfc_rule_list); |
4825 | adapter->nfc_rule_count = 0; |
4826 | |
4827 | spin_lock_init(&adapter->stats64_lock); |
4828 | spin_lock_init(&adapter->qbv_tx_lock); |
4829 | /* Assume MSI-X interrupts, will be checked during IRQ allocation */ |
4830 | adapter->flags |= IGC_FLAG_HAS_MSIX; |
4831 | |
4832 | igc_init_queue_configuration(adapter); |
4833 | |
4834 | /* This call may decrease the number of queues */ |
4835 | if (igc_init_interrupt_scheme(adapter, true)) { |
4836 | netdev_err(netdev, "Unable to allocate memory for queues\n"); |
4837 | return -ENOMEM; |
4838 | } |
4839 | |
4840 | /* Explicitly disable IRQ since the NIC can be in any state. */ |
4841 | igc_irq_disable(adapter); |
4842 | |
4843 | set_bit(__IGC_DOWN, &adapter->state); |
4844 | |
4845 | return 0; |
4846 | } |
4847 | |
4848 | /** |
4849 | * igc_up - Open the interface and prepare it to handle traffic |
4850 | * @adapter: board private structure |
4851 | */ |
4852 | void igc_up(struct igc_adapter *adapter) |
4853 | { |
4854 | struct igc_hw *hw = &adapter->hw; |
4855 | int i = 0; |
4856 | |
4857 | /* hardware has been reset, we need to reload some things */ |
4858 | igc_configure(adapter); |
4859 | |
4860 | clear_bit(__IGC_DOWN, &adapter->state); |
4861 | |
4862 | for (i = 0; i < adapter->num_q_vectors; i++) |
4863 | napi_enable(&adapter->q_vector[i]->napi); |
4864 | |
4865 | if (adapter->msix_entries) |
4866 | igc_configure_msix(adapter); |
4867 | else |
4868 | igc_assign_vector(adapter->q_vector[0], 0); |
4869 | |
4870 | /* Clear any pending interrupts. */ |
4871 | rd32(IGC_ICR); |
4872 | igc_irq_enable(adapter); |
4873 | |
4874 | netif_tx_start_all_queues(adapter->netdev); |
4875 | |
4876 | /* start the watchdog. */ |
4877 | hw->mac.get_link_status = true; |
4878 | schedule_work(&adapter->watchdog_task); |
4879 | } |
4880 | |
4881 | /** |
4882 | * igc_update_stats - Update the board statistics counters |
4883 | * @adapter: board private structure |
4884 | */ |
4885 | void igc_update_stats(struct igc_adapter *adapter) |
4886 | { |
4887 | struct rtnl_link_stats64 *net_stats = &adapter->stats64; |
4888 | struct pci_dev *pdev = adapter->pdev; |
4889 | struct igc_hw *hw = &adapter->hw; |
4890 | u64 _bytes, _packets; |
4891 | u64 bytes, packets; |
4892 | unsigned int start; |
4893 | u32 mpc; |
4894 | int i; |
4895 | |
4896 | /* Prevent stats update while adapter is being reset, or if the pci |
4897 | * connection is down. |
4898 | */ |
4899 | if (adapter->link_speed == 0) |
4900 | return; |
4901 | if (pci_channel_offline(pdev)) |
4902 | return; |
4903 | |
4904 | packets = 0; |
4905 | bytes = 0; |
4906 | |
4907 | rcu_read_lock(); |
4908 | for (i = 0; i < adapter->num_rx_queues; i++) { |
4909 | struct igc_ring *ring = adapter->rx_ring[i]; |
4910 | u32 rqdpc = rd32(IGC_RQDPC(i)); |
4911 | |
4912 | if (hw->mac.type >= igc_i225) |
4913 | wr32(IGC_RQDPC(i), 0); |
4914 | |
4915 | if (rqdpc) { |
4916 | ring->rx_stats.drops += rqdpc; |
4917 | net_stats->rx_fifo_errors += rqdpc; |
4918 | } |
4919 | |
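| /* The u64_stats fetch/retry pair guards against torn reads of the |
| * 64-bit byte and packet counters on 32-bit hosts; it is a no-op on |
| * 64-bit builds. |
| */ |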
4920 | do { |
4921 | start = u64_stats_fetch_begin(&ring->rx_syncp); |
4922 | _bytes = ring->rx_stats.bytes; |
4923 | _packets = ring->rx_stats.packets; |
4924 | } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); |
4925 | bytes += _bytes; |
4926 | packets += _packets; |
4927 | } |
4928 | |
4929 | net_stats->rx_bytes = bytes; |
4930 | net_stats->rx_packets = packets; |
4931 | |
4932 | packets = 0; |
4933 | bytes = 0; |
4934 | for (i = 0; i < adapter->num_tx_queues; i++) { |
4935 | struct igc_ring *ring = adapter->tx_ring[i]; |
4936 | |
4937 | do { |
4938 | start = u64_stats_fetch_begin(&ring->tx_syncp); |
4939 | _bytes = ring->tx_stats.bytes; |
4940 | _packets = ring->tx_stats.packets; |
4941 | } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); |
4942 | bytes += _bytes; |
4943 | packets += _packets; |
4944 | } |
4945 | net_stats->tx_bytes = bytes; |
4946 | net_stats->tx_packets = packets; |
4947 | rcu_read_unlock(); |
4948 | |
4949 | /* read stats registers */ |
4950 | adapter->stats.crcerrs += rd32(IGC_CRCERRS); |
4951 | adapter->stats.gprc += rd32(IGC_GPRC); |
4952 | adapter->stats.gorc += rd32(IGC_GORCL); |
4953 | rd32(IGC_GORCH); /* clear GORCL */ |
4954 | adapter->stats.bprc += rd32(IGC_BPRC); |
4955 | adapter->stats.mprc += rd32(IGC_MPRC); |
4956 | adapter->stats.roc += rd32(IGC_ROC); |
4957 | |
4958 | adapter->stats.prc64 += rd32(IGC_PRC64); |
4959 | adapter->stats.prc127 += rd32(IGC_PRC127); |
4960 | adapter->stats.prc255 += rd32(IGC_PRC255); |
4961 | adapter->stats.prc511 += rd32(IGC_PRC511); |
4962 | adapter->stats.prc1023 += rd32(IGC_PRC1023); |
4963 | adapter->stats.prc1522 += rd32(IGC_PRC1522); |
4964 | adapter->stats.tlpic += rd32(IGC_TLPIC); |
4965 | adapter->stats.rlpic += rd32(IGC_RLPIC); |
4966 | adapter->stats.hgptc += rd32(IGC_HGPTC); |
4967 | |
4968 | mpc = rd32(IGC_MPC); |
4969 | adapter->stats.mpc += mpc; |
4970 | net_stats->rx_fifo_errors += mpc; |
4971 | adapter->stats.scc += rd32(IGC_SCC); |
4972 | adapter->stats.ecol += rd32(IGC_ECOL); |
4973 | adapter->stats.mcc += rd32(IGC_MCC); |
4974 | adapter->stats.latecol += rd32(IGC_LATECOL); |
4975 | adapter->stats.dc += rd32(IGC_DC); |
4976 | adapter->stats.rlec += rd32(IGC_RLEC); |
4977 | adapter->stats.xonrxc += rd32(IGC_XONRXC); |
4978 | adapter->stats.xontxc += rd32(IGC_XONTXC); |
4979 | adapter->stats.xoffrxc += rd32(IGC_XOFFRXC); |
4980 | adapter->stats.xofftxc += rd32(IGC_XOFFTXC); |
4981 | adapter->stats.fcruc += rd32(IGC_FCRUC); |
4982 | adapter->stats.gptc += rd32(IGC_GPTC); |
4983 | adapter->stats.gotc += rd32(IGC_GOTCL); |
4984 | rd32(IGC_GOTCH); /* clear GOTCL */ |
4985 | adapter->stats.rnbc += rd32(IGC_RNBC); |
4986 | adapter->stats.ruc += rd32(IGC_RUC); |
4987 | adapter->stats.rfc += rd32(IGC_RFC); |
4988 | adapter->stats.rjc += rd32(IGC_RJC); |
4989 | adapter->stats.tor += rd32(IGC_TORH); |
4990 | adapter->stats.tot += rd32(IGC_TOTH); |
4991 | adapter->stats.tpr += rd32(IGC_TPR); |
4992 | |
4993 | adapter->stats.ptc64 += rd32(IGC_PTC64); |
4994 | adapter->stats.ptc127 += rd32(IGC_PTC127); |
4995 | adapter->stats.ptc255 += rd32(IGC_PTC255); |
4996 | adapter->stats.ptc511 += rd32(IGC_PTC511); |
4997 | adapter->stats.ptc1023 += rd32(IGC_PTC1023); |
4998 | adapter->stats.ptc1522 += rd32(IGC_PTC1522); |
4999 | |
5000 | adapter->stats.mptc += rd32(IGC_MPTC); |
5001 | adapter->stats.bptc += rd32(IGC_BPTC); |
5002 | |
5003 | adapter->stats.tpt += rd32(IGC_TPT); |
5004 | adapter->stats.colc += rd32(IGC_COLC); |
5005 | adapter->stats.colc += rd32(IGC_RERC); |
5006 | |
5007 | adapter->stats.algnerrc += rd32(IGC_ALGNERRC); |
5008 | |
5009 | adapter->stats.tsctc += rd32(IGC_TSCTC); |
5010 | |
5011 | adapter->stats.iac += rd32(IGC_IAC); |
5012 | |
5013 | /* Fill out the OS statistics structure */ |
5014 | net_stats->multicast = adapter->stats.mprc; |
5015 | net_stats->collisions = adapter->stats.colc; |
5016 | |
5017 | /* Rx Errors */ |
5018 | |
5019 | /* RLEC on some newer hardware can be incorrect so build |
5020 | * our own version based on RUC and ROC |
5021 | */ |
5022 | net_stats->rx_errors = adapter->stats.rxerrc + |
5023 | adapter->stats.crcerrs + adapter->stats.algnerrc + |
5024 | adapter->stats.ruc + adapter->stats.roc + |
5025 | adapter->stats.cexterr; |
5026 | net_stats->rx_length_errors = adapter->stats.ruc + |
5027 | adapter->stats.roc; |
5028 | net_stats->rx_crc_errors = adapter->stats.crcerrs; |
5029 | net_stats->rx_frame_errors = adapter->stats.algnerrc; |
5030 | net_stats->rx_missed_errors = adapter->stats.mpc; |
5031 | |
5032 | /* Tx Errors */ |
5033 | net_stats->tx_errors = adapter->stats.ecol + |
5034 | adapter->stats.latecol; |
5035 | net_stats->tx_aborted_errors = adapter->stats.ecol; |
5036 | net_stats->tx_window_errors = adapter->stats.latecol; |
5037 | net_stats->tx_carrier_errors = adapter->stats.tncrs; |
5038 | |
5039 | /* Tx Dropped */ |
5040 | net_stats->tx_dropped = adapter->stats.txdrop; |
5041 | |
5042 | /* Management Stats */ |
5043 | adapter->stats.mgptc += rd32(IGC_MGTPTC); |
5044 | adapter->stats.mgprc += rd32(IGC_MGTPRC); |
5045 | adapter->stats.mgpdc += rd32(IGC_MGTPDC); |
5046 | } |
5047 | |
5048 | /** |
5049 | * igc_down - Close the interface |
5050 | * @adapter: board private structure |
5051 | */ |
5052 | void igc_down(struct igc_adapter *adapter) |
5053 | { |
5054 | struct net_device *netdev = adapter->netdev; |
5055 | struct igc_hw *hw = &adapter->hw; |
5056 | u32 tctl, rctl; |
5057 | int i = 0; |
5058 | |
5059 | set_bit(__IGC_DOWN, &adapter->state); |
5060 | |
5061 | igc_ptp_suspend(adapter); |
5062 | |
5063 | if (pci_device_is_present(adapter->pdev)) { |
5064 | /* disable receives in the hardware */ |
5065 | rctl = rd32(IGC_RCTL); |
5066 | wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN); |
5067 | /* flush and sleep below */ |
5068 | } |
5069 | /* set trans_start so we don't get spurious watchdogs during reset */ |
5070 | netif_trans_update(netdev); |
5071 | |
5072 | netif_carrier_off(netdev); |
5073 | netif_tx_stop_all_queues(netdev); |
5074 | |
5075 | if (pci_device_is_present(adapter->pdev)) { |
5076 | /* disable transmits in the hardware */ |
5077 | tctl = rd32(IGC_TCTL); |
5078 | tctl &= ~IGC_TCTL_EN; |
5079 | wr32(IGC_TCTL, tctl); |
5080 | /* flush both disables and wait for them to finish */ |
5081 | wrfl(); |
5082 | usleep_range(10000, 20000); |
5083 | |
5084 | igc_irq_disable(adapter); |
5085 | } |
5086 | |
5087 | adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; |
5088 | |
5089 | for (i = 0; i < adapter->num_q_vectors; i++) { |
5090 | if (adapter->q_vector[i]) { |
5091 | napi_synchronize(&adapter->q_vector[i]->napi); |
5092 | napi_disable(&adapter->q_vector[i]->napi); |
5093 | } |
5094 | } |
5095 | |
5096 | del_timer_sync(&adapter->watchdog_timer); |
5097 | del_timer_sync(&adapter->phy_info_timer); |
5098 | |
5099 | /* record the stats before reset */ |
5100 | spin_lock(&adapter->stats64_lock); |
5101 | igc_update_stats(adapter); |
5102 | spin_unlock(&adapter->stats64_lock); |
5103 | |
5104 | adapter->link_speed = 0; |
5105 | adapter->link_duplex = 0; |
5106 | |
5107 | if (!pci_channel_offline(adapter->pdev)) |
5108 | igc_reset(adapter); |
5109 | |
5110 | /* clear VLAN promisc flag so VFTA will be updated if necessary */ |
5111 | adapter->flags &= ~IGC_FLAG_VLAN_PROMISC; |
5112 | |
5113 | igc_disable_all_tx_rings_hw(adapter); |
5114 | igc_clean_all_tx_rings(adapter); |
5115 | igc_clean_all_rx_rings(adapter); |
5116 | } |
5117 | |
5118 | void igc_reinit_locked(struct igc_adapter *adapter) |
5119 | { |
5120 | while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) |
5121 | usleep_range(1000, 2000); |
5122 | igc_down(adapter); |
5123 | igc_up(adapter); |
5124 | clear_bit(__IGC_RESETTING, &adapter->state); |
5125 | } |
5126 | |
5127 | static void igc_reset_task(struct work_struct *work) |
5128 | { |
5129 | struct igc_adapter *adapter; |
5130 | |
5131 | adapter = container_of(work, struct igc_adapter, reset_task); |
5132 | |
5133 | rtnl_lock(); |
5134 | /* If we're already down or resetting, just bail */ |
5135 | if (test_bit(__IGC_DOWN, &adapter->state) || |
5136 | test_bit(__IGC_RESETTING, &adapter->state)) { |
5137 | rtnl_unlock(); |
5138 | return; |
5139 | } |
5140 | |
5141 | igc_rings_dump(adapter); |
5142 | igc_regs_dump(adapter); |
5143 | netdev_err(adapter->netdev, "Reset adapter\n"); |
5144 | igc_reinit_locked(adapter); |
5145 | rtnl_unlock(); |
5146 | } |
5147 | |
5148 | /** |
5149 | * igc_change_mtu - Change the Maximum Transfer Unit |
5150 | * @netdev: network interface device structure |
5151 | * @new_mtu: new value for maximum frame size |
5152 | * |
5153 | * Returns 0 on success, negative on failure |
5154 | */ |
5155 | static int igc_change_mtu(struct net_device *netdev, int new_mtu) |
5156 | { |
5157 | int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; |
5158 | struct igc_adapter *adapter = netdev_priv(netdev); |
5159 | |
5160 | if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) { |
5161 | netdev_dbg(netdev, "Jumbo frames not supported with XDP"); |
5162 | return -EINVAL; |
5163 | } |
5164 | |
5165 | /* adjust max frame to be at least the size of a standard frame */ |
5166 | if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) |
5167 | max_frame = ETH_FRAME_LEN + ETH_FCS_LEN; |
5168 | |
5169 | while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) |
5170 | usleep_range(1000, 2000); |
5171 | |
5172 | /* igc_down has a dependency on max_frame_size */ |
5173 | adapter->max_frame_size = max_frame; |
5174 | |
5175 | if (netif_running(netdev)) |
5176 | igc_down(adapter); |
5177 | |
5178 | netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); |
5179 | netdev->mtu = new_mtu; |
5180 | |
5181 | if (netif_running(netdev)) |
5182 | igc_up(adapter); |
5183 | else |
5184 | igc_reset(adapter); |
5185 | |
5186 | clear_bit(__IGC_RESETTING, &adapter->state); |
5187 | |
5188 | return 0; |
5189 | } |
5190 | |
5191 | /** |
5192 | * igc_tx_timeout - Respond to a Tx Hang |
5193 | * @netdev: network interface device structure |
5194 | * @txqueue: queue number that timed out |
5195 | **/ |
5196 | static void igc_tx_timeout(struct net_device *netdev, |
5197 | unsigned int __always_unused txqueue) |
5198 | { |
5199 | struct igc_adapter *adapter = netdev_priv(netdev); |
5200 | struct igc_hw *hw = &adapter->hw; |
5201 | |
5202 | /* Do the reset outside of interrupt context */ |
5203 | adapter->tx_timeout_count++; |
5204 | schedule_work(&adapter->reset_task); |
5205 | wr32(IGC_EICS, |
5206 | (adapter->eims_enable_mask & ~adapter->eims_other)); |
5207 | } |
5208 | |
5209 | /** |
5210 | * igc_get_stats64 - Get System Network Statistics |
5211 | * @netdev: network interface device structure |
5212 | * @stats: rtnl_link_stats64 pointer |
5213 | * |
5214 | * Returns the address of the device statistics structure. |
5215 | * The statistics are updated here and also from the timer callback. |
5216 | */ |
5217 | static void igc_get_stats64(struct net_device *netdev, |
5218 | struct rtnl_link_stats64 *stats) |
5219 | { |
5220 | struct igc_adapter *adapter = netdev_priv(netdev); |
5221 | |
5222 | spin_lock(&adapter->stats64_lock); |
5223 | if (!test_bit(__IGC_RESETTING, &adapter->state)) |
5224 | igc_update_stats(adapter); |
5225 | memcpy(stats, &adapter->stats64, sizeof(*stats)); |
5226 | spin_unlock(&adapter->stats64_lock); |
5227 | } |
5228 | |
5229 | static netdev_features_t igc_fix_features(struct net_device *netdev, |
5230 | netdev_features_t features) |
5231 | { |
5232 | /* Since there is no support for separate Rx/Tx vlan accel |
5233 | * enable/disable make sure Tx flag is always in same state as Rx. |
5234 | */ |
5235 | if (features & NETIF_F_HW_VLAN_CTAG_RX) |
5236 | features |= NETIF_F_HW_VLAN_CTAG_TX; |
5237 | else |
5238 | features &= ~NETIF_F_HW_VLAN_CTAG_TX; |
5239 | |
5240 | return features; |
5241 | } |
5242 | |
5243 | static int igc_set_features(struct net_device *netdev, |
5244 | netdev_features_t features) |
5245 | { |
5246 | netdev_features_t changed = netdev->features ^ features; |
5247 | struct igc_adapter *adapter = netdev_priv(netdev); |
5248 | |
5249 | if (changed & NETIF_F_HW_VLAN_CTAG_RX) |
5250 | igc_vlan_mode(netdev, features); |
5251 | |
5252 | /* Only RXALL and NTUPLE changes require the reinit/reset path below */ |
5253 | if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) |
5254 | return 0; |
5255 | |
5256 | if (!(features & NETIF_F_NTUPLE)) |
5257 | igc_flush_nfc_rules(adapter); |
5258 | |
5259 | netdev->features = features; |
5260 | |
5261 | if (netif_running(netdev)) |
5262 | igc_reinit_locked(adapter); |
5263 | else |
5264 | igc_reset(adapter); |
5265 | |
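| /* Returning a positive value tells the netdev core that the driver has |
| * already updated netdev->features itself, so the core skips its own |
| * assignment of the requested feature set. |
| */ |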
5266 | return 1; |
5267 | } |
5268 | |
5269 | static netdev_features_t |
5270 | igc_features_check(struct sk_buff *skb, struct net_device *dev, |
5271 | netdev_features_t features) |
5272 | { |
5273 | unsigned int network_hdr_len, mac_hdr_len; |
5274 | |
5275 | /* Make certain the headers can be described by a context descriptor */ |
5276 | mac_hdr_len = skb_network_offset(skb); |
5277 | if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN)) |
5278 | return features & ~(NETIF_F_HW_CSUM | |
5279 | NETIF_F_SCTP_CRC | |
5280 | NETIF_F_HW_VLAN_CTAG_TX | |
5281 | NETIF_F_TSO | |
5282 | NETIF_F_TSO6); |
5283 | |
5284 | network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); |
5285 | if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN)) |
5286 | return features & ~(NETIF_F_HW_CSUM | |
5287 | NETIF_F_SCTP_CRC | |
5288 | NETIF_F_TSO | |
5289 | NETIF_F_TSO6); |
5290 | |
5291 | /* We can only support IPv4 TSO in tunnels if we can mangle the |
5292 | * inner IP ID field, so strip TSO if MANGLEID is not supported. |
5293 | */ |
5294 | if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) |
5295 | features &= ~NETIF_F_TSO; |
5296 | |
5297 | return features; |
5298 | } |
5299 | |
5300 | static void igc_tsync_interrupt(struct igc_adapter *adapter) |
5301 | { |
5302 | struct igc_hw *hw = &adapter->hw; |
5303 | u32 tsauxc, sec, nsec, tsicr; |
5304 | struct ptp_clock_event event; |
5305 | struct timespec64 ts; |
5306 | |
5307 | tsicr = rd32(IGC_TSICR); |
5308 | |
5309 | if (tsicr & IGC_TSICR_SYS_WRAP) { |
5310 | event.type = PTP_CLOCK_PPS; |
5311 | if (adapter->ptp_caps.pps) |
5312 | ptp_clock_event(adapter->ptp_clock, &event); |
5313 | } |
5314 | |
5315 | if (tsicr & IGC_TSICR_TXTS) { |
5316 | /* retrieve hardware timestamp */ |
5317 | igc_ptp_tx_tstamp_event(adapter); |
5318 | } |
5319 | |
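| /* Target time 0/1 interrupts: re-arm the next periodic output pulse by |
| * advancing the stored start time by one period and re-enabling the |
| * corresponding timer in TSAUXC. |
| */ |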
5320 | if (tsicr & IGC_TSICR_TT0) { |
5321 | spin_lock(&adapter->tmreg_lock); |
5322 | ts = timespec64_add(adapter->perout[0].start, |
5323 | adapter->perout[0].period); |
5324 | wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); |
5325 | wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec); |
5326 | tsauxc = rd32(IGC_TSAUXC); |
5327 | tsauxc |= IGC_TSAUXC_EN_TT0; |
5328 | wr32(IGC_TSAUXC, tsauxc); |
5329 | adapter->perout[0].start = ts; |
5330 | spin_unlock(&adapter->tmreg_lock); |
5331 | } |
5332 | |
5333 | if (tsicr & IGC_TSICR_TT1) { |
5334 | spin_lock(&adapter->tmreg_lock); |
5335 | ts = timespec64_add(adapter->perout[1].start, |
5336 | adapter->perout[1].period); |
5337 | wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0); |
5338 | wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec); |
5339 | tsauxc = rd32(IGC_TSAUXC); |
5340 | tsauxc |= IGC_TSAUXC_EN_TT1; |
5341 | wr32(IGC_TSAUXC, tsauxc); |
5342 | adapter->perout[1].start = ts; |
5343 | spin_unlock(&adapter->tmreg_lock); |
5344 | } |
5345 | |
5346 | if (tsicr & IGC_TSICR_AUTT0) { |
5347 | nsec = rd32(IGC_AUXSTMPL0); |
5348 | sec = rd32(IGC_AUXSTMPH0); |
5349 | event.type = PTP_CLOCK_EXTTS; |
5350 | event.index = 0; |
5351 | event.timestamp = sec * NSEC_PER_SEC + nsec; |
5352 | ptp_clock_event(adapter->ptp_clock, &event); |
5353 | } |
5354 | |
5355 | if (tsicr & IGC_TSICR_AUTT1) { |
5356 | nsec = rd32(IGC_AUXSTMPL1); |
5357 | sec = rd32(IGC_AUXSTMPH1); |
5358 | event.type = PTP_CLOCK_EXTTS; |
5359 | event.index = 1; |
5360 | event.timestamp = sec * NSEC_PER_SEC + nsec; |
5361 | ptp_clock_event(adapter->ptp_clock, &event); |
5362 | } |
5363 | } |
5364 | |
5365 | /** |
5366 | * igc_msix_other - msix other interrupt handler |
5367 | * @irq: interrupt number |
5368 | * @data: pointer to a q_vector |
5369 | */ |
5370 | static irqreturn_t igc_msix_other(int irq, void *data) |
5371 | { |
5372 | struct igc_adapter *adapter = data; |
5373 | struct igc_hw *hw = &adapter->hw; |
5374 | u32 icr = rd32(IGC_ICR); |
5375 | |
5376 | /* reading ICR causes bit 31 of EICR to be cleared */ |
5377 | if (icr & IGC_ICR_DRSTA) |
5378 | schedule_work(&adapter->reset_task); |
5379 | |
5380 | if (icr & IGC_ICR_DOUTSYNC) { |
5381 | /* HW is reporting DMA is out of sync */ |
5382 | adapter->stats.doosync++; |
5383 | } |
5384 | |
5385 | if (icr & IGC_ICR_LSC) { |
5386 | hw->mac.get_link_status = true; |
5387 | /* guard against interrupt when we're going down */ |
5388 | if (!test_bit(__IGC_DOWN, &adapter->state)) |
5389 | mod_timer(&adapter->watchdog_timer, jiffies + 1); |
5390 | } |
5391 | |
5392 | if (icr & IGC_ICR_TS) |
5393 | igc_tsync_interrupt(adapter); |
5394 | |
5395 | wr32(IGC_EIMS, adapter->eims_other); |
5396 | |
5397 | return IRQ_HANDLED; |
5398 | } |
5399 | |
5400 | static void igc_write_itr(struct igc_q_vector *q_vector) |
5401 | { |
5402 | u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK; |
5403 | |
5404 | if (!q_vector->set_itr) |
5405 | return; |
5406 | |
5407 | if (!itr_val) |
5408 | itr_val = IGC_ITR_VAL_MASK; |
5409 | |
5410 | itr_val |= IGC_EITR_CNT_IGNR; |
5411 | |
5412 | writel(itr_val, q_vector->itr_register); |
5413 | q_vector->set_itr = 0; |
5414 | } |
5415 | |
5416 | static irqreturn_t igc_msix_ring(int irq, void *data) |
5417 | { |
5418 | struct igc_q_vector *q_vector = data; |
5419 | |
5420 | /* Write the ITR value calculated from the previous interrupt. */ |
5421 | igc_write_itr(q_vector); |
5422 | |
5423 | napi_schedule(&q_vector->napi); |
5424 | |
5425 | return IRQ_HANDLED; |
5426 | } |
5427 | |
5428 | /** |
5429 | * igc_request_msix - Initialize MSI-X interrupts |
5430 | * @adapter: Pointer to adapter structure |
5431 | * |
5432 | * igc_request_msix allocates MSI-X vectors and requests interrupts from the |
5433 | * kernel. |
5434 | */ |
5435 | static int igc_request_msix(struct igc_adapter *adapter) |
5436 | { |
5437 | unsigned int num_q_vectors = adapter->num_q_vectors; |
5438 | int i = 0, err = 0, vector = 0, free_vector = 0; |
5439 | struct net_device *netdev = adapter->netdev; |
5440 | |
5441 | err = request_irq(adapter->msix_entries[vector].vector, |
5442 | &igc_msix_other, 0, netdev->name, adapter); |
5443 | if (err) |
5444 | goto err_out; |
5445 | |
5446 | if (num_q_vectors > MAX_Q_VECTORS) { |
5447 | num_q_vectors = MAX_Q_VECTORS; |
5448 | dev_warn(&adapter->pdev->dev, |
5449 | "The number of queue vectors (%d) is higher than max allowed (%d)\n", |
5450 | adapter->num_q_vectors, MAX_Q_VECTORS); |
5451 | } |
5452 | for (i = 0; i < num_q_vectors; i++) { |
5453 | struct igc_q_vector *q_vector = adapter->q_vector[i]; |
5454 | |
5455 | vector++; |
5456 | |
5457 | q_vector->itr_register = adapter->io_addr + IGC_EITR(vector); |
5458 | |
5459 | if (q_vector->rx.ring && q_vector->tx.ring) |
5460 | sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, |
5461 | q_vector->rx.ring->queue_index); |
5462 | else if (q_vector->tx.ring) |
5463 | sprintf(q_vector->name, "%s-tx-%u", netdev->name, |
5464 | q_vector->tx.ring->queue_index); |
5465 | else if (q_vector->rx.ring) |
5466 | sprintf(q_vector->name, "%s-rx-%u", netdev->name, |
5467 | q_vector->rx.ring->queue_index); |
5468 | else |
5469 | sprintf(q_vector->name, "%s-unused", netdev->name); |
5470 | |
5471 | err = request_irq(adapter->msix_entries[vector].vector, |
5472 | igc_msix_ring, 0, q_vector->name, |
5473 | q_vector); |
5474 | if (err) |
5475 | goto err_free; |
5476 | } |
5477 | |
5478 | igc_configure_msix(adapter); |
5479 | return 0; |
5480 | |
5481 | err_free: |
5482 | /* free already assigned IRQs */ |
5483 | free_irq(adapter->msix_entries[free_vector++].vector, adapter); |
5484 | |
5485 | vector--; |
5486 | for (i = 0; i < vector; i++) { |
5487 | free_irq(adapter->msix_entries[free_vector++].vector, |
5488 | adapter->q_vector[i]); |
5489 | } |
5490 | err_out: |
5491 | return err; |
5492 | } |
5493 | |
5494 | /** |
5495 | * igc_clear_interrupt_scheme - reset the device to a state of no interrupts |
5496 | * @adapter: Pointer to adapter structure |
5497 | * |
5498 | * This function resets the device so that it has 0 rx queues, tx queues, and |
5499 | * MSI-X interrupts allocated. |
5500 | */ |
5501 | static void igc_clear_interrupt_scheme(struct igc_adapter *adapter) |
5502 | { |
5503 | igc_free_q_vectors(adapter); |
5504 | igc_reset_interrupt_capability(adapter); |
5505 | } |
5506 | |
5507 | /* Need to wait a few seconds after link up to get diagnostic information from |
5508 | * the phy |
5509 | */ |
5510 | static void igc_update_phy_info(struct timer_list *t) |
5511 | { |
5512 | struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer); |
5513 | |
5514 | igc_get_phy_info(&adapter->hw); |
5515 | } |
5516 | |
5517 | /** |
5518 | * igc_has_link - check shared code for link and determine up/down |
5519 | * @adapter: pointer to driver private info |
5520 | */ |
5521 | bool igc_has_link(struct igc_adapter *adapter) |
5522 | { |
5523 | struct igc_hw *hw = &adapter->hw; |
5524 | bool link_active = false; |
5525 | |
5526 | /* get_link_status is set on LSC (link status change) interrupt or |
5527 | * rx sequence error interrupt. It stays set until |
5528 | * igc_check_for_link() re-establishes link; this applies to |
5529 | * copper adapters only. |
5530 | */ |
5531 | if (!hw->mac.get_link_status) |
5532 | return true; |
5533 | hw->mac.ops.check_for_link(hw); |
5534 | link_active = !hw->mac.get_link_status; |
5535 | |
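| /* On i225, once the carrier is up, note the time and set |
| * IGC_FLAG_NEED_LINK_UPDATE; the watchdog then keeps reporting link |
| * down for up to a second so a fresh link change must persist before |
| * it is acted upon. The flag is cleared again when carrier goes off. |
| */ |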
5536 | if (hw->mac.type == igc_i225) { |
5537 | if (!netif_carrier_ok(adapter->netdev)) { |
5538 | adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; |
5539 | } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) { |
5540 | adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE; |
5541 | adapter->link_check_timeout = jiffies; |
5542 | } |
5543 | } |
5544 | |
5545 | return link_active; |
5546 | } |
5547 | |
5548 | /** |
5549 | * igc_watchdog - Timer Call-back |
5550 | * @t: timer for the watchdog |
5551 | */ |
5552 | static void igc_watchdog(struct timer_list *t) |
5553 | { |
5554 | struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer); |
5555 | /* Do the rest outside of interrupt context */ |
5556 | schedule_work(&adapter->watchdog_task); |
5557 | } |
5558 | |
5559 | static void igc_watchdog_task(struct work_struct *work) |
5560 | { |
5561 | struct igc_adapter *adapter = container_of(work, |
5562 | struct igc_adapter, |
5563 | watchdog_task); |
5564 | struct net_device *netdev = adapter->netdev; |
5565 | struct igc_hw *hw = &adapter->hw; |
5566 | struct igc_phy_info *phy = &hw->phy; |
5567 | u16 phy_data, retry_count = 20; |
5568 | u32 link; |
5569 | int i; |
5570 | |
5571 | link = igc_has_link(adapter); |
5572 | |
5573 | if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) { |
5574 | if (time_after(jiffies, (adapter->link_check_timeout + HZ))) |
5575 | adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; |
5576 | else |
5577 | link = false; |
5578 | } |
5579 | |
5580 | if (link) { |
5581 | /* Cancel scheduled suspend requests. */ |
5582 | pm_runtime_resume(netdev->dev.parent); |
5583 | |
5584 | if (!netif_carrier_ok(netdev)) { |
5585 | u32 ctrl; |
5586 | |
5587 | hw->mac.ops.get_speed_and_duplex(hw, |
5588 | &adapter->link_speed, |
5589 | &adapter->link_duplex); |
5590 | |
5591 | ctrl = rd32(IGC_CTRL); |
5592 | /* Link status message must follow this format */ |
5593 | netdev_info(netdev, |
5594 | "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", |
5595 | adapter->link_speed, |
5596 | adapter->link_duplex == FULL_DUPLEX ? |
5597 | "Full": "Half", |
5598 | (ctrl & IGC_CTRL_TFCE) && |
5599 | (ctrl & IGC_CTRL_RFCE) ? "RX/TX": |
5600 | (ctrl & IGC_CTRL_RFCE) ? "RX": |
5601 | (ctrl & IGC_CTRL_TFCE) ? "TX": "None"); |
5602 | |
5603 | /* disable EEE if enabled */ |
5604 | if ((adapter->flags & IGC_FLAG_EEE) && |
5605 | adapter->link_duplex == HALF_DUPLEX) { |
5606 | netdev_info(netdev, |
5607 | "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n"); |
5608 | adapter->hw.dev_spec._base.eee_enable = false; |
5609 | adapter->flags &= ~IGC_FLAG_EEE; |
5610 | } |
5611 | |
5612 | /* check if SmartSpeed worked */ |
5613 | igc_check_downshift(hw); |
5614 | if (phy->speed_downgraded) |
5615 | netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n"); |
5616 | |
5617 | /* adjust timeout factor according to speed/duplex */ |
5618 | adapter->tx_timeout_factor = 1; |
5619 | switch (adapter->link_speed) { |
5620 | case SPEED_10: |
5621 | adapter->tx_timeout_factor = 14; |
5622 | break; |
5623 | case SPEED_100: |
5624 | case SPEED_1000: |
5625 | case SPEED_2500: |
5626 | adapter->tx_timeout_factor = 1; |
5627 | break; |
5628 | } |
5629 | |
5630 | /* Once the launch time has been set on the wire, there |
5631 | * is a delay before the link speed can be determined |
5632 | * based on link-up activity. Write into the register |
5633 | * as soon as we know the correct link speed. |
5634 | */ |
5635 | igc_tsn_adjust_txtime_offset(adapter); |
5636 | |
5637 | if (adapter->link_speed != SPEED_1000) |
5638 | goto no_wait; |
5639 | |
5640 | /* wait for Remote receiver status OK */ |
5641 | retry_read_status: |
5642 | if (!igc_read_phy_reg(hw, PHY_1000T_STATUS, |
5643 | &phy_data)) { |
5644 | if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) && |
5645 | retry_count) { |
5646 | msleep(100); |
5647 | retry_count--; |
5648 | goto retry_read_status; |
5649 | } else if (!retry_count) { |
5650 | netdev_err(dev: netdev, format: "exceed max 2 second\n"); |
5651 | } |
5652 | } else { |
5653 | netdev_err(dev: netdev, format: "read 1000Base-T Status Reg\n"); |
5654 | } |
5655 | no_wait: |
5656 | netif_carrier_on(netdev); |
5657 | |
5658 | /* link state has changed, schedule phy info update */ |
5659 | if (!test_bit(__IGC_DOWN, &adapter->state)) |
5660 | mod_timer(&adapter->phy_info_timer, |
5661 | round_jiffies(jiffies + 2 * HZ)); |
5662 | } |
5663 | } else { |
5664 | if (netif_carrier_ok(netdev)) { |
5665 | adapter->link_speed = 0; |
5666 | adapter->link_duplex = 0; |
5667 | |
5668 | /* Link status message must follow this format */ |
5669 | netdev_info(netdev, "NIC Link is Down\n"); |
5670 | netif_carrier_off(netdev); |
5671 | |
5672 | /* link state has changed, schedule phy info update */ |
5673 | if (!test_bit(__IGC_DOWN, &adapter->state)) |
5674 | mod_timer(&adapter->phy_info_timer, |
5675 | round_jiffies(jiffies + 2 * HZ)); |
5676 | |
5677 | pm_schedule_suspend(netdev->dev.parent, |
5678 | MSEC_PER_SEC * 5); |
5679 | } |
5680 | } |
5681 | |
5682 | spin_lock(&adapter->stats64_lock); |
5683 | igc_update_stats(adapter); |
5684 | spin_unlock(&adapter->stats64_lock); |
5685 | |
5686 | for (i = 0; i < adapter->num_tx_queues; i++) { |
5687 | struct igc_ring *tx_ring = adapter->tx_ring[i]; |
5688 | |
5689 | if (!netif_carrier_ok(netdev)) { |
5690 | /* We've lost link, so the controller stops DMA, |
5691 | * but we've got queued Tx work that's never going |
5692 | * to get done, so reset controller to flush Tx. |
5693 | * (Do the reset outside of interrupt context). |
5694 | */ |
5695 | if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) { |
5696 | adapter->tx_timeout_count++; |
5697 | schedule_work(&adapter->reset_task); |
5698 | /* return immediately since reset is imminent */ |
5699 | return; |
5700 | } |
5701 | } |
5702 | |
5703 | /* Force detection of hung controller every watchdog period */ |
5704 | set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); |
5705 | } |
5706 | |
5707 | /* Cause software interrupt to ensure Rx ring is cleaned */ |
5708 | if (adapter->flags & IGC_FLAG_HAS_MSIX) { |
5709 | u32 eics = 0; |
5710 | |
5711 | for (i = 0; i < adapter->num_q_vectors; i++) |
5712 | eics |= adapter->q_vector[i]->eims_value; |
5713 | wr32(IGC_EICS, eics); |
5714 | } else { |
5715 | wr32(IGC_ICS, IGC_ICS_RXDMT0); |
5716 | } |
5717 | |
5718 | igc_ptp_tx_hang(adapter); |
5719 | |
5720 | /* Reset the timer */ |
5721 | if (!test_bit(__IGC_DOWN, &adapter->state)) { |
5722 | if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) |
5723 | mod_timer(&adapter->watchdog_timer, |
5724 | round_jiffies(jiffies + HZ)); |
5725 | else |
5726 | mod_timer(&adapter->watchdog_timer, |
5727 | round_jiffies(jiffies + 2 * HZ)); |
5728 | } |
5729 | } |
5730 | |
5731 | /** |
5732 | * igc_intr_msi - Interrupt Handler |
5733 | * @irq: interrupt number |
5734 | * @data: pointer to a network interface device structure |
5735 | */ |
5736 | static irqreturn_t igc_intr_msi(int irq, void *data) |
5737 | { |
5738 | struct igc_adapter *adapter = data; |
5739 | struct igc_q_vector *q_vector = adapter->q_vector[0]; |
5740 | struct igc_hw *hw = &adapter->hw; |
5741 | /* read ICR disables interrupts using IAM */ |
5742 | u32 icr = rd32(IGC_ICR); |
5743 | |
5744 | igc_write_itr(q_vector); |
5745 | |
5746 | if (icr & IGC_ICR_DRSTA) |
5747 | schedule_work(&adapter->reset_task); |
5748 | |
5749 | if (icr & IGC_ICR_DOUTSYNC) { |
5750 | /* HW is reporting DMA is out of sync */ |
5751 | adapter->stats.doosync++; |
5752 | } |
5753 | |
5754 | if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { |
5755 | hw->mac.get_link_status = true; |
5756 | if (!test_bit(__IGC_DOWN, &adapter->state)) |
5757 | mod_timer(&adapter->watchdog_timer, jiffies + 1); |
5758 | } |
5759 | |
5760 | if (icr & IGC_ICR_TS) |
5761 | igc_tsync_interrupt(adapter); |
5762 | |
5763 | napi_schedule(&q_vector->napi); |
5764 | |
5765 | return IRQ_HANDLED; |
5766 | } |
5767 | |
5768 | /** |
5769 | * igc_intr - Legacy Interrupt Handler |
5770 | * @irq: interrupt number |
5771 | * @data: pointer to a network interface device structure |
5772 | */ |
5773 | static irqreturn_t igc_intr(int irq, void *data) |
5774 | { |
5775 | struct igc_adapter *adapter = data; |
5776 | struct igc_q_vector *q_vector = adapter->q_vector[0]; |
5777 | struct igc_hw *hw = &adapter->hw; |
5778 | /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No |
5779 | * need for the IMC write |
5780 | */ |
5781 | u32 icr = rd32(IGC_ICR); |
5782 | |
5783 | /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is |
5784 | * not set, then the adapter didn't send an interrupt |
5785 | */ |
5786 | if (!(icr & IGC_ICR_INT_ASSERTED)) |
5787 | return IRQ_NONE; |
5788 | |
5789 | igc_write_itr(q_vector); |
5790 | |
5791 | if (icr & IGC_ICR_DRSTA) |
5792 | schedule_work(&adapter->reset_task); |
5793 | |
5794 | if (icr & IGC_ICR_DOUTSYNC) { |
5795 | /* HW is reporting DMA is out of sync */ |
5796 | adapter->stats.doosync++; |
5797 | } |
5798 | |
5799 | if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { |
5800 | hw->mac.get_link_status = true; |
5801 | /* guard against interrupt when we're going down */ |
5802 | if (!test_bit(__IGC_DOWN, &adapter->state)) |
5803 | mod_timer(&adapter->watchdog_timer, jiffies + 1); |
5804 | } |
5805 | |
5806 | if (icr & IGC_ICR_TS) |
5807 | igc_tsync_interrupt(adapter); |
5808 | |
5809 | napi_schedule(&q_vector->napi); |
5810 | |
5811 | return IRQ_HANDLED; |
5812 | } |
5813 | |
5814 | static void igc_free_irq(struct igc_adapter *adapter) |
5815 | { |
5816 | if (adapter->msix_entries) { |
5817 | int vector = 0, i; |
5818 | |
5819 | free_irq(adapter->msix_entries[vector++].vector, adapter); |
5820 | |
5821 | for (i = 0; i < adapter->num_q_vectors; i++) |
5822 | free_irq(adapter->msix_entries[vector++].vector, |
5823 | adapter->q_vector[i]); |
5824 | } else { |
5825 | free_irq(adapter->pdev->irq, adapter); |
5826 | } |
5827 | } |
5828 | |
5829 | /** |
5830 | * igc_request_irq - initialize interrupts |
5831 | * @adapter: Pointer to adapter structure |
5832 | * |
5833 | * Attempts to configure interrupts using the best available |
5834 | * capabilities of the hardware and kernel. |
5835 | */ |
5836 | static int igc_request_irq(struct igc_adapter *adapter) |
5837 | { |
5838 | struct net_device *netdev = adapter->netdev; |
5839 | struct pci_dev *pdev = adapter->pdev; |
5840 | int err = 0; |
5841 | |
5842 | if (adapter->flags & IGC_FLAG_HAS_MSIX) { |
5843 | err = igc_request_msix(adapter); |
5844 | if (!err) |
5845 | goto request_done; |
5846 | /* fall back to MSI */ |
5847 | igc_free_all_tx_resources(adapter); |
5848 | igc_free_all_rx_resources(adapter); |
5849 | |
5850 | igc_clear_interrupt_scheme(adapter); |
5851 | err = igc_init_interrupt_scheme(adapter, false); |
5852 | if (err) |
5853 | goto request_done; |
5854 | igc_setup_all_tx_resources(adapter); |
5855 | igc_setup_all_rx_resources(adapter); |
5856 | igc_configure(adapter); |
5857 | } |
5858 | |
5859 | igc_assign_vector(adapter->q_vector[0], 0); |
5860 | |
5861 | if (adapter->flags & IGC_FLAG_HAS_MSI) { |
5862 | err = request_irq(pdev->irq, &igc_intr_msi, 0, |
5863 | netdev->name, adapter); |
5864 | if (!err) |
5865 | goto request_done; |
5866 | |
5867 | /* fall back to legacy interrupts */ |
5868 | igc_reset_interrupt_capability(adapter); |
5869 | adapter->flags &= ~IGC_FLAG_HAS_MSI; |
5870 | } |
5871 | |
5872 | err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED, |
5873 | netdev->name, adapter); |
5874 | |
5875 | if (err) |
5876 | netdev_err(netdev, "Error %d getting interrupt\n", err); |
5877 | |
5878 | request_done: |
5879 | return err; |
5880 | } |
5881 | |
5882 | /** |
5883 | * __igc_open - Called when a network interface is made active |
5884 | * @netdev: network interface device structure |
5885 | * @resuming: boolean indicating if the device is resuming |
5886 | * |
5887 | * Returns 0 on success, negative value on failure |
5888 | * |
5889 | * The open entry point is called when a network interface is made |
5890 | * active by the system (IFF_UP). At this point all resources needed |
5891 | * for transmit and receive operations are allocated, the interrupt |
5892 | * handler is registered with the OS, the watchdog timer is started, |
5893 | * and the stack is notified that the interface is ready. |
5894 | */ |
5895 | static int __igc_open(struct net_device *netdev, bool resuming) |
5896 | { |
5897 | struct igc_adapter *adapter = netdev_priv(netdev); |
5898 | struct pci_dev *pdev = adapter->pdev; |
5899 | struct igc_hw *hw = &adapter->hw; |
5900 | int err = 0; |
5901 | int i = 0; |
5902 | |
5903 | /* disallow open during test */ |
5904 | |
5905 | if (test_bit(__IGC_TESTING, &adapter->state)) { |
5906 | WARN_ON(resuming); |
5907 | return -EBUSY; |
5908 | } |
5909 | |
5910 | if (!resuming) |
5911 | pm_runtime_get_sync(&pdev->dev); |
5912 | |
5913 | netif_carrier_off(netdev); |
5914 | |
5915 | /* allocate transmit descriptors */ |
5916 | err = igc_setup_all_tx_resources(adapter); |
5917 | if (err) |
5918 | goto err_setup_tx; |
5919 | |
5920 | /* allocate receive descriptors */ |
5921 | err = igc_setup_all_rx_resources(adapter); |
5922 | if (err) |
5923 | goto err_setup_rx; |
5924 | |
5925 | igc_power_up_link(adapter); |
5926 | |
5927 | igc_configure(adapter); |
5928 | |
5929 | err = igc_request_irq(adapter); |
5930 | if (err) |
5931 | goto err_req_irq; |
5932 | |
5933 | /* Notify the stack of the actual queue counts. */ |
5934 | err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); |
5935 | if (err) |
5936 | goto err_set_queues; |
5937 | |
5938 | err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); |
5939 | if (err) |
5940 | goto err_set_queues; |
5941 | |
5942 | clear_bit(__IGC_DOWN, &adapter->state); |
5943 | |
5944 | for (i = 0; i < adapter->num_q_vectors; i++) |
5945 | napi_enable(&adapter->q_vector[i]->napi); |
5946 | |
5947 | /* Clear any pending interrupts. */ |
5948 | rd32(IGC_ICR); |
5949 | igc_irq_enable(adapter); |
5950 | |
5951 | if (!resuming) |
5952 | pm_runtime_put(&pdev->dev); |
5953 | |
5954 | netif_tx_start_all_queues(netdev); |
5955 | |
5956 | /* start the watchdog. */ |
5957 | hw->mac.get_link_status = true; |
5958 | schedule_work(&adapter->watchdog_task); |
5959 | |
5960 | return IGC_SUCCESS; |
5961 | |
5962 | err_set_queues: |
5963 | igc_free_irq(adapter); |
5964 | err_req_irq: |
5965 | igc_release_hw_control(adapter); |
5966 | igc_power_down_phy_copper_base(&adapter->hw); |
5967 | igc_free_all_rx_resources(adapter); |
5968 | err_setup_rx: |
5969 | igc_free_all_tx_resources(adapter); |
5970 | err_setup_tx: |
5971 | igc_reset(adapter); |
5972 | if (!resuming) |
5973 | pm_runtime_put(&pdev->dev); |
5974 | |
5975 | return err; |
5976 | } |
5977 | |
5978 | int igc_open(struct net_device *netdev) |
5979 | { |
5980 | return __igc_open(netdev, false); |
5981 | } |
5982 | |
5983 | /** |
5984 | * __igc_close - Disables a network interface |
5985 | * @netdev: network interface device structure |
5986 | * @suspending: boolean indicating the device is suspending |
5987 | * |
5988 | * Returns 0, this is not allowed to fail |
5989 | * |
5990 | * The close entry point is called when an interface is de-activated |
5991 | * by the OS. The hardware is still under the driver's control, but |
5992 | * needs to be disabled. A global MAC reset is issued to stop the |
5993 | * hardware, and all transmit and receive resources are freed. |
5994 | */ |
5995 | static int __igc_close(struct net_device *netdev, bool suspending) |
5996 | { |
5997 | struct igc_adapter *adapter = netdev_priv(netdev); |
5998 | struct pci_dev *pdev = adapter->pdev; |
5999 | |
6000 | WARN_ON(test_bit(__IGC_RESETTING, &adapter->state)); |
6001 | |
6002 | if (!suspending) |
6003 | pm_runtime_get_sync(&pdev->dev); |
6004 | |
6005 | igc_down(adapter); |
6006 | |
6007 | igc_release_hw_control(adapter); |
6008 | |
6009 | igc_free_irq(adapter); |
6010 | |
6011 | igc_free_all_tx_resources(adapter); |
6012 | igc_free_all_rx_resources(adapter); |
6013 | |
6014 | if (!suspending) |
6015 | pm_runtime_put_sync(&pdev->dev); |
6016 | |
6017 | return 0; |
6018 | } |
6019 | |
6020 | int igc_close(struct net_device *netdev) |
6021 | { |
6022 | if (netif_device_present(netdev) || netdev->dismantle) |
6023 | return __igc_close(netdev, false); |
6024 | return 0; |
6025 | } |
6026 | |
6027 | /** |
6028 | * igc_ioctl - Access the hwtstamp interface |
6029 | * @netdev: network interface device structure |
6030 | * @ifr: interface request data |
6031 | * @cmd: ioctl command |
6032 | **/ |
6033 | static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) |
6034 | { |
6035 | switch (cmd) { |
6036 | case SIOCGHWTSTAMP: |
6037 | return igc_ptp_get_ts_config(netdev, ifr); |
6038 | case SIOCSHWTSTAMP: |
6039 | return igc_ptp_set_ts_config(netdev, ifr); |
6040 | default: |
6041 | return -EOPNOTSUPP; |
6042 | } |
6043 | } |
6044 | |
6045 | static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue, |
6046 | bool enable) |
6047 | { |
6048 | struct igc_ring *ring; |
6049 | |
6050 | if (queue < 0 || queue >= adapter->num_tx_queues) |
6051 | return -EINVAL; |
6052 | |
6053 | ring = adapter->tx_ring[queue]; |
6054 | ring->launchtime_enable = enable; |
6055 | |
6056 | return 0; |
6057 | } |
6058 | |
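| /* Return true when base_time lies in the past relative to @now */ |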
6059 | static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now) |
6060 | { |
6061 | struct timespec64 b; |
6062 | |
6063 | b = ktime_to_timespec64(base_time); |
6064 | |
6065 | return timespec64_compare(now, &b) > 0; |
6066 | } |
6067 | |
6068 | static bool validate_schedule(struct igc_adapter *adapter, |
6069 | const struct tc_taprio_qopt_offload *qopt) |
6070 | { |
6071 | int queue_uses[IGC_MAX_TX_QUEUES] = { }; |
6072 | struct igc_hw *hw = &adapter->hw; |
6073 | struct timespec64 now; |
6074 | size_t n; |
6075 | |
6076 | if (qopt->cycle_time_extension) |
6077 | return false; |
6078 | |
6079 | igc_ptp_read(adapter, &now); |
6080 | |
6081 | /* If we program the controller's BASET registers with a time |
6082 | * in the future, it will hold all the packets until that |
6083 | * time, causing a lot of TX Hangs, so to avoid that, we |
6084 | * reject schedules that would start in the future. |
6085 | * Note: Limitation above is no longer in i226. |
6086 | */ |
6087 | if (!is_base_time_past(qopt->base_time, &now) && |
6088 | igc_is_device_id_i225(hw)) |
6089 | return false; |
6090 | |
6091 | for (n = 0; n < qopt->num_entries; n++) { |
6092 | const struct tc_taprio_sched_entry *e, *prev; |
6093 | int i; |
6094 | |
6095 | prev = n ? &qopt->entries[n - 1] : NULL; |
6096 | e = &qopt->entries[n]; |
6097 | |
6098 | /* i225 only supports "global" frame preemption |
6099 | * settings. |
6100 | */ |
6101 | if (e->command != TC_TAPRIO_CMD_SET_GATES) |
6102 | return false; |
6103 | |
6104 | for (i = 0; i < adapter->num_tx_queues; i++) |
6105 | if (e->gate_mask & BIT(i)) { |
6106 | queue_uses[i]++; |
6107 | |
6108 | /* There are limitations: A single queue cannot |
6109 | * be opened and closed multiple times per cycle |
6110 | * unless the gate stays open. Check for it. |
6111 | */ |
6112 | if (queue_uses[i] > 1 && |
6113 | !(prev->gate_mask & BIT(i))) |
6114 | return false; |
6115 | } |
6116 | } |
6117 | |
6118 | return true; |
6119 | } |
6120 | |
6121 | static int igc_tsn_enable_launchtime(struct igc_adapter *adapter, |
6122 | struct tc_etf_qopt_offload *qopt) |
6123 | { |
6124 | struct igc_hw *hw = &adapter->hw; |
6125 | int err; |
6126 | |
6127 | if (hw->mac.type != igc_i225) |
6128 | return -EOPNOTSUPP; |
6129 | |
6130 | err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable); |
6131 | if (err) |
6132 | return err; |
6133 | |
6134 | return igc_tsn_offload_apply(adapter); |
6135 | } |
6136 | |
6137 | static int igc_qbv_clear_schedule(struct igc_adapter *adapter) |
6138 | { |
6139 | unsigned long flags; |
6140 | int i; |
6141 | |
6142 | adapter->base_time = 0; |
6143 | adapter->cycle_time = NSEC_PER_SEC; |
6144 | adapter->taprio_offload_enable = false; |
6145 | adapter->qbv_config_change_errors = 0; |
6146 | adapter->qbv_count = 0; |
6147 | |
6148 | for (i = 0; i < adapter->num_tx_queues; i++) { |
6149 | struct igc_ring *ring = adapter->tx_ring[i]; |
6150 | |
6151 | ring->start_time = 0; |
6152 | ring->end_time = NSEC_PER_SEC; |
6153 | ring->max_sdu = 0; |
6154 | } |
6155 | |
6156 | spin_lock_irqsave(&adapter->qbv_tx_lock, flags); |
6157 | |
6158 | adapter->qbv_transition = false; |
6159 | |
6160 | for (i = 0; i < adapter->num_tx_queues; i++) { |
6161 | struct igc_ring *ring = adapter->tx_ring[i]; |
6162 | |
6163 | ring->oper_gate_closed = false; |
6164 | ring->admin_gate_closed = false; |
6165 | } |
6166 | |
6167 | spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); |
6168 | |
6169 | return 0; |
6170 | } |
6171 | |
6172 | static int igc_tsn_clear_schedule(struct igc_adapter *adapter) |
6173 | { |
6174 | igc_qbv_clear_schedule(adapter); |
6175 | |
6176 | return 0; |
6177 | } |
6178 | |
6179 | static void igc_taprio_stats(struct net_device *dev, |
6180 | struct tc_taprio_qopt_stats *stats) |
6181 | { |
6182 | /* When Strict_End is enabled, the tx_overruns counter |
6183 | * will always be zero. |
6184 | */ |
6185 | stats->tx_overruns = 0; |
6186 | } |
6187 | |
6188 | static void igc_taprio_queue_stats(struct net_device *dev, |
6189 | struct tc_taprio_qopt_queue_stats *queue_stats) |
6190 | { |
6191 | struct tc_taprio_qopt_stats *stats = &queue_stats->stats; |
6192 | |
6193 | /* When Strict_End is enabled, the tx_overruns counter |
6194 | * will always be zero. |
6195 | */ |
6196 | stats->tx_overruns = 0; |
6197 | } |
6198 | |
6199 | static int igc_save_qbv_schedule(struct igc_adapter *adapter, |
6200 | struct tc_taprio_qopt_offload *qopt) |
6201 | { |
6202 | bool queue_configured[IGC_MAX_TX_QUEUES] = { }; |
6203 | struct igc_hw *hw = &adapter->hw; |
6204 | u32 start_time = 0, end_time = 0; |
6205 | struct timespec64 now; |
6206 | unsigned long flags; |
6207 | size_t n; |
6208 | int i; |
6209 | |
6210 | switch (qopt->cmd) { |
6211 | case TAPRIO_CMD_REPLACE: |
6212 | break; |
6213 | case TAPRIO_CMD_DESTROY: |
6214 | return igc_tsn_clear_schedule(adapter); |
6215 | case TAPRIO_CMD_STATS: |
6216 | igc_taprio_stats(adapter->netdev, &qopt->stats); |
6217 | return 0; |
6218 | case TAPRIO_CMD_QUEUE_STATS: |
6219 | igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats); |
6220 | return 0; |
6221 | default: |
6222 | return -EOPNOTSUPP; |
6223 | } |
6224 | |
6225 | if (qopt->base_time < 0) |
6226 | return -ERANGE; |
6227 | |
6228 | if (igc_is_device_id_i225(hw) && adapter->taprio_offload_enable) |
6229 | return -EALREADY; |
6230 | |
6231 | if (!validate_schedule(adapter, qopt)) |
6232 | return -EINVAL; |
6233 | |
6234 | adapter->cycle_time = qopt->cycle_time; |
6235 | adapter->base_time = qopt->base_time; |
6236 | adapter->taprio_offload_enable = true; |
6237 | |
6238 | igc_ptp_read(adapter, &now); |
6239 | |
6240 | for (n = 0; n < qopt->num_entries; n++) { |
6241 | struct tc_taprio_sched_entry *e = &qopt->entries[n]; |
6242 | |
6243 | end_time += e->interval; |
6244 | |
6245 | /* If any of the conditions below are true, we need to manually |
6246 | * control the end time of the cycle. |
6247 | * 1. Qbv users can specify a cycle time that is not equal |
6248 | * to the total GCL intervals. Hence, recalculation is |
6249 | * necessary here to exclude the time interval that |
6250 | * exceeds the cycle time. |
6251 | * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2, |
6252 | * once the end of the list is reached, it will switch |
6253 | * to the END_OF_CYCLE state and leave the gates in the |
6254 | * same state until the next cycle is started. |
6255 | */ |
6256 | if (end_time > adapter->cycle_time || |
6257 | n + 1 == qopt->num_entries) |
6258 | end_time = adapter->cycle_time; |
6259 | |
6260 | for (i = 0; i < adapter->num_tx_queues; i++) { |
6261 | struct igc_ring *ring = adapter->tx_ring[i]; |
6262 | |
6263 | if (!(e->gate_mask & BIT(i))) |
6264 | continue; |
6265 | |
6266 | /* Check whether a queue stays open for more than one |
6267 | * entry. If so, keep the start and advance the end |
6268 | * time. |
6269 | */ |
6270 | if (!queue_configured[i]) |
6271 | ring->start_time = start_time; |
6272 | ring->end_time = end_time; |
6273 | |
6274 | if (ring->start_time >= adapter->cycle_time) |
6275 | queue_configured[i] = false; |
6276 | else |
6277 | queue_configured[i] = true; |
6278 | } |
6279 | |
6280 | start_time += e->interval; |
6281 | } |
6282 | |
6283 | spin_lock_irqsave(&adapter->qbv_tx_lock, flags); |
6284 | |
6285 | /* Check whether a queue gets configured. |
6286 | * If not, set the start and end time to be end time. |
6287 | */ |
6288 | for (i = 0; i < adapter->num_tx_queues; i++) { |
6289 | struct igc_ring *ring = adapter->tx_ring[i]; |
6290 | |
6291 | if (!is_base_time_past(qopt->base_time, &now)) { |
6292 | ring->admin_gate_closed = false; |
6293 | } else { |
6294 | ring->oper_gate_closed = false; |
6295 | ring->admin_gate_closed = false; |
6296 | } |
6297 | |
6298 | if (!queue_configured[i]) { |
6299 | if (!is_base_time_past(qopt->base_time, &now)) |
6300 | ring->admin_gate_closed = true; |
6301 | else |
6302 | ring->oper_gate_closed = true; |
6303 | |
6304 | ring->start_time = end_time; |
6305 | ring->end_time = end_time; |
6306 | } |
6307 | } |
6308 | |
6309 | spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); |
6310 | |
6311 | for (i = 0; i < adapter->num_tx_queues; i++) { |
6312 | struct igc_ring *ring = adapter->tx_ring[i]; |
6313 | struct net_device *dev = adapter->netdev; |
6314 | |
6315 | if (qopt->max_sdu[i]) |
6316 | ring->max_sdu = qopt->max_sdu[i] + dev->hard_header_len - ETH_TLEN; |
6317 | else |
6318 | ring->max_sdu = 0; |
6319 | } |
6320 | |
6321 | return 0; |
6322 | } |
6323 | |
6324 | static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter, |
6325 | struct tc_taprio_qopt_offload *qopt) |
6326 | { |
6327 | struct igc_hw *hw = &adapter->hw; |
6328 | int err; |
6329 | |
6330 | if (hw->mac.type != igc_i225) |
6331 | return -EOPNOTSUPP; |
6332 | |
6333 | err = igc_save_qbv_schedule(adapter, qopt); |
6334 | if (err) |
6335 | return err; |
6336 | |
6337 | return igc_tsn_offload_apply(adapter); |
6338 | } |
6339 | |
6340 | static int igc_save_cbs_params(struct igc_adapter *adapter, int queue, |
6341 | bool enable, int idleslope, int sendslope, |
6342 | int hicredit, int locredit) |
6343 | { |
6344 | bool cbs_status[IGC_MAX_SR_QUEUES] = { false }; |
6345 | struct net_device *netdev = adapter->netdev; |
6346 | struct igc_ring *ring; |
6347 | int i; |
6348 | |
6349 | /* i225 has two sets of credit-based shaper logic, so CBS is |
6350 | * supported only on the two highest priority queues. |
6351 | */ |
6352 | if (queue < 0 || queue > 1) |
6353 | return -EINVAL; |
6354 | |
6355 | ring = adapter->tx_ring[queue]; |
6356 | |
6357 | for (i = 0; i < IGC_MAX_SR_QUEUES; i++) |
6358 | if (adapter->tx_ring[i]) |
6359 | cbs_status[i] = adapter->tx_ring[i]->cbs_enable; |
6360 | |
6361 | /* CBS should be enabled on the highest priority queue first in order |
6362 | * for the CBS algorithm to operate as intended. |
6363 | */ |
6364 | if (enable) { |
6365 | if (queue == 1 && !cbs_status[0]) { |
6366 | netdev_err(netdev, |
6367 | "Enabling CBS on queue1 before queue0\n"); |
6368 | return -EINVAL; |
6369 | } |
6370 | } else { |
6371 | if (queue == 0 && cbs_status[1]) { |
6372 | netdev_err(netdev, |
6373 | "Disabling CBS on queue0 before queue1\n"); |
6374 | return -EINVAL; |
6375 | } |
6376 | } |
6377 | |
6378 | ring->cbs_enable = enable; |
6379 | ring->idleslope = idleslope; |
6380 | ring->sendslope = sendslope; |
6381 | ring->hicredit = hicredit; |
6382 | ring->locredit = locredit; |
6383 | |
6384 | return 0; |
6385 | } |
6386 | |
6387 | static int igc_tsn_enable_cbs(struct igc_adapter *adapter, |
6388 | struct tc_cbs_qopt_offload *qopt) |
6389 | { |
6390 | struct igc_hw *hw = &adapter->hw; |
6391 | int err; |
6392 | |
6393 | if (hw->mac.type != igc_i225) |
6394 | return -EOPNOTSUPP; |
6395 | |
6396 | if (qopt->queue < 0 || qopt->queue > 1) |
6397 | return -EINVAL; |
6398 | |
6399 | err = igc_save_cbs_params(adapter, qopt->queue, qopt->enable, |
6400 | qopt->idleslope, qopt->sendslope, |
6401 | qopt->hicredit, qopt->locredit); |
6402 | if (err) |
6403 | return err; |
6404 | |
6405 | return igc_tsn_offload_apply(adapter); |
6406 | } |
6407 | |
6408 | static int igc_tc_query_caps(struct igc_adapter *adapter, |
6409 | struct tc_query_caps_base *base) |
6410 | { |
6411 | struct igc_hw *hw = &adapter->hw; |
6412 | |
6413 | switch (base->type) { |
6414 | case TC_SETUP_QDISC_TAPRIO: { |
6415 | struct tc_taprio_caps *caps = base->caps; |
6416 | |
6417 | caps->broken_mqprio = true; |
6418 | |
6419 | if (hw->mac.type == igc_i225) { |
6420 | caps->supports_queue_max_sdu = true; |
6421 | caps->gate_mask_per_txq = true; |
6422 | } |
6423 | |
6424 | return 0; |
6425 | } |
6426 | default: |
6427 | return -EOPNOTSUPP; |
6428 | } |
6429 | } |
6430 | |
6431 | static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type, |
6432 | void *type_data) |
6433 | { |
6434 | struct igc_adapter *adapter = netdev_priv(dev); |
6435 | |
6436 | adapter->tc_setup_type = type; |
6437 | |
6438 | switch (type) { |
6439 | case TC_QUERY_CAPS: |
6440 | return igc_tc_query_caps(adapter, type_data); |
6441 | case TC_SETUP_QDISC_TAPRIO: |
6442 | return igc_tsn_enable_qbv_scheduling(adapter, type_data); |
6443 | |
6444 | case TC_SETUP_QDISC_ETF: |
6445 | return igc_tsn_enable_launchtime(adapter, type_data); |
6446 | |
6447 | case TC_SETUP_QDISC_CBS: |
6448 | return igc_tsn_enable_cbs(adapter, type_data); |
6449 | |
6450 | default: |
6451 | return -EOPNOTSUPP; |
6452 | } |
6453 | } |
6454 | |
6455 | static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf) |
6456 | { |
6457 | struct igc_adapter *adapter = netdev_priv(dev); |
6458 | |
6459 | switch (bpf->command) { |
6460 | case XDP_SETUP_PROG: |
6461 | return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack); |
6462 | case XDP_SETUP_XSK_POOL: |
6463 | return igc_xdp_setup_pool(adapter, bpf->xsk.pool, |
6464 | bpf->xsk.queue_id); |
6465 | default: |
6466 | return -EOPNOTSUPP; |
6467 | } |
6468 | } |
6469 | |
6470 | static int igc_xdp_xmit(struct net_device *dev, int num_frames, |
6471 | struct xdp_frame **frames, u32 flags) |
6472 | { |
6473 | struct igc_adapter *adapter = netdev_priv(dev); |
6474 | int cpu = smp_processor_id(); |
6475 | struct netdev_queue *nq; |
6476 | struct igc_ring *ring; |
6477 | int i, nxmit; |
6478 | |
6479 | if (unlikely(!netif_carrier_ok(dev))) |
6480 | return -ENETDOWN; |
6481 | |
6482 | if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) |
6483 | return -EINVAL; |
6484 | |
6485 | ring = igc_xdp_get_tx_ring(adapter, cpu); |
6486 | nq = txring_txq(ring); |
6487 | |
6488 | __netif_tx_lock(nq, cpu); |
6489 | |
6490 | /* Avoid transmit queue timeout since we share it with the slow path */ |
6491 | txq_trans_cond_update(nq); |
6492 | |
6493 | nxmit = 0; |
6494 | for (i = 0; i < num_frames; i++) { |
6495 | int err; |
6496 | struct xdp_frame *xdpf = frames[i]; |
6497 | |
6498 | err = igc_xdp_init_tx_descriptor(ring, xdpf); |
6499 | if (err) |
6500 | break; |
6501 | nxmit++; |
6502 | } |
6503 | |
6504 | if (flags & XDP_XMIT_FLUSH) |
6505 | igc_flush_tx_descriptors(ring); |
6506 | |
6507 | __netif_tx_unlock(nq); |
6508 | |
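| /* Per the ndo_xdp_xmit contract, return how many frames were actually |
| * queued; the caller is responsible for freeing any frames beyond that |
| * count. |
| */ |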
6509 | return nxmit; |
6510 | } |
6511 | |
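| /* Raise a software-generated MSI-X interrupt for this vector by writing |
| * its EIMS bit into EICS, so its NAPI poll runs even though no hardware |
| * event is pending. |
| */ |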
6512 | static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter, |
6513 | struct igc_q_vector *q_vector) |
6514 | { |
6515 | struct igc_hw *hw = &adapter->hw; |
6516 | u32 eics = 0; |
6517 | |
6518 | eics |= q_vector->eims_value; |
6519 | wr32(IGC_EICS, eics); |
6520 | } |
6521 | |
6522 | int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) |
6523 | { |
6524 | struct igc_adapter *adapter = netdev_priv(dev); |
6525 | struct igc_q_vector *q_vector; |
6526 | struct igc_ring *ring; |
6527 | |
6528 | if (test_bit(__IGC_DOWN, &adapter->state)) |
6529 | return -ENETDOWN; |
6530 | |
6531 | if (!igc_xdp_is_enabled(adapter)) |
6532 | return -ENXIO; |
6533 | |
6534 | if (queue_id >= adapter->num_rx_queues) |
6535 | return -EINVAL; |
6536 | |
6537 | ring = adapter->rx_ring[queue_id]; |
6538 | |
6539 | if (!ring->xsk_pool) |
6540 | return -ENXIO; |
6541 | |
6542 | q_vector = adapter->q_vector[queue_id]; |
6543 | if (!napi_if_scheduled_mark_missed(&q_vector->napi)) |
6544 | igc_trigger_rxtxq_interrupt(adapter, q_vector); |
6545 | |
6546 | return 0; |
6547 | } |
6548 | |
6549 | static ktime_t igc_get_tstamp(struct net_device *dev, |
6550 | const struct skb_shared_hwtstamps *hwtstamps, |
6551 | bool cycles) |
6552 | { |
6553 | struct igc_adapter *adapter = netdev_priv(dev); |
6554 | struct igc_inline_rx_tstamps *tstamp; |
6555 | ktime_t timestamp; |
6556 | |
6557 | tstamp = hwtstamps->netdev_data; |
6558 | |
6559 | if (cycles) |
6560 | timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer1); |
6561 | else |
6562 | timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0); |
6563 | |
6564 | return timestamp; |
6565 | } |
6566 | |
6567 | static const struct net_device_ops igc_netdev_ops = { |
6568 | .ndo_open = igc_open, |
6569 | .ndo_stop = igc_close, |
6570 | .ndo_start_xmit = igc_xmit_frame, |
6571 | .ndo_set_rx_mode = igc_set_rx_mode, |
6572 | .ndo_set_mac_address = igc_set_mac, |
6573 | .ndo_change_mtu = igc_change_mtu, |
6574 | .ndo_tx_timeout = igc_tx_timeout, |
6575 | .ndo_get_stats64 = igc_get_stats64, |
6576 | .ndo_fix_features = igc_fix_features, |
6577 | .ndo_set_features = igc_set_features, |
6578 | .ndo_features_check = igc_features_check, |
6579 | .ndo_eth_ioctl = igc_ioctl, |
6580 | .ndo_setup_tc = igc_setup_tc, |
6581 | .ndo_bpf = igc_bpf, |
6582 | .ndo_xdp_xmit = igc_xdp_xmit, |
6583 | .ndo_xsk_wakeup = igc_xsk_wakeup, |
6584 | .ndo_get_tstamp = igc_get_tstamp, |
6585 | }; |
6586 | |
6587 | /* PCIe configuration access */ |
6588 | void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) |
6589 | { |
6590 | struct igc_adapter *adapter = hw->back; |
6591 | |
6592 | pci_read_config_word(adapter->pdev, reg, value); |
6593 | } |
6594 | |
6595 | void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value) |
6596 | { |
6597 | struct igc_adapter *adapter = hw->back; |
6598 | |
6599 | pci_write_config_word(adapter->pdev, reg, *value); |
6600 | } |
6601 | |
6602 | s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) |
6603 | { |
6604 | struct igc_adapter *adapter = hw->back; |
6605 | |
6606 | if (!pci_is_pcie(adapter->pdev)) |
6607 | return -IGC_ERR_CONFIG; |
6608 | |
6609 | pcie_capability_read_word(adapter->pdev, reg, value); |
6610 | |
6611 | return IGC_SUCCESS; |
6612 | } |
6613 | |
6614 | s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value) |
6615 | { |
6616 | struct igc_adapter *adapter = hw->back; |
6617 | |
6618 | if (!pci_is_pcie(adapter->pdev)) |
6619 | return -IGC_ERR_CONFIG; |
6620 | |
6621 | pcie_capability_write_word(adapter->pdev, reg, *value); |
6622 | |
6623 | return IGC_SUCCESS; |
6624 | } |
6625 | |
6626 | u32 igc_rd32(struct igc_hw *hw, u32 reg) |
6627 | { |
6628 | struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw); |
6629 | u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); |
6630 | u32 value = 0; |
6631 | |
6632 | if (IGC_REMOVED(hw_addr)) |
6633 | return ~value; |
6634 | |
6635 | value = readl(&hw_addr[reg]); |
6636 | |
6637 | /* reads should not return all F's */ |
6638 | if (!(~value) && (!reg || !(~readl(hw_addr)))) { |
6639 | struct net_device *netdev = igc->netdev; |
6640 | |
6641 | hw->hw_addr = NULL; |
6642 | netif_device_detach(netdev); |
6643 | netdev_err(netdev, "PCIe link lost, device now detached\n"); |
6644 | WARN(pci_device_is_present(igc->pdev), |
6645 | "igc: Failed to read reg 0x%x!\n", reg); |
6646 | } |
6647 | |
6648 | return value; |
6649 | } |
6650 | |
6651 | /* Mapping HW RSS Type to enum xdp_rss_hash_type */ |
6652 | static enum xdp_rss_hash_type igc_xdp_rss_type[IGC_RSS_TYPE_MAX_TABLE] = { |
6653 | [IGC_RSS_TYPE_NO_HASH] = XDP_RSS_TYPE_L2, |
6654 | [IGC_RSS_TYPE_HASH_TCP_IPV4] = XDP_RSS_TYPE_L4_IPV4_TCP, |
6655 | [IGC_RSS_TYPE_HASH_IPV4] = XDP_RSS_TYPE_L3_IPV4, |
6656 | [IGC_RSS_TYPE_HASH_TCP_IPV6] = XDP_RSS_TYPE_L4_IPV6_TCP, |
6657 | [IGC_RSS_TYPE_HASH_IPV6_EX] = XDP_RSS_TYPE_L3_IPV6_EX, |
6658 | [IGC_RSS_TYPE_HASH_IPV6] = XDP_RSS_TYPE_L3_IPV6, |
6659 | [IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_TCP_EX, |
6660 | [IGC_RSS_TYPE_HASH_UDP_IPV4] = XDP_RSS_TYPE_L4_IPV4_UDP, |
6661 | [IGC_RSS_TYPE_HASH_UDP_IPV6] = XDP_RSS_TYPE_L4_IPV6_UDP, |
6662 | [IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_UDP_EX, |
6663 | [10] = XDP_RSS_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW */ |
6664 | [11] = XDP_RSS_TYPE_NONE, /* keep array sized for SW bit-mask */ |
6665 | [12] = XDP_RSS_TYPE_NONE, /* to handle future HW revisions */ |
6666 | [13] = XDP_RSS_TYPE_NONE, |
6667 | [14] = XDP_RSS_TYPE_NONE, |
6668 | [15] = XDP_RSS_TYPE_NONE, |
6669 | }; |
6670 | |
6671 | static int igc_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash, |
6672 | enum xdp_rss_hash_type *rss_type) |
6673 | { |
6674 | const struct igc_xdp_buff *ctx = (void *)_ctx; |
6675 | |
6676 | if (!(ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)) |
6677 | return -ENODATA; |
6678 | |
6679 | *hash = le32_to_cpu(ctx->rx_desc->wb.lower.hi_dword.rss); |
6680 | *rss_type = igc_xdp_rss_type[igc_rss_type(ctx->rx_desc)]; |
6681 | |
6682 | return 0; |
6683 | } |
6684 | |
6685 | static int igc_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp) |
6686 | { |
6687 | const struct igc_xdp_buff *ctx = (void *)_ctx; |
6688 | struct igc_adapter *adapter = netdev_priv(ctx->xdp.rxq->dev); |
6689 | struct igc_inline_rx_tstamps *tstamp = ctx->rx_ts; |
6690 | |
6691 | if (igc_test_staterr(ctx->rx_desc, IGC_RXDADV_STAT_TSIP)) { |
6692 | *timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0); |
6693 | |
6694 | return 0; |
6695 | } |
6696 | |
6697 | return -ENODATA; |
6698 | } |
6699 | |
6700 | static const struct xdp_metadata_ops igc_xdp_metadata_ops = { |
6701 | .xmo_rx_hash = igc_xdp_rx_hash, |
6702 | .xmo_rx_timestamp = igc_xdp_rx_timestamp, |
6703 | }; |
6704 | |
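| /* hrtimer callback armed when a Qbv (taprio) schedule's base time lies |
| * in the future. When it fires, queues whose admin gate was closed move |
| * to the operational-closed state and all other gates open; qbv_tx_lock |
| * keeps the transmit path from seeing a half-updated gate state. |
| */ |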
6705 | static enum hrtimer_restart igc_qbv_scheduling_timer(struct hrtimer *timer) |
6706 | { |
6707 | struct igc_adapter *adapter = container_of(timer, struct igc_adapter, |
6708 | hrtimer); |
6709 | unsigned long flags; |
6710 | unsigned int i; |
6711 | |
6712 | spin_lock_irqsave(&adapter->qbv_tx_lock, flags); |
6713 | |
6714 | adapter->qbv_transition = true; |
6715 | for (i = 0; i < adapter->num_tx_queues; i++) { |
6716 | struct igc_ring *tx_ring = adapter->tx_ring[i]; |
6717 | |
6718 | if (tx_ring->admin_gate_closed) { |
6719 | tx_ring->admin_gate_closed = false; |
6720 | tx_ring->oper_gate_closed = true; |
6721 | } else { |
6722 | tx_ring->oper_gate_closed = false; |
6723 | } |
6724 | } |
6725 | adapter->qbv_transition = false; |
6726 | |
6727 | spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); |
6728 | |
6729 | return HRTIMER_NORESTART; |
6730 | } |
6731 | |
6732 | /** |
6733 | * igc_probe - Device Initialization Routine |
6734 | * @pdev: PCI device information struct |
6735 | * @ent: entry in igc_pci_tbl |
6736 | * |
6737 | * Returns 0 on success, negative on failure |
6738 | * |
6739 | * igc_probe initializes an adapter identified by a pci_dev structure. |
6740 | * It performs the OS initialization, configures the adapter private |
6741 | * structure and resets the hardware. |
6742 | */ |
6743 | static int igc_probe(struct pci_dev *pdev, |
6744 | const struct pci_device_id *ent) |
6745 | { |
6746 | struct igc_adapter *adapter; |
6747 | struct net_device *netdev; |
6748 | struct igc_hw *hw; |
6749 | const struct igc_info *ei = igc_info_tbl[ent->driver_data]; |
6750 | int err; |
6751 | |
6752 | err = pci_enable_device_mem(pdev); |
6753 | if (err) |
6754 | return err; |
6755 | |
6756 | err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); |
6757 | if (err) { |
6758 | dev_err(&pdev->dev, |
6759 | "No usable DMA configuration, aborting\n"); |
6760 | goto err_dma; |
6761 | } |
6762 | |
6763 | err = pci_request_mem_regions(pdev, igc_driver_name); |
6764 | if (err) |
6765 | goto err_pci_reg; |
6766 | |
6767 | err = pci_enable_ptm(pdev, NULL); |
6768 | if (err < 0) |
6769 | dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n"); |
6770 | |
6771 | pci_set_master(pdev); |
6772 | |
6773 | err = -ENOMEM; |
6774 | netdev = alloc_etherdev_mq(sizeof(struct igc_adapter), |
6775 | IGC_MAX_TX_QUEUES); |
6776 | |
6777 | if (!netdev) |
6778 | goto err_alloc_etherdev; |
6779 | |
6780 | SET_NETDEV_DEV(netdev, &pdev->dev); |
6781 | |
6782 | pci_set_drvdata(pdev, netdev); |
6783 | adapter = netdev_priv(netdev); |
6784 | adapter->netdev = netdev; |
6785 | adapter->pdev = pdev; |
6786 | hw = &adapter->hw; |
6787 | hw->back = adapter; |
6788 | adapter->port_num = hw->bus.func; |
6789 | adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); |
6790 | |
6791 | err = pci_save_state(pdev); |
6792 | if (err) |
6793 | goto err_ioremap; |
6794 | |
6795 | err = -EIO; |
6796 | adapter->io_addr = ioremap(pci_resource_start(pdev, 0), |
6797 | pci_resource_len(pdev, 0)); |
6798 | if (!adapter->io_addr) |
6799 | goto err_ioremap; |
6800 | |
6801 | /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */ |
6802 | hw->hw_addr = adapter->io_addr; |
6803 | |
6804 | netdev->netdev_ops = &igc_netdev_ops; |
6805 | netdev->xdp_metadata_ops = &igc_xdp_metadata_ops; |
6806 | igc_ethtool_set_ops(netdev); |
6807 | netdev->watchdog_timeo = 5 * HZ; |
6808 | |
6809 | netdev->mem_start = pci_resource_start(pdev, 0); |
6810 | netdev->mem_end = pci_resource_end(pdev, 0); |
6811 | |
6812 | /* PCI config space info */ |
6813 | hw->vendor_id = pdev->vendor; |
6814 | hw->device_id = pdev->device; |
6815 | hw->revision_id = pdev->revision; |
6816 | hw->subsystem_vendor_id = pdev->subsystem_vendor; |
6817 | hw->subsystem_device_id = pdev->subsystem_device; |
6818 | |
6819 | /* Copy the default MAC and PHY function pointers */ |
6820 | memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); |
6821 | memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); |
6822 | |
6823 | /* Initialize skew-specific constants */ |
6824 | err = ei->get_invariants(hw); |
6825 | if (err) |
6826 | goto err_sw_init; |
6827 | |
6828 | /* Add supported features to the features list */ |
6829 | netdev->features |= NETIF_F_SG; |
6830 | netdev->features |= NETIF_F_TSO; |
6831 | netdev->features |= NETIF_F_TSO6; |
6832 | netdev->features |= NETIF_F_TSO_ECN; |
6833 | netdev->features |= NETIF_F_RXHASH; |
6834 | netdev->features |= NETIF_F_RXCSUM; |
6835 | netdev->features |= NETIF_F_HW_CSUM; |
6836 | netdev->features |= NETIF_F_SCTP_CRC; |
6837 | netdev->features |= NETIF_F_HW_TC; |
6838 | |
6839 | #define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ |
6840 | NETIF_F_GSO_GRE_CSUM | \ |
6841 | NETIF_F_GSO_IPXIP4 | \ |
6842 | NETIF_F_GSO_IPXIP6 | \ |
6843 | NETIF_F_GSO_UDP_TUNNEL | \ |
6844 | NETIF_F_GSO_UDP_TUNNEL_CSUM) |
6845 | |
6846 | netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES; |
6847 | netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES; |
6848 | |
6849 | /* setup the private structure */ |
6850 | err = igc_sw_init(adapter); |
6851 | if (err) |
6852 | goto err_sw_init; |
6853 | |
6854 | /* copy netdev features into list of user selectable features */ |
6855 | netdev->hw_features |= NETIF_F_NTUPLE; |
6856 | netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; |
6857 | netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; |
6858 | netdev->hw_features |= netdev->features; |
6859 | |
6860 | netdev->features |= NETIF_F_HIGHDMA; |
6861 | |
6862 | netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; |
6863 | netdev->mpls_features |= NETIF_F_HW_CSUM; |
6864 | netdev->hw_enc_features |= netdev->vlan_features; |
6865 | |
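| /* Advertise the XDP features this driver implements: running an XDP |
| * program, redirecting frames with XDP_REDIRECT, and AF_XDP zero-copy. |
| */ |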
6866 | netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | |
6867 | NETDEV_XDP_ACT_XSK_ZEROCOPY; |
6868 | |
6869 | /* MTU range: 68 - 9216 */ |
6870 | netdev->min_mtu = ETH_MIN_MTU; |
6871 | netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; |
6872 | |
6873 | /* before reading the NVM, reset the controller to put the device in a |
6874 | * known good starting state |
6875 | */ |
6876 | hw->mac.ops.reset_hw(hw); |
6877 | |
6878 | if (igc_get_flash_presence_i225(hw)) { |
6879 | if (hw->nvm.ops.validate(hw) < 0) { |
6880 | dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); |
6881 | err = -EIO; |
6882 | goto err_eeprom; |
6883 | } |
6884 | } |
6885 | |
6886 | if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { |
6887 | /* copy the MAC address out of the NVM */ |
6888 | if (hw->mac.ops.read_mac_addr(hw)) |
6889 | dev_err(&pdev->dev, "NVM Read Error\n"); |
6890 | } |
6891 | |
6892 | eth_hw_addr_set(netdev, hw->mac.addr); |
6893 | |
6894 | if (!is_valid_ether_addr(netdev->dev_addr)) { |
6895 | dev_err(&pdev->dev, "Invalid MAC Address\n"); |
6896 | err = -EIO; |
6897 | goto err_eeprom; |
6898 | } |
6899 | |
6900 | /* configure RXPBSIZE and TXPBSIZE */ |
6901 | wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT); |
6902 | wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); |
6903 | |
6904 | timer_setup(&adapter->watchdog_timer, igc_watchdog, 0); |
6905 | timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0); |
6906 | |
6907 | INIT_WORK(&adapter->reset_task, igc_reset_task); |
6908 | INIT_WORK(&adapter->watchdog_task, igc_watchdog_task); |
6909 | |
6910 | hrtimer_init(&adapter->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
6911 | adapter->hrtimer.function = &igc_qbv_scheduling_timer; |
6912 | |
6913 | /* Initialize link properties that are user-changeable */ |
6914 | adapter->fc_autoneg = true; |
6915 | hw->mac.autoneg = true; |
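| /* 0xaf advertises 10/100 Mbps half and full duplex plus 1000 and |
| * 2500 Mbps full duplex (the ADVERTISE_* speed mask bits). |
| */ |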
6916 | hw->phy.autoneg_advertised = 0xaf; |
6917 | |
6918 | hw->fc.requested_mode = igc_fc_default; |
6919 | hw->fc.current_mode = igc_fc_default; |
6920 | |
6921 | /* By default, support wake on port A */ |
6922 | adapter->flags |= IGC_FLAG_WOL_SUPPORTED; |
6923 | |
6924 | /* initialize the wol settings based on the eeprom settings */ |
6925 | if (adapter->flags & IGC_FLAG_WOL_SUPPORTED) |
6926 | adapter->wol |= IGC_WUFC_MAG; |
6927 | |
6928 | device_set_wakeup_enable(&adapter->pdev->dev, |
6929 | adapter->flags & IGC_FLAG_WOL_SUPPORTED); |
6930 | |
6931 | igc_ptp_init(adapter); |
6932 | |
6933 | igc_tsn_clear_schedule(adapter); |
6934 | |
6935 | /* reset the hardware with the new settings */ |
6936 | igc_reset(adapter); |
6937 | |
6938 | /* let the f/w know that the h/w is now under the control of the |
6939 | * driver. |
6940 | */ |
6941 | igc_get_hw_control(adapter); |
6942 | |
6943 | strscpy(netdev->name, "eth%d", sizeof(netdev->name)); |
6944 | err = register_netdev(netdev); |
6945 | if (err) |
6946 | goto err_register; |
6947 | |
6948 | /* carrier off reporting is important to ethtool even BEFORE open */ |
6949 | netif_carrier_off(netdev); |
6950 | |
6951 | /* Check if Media Autosense is enabled */ |
6952 | adapter->ei = *ei; |
6953 | |
6954 | /* print pcie link status and MAC address */ |
6955 | pcie_print_link_status(pdev); |
6956 | netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr); |
6957 | |
6958 | dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); |
6959 | /* Disable EEE for internal PHY devices */ |
6960 | hw->dev_spec._base.eee_enable = false; |
6961 | adapter->flags &= ~IGC_FLAG_EEE; |
6962 | igc_set_eee_i225(hw, false, false, false); |
6963 | |
6964 | pm_runtime_put_noidle(&pdev->dev); |
6965 | |
6966 | if (IS_ENABLED(CONFIG_IGC_LEDS)) { |
6967 | err = igc_led_setup(adapter); |
6968 | if (err) |
6969 | goto err_register; |
6970 | } |
6971 | |
6972 | return 0; |
6973 | |
6974 | err_register: |
6975 | igc_release_hw_control(adapter); |
6976 | err_eeprom: |
6977 | if (!igc_check_reset_block(hw)) |
6978 | igc_reset_phy(hw); |
6979 | err_sw_init: |
6980 | igc_clear_interrupt_scheme(adapter); |
6981 | iounmap(adapter->io_addr); |
6982 | err_ioremap: |
6983 | free_netdev(netdev); |
6984 | err_alloc_etherdev: |
6985 | pci_release_mem_regions(pdev); |
6986 | err_pci_reg: |
6987 | err_dma: |
6988 | pci_disable_device(pdev); |
6989 | return err; |
6990 | } |
6991 | |
6992 | /** |
6993 | * igc_remove - Device Removal Routine |
6994 | * @pdev: PCI device information struct |
6995 | * |
6996 | * igc_remove is called by the PCI subsystem to alert the driver |
6997 | * that it should release a PCI device. This could be caused by a |
6998 | * Hot-Plug event, or because the driver is going to be removed from |
6999 | * memory. |
7000 | */ |
7001 | static void igc_remove(struct pci_dev *pdev) |
7002 | { |
7003 | struct net_device *netdev = pci_get_drvdata(pdev); |
7004 | struct igc_adapter *adapter = netdev_priv(netdev); |
7005 | |
7006 | pm_runtime_get_noresume(&pdev->dev); |
7007 | |
7008 | igc_flush_nfc_rules(adapter); |
7009 | |
7010 | igc_ptp_stop(adapter); |
7011 | |
7012 | pci_disable_ptm(pdev); |
7013 | pci_clear_master(pdev); |
7014 | |
7015 | set_bit(__IGC_DOWN, &adapter->state); |
7016 | |
7017 | del_timer_sync(&adapter->watchdog_timer); |
7018 | del_timer_sync(&adapter->phy_info_timer); |
7019 | |
7020 | cancel_work_sync(&adapter->reset_task); |
7021 | cancel_work_sync(&adapter->watchdog_task); |
7022 | hrtimer_cancel(&adapter->hrtimer); |
7023 | |
7024 | /* Release control of h/w to f/w. If f/w is AMT enabled, this |
7025 | * would have already happened in close and is redundant. |
7026 | */ |
7027 | igc_release_hw_control(adapter); |
7028 | unregister_netdev(netdev); |
7029 | |
7030 | igc_clear_interrupt_scheme(adapter); |
7031 | pci_iounmap(pdev, adapter->io_addr); |
7032 | pci_release_mem_regions(pdev); |
7033 | |
7034 | free_netdev(netdev); |
7035 | |
7036 | pci_disable_device(pdev); |
7037 | } |
7038 | |
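| /* Common suspend/shutdown path: close the interface, program the wake-up |
| * filter control (WUFC) bits requested by WoL or runtime PM, and decide |
| * whether the PHY must stay powered so those wake events can be seen. |
| */ |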
7039 | static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake, |
7040 | bool runtime) |
7041 | { |
7042 | struct net_device *netdev = pci_get_drvdata(pdev); |
7043 | struct igc_adapter *adapter = netdev_priv(netdev); |
7044 | u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol; |
7045 | struct igc_hw *hw = &adapter->hw; |
7046 | u32 ctrl, rctl, status; |
7047 | bool wake; |
7048 | |
7049 | rtnl_lock(); |
7050 | netif_device_detach(netdev); |
7051 | |
7052 | if (netif_running(netdev)) |
7053 | __igc_close(netdev, true); |
7054 | |
7055 | igc_ptp_suspend(adapter); |
7056 | |
7057 | igc_clear_interrupt_scheme(adapter); |
7058 | rtnl_unlock(); |
7059 | |
7060 | status = rd32(IGC_STATUS); |
7061 | if (status & IGC_STATUS_LU) |
7062 | wufc &= ~IGC_WUFC_LNKC; |
7063 | |
7064 | if (wufc) { |
7065 | igc_setup_rctl(adapter); |
7066 | igc_set_rx_mode(netdev); |
7067 | |
7068 | /* turn on all-multi mode if wake on multicast is enabled */ |
7069 | if (wufc & IGC_WUFC_MC) { |
7070 | rctl = rd32(IGC_RCTL); |
7071 | rctl |= IGC_RCTL_MPE; |
7072 | wr32(IGC_RCTL, rctl); |
7073 | } |
7074 | |
7075 | ctrl = rd32(IGC_CTRL); |
7076 | ctrl |= IGC_CTRL_ADVD3WUC; |
7077 | wr32(IGC_CTRL, ctrl); |
7078 | |
7079 | /* Allow time for pending master requests to run */ |
7080 | igc_disable_pcie_master(hw); |
7081 | |
7082 | wr32(IGC_WUC, IGC_WUC_PME_EN); |
7083 | wr32(IGC_WUFC, wufc); |
7084 | } else { |
7085 | wr32(IGC_WUC, 0); |
7086 | wr32(IGC_WUFC, 0); |
7087 | } |
7088 | |
7089 | wake = wufc || adapter->en_mng_pt; |
7090 | if (!wake) |
7091 | igc_power_down_phy_copper_base(&adapter->hw); |
7092 | else |
7093 | igc_power_up_link(adapter); |
7094 | |
7095 | if (enable_wake) |
7096 | *enable_wake = wake; |
7097 | |
7098 | /* Release control of h/w to f/w. If f/w is AMT enabled, this |
7099 | * would have already happened in close and is redundant. |
7100 | */ |
7101 | igc_release_hw_control(adapter); |
7102 | |
7103 | pci_disable_device(pdev); |
7104 | |
7105 | return 0; |
7106 | } |
7107 | |
7108 | #ifdef CONFIG_PM |
7109 | static int __maybe_unused igc_runtime_suspend(struct device *dev) |
7110 | { |
7111 | return __igc_shutdown(to_pci_dev(dev), NULL, 1); |
7112 | } |
7113 | |
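| /* Hand the packet that woke the system up to the network stack. The |
| * hardware keeps at most IGC_WUPM_BYTES of it in the Wake Up Packet |
| * Memory, so longer packets are skipped rather than delivered truncated. |
| */ |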
7114 | static void igc_deliver_wake_packet(struct net_device *netdev) |
7115 | { |
7116 | struct igc_adapter *adapter = netdev_priv(netdev); |
7117 | struct igc_hw *hw = &adapter->hw; |
7118 | struct sk_buff *skb; |
7119 | u32 wupl; |
7120 | |
7121 | wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK; |
7122 | |
7123 | /* WUPM stores only the first 128 bytes of the wake packet. |
7124 | * Read the packet only if we have the whole thing. |
7125 | */ |
7126 | if (wupl == 0 || wupl > IGC_WUPM_BYTES) |
7127 | return; |
7128 | |
7129 | skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES); |
7130 | if (!skb) |
7131 | return; |
7132 | |
7133 | skb_put(skb, wupl); |
7134 | |
7135 | /* Ensure reads are 32-bit aligned */ |
7136 | wupl = roundup(wupl, 4); |
7137 | |
7138 | memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl); |
7139 | |
7140 | skb->protocol = eth_type_trans(skb, netdev); |
7141 | netif_rx(skb); |
7142 | } |
7143 | |
7144 | static int __maybe_unused igc_resume(struct device *dev) |
7145 | { |
7146 | struct pci_dev *pdev = to_pci_dev(dev); |
7147 | struct net_device *netdev = pci_get_drvdata(pdev); |
7148 | struct igc_adapter *adapter = netdev_priv(netdev); |
7149 | struct igc_hw *hw = &adapter->hw; |
7150 | u32 err, val; |
7151 | |
7152 | pci_set_power_state(pdev, PCI_D0); |
7153 | pci_restore_state(pdev); |
7154 | pci_save_state(pdev); |
7155 | |
7156 | if (!pci_device_is_present(pdev)) |
7157 | return -ENODEV; |
7158 | err = pci_enable_device_mem(pdev); |
7159 | if (err) { |
7160 | netdev_err(netdev, "Cannot enable PCI device from suspend\n"); |
7161 | return err; |
7162 | } |
7163 | pci_set_master(pdev); |
7164 | |
7165 | pci_enable_wake(pdev, PCI_D3hot, 0); |
7166 | pci_enable_wake(pdev, PCI_D3cold, 0); |
7167 | |
7168 | if (igc_init_interrupt_scheme(adapter, true)) { |
7169 | netdev_err(netdev, "Unable to allocate memory for queues\n"); |
7170 | return -ENOMEM; |
7171 | } |
7172 | |
7173 | igc_reset(adapter); |
7174 | |
7175 | /* let the f/w know that the h/w is now under the control of the |
7176 | * driver. |
7177 | */ |
7178 | igc_get_hw_control(adapter); |
7179 | |
7180 | val = rd32(IGC_WUS); |
7181 | if (val & WAKE_PKT_WUS) |
7182 | igc_deliver_wake_packet(netdev); |
7183 | |
7184 | wr32(IGC_WUS, ~0); |
7185 | |
7186 | rtnl_lock(); |
7187 | if (!err && netif_running(netdev)) |
7188 | err = __igc_open(netdev, true); |
7189 | |
7190 | if (!err) |
7191 | netif_device_attach(netdev); |
7192 | rtnl_unlock(); |
7193 | |
7194 | return err; |
7195 | } |
7196 | |
7197 | static int __maybe_unused igc_runtime_resume(struct device *dev) |
7198 | { |
7199 | return igc_resume(dev); |
7200 | } |
7201 | |
7202 | static int __maybe_unused igc_suspend(struct device *dev) |
7203 | { |
7204 | return __igc_shutdown(to_pci_dev(dev), NULL, 0); |
7205 | } |
7206 | |
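| /* Runtime-idle callback: always return -EBUSY so the PM core never |
| * suspends the device immediately; when there is no link, a delayed |
| * runtime suspend is scheduled a few seconds out instead. |
| */ |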
7207 | static int __maybe_unused igc_runtime_idle(struct device *dev) |
7208 | { |
7209 | struct net_device *netdev = dev_get_drvdata(dev); |
7210 | struct igc_adapter *adapter = netdev_priv(netdev); |
7211 | |
7212 | if (!igc_has_link(adapter)) |
7213 | pm_schedule_suspend(dev, MSEC_PER_SEC * 5); |
7214 | |
7215 | return -EBUSY; |
7216 | } |
7217 | #endif /* CONFIG_PM */ |
7218 | |
7219 | static void igc_shutdown(struct pci_dev *pdev) |
7220 | { |
7221 | bool wake; |
7222 | |
7223 | __igc_shutdown(pdev, &wake, 0); |
7224 | |
7225 | if (system_state == SYSTEM_POWER_OFF) { |
7226 | pci_wake_from_d3(pdev, wake); |
7227 | pci_set_power_state(pdev, PCI_D3hot); |
7228 | } |
7229 | } |
7230 | |
7231 | /** |
7232 | * igc_io_error_detected - called when PCI error is detected |
7233 | * @pdev: Pointer to PCI device |
7234 | * @state: The current PCI connection state |
7235 | * |
7236 | * This function is called after a PCI bus error affecting |
7237 | * this device has been detected. |
7238 | **/ |
7239 | static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev, |
7240 | pci_channel_state_t state) |
7241 | { |
7242 | struct net_device *netdev = pci_get_drvdata(pdev); |
7243 | struct igc_adapter *adapter = netdev_priv(netdev); |
7244 | |
7245 | netif_device_detach(netdev); |
7246 | |
7247 | if (state == pci_channel_io_perm_failure) |
7248 | return PCI_ERS_RESULT_DISCONNECT; |
7249 | |
7250 | if (netif_running(netdev)) |
7251 | igc_down(adapter); |
7252 | pci_disable_device(pdev); |
7253 | |
7254 | /* Request a slot reset. */ |
7255 | return PCI_ERS_RESULT_NEED_RESET; |
7256 | } |
7257 | |
7258 | /** |
7259 | * igc_io_slot_reset - called after the PCI bus has been reset. |
7260 | * @pdev: Pointer to PCI device |
7261 | * |
7262 | * Restart the card from scratch, as if from a cold-boot. Implementation |
7263 | * resembles the first-half of the igc_resume routine. |
7264 | **/ |
7265 | static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev) |
7266 | { |
7267 | struct net_device *netdev = pci_get_drvdata(pdev); |
7268 | struct igc_adapter *adapter = netdev_priv(netdev); |
7269 | struct igc_hw *hw = &adapter->hw; |
7270 | pci_ers_result_t result; |
7271 | |
7272 | if (pci_enable_device_mem(pdev)) { |
7273 | netdev_err(netdev, "Could not re-enable PCI device after reset\n"); |
7274 | result = PCI_ERS_RESULT_DISCONNECT; |
7275 | } else { |
7276 | pci_set_master(pdev); |
7277 | pci_restore_state(pdev); |
7278 | pci_save_state(pdev); |
7279 | |
7280 | pci_enable_wake(pdev, PCI_D3hot, 0); |
7281 | pci_enable_wake(pdev, PCI_D3cold, 0); |
7282 | |
7283 | /* In case of PCI error, adapter loses its HW address |
7284 | * so we should re-assign it here. |
7285 | */ |
7286 | hw->hw_addr = adapter->io_addr; |
7287 | |
7288 | igc_reset(adapter); |
7289 | wr32(IGC_WUS, ~0); |
7290 | result = PCI_ERS_RESULT_RECOVERED; |
7291 | } |
7292 | |
7293 | return result; |
7294 | } |
7295 | |
7296 | /** |
7297 | * igc_io_resume - called when traffic can start to flow again. |
7298 | * @pdev: Pointer to PCI device |
7299 | * |
7300 | * This callback is called when the error recovery driver tells us that |
7301 | * it's OK to resume normal operation. Implementation resembles the |
7302 | * second-half of the igc_resume routine. |
7303 | */ |
7304 | static void igc_io_resume(struct pci_dev *pdev) |
7305 | { |
7306 | struct net_device *netdev = pci_get_drvdata(pdev); |
7307 | struct igc_adapter *adapter = netdev_priv(netdev); |
7308 | |
7309 | rtnl_lock(); |
7310 | if (netif_running(netdev)) { |
7311 | if (igc_open(netdev)) { |
7312 | netdev_err(netdev, "igc_open failed after reset\n"); |
| rtnl_unlock(); /* drop the lock taken above before bailing out */ |
7313 | return; |
7314 | } |
7315 | } |
7316 | |
7317 | netif_device_attach(netdev); |
7318 | |
7319 | /* let the f/w know that the h/w is now under the control of the |
7320 | * driver. |
7321 | */ |
7322 | igc_get_hw_control(adapter); |
7323 | rtnl_unlock(); |
7324 | } |
7325 | |
7326 | static const struct pci_error_handlers igc_err_handler = { |
7327 | .error_detected = igc_io_error_detected, |
7328 | .slot_reset = igc_io_slot_reset, |
7329 | .resume = igc_io_resume, |
7330 | }; |
7331 | |
7332 | #ifdef CONFIG_PM |
7333 | static const struct dev_pm_ops igc_pm_ops = { |
7334 | SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume) |
7335 | SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume, |
7336 | igc_runtime_idle) |
7337 | }; |
7338 | #endif |
7339 | |
7340 | static struct pci_driver igc_driver = { |
7341 | .name = igc_driver_name, |
7342 | .id_table = igc_pci_tbl, |
7343 | .probe = igc_probe, |
7344 | .remove = igc_remove, |
7345 | #ifdef CONFIG_PM |
7346 | .driver.pm = &igc_pm_ops, |
7347 | #endif |
7348 | .shutdown = igc_shutdown, |
7349 | .err_handler = &igc_err_handler, |
7350 | }; |
7351 | |
7352 | /** |
7353 | * igc_reinit_queues - reinitialize the interrupt scheme and queues |
7354 | * @adapter: pointer to adapter structure |
7355 | */ |
7356 | int igc_reinit_queues(struct igc_adapter *adapter) |
7357 | { |
7358 | struct net_device *netdev = adapter->netdev; |
7359 | int err = 0; |
7360 | |
7361 | if (netif_running(netdev)) |
7362 | igc_close(netdev); |
7363 | |
7364 | igc_reset_interrupt_capability(adapter); |
7365 | |
7366 | if (igc_init_interrupt_scheme(adapter, true)) { |
7367 | netdev_err(netdev, "Unable to allocate memory for queues\n"); |
7368 | return -ENOMEM; |
7369 | } |
7370 | |
7371 | if (netif_running(netdev)) |
7372 | err = igc_open(netdev); |
7373 | |
7374 | return err; |
7375 | } |
7376 | |
7377 | /** |
7378 | * igc_get_hw_dev - return device |
7379 | * @hw: pointer to hardware structure |
7380 | * |
7381 | * used by hardware layer to print debugging information |
7382 | */ |
7383 | struct net_device *igc_get_hw_dev(struct igc_hw *hw) |
7384 | { |
7385 | struct igc_adapter *adapter = hw->back; |
7386 | |
7387 | return adapter->netdev; |
7388 | } |
7389 | |
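| /* Stop the RX queue in hardware: clear the queue-enable bit and request |
| * a software flush (SWFLUSH) so outstanding descriptors are written back |
| * before the ring is cleaned. |
| */ |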
7390 | static void igc_disable_rx_ring_hw(struct igc_ring *ring) |
7391 | { |
7392 | struct igc_hw *hw = &ring->q_vector->adapter->hw; |
7393 | u8 idx = ring->reg_idx; |
7394 | u32 rxdctl; |
7395 | |
7396 | rxdctl = rd32(IGC_RXDCTL(idx)); |
7397 | rxdctl &= ~IGC_RXDCTL_QUEUE_ENABLE; |
7398 | rxdctl |= IGC_RXDCTL_SWFLUSH; |
7399 | wr32(IGC_RXDCTL(idx), rxdctl); |
7400 | } |
7401 | |
7402 | void igc_disable_rx_ring(struct igc_ring *ring) |
7403 | { |
7404 | igc_disable_rx_ring_hw(ring); |
7405 | igc_clean_rx_ring(ring); |
7406 | } |
7407 | |
7408 | void igc_enable_rx_ring(struct igc_ring *ring) |
7409 | { |
7410 | struct igc_adapter *adapter = ring->q_vector->adapter; |
7411 | |
7412 | igc_configure_rx_ring(adapter, ring); |
7413 | |
7414 | if (ring->xsk_pool) |
7415 | igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring)); |
7416 | else |
7417 | igc_alloc_rx_buffers(ring, igc_desc_unused(ring)); |
7418 | } |
7419 | |
7420 | void igc_disable_tx_ring(struct igc_ring *ring) |
7421 | { |
7422 | igc_disable_tx_ring_hw(ring); |
7423 | igc_clean_tx_ring(ring); |
7424 | } |
7425 | |
7426 | void igc_enable_tx_ring(struct igc_ring *ring) |
7427 | { |
7428 | struct igc_adapter *adapter = ring->q_vector->adapter; |
7429 | |
7430 | igc_configure_tx_ring(adapter, ring); |
7431 | } |
7432 | |
7433 | /** |
7434 | * igc_init_module - Driver Registration Routine |
7435 | * |
7436 | * igc_init_module is the first routine called when the driver is |
7437 | * loaded. All it does is register with the PCI subsystem. |
7438 | */ |
7439 | static int __init igc_init_module(void) |
7440 | { |
7441 | int ret; |
7442 | |
7443 | pr_info("%s\n", igc_driver_string); |
7444 | pr_info("%s\n", igc_copyright); |
7445 | |
7446 | ret = pci_register_driver(&igc_driver); |
7447 | return ret; |
7448 | } |
7449 | |
7450 | module_init(igc_init_module); |
7451 | |
7452 | /** |
7453 | * igc_exit_module - Driver Exit Cleanup Routine |
7454 | * |
7455 | * igc_exit_module is called just before the driver is removed |
7456 | * from memory. |
7457 | */ |
7458 | static void __exit igc_exit_module(void) |
7459 | { |
7460 | pci_unregister_driver(&igc_driver); |
7461 | } |
7462 | |
7463 | module_exit(igc_exit_module); |
7464 | /* igc_main.c */ |
7465 |