/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2007 - 2018 Intel Corporation. */

/* Linux PRO/1000 Ethernet Driver main header file */

#ifndef _IGB_H_
#define _IGB_H_

#include "e1000_mac.h"
#include "e1000_82575.h"

#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/pci.h>
#include <linux/mdio.h>
#include <linux/lockdep.h>

#include <net/xdp.h>
#include <net/xdp_sock_drv.h>

struct igb_adapter;

#define E1000_PCS_CFG_IGN_SD 1

/* Interrupt defines */
#define IGB_START_ITR 648 /* ~6000 ints/sec */
#define IGB_4K_ITR 980
#define IGB_20K_ITR 196
#define IGB_70K_ITR 56

/* TX/RX descriptor defines */
#define IGB_DEFAULT_TXD 256
#define IGB_DEFAULT_TX_WORK 128
#define IGB_MIN_TXD 64
#define IGB_MAX_TXD 4096

#define IGB_DEFAULT_RXD 256
#define IGB_MIN_RXD 64
#define IGB_MAX_RXD 4096

#define IGB_DEFAULT_ITR 3 /* dynamic */
#define IGB_MAX_ITR_USECS 10000
#define IGB_MIN_ITR_USECS 10
#define NON_Q_VECTORS 1
#define MAX_Q_VECTORS 8
#define MAX_MSIX_ENTRIES 10

/* Transmit and receive queues */
#define IGB_MAX_RX_QUEUES 8
#define IGB_MAX_RX_QUEUES_82575 4
#define IGB_MAX_RX_QUEUES_I211 2
#define IGB_MAX_TX_QUEUES 8
#define IGB_MAX_VF_MC_ENTRIES 30
#define IGB_MAX_VF_FUNCTIONS 8
#define IGB_MAX_VFTA_ENTRIES 128
#define IGB_82576_VF_DEV_ID 0x10CA
#define IGB_I350_VF_DEV_ID 0x1520

/* NVM version defines */
#define IGB_MAJOR_MASK 0xF000
#define IGB_MINOR_MASK 0x0FF0
#define IGB_BUILD_MASK 0x000F
#define IGB_COMB_VER_MASK 0x00FF
#define IGB_MAJOR_SHIFT 12
#define IGB_MINOR_SHIFT 4
#define IGB_COMB_VER_SHFT 8
#define IGB_NVM_VER_INVALID 0xFFFF
#define IGB_ETRACK_SHIFT 16
#define NVM_ETRACK_WORD 0x0042
#define NVM_COMB_VER_OFF 0x0083
#define NVM_COMB_VER_PTR 0x003d

/* Transmit and receive latency (for PTP timestamps) */
#define IGB_I210_TX_LATENCY_10 9542
#define IGB_I210_TX_LATENCY_100 1024
#define IGB_I210_TX_LATENCY_1000 178
#define IGB_I210_RX_LATENCY_10 20662
#define IGB_I210_RX_LATENCY_100 2213
#define IGB_I210_RX_LATENCY_1000 448

/* XDP */
#define IGB_XDP_PASS 0
#define IGB_XDP_CONSUMED BIT(0)
#define IGB_XDP_TX BIT(1)
#define IGB_XDP_REDIR BIT(2)
#define IGB_XDP_EXIT BIT(3)

struct vf_data_storage {
	unsigned char vf_mac_addresses[ETH_ALEN];
	u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
	u16 num_vf_mc_hashes;
	u32 flags;
	unsigned long last_nack;
	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
	u16 pf_qos;
	u16 tx_rate;
	bool spoofchk_enabled;
	bool trusted;
};

/* Number of unicast MAC filters reserved for the PF in the RAR registers */
#define IGB_PF_MAC_FILTERS_RESERVED 3

struct vf_mac_filter {
	struct list_head l;
	int vf;
	bool free;
	u8 vf_mac[ETH_ALEN];
};

#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */
#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */
#define IGB_VF_FLAG_PF_SET_MAC 0x00000008 /* PF has set MAC address */

/* RX descriptor control thresholds.
 * PTHRESH - MAC will consider prefetch if it has fewer than this number of
 *           descriptors available in its onboard memory.
 *           Setting this to 0 disables RX descriptor prefetch.
 * HTHRESH - MAC will only prefetch if there are at least this many descriptors
 *           available in host memory.
 *           If PTHRESH is 0, this should also be 0.
 * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
 *           descriptors until either it has this many to write back, or the
 *           ITR timer expires.
 */
#define IGB_RX_PTHRESH ((hw->mac.type == e1000_i354) ? 12 : 8)
#define IGB_RX_HTHRESH 8
#define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8)
#define IGB_TX_HTHRESH 1
#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \
			 (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 4)
#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
			 (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 16)

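/* Illustrative sketch, not part of the original header: the driver is
 * expected to fold these thresholds into the per-ring RXDCTL/TXDCTL values
 * when configuring a ring (see igb_configure_rx_ring()/igb_configure_tx_ring()
 * declared below), roughly along these lines. The 8/16 bit positions are an
 * assumption for illustration only:
 *
 *	rxdctl |= IGB_RX_PTHRESH;
 *	rxdctl |= IGB_RX_HTHRESH << 8;
 *	rxdctl |= IGB_RX_WTHRESH << 16;
 */
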
/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522

#define IGB_ETH_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))

/* Supported Rx Buffer Sizes */
#define IGB_RXBUFFER_256 256
#define IGB_RXBUFFER_1536 1536
#define IGB_RXBUFFER_2048 2048
#define IGB_RXBUFFER_3072 3072
#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
#define IGB_TS_HDR_LEN 16

/* Attempt to maximize the headroom available for incoming frames. We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame. This leaves us with 512 bytes of room. From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative. In these cases we should fall back to the 3K
 *	 buffers.
 */
#if (PAGE_SIZE < 8192)
#define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_1536 - NET_IP_ALIGN)
#define IGB_2K_TOO_SMALL_WITH_PADDING \
	((NET_SKB_PAD + IGB_TS_HDR_LEN + IGB_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048))

static inline int igb_compute_pad(int rx_buf_len)
{
	int page_size, pad_size;

	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

	return pad_size;
}

static inline int igb_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (IGB_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = IGB_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = IGB_RXBUFFER_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return igb_compute_pad(rx_buf_len);
}

#define IGB_SKB_PAD igb_skb_pad()
#else
#define IGB_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif

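/* Worked example (illustrative only; the exact result depends on the kernel
 * configuration): with 4 KiB pages and a 2K buffer that is large enough,
 * igb_skb_pad() starts from IGB_RXBUFFER_1536, subtracts NET_IP_ALIGN
 * (1534 bytes of frame data), rounds that up to the 2048-byte half page in
 * igb_compute_pad(), and returns whatever SKB_WITH_OVERHEAD(2048) leaves
 * beyond those 1534 bytes as the headroom used for IGB_SKB_PAD.
 */
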
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */

#define IGB_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

#define AUTO_ALL_MODES 0
#define IGB_EEPROM_APME 0x0400

#ifndef IGB_MASTER_SLAVE
/* Switch to override PHY master/slave setting */
#define IGB_MASTER_SLAVE e1000_ms_hw_default
#endif

#define IGB_MNG_VLAN_NONE -1

enum igb_tx_flags {
	/* cmd_type flags */
	IGB_TX_FLAGS_VLAN = 0x01,
	IGB_TX_FLAGS_TSO = 0x02,
	IGB_TX_FLAGS_TSTAMP = 0x04,

	/* olinfo flags */
	IGB_TX_FLAGS_IPV4 = 0x10,
	IGB_TX_FLAGS_CSUM = 0x20,
};

/* VLAN info */
#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT 16

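/* Illustrative sketch (an assumption, not verbatim driver code): the VLAN tag
 * rides in the upper 16 bits of tx_flags, so the transmit path can stash and
 * later recover it roughly like this:
 *
 *	tx_flags |= skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT;
 *	...
 *	vlan = (tx_flags & IGB_TX_FLAGS_VLAN_MASK) >> IGB_TX_FLAGS_VLAN_SHIFT;
 */
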
/* The largest size we can write to the descriptor is 65535. In order to
 * maintain a power of two alignment we have to limit ourselves to 32K.
 */
#define IGB_MAX_TXD_PWR 15
#define IGB_MAX_DATA_PER_TXD (1u << IGB_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)

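/* Example (arithmetic only): IGB_MAX_DATA_PER_TXD is 32K, so a 60,000-byte
 * fragment needs TXD_USE_COUNT(60000) = DIV_ROUND_UP(60000, 32768) = 2 data
 * descriptors. DESC_NEEDED adds headroom on top of MAX_SKB_FRAGS, e.g. for a
 * context descriptor and the gap kept between ring head and tail (the exact
 * accounting behind the "+ 4" is an assumption here).
 */
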
/* EEPROM byte offsets */
#define IGB_SFF_8472_SWAP 0x5C
#define IGB_SFF_8472_COMP 0x5E

/* Bitmasks */
#define IGB_SFF_ADDRESSING_MODE 0x4
#define IGB_SFF_8472_UNSUP 0x00

/* TX resources are shared between XDP and netstack
 * and we need to tag the buffer type to distinguish them
 */
enum igb_tx_buf_type {
	IGB_TYPE_SKB = 0,
	IGB_TYPE_XDP,
	IGB_TYPE_XSK
};

/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer
 */
struct igb_tx_buffer {
	union e1000_adv_tx_desc *next_to_watch;
	unsigned long time_stamp;
	enum igb_tx_buf_type type;
	union {
		struct sk_buff *skb;
		struct xdp_frame *xdpf;
	};
	unsigned int bytecount;
	u16 gso_segs;
	__be16 protocol;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct igb_rx_buffer {
	dma_addr_t dma;
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
#else
	__u16 page_offset;
#endif
	__u16 pagecnt_bias;
};

struct igb_tx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 restart_queue;
	u64 restart_queue2;
};

struct igb_rx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 drops;
	u64 csum_err;
	u64 alloc_failed;
};

struct igb_ring_container {
	struct igb_ring *ring; /* pointer to linked list of rings */
	unsigned int total_bytes; /* total bytes processed this int */
	unsigned int total_packets; /* total packets processed this int */
	u16 work_limit; /* total work allowed per interrupt */
	u8 count; /* total number of rings in vector */
	u8 itr; /* current ITR setting for ring */
};

struct igb_ring {
	struct igb_q_vector *q_vector; /* backlink to q_vector */
	struct net_device *netdev; /* back pointer to net_device */
	struct bpf_prog *xdp_prog;
	struct device *dev; /* device pointer for dma mapping */
	union { /* array of buffer info structs */
		struct igb_tx_buffer *tx_buffer_info;
		struct igb_rx_buffer *rx_buffer_info;
		struct xdp_buff **rx_buffer_info_zc;
	};
	void *desc; /* descriptor ring memory */
	unsigned long flags; /* ring specific flags */
	void __iomem *tail; /* pointer to ring tail register */
	dma_addr_t dma; /* phys address of the ring */
	unsigned int size; /* length of desc. ring in bytes */

	u16 count; /* number of desc. in the ring */
	u8 queue_index; /* logical index of the ring */
	u8 reg_idx; /* physical index of the ring */
	bool launchtime_enable; /* true if LaunchTime is enabled */
	bool cbs_enable; /* indicates if CBS is enabled */
	s32 idleslope; /* idleSlope in kbps */
	s32 sendslope; /* sendSlope in kbps */
	s32 hicredit; /* hiCredit in bytes */
	s32 locredit; /* loCredit in bytes */

	/* everything past this point are written often */
	u16 next_to_clean;
	u16 next_to_use;
	u16 next_to_alloc;

	union {
		/* TX */
		struct {
			struct igb_tx_queue_stats tx_stats;
			struct u64_stats_sync tx_syncp;
			struct u64_stats_sync tx_syncp2;
		};
		/* RX */
		struct {
			struct sk_buff *skb;
			struct igb_rx_queue_stats rx_stats;
			struct u64_stats_sync rx_syncp;
		};
	};
	struct xdp_rxq_info xdp_rxq;
	struct xsk_buff_pool *xsk_pool;
} ____cacheline_internodealigned_in_smp;

struct igb_q_vector {
	struct igb_adapter *adapter; /* backlink */
	int cpu; /* CPU for DCA */
	u32 eims_value; /* EIMS mask value */

	u16 itr_val;
	u8 set_itr;
	void __iomem *itr_register;

	struct igb_ring_container rx, tx;

	struct napi_struct napi;
	struct rcu_head rcu; /* to avoid race with update stats on free */
	char name[IFNAMSIZ + 9];

	/* for dynamic allocation of rings associated with this q_vector */
	struct igb_ring ring[] ____cacheline_internodealigned_in_smp;
};

enum e1000_ring_flags_t {
	IGB_RING_FLAG_RX_3K_BUFFER,
	IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,
	IGB_RING_FLAG_RX_SCTP_CSUM,
	IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
	IGB_RING_FLAG_TX_CTX_IDX,
	IGB_RING_FLAG_TX_DETECT_HANG,
	IGB_RING_FLAG_TX_DISABLED,
	IGB_RING_FLAG_RX_ALLOC_FAILED,
};

#define ring_uses_large_buffer(ring) \
	test_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
#define set_ring_uses_large_buffer(ring) \
	set_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
#define clear_ring_uses_large_buffer(ring) \
	clear_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)

#define ring_uses_build_skb(ring) \
	test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
#define set_ring_build_skb_enabled(ring) \
	set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
#define clear_ring_build_skb_enabled(ring) \
	clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)

static inline unsigned int igb_rx_bufsz(struct igb_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring_uses_large_buffer(ring))
		return IGB_RXBUFFER_3072;

	if (ring_uses_build_skb(ring))
		return IGB_MAX_FRAME_BUILD_SKB;
#endif
	return IGB_RXBUFFER_2048;
}

static inline unsigned int igb_rx_pg_order(struct igb_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring_uses_large_buffer(ring))
		return 1;
#endif
	return 0;
}

#define igb_rx_pg_size(_ring) (PAGE_SIZE << igb_rx_pg_order(_ring))

#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)

#define IGB_RX_DESC(R, i) \
	(&(((union e1000_adv_rx_desc *)((R)->desc))[i]))
#define IGB_TX_DESC(R, i) \
	(&(((union e1000_adv_tx_desc *)((R)->desc))[i]))
#define IGB_TX_CTXTDESC(R, i) \
	(&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i]))

/* igb_test_staterr - tests bits within Rx descriptor status and error fields */
static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc,
				      const u32 stat_err_bits)
{
	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}

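/* Illustrative use (a sketch, assuming the usual e1000 descriptor defines
 * such as E1000_RXD_STAT_DD): the Rx cleanup path can check the Descriptor
 * Done bit of a just-fetched descriptor before touching its buffer:
 *
 *	union e1000_adv_rx_desc *rx_desc = IGB_RX_DESC(rx_ring, ntc);
 *
 *	if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
 *		break;
 */
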
/* igb_desc_unused - calculate if we have unused descriptors */
static inline int igb_desc_unused(struct igb_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}

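/* Illustrative use (a sketch, not verbatim driver code): once an Rx ring has
 * been configured, every currently unused descriptor can be given a fresh
 * buffer in a single call:
 *
 *	igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
 */
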
#ifdef CONFIG_IGB_HWMON

#define IGB_HWMON_TYPE_LOC 0
#define IGB_HWMON_TYPE_TEMP 1
#define IGB_HWMON_TYPE_CAUTION 2
#define IGB_HWMON_TYPE_MAX 3

struct hwmon_attr {
	struct device_attribute dev_attr;
	struct e1000_hw *hw;
	struct e1000_thermal_diode_data *sensor;
	char name[12];
};

struct hwmon_buff {
	struct attribute_group group;
	const struct attribute_group *groups[2];
	struct attribute *attrs[E1000_MAX_SENSORS * 4 + 1];
	struct hwmon_attr hwmon_list[E1000_MAX_SENSORS * 4];
	unsigned int n_hwmon;
};
#endif

/* The number of L2 ether-type filter registers, Index 3 is reserved
 * for PTP 1588 timestamp
 */
#define MAX_ETYPE_FILTER (4 - 1)
/* ETQF filter list: one static filter per filter consumer. This is
 * to avoid filter collisions later. Add new filters here!!
 *
 * Current filters: Filter 3
 */
#define IGB_ETQF_FILTER_1588 3

#define IGB_N_EXTTS 2
#define IGB_N_PEROUT 2
#define IGB_N_SDP 4
#define IGB_RETA_SIZE 128

enum igb_filter_match_flags {
	IGB_FILTER_FLAG_ETHER_TYPE = 0x1,
	IGB_FILTER_FLAG_VLAN_TCI = 0x2,
	IGB_FILTER_FLAG_SRC_MAC_ADDR = 0x4,
	IGB_FILTER_FLAG_DST_MAC_ADDR = 0x8,
};

#define IGB_MAX_RXNFC_FILTERS 16

/* RX network flow classification data structure */
struct igb_nfc_input {
	/* Byte layout in order, all values with MSB first:
	 * match_flags - 1 byte
	 * etype - 2 bytes
	 * vlan_tci - 2 bytes
	 */
	u8 match_flags;
	__be16 etype;
	__be16 vlan_tci;
	u8 src_addr[ETH_ALEN];
	u8 dst_addr[ETH_ALEN];
};

struct igb_nfc_filter {
	struct hlist_node nfc_node;
	struct igb_nfc_input filter;
	unsigned long cookie;
	u16 etype_reg_index;
	u16 sw_idx;
	u16 action;
};

struct igb_mac_addr {
	u8 addr[ETH_ALEN];
	u8 queue;
	u8 state; /* bitmask */
};

#define IGB_MAC_STATE_DEFAULT 0x1
#define IGB_MAC_STATE_IN_USE 0x2
#define IGB_MAC_STATE_SRC_ADDR 0x4
#define IGB_MAC_STATE_QUEUE_STEERING 0x8

/* board specific private data structure */
struct igb_adapter {
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

	struct net_device *netdev;
	struct bpf_prog *xdp_prog;

	unsigned long state;
	unsigned int flags;

	unsigned int num_q_vectors;
	struct msix_entry msix_entries[MAX_MSIX_ENTRIES];

	/* Interrupt Throttle Rate */
	u32 rx_itr_setting;
	u32 tx_itr_setting;
	u16 tx_itr;
	u16 rx_itr;

	/* TX */
	u16 tx_work_limit;
	u32 tx_timeout_count;
	int num_tx_queues;
	struct igb_ring *tx_ring[16];

	/* RX */
	int num_rx_queues;
	struct igb_ring *rx_ring[16];

	u32 max_frame_size;
	u32 min_frame_size;

	struct timer_list watchdog_timer;
	struct timer_list phy_info_timer;

	u16 mng_vlan_id;
	u32 bd_number;
	u32 wol;
	u32 en_mng_pt;
	u16 link_speed;
	u16 link_duplex;

	u8 __iomem *io_addr; /* Mainly for iounmap use */

	struct work_struct reset_task;
	struct work_struct watchdog_task;
	bool fc_autoneg;
	u8 tx_timeout_factor;
	struct timer_list blink_timer;
	unsigned long led_status;

	/* OS defined structs */
	struct pci_dev *pdev;

	spinlock_t stats64_lock;
	struct rtnl_link_stats64 stats64;

	/* structs defined in e1000_hw.h */
	struct e1000_hw hw;
	struct e1000_hw_stats stats;
	struct e1000_phy_info phy_info;

	u32 test_icr;
	struct igb_ring test_tx_ring;
	struct igb_ring test_rx_ring;

	int msg_enable;

	struct igb_q_vector *q_vector[MAX_Q_VECTORS];
	u32 eims_enable_mask;
	u32 eims_other;

	/* to not mess up cache alignment, always add to the bottom */
	u16 tx_ring_count;
	u16 rx_ring_count;
	unsigned int vfs_allocated_count;
	struct vf_data_storage *vf_data;
	int vf_rate_link_speed;
	u32 rss_queues;
	u32 wvbr;
	u32 *shadow_vfta;

	struct ptp_clock *ptp_clock;
	struct ptp_clock_info ptp_caps;
	struct delayed_work ptp_overflow_work;
	struct work_struct ptp_tx_work;
	struct sk_buff *ptp_tx_skb;
	struct hwtstamp_config tstamp_config;
	unsigned long ptp_tx_start;
	unsigned long last_rx_ptp_check;
	unsigned long last_rx_timestamp;
	unsigned int ptp_flags;
	spinlock_t tmreg_lock;
	struct cyclecounter cc;
	struct timecounter tc;
	u32 tx_hwtstamp_timeouts;
	u32 tx_hwtstamp_skipped;
	u32 rx_hwtstamp_cleared;
	bool pps_sys_wrap_on;

	struct ptp_pin_desc sdp_config[IGB_N_SDP];
	struct {
		struct timespec64 start;
		struct timespec64 period;
	} perout[IGB_N_PEROUT];

	char fw_version[48];
#ifdef CONFIG_IGB_HWMON
	struct hwmon_buff *igb_hwmon_buff;
	bool ets;
#endif
	struct i2c_algo_bit_data i2c_algo;
	struct i2c_adapter i2c_adap;
	struct i2c_client *i2c_client;
	u32 rss_indir_tbl_init;
	u8 rss_indir_tbl[IGB_RETA_SIZE];

	unsigned long link_check_timeout;
	int copper_tries;
	struct e1000_info ei;
	u16 eee_advert;

	/* RX network flow classification support */
	struct hlist_head nfc_filter_list;
	struct hlist_head cls_flower_list;
	unsigned int nfc_filter_count;
	/* lock for RX network flow classification filter */
	spinlock_t nfc_lock;
	bool etype_bitmap[MAX_ETYPE_FILTER];

	struct igb_mac_addr *mac_table;
	struct vf_mac_filter vf_macs;
	struct vf_mac_filter *vf_mac_list;
	/* lock for VF resources */
	spinlock_t vfs_lock;
};

/* flags controlling PTP/1588 function */
#define IGB_PTP_ENABLED BIT(0)
#define IGB_PTP_OVERFLOW_CHECK BIT(1)

#define IGB_FLAG_HAS_MSI BIT(0)
#define IGB_FLAG_DCA_ENABLED BIT(1)
#define IGB_FLAG_QUAD_PORT_A BIT(2)
#define IGB_FLAG_QUEUE_PAIRS BIT(3)
#define IGB_FLAG_DMAC BIT(4)
#define IGB_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
#define IGB_FLAG_RSS_FIELD_IPV6_UDP BIT(7)
#define IGB_FLAG_WOL_SUPPORTED BIT(8)
#define IGB_FLAG_NEED_LINK_UPDATE BIT(9)
#define IGB_FLAG_MEDIA_RESET BIT(10)
#define IGB_FLAG_MAS_CAPABLE BIT(11)
#define IGB_FLAG_MAS_ENABLE BIT(12)
#define IGB_FLAG_HAS_MSIX BIT(13)
#define IGB_FLAG_EEE BIT(14)
#define IGB_FLAG_VLAN_PROMISC BIT(15)
#define IGB_FLAG_RX_LEGACY BIT(16)
#define IGB_FLAG_FQTSS BIT(17)

/* Media Auto Sense */
#define IGB_MAS_ENABLE_0 0X0001
#define IGB_MAS_ENABLE_1 0X0002
#define IGB_MAS_ENABLE_2 0X0004
#define IGB_MAS_ENABLE_3 0X0008

/* DMA Coalescing defines */
#define IGB_MIN_TXPBSIZE 20408
#define IGB_TX_BUF_4096 4096
#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */

#define IGB_82576_TSYNC_SHIFT 19
enum e1000_state_t {
	__IGB_TESTING,
	__IGB_RESETTING,
	__IGB_DOWN,
	__IGB_PTP_TX_IN_PROGRESS,
};

enum igb_boards {
	board_82575,
};

extern char igb_driver_name[];

void igb_set_queue_napi(struct igb_adapter *adapter, int q_idx,
			struct napi_struct *napi);
int igb_xmit_xdp_ring(struct igb_adapter *adapter,
		      struct igb_ring *ring,
		      struct xdp_frame *xdpf);
int igb_open(struct net_device *netdev);
int igb_close(struct net_device *netdev);
int igb_up(struct igb_adapter *);
void igb_down(struct igb_adapter *);
void igb_reinit_locked(struct igb_adapter *);
void igb_reset(struct igb_adapter *);
int igb_reinit_queues(struct igb_adapter *);
void igb_write_rss_indir_tbl(struct igb_adapter *);
int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
int igb_setup_tx_resources(struct igb_ring *);
int igb_setup_rx_resources(struct igb_ring *);
void igb_free_tx_resources(struct igb_ring *);
void igb_free_rx_resources(struct igb_ring *);
void igb_clean_tx_ring(struct igb_ring *tx_ring);
void igb_clean_rx_ring(struct igb_ring *rx_ring);
void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
void igb_finalize_xdp(struct igb_adapter *adapter, unsigned int status);
void igb_update_rx_stats(struct igb_q_vector *q_vector, unsigned int packets,
			 unsigned int bytes);
void igb_setup_tctl(struct igb_adapter *);
void igb_setup_rctl(struct igb_adapter *);
void igb_setup_srrctl(struct igb_adapter *, struct igb_ring *);
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp);
void igb_process_skb_fields(struct igb_ring *rx_ring,
			    union e1000_adv_rx_desc *rx_desc,
			    struct sk_buff *skb);
void igb_alloc_rx_buffers(struct igb_ring *, u16);
void igb_update_stats(struct igb_adapter *);
bool igb_has_link(struct igb_adapter *adapter);
void igb_set_ethtool_ops(struct net_device *);
void igb_power_up_link(struct igb_adapter *);
void igb_set_fw_version(struct igb_adapter *);
void igb_ptp_init(struct igb_adapter *adapter);
void igb_ptp_stop(struct igb_adapter *adapter);
void igb_ptp_reset(struct igb_adapter *adapter);
void igb_ptp_suspend(struct igb_adapter *adapter);
void igb_ptp_rx_hang(struct igb_adapter *adapter);
void igb_ptp_tx_hang(struct igb_adapter *adapter);
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
			ktime_t *timestamp);
int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
unsigned int igb_get_max_rss_queues(struct igb_adapter *);
#ifdef CONFIG_IGB_HWMON
void igb_sysfs_exit(struct igb_adapter *adapter);
int igb_sysfs_init(struct igb_adapter *adapter);
#endif
static inline s32 igb_reset_phy(struct e1000_hw *hw)
{
	if (hw->phy.ops.reset)
		return hw->phy.ops.reset(hw);

	return 0;
}

static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
{
	if (hw->phy.ops.read_reg)
		return hw->phy.ops.read_reg(hw, offset, data);

	return 0;
}

static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
{
	if (hw->phy.ops.write_reg)
		return hw->phy.ops.write_reg(hw, offset, data);

	return 0;
}

static inline s32 igb_get_phy_info(struct e1000_hw *hw)
{
	if (hw->phy.ops.get_phy_info)
		return hw->phy.ops.get_phy_info(hw);

	return 0;
}

static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
{
	return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
}

/* This function assumes __netif_tx_lock is held by the caller. */
static inline void igb_xdp_ring_update_tail(struct igb_ring *ring)
{
	lockdep_assert_held(&txring_txq(ring)->_xmit_lock);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 */
	wmb();
	writel(ring->next_to_use, ring->tail);
}

static inline struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter)
{
	unsigned int r_idx = smp_processor_id();

	if (r_idx >= adapter->num_tx_queues)
		r_idx = r_idx % adapter->num_tx_queues;

	return adapter->tx_ring[r_idx];
}

static inline bool igb_xdp_is_enabled(struct igb_adapter *adapter)
{
	return !!READ_ONCE(adapter->xdp_prog);
}

int igb_add_filter(struct igb_adapter *adapter,
		   struct igb_nfc_filter *input);
int igb_erase_filter(struct igb_adapter *adapter,
		     struct igb_nfc_filter *input);

int igb_add_mac_steering_filter(struct igb_adapter *adapter,
				const u8 *addr, u8 queue, u8 flags);
int igb_del_mac_steering_filter(struct igb_adapter *adapter,
				const u8 *addr, u8 queue, u8 flags);

struct xsk_buff_pool *igb_xsk_pool(struct igb_adapter *adapter,
				   struct igb_ring *ring);
int igb_xsk_pool_setup(struct igb_adapter *adapter,
		       struct xsk_buff_pool *pool,
		       u16 qid);
bool igb_alloc_rx_buffers_zc(struct igb_ring *rx_ring,
			     struct xsk_buff_pool *xsk_pool, u16 count);
void igb_clean_rx_ring_zc(struct igb_ring *rx_ring);
int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector,
			struct xsk_buff_pool *xsk_pool, const int budget);
bool igb_xmit_zc(struct igb_ring *tx_ring, struct xsk_buff_pool *xsk_pool);
int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);

#endif /* _IGB_H_ */