/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_

#include "ice_type.h"

#define ICE_DFLT_IRQ_WORK 256
#define ICE_RXBUF_3072 3072
#define ICE_RXBUF_2048 2048
#define ICE_RXBUF_1664 1664
#define ICE_RXBUF_1536 1536
#define ICE_MAX_CHAINED_RX_BUFS 5
#define ICE_MAX_BUF_TXD 8
#define ICE_MIN_TX_LEN 17
#define ICE_MAX_FRAME_LEGACY_RX 8320

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define ICE_MAX_READ_REQ_SIZE 4096
#define ICE_MAX_DATA_PER_TXD (16 * 1024 - 1)
#define ICE_MAX_DATA_PER_TXD_ALIGNED \
	(~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)
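/* Worked example (added for clarity): ICE_MAX_DATA_PER_TXD is 16383
 * (0x3FFF) and ICE_MAX_READ_REQ_SIZE is 4096, so the aligned limit is
 * 0x3FFF & ~0xFFF = 0x3000, i.e. 12288 bytes per data descriptor.
 */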

#define ICE_MAX_TXQ_PER_TXQG 128

/* Attempt to maximize the headroom available for incoming frames. We use a 2K
 * buffer for MTUs <= 1500 and need 1536/1534 to store the data for the frame.
 * This leaves us with 512 bytes of room. From that we need to deduct the
 * space needed for the shared info and the padding needed to IP align the
 * frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative. In these cases we should fall back to the legacy
 *	 receive path.
 */
#if (PAGE_SIZE < 8192)
#define ICE_2K_TOO_SMALL_WITH_PADDING \
	((unsigned int)(NET_SKB_PAD + ICE_RXBUF_1536) > \
	 SKB_WITH_OVERHEAD(ICE_RXBUF_2048))

/**
 * ice_compute_pad - compute the padding
 * @rx_buf_len: buffer length
 *
 * Round the given buffer length up to half a page, then subtract the
 * skb_shared_info overhead and the buffer length itself; what remains
 * is the space that is left for padding usage.
 */
static inline int ice_compute_pad(int rx_buf_len)
{
	int half_page_size;

	half_page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	return SKB_WITH_OVERHEAD(half_page_size) - rx_buf_len;
}

/**
 * ice_skb_pad - determine the padding that we can supply
 *
 * Figure out the right Rx buffer size and based on that calculate the
 * padding
 */
static inline int ice_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (ICE_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = ICE_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = ICE_RXBUF_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return ice_compute_pad(rx_buf_len);
}
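/* Worked example (illustrative; exact numbers depend on the kernel
 * configuration): with 4K pages and a 2K buffer that is large enough,
 * rx_buf_len ends up as ICE_RXBUF_1536 - NET_IP_ALIGN = 1534.
 * ice_compute_pad() rounds that up to half a page (2048), subtracts the
 * skb_shared_info overhead and the 1534 bytes of buffer, and whatever is
 * left becomes the headroom supplied through ICE_SKB_PAD.
 */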

#define ICE_SKB_PAD ice_skb_pad()
#else
#define ICE_2K_TOO_SMALL_WITH_PADDING false
#define ICE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif

/* We are assuming that the cache line is always 64 Bytes here for ice.
 * In order to make sure that is a correct assumption there is a check in probe
 * to print a warning if the read from GLPCI_CNF2 tells us that the cache line
 * size is 128 bytes. We do it this way because we do not want to read the
 * GLPCI_CNF2 register or a variable containing the value on every pass through
 * the Tx path.
 */
#define ICE_CACHE_LINE_BYTES 64
#define ICE_DESCS_PER_CACHE_LINE (ICE_CACHE_LINE_BYTES / \
				  sizeof(struct ice_tx_desc))
#define ICE_DESCS_FOR_CTX_DESC 1
#define ICE_DESCS_FOR_SKB_DATA_PTR 1
/* Tx descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
		     ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
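/* Worked example (assumes the common MAX_SKB_FRAGS value of 17 and the
 * 16-byte struct ice_tx_desc): ICE_DESCS_PER_CACHE_LINE = 64 / 16 = 4, so
 * DESC_NEEDED = 17 + 1 + 4 + 1 = 23 descriptors in the worst case.
 */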
#define ICE_DESC_UNUSED(R) \
	(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	      (R)->next_to_clean - (R)->next_to_use - 1)

#define ICE_RX_DESC_UNUSED(R) \
	((((R)->first_desc > (R)->next_to_use) ? 0 : (R)->count) + \
	 (R)->first_desc - (R)->next_to_use - 1)
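/* Worked example (added for clarity): with (R)->count = 512,
 * next_to_clean = 10 and next_to_use = 500, ICE_DESC_UNUSED(R) is
 * 512 + 10 - 500 - 1 = 21; with next_to_clean = 500 and next_to_use = 10
 * it is 0 + 500 - 10 - 1 = 489. One slot is always left unused so that
 * next_to_use == next_to_clean unambiguously means the ring is empty.
 */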

#define ICE_RING_QUARTER(R) ((R)->count >> 2)

#define ICE_TX_FLAGS_TSO BIT(0)
#define ICE_TX_FLAGS_HW_VLAN BIT(1)
#define ICE_TX_FLAGS_SW_VLAN BIT(2)
/* Free, was ICE_TX_FLAGS_DUMMY_PKT */
#define ICE_TX_FLAGS_TSYN BIT(4)
#define ICE_TX_FLAGS_IPV4 BIT(5)
#define ICE_TX_FLAGS_IPV6 BIT(6)
#define ICE_TX_FLAGS_TUNNEL BIT(7)
#define ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN BIT(8)

#define ICE_XDP_PASS 0
#define ICE_XDP_CONSUMED BIT(0)
#define ICE_XDP_TX BIT(1)
#define ICE_XDP_REDIR BIT(2)
#define ICE_XDP_EXIT BIT(3)
#define ICE_SKB_CONSUMED ICE_XDP_CONSUMED

#define ICE_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

#define ICE_ETH_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))

#define ICE_TXD_LAST_DESC_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)

/**
 * enum ice_tx_buf_type - type of &ice_tx_buf to act on Tx completion
 * @ICE_TX_BUF_EMPTY: unused OR XSk frame, no action required
 * @ICE_TX_BUF_DUMMY: dummy Flow Director packet, unmap and kfree()
 * @ICE_TX_BUF_FRAG: mapped skb OR &xdp_buff frag, only unmap DMA
 * @ICE_TX_BUF_SKB: &sk_buff, unmap and consume_skb(), update stats
 * @ICE_TX_BUF_XDP_TX: &xdp_buff, unmap and page_frag_free(), stats
 * @ICE_TX_BUF_XDP_XMIT: &xdp_frame, unmap and xdp_return_frame(), stats
 * @ICE_TX_BUF_XSK_TX: &xdp_buff on XSk queue, xsk_buff_free(), stats
 */
enum ice_tx_buf_type {
	ICE_TX_BUF_EMPTY = 0U,
	ICE_TX_BUF_DUMMY,
	ICE_TX_BUF_FRAG,
	ICE_TX_BUF_SKB,
	ICE_TX_BUF_XDP_TX,
	ICE_TX_BUF_XDP_XMIT,
	ICE_TX_BUF_XSK_TX,
};
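/* Illustrative sketch (added for clarity; not the driver's actual cleanup
 * routine) of how a Tx completion path can dispatch on this type:
 *
 *	switch (tx_buf->type) {
 *	case ICE_TX_BUF_EMPTY:
 *	case ICE_TX_BUF_FRAG:
 *		break;
 *	case ICE_TX_BUF_DUMMY:
 *		kfree(tx_buf->raw_buf);
 *		break;
 *	case ICE_TX_BUF_SKB:
 *		consume_skb(tx_buf->skb);
 *		break;
 *	case ICE_TX_BUF_XDP_TX:
 *		page_frag_free(tx_buf->raw_buf);
 *		break;
 *	case ICE_TX_BUF_XDP_XMIT:
 *		xdp_return_frame(tx_buf->xdpf);
 *		break;
 *	case ICE_TX_BUF_XSK_TX:
 *		xsk_buff_free(tx_buf->xdp);
 *		break;
 *	}
 *
 * with DMA unmapping and stats updates added as described above.
 */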

struct ice_tx_buf {
	union {
		struct ice_tx_desc *next_to_watch;
		u32 rs_idx;
	};
	union {
		void *raw_buf; /* used for XDP_TX and FDir rules */
		struct sk_buff *skb; /* used for .ndo_start_xmit() */
		struct xdp_frame *xdpf; /* used for .ndo_xdp_xmit() */
		struct xdp_buff *xdp; /* used for XDP_TX ZC */
	};
	unsigned int bytecount;
	union {
		unsigned int gso_segs;
		unsigned int nr_frags; /* used for mbuf XDP */
	};
	u32 tx_flags:12;
	u32 type:4; /* &ice_tx_buf_type */
	u32 vid:16;
	DEFINE_DMA_UNMAP_LEN(len);
	DEFINE_DMA_UNMAP_ADDR(dma);
};

struct ice_tx_offload_params {
	u64 cd_qw1;
	struct ice_tx_ring *tx_ring;
	u32 td_cmd;
	u32 td_offset;
	u32 td_l2tag1;
	u32 cd_tunnel_params;
	u16 cd_l2tag2;
	u8 header_len;
};

struct ice_rx_buf {
	dma_addr_t dma;
	struct page *page;
	unsigned int page_offset;
	unsigned int pgcnt;
	unsigned int act;
	unsigned int pagecnt_bias;
};

struct ice_q_stats {
	u64 pkts;
	u64 bytes;
};

struct ice_txq_stats {
	u64 restart_q;
	u64 tx_busy;
	u64 tx_linearize;
	int prev_pkt; /* negative if no pending Tx descriptors */
};

struct ice_rxq_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buf_failed;
};

struct ice_ring_stats {
	struct rcu_head rcu; /* to avoid race on free */
	struct ice_q_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct ice_txq_stats tx_stats;
		struct ice_rxq_stats rx_stats;
	};
};

enum ice_ring_state_t {
	ICE_TX_XPS_INIT_DONE,
	ICE_TX_NBITS,
};

/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * that mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register but instead is a special value meaning "don't update" ITR0/1/2.
 */
enum ice_dyn_idx_t {
	ICE_IDX_ITR0 = 0,
	ICE_IDX_ITR1 = 1,
	ICE_IDX_ITR2 = 2,
	ICE_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
};

/* Header split modes defined by DTYPE field of Rx RLAN context */
enum ice_rx_dtype {
	ICE_RX_DTYPE_NO_SPLIT = 0,
	ICE_RX_DTYPE_HEADER_SPLIT = 1,
	ICE_RX_DTYPE_SPLIT_ALWAYS = 2,
};

/* indices into GLINT_ITR registers */
#define ICE_RX_ITR ICE_IDX_ITR0
#define ICE_TX_ITR ICE_IDX_ITR1
#define ICE_ITR_8K 124
#define ICE_ITR_20K 50
#define ICE_ITR_MAX 8160 /* 0x1FE0 */
#define ICE_DFLT_TX_ITR ICE_ITR_20K
#define ICE_DFLT_RX_ITR ICE_ITR_20K
enum ice_dynamic_itr {
	ITR_STATIC = 0,
	ITR_DYNAMIC = 1
};

#define ITR_IS_DYNAMIC(rc) ((rc)->itr_mode == ITR_DYNAMIC)
#define ICE_ITR_GRAN_S 1 /* ITR granularity is always 2us */
#define ICE_ITR_GRAN_US BIT(ICE_ITR_GRAN_S)
#define ICE_ITR_MASK 0x1FFE /* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting) ((setting) & ICE_ITR_MASK)
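/* Worked example (added for clarity): the ITR granularity is 2 usecs
 * (ICE_ITR_GRAN_US = BIT(1) = 2), so programmed intervals must be even
 * and no larger than ICE_ITR_MAX. ITR_REG_ALIGN(37) = 37 & 0x1FFE = 36,
 * i.e. a requested 37 usec interval is rounded down to 36 usecs.
 */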

#define ICE_DFLT_INTRL 0
#define ICE_MAX_INTRL 236

#define ICE_IN_WB_ON_ITR_MODE 255
/* Sets WB_ON_ITR and assumes INTENA bit is already cleared, which allows
 * setting the MSK_M bit to tell hardware to ignore the INTENA_M bit. Also,
 * set the write-back latency to the usecs passed in.
 */
#define ICE_GLINT_DYN_CTL_WB_ON_ITR(usecs, itr_idx) \
	((((usecs) << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)) & \
	  GLINT_DYN_CTL_INTERVAL_M) | \
	 (((itr_idx) << GLINT_DYN_CTL_ITR_INDX_S) & \
	  GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | \
	 GLINT_DYN_CTL_WB_ON_ITR_M)
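/* Usage sketch (illustrative; the actual call site lives in the Tx/Rx
 * helper code): arming write-back on ITR for a queue vector roughly looks
 * like
 *
 *	wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
 *	     ICE_GLINT_DYN_CTL_WB_ON_ITR(0, ICE_RX_ITR));
 *
 * which requests immediate write-back (0 usecs) without re-enabling the
 * interrupt.
 */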

/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED 0
#define ICE_TX_LEGACY 1

/* descriptor ring, associated with a VSI */
struct ice_rx_ring {
	/* CL1 - 1st cacheline starts here */
	struct ice_rx_ring *next; /* pointer to next ring in q_vector */
	void *desc; /* Descriptor ring memory */
	struct device *dev; /* Used for DMA mapping */
	struct net_device *netdev; /* netdev ring maps to */
	struct ice_vsi *vsi; /* Backreference to associated VSI */
	struct ice_q_vector *q_vector; /* Backreference to associated vector */
	u8 __iomem *tail;
	u16 q_index; /* Queue number of ring */

	u16 count; /* Number of descriptors */
	u16 reg_idx; /* HW register index of the ring */
	u16 next_to_alloc;
	/* CL2 - 2nd cacheline starts here */
	union {
		struct ice_rx_buf *rx_buf;
		struct xdp_buff **xdp_buf;
	};
	struct xdp_buff xdp;
	/* CL3 - 3rd cacheline starts here */
	struct bpf_prog *xdp_prog;
	u16 rx_offset;

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;
	u16 first_desc;

	/* stats structs */
	struct ice_ring_stats *ring_stats;

	struct rcu_head rcu; /* to avoid race on free */
	/* CL4 - 4th cacheline starts here */
	struct ice_channel *ch;
	struct ice_tx_ring *xdp_ring;
	struct xsk_buff_pool *xsk_pool;
	dma_addr_t dma; /* physical address of ring */
	u64 cached_phctime;
	u16 rx_buf_len;
	u8 dcb_tc; /* Traffic class of ring */
	u8 ptp_rx;
#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
#define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2)
	u8 flags;
	/* CL5 - 5th cacheline starts here */
	struct xdp_rxq_info xdp_rxq;
} ____cacheline_internodealigned_in_smp;

struct ice_tx_ring {
	/* CL1 - 1st cacheline starts here */
	struct ice_tx_ring *next; /* pointer to next ring in q_vector */
	void *desc; /* Descriptor ring memory */
	struct device *dev; /* Used for DMA mapping */
	u8 __iomem *tail;
	struct ice_tx_buf *tx_buf;
	struct ice_q_vector *q_vector; /* Backreference to associated vector */
	struct net_device *netdev; /* netdev ring maps to */
	struct ice_vsi *vsi; /* Backreference to associated VSI */
	/* CL2 - 2nd cacheline starts here */
	dma_addr_t dma; /* physical address of ring */
	struct xsk_buff_pool *xsk_pool;
	u16 next_to_use;
	u16 next_to_clean;
	u16 q_handle; /* Queue handle per TC */
	u16 reg_idx; /* HW register index of the ring */
	u16 count; /* Number of descriptors */
	u16 q_index; /* Queue number of ring */
	u16 xdp_tx_active;
	/* stats structs */
	struct ice_ring_stats *ring_stats;
	/* CL3 - 3rd cacheline starts here */
	struct rcu_head rcu; /* to avoid race on free */
	DECLARE_BITMAP(xps_state, ICE_TX_NBITS); /* XPS Config State */
	struct ice_channel *ch;
	struct ice_ptp_tx *tx_tstamps;
	spinlock_t tx_lock;
	u32 txq_teid; /* Added Tx queue TEID */
	/* CL4 - 4th cacheline starts here */
#define ICE_TX_FLAGS_RING_XDP BIT(0)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG1 BIT(1)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2)
	u8 flags;
	u8 dcb_tc; /* Traffic class of ring */
	u8 ptp_tx;
} ____cacheline_internodealigned_in_smp;

static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring)
{
	return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB);
}

static inline void ice_set_ring_build_skb_ena(struct ice_rx_ring *ring)
{
	ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB;
}

static inline void ice_clear_ring_build_skb_ena(struct ice_rx_ring *ring)
{
	ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
}

static inline bool ice_ring_ch_enabled(struct ice_tx_ring *ring)
{
	return !!ring->ch;
}

static inline bool ice_ring_is_xdp(struct ice_tx_ring *ring)
{
	return !!(ring->flags & ICE_TX_FLAGS_RING_XDP);
}

enum ice_container_type {
	ICE_RX_CONTAINER,
	ICE_TX_CONTAINER,
};

struct ice_ring_container {
	/* head of linked-list of rings */
	union {
		struct ice_rx_ring *rx_ring;
		struct ice_tx_ring *tx_ring;
	};
	struct dim dim; /* data for net_dim algorithm */
	u16 itr_idx; /* index in the interrupt vector */
	/* this matches the maximum number of ITR bits, but in usec
	 * values, so it is shifted left one bit (bit zero is ignored)
	 */
	union {
		struct {
			u16 itr_setting:13;
			u16 itr_reserved:2;
			u16 itr_mode:1;
		};
		u16 itr_settings;
	};
	enum ice_container_type type;
};

struct ice_coalesce_stored {
	u16 itr_tx;
	u16 itr_rx;
	u8 intrl;
	u8 tx_valid;
	u8 rx_valid;
};

/* iterator for handling rings in ring container */
#define ice_for_each_rx_ring(pos, head) \
	for (pos = (head).rx_ring; pos; pos = pos->next)

#define ice_for_each_tx_ring(pos, head) \
	for (pos = (head).tx_ring; pos; pos = pos->next)
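/* Usage sketch (illustrative; process_ring() is a hypothetical helper):
 * walking every Rx ring attached to a vector's Rx container looks like
 *
 *	struct ice_rx_ring *ring;
 *
 *	ice_for_each_rx_ring(ring, q_vector->rx)
 *		process_ring(ring);
 */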

static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring))
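/* Worked example (added for clarity): with 4K pages and a 3072-byte Rx
 * buffer, rx_buf_len > PAGE_SIZE / 2, so ice_rx_pg_order() returns 1 and
 * ice_rx_pg_size() is 8192, i.e. an order-1 (two-page) allocation. With a
 * 2048-byte buffer the order stays 0 and a single 4K page is used.
 */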

union ice_32b_rx_flex_desc;

bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, unsigned int cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
u16
ice_select_queue(struct net_device *dev, struct sk_buff *skb,
		 struct net_device *sb_dev);
void ice_clean_tx_ring(struct ice_tx_ring *tx_ring);
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring);
int ice_setup_tx_ring(struct ice_tx_ring *tx_ring);
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring);
void ice_free_tx_ring(struct ice_tx_ring *tx_ring);
void ice_free_rx_ring(struct ice_rx_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet);
int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget);
void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring);
#endif /* _ICE_TXRX_H_ */