// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include <linux/filter.h>

#include "ice_txrx_lib.h"
#include "ice_eswitch.h"
#include "ice_lib.h"

/**
 * ice_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 */
void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val)
{
	u16 prev_ntu = rx_ring->next_to_use & ~0x7;

	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* QRX_TAIL will be updated with any tail value, but hardware ignores
	 * the lower 3 bits. This makes it so we only bump tail on meaningful
	 * boundaries. Also, this allows us to bump tail on intervals of 8 up to
	 * the budget depending on the current traffic load.
	 */
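	/* Worked example (illustrative): if next_to_use was 13, prev_ntu is
	 * 13 & ~0x7 = 8; bumping to val = 21 gives 21 & ~0x7 = 16 != 8, so
	 * tail is written as 16, while a bump to val = 15 (15 & ~0x7 = 8)
	 * would skip the MMIO write entirely.
	 */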
	val &= ~0x7;
	if (prev_ntu != val) {
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(val, rx_ring->tail);
	}
}

/**
 * ice_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns the appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be
 * used by skb_set_hash, based on the PTYPE parsed by the HW Rx pipeline and
 * carried in the Rx descriptor.
 */
static enum pkt_hash_types ice_ptype_to_htype(u16 ptype)
{
	struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;
	if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	if (decoded.outer_ip == ICE_RX_PTYPE_OUTER_L2)
		return PKT_HASH_TYPE_L2;

	return PKT_HASH_TYPE_NONE;
}

/**
 * ice_rx_hash - set the hash value in the skb
 * @rx_ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: pointer to current skb
 * @rx_ptype: the ptype value from the descriptor
 */
static void
ice_rx_hash(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
	    struct sk_buff *skb, u16 rx_ptype)
{
	struct ice_32b_rx_flex_desc_nic *nic_mdid;
	u32 hash;

	if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
		return;

	if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
		return;

	nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
	hash = le32_to_cpu(nic_mdid->rss_hash);
	skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
}

/**
 * ice_rx_csum - Indicate in skb if checksum is good
 * @ring: the ring we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 * @ptype: the packet type decoded by hardware
 *
 * skb->protocol must be set before this function is called
 */
static void
ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
	    union ice_32b_rx_flex_desc *rx_desc, u16 ptype)
{
	struct ice_rx_ptype_decoded decoded;
	u16 rx_status0, rx_status1;
	bool ipv4, ipv6;

	rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
	rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);

	decoded = ice_decode_rx_desc_ptype(ptype);

	/* Start with CHECKSUM_NONE and by default csum_level = 0 */
	skb->ip_summed = CHECKSUM_NONE;
	skb_checksum_none_assert(skb);

	/* check if Rx checksum is enabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* check if HW has decoded the packet and checksum */
	if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
		return;

	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);

	if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
				   BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
		goto checksum_fail;

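	/* IPv6 with extension headers: HW checksum validation is not
	 * reliable in this case, so treat it as a checksum failure
	 */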
	if (ipv6 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
		goto checksum_fail;

	/* check for L4 errors and handle packets that were not able to be
	 * checksummed due to arrival speed
	 */
	if (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
		goto checksum_fail;

	/* check for outer UDP checksum error in tunneled packets */
	if ((rx_status1 & BIT(ICE_RX_FLEX_DESC_STATUS1_NAT_S)) &&
	    (rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
		goto checksum_fail;

	/* If there is an outer header present that might contain a checksum
	 * we need to bump the checksum level by 1 to reflect the fact that
	 * we are indicating we validated the inner checksum.
	 */
	if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT)
		skb->csum_level = 1;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case ICE_RX_PTYPE_INNER_PROT_TCP:
	case ICE_RX_PTYPE_INNER_PROT_UDP:
	case ICE_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	default:
		break;
	}
	return;

checksum_fail:
	ring->vsi->back->hw_csum_rx_error++;
}

/**
 * ice_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: Rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 */
void
ice_process_skb_fields(struct ice_rx_ring *rx_ring,
		       union ice_32b_rx_flex_desc *rx_desc,
		       struct sk_buff *skb, u16 ptype)
{
	ice_rx_hash(rx_ring, rx_desc, skb, ptype);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);

	ice_rx_csum(rx_ring, skb, rx_desc, ptype);

	if (rx_ring->ptp_rx)
		ice_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
}

/**
 * ice_receive_skb - Send a completed packet up the stack
 * @rx_ring: Rx ring in play
 * @skb: packet to send up
 * @vlan_tag: VLAN tag for packet
 *
 * This function sends the completed packet (via skb) up the stack using
 * GRO receive functions (with/without VLAN tag).
 */
void
ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
	netdev_features_t features = rx_ring->netdev->features;
	bool non_zero_vlan = !!(vlan_tag & VLAN_VID_MASK);

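	/* a zero VID denotes a priority-only tag; nothing to push upstack */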
	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && non_zero_vlan)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	else if ((features & NETIF_F_HW_VLAN_STAG_RX) && non_zero_vlan)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag);

	napi_gro_receive(&rx_ring->q_vector->napi, skb);
}

/**
 * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
 * @dev: device for DMA mapping
 * @tx_buf: Tx buffer to clean
 * @bq: XDP bulk flush struct
 */
static void
ice_clean_xdp_tx_buf(struct device *dev, struct ice_tx_buf *tx_buf,
		     struct xdp_frame_bulk *bq)
{
	dma_unmap_single(dev, dma_unmap_addr(tx_buf, dma),
			 dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_buf, len, 0);

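	/* ICE_TX_BUF_XDP_TX buffers point into Rx page frags, while
	 * ICE_TX_BUF_XDP_XMIT buffers carry an xdp_frame handed to the
	 * driver via .ndo_xdp_xmit() and must go back through the frame API
	 */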
	switch (tx_buf->type) {
	case ICE_TX_BUF_XDP_TX:
		page_frag_free(tx_buf->raw_buf);
		break;
	case ICE_TX_BUF_XDP_XMIT:
		xdp_return_frame_bulk(tx_buf->xdpf, bq);
		break;
	}

	tx_buf->type = ICE_TX_BUF_EMPTY;
}

/**
 * ice_clean_xdp_irq - Reclaim resources after transmit completes on XDP ring
 * @xdp_ring: XDP ring to clean
 *
 * Returns the number of cleaned Tx descriptors.
 */
static u32 ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
{
	int total_bytes = 0, total_pkts = 0;
	struct device *dev = xdp_ring->dev;
	u32 ntc = xdp_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	u32 cnt = xdp_ring->count;
	struct xdp_frame_bulk bq;
	u32 frags, xdp_tx = 0;
	u32 ready_frames = 0;
	u32 idx;
	u32 ret;

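	/* rs_idx was stashed in the first tx_buf of the previous NAPI batch
	 * by ice_finalize_xdp_rx(); the descriptor at that index carries the
	 * RS bit, so its DD bit tells whether the whole batch has completed
	 */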
	idx = xdp_ring->tx_buf[ntc].rs_idx;
	tx_desc = ICE_TX_DESC(xdp_ring, idx);
	if (tx_desc->cmd_type_offset_bsz &
	    cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)) {
		if (idx >= ntc)
			ready_frames = idx - ntc + 1;
		else
			ready_frames = idx + cnt - ntc + 1;
	}

	if (unlikely(!ready_frames))
		return 0;
	ret = ready_frames;

	xdp_frame_bulk_init(&bq);
	rcu_read_lock(); /* xdp_return_frame_bulk() */

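	/* each frame occupies 1 + nr_frags tx_bufs; the head buffer holds
	 * bytecount and nr_frags for the whole frame, frag buffers follow
	 */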
	while (ready_frames) {
		struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];
		struct ice_tx_buf *head = tx_buf;

		/* bytecount holds size of head + frags */
		total_bytes += tx_buf->bytecount;
		frags = tx_buf->nr_frags;
		total_pkts++;
		/* count head + frags */
		ready_frames -= frags + 1;
		xdp_tx++;

		ntc++;
		if (ntc == cnt)
			ntc = 0;

		for (int i = 0; i < frags; i++) {
			tx_buf = &xdp_ring->tx_buf[ntc];

			ice_clean_xdp_tx_buf(dev, tx_buf, &bq);
			ntc++;
			if (ntc == cnt)
				ntc = 0;
		}

		ice_clean_xdp_tx_buf(dev, head, &bq);
	}

	xdp_flush_frame_bulk(&bq);
	rcu_read_unlock();

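	/* clear the DD bit at rs_idx so a later cleanup pass does not
	 * mistake this stale completion for a fresh one
	 */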
	tx_desc->cmd_type_offset_bsz = 0;
	xdp_ring->next_to_clean = ntc;
	xdp_ring->xdp_tx_active -= xdp_tx;
	ice_update_tx_ring_stats(xdp_ring, total_pkts, total_bytes);

	return ret;
}

/**
 * __ice_xmit_xdp_ring - submit frame to XDP ring for transmission
 * @xdp: XDP buffer to be placed onto Tx descriptors
 * @xdp_ring: XDP ring for transmission
 * @frame: whether this comes from .ndo_xdp_xmit()
 */
int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
			bool frame)
{
	struct skb_shared_info *sinfo = NULL;
	u32 size = xdp->data_end - xdp->data;
	struct device *dev = xdp_ring->dev;
	u32 ntu = xdp_ring->next_to_use;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_head;
	struct ice_tx_buf *tx_buf;
	u32 cnt = xdp_ring->count;
	void *data = xdp->data;
	u32 nr_frags = 0;
	u32 free_space;
	u32 frag = 0;

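	/* reclaim completed descriptors only when less than a quarter of
	 * the ring is free; cleaning on every xmit would be wasteful
	 */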
	free_space = ICE_DESC_UNUSED(xdp_ring);
	if (free_space < ICE_RING_QUARTER(xdp_ring))
		free_space += ice_clean_xdp_irq(xdp_ring);

	if (unlikely(!free_space))
		goto busy;

	if (unlikely(xdp_buff_has_frags(xdp))) {
		sinfo = xdp_get_shared_info_from_buff(xdp);
		nr_frags = sinfo->nr_frags;
		if (free_space < nr_frags + 1)
			goto busy;
	}

	tx_desc = ICE_TX_DESC(xdp_ring, ntu);
	tx_head = &xdp_ring->tx_buf[ntu];
	tx_buf = tx_head;

	for (;;) {
		dma_addr_t dma;

		dma = dma_map_single(dev, data, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto dma_unmap;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buf, len, size);
		dma_unmap_addr_set(tx_buf, dma, dma);

		if (frame) {
			tx_buf->type = ICE_TX_BUF_FRAG;
		} else {
			tx_buf->type = ICE_TX_BUF_XDP_TX;
			tx_buf->raw_buf = data;
		}

		tx_desc->buf_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz = ice_build_ctob(0, 0, size, 0);

		ntu++;
		if (ntu == cnt)
			ntu = 0;

		if (frag == nr_frags)
			break;

		tx_desc = ICE_TX_DESC(xdp_ring, ntu);
		tx_buf = &xdp_ring->tx_buf[ntu];

		data = skb_frag_address(&sinfo->frags[frag]);
		size = skb_frag_size(&sinfo->frags[frag]);
		frag++;
	}

	/* store info about bytecount and frag count in first desc */
	tx_head->bytecount = xdp_get_buff_len(xdp);
	tx_head->nr_frags = nr_frags;

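	/* for .ndo_xdp_xmit() the xdp_buff is expected to have been built
	 * from an xdp_frame living at data_hard_start; stash it so cleanup
	 * can return it via xdp_return_frame_bulk()
	 */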
	if (frame) {
		tx_head->type = ICE_TX_BUF_XDP_XMIT;
		tx_head->xdpf = xdp->data_hard_start;
	}

	/* update last descriptor from a frame with EOP */
	tx_desc->cmd_type_offset_bsz |=
		cpu_to_le64(ICE_TX_DESC_CMD_EOP << ICE_TXD_QW1_CMD_S);

	xdp_ring->xdp_tx_active++;
	xdp_ring->next_to_use = ntu;

	return ICE_XDP_TX;

dma_unmap:
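	/* walk back from the descriptor that failed to map down to the
	 * head, unmapping everything mapped so far in this call
	 */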
	for (;;) {
		tx_buf = &xdp_ring->tx_buf[ntu];
		dma_unmap_page(dev, dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
		dma_unmap_len_set(tx_buf, len, 0);
		if (tx_buf == tx_head)
			break;

		if (!ntu)
			ntu += cnt;
		ntu--;
	}
	return ICE_XDP_CONSUMED;

busy:
	xdp_ring->ring_stats->tx_stats.tx_busy++;

	return ICE_XDP_CONSUMED;
}

/**
 * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
 * @xdp_ring: XDP ring
 * @xdp_res: Result of the receive batch
 * @first_idx: index of the first tx_buf used by this NAPI batch
 *
 * This function bumps the XDP Tx tail and/or flushes the redirect map; it
 * should be called when a batch of packets has been processed in the
 * NAPI loop.
 */
void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res,
			 u32 first_idx)
{
	struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[first_idx];

	if (xdp_res & ICE_XDP_REDIR)
		xdp_do_flush();

	if (xdp_res & ICE_XDP_TX) {
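		/* the static key is enabled when XDP Tx rings are shared
		 * between CPUs, in which case tail updates need the lock
		 */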
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_lock(&xdp_ring->tx_lock);
		/* store index of descriptor with RS bit set in the first
		 * ice_tx_buf of given NAPI batch
		 */
		tx_buf->rs_idx = ice_set_rs_bit(xdp_ring);
		ice_xdp_ring_update_tail(xdp_ring);
		if (static_branch_unlikely(&ice_xdp_locking_key))
			spin_unlock(&xdp_ring->tx_lock);
	}
}