// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "efx.h"
#include "nic_common.h"
#include "tx_common.h"
#include <net/gso.h>

static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
{
        return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
                            PAGE_SIZE >> EFX_TX_CB_ORDER);
}

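/* Allocate the software state for a TX queue: the descriptor-tracking
 * buffer array, the copy buffer pages and the hardware ring itself.
 * The ring size is efx->txq_entries rounded up to a power of two and
 * clamped to at least EFX_MIN_DMAQ_SIZE; on failure anything already
 * allocated is freed and a negative errno is returned.
 */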
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;
        unsigned int entries;
        int rc;

        /* Create the smallest power-of-two aligned ring */
        entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
        EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
        tx_queue->ptr_mask = entries - 1;

        netif_dbg(efx, probe, efx->net_dev,
                  "creating TX queue %d size %#x mask %#x\n",
                  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

        /* Allocate software ring */
        tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
                                   GFP_KERNEL);
        if (!tx_queue->buffer)
                return -ENOMEM;

        tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
                                    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
        if (!tx_queue->cb_page) {
                rc = -ENOMEM;
                goto fail1;
        }

        /* Allocate hardware ring, determine TXQ type */
        rc = efx_nic_probe_tx(tx_queue);
        if (rc)
                goto fail2;

        tx_queue->channel->tx_queue_by_type[tx_queue->type] = tx_queue;
        return 0;

fail2:
        kfree(tx_queue->cb_page);
        tx_queue->cb_page = NULL;
fail1:
        kfree(tx_queue->buffer);
        tx_queue->buffer = NULL;
        return rc;
}

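/* Reset all software state for a TX queue (insert/read/write counters,
 * timestamping and XDP flags) and set up the hardware descriptor ring
 * via efx_nic_init_tx().
 */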
void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;

        netif_dbg(efx, drv, efx->net_dev,
                  "initialising TX queue %d\n", tx_queue->queue);

        tx_queue->insert_count = 0;
        tx_queue->notify_count = 0;
        tx_queue->write_count = 0;
        tx_queue->packet_write_count = 0;
        tx_queue->old_write_count = 0;
        tx_queue->read_count = 0;
        tx_queue->old_read_count = 0;
        tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
        tx_queue->xmit_pending = false;
        tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
                                  tx_queue->channel == efx_ptp_channel(efx));
        tx_queue->completed_timestamp_major = 0;
        tx_queue->completed_timestamp_minor = 0;

        tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
        tx_queue->tso_version = 0;

        /* Set up TX descriptor ring */
        efx_nic_init_tx(tx_queue);

        tx_queue->initialised = true;
}

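/* Drain any descriptors still outstanding between the read and write
 * pointers, releasing their DMA mappings and skbs, then reset the core
 * netdev TX queue's byte/packet accounting.
 */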
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
        struct efx_tx_buffer *buffer;

        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "shutting down TX queue %d\n", tx_queue->queue);

        tx_queue->initialised = false;

        if (!tx_queue->buffer)
                return;

        /* Free any buffers left in the ring */
        while (tx_queue->read_count != tx_queue->write_count) {
                unsigned int pkts_compl = 0, bytes_compl = 0;
                unsigned int efv_pkts_compl = 0;

                buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
                                   &efv_pkts_compl);

                ++tx_queue->read_count;
        }
        tx_queue->xmit_pending = false;
        netdev_tx_reset_queue(tx_queue->core_txq);
}

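/* Free everything allocated by efx_probe_tx_queue(): the hardware ring,
 * the copy buffer pages and the software descriptor ring, and clear the
 * channel's pointer to this queue.
 */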
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
        int i;

        if (!tx_queue->buffer)
                return;

        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "destroying TX queue %d\n", tx_queue->queue);
        efx_nic_remove_tx(tx_queue);

        if (tx_queue->cb_page) {
                for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
                        efx_nic_free_buffer(tx_queue->efx,
                                            &tx_queue->cb_page[i]);
                kfree(tx_queue->cb_page);
                tx_queue->cb_page = NULL;
        }

        kfree(tx_queue->buffer);
        tx_queue->buffer = NULL;
        tx_queue->channel->tx_queue_by_type[tx_queue->type] = NULL;
}

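/* Release a single TX buffer: unmap its DMA region (page or single
 * mapping), complete the attached skb or XDP frame if there is one,
 * bump the completion counters supplied by the caller, and deliver a
 * hardware TX timestamp when one has been captured for this queue.
 */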
void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
                        struct efx_tx_buffer *buffer,
                        unsigned int *pkts_compl,
                        unsigned int *bytes_compl,
                        unsigned int *efv_pkts_compl)
{
        if (buffer->unmap_len) {
                struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
                dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;

                if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
                                       DMA_TO_DEVICE);
                buffer->unmap_len = 0;
        }

        if (buffer->flags & EFX_TX_BUF_SKB) {
                struct sk_buff *skb = (struct sk_buff *)buffer->skb;

                if (unlikely(buffer->flags & EFX_TX_BUF_EFV)) {
                        EFX_WARN_ON_PARANOID(!efv_pkts_compl);
                        (*efv_pkts_compl)++;
                } else {
                        EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
                        (*pkts_compl)++;
                        (*bytes_compl) += skb->len;
                }

                if (tx_queue->timestamping &&
                    (tx_queue->completed_timestamp_major ||
                     tx_queue->completed_timestamp_minor)) {
                        struct skb_shared_hwtstamps hwtstamp;

                        hwtstamp.hwtstamp =
                                efx_ptp_nic_to_kernel_time(tx_queue);
                        skb_tstamp_tx(skb, &hwtstamp);

                        tx_queue->completed_timestamp_major = 0;
                        tx_queue->completed_timestamp_minor = 0;
                }
                dev_consume_skb_any((struct sk_buff *)buffer->skb);
                netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
                           "TX queue %d transmission id %x complete\n",
                           tx_queue->queue, tx_queue->read_count);
        } else if (buffer->flags & EFX_TX_BUF_XDP) {
                xdp_return_frame_rx_napi(buffer->xdpf);
        }

        buffer->len = 0;
        buffer->flags = 0;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
                                unsigned int index,
                                unsigned int *pkts_compl,
                                unsigned int *bytes_compl,
                                unsigned int *efv_pkts_compl)
{
        struct efx_nic *efx = tx_queue->efx;
        unsigned int stop_index, read_ptr;

        stop_index = (index + 1) & tx_queue->ptr_mask;
        read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

        while (read_ptr != stop_index) {
                struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

                if (!efx_tx_buffer_in_use(buffer)) {
                        netif_err(efx, tx_err, efx->net_dev,
                                  "TX queue %d spurious TX completion id %d\n",
                                  tx_queue->queue, read_ptr);
                        efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
                        return;
                }

                efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl,
                                   efv_pkts_compl);

                ++tx_queue->read_count;
                read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
        }
}

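/* Detect when the queue has been fully drained: once the read pointer
 * has caught up with the last observed write pointer, record that state
 * in empty_read_count with EFX_EMPTY_COUNT_VALID set so other paths can
 * tell the queue was seen empty at this point.
 */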
void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
{
        if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
                tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
                if (tx_queue->read_count == tx_queue->old_write_count) {
                        /* Ensure that read_count is flushed. */
                        smp_mb();
                        tx_queue->empty_read_count =
                                tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
                }
        }
}

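/* Handle a TX completion covering descriptors up to and including
 * @index: free the completed buffers, account merged completion events,
 * wake the core netdev queue if it was stopped and the fill level has
 * dropped to or below the wake threshold, and re-check for an empty
 * queue.  Returns the number of packets completed.
 */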
int efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
        unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
        unsigned int efv_pkts_compl = 0;
        struct efx_nic *efx = tx_queue->efx;

        EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);

        efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl,
                            &efv_pkts_compl);
        tx_queue->pkts_compl += pkts_compl;
        tx_queue->bytes_compl += bytes_compl;

        if (pkts_compl + efv_pkts_compl > 1)
                ++tx_queue->merge_events;

        /* See if we need to restart the netif queue. This memory
         * barrier ensures that we write read_count (inside
         * efx_dequeue_buffers()) before reading the queue status.
         */
        smp_mb();
        if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
            likely(efx->port_enabled) &&
            likely(netif_device_present(efx->net_dev))) {
                fill_level = efx_channel_tx_fill_level(tx_queue->channel);
                if (fill_level <= efx->txq_wake_thresh)
                        netif_tx_wake_queue(tx_queue->core_txq);
        }

        efx_xmit_done_check_empty(tx_queue);

        return pkts_compl + efv_pkts_compl;
}

/* Remove buffers put into a tx_queue for the current packet.
 * None of the buffers must have an skb attached.
 */
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
                        unsigned int insert_count)
{
        unsigned int efv_pkts_compl = 0;
        struct efx_tx_buffer *buffer;
        unsigned int bytes_compl = 0;
        unsigned int pkts_compl = 0;

        /* Work backwards until we hit the original insert pointer value */
        while (tx_queue->insert_count != insert_count) {
                --tx_queue->insert_count;
                buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
                                   &efv_pkts_compl);
        }
}

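/* Add descriptors for one DMA-contiguous chunk, splitting it as needed
 * to respect the NIC's per-descriptor length limit (tx_limit_len).
 * Returns the last buffer used so the caller can attach skb and unmap
 * state to it.
 */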
struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
                                       dma_addr_t dma_addr, size_t len)
{
        const struct efx_nic_type *nic_type = tx_queue->efx->type;
        struct efx_tx_buffer *buffer;
        unsigned int dma_len;

        /* Map the fragment taking account of NIC-dependent DMA limits. */
        do {
                buffer = efx_tx_queue_get_insert_buffer(tx_queue);

                if (nic_type->tx_limit_len)
                        dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
                else
                        dma_len = len;

                buffer->len = dma_len;
                buffer->dma_addr = dma_addr;
                buffer->flags = EFX_TX_BUF_CONT;
                len -= dma_len;
                dma_addr += dma_len;
                ++tx_queue->insert_count;
        } while (len);

        return buffer;
}

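/* Length of the headers that must be placed in a separate descriptor
 * for TSO: everything from the start of the packet up to and including
 * the TCP header (the inner TCP header for encapsulated packets).
 */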
int efx_tx_tso_header_length(struct sk_buff *skb)
{
        size_t header_len;

        if (skb->encapsulation)
                header_len = skb_inner_transport_header(skb) -
                                skb->data +
                                (inner_tcp_hdr(skb)->doff << 2u);
        else
                header_len = skb_transport_header(skb) - skb->data +
                                (tcp_hdr(skb)->doff << 2u);
        return header_len;
}

/* Map all data from an SKB for DMA and create descriptors on the queue. */
int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
                    unsigned int segment_count)
{
        struct efx_nic *efx = tx_queue->efx;
        struct device *dma_dev = &efx->pci_dev->dev;
        unsigned int frag_index, nr_frags;
        dma_addr_t dma_addr, unmap_addr;
        unsigned short dma_flags;
        size_t len, unmap_len;

        nr_frags = skb_shinfo(skb)->nr_frags;
        frag_index = 0;

        /* Map header data. */
        len = skb_headlen(skb);
        dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
        dma_flags = EFX_TX_BUF_MAP_SINGLE;
        unmap_len = len;
        unmap_addr = dma_addr;

        if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
                return -EIO;

        if (segment_count) {
                /* For TSO we need to put the header into a separate
                 * descriptor. Map this separately if necessary.
                 */
                size_t header_len = efx_tx_tso_header_length(skb);

                if (header_len != len) {
                        tx_queue->tso_long_headers++;
                        efx_tx_map_chunk(tx_queue, dma_addr, header_len);
                        len -= header_len;
                        dma_addr += header_len;
                }
        }

        /* Add descriptors for each fragment. */
        do {
                struct efx_tx_buffer *buffer;
                skb_frag_t *fragment;

                buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);

                /* The final descriptor for a fragment is responsible for
                 * unmapping the whole fragment.
                 */
                buffer->flags = EFX_TX_BUF_CONT | dma_flags;
                buffer->unmap_len = unmap_len;
                buffer->dma_offset = buffer->dma_addr - unmap_addr;

                if (frag_index >= nr_frags) {
                        /* Store SKB details with the final buffer for
                         * the completion.
                         */
                        buffer->skb = skb;
                        buffer->flags = EFX_TX_BUF_SKB | dma_flags;
                        return 0;
                }

                /* Move on to the next fragment. */
                fragment = &skb_shinfo(skb)->frags[frag_index++];
                len = skb_frag_size(fragment);
                dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
                                            DMA_TO_DEVICE);
                dma_flags = 0;
                unmap_len = len;
                unmap_addr = dma_addr;

                if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
                        return -EIO;
        } while (1);
}

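/* Worst-case number of descriptors that a single TSO skb can require:
 * a header and a payload descriptor for every output segment, possibly
 * one more per segment for an option descriptor on NICs at or above
 * EFX_REV_HUNT_A0, one for each input fragment boundary within a
 * segment, and extra splits where PCIe page boundaries fall inside
 * input fragments.
 */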
unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
{
        /* Header and payload descriptor for each output segment, plus
         * one for every input fragment boundary within a segment
         */
        unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

        /* Possibly one more per segment for option descriptors */
        if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
                max_descs += EFX_TSO_MAX_SEGS;

        /* Possibly more for PCIe page boundaries within input fragments */
        if (PAGE_SIZE > EFX_PAGE_SIZE)
                max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
                                   DIV_ROUND_UP(GSO_LEGACY_MAX_SIZE,
                                                EFX_PAGE_SIZE));

        return max_descs;
}

/*
 * Fallback to software TSO.
 *
 * This is used if we are unable to send a GSO packet through hardware TSO.
 * This should only ever happen due to per-queue restrictions - unsupported
 * packets should first be filtered by the feature flags.
 *
 * Returns 0 on success, error code otherwise.
 */
int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
        struct sk_buff *segments, *next;

        segments = skb_gso_segment(skb, 0);
        if (IS_ERR(segments))
                return PTR_ERR(segments);

        dev_consume_skb_any(skb);

        skb_list_walk_safe(segments, skb, next) {
                skb_mark_not_on_list(skb);
                efx_enqueue_skb(tx_queue, skb);
        }

        return 0;
}