// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "efx.h"
#include "nic_common.h"
#include "tx_common.h"
#include <net/gso.h>

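/* Number of pages of copy buffers needed to cover the whole descriptor
 * ring: each page holds PAGE_SIZE >> EFX_TX_CB_ORDER copy buffers.
 */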
static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
			    PAGE_SIZE >> EFX_TX_CB_ORDER);
}

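/* Allocate the software and hardware state for a TX queue.  The ring is
 * sized to the next power of two at or above efx->txq_entries, and to at
 * least EFX_MIN_DMAQ_SIZE.
 */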
int efx_siena_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
				    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
	if (!tx_queue->cb_page) {
		rc = -ENOMEM;
		goto fail1;
	}

	/* Allocate hardware ring, determine TXQ type */
	rc = efx_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	tx_queue->channel->tx_queue_by_type[tx_queue->type] = tx_queue;
	return 0;

fail2:
	kfree(tx_queue->cb_page);
	tx_queue->cb_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

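/* Reset the software state of a TX queue and program its hardware
 * descriptor ring, ready for transmission.
 */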
void efx_siena_init_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	netif_dbg(efx, drv, efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->notify_count = 0;
	tx_queue->write_count = 0;
	tx_queue->packet_write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
	tx_queue->xmit_pending = false;
	tx_queue->timestamping = (efx_siena_ptp_use_mac_tx_timestamps(efx) &&
				  tx_queue->channel == efx_siena_ptp_channel(efx));
	tx_queue->completed_timestamp_major = 0;
	tx_queue->completed_timestamp_minor = 0;

	tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
	tx_queue->tso_version = 0;

	/* Set up TX descriptor ring */
	efx_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

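/* Free the hardware ring, the copy buffers and the software ring; the
 * inverse of efx_siena_probe_tx_queue().
 */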
void efx_siena_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	efx_nic_remove_tx(tx_queue);

	if (tx_queue->cb_page) {
		for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
			efx_siena_free_buffer(tx_queue->efx,
					      &tx_queue->cb_page[i]);
		kfree(tx_queue->cb_page);
		tx_queue->cb_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	tx_queue->channel->tx_queue_by_type[tx_queue->type] = NULL;
}

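/* Release a single software descriptor: unmap its DMA area if it owns one,
 * complete any attached skb or XDP frame, and clear it for reuse.
 */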
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       struct efx_tx_buffer *buffer,
			       unsigned int *pkts_compl,
			       unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;

		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EFX_TX_BUF_SKB) {
		struct sk_buff *skb = (struct sk_buff *)buffer->skb;

		EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
		if (tx_queue->timestamping &&
		    (tx_queue->completed_timestamp_major ||
		     tx_queue->completed_timestamp_minor)) {
			struct skb_shared_hwtstamps hwtstamp;

			hwtstamp.hwtstamp =
				efx_siena_ptp_nic_to_kernel_time(tx_queue);
			skb_tstamp_tx(skb, &hwtstamp);

			tx_queue->completed_timestamp_major = 0;
			tx_queue->completed_timestamp_minor = 0;
		}
		dev_consume_skb_any((struct sk_buff *)buffer->skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	} else if (buffer->flags & EFX_TX_BUF_XDP) {
		xdp_return_frame_rx_napi(buffer->xdpf);
	}

	buffer->len = 0;
	buffer->flags = 0;
}

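/* Release every buffer still queued when the TX queue is shut down.  The
 * per-packet completion counts are computed but discarded, since the core
 * netdev queue state is reset afterwards anyway.
 */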
void efx_siena_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;

		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
	}
	tx_queue->xmit_pending = false;
	netdev_tx_reset_queue(tx_queue->core_txq);
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (!efx_tx_buffer_in_use(buffer)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %d\n",
				  tx_queue->queue, read_ptr);
			efx_siena_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

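/* If every descriptor written so far has now been completed, record the
 * point at which the queue became empty in empty_read_count, tagged with
 * EFX_EMPTY_COUNT_VALID.
 */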
void efx_siena_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
{
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			/* Ensure that read_count is flushed. */
			smp_mb();
			tx_queue->empty_read_count =
				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
		}
	}
}

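/* Handle a TX completion event: free all buffers up to and including
 * @index, account the completed packets and bytes, and wake the core
 * netdev queue if it was stopped and has drained below the wake threshold.
 */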
void efx_siena_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
	unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
	struct efx_nic *efx = tx_queue->efx;

	EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);

	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	tx_queue->pkts_compl += pkts_compl;
	tx_queue->bytes_compl += bytes_compl;

	if (pkts_compl > 1)
		++tx_queue->merge_events;

	/* See if we need to restart the netif queue. This memory
	 * barrier ensures that we write read_count (inside
	 * efx_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		fill_level = efx_channel_tx_fill_level(tx_queue->channel);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	efx_siena_xmit_done_check_empty(tx_queue);
}

/* Remove buffers put into a tx_queue for the current packet.
 * None of the buffers must have an skb attached.
 */
void efx_siena_enqueue_unwind(struct efx_tx_queue *tx_queue,
			      unsigned int insert_count)
{
	struct efx_tx_buffer *buffer;
	unsigned int bytes_compl = 0;
	unsigned int pkts_compl = 0;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != insert_count) {
		--tx_queue->insert_count;
		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
	}
}

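/* Add descriptors for one DMA-contiguous chunk, splitting it as required
 * by the NIC's per-descriptor length limit.  Returns the last buffer used
 * so the caller can adjust its flags and unmap information.
 */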
struct efx_tx_buffer *efx_siena_tx_map_chunk(struct efx_tx_queue *tx_queue,
					     dma_addr_t dma_addr, size_t len)
{
	const struct efx_nic_type *nic_type = tx_queue->efx->type;
	struct efx_tx_buffer *buffer;
	unsigned int dma_len;

	/* Map the fragment taking account of NIC-dependent DMA limits. */
	do {
		buffer = efx_tx_queue_get_insert_buffer(tx_queue);

		if (nic_type->tx_limit_len)
			dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
		else
			dma_len = len;

		buffer->len = dma_len;
		buffer->dma_addr = dma_addr;
		buffer->flags = EFX_TX_BUF_CONT;
		len -= dma_len;
		dma_addr += dma_len;
		++tx_queue->insert_count;
	} while (len);

	return buffer;
}

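/* Length of the headers preceding the TSO payload, i.e. everything up to
 * and including the (inner) TCP header.
 */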
static int efx_tx_tso_header_length(struct sk_buff *skb)
{
	size_t header_len;

	if (skb->encapsulation)
		header_len = skb_inner_transport_offset(skb) +
			     (inner_tcp_hdr(skb)->doff << 2u);
	else
		header_len = skb_transport_offset(skb) +
			     (tcp_hdr(skb)->doff << 2u);
	return header_len;
}

/* Map all data from an SKB for DMA and create descriptors on the queue. */
int efx_siena_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
			  unsigned int segment_count)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int frag_index, nr_frags;
	dma_addr_t dma_addr, unmap_addr;
	unsigned short dma_flags;
	size_t len, unmap_len;

	nr_frags = skb_shinfo(skb)->nr_frags;
	frag_index = 0;

	/* Map header data. */
	len = skb_headlen(skb);
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
	dma_flags = EFX_TX_BUF_MAP_SINGLE;
	unmap_len = len;
	unmap_addr = dma_addr;

	if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
		return -EIO;

	if (segment_count) {
		/* For TSO we need to put the header in to a separate
		 * descriptor. Map this separately if necessary.
		 */
		size_t header_len = efx_tx_tso_header_length(skb);

		if (header_len != len) {
			tx_queue->tso_long_headers++;
			efx_siena_tx_map_chunk(tx_queue, dma_addr, header_len);
			len -= header_len;
			dma_addr += header_len;
		}
	}

	/* Add descriptors for each fragment. */
	do {
		struct efx_tx_buffer *buffer;
		skb_frag_t *fragment;

		buffer = efx_siena_tx_map_chunk(tx_queue, dma_addr, len);

		/* The final descriptor for a fragment is responsible for
		 * unmapping the whole fragment.
		 */
		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		buffer->dma_offset = buffer->dma_addr - unmap_addr;

		if (frag_index >= nr_frags) {
			/* Store SKB details with the final buffer for
			 * the completion.
			 */
			buffer->skb = skb;
			buffer->flags = EFX_TX_BUF_SKB | dma_flags;
			return 0;
		}

		/* Move on to the next fragment. */
		fragment = &skb_shinfo(skb)->frags[frag_index++];
		len = skb_frag_size(fragment);
		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
					    DMA_TO_DEVICE);
		dma_flags = 0;
		unmap_len = len;
		unmap_addr = dma_addr;

		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			return -EIO;
	} while (1);
}

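/* Worst-case number of descriptors that a single TSO skb may require. */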
unsigned int efx_siena_tx_max_skb_descs(struct efx_nic *efx)
{
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for option descriptors */
	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
		max_descs += EFX_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EFX_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

	return max_descs;
}

/*
 * Fallback to software TSO.
 *
 * This is used if we are unable to send a GSO packet through hardware TSO.
 * This should only ever happen due to per-queue restrictions - unsupported
 * packets should first be filtered by the feature flags.
 *
 * Returns 0 on success, error code otherwise.
 */
int efx_siena_tx_tso_fallback(struct efx_tx_queue *tx_queue,
			      struct sk_buff *skb)
{
	struct sk_buff *segments, *next;

	segments = skb_gso_segment(skb, 0);
	if (IS_ERR(segments))
		return PTR_ERR(segments);

	dev_consume_skb_any(skb);

	skb_list_walk_safe(segments, skb, next) {
		skb_mark_not_on_list(skb);
		efx_enqueue_skb(tx_queue, skb);
	}

	return 0;
}