// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include <linux/module.h>
#include <linux/filter.h>
#include "efx_channels.h"
#include "efx.h"
#include "efx_common.h"
#include "tx_common.h"
#include "rx_common.h"
#include "nic.h"
#include "sriov.h"
#include "workarounds.h"
22 | |
/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
unsigned int efx_siena_interrupt_mode = EFX_INT_MODE_MSIX;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each core.
 */
unsigned int efx_siena_rss_cpus;
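/* The variable is registered as a module parameter elsewhere in the
 * driver (conventionally named rss_cpus in the in-tree sfc drivers),
 * so the limit can be set at load time, e.g.:
 *   modprobe sfc-siena rss_cpus=4
 * (module and parameter names assumed from the sfc driver family.)
 */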
38 | |
static unsigned int irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned int irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");
48 | |
static const struct efx_channel_type efx_default_channel_type;

/*************
 * INTERRUPTS
 *************/

static unsigned int count_online_cores(struct efx_nic *efx, bool local_node)
{
	cpumask_var_t filter_mask;
	unsigned int count;
	int cpu;

	if (unlikely(!zalloc_cpumask_var(&filter_mask, GFP_KERNEL))) {
		netif_warn(efx, probe, efx->net_dev,
			   "RSS disabled due to allocation failure\n");
		return 1;
	}

	cpumask_copy(filter_mask, cpu_online_mask);
	if (local_node)
		cpumask_and(filter_mask, filter_mask,
			    cpumask_of_pcibus(efx->pci_dev->bus));

	count = 0;
	for_each_cpu(cpu, filter_mask) {
		++count;
		cpumask_andnot(filter_mask, filter_mask, topology_sibling_cpumask(cpu));
	}

	free_cpumask_var(filter_mask);

	return count;
}
82 | |
static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
{
	unsigned int count;

	if (efx_siena_rss_cpus) {
		count = efx_siena_rss_cpus;
	} else {
		count = count_online_cores(efx, true);

		/* If no online CPUs in local node, fall back to any online CPUs */
		if (count == 0)
			count = count_online_cores(efx, false);
	}

	if (count > EFX_MAX_RX_QUEUES) {
		netif_cond_dbg(efx, probe, efx->net_dev, !efx_siena_rss_cpus,
			       warn,
			       "Reducing number of rx queues from %u to %u.\n",
			       count, EFX_MAX_RX_QUEUES);
		count = EFX_MAX_RX_QUEUES;
	}

	/* If RSS is requested for the PF *and* VFs then we can't write RSS
	 * table entries that are inaccessible to VFs
	 */
#ifdef CONFIG_SFC_SIENA_SRIOV
	if (efx->type->sriov_wanted) {
		if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
		    count > efx_vf_size(efx)) {
			netif_warn(efx, probe, efx->net_dev,
				   "Reducing number of RSS channels from %u to %u for "
				   "VF support. Increase vf-msix-limit to use more "
				   "channels on the PF.\n",
				   count, efx_vf_size(efx));
			count = efx_vf_size(efx);
		}
	}
#endif

	return count;
}
124 | |
static int efx_allocate_msix_channels(struct efx_nic *efx,
				      unsigned int max_channels,
				      unsigned int extra_channels,
				      unsigned int parallelism)
{
	unsigned int n_channels = parallelism;
	int vec_count;
	int tx_per_ev;
	int n_xdp_tx;
	int n_xdp_ev;

	if (efx_siena_separate_tx_channels)
		n_channels *= 2;
	n_channels += extra_channels;

	/* To allow XDP transmit to happen from arbitrary NAPI contexts
	 * we allocate a TX queue per CPU. We share event queues across
	 * multiple tx queues, assuming tx and ev queues are both
	 * maximum size.
	 */
	tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx);
	tx_per_ev = min(tx_per_ev, EFX_MAX_TXQ_PER_CHANNEL);
	n_xdp_tx = num_possible_cpus();
	n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev);
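	/* Each event queue can service up to tx_per_ev TX queues, so the
	 * DIV_ROUND_UP() above is the minimum number of event queues that
	 * still gives every possible CPU its own XDP TX queue.
	 */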
149 | |
	vec_count = pci_msix_vec_count(efx->pci_dev);
	if (vec_count < 0)
		return vec_count;

	max_channels = min_t(unsigned int, vec_count, max_channels);

	/* Check resources.
	 * We need a channel per event queue, plus a VI per tx queue.
	 * This may be more pessimistic than it needs to be.
	 */
	if (n_channels >= max_channels) {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
		netif_warn(efx, drv, efx->net_dev,
			   "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
			   n_xdp_ev, n_channels, max_channels);
		netif_warn(efx, drv, efx->net_dev,
			   "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
	} else if (n_channels + n_xdp_tx > efx->max_vis) {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
		netif_warn(efx, drv, efx->net_dev,
			   "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
			   n_xdp_tx, n_channels, efx->max_vis);
		netif_warn(efx, drv, efx->net_dev,
			   "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
	} else if (n_channels + n_xdp_ev > max_channels) {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_SHARED;
		netif_warn(efx, drv, efx->net_dev,
			   "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
			   n_xdp_ev, n_channels, max_channels);

		n_xdp_ev = max_channels - n_channels;
		netif_warn(efx, drv, efx->net_dev,
			   "XDP_TX and XDP_REDIRECT will work with reduced performance (%d cpus/tx_queue)\n",
			   DIV_ROUND_UP(n_xdp_tx, tx_per_ev * n_xdp_ev));
	} else {
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DEDICATED;
	}
187 | |
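	/* At this point xdp_txq_queues_mode is one of:
	 *  DEDICATED - one XDP TX queue per possible CPU, on XDP channels;
	 *  SHARED    - dedicated XDP channels, but too few TX queues, so
	 *              several CPUs must share each XDP TX queue;
	 *  BORROWED  - no dedicated resources, XDP borrows net-stack queues.
	 */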
	if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_BORROWED) {
		efx->n_xdp_channels = n_xdp_ev;
		efx->xdp_tx_per_channel = tx_per_ev;
		efx->xdp_tx_queue_count = n_xdp_tx;
		n_channels += n_xdp_ev;
		netif_dbg(efx, drv, efx->net_dev,
			  "Allocating %d TX and %d event queues for XDP\n",
			  n_xdp_ev * tx_per_ev, n_xdp_ev);
	} else {
		efx->n_xdp_channels = 0;
		efx->xdp_tx_per_channel = 0;
		efx->xdp_tx_queue_count = n_xdp_tx;
	}

	if (vec_count < n_channels) {
		netif_err(efx, drv, efx->net_dev,
			  "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
			  vec_count, n_channels);
		netif_err(efx, drv, efx->net_dev,
			  "WARNING: Performance may be reduced.\n");
		n_channels = vec_count;
	}

	n_channels = min(n_channels, max_channels);

	efx->n_channels = n_channels;

	/* Ignore XDP tx channels when creating rx channels. */
	n_channels -= efx->n_xdp_channels;

	if (efx_siena_separate_tx_channels) {
		efx->n_tx_channels =
			min(max(n_channels / 2, 1U),
			    efx->max_tx_channels);
		efx->tx_channel_offset =
			n_channels - efx->n_tx_channels;
		efx->n_rx_channels =
			max(n_channels -
			    efx->n_tx_channels, 1U);
	} else {
		efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
		efx->tx_channel_offset = 0;
		efx->n_rx_channels = n_channels;
	}

	efx->n_rx_channels = min(efx->n_rx_channels, parallelism);
	efx->n_tx_channels = min(efx->n_tx_channels, parallelism);

	efx->xdp_channel_offset = n_channels;

	netif_dbg(efx, drv, efx->net_dev,
		  "Allocating %u RX channels\n",
		  efx->n_rx_channels);

	return efx->n_channels;
}
244 | |
/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
int efx_siena_probe_interrupts(struct efx_nic *efx)
{
	unsigned int extra_channels = 0;
	unsigned int rss_spread;
	unsigned int i, j;
	int rc;

	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
		if (efx->extra_channel_type[i])
			++extra_channels;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		unsigned int parallelism = efx_wanted_parallelism(efx);
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		unsigned int n_channels;

		rc = efx_allocate_msix_channels(efx, efx->max_channels,
						extra_channels, parallelism);
		if (rc >= 0) {
			n_channels = rc;
			for (i = 0; i < n_channels; i++)
				xentries[i].entry = i;
			rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
						   n_channels);
		}
		if (rc < 0) {
			/* Fall back to single channel MSI */
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI-X\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
				efx->interrupt_mode = EFX_INT_MODE_MSI;
			else
				return rc;
		} else if (rc < n_channels) {
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Insufficient MSI-X vectors"
				  " available (%d < %u).\n", rc, n_channels);
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Performance may be reduced.\n");
			n_channels = rc;
		}

		if (rc > 0) {
			for (i = 0; i < efx->n_channels; i++)
				efx_get_channel(efx, i)->irq =
					xentries[i].vector;
		}
	}
296 | |
	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_channels = 1;
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->tx_channel_offset = 0;
		efx->n_xdp_channels = 0;
		efx->xdp_channel_offset = efx->n_channels;
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
		} else {
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
				efx->interrupt_mode = EFX_INT_MODE_LEGACY;
			else
				return rc;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_channels = 1 + (efx_siena_separate_tx_channels ? 1 : 0);
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->tx_channel_offset = efx_siena_separate_tx_channels ? 1 : 0;
		efx->n_xdp_channels = 0;
		efx->xdp_channel_offset = efx->n_channels;
		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
		efx->legacy_irq = efx->pci_dev->irq;
	}

	/* Assign extra channels if possible, before XDP channels */
	efx->n_extra_tx_channels = 0;
	j = efx->xdp_channel_offset;
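	/* j counts down from the first XDP channel, so extra channels take
	 * the highest-numbered regular channels; once j would collide with
	 * the TX channels, the remaining extra types get no channel at all.
	 */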
	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
		if (!efx->extra_channel_type[i])
			continue;
		if (j <= efx->tx_channel_offset + efx->n_tx_channels) {
			efx->extra_channel_type[i]->handle_no_channel(efx);
		} else {
			--j;
			efx_get_channel(efx, j)->type =
				efx->extra_channel_type[i];
			if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
				efx->n_extra_tx_channels++;
		}
	}

	rss_spread = efx->n_rx_channels;
	/* RSS might be usable on VFs even if it is disabled on the PF */
#ifdef CONFIG_SFC_SIENA_SRIOV
	if (efx->type->sriov_wanted) {
		efx->rss_spread = ((rss_spread > 1 ||
				    !efx->type->sriov_wanted(efx)) ?
				   rss_spread : efx_vf_size(efx));
		return 0;
	}
#endif
	efx->rss_spread = rss_spread;

	return 0;
}
362 | |
#if defined(CONFIG_SMP)
void efx_siena_set_interrupt_affinity(struct efx_nic *efx)
{
	const struct cpumask *numa_mask = cpumask_of_pcibus(efx->pci_dev->bus);
	struct efx_channel *channel;
	unsigned int cpu;

	/* If no online CPUs in local node, fall back to any online CPU */
	if (cpumask_first_and(cpu_online_mask, numa_mask) >= nr_cpu_ids)
		numa_mask = cpu_online_mask;
373 | |
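	/* Spread the channels' IRQs round-robin over the chosen mask,
	 * wrapping back to its first CPU when we run out.
	 */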
	cpu = -1;
	efx_for_each_channel(channel, efx) {
		cpu = cpumask_next_and(cpu, cpu_online_mask, numa_mask);
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first_and(cpu_online_mask, numa_mask);
		irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
	}
}

void efx_siena_clear_interrupt_affinity(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		irq_set_affinity_hint(channel->irq, NULL);
}
#else
void
efx_siena_set_interrupt_affinity(struct efx_nic *efx __always_unused)
{
}

void
efx_siena_clear_interrupt_affinity(struct efx_nic *efx __always_unused)
{
}
#endif /* CONFIG_SMP */

void efx_siena_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}
415 | |
/***************
 * EVENT QUEUES
 ***************/

/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions.
	 */
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
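	/* The queue size is a power of two, so subtracting 1 yields a
	 * cheap wrap-around mask for the event queue read pointer.
	 */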
	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;

	return efx_nic_probe_eventq(channel);
}

/* Prepare channel's event queue */
static int efx_init_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	int rc;

	EFX_WARN_ON_PARANOID(channel->eventq_init);

	netif_dbg(efx, drv, efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	rc = efx_nic_init_eventq(channel);
	if (rc == 0) {
		efx->type->push_irq_moderation(channel);
		channel->eventq_read_ptr = 0;
		channel->eventq_init = true;
	}
	return rc;
}
462 | |
/* Enable event queue processing and NAPI */
void efx_siena_start_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	/* Make sure the NAPI handler sees the enabled flag set */
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);
	efx_nic_eventq_read_ack(channel);
}

/* Disable event queue processing and NAPI */
void efx_siena_stop_eventq(struct efx_channel *channel)
{
	if (!channel->enabled)
		return;

	napi_disable(&channel->napi_str);
	channel->enabled = false;
}

static void efx_fini_eventq(struct efx_channel *channel)
{
	if (!channel->eventq_init)
		return;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	efx_nic_fini_eventq(channel);
	channel->eventq_init = false;
}

static void efx_remove_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	efx_nic_remove_eventq(channel);
}
506 | |
/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

#ifdef CONFIG_RFS_ACCEL
static void efx_filter_rfs_expire(struct work_struct *data)
{
	struct delayed_work *dwork = to_delayed_work(data);
	struct efx_channel *channel;
	unsigned int time, quota;

	channel = container_of(dwork, struct efx_channel, filter_work);
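	/* Scale the scan quota with both the table size and the time since
	 * the last scan, so that on average every installed filter is
	 * checked for expiry roughly once every 30 seconds.
	 */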
	time = jiffies - channel->rfs_last_expiry;
	quota = channel->rfs_filter_count * time / (30 * HZ);
	if (quota >= 20 && __efx_siena_filter_rfs_expire(channel,
							 min(channel->rfs_filter_count, quota)))
		channel->rfs_last_expiry += time;
	/* Ensure we do more work eventually even if NAPI poll is not happening */
	schedule_delayed_work(dwork, 30 * HZ);
}
#endif

/* Allocate and initialise a channel structure. */
static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	int j;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->efx = efx;
	channel->channel = i;
	channel->type = &efx_default_channel_type;

	for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
		tx_queue = &channel->tx_queue[j];
		tx_queue->efx = efx;
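		/* Mark the queue as not yet mapped to a hardware queue;
		 * real numbers are assigned later in efx_set_xdp_channels().
		 */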
		tx_queue->queue = -1;
		tx_queue->label = j;
		tx_queue->channel = channel;
	}

#ifdef CONFIG_RFS_ACCEL
	INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	timer_setup(&rx_queue->slow_fill, efx_siena_rx_slow_fill, 0);

	return channel;
}
565 | |
int efx_siena_init_channels(struct efx_nic *efx)
{
	unsigned int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		efx->channel[i] = efx_alloc_channel(efx, i);
		if (!efx->channel[i])
			return -ENOMEM;
		efx->msi_context[i].efx = efx;
		efx->msi_context[i].index = i;
	}

	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = min(efx->type->min_interrupt_mode,
				  efx_siena_interrupt_mode);

	efx->max_channels = EFX_MAX_CHANNELS;
	efx->max_tx_channels = EFX_MAX_CHANNELS;

	return 0;
}

void efx_siena_fini_channels(struct efx_nic *efx)
{
	unsigned int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++)
		if (efx->channel[i]) {
			kfree(efx->channel[i]);
			efx->channel[i] = NULL;
		}
}

/* Allocate and initialise a channel structure, copying parameters
 * (but not resources) from an old channel structure.
 */
static
struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	int j;

	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	*channel = *old_channel;
615 | |
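	/* The struct copy above duplicated pointers to the old channel's
	 * resources, so clear out everything that must be re-created for
	 * the new instance (NAPI state, event queue, DMA buffers).
	 */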
	channel->napi_dev = NULL;
	INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
	channel->napi_str.napi_id = 0;
	channel->napi_str.state = 0;
	memset(&channel->eventq, 0, sizeof(channel->eventq));

	for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
		tx_queue = &channel->tx_queue[j];
		if (tx_queue->channel)
			tx_queue->channel = channel;
		tx_queue->buffer = NULL;
		tx_queue->cb_page = NULL;
		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
	}

	rx_queue = &channel->rx_queue;
	rx_queue->buffer = NULL;
	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
	timer_setup(&rx_queue->slow_fill, efx_siena_rx_slow_fill, 0);
#ifdef CONFIG_RFS_ACCEL
	INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	return channel;
}
641 | |
static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = channel->type->pre_probe(channel);
	if (rc)
		goto fail;

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_siena_probe_tx_queue(tx_queue);
		if (rc)
			goto fail;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_siena_probe_rx_queue(rx_queue);
		if (rc)
			goto fail;
	}

	channel->rx_list = NULL;

	return 0;

fail:
	efx_siena_remove_channel(channel);
	return rc;
}

static void efx_get_channel_name(struct efx_channel *channel, char *buf,
				 size_t len)
{
	struct efx_nic *efx = channel->efx;
	const char *type;
	int number;

	number = channel->channel;

	if (number >= efx->xdp_channel_offset &&
	    !WARN_ON_ONCE(!efx->n_xdp_channels)) {
		type = "-xdp";
		number -= efx->xdp_channel_offset;
	} else if (efx->tx_channel_offset == 0) {
		type = "";
	} else if (number < efx->tx_channel_offset) {
		type = "-rx";
	} else {
		type = "-tx";
		number -= efx->tx_channel_offset;
	}
	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
}

void efx_siena_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		channel->type->get_name(channel,
					efx->msi_context[channel->channel].name,
					sizeof(efx->msi_context[0].name));
}
713 | |
int efx_siena_probe_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Restart special buffer allocation */
	efx->next_buffer_table = 0;

	/* Probe channels in reverse, so that any 'extra' channels
	 * use the start of the buffer table. This allows the traffic
	 * channels to be resized without moving them or wasting the
	 * entries before them.
	 */
	efx_for_each_channel_rev(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	efx_siena_set_channel_names(efx);

	return 0;

fail:
	efx_siena_remove_channels(efx);
	return rc;
}

void efx_siena_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_siena_remove_rx_queue(rx_queue);
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_siena_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);
	channel->type->post_remove(channel);
}

void efx_siena_remove_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_siena_remove_channel(channel);

	kfree(efx->xdp_tx_queues);
}

static int efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
				struct efx_tx_queue *tx_queue)
{
	if (xdp_queue_number >= efx->xdp_tx_queue_count)
		return -EINVAL;

	netif_dbg(efx, drv, efx->net_dev,
		  "Channel %u TXQ %u is XDP %u, HW %u\n",
		  tx_queue->channel->channel, tx_queue->label,
		  xdp_queue_number, tx_queue->queue);
	efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
	return 0;
}
784 | |
static void efx_set_xdp_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	unsigned int next_queue = 0;
	int xdp_queue_number = 0;
	int rc;

	/* We need to mark which channels really have RX and TX
	 * queues, and adjust the TX queue numbers if we have separate
	 * RX-only and TX-only channels.
	 */
	efx_for_each_channel(channel, efx) {
		if (channel->channel < efx->tx_channel_offset)
			continue;

		if (efx_channel_is_xdp_tx(channel)) {
			efx_for_each_channel_tx_queue(tx_queue, channel) {
				tx_queue->queue = next_queue++;
				rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
							  tx_queue);
				if (rc == 0)
					xdp_queue_number++;
			}
		} else {
			efx_for_each_channel_tx_queue(tx_queue, channel) {
				tx_queue->queue = next_queue++;
				netif_dbg(efx, drv, efx->net_dev,
					  "Channel %u TXQ %u is HW %u\n",
					  channel->channel, tx_queue->label,
					  tx_queue->queue);
			}

			/* If XDP is borrowing queues from net stack, it must
			 * use the queue with no csum offload, which is the
			 * first one of the channel
			 * (note: tx_queue_by_type is not initialized yet)
			 */
			if (efx->xdp_txq_queues_mode ==
			    EFX_XDP_TX_QUEUES_BORROWED) {
				tx_queue = &channel->tx_queue[0];
				rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
							  tx_queue);
				if (rc == 0)
					xdp_queue_number++;
			}
		}
	}
	WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
		xdp_queue_number != efx->xdp_tx_queue_count);
	WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
		xdp_queue_number > efx->xdp_tx_queue_count);

	/* If we have more CPUs than assigned XDP TX queues, assign the already
	 * existing queues to the exceeding CPUs
	 */
	next_queue = 0;
	while (xdp_queue_number < efx->xdp_tx_queue_count) {
		tx_queue = efx->xdp_tx_queues[next_queue++];
		rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
		if (rc == 0)
			xdp_queue_number++;
	}
}
849 | |
static int efx_soft_enable_interrupts(struct efx_nic *efx);
static void efx_soft_disable_interrupts(struct efx_nic *efx);
static void efx_init_napi_channel(struct efx_channel *channel);
static void efx_fini_napi_channel(struct efx_channel *channel);

int efx_siena_realloc_channels(struct efx_nic *efx, u32 rxq_entries,
			       u32 txq_entries)
{
	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
	unsigned int i, next_buffer_table = 0;
	u32 old_rxq_entries, old_txq_entries;
	int rc, rc2;

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;

	/* Not all channels should be reallocated. We must avoid
	 * reallocating their buffer table entries.
	 */
	efx_for_each_channel(channel, efx) {
		struct efx_rx_queue *rx_queue;
		struct efx_tx_queue *tx_queue;

		if (channel->type->copy)
			continue;
		next_buffer_table = max(next_buffer_table,
					channel->eventq.index +
					channel->eventq.entries);
		efx_for_each_channel_rx_queue(rx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						rx_queue->rxd.index +
						rx_queue->rxd.entries);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						tx_queue->txd.index +
						tx_queue->txd.entries);
	}

	efx_device_detach_sync(efx);
	efx_siena_stop_all(efx);
	efx_soft_disable_interrupts(efx);

	/* Clone channels (where possible) */
	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (channel->type->copy)
			channel = channel->type->copy(channel);
		if (!channel) {
			rc = -ENOMEM;
			goto out;
		}
		other_channel[i] = channel;
	}

	/* Swap entry counts and channel pointers */
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++)
		swap(efx->channel[i], other_channel[i]);

	/* Restart buffer table allocation */
	efx->next_buffer_table = next_buffer_table;

	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (!channel->type->copy)
			continue;
		rc = efx_probe_channel(channel);
		if (rc)
			goto rollback;
		efx_init_napi_channel(efx->channel[i]);
	}

	efx_set_xdp_channels(efx);
out:
	/* Destroy unused channel structures */
	for (i = 0; i < efx->n_channels; i++) {
		channel = other_channel[i];
		if (channel && channel->type->copy) {
			efx_fini_napi_channel(channel);
			efx_siena_remove_channel(channel);
			kfree(channel);
		}
	}

	rc2 = efx_soft_enable_interrupts(efx);
	if (rc2) {
		rc = rc ? rc : rc2;
		netif_err(efx, drv, efx->net_dev,
			  "unable to restart interrupts on channel reallocation\n");
		efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
	} else {
		efx_siena_start_all(efx);
		efx_device_attach_if_not_resetting(efx);
	}
	return rc;

rollback:
	/* Swap back */
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++)
		swap(efx->channel[i], other_channel[i]);
	goto out;
}
959 | |
int efx_siena_set_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	if (efx->xdp_tx_queue_count) {
		EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);

		/* Allocate array for XDP TX queue lookup. */
		efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
					     sizeof(*efx->xdp_tx_queues),
					     GFP_KERNEL);
		if (!efx->xdp_tx_queues)
			return -ENOMEM;
	}

	efx_for_each_channel(channel, efx) {
		if (channel->channel < efx->n_rx_channels)
			channel->rx_queue.core_index = channel->channel;
		else
			channel->rx_queue.core_index = -1;
	}

	efx_set_xdp_channels(efx);

	rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
	if (rc)
		return rc;
	return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
}

static bool efx_default_channel_want_txqs(struct efx_channel *channel)
{
	return channel->channel - channel->efx->tx_channel_offset <
	       channel->efx->n_tx_channels;
}
996 | |
/*************
 * START/STOP
 *************/

static int efx_soft_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	BUG_ON(efx->state == STATE_DISABLED);

	efx->irq_soft_enabled = true;
	smp_wmb();
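	/* The barrier makes the write to irq_soft_enabled visible before
	 * any events can be delivered on the queues enabled below; it is
	 * intended to pair with reads of the flag in the interrupt paths.
	 */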
1010 | |
	efx_for_each_channel(channel, efx) {
		if (!channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
		efx_siena_start_eventq(channel);
	}

	efx_siena_mcdi_mode_event(efx);

	return 0;
fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		efx_siena_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	return rc;
}

static void efx_soft_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	if (efx->state == STATE_DISABLED)
		return;

	efx_siena_mcdi_mode_poll(efx);

	efx->irq_soft_enabled = false;
	smp_wmb();

	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);

	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);

		efx_siena_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	/* Flush the asynchronous MCDI request queue */
	efx_siena_mcdi_flush_async(efx);
}

int efx_siena_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	/* TODO: Is this really a bug? */
	BUG_ON(efx->state == STATE_DISABLED);

	if (efx->eeh_disabled_legacy_irq) {
		enable_irq(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = false;
	}

	efx->type->irq_enable_master(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
	}

	rc = efx_soft_enable_interrupts(efx);
	if (rc)
		goto fail;

	return 0;

fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);

	return rc;
}

void efx_siena_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_soft_disable_interrupts(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);
}
1120 | |
void efx_siena_start_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	efx_for_each_channel_rev(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_siena_init_tx_queue(tx_queue);
			atomic_inc(&efx->active_queues);
		}

		efx_for_each_channel_rx_queue(rx_queue, channel) {
			efx_siena_init_rx_queue(rx_queue);
			atomic_inc(&efx->active_queues);
			efx_siena_stop_eventq(channel);
			efx_siena_fast_push_rx_descriptors(rx_queue, false);
			efx_siena_start_eventq(channel);
		}

		WARN_ON(channel->rx_pkt_n_frags);
	}
}

void efx_siena_stop_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;
	int rc = 0;

	/* Stop RX refill */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			rx_queue->refill_enabled = false;
	}

	efx_for_each_channel(channel, efx) {
		/* RX packet processing is pipelined, so wait for the
		 * NAPI handler to complete. At least event queue 0
		 * might be kept active by non-data events, so don't
		 * use napi_synchronize() but actually disable NAPI
		 * temporarily.
		 */
		if (efx_channel_has_rx_queue(channel)) {
			efx_siena_stop_eventq(channel);
			efx_siena_start_eventq(channel);
		}
	}

	if (efx->type->fini_dmaq)
		rc = efx->type->fini_dmaq(efx);

	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
	} else {
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully flushed all queues\n");
	}

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_siena_fini_rx_queue(rx_queue);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_siena_fini_tx_queue(tx_queue);
	}
}
1188 | |
/**************************************************************************
 *
 * NAPI interface
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int budget)
{
	struct efx_tx_queue *tx_queue;
	struct list_head rx_list;
	int spent;

	if (unlikely(!channel->enabled))
		return 0;

	/* Prepare the batch receive list */
	EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
	INIT_LIST_HEAD(&rx_list);
	channel->rx_list = &rx_list;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		tx_queue->pkts_compl = 0;
		tx_queue->bytes_compl = 0;
	}

	spent = efx_nic_process_eventq(channel, budget);
	if (spent && efx_channel_has_rx_queue(channel)) {
		struct efx_rx_queue *rx_queue =
			efx_channel_get_rx_queue(channel);

		efx_rx_flush_packet(channel);
		efx_siena_fast_push_rx_descriptors(rx_queue, true);
	}

	/* Update BQL */
	efx_for_each_channel_tx_queue(tx_queue, channel) {
		if (tx_queue->bytes_compl) {
			netdev_tx_completed_queue(tx_queue->core_txq,
						  tx_queue->pkts_compl,
						  tx_queue->bytes_compl);
		}
	}

	/* Receive any packets we queued up */
	netif_receive_skb_list(channel->rx_list);
	channel->rx_list = NULL;

	return spent;
}

static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
{
	int step = efx->irq_mod_step_us;

	if (channel->irq_mod_score < irq_adapt_low_thresh) {
		if (channel->irq_moderation_us > step) {
			channel->irq_moderation_us -= step;
			efx->type->push_irq_moderation(channel);
		}
	} else if (channel->irq_mod_score > irq_adapt_high_thresh) {
		if (channel->irq_moderation_us <
		    efx->irq_rx_moderation_us) {
			channel->irq_moderation_us += step;
			efx->type->push_irq_moderation(channel);
		}
	}

	channel->irq_count = 0;
	channel->irq_mod_score = 0;
}
1266 | |
/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_nic *efx = channel->efx;
#ifdef CONFIG_RFS_ACCEL
	unsigned int time;
#endif
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = efx_process_channel(channel, budget);

	xdp_do_flush();

	if (spent < budget) {
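		/* Re-evaluate adaptive IRQ moderation roughly once every
		 * 1000 interrupts; efx_update_irq_mod() also resets the
		 * interrupt and score counters.
		 */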
		if (efx_channel_has_rx_queue(channel) &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			efx_update_irq_mod(efx, channel);
		}

#ifdef CONFIG_RFS_ACCEL
		/* Perhaps expire some ARFS filters */
		time = jiffies - channel->rfs_last_expiry;
		/* Would our quota be >= 20? */
		if (channel->rfs_filter_count * time >= 600 * HZ)
			mod_delayed_work(system_wq, &channel->filter_work, 0);
#endif

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_nic_eventq_read_ack() will have no effect if
		 * interrupts have already been disabled.
		 */
		if (napi_complete_done(napi, spent))
			efx_nic_eventq_read_ack(channel);
	}

	return spent;
}

static void efx_init_napi_channel(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	channel->napi_dev = efx->net_dev;
	netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll);
}

void efx_siena_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_init_napi_channel(channel);
}

static void efx_fini_napi_channel(struct efx_channel *channel)
{
	if (channel->napi_dev)
		netif_napi_del(&channel->napi_str);

	channel->napi_dev = NULL;
}

void efx_siena_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_fini_napi_channel(channel);
}
1348 | |
/***************
 * Housekeeping
 ***************/

static int efx_channel_dummy_op_int(struct efx_channel *channel)
{
	return 0;
}

void efx_siena_channel_dummy_op_void(struct efx_channel *channel)
{
}

static const struct efx_channel_type efx_default_channel_type = {
	.pre_probe		= efx_channel_dummy_op_int,
	.post_remove		= efx_siena_channel_dummy_op_void,
	.get_name		= efx_get_channel_name,
	.copy			= efx_copy_channel,
	.want_txqs		= efx_default_channel_want_txqs,
	.keep_eventq		= false,
	.want_pio		= true,
};
1371 | |