// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2021 Intel Corporation. */

#include <generated/utsrelease.h>
#include <linux/crash_dump.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <linux/module.h>
#include <net/pkt_cls.h>
#include <net/xdp_sock_drv.h>

/* Local includes */
#include "i40e.h"
#include "i40e_devids.h"
#include "i40e_diag.h"
#include "i40e_lan_hmc.h"
#include "i40e_virtchnl_pf.h"
#include "i40e_xsk.h"

/* All i40e tracepoints are defined by the include below, which
 * must be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "i40e_trace.h"

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
	"Intel(R) Ethernet Connection XL710 Network Driver";

static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation.";

/* forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf);
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
				   bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
static bool i40e_check_recovery_mode(struct i40e_pf *pf);
static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
static int i40e_get_capabilities(struct i40e_pf *pf,
				 enum i40e_admin_queue_opc list_type);
static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_BC), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL v2");

static struct workqueue_struct *i40e_wq;

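/**
 * netdev_hw_addr_refcnt - adjust refcount of a matching multicast entry
 * @f: MAC filter whose address identifies the multicast list entry
 * @netdev: network interface device structure
 * @delta: value to add to the entry's refcount (may be negative)
 *
 * Keeps the netdev multicast list refcount in step with the driver's own
 * filter accounting; the count is clamped at a minimum of 1 so the entry
 * is never dropped while a filter still references it.
 */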
static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
				  struct net_device *netdev, int delta)
{
	struct netdev_hw_addr *ha;

	if (!f || !netdev)
		return;

	netdev_for_each_mc_addr(ha, netdev) {
		if (ether_addr_equal(ha->addr, f->macaddr)) {
			ha->refcount += delta;
			if (ha->refcount <= 0)
				ha->refcount = 1;
			break;
		}
	}
}

/**
 * i40e_hw_to_dev - get device pointer from the hardware structure
 * @hw: pointer to the device HW structure
 **/
struct device *i40e_hw_to_dev(struct i40e_hw *hw)
{
	struct i40e_pf *pf = i40e_hw_to_pf(hw);

	return &pf->pdev->dev;
}

/**
 * i40e_allocate_dma_mem - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			  u64 size, u32 alignment)
{
	struct i40e_pf *pf = i40e_hw_to_pf(hw);

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_dma_mem - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = i40e_hw_to_pf(hw);

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}

/**
 * i40e_allocate_virt_mem - OS specific memory alloc for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			   u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}

/**
 * i40e_free_virt_mem - OS specific memory free for shared code
 * @hw: pointer to the HW structure
 * @mem: ptr to mem struct to free
 **/
int i40e_free_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}

/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%s needed=%d id=0x%04x\n",
			 pile ? "<valid>" : "<null>", needed, id);
		return -EINVAL;
	}

	/* Allocate last queue in the pile for FDIR VSI queue
	 * so it doesn't fragment the qp_pile
	 */
	if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) {
		if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) {
			dev_err(&pf->pdev->dev,
				"Cannot allocate queue %d for I40E_VSI_FDIR\n",
				pile->num_entries - 1);
			return -ENOMEM;
		}
		pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT;
		return pile->num_entries - 1;
	}

	i = 0;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}
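
/*
 * Typical pairing (sketch): a VSI reserves a contiguous block of queues at
 * setup and returns it with the same owner id on teardown, e.g.
 *
 *	vsi->base_queue = i40e_get_lump(pf, pf->qp_pile,
 *					vsi->alloc_queue_pairs, vsi->idx);
 *	...
 *	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
 */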

/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	u16 i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	return count;
}

/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
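	/* Queue the task while the PF is up and no reset is in flight;
	 * recovery mode is the one exception, where the service task must
	 * keep running even though the device is otherwise down.
	 */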
	if ((!test_bit(__I40E_DOWN, pf->state) &&
	     !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) ||
	    test_bit(__I40E_RECOVERY_MODE, pf->state))
		queue_work(i40e_wq, &pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number timing out
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i;
	u32 head, val;

	pf->tx_timeout_count++;

	/* with txqueue index, find the tx_ring struct */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
			if (txqueue ==
			    vsi->tx_rings[i]->queue_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}
		}
	}

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	/* don't kick off another recovery if one is already pending */
	if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
		return;

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						 tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, txqueue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, txqueue %d\n",
		    pf->tx_timeout_recovery_level, txqueue);

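	/* Escalate through progressively wider resets on successive
	 * timeouts: PF reset, then core, then global, before giving up
	 * and taking the device down.
	 */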
	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n");
		set_bit(__I40E_DOWN_REQUESTED, pf->state);
		set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state);
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}

/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
 * @ring: Tx ring to get statistics from
 * @stats: statistics entry to be updated
 **/
static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
					    struct rtnl_link_stats64 *stats)
{
	u64 bytes, packets;
	unsigned int start;
	do {
		start = u64_stats_fetch_begin(&ring->syncp);
		packets = ring->stats.packets;
		bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry(&ring->syncp, start));

	stats->tx_packets += packets;
	stats->tx_bytes += bytes;
}

/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: data structure to store statistics
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
static void i40e_get_netdev_stats_struct(struct net_device *netdev,
					 struct rtnl_link_stats64 *stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	struct i40e_ring *ring;
	int i;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	if (!vsi->tx_rings)
		return;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		ring = READ_ONCE(vsi->tx_rings[i]);
		if (!ring)
			continue;
		i40e_get_netdev_stats_struct_tx(ring, stats);

		if (i40e_enabled_xdp_vsi(vsi)) {
			ring = READ_ONCE(vsi->xdp_rings[i]);
			if (!ring)
				continue;
			i40e_get_netdev_stats_struct_tx(ring, stats);
		}

		ring = READ_ONCE(vsi->rx_rings[i]);
		if (!ring)
			continue;
		do {
			start = u64_stats_fetch_begin(&ring->syncp);
			packets = ring->stats.packets;
			bytes = ring->stats.bytes;
		} while (u64_stats_fetch_retry(&ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast = vsi_stats->multicast;
	stats->tx_errors = vsi_stats->tx_errors;
	stats->tx_dropped = vsi_stats->tx_dropped;
	stats->rx_errors = vsi_stats->rx_errors;
	stats->rx_dropped = vsi_stats->rx_dropped;
	stats->rx_missed_errors = vsi_stats->rx_missed_errors;
	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;
}

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			memset(&pf->veb[i]->tc_stats, 0,
			       sizeof(pf->veb[i]->tc_stats));
			memset(&pf->veb[i]->tc_stats_offsets, 0,
			       sizeof(pf->veb[i]->tc_stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
	pf->hw_csum_rx_error = 0;
}

/**
 * i40e_compute_pci_to_hw_id - compute HW stats index from the PCI function
 * @vsi: ptr to the VSI to read from.
 * @hw: ptr to the hardware info.
 *
 * Returns the per-PCI-function index used to address the GL_RXERR1
 * statistics registers.
 **/
static u32 i40e_compute_pci_to_hw_id(struct i40e_vsi *vsi, struct i40e_hw *hw)
{
	int pf_count = i40e_get_pf_count(hw);

	if (vsi->type == I40E_VSI_SRIOV)
		return (hw->port * BIT(7)) / pf_count + vsi->vf_id;

	return hw->port + BIT(7);
}

/**
 * i40e_stat_update64 - read and update a 64 bit stat from the chip.
 * @hw: ptr to the hardware info.
 * @hireg: the high 32 bit reg to read.
 * @loreg: the low 32 bit reg to read.
 * @offset_loaded: has the initial offset been loaded yet.
 * @offset: ptr to current offset value.
 * @stat: ptr to the stat.
 *
 * Since the device stats are not reset at PFReset, they will not
 * be zeroed when the driver starts. We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.
 **/
static void i40e_stat_update64(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	new_data = rd64(hw, loreg);

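	/* A full 64-bit read can't realistically wrap; if the value still
	 * goes backwards (e.g. the register was cleared), re-baseline
	 * instead of reporting a huge bogus delta.
	 */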
	if (!offset_loaded || new_data < *offset)
		*offset = new_data;
	*stat = new_data - *offset;
}

/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts. We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero. In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
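
/*
 * Worked example of the 48-bit rollover handling above (sketch): with
 * *offset == 0xFFFFFFFFFFF0 and a post-wrap read of new_data == 0x10,
 * (0x10 + BIT_ULL(48)) - 0xFFFFFFFFFFF0 == 0x20, i.e. 32 units elapsed
 * across the wrap.
 */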

/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}

/**
 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read and clear
 * @stat: ptr to the stat
 **/
static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
{
	u32 new_data = rd32(hw, reg);

	wr32(hw, reg, 1); /* must write a nonzero value to clear register */
	*stat += new_data;
}
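
/*
 * Note: unlike the offset-based helpers above, this one accumulates into
 * *stat directly, since writing the register clears the hardware counter
 * on every poll.
 */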

/**
 * i40e_stats_update_rx_discards - update rx_discards.
 * @vsi: ptr to the VSI to be updated.
 * @hw: ptr to the hardware info.
 * @stat_idx: VSI's stat_counter_idx.
 * @offset_loaded: ptr to the VSI's stat_offsets_loaded.
 * @stat_offset: ptr to stat_offset to store first read of specific register.
 * @stat: ptr to VSI's stat to be updated.
 **/
static void
i40e_stats_update_rx_discards(struct i40e_vsi *vsi, struct i40e_hw *hw,
			      int stat_idx, bool offset_loaded,
			      struct i40e_eth_stats *stat_offset,
			      struct i40e_eth_stats *stat)
{
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), offset_loaded,
			   &stat_offset->rx_discards, &stat->rx_discards);
	i40e_stat_update64(hw,
			   I40E_GL_RXERR1H(i40e_compute_pci_to_hw_id(vsi, hw)),
			   I40E_GL_RXERR1L(i40e_compute_pci_to_hw_id(vsi, hw)),
			   offset_loaded, &stat_offset->rx_discards_other,
			   &stat->rx_discards_other);
}

/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);

	i40e_stats_update_rx_discards(vsi, hw, stat_idx,
				      vsi->stat_offsets_loaded, oes, es);

	vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}

/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs. This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications. We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	u64 rx_page, rx_buf, rx_reuse, rx_alloc, rx_waive, rx_busy;
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u64 tx_restart, tx_busy;
	struct i40e_ring *p;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 tx_stopped;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
	tx_stopped = 0;
	rx_page = 0;
	rx_buf = 0;
	rx_reuse = 0;
	rx_alloc = 0;
	rx_waive = 0;
	rx_busy = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = READ_ONCE(vsi->tx_rings[q]);
		if (!p)
			continue;

		do {
			start = u64_stats_fetch_begin(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;
		tx_stopped += p->tx_stats.tx_stopped;

		/* locate Rx ring */
		p = READ_ONCE(vsi->rx_rings[q]);
		if (!p)
			continue;

		do {
			start = u64_stats_fetch_begin(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
		rx_reuse += p->rx_stats.page_reuse_count;
		rx_alloc += p->rx_stats.page_alloc_count;
		rx_waive += p->rx_stats.page_waive_count;
		rx_busy += p->rx_stats.page_busy_count;

		if (i40e_enabled_xdp_vsi(vsi)) {
			/* locate XDP ring */
			p = READ_ONCE(vsi->xdp_rings[q]);
			if (!p)
				continue;

			do {
				start = u64_stats_fetch_begin(&p->syncp);
				packets = p->stats.packets;
				bytes = p->stats.bytes;
			} while (u64_stats_fetch_retry(&p->syncp, start));
			tx_b += bytes;
			tx_p += packets;
			tx_restart += p->tx_stats.restart_queue;
			tx_busy += p->tx_stats.tx_busy;
			tx_linearize += p->tx_stats.tx_linearize;
			tx_force_wb += p->tx_stats.tx_force_wb;
		}
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->tx_stopped = tx_stopped;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;
	vsi->rx_page_reuse = rx_reuse;
	vsi->rx_page_alloc = rx_alloc;
	vsi->rx_page_waive = rx_waive;
	vsi->rx_page_busy = rx_busy;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards_other;
	ns->rx_dropped = es->rx_discards_other;
	ons->rx_missed_errors = oes->rx_discards;
	ns->rx_missed_errors = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}

/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

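	/* per-priority (802.1p / PFC) pause frame counters, one set for
	 * each of the eight priorities
	 */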
1092 for (i = 0; i < 8; i++) {
1093 i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
1094 offset_loaded: pf->stat_offsets_loaded,
1095 offset: &osd->priority_xoff_rx[i],
1096 stat: &nsd->priority_xoff_rx[i]);
1097 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
1098 offset_loaded: pf->stat_offsets_loaded,
1099 offset: &osd->priority_xon_rx[i],
1100 stat: &nsd->priority_xon_rx[i]);
1101 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
1102 offset_loaded: pf->stat_offsets_loaded,
1103 offset: &osd->priority_xon_tx[i],
1104 stat: &nsd->priority_xon_tx[i]);
1105 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
1106 offset_loaded: pf->stat_offsets_loaded,
1107 offset: &osd->priority_xoff_tx[i],
1108 stat: &nsd->priority_xoff_tx[i]);
1109 i40e_stat_update32(hw,
1110 I40E_GLPRT_RXON2OFFCNT(hw->port, i),
1111 offset_loaded: pf->stat_offsets_loaded,
1112 offset: &osd->priority_xon_2_xoff[i],
1113 stat: &nsd->priority_xon_2_xoff[i]);
1114 }
1115
1116 i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
1117 I40E_GLPRT_PRC64L(hw->port),
1118 offset_loaded: pf->stat_offsets_loaded,
1119 offset: &osd->rx_size_64, stat: &nsd->rx_size_64);
1120 i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
1121 I40E_GLPRT_PRC127L(hw->port),
1122 offset_loaded: pf->stat_offsets_loaded,
1123 offset: &osd->rx_size_127, stat: &nsd->rx_size_127);
1124 i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
1125 I40E_GLPRT_PRC255L(hw->port),
1126 offset_loaded: pf->stat_offsets_loaded,
1127 offset: &osd->rx_size_255, stat: &nsd->rx_size_255);
1128 i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
1129 I40E_GLPRT_PRC511L(hw->port),
1130 offset_loaded: pf->stat_offsets_loaded,
1131 offset: &osd->rx_size_511, stat: &nsd->rx_size_511);
1132 i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
1133 I40E_GLPRT_PRC1023L(hw->port),
1134 offset_loaded: pf->stat_offsets_loaded,
1135 offset: &osd->rx_size_1023, stat: &nsd->rx_size_1023);
1136 i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
1137 I40E_GLPRT_PRC1522L(hw->port),
1138 offset_loaded: pf->stat_offsets_loaded,
1139 offset: &osd->rx_size_1522, stat: &nsd->rx_size_1522);
1140 i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
1141 I40E_GLPRT_PRC9522L(hw->port),
1142 offset_loaded: pf->stat_offsets_loaded,
1143 offset: &osd->rx_size_big, stat: &nsd->rx_size_big);
1144
1145 i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
1146 I40E_GLPRT_PTC64L(hw->port),
1147 offset_loaded: pf->stat_offsets_loaded,
1148 offset: &osd->tx_size_64, stat: &nsd->tx_size_64);
1149 i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
1150 I40E_GLPRT_PTC127L(hw->port),
1151 offset_loaded: pf->stat_offsets_loaded,
1152 offset: &osd->tx_size_127, stat: &nsd->tx_size_127);
1153 i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
1154 I40E_GLPRT_PTC255L(hw->port),
1155 offset_loaded: pf->stat_offsets_loaded,
1156 offset: &osd->tx_size_255, stat: &nsd->tx_size_255);
1157 i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
1158 I40E_GLPRT_PTC511L(hw->port),
1159 offset_loaded: pf->stat_offsets_loaded,
1160 offset: &osd->tx_size_511, stat: &nsd->tx_size_511);
1161 i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
1162 I40E_GLPRT_PTC1023L(hw->port),
1163 offset_loaded: pf->stat_offsets_loaded,
1164 offset: &osd->tx_size_1023, stat: &nsd->tx_size_1023);
1165 i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
1166 I40E_GLPRT_PTC1522L(hw->port),
1167 offset_loaded: pf->stat_offsets_loaded,
1168 offset: &osd->tx_size_1522, stat: &nsd->tx_size_1522);
1169 i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
1170 I40E_GLPRT_PTC9522L(hw->port),
1171 offset_loaded: pf->stat_offsets_loaded,
1172 offset: &osd->tx_size_big, stat: &nsd->tx_size_big);
1173
1174 i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
1175 offset_loaded: pf->stat_offsets_loaded,
1176 offset: &osd->rx_undersize, stat: &nsd->rx_undersize);
1177 i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
1178 offset_loaded: pf->stat_offsets_loaded,
1179 offset: &osd->rx_fragments, stat: &nsd->rx_fragments);
1180 i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
1181 offset_loaded: pf->stat_offsets_loaded,
1182 offset: &osd->rx_oversize, stat: &nsd->rx_oversize);
1183 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
1184 offset_loaded: pf->stat_offsets_loaded,
1185 offset: &osd->rx_jabber, stat: &nsd->rx_jabber);
1186
1187 /* FDIR stats */
1188 i40e_stat_update_and_clear32(hw,
1189 I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
1190 stat: &nsd->fd_atr_match);
1191 i40e_stat_update_and_clear32(hw,
1192 I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
1193 stat: &nsd->fd_sb_match);
1194 i40e_stat_update_and_clear32(hw,
1195 I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
1196 stat: &nsd->fd_atr_tunnel_match);
1197
1198 val = rd32(hw, I40E_PRTPM_EEE_STAT);
1199 nsd->tx_lpi_status =
1200 (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
1201 I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
1202 nsd->rx_lpi_status =
1203 (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
1204 I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
1205 i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
1206 offset_loaded: pf->stat_offsets_loaded,
1207 offset: &osd->tx_lpi_count, stat: &nsd->tx_lpi_count);
1208 i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
1209 offset_loaded: pf->stat_offsets_loaded,
1210 offset: &osd->rx_lpi_count, stat: &nsd->rx_lpi_count);
1211
1212 if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
1213 !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
1214 nsd->fd_sb_status = true;
1215 else
1216 nsd->fd_sb_status = false;
1217
1218 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
1219 !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
1220 nsd->fd_atr_status = true;
1221 else
1222 nsd->fd_atr_status = false;
1223
1224 pf->stat_offsets_loaded = true;
1225}
1226
1227/**
1228 * i40e_update_stats - Update the various statistics counters.
1229 * @vsi: the VSI to be updated
1230 *
1231 * Update the various stats for this VSI and its related entities.
1232 **/
1233void i40e_update_stats(struct i40e_vsi *vsi)
1234{
1235 struct i40e_pf *pf = vsi->back;
1236
1237 if (vsi == pf->vsi[pf->lan_vsi])
1238 i40e_update_pf_stats(pf);
1239
1240 i40e_update_vsi_stats(vsi);
1241}
1242
1243/**
1244 * i40e_count_filters - counts VSI mac filters
1245 * @vsi: the VSI to be searched
1246 *
1247 * Returns count of mac filters
1248 **/
1249int i40e_count_filters(struct i40e_vsi *vsi)
1250{
1251 struct i40e_mac_filter *f;
1252 struct hlist_node *h;
1253 int bkt;
1254 int cnt = 0;
1255
1256 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
1257 ++cnt;
1258
1259 return cnt;
1260}
1261
1262/**
1263 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
1264 * @vsi: the VSI to be searched
1265 * @macaddr: the MAC address
1266 * @vlan: the vlan
1267 *
1268 * Returns ptr to the filter object or NULL
1269 **/
1270static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1271 const u8 *macaddr, s16 vlan)
1272{
1273 struct i40e_mac_filter *f;
1274 u64 key;
1275
1276 if (!vsi || !macaddr)
1277 return NULL;
1278
1279 key = i40e_addr_to_hkey(macaddr);
1280 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1281 if ((ether_addr_equal(addr1: macaddr, addr2: f->macaddr)) &&
1282 (vlan == f->vlan))
1283 return f;
1284 }
1285 return NULL;
1286}
1287
1288/**
1289 * i40e_find_mac - Find a mac addr in the macvlan filters list
1290 * @vsi: the VSI to be searched
1291 * @macaddr: the MAC address we are searching for
1292 *
1293 * Returns the first filter with the provided MAC address or NULL if
1294 * MAC address was not found
1295 **/
1296struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
1297{
1298 struct i40e_mac_filter *f;
1299 u64 key;
1300
1301 if (!vsi || !macaddr)
1302 return NULL;
1303
1304 key = i40e_addr_to_hkey(macaddr);
1305 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1306 if ((ether_addr_equal(addr1: macaddr, addr2: f->macaddr)))
1307 return f;
1308 }
1309 return NULL;
1310}
1311
1312/**
1313 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1314 * @vsi: the VSI to be searched
1315 *
1316 * Returns true if VSI is in vlan mode or false otherwise
1317 **/
1318bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1319{
1320 /* If we have a PVID, always operate in VLAN mode */
1321 if (vsi->info.pvid)
1322 return true;
1323
1324 /* We need to operate in VLAN mode whenever we have any filters with
1325 * a VLAN other than I40E_VLAN_ALL. We could check the table each
1326 * time, incurring search cost repeatedly. However, we can notice two
1327 * things:
1328 *
1329 * 1) the only place where we can gain a VLAN filter is in
1330 * i40e_add_filter.
1331 *
1332 * 2) the only place where filters are actually removed is in
1333 * i40e_sync_filters_subtask.
1334 *
1335 * Thus, we can simply use a boolean value, has_vlan_filters which we
1336 * will set to true when we add a VLAN filter in i40e_add_filter. Then
1337 * we have to perform the full search after deleting filters in
1338 * i40e_sync_filters_subtask, but we already have to search
1339 * filters here and can perform the check at the same time. This
1340 * results in avoiding embedding a loop for VLAN mode inside another
1341 * loop over all the filters, and should maintain correctness as noted
1342 * above.
1343 */
1344 return vsi->has_vlan_filter;
1345}
1346
1347/**
1348 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
1349 * @vsi: the VSI to configure
1350 * @tmp_add_list: list of filters ready to be added
1351 * @tmp_del_list: list of filters ready to be deleted
1352 * @vlan_filters: the number of active VLAN filters
1353 *
1354 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
1355 * behave as expected. If we have any active VLAN filters remaining or about
1356 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
1357 * so that they only match against untagged traffic. If we no longer have any
1358 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
1359 * so that they match against both tagged and untagged traffic. In this way,
1360 * we ensure that we correctly receive the desired traffic. This ensures that
1361 * when we have an active VLAN we will receive only untagged traffic and
1362 * traffic matching active VLANs. If we have no active VLANs then we will
1363 * operate in non-VLAN mode and receive all traffic, tagged or untagged.
1364 *
1365 * Finally, in a similar fashion, this function also corrects filters when
1366 * there is an active PVID assigned to this VSI.
1367 *
1368 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
1369 *
1370 * This function is only expected to be called from within
1371 * i40e_sync_vsi_filters.
1372 *
1373 * NOTE: This function expects to be called while under the
1374 * mac_filter_hash_lock
1375 */
1376static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
1377 struct hlist_head *tmp_add_list,
1378 struct hlist_head *tmp_del_list,
1379 int vlan_filters)
1380{
1381 s16 pvid = le16_to_cpu(vsi->info.pvid);
1382 struct i40e_mac_filter *f, *add_head;
1383 struct i40e_new_mac_filter *new;
1384 struct hlist_node *h;
1385 int bkt, new_vlan;
1386
1387 /* To determine if a particular filter needs to be replaced we
1388 * have the three following conditions:
1389 *
1390 * a) if we have a PVID assigned, then all filters which are
1391 * not marked as VLAN=PVID must be replaced with filters that
1392 * are.
1393 * b) otherwise, if we have any active VLANS, all filters
1394 * which are marked as VLAN=-1 must be replaced with
1395 * filters marked as VLAN=0
1396 * c) finally, if we do not have any active VLANS, all filters
1397 * which are marked as VLAN=0 must be replaced with filters
1398 * marked as VLAN=-1
1399 */
1400
1401 /* Update the filters about to be added in place */
1402 hlist_for_each_entry(new, tmp_add_list, hlist) {
1403 if (pvid && new->f->vlan != pvid)
1404 new->f->vlan = pvid;
1405 else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
1406 new->f->vlan = 0;
1407 else if (!vlan_filters && new->f->vlan == 0)
1408 new->f->vlan = I40E_VLAN_ANY;
1409 }
1410
1411 /* Update the remaining active filters */
1412 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1413 /* Combine the checks for whether a filter needs to be changed
1414 * and then determine the new VLAN inside the if block, in
1415 * order to avoid duplicating code for adding the new filter
1416 * then deleting the old filter.
1417 */
1418 if ((pvid && f->vlan != pvid) ||
1419 (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
1420 (!vlan_filters && f->vlan == 0)) {
1421 /* Determine the new vlan we will be adding */
1422 if (pvid)
1423 new_vlan = pvid;
1424 else if (vlan_filters)
1425 new_vlan = 0;
1426 else
1427 new_vlan = I40E_VLAN_ANY;
1428
1429 /* Create the new filter */
1430 add_head = i40e_add_filter(vsi, macaddr: f->macaddr, vlan: new_vlan);
1431 if (!add_head)
1432 return -ENOMEM;
1433
1434 /* Create a temporary i40e_new_mac_filter */
1435 new = kzalloc(size: sizeof(*new), GFP_ATOMIC);
1436 if (!new)
1437 return -ENOMEM;
1438
1439 new->f = add_head;
1440 new->state = add_head->state;
1441
1442 /* Add the new filter to the tmp list */
1443 hlist_add_head(n: &new->hlist, h: tmp_add_list);
1444
1445 /* Put the original filter into the delete list */
1446 f->state = I40E_FILTER_REMOVE;
1447 hash_del(node: &f->hlist);
1448 hlist_add_head(n: &f->hlist, h: tmp_del_list);
1449 }
1450 }
1451
1452 vsi->has_vlan_filter = !!vlan_filters;
1453
1454 return 0;
1455}
1456
1457/**
1458 * i40e_get_vf_new_vlan - Get new vlan id on a vf
1459 * @vsi: the vsi to configure
1460 * @new_mac: new mac filter to be added
1461 * @f: existing mac filter, replaced with new_mac->f if new_mac is not NULL
1462 * @vlan_filters: the number of active VLAN filters
1463 * @trusted: flag if the VF is trusted
1464 *
1465 * Get new VLAN id based on current VLAN filters, trust, PVID
1466 * and vf-vlan-prune-disable flag.
1467 *
1468 * Returns the value of the new vlan filter or
1469 * the old value if no new filter is needed.
1470 */
1471static s16 i40e_get_vf_new_vlan(struct i40e_vsi *vsi,
1472 struct i40e_new_mac_filter *new_mac,
1473 struct i40e_mac_filter *f,
1474 int vlan_filters,
1475 bool trusted)
1476{
1477 s16 pvid = le16_to_cpu(vsi->info.pvid);
1478 struct i40e_pf *pf = vsi->back;
1479 bool is_any;
1480
1481 if (new_mac)
1482 f = new_mac->f;
1483
1484 if (pvid && f->vlan != pvid)
1485 return pvid;
1486
1487 is_any = (trusted ||
1488 !(pf->flags & I40E_FLAG_VF_VLAN_PRUNING));
1489
1490 if ((vlan_filters && f->vlan == I40E_VLAN_ANY) ||
1491 (!is_any && !vlan_filters && f->vlan == I40E_VLAN_ANY) ||
1492 (is_any && !vlan_filters && f->vlan == 0)) {
1493 if (is_any)
1494 return I40E_VLAN_ANY;
1495 else
1496 return 0;
1497 }
1498
1499 return f->vlan;
1500}
1501
1502/**
1503 * i40e_correct_vf_mac_vlan_filters - Correct non-VLAN VF filters if necessary
1504 * @vsi: the vsi to configure
1505 * @tmp_add_list: list of filters ready to be added
1506 * @tmp_del_list: list of filters ready to be deleted
1507 * @vlan_filters: the number of active VLAN filters
1508 * @trusted: flag if the VF is trusted
1509 *
1510 * Correct VF VLAN filters based on current VLAN filters, trust, PVID
1511 * and vf-vlan-prune-disable flag.
1512 *
1513 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
1514 *
1515 * This function is only expected to be called from within
1516 * i40e_sync_vsi_filters.
1517 *
1518 * NOTE: This function expects to be called while under the
1519 * mac_filter_hash_lock
1520 */
1521static int i40e_correct_vf_mac_vlan_filters(struct i40e_vsi *vsi,
1522 struct hlist_head *tmp_add_list,
1523 struct hlist_head *tmp_del_list,
1524 int vlan_filters,
1525 bool trusted)
1526{
1527 struct i40e_mac_filter *f, *add_head;
1528 struct i40e_new_mac_filter *new_mac;
1529 struct hlist_node *h;
1530 int bkt, new_vlan;
1531
1532 hlist_for_each_entry(new_mac, tmp_add_list, hlist) {
1533 new_mac->f->vlan = i40e_get_vf_new_vlan(vsi, new_mac, NULL,
1534 vlan_filters, trusted);
1535 }
1536
1537 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1538 new_vlan = i40e_get_vf_new_vlan(vsi, NULL, f, vlan_filters,
1539 trusted);
1540 if (new_vlan != f->vlan) {
1541 add_head = i40e_add_filter(vsi, macaddr: f->macaddr, vlan: new_vlan);
1542 if (!add_head)
1543 return -ENOMEM;
1544 /* Create a temporary i40e_new_mac_filter */
1545 new_mac = kzalloc(size: sizeof(*new_mac), GFP_ATOMIC);
1546 if (!new_mac)
1547 return -ENOMEM;
			new_mac->f = add_head;
			new_mac->state = add_head->state;

			/* Add the new filter to the tmp list */
			hlist_add_head(&new_mac->hlist, tmp_add_list);

			/* Put the original filter into the delete list */
			f->state = I40E_FILTER_REMOVE;
			hash_del(&f->hlist);
			hlist_add_head(&f->hlist, tmp_del_list);
		}
	}

	vsi->has_vlan_filter = !!vlan_filters;
	return 0;
}

/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Remove whatever filter the firmware set up so the driver can manage
 * its own filtering intelligently.
 **/
static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* Ignore error returns, some firmware does it this way... */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* ...and some firmware does it this way. */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}

/**
 * i40e_add_filter - Add a MAC/VLAN filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return NULL;

		/* Update the boolean indicating if we need to function in
		 * VLAN mode.
		 */
		if (vlan >= 0)
			vsi->has_vlan_filter = true;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		f->state = I40E_FILTER_NEW;
		INIT_HLIST_NODE(&f->hlist);

		key = i40e_addr_to_hkey(macaddr);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);

		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
	}

	/* If we're asked to add a filter that has been marked for removal, it
	 * is safe to simply restore it to active state. __i40e_del_filter
	 * will have simply deleted any filters which were previously marked
	 * NEW or FAILED, so if it is currently marked REMOVE it must have
	 * previously been ACTIVE. Since we haven't yet run the sync filters
	 * task, just restore this filter to the ACTIVE state so that the
	 * sync task leaves it in place.
	 */
	if (f->state == I40E_FILTER_REMOVE)
		f->state = I40E_FILTER_ACTIVE;

	return f;
}
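
/* Usage sketch (illustrative only, not part of the driver flow): callers
 * own the hash lock and the service task later pushes the change to the
 * firmware, so a typical add looks like:
 *
 *	spin_lock_bh(&vsi->mac_filter_hash_lock);
 *	f = i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);
 *	spin_unlock_bh(&vsi->mac_filter_hash_lock);
 *	if (!f)
 *		return -ENOMEM;
 *	i40e_service_event_schedule(vsi->back);
 */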

/**
 * __i40e_del_filter - Remove a specific filter from the VSI
 * @vsi: VSI to remove from
 * @f: the filter to remove from the list
 *
 * This function should be called instead of i40e_del_filter only if you know
 * the exact filter you will remove already, such as via i40e_find_filter or
 * i40e_find_mac.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
{
	if (!f)
		return;

	/* If the filter was never added to firmware then we can just delete it
	 * directly and we don't want to set the status to remove or else an
	 * admin queue command will unnecessarily fire.
	 */
	if ((f->state == I40E_FILTER_FAILED) ||
	    (f->state == I40E_FILTER_NEW)) {
		hash_del(&f->hlist);
		kfree(f);
	} else {
		f->state = I40E_FILTER_REMOVE;
	}

	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
	set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
}

/**
 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan);
	__i40e_del_filter(vsi, f);
}

/**
 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address to be filtered
 *
 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
 * go through all the MAC/VLAN filters and add a filter for each unique VLAN
 * that already exists. If a PVID has been assigned, instead only add the
 * macaddr to that VLAN.
 *
 * Returns last filter added on success, else NULL
 **/
struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
					    const u8 *macaddr)
{
	struct i40e_mac_filter *f, *add = NULL;
	struct hlist_node *h;
	int bkt;

	if (vsi->info.pvid)
		return i40e_add_filter(vsi, macaddr,
				       le16_to_cpu(vsi->info.pvid));

	if (!i40e_is_vsi_in_vlan(vsi))
		return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->state == I40E_FILTER_REMOVE)
			continue;
		add = i40e_add_filter(vsi, macaddr, f->vlan);
		if (!add)
			return NULL;
	}

	return add;
}
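
/* Example of the fan-out above (illustrative): with no PVID and existing
 * filters on VLANs 10 and 20, i40e_add_mac_filter() creates (macaddr, 10)
 * and (macaddr, 20); with no VLAN filters at all it creates the single
 * wildcard entry (macaddr, I40E_VLAN_ANY).
 */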

/**
 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address to be removed
 *
 * Removes a given MAC address from a VSI regardless of what VLAN it has been
 * associated with.
 *
 * Returns 0 for success, or error
 **/
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	bool found = false;
	int bkt;

	lockdep_assert_held(&vsi->mac_filter_hash_lock);
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (ether_addr_equal(macaddr, f->macaddr)) {
			__i40e_del_filter(vsi, f);
			found = true;
		}
	}

	if (found)
		return 0;
	else
		return -ENOENT;
}

/**
 * i40e_set_mac - NDO callback to set MAC address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_set_mac(struct net_device *netdev, void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (test_bit(__I40E_DOWN, pf->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	/* Copy the address first, so that we avoid a possible race with
	 * .set_rx_mode().
	 * - Remove old address from MAC filter
	 * - Copy new address
	 * - Add new address to MAC filter
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_del_mac_filter(vsi, netdev->dev_addr);
	eth_hw_addr_set(netdev, addr->sa_data);
	i40e_add_mac_filter(vsi, netdev->dev_addr);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	if (vsi->type == I40E_VSI_MAIN) {
		int ret;

		ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret)
			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %pe, AQ ret %s\n",
				    ERR_PTR(ret),
				    i40e_aq_str(hw, hw->aq.asq_last_status));
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(pf);
	return 0;
}

/**
 * i40e_config_rss_aq - Prepare for RSS using AQ commands
 * @vsi: vsi structure
 * @seed: RSS hash seed
 * @lut: pointer to lookup table of lut_size
 * @lut_size: size of the lookup table
 **/
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			      u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		struct i40e_aqc_get_set_rss_key_data *seed_dw =
			(struct i40e_aqc_get_set_rss_key_data *)seed;
		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS key, err %pe aq_err %s\n",
				 ERR_PTR(ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	if (lut) {
		bool pf_lut = vsi->type == I40E_VSI_MAIN;

		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS lut, err %pe aq_err %s\n",
				 ERR_PTR(ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	return ret;
}

/**
 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
 * @vsi: VSI structure
 **/
static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	int ret;

	if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
		return 0;
	if (!vsi->rss_size)
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);
	if (!vsi->rss_size)
		return -EINVAL;
	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Use the user configured hash keys and lookup table if there is one,
	 * otherwise use default
	 */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);
	return ret;
}

/**
 * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
 * @vsi: the VSI being configured
 * @ctxt: VSI context structure
 * @enabled_tc: bitmap of enabled traffic classes
 *
 * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
 **/
static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
					   struct i40e_vsi_context *ctxt,
					   u8 enabled_tc)
{
	u16 qcount = 0, max_qcount, qmap, sections = 0;
	int i, override_q, pow, num_qps, ret;
	u8 netdev_tc = 0, offset = 0;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;
	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
	vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	num_qps = vsi->mqprio_qopt.qopt.count[0];

	/* find the next higher power-of-2 of num queue pairs */
	pow = ilog2(num_qps);
	if (!is_power_of_2(num_qps))
		pow++;
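	/* e.g. num_qps = 6: ilog2(6) = 2 and 6 is not a power of two, so
	 * pow = 3 and TC0 spans 2^3 = 8 queue slots in the map.
	 */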
	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
	       (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

	/* Setup queue offset/count for all TCs for given VSI */
	max_qcount = vsi->mqprio_qopt.qopt.count[0];
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			offset = vsi->mqprio_qopt.qopt.offset[i];
			qcount = vsi->mqprio_qopt.qopt.count[i];
			if (qcount > max_qcount)
				max_qcount = qcount;
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;
			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;
		}
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset + qcount;

	/* Setup queue TC[0].qmap for given VSI context */
	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
	ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	ctxt->info.valid_sections |= cpu_to_le16(sections);

	/* Reconfigure RSS for main VSI with max queue count */
	vsi->rss_size = max_qcount;
	ret = i40e_vsi_config_rss(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to reconfig rss for num_queues (%u)\n",
			 max_qcount);
		return ret;
	}
	vsi->reconfig_rss = true;
	dev_dbg(&vsi->back->pdev->dev,
		"Reconfigured rss with num_queues (%u)\n", max_qcount);

	/* Find queue count available for channel VSIs and starting offset
	 * for channel VSIs
	 */
	override_q = vsi->mqprio_qopt.qopt.count[0];
	if (override_q && override_q < vsi->num_queue_pairs) {
		vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
		vsi->next_base_queue = override_q;
	}
	return 0;
}

/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
{
	struct i40e_pf *pf = vsi->back;
	u16 num_tc_qps = 0;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 1;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;
	/* zero out queue mapping; it will get updated at the end of
	 * the function
	 */
	memset(ctxt->info.queue_mapping, 0, sizeof(ctxt->info.queue_mapping));

	if (vsi->type == I40E_VSI_MAIN) {
		/* This code helps add more queues to the VSI if we have
		 * more cores than RSS can support; the higher cores will
		 * be served by ATR or other filters. A non-zero
		 * req_queue_pairs means the user requested a new queue
		 * count via ethtool's set_channels, so use that value
		 * for queue distribution across the traffic classes.
		 * We need at least one queue pair for the interface
		 * to be usable, as we see in the else statement.
		 */
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_queue_pairs = pf->num_lan_msix;
		else
			vsi->num_queue_pairs = 1;
	}

	/* Number of queues per enabled TC */
	if (vsi->type == I40E_VSI_MAIN ||
	    (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs != 0))
		num_tc_qps = vsi->num_queue_pairs;
	else
		num_tc_qps = vsi->alloc_queue_pairs;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT(i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
		num_tc_qps = num_tc_qps / numtc;
		num_tc_qps = min_t(int, num_tc_qps,
				   i40e_pf_get_max_q_per_tc(pf));
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;

	/* Do not allow more TC queue pairs than there are MSI-X vectors */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			/* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
						   I40E_FLAG_FD_ATR_ENABLED)) ||
				    vsi->tc_config.enabled_tc != 1) {
					qcount = min_t(int, pf->alloc_rss_size,
						       num_tc_qps);
					break;
				}
				fallthrough;
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && (BIT_ULL(pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}
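			/* This loop is equivalent to the ilog2()-based
			 * round-up used in the mqprio path above, e.g.
			 * qcount = 6 again yields pow = 3 (8 queue slots).
			 */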

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}
	/* Do not change previously set num_queue_pairs for PFs and VFs */
	if ((vsi->type == I40E_VSI_MAIN && numtc != 1) ||
	    (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs == 0) ||
	    (vsi->type != I40E_VSI_MAIN && vsi->type != I40E_VSI_SRIOV))
		vsi->num_queue_pairs = offset;

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
			cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
				cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
			cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
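
/* Worked example (illustrative): enabled_tc = 0x3 on the main VSI with
 * DCB on and 8 queue pairs gives numtc = 2 and num_tc_qps = 4, so TC0
 * gets qoffset 0 and TC1 qoffset 4, each with qcount 4 and a qmap of
 * (qoffset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 * (2 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT).
 */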

/**
 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	if (i40e_add_mac_filter(vsi, addr))
		return 0;
	else
		return -ENOMEM;
}

/**
 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	/* Under some circumstances, we might receive a request to delete
	 * our own device address from our uc list. Because we store the
	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
	 * such requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	i40e_del_mac_filter(vsi, addr);

	return 0;
}

/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void i40e_set_rx_mode(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	spin_lock_bh(&vsi->mac_filter_hash_lock);

	__dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
	__dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
	}
}

/**
 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: Pointer to VSI struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries needs to be undone.
 *
 * MAC filter entries from this list were slated for deletion.
 **/
static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
					 struct hlist_head *from)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;

	hlist_for_each_entry_safe(f, h, from, hlist) {
		u64 key = i40e_addr_to_hkey(f->macaddr);

		/* Move the element back into MAC filter list */
		hlist_del(&f->hlist);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);
	}
}

/**
 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: Pointer to vsi struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries needs to be undone.
 *
 * MAC filter entries from this list were slated for addition.
 **/
static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
					 struct hlist_head *from)
{
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;

	hlist_for_each_entry_safe(new, h, from, hlist) {
		/* We can simply free the wrapper structure */
		hlist_del(&new->hlist);
		netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
		kfree(new);
	}
}

/**
 * i40e_next_filter - Get the next non-broadcast filter from a list
 * @next: pointer to filter in list
 *
 * Returns the next non-broadcast filter in the list. Required so that we
 * ignore broadcast filters within the list, since these are not handled via
 * the normal firmware update path.
 */
static
struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
{
	hlist_for_each_entry_continue(next, hlist) {
		if (!is_broadcast_ether_addr(next->f->macaddr))
			return next;
	}

	return NULL;
}

/**
 * i40e_update_filter_state - Update filter state based on return data
 * from firmware
 * @count: Number of filters added
 * @add_list: return data from fw
 * @add_head: pointer to first filter in current batch
 *
 * MAC filter entries from list were slated to be added to device. Returns
 * number of successful filters. Note that 0 does NOT mean success!
 **/
static int
i40e_update_filter_state(int count,
			 struct i40e_aqc_add_macvlan_element_data *add_list,
			 struct i40e_new_mac_filter *add_head)
{
	int retval = 0;
	int i;

	for (i = 0; i < count; i++) {
		/* Always check status of each filter. We don't need to check
		 * the firmware return status because we pre-set the filter
		 * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
		 * request to the adminq. Thus, if it no longer matches then
		 * we know the filter is active.
		 */
		if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
			add_head->state = I40E_FILTER_FAILED;
		} else {
			add_head->state = I40E_FILTER_ACTIVE;
			retval++;
		}

		add_head = i40e_next_filter(add_head);
		if (!add_head)
			break;
	}

	return retval;
}

/**
 * i40e_aqc_del_filters - Request firmware to delete a set of filters
 * @vsi: ptr to the VSI
 * @vsi_name: name to display in messages
 * @list: the list of filters to send to firmware
 * @num_del: the number of filters to delete
 * @retval: Set to -EIO on failure to delete
 *
 * Send a request to firmware via AdminQ to delete a set of filters. Uses
 * *retval instead of a return value so that success does not force *retval
 * to be set to 0. This ensures that a sequence of calls to this function
 * preserves the previous value of *retval on successful delete.
 */
static
void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
			  struct i40e_aqc_remove_macvlan_element_data *list,
			  int num_del, int *retval)
{
	struct i40e_hw *hw = &vsi->back->hw;
	enum i40e_admin_queue_err aq_status;
	int aq_ret;

	aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL,
					   &aq_status);

	/* Explicitly ignore and do not report when firmware returns ENOENT */
	if (aq_ret && !(aq_status == I40E_AQ_RC_ENOENT)) {
		*retval = -EIO;
		dev_info(&vsi->back->pdev->dev,
			 "ignoring delete macvlan error on %s, err %pe, aq_err %s\n",
			 vsi_name, ERR_PTR(aq_ret),
			 i40e_aq_str(hw, aq_status));
	}
}

/**
 * i40e_aqc_add_filters - Request firmware to add a set of filters
 * @vsi: ptr to the VSI
 * @vsi_name: name to display in messages
 * @list: the list of filters to send to firmware
 * @add_head: Position in the add hlist
 * @num_add: the number of filters to add
 *
 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
 * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of
 * space for more filters.
 */
static
void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
			  struct i40e_aqc_add_macvlan_element_data *list,
			  struct i40e_new_mac_filter *add_head,
			  int num_add)
{
	struct i40e_hw *hw = &vsi->back->hw;
	enum i40e_admin_queue_err aq_status;
	int fcnt;

	i40e_aq_add_macvlan_v2(hw, vsi->seid, list, num_add, NULL, &aq_status);
	fcnt = i40e_update_filter_state(num_add, list, add_head);

	if (fcnt != num_add) {
		if (vsi->type == I40E_VSI_MAIN) {
			set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
			dev_warn(&vsi->back->pdev->dev,
				 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
				 i40e_aq_str(hw, aq_status), vsi_name);
		} else if (vsi->type == I40E_VSI_SRIOV ||
			   vsi->type == I40E_VSI_VMDQ1 ||
			   vsi->type == I40E_VSI_VMDQ2) {
			dev_warn(&vsi->back->pdev->dev,
				 "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
				 i40e_aq_str(hw, aq_status), vsi_name,
				 vsi_name);
		} else {
			dev_warn(&vsi->back->pdev->dev,
				 "Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
				 i40e_aq_str(hw, aq_status), vsi_name,
				 vsi->type);
		}
	}
}

/**
 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
 * @vsi: pointer to the VSI
 * @vsi_name: the VSI name
 * @f: filter data
 *
 * This function sets or clears the promiscuous broadcast flags for VLAN
 * filters in order to properly receive broadcast frames. Assumes that only
 * broadcast filters are passed.
 *
 * Returns status indicating success or failure.
 **/
static int
i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
			  struct i40e_mac_filter *f)
{
	bool enable = f->state == I40E_FILTER_NEW;
	struct i40e_hw *hw = &vsi->back->hw;
	int aq_ret;

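	/* A freshly queued (NEW) broadcast filter means "enable broadcast
	 * promiscuous"; any other state reaching here is a REMOVE, which
	 * clears it.
	 */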
	if (f->vlan == I40E_VLAN_ANY) {
		aq_ret = i40e_aq_set_vsi_broadcast(hw,
						   vsi->seid,
						   enable,
						   NULL);
	} else {
		aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
							    vsi->seid,
							    enable,
							    f->vlan,
							    NULL);
	}

	if (aq_ret) {
		set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		dev_warn(&vsi->back->pdev->dev,
			 "Error %s, forcing overflow promiscuous on %s\n",
			 i40e_aq_str(hw, hw->aq.asq_last_status),
			 vsi_name);
	}

	return aq_ret;
}

/**
 * i40e_set_promiscuous - set promiscuous mode
 * @pf: board private structure
 * @promisc: promisc on or off
 *
 * There are different ways of setting promiscuous mode on a PF depending on
 * what state/environment we're in. This identifies and sets it appropriately.
 * Returns 0 on success.
 **/
static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_hw *hw = &pf->hw;
	int aq_ret;

	if (vsi->type == I40E_VSI_MAIN &&
	    pf->lan_veb != I40E_NO_VEB &&
	    !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
		/* set defport ON for Main VSI instead of true promisc
		 * this way we will get all unicast/multicast and VLAN
		 * promisc behavior but will not get VF or VMDq traffic
		 * replicated on the Main VSI.
		 */
		if (promisc)
			aq_ret = i40e_aq_set_default_vsi(hw,
							 vsi->seid,
							 NULL);
		else
			aq_ret = i40e_aq_clear_default_vsi(hw,
							   vsi->seid,
							   NULL);
		if (aq_ret) {
			dev_info(&pf->pdev->dev,
				 "Set default VSI failed, err %pe, aq_err %s\n",
				 ERR_PTR(aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	} else {
		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw,
							     vsi->seid,
							     promisc, NULL,
							     true);
		if (aq_ret) {
			dev_info(&pf->pdev->dev,
				 "set unicast promisc failed, err %pe, aq_err %s\n",
				 ERR_PTR(aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
							       vsi->seid,
							       promisc, NULL);
		if (aq_ret) {
			dev_info(&pf->pdev->dev,
				 "set multicast promisc failed, err %pe, aq_err %s\n",
				 ERR_PTR(aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	}

	if (!aq_ret)
		pf->cur_promisc = promisc;

	return aq_ret;
}

/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 *
 * Returns 0 or error value
 **/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
	struct hlist_head tmp_add_list, tmp_del_list;
	struct i40e_mac_filter *f;
	struct i40e_new_mac_filter *new, *add_head = NULL;
	struct i40e_hw *hw = &vsi->back->hw;
	bool old_overflow, new_overflow;
	unsigned int failed_filters = 0;
	unsigned int vlan_filters = 0;
	char vsi_name[16] = "PF";
	int filter_list_len = 0;
	u32 changed_flags = 0;
	struct hlist_node *h;
	struct i40e_pf *pf;
	int num_add = 0;
	int num_del = 0;
	int aq_ret = 0;
	int retval = 0;
	u16 cmd_flags;
	int list_size;
	int bkt;

	/* filter element arrays; allocated with kzalloc below */
	struct i40e_aqc_add_macvlan_element_data *add_list;
	struct i40e_aqc_remove_macvlan_element_data *del_list;

	while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
		usleep_range(1000, 2000);
	pf = vsi->back;

	old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);

	if (vsi->netdev) {
		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
		vsi->current_netdev_flags = vsi->netdev->flags;
	}

	INIT_HLIST_HEAD(&tmp_add_list);
	INIT_HLIST_HEAD(&tmp_del_list);

	if (vsi->type == I40E_VSI_SRIOV)
		snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
	else if (vsi->type != I40E_VSI_MAIN)
		snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);

	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		/* Create a list of filters to delete. */
		hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
			if (f->state == I40E_FILTER_REMOVE) {
				/* Move the element into temporary del_list */
				hash_del(&f->hlist);
				hlist_add_head(&f->hlist, &tmp_del_list);

				/* Avoid counting removed filters */
				continue;
			}
			if (f->state == I40E_FILTER_NEW) {
				/* Create a temporary i40e_new_mac_filter */
				new = kzalloc(sizeof(*new), GFP_ATOMIC);
				if (!new)
					goto err_no_memory_locked;

				/* Store pointer to the real filter */
				new->f = f;
				new->state = f->state;

				/* Add it to the hash list */
				hlist_add_head(&new->hlist, &tmp_add_list);
			}

			/* Count the number of active (current and new) VLAN
			 * filters we have now. Does not count filters which
			 * are marked for deletion.
			 */
			if (f->vlan > 0)
				vlan_filters++;
		}

		if (vsi->type != I40E_VSI_SRIOV)
			retval = i40e_correct_mac_vlan_filters
				(vsi, &tmp_add_list, &tmp_del_list,
				 vlan_filters);
		else if (pf->vf)
			retval = i40e_correct_vf_mac_vlan_filters
				(vsi, &tmp_add_list, &tmp_del_list,
				 vlan_filters, pf->vf[vsi->vf_id].trusted);

		hlist_for_each_entry(new, &tmp_add_list, hlist)
			netdev_hw_addr_refcnt(new->f, vsi->netdev, 1);

		if (retval)
			goto err_no_memory_locked;

		spin_unlock_bh(&vsi->mac_filter_hash_lock);
	}

	/* Now process 'del_list' outside the lock */
	if (!hlist_empty(&tmp_del_list)) {
		filter_list_len = hw->aq.asq_buf_size /
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		list_size = filter_list_len *
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		del_list = kzalloc(list_size, GFP_ATOMIC);
		if (!del_list)
			goto err_no_memory;

		hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
			cmd_flags = 0;

			/* handle broadcast filters by updating the broadcast
			 * promiscuous flag and releasing the filter entry.
			 */
			if (is_broadcast_ether_addr(f->macaddr)) {
				i40e_aqc_broadcast_filter(vsi, vsi_name, f);

				hlist_del(&f->hlist);
				kfree(f);
				continue;
			}

			/* add to delete list */
			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
			if (f->vlan == I40E_VLAN_ANY) {
				del_list[num_del].vlan_tag = 0;
				cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
			} else {
				del_list[num_del].vlan_tag =
					cpu_to_le16((u16)(f->vlan));
			}

			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			del_list[num_del].flags = cmd_flags;
			num_del++;

			/* flush a full buffer */
			if (num_del == filter_list_len) {
				i40e_aqc_del_filters(vsi, vsi_name, del_list,
						     num_del, &retval);
				memset(del_list, 0, list_size);
				num_del = 0;
			}
			/* Release memory for MAC filter entries which were
			 * synced up with HW.
			 */
			hlist_del(&f->hlist);
			kfree(f);
		}

		if (num_del) {
			i40e_aqc_del_filters(vsi, vsi_name, del_list,
					     num_del, &retval);
		}

		kfree(del_list);
		del_list = NULL;
	}

	if (!hlist_empty(&tmp_add_list)) {
		/* Do all the adds now. */
		filter_list_len = hw->aq.asq_buf_size /
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		list_size = filter_list_len *
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		add_list = kzalloc(list_size, GFP_ATOMIC);
		if (!add_list)
			goto err_no_memory;

		num_add = 0;
		hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
			/* handle broadcast filters by updating the broadcast
			 * promiscuous flag instead of adding a MAC filter.
			 */
			if (is_broadcast_ether_addr(new->f->macaddr)) {
				if (i40e_aqc_broadcast_filter(vsi, vsi_name,
							      new->f))
					new->state = I40E_FILTER_FAILED;
				else
					new->state = I40E_FILTER_ACTIVE;
				continue;
			}

			/* add to add array */
			if (num_add == 0)
				add_head = new;
			cmd_flags = 0;
			ether_addr_copy(add_list[num_add].mac_addr,
					new->f->macaddr);
			if (new->f->vlan == I40E_VLAN_ANY) {
				add_list[num_add].vlan_tag = 0;
				cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
			} else {
				add_list[num_add].vlan_tag =
					cpu_to_le16((u16)(new->f->vlan));
			}
			add_list[num_add].queue_number = 0;
			/* set invalid match method for later detection */
			add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
			add_list[num_add].flags = cpu_to_le16(cmd_flags);
			num_add++;

			/* flush a full buffer */
			if (num_add == filter_list_len) {
				i40e_aqc_add_filters(vsi, vsi_name, add_list,
						     add_head, num_add);
				memset(add_list, 0, list_size);
				num_add = 0;
			}
		}
		if (num_add) {
			i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
					     num_add);
		}
		/* Now move all of the filters from the temp add list back to
		 * the VSI's list.
		 */
		spin_lock_bh(&vsi->mac_filter_hash_lock);
		hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
			/* Only update the state if we're still NEW */
			if (new->f->state == I40E_FILTER_NEW)
				new->f->state = new->state;
			hlist_del(&new->hlist);
			netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
			kfree(new);
		}
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		kfree(add_list);
		add_list = NULL;
	}

	/* Determine the number of active and failed filters. */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	vsi->active_filters = 0;
	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->state == I40E_FILTER_ACTIVE)
			vsi->active_filters++;
		else if (f->state == I40E_FILTER_FAILED)
			failed_filters++;
	}
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* Check if we are able to exit overflow promiscuous mode. We can
	 * safely exit if we didn't just enter, we no longer have any failed
	 * filters, and we have reduced filters below the threshold value.
	 */
	if (old_overflow && !failed_filters &&
	    vsi->active_filters < vsi->promisc_threshold) {
		dev_info(&pf->pdev->dev,
			 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
			 vsi_name);
		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		vsi->promisc_threshold = 0;
	}

	/* If the VF is not trusted, do not enter promiscuous mode */
	if (vsi->type == I40E_VSI_SRIOV && pf->vf &&
	    !pf->vf[vsi->vf_id].trusted) {
		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		goto out;
	}

	new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);

	/* If we are entering overflow promiscuous, we need to calculate a new
	 * threshold for when we are safe to exit
	 */
	if (!old_overflow && new_overflow)
		vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
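	/* e.g. entering overflow with 64 active filters sets the exit
	 * threshold to 48; we leave overflow promiscuous only once the
	 * active count drops below that with no failed filters left.
	 */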

	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		bool cur_multipromisc;

		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
							       vsi->seid,
							       cur_multipromisc,
							       NULL);
		if (aq_ret) {
			retval = i40e_aq_rc_to_posix(aq_ret,
						     hw->aq.asq_last_status);
			dev_info(&pf->pdev->dev,
				 "set multi promisc failed on %s, err %pe aq_err %s\n",
				 vsi_name,
				 ERR_PTR(aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		} else {
			dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
				 cur_multipromisc ? "entering" : "leaving");
		}
	}

	if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
		bool cur_promisc;

		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
			       new_overflow);
		aq_ret = i40e_set_promiscuous(pf, cur_promisc);
		if (aq_ret) {
			retval = i40e_aq_rc_to_posix(aq_ret,
						     hw->aq.asq_last_status);
			dev_info(&pf->pdev->dev,
				 "Setting promiscuous %s failed on %s, err %pe aq_err %s\n",
				 cur_promisc ? "on" : "off",
				 vsi_name,
				 ERR_PTR(aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	}
out:
	/* if something went wrong then set the changed flag so we try again */
	if (retval)
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;

	clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
	return retval;

err_no_memory:
	/* Restore elements on the temporary add and delete lists */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
err_no_memory_locked:
	i40e_undo_del_filter_entries(vsi, &tmp_del_list);
	i40e_undo_add_filter_entries(vsi, &tmp_add_list);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
	clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
	return -ENOMEM;
}

/**
 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/
static void i40e_sync_filters_subtask(struct i40e_pf *pf)
{
	int v;

	if (!pf)
		return;
	if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
		return;
	if (test_bit(__I40E_VF_DISABLE, pf->state)) {
		set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
		return;
	}

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v] &&
		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) &&
		    !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) {
			int ret = i40e_sync_vsi_filters(pf->vsi[v]);

			if (ret) {
				/* come back and try again later */
				set_bit(__I40E_MACVLAN_SYNC_PENDING,
					pf->state);
				break;
			}
		}
	}
}

/**
 * i40e_calculate_vsi_rx_buf_len - Calculates buffer length
 * @vsi: VSI to calculate rx_buf_len from
 */
static u16 i40e_calculate_vsi_rx_buf_len(struct i40e_vsi *vsi)
{
	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
		return SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048);

	return PAGE_SIZE < 8192 ? I40E_RXBUFFER_3072 : I40E_RXBUFFER_2048;
}

/**
 * i40e_max_vsi_frame_size - returns the maximum allowed frame size for VSI
 * @vsi: the vsi
 * @xdp_prog: XDP program
 **/
static int i40e_max_vsi_frame_size(struct i40e_vsi *vsi,
				   struct bpf_prog *xdp_prog)
{
	u16 rx_buf_len = i40e_calculate_vsi_rx_buf_len(vsi);
	u16 chain_len;

	if (xdp_prog && !xdp_prog->aux->xdp_has_frags)
		chain_len = 1;
	else
		chain_len = I40E_MAX_CHAINED_RX_BUFFERS;

	return min_t(u16, rx_buf_len * chain_len, I40E_MAX_RXBUFFER);
}
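
/* Worked example (constants assumed from the i40e headers): an XDP
 * program that cannot handle frags forces chain_len = 1, capping the
 * frame at a single rx_buf_len (e.g. 3072 bytes); otherwise chaining
 * I40E_MAX_CHAINED_RX_BUFFERS (5) buffers of 3072 bytes gives 15360,
 * clamped down to I40E_MAX_RXBUFFER (9728).
 */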

/**
 * i40e_change_mtu - NDO callback to change the Maximum Transmission Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int frame_size;

	frame_size = i40e_max_vsi_frame_size(vsi, vsi->xdp_prog);
	if (new_mtu > frame_size - I40E_PACKET_HDR_PAD) {
		netdev_err(netdev, "Error changing mtu to %d, Max is %d\n",
			   new_mtu, frame_size - I40E_PACKET_HDR_PAD);
		return -EINVAL;
	}
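	/* e.g. with a 9728-byte maximum frame and the usual 26-byte
	 * I40E_PACKET_HDR_PAD (assumed value), the largest MTU accepted
	 * here is 9702.
	 */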
2955
2956 netdev_dbg(netdev, "changing MTU from %d to %d\n",
2957 netdev->mtu, new_mtu);
2958 netdev->mtu = new_mtu;
2959 if (netif_running(dev: netdev))
2960 i40e_vsi_reinit_locked(vsi);
2961 set_bit(nr: __I40E_CLIENT_SERVICE_REQUESTED, addr: pf->state);
2962 set_bit(nr: __I40E_CLIENT_L2_CHANGE, addr: pf->state);
2963 return 0;
2964}
2965
2966/**
2967 * i40e_ioctl - Access the hwtstamp interface
2968 * @netdev: network interface device structure
2969 * @ifr: interface request data
2970 * @cmd: ioctl command
2971 **/
2972int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2973{
2974 struct i40e_netdev_priv *np = netdev_priv(dev: netdev);
2975 struct i40e_pf *pf = np->vsi->back;
2976
2977 switch (cmd) {
2978 case SIOCGHWTSTAMP:
2979 return i40e_ptp_get_ts_config(pf, ifr);
2980 case SIOCSHWTSTAMP:
2981 return i40e_ptp_set_ts_config(pf, ifr);
2982 default:
2983 return -EOPNOTSUPP;
2984 }
2985}
2986
2987/**
2988 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2989 * @vsi: the vsi being adjusted
2990 **/
2991void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2992{
2993 struct i40e_vsi_context ctxt;
2994 int ret;
2995
2996 /* Don't modify stripping options if a port VLAN is active */
2997 if (vsi->info.pvid)
2998 return;
2999
3000 if ((vsi->info.valid_sections &
3001 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
3002 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
3003 return; /* already enabled */
3004
3005 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
3006 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
3007 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
3008
3009 ctxt.seid = vsi->seid;
3010 ctxt.info = vsi->info;
3011 ret = i40e_aq_update_vsi_params(hw: &vsi->back->hw, vsi_ctx: &ctxt, NULL);
3012 if (ret) {
3013 dev_info(&vsi->back->pdev->dev,
3014 "update vlan stripping failed, err %pe aq_err %s\n",
3015 ERR_PTR(ret),
3016 i40e_aq_str(&vsi->back->hw,
3017 vsi->back->hw.aq.asq_last_status));
3018 }
3019}
3020
3021/**
3022 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
3023 * @vsi: the vsi being adjusted
3024 **/
3025void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
3026{
3027 struct i40e_vsi_context ctxt;
3028 int ret;
3029
3030 /* Don't modify stripping options if a port VLAN is active */
3031 if (vsi->info.pvid)
3032 return;
3033
3034 if ((vsi->info.valid_sections &
3035 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
3036 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
3037 I40E_AQ_VSI_PVLAN_EMOD_MASK))
3038 return; /* already disabled */
3039
3040 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
3041 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
3042 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
3043
3044 ctxt.seid = vsi->seid;
3045 ctxt.info = vsi->info;
3046 ret = i40e_aq_update_vsi_params(hw: &vsi->back->hw, vsi_ctx: &ctxt, NULL);
3047 if (ret) {
3048 dev_info(&vsi->back->pdev->dev,
3049 "update vlan stripping failed, err %pe aq_err %s\n",
3050 ERR_PTR(ret),
3051 i40e_aq_str(&vsi->back->hw,
3052 vsi->back->hw.aq.asq_last_status));
3053 }
3054}
3055
3056/**
3057 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
3058 * @vsi: the vsi being configured
3059 * @vid: vlan id to be added (0 = untagged only , -1 = any)
3060 *
3061 * This is a helper function for adding a new MAC/VLAN filter with the
3062 * specified VLAN for each existing MAC address already in the hash table.
3063 * This function does *not* perform any accounting to update filters based on
3064 * VLAN mode.
3065 *
3066 * NOTE: this function expects to be called while under the
3067 * mac_filter_hash_lock
3068 **/
3069int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
3070{
3071 struct i40e_mac_filter *f, *add_f;
3072 struct hlist_node *h;
3073 int bkt;
3074
3075 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
3076 /* If we're asked to add a filter that has been marked for
3077 * removal, it is safe to simply restore it to active state.
3078 * __i40e_del_filter will have simply deleted any filters which
3079 * were previously marked NEW or FAILED, so if it is currently
3080 * marked REMOVE it must have previously been ACTIVE. Since we
3081 * haven't yet run the sync filters task, just restore this
3082 * filter to the ACTIVE state so that the sync task leaves it
3083 * in place.
3084 */
3085 if (f->state == I40E_FILTER_REMOVE && f->vlan == vid) {
3086 f->state = I40E_FILTER_ACTIVE;
3087 continue;
3088 } else if (f->state == I40E_FILTER_REMOVE) {
3089 continue;
3090 }
3091 add_f = i40e_add_filter(vsi, macaddr: f->macaddr, vlan: vid);
3092 if (!add_f) {
3093 dev_info(&vsi->back->pdev->dev,
3094 "Could not add vlan filter %d for %pM\n",
3095 vid, f->macaddr);
3096 return -ENOMEM;
3097 }
3098 }
3099
3100 return 0;
3101}
3102
3103/**
3104 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
3105 * @vsi: the VSI being configured
3106 * @vid: VLAN id to be added
3107 **/
3108int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
3109{
3110 int err;
3111
3112 if (vsi->info.pvid)
3113 return -EINVAL;
3114
3115 /* The network stack will attempt to add VID=0, with the intention to
3116 * receive priority tagged packets with a VLAN of 0. Our HW receives
3117 * these packets by default when configured to receive untagged
3118 * packets, so we don't need to add a filter for this case.
3119 * Additionally, HW interprets adding a VID=0 filter as meaning to
3120 * receive *only* tagged traffic and stops receiving untagged traffic.
3121 * Thus, we do not want to actually add a filter for VID=0
3122 */
3123 if (!vid)
3124 return 0;
3125
3126 /* Locked once because all functions invoked below iterates list*/
3127 spin_lock_bh(lock: &vsi->mac_filter_hash_lock);
3128 err = i40e_add_vlan_all_mac(vsi, vid);
3129 spin_unlock_bh(lock: &vsi->mac_filter_hash_lock);
3130 if (err)
3131 return err;
3132
3133 /* schedule our worker thread which will take care of
3134 * applying the new filter changes
3135 */
3136 i40e_service_event_schedule(pf: vsi->back);
3137 return 0;
3138}
3139
3140/**
3141 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
3142 * @vsi: the vsi being configured
3143 * @vid: vlan id to be removed (0 = untagged only , -1 = any)
3144 *
3145 * This function should be used to remove all VLAN filters which match the
3146 * given VID. It does not schedule the service event and does not take the
3147 * mac_filter_hash_lock so it may be combined with other operations under
3148 * a single invocation of the mac_filter_hash_lock.
3149 *
3150 * NOTE: this function expects to be called while under the
3151 * mac_filter_hash_lock
3152 */
3153void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
3154{
3155 struct i40e_mac_filter *f;
3156 struct hlist_node *h;
3157 int bkt;
3158
3159 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
3160 if (f->vlan == vid)
3161 __i40e_del_filter(vsi, f);
3162 }
3163}
3164
3165/**
3166 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
3167 * @vsi: the VSI being configured
3168 * @vid: VLAN id to be removed
3169 **/
3170void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
3171{
3172 if (!vid || vsi->info.pvid)
3173 return;
3174
3175 spin_lock_bh(lock: &vsi->mac_filter_hash_lock);
3176 i40e_rm_vlan_all_mac(vsi, vid);
3177 spin_unlock_bh(lock: &vsi->mac_filter_hash_lock);
3178
3179 /* schedule our worker thread which will take care of
3180 * applying the new filter changes
3181 */
3182 i40e_service_event_schedule(pf: vsi->back);
3183}
3184
3185/**
3186 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
3187 * @netdev: network interface to be adjusted
3188 * @proto: unused protocol value
3189 * @vid: vlan id to be added
3190 *
3191 * net_device_ops implementation for adding vlan ids
3192 **/
3193static int i40e_vlan_rx_add_vid(struct net_device *netdev,
3194 __always_unused __be16 proto, u16 vid)
3195{
3196 struct i40e_netdev_priv *np = netdev_priv(dev: netdev);
3197 struct i40e_vsi *vsi = np->vsi;
3198 int ret = 0;
3199
3200 if (vid >= VLAN_N_VID)
3201 return -EINVAL;
3202
3203 ret = i40e_vsi_add_vlan(vsi, vid);
3204 if (!ret)
3205 set_bit(nr: vid, addr: vsi->active_vlans);
3206
3207 return ret;
3208}
3209
3210/**
3211 * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path
3212 * @netdev: network interface to be adjusted
3213 * @proto: unused protocol value
3214 * @vid: vlan id to be added
3215 **/
3216static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
3217 __always_unused __be16 proto, u16 vid)
3218{
3219 struct i40e_netdev_priv *np = netdev_priv(dev: netdev);
3220 struct i40e_vsi *vsi = np->vsi;
3221
3222 if (vid >= VLAN_N_VID)
3223 return;
3224 set_bit(nr: vid, addr: vsi->active_vlans);
3225}
3226
3227/**
3228 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
3229 * @netdev: network interface to be adjusted
3230 * @proto: unused protocol value
3231 * @vid: vlan id to be removed
3232 *
3233 * net_device_ops implementation for removing vlan ids
3234 **/
3235static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
3236 __always_unused __be16 proto, u16 vid)
3237{
3238 struct i40e_netdev_priv *np = netdev_priv(dev: netdev);
3239 struct i40e_vsi *vsi = np->vsi;
3240
3241 /* return code is ignored as there is nothing a user
3242 * can do about failure to remove and a log message was
3243 * already printed from the other function
3244 */
3245 i40e_vsi_kill_vlan(vsi, vid);
3246
3247 clear_bit(nr: vid, addr: vsi->active_vlans);
3248
3249 return 0;
3250}
3251
3252/**
3253 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
3254 * @vsi: the vsi being brought back up
3255 **/
3256static void i40e_restore_vlan(struct i40e_vsi *vsi)
3257{
3258 u16 vid;
3259
3260 if (!vsi->netdev)
3261 return;
3262
3263 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
3264 i40e_vlan_stripping_enable(vsi);
3265 else
3266 i40e_vlan_stripping_disable(vsi);
3267
3268 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
3269 i40e_vlan_rx_add_vid_up(netdev: vsi->netdev, htons(ETH_P_8021Q),
3270 vid);
3271}
3272
3273/**
3274 * i40e_vsi_add_pvid - Add pvid for the VSI
3275 * @vsi: the vsi being adjusted
3276 * @vid: the vlan id to set as a PVID
3277 **/
3278int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
3279{
3280 struct i40e_vsi_context ctxt;
3281 int ret;
3282
3283 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
3284 vsi->info.pvid = cpu_to_le16(vid);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
				    I40E_AQ_VSI_PVLAN_INSERT_PVID |
				    I40E_AQ_VSI_PVLAN_EMOD_STR;

	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "add pvid failed, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		return -ENOENT;
	}

	return 0;
}

/**
 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
 * @vsi: the vsi being adjusted
 *
 * Just use the vlan_rx_register() service to put it back to normal
 **/
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
{
	vsi->info.pvid = 0;

	i40e_vlan_stripping_disable(vsi);
}

/**
 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
 * @vsi: ptr to the VSI
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
{
	int i, err = 0;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);

	if (!i40e_enabled_xdp_vsi(vsi))
		return err;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);

	return err;
}

/**
 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free VSI's transmit software resources
 **/
static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
{
	int i;

	if (vsi->tx_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++)
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
				i40e_free_tx_resources(vsi->tx_rings[i]);
	}

	if (vsi->xdp_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++)
			if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
				i40e_free_tx_resources(vsi->xdp_rings[i]);
	}
}

/**
 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
 * @vsi: ptr to the VSI
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
{
	int i, err = 0;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
	return err;
}

/**
 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free all receive software resources
 **/
static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
{
	int i;

	if (!vsi->rx_rings)
		return;

	for (i = 0; i < vsi->num_queue_pairs; i++)
		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
			i40e_free_rx_resources(vsi->rx_rings[i]);
}

/**
 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
 * @ring: The Tx ring to configure
 *
 * This enables/disables XPS for a given Tx descriptor ring
 * based on the TCs enabled for the VSI that ring belongs to.
 **/
static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
{
	int cpu;

	if (!ring->q_vector || !ring->netdev || ring->ch)
		return;

	/* We only initialize XPS once, so as not to overwrite user settings */
	if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
		return;

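	/* pick the i-th online CPU for this vector; NUMA_NO_NODE (-1)
	 * means no node-locality preference
	 */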
	cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
	netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
			    ring->queue_index);
}

/**
 * i40e_xsk_pool - Retrieve the AF_XDP buffer pool if XDP and ZC are enabled
 * @ring: The Tx or Rx ring
 *
 * Returns the AF_XDP buffer pool or NULL.
 **/
static struct xsk_buff_pool *i40e_xsk_pool(struct i40e_ring *ring)
{
	bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
	int qid = ring->queue_index;

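	/* XDP Tx rings are allocated after the regular queue pairs, so map
	 * an XDP ring back to its queue pair index before the lookup
	 */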
	if (ring_is_xdp(ring))
		qid -= ring->vsi->alloc_queue_pairs;

	if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
		return NULL;

	return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
}

/**
 * i40e_configure_tx_ring - Configure a transmit ring context
 * @ring: The Tx ring to configure
 *
 * Configure the Tx descriptor ring in the HMC context.
 **/
static int i40e_configure_tx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	u32 qtx_ctl = 0;
	int err = 0;

	if (ring_is_xdp(ring))
		ring->xsk_pool = i40e_xsk_pool(ring);

	/* some ATR related tx ring init */
	if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
		ring->atr_sample_rate = vsi->back->atr_sample_rate;
		ring->atr_count = 0;
	} else {
		ring->atr_sample_rate = 0;
	}

	/* configure XPS */
	i40e_config_xps_tx_ring(ring);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(tx_ctx));

	tx_ctx.new_context = 1;
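	/* the HMC expects the ring base address in 128-byte units */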
	tx_ctx.base = (ring->dma / 128);
	tx_ctx.qlen = ring->count;
	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
					       I40E_FLAG_FD_ATR_ENABLED));
	tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
	/* FDIR VSI tx ring can still use RS bit and writebacks */
	if (vsi->type != I40E_VSI_FDIR)
		tx_ctx.head_wb_ena = 1;
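	/* the head writeback area lives immediately after the descriptors */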
	tx_ctx.head_wb_addr = ring->dma +
			      (ring->count * sizeof(struct i40e_tx_desc));

	/* As part of VSI creation/update, FW allocates certain
	 * Tx arbitration queue sets for each TC enabled for
	 * the VSI. The FW returns the handles to these queue
	 * sets as part of the response buffer to Add VSI,
	 * Update VSI, etc. AQ commands. It is expected that
	 * these queue set handles be associated with the Tx
	 * queues by the driver as part of the TX queue context
	 * initialization. This has to be done regardless of
	 * DCB as by default everything is mapped to TC0.
	 */

	if (ring->ch)
		tx_ctx.rdylist =
			le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
	else
		tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);

	tx_ctx.rdylist_act = 0;

	/* clear the context in the HMC */
	err = i40e_clear_lan_tx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* Now associate this queue with this PCI function */
	if (ring->ch) {
		if (ring->ch->type == I40E_VSI_VMDQ2)
			qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
		else
			return -EINVAL;

		qtx_ctl |= (ring->ch->vsi_number <<
			    I40E_QTX_CTL_VFVM_INDX_SHIFT) &
			    I40E_QTX_CTL_VFVM_INDX_MASK;
	} else {
		if (vsi->type == I40E_VSI_VMDQ2) {
			qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
			qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
				   I40E_QTX_CTL_VFVM_INDX_MASK;
		} else {
			qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
		}
	}

	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		    I40E_QTX_CTL_PF_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
	i40e_flush(hw);

	/* cache tail off for easier writes later */
	ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);

	return 0;
}

/**
 * i40e_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 **/
static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
{
	return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
}

/**
 * i40e_configure_rx_ring - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in the HMC context.
 **/
static int i40e_configure_rx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	int err = 0;
	bool ok;
	int ret;

	bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(rx_ctx));

	if (ring->vsi->type == I40E_VSI_MAIN)
		xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);

	ring->xsk_pool = i40e_xsk_pool(ring);
	if (ring->xsk_pool) {
		ring->rx_buf_len =
			xsk_pool_get_rx_frame_size(ring->xsk_pool);
		ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
						 MEM_TYPE_XSK_BUFF_POOL,
						 NULL);
		if (ret)
			return ret;
		dev_info(&vsi->back->pdev->dev,
			 "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
			 ring->queue_index);
	} else {
		ring->rx_buf_len = vsi->rx_buf_len;
		if (ring->vsi->type == I40E_VSI_MAIN) {
			ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_PAGE_SHARED,
							 NULL);
			if (ret)
				return ret;
		}
	}

	xdp_init_buff(&ring->xdp, i40e_rx_pg_size(ring) / 2, &ring->xdp_rxq);

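	/* the HMC expects the Rx buffer size in units of
	 * BIT(I40E_RXQ_CTX_DBUFF_SHIFT) bytes
	 */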
	rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
				    BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));

	rx_ctx.base = (ring->dma / 128);
	rx_ctx.qlen = ring->count;

	/* use 16 byte descriptors */
	rx_ctx.dsize = 0;

	/* descriptor type is always zero
	 * rx_ctx.dtype = 0;
	 */
	rx_ctx.hsplit_0 = 0;

	rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
	if (hw->revision_id == 0)
		rx_ctx.lrxqthresh = 0;
	else
		rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.l2tsel = 1;
	/* this controls whether VLAN is stripped from inner headers */
	rx_ctx.showiv = 0;
	/* set the prefena field to 1 because the manual says to */
	rx_ctx.prefena = 1;

	/* clear the context in the HMC */
	err = i40e_clear_lan_rx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* configure Rx buffer alignment */
	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
		if (I40E_2K_TOO_SMALL_WITH_PADDING) {
			dev_info(&vsi->back->pdev->dev,
				 "2k Rx buffer is too small to fit standard MTU and skb_shared_info\n");
			return -EOPNOTSUPP;
		}
		clear_ring_build_skb_enabled(ring);
	} else {
		set_ring_build_skb_enabled(ring);
	}

	ring->rx_offset = i40e_rx_offset(ring);

	/* cache tail for quicker writes, and clear the reg before use */
	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
	writel(0, ring->tail);

	if (ring->xsk_pool) {
		xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
		ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring));
	} else {
		ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
	}
	if (!ok) {
		/* Log this in case the user has forgotten to give the kernel
		 * any buffers, even later in the application.
		 */
		dev_info(&vsi->back->pdev->dev,
			 "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
			 ring->xsk_pool ? "AF_XDP ZC enabled " : "",
			 ring->queue_index, pf_q);
	}

	return 0;
}

/**
 * i40e_vsi_configure_tx - Configure the VSI for Tx
 * @vsi: VSI structure describing this set of rings and resources
 *
 * Configure the Tx VSI for operation.
 **/
static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
{
	int err = 0;
	u16 i;

	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
		err = i40e_configure_tx_ring(vsi->tx_rings[i]);

	if (err || !i40e_enabled_xdp_vsi(vsi))
		return err;

	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
		err = i40e_configure_tx_ring(vsi->xdp_rings[i]);

	return err;
}

/**
 * i40e_vsi_configure_rx - Configure the VSI for Rx
 * @vsi: the VSI being configured
 *
 * Configure the Rx VSI for operation.
 **/
static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
{
	int err = 0;
	u16 i;

	vsi->max_frame = i40e_max_vsi_frame_size(vsi, vsi->xdp_prog);
	vsi->rx_buf_len = i40e_calculate_vsi_rx_buf_len(vsi);

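	/* On small-page systems a standard-MTU frame fits in a 1536-byte
	 * buffer, leaving room in the half page for skb_shared_info
	 */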
#if (PAGE_SIZE < 8192)
	if (vsi->netdev && !I40E_2K_TOO_SMALL_WITH_PADDING &&
	    vsi->netdev->mtu <= ETH_DATA_LEN) {
		vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
		vsi->max_frame = vsi->rx_buf_len;
	}
#endif

	/* set up individual rings */
	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_configure_rx_ring(vsi->rx_rings[i]);

	return err;
}

/**
 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
 * @vsi: ptr to the VSI
 **/
static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring, *rx_ring;
	u16 qoffset, qcount;
	int i, n;

	if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Reset the TC information */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			rx_ring = vsi->rx_rings[i];
			tx_ring = vsi->tx_rings[i];
			rx_ring->dcb_tc = 0;
			tx_ring->dcb_tc = 0;
		}
		return;
	}

	for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
		if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
			continue;

		qoffset = vsi->tc_config.tc_info[n].qoffset;
		qcount = vsi->tc_config.tc_info[n].qcount;
		for (i = qoffset; i < (qoffset + qcount); i++) {
			rx_ring = vsi->rx_rings[i];
			tx_ring = vsi->tx_rings[i];
			rx_ring->dcb_tc = n;
			tx_ring->dcb_tc = n;
		}
	}
}

/**
 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
 * @vsi: ptr to the VSI
 **/
static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
{
	if (vsi->netdev)
		i40e_set_rx_mode(vsi->netdev);
}

/**
 * i40e_reset_fdir_filter_cnt - Reset flow director filter counters
 * @pf: Pointer to the targeted PF
 *
 * Set all flow director counters to 0.
 **/
static void i40e_reset_fdir_filter_cnt(struct i40e_pf *pf)
{
	pf->fd_tcp4_filter_cnt = 0;
	pf->fd_udp4_filter_cnt = 0;
	pf->fd_sctp4_filter_cnt = 0;
	pf->fd_ip4_filter_cnt = 0;
	pf->fd_tcp6_filter_cnt = 0;
	pf->fd_udp6_filter_cnt = 0;
	pf->fd_sctp6_filter_cnt = 0;
	pf->fd_ip6_filter_cnt = 0;
}

/**
 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
 * @vsi: Pointer to the targeted VSI
 *
 * This function replays the hlist on the hw where all the SB Flow Director
 * filters were saved.
 **/
static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
{
	struct i40e_fdir_filter *filter;
	struct i40e_pf *pf = vsi->back;
	struct hlist_node *node;

	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return;

	/* Reset FDir counters as we're replaying all existing filters */
	i40e_reset_fdir_filter_cnt(pf);

	hlist_for_each_entry_safe(filter, node,
				  &pf->fdir_filter_list, fdir_node) {
		i40e_add_del_fdir(vsi, filter, true);
	}
}

/**
 * i40e_vsi_configure - Set up the VSI for action
 * @vsi: the VSI being configured
 **/
static int i40e_vsi_configure(struct i40e_vsi *vsi)
{
	int err;

	i40e_set_vsi_rx_mode(vsi);
	i40e_restore_vlan(vsi);
	i40e_vsi_config_dcb_rings(vsi);
	err = i40e_vsi_configure_tx(vsi);
	if (!err)
		err = i40e_vsi_configure_rx(vsi);

	return err;
}

/**
 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 **/
static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
{
	bool has_xdp = i40e_enabled_xdp_vsi(vsi);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vector;
	int i, q;
	u32 qp;

	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
	 * and PFINT_LNKLSTn registers, e.g.:
	 *   PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
	 */
	qp = vsi->base_queue;
	vector = vsi->base_vector;
	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[i];

		q_vector->rx.next_update = jiffies + 1;
		q_vector->rx.target_itr =
			ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
		     q_vector->rx.target_itr >> 1);
		q_vector->rx.current_itr = q_vector->rx.target_itr;

		q_vector->tx.next_update = jiffies + 1;
		q_vector->tx.target_itr =
			ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
		     q_vector->tx.target_itr >> 1);
		q_vector->tx.current_itr = q_vector->tx.target_itr;

		wr32(hw, I40E_PFINT_RATEN(vector - 1),
		     i40e_intrl_usec_to_reg(vsi->int_rate_limit));

		/* beginning of the linked list of Rx queues assigned to this vector */
		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
		for (q = 0; q < q_vector->num_ringpairs; q++) {
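			/* with XDP, the XDP Tx queue of this pair sits
			 * alloc_queue_pairs above the regular queues
			 */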
			u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
			u32 val;

			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
			      (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			      (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
			      (I40E_QUEUE_TYPE_TX <<
			       I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);

			wr32(hw, I40E_QINT_RQCTL(qp), val);

			if (has_xdp) {
				/* TX queue with next queue set to TX */
				val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
				      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
				      (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
				      (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
				      (I40E_QUEUE_TYPE_TX <<
				       I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

				wr32(hw, I40E_QINT_TQCTL(nextqp), val);
			}
			/* TX queue with next RX or end of linked list */
			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
			      (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
			      ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
			      (I40E_QUEUE_TYPE_RX <<
			       I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

			/* Terminate the linked list */
			if (q == (q_vector->num_ringpairs - 1))
				val |= (I40E_QUEUE_END_OF_LIST <<
					I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

			wr32(hw, I40E_QINT_TQCTL(qp), val);
			qp++;
		}
	}

	i40e_flush(hw);
}

/**
 * i40e_enable_misc_int_causes - enable the non-queue interrupts
 * @pf: pointer to private device data structure
 **/
static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* clear things first */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);	/* disable all */
	rd32(hw, I40E_PFINT_ICR0);		/* read to clear */

	val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	      I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	      I40E_PFINT_ICR0_ENA_GRST_MASK |
	      I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
	      I40E_PFINT_ICR0_ENA_GPIO_MASK |
	      I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
	      I40E_PFINT_ICR0_ENA_VFLR_MASK |
	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
		val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;

	if (pf->flags & I40E_FLAG_PTP)
		val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;

	wr32(hw, I40E_PFINT_ICR0_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	     I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	/* OTHER_ITR_IDX = 0 */
	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}

/**
 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
 * @vsi: the VSI being configured
 **/
static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
{
	u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
	struct i40e_q_vector *q_vector = vsi->q_vectors[0];
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;

	/* set the ITR configuration */
	q_vector->rx.next_update = jiffies + 1;
	q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1);
	q_vector->rx.current_itr = q_vector->rx.target_itr;
	q_vector->tx.next_update = jiffies + 1;
	q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
	wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1);
	q_vector->tx.current_itr = q_vector->tx.target_itr;

	i40e_enable_misc_int_causes(pf);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the queue
	 * interrupt RX queue in linked list with next queue set to TX
	 */
	wr32(hw, I40E_QINT_RQCTL(0), I40E_QINT_RQCTL_VAL(nextqp, 0, TX));

	if (i40e_enabled_xdp_vsi(vsi)) {
		/* TX queue in linked list with next queue set to TX */
		wr32(hw, I40E_QINT_TQCTL(nextqp),
		     I40E_QINT_TQCTL_VAL(nextqp, 0, TX));
	}

	/* last TX queue so the next RX queue doesn't matter */
	wr32(hw, I40E_QINT_TQCTL(0),
	     I40E_QINT_TQCTL_VAL(I40E_QUEUE_END_OF_LIST, 0, RX));
	i40e_flush(hw);
}

/**
 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
 * @pf: board private structure
 **/
void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;

	wr32(hw, I40E_PFINT_DYN_CTL0,
	     I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
	i40e_flush(hw);
}

/**
 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
 * @pf: board private structure
 **/
void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
	      I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
	      (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);

	wr32(hw, I40E_PFINT_DYN_CTL0, val);
	i40e_flush(hw);
}

/**
 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
{
	struct i40e_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * i40e_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	struct i40e_q_vector *q_vector =
		container_of(notify, struct i40e_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * i40e_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void i40e_irq_affinity_release(struct kref *ref) {}

/**
 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
 * @vsi: the VSI being configured
 * @basename: name for the vector
 *
 * Allocates MSI-X vectors and requests interrupts from the kernel.
 **/
static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
{
	int q_vectors = vsi->num_q_vectors;
	struct i40e_pf *pf = vsi->back;
	int base = vsi->base_vector;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	int vector, err;
	int irq_num;
	int cpu;

	for (vector = 0; vector < q_vectors; vector++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[vector];

		irq_num = pf->msix_entries[base + vector].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "rx", rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "tx", tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(irq_num,
				  vsi->irq_handler,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "MSIX request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}

		/* register for affinity change notifications */
		q_vector->irq_num = irq_num;
		q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
		q_vector->affinity_notify.release = i40e_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
		/* Spread affinity hints out across online CPUs.
		 *
		 * get_cpu_mask returns a static constant mask with
		 * a permanent lifetime so it's ok to pass to
		 * irq_update_affinity_hint without making a copy.
		 */
		cpu = cpumask_local_spread(q_vector->v_idx, -1);
		irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
	}

	vsi->irqs_ready = true;
	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_num = pf->msix_entries[base + vector].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_update_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &vsi->q_vectors[vector]);
	}
	return err;
}

/**
 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 **/
static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	int i;

	/* disable interrupt causation from each queue */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u32 val;

		val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
		val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);

		val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
		val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);

		if (!i40e_enabled_xdp_vsi(vsi))
			continue;
		wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
	}

	/* disable each interrupt */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = vsi->base_vector;
		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
			wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);

		i40e_flush(hw);
		for (i = 0; i < vsi->num_q_vectors; i++)
			synchronize_irq(pf->msix_entries[i + base].vector);
	} else {
		/* Legacy and MSI mode - this stops all interrupt handling */
		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
		i40e_flush(hw);
		synchronize_irq(pf->pdev->irq);
	}
}

/**
 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
 * @vsi: the VSI being configured
 **/
static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = 0; i < vsi->num_q_vectors; i++)
			i40e_irq_dynamic_enable(vsi, i);
	} else {
		i40e_irq_dynamic_enable_icr0(pf);
	}

	i40e_flush(&pf->hw);
	return 0;
}

/**
 * i40e_free_misc_vector - Free the vector that handles non-queue events
 * @pf: board private structure
 **/
static void i40e_free_misc_vector(struct i40e_pf *pf)
{
	/* Disable ICR 0 */
	wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
	i40e_flush(&pf->hw);

	if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
		free_irq(pf->msix_entries[0].vector, pf);
		clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
	}
}

/**
 * i40e_intr - MSI/Legacy and non-queue interrupt handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 *
 * This is the handler used for all MSI/Legacy interrupts, and deals
 * with both queue and non-queue interrupts. This is also used in
 * MSIX mode to handle the non-queue interrupts.
 **/
static irqreturn_t i40e_intr(int irq, void *data)
{
	struct i40e_pf *pf = (struct i40e_pf *)data;
	struct i40e_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	u32 icr0, icr0_remaining;
	u32 val, ena_mask;

	icr0 = rd32(hw, I40E_PFINT_ICR0);
	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
		goto enable_intr;

	/* if interrupt but no bits showing, must be SWINT */
	if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
		pf->sw_int_count++;

	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
	    (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
		dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
	}

	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
		struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
		struct i40e_q_vector *q_vector = vsi->q_vectors[0];

		/* We do not have a way to disarm Queue causes while leaving
		 * interrupt enabled for all other causes; ideally the
		 * interrupt would be disabled while we are in NAPI, but
		 * this is not a performance path and napi_schedule()
		 * can deal with rescheduling.
		 */
		if (!test_bit(__I40E_DOWN, pf->state))
			napi_schedule_irqoff(&q_vector->napi);
	}

	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
		set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
		i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
	}

	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
		set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
		/* disable any further VFLR event notifications */
		if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) {
			u32 reg = rd32(hw, I40E_PFINT_ICR0_ENA);

			reg &= ~I40E_PFINT_ICR0_VFLR_MASK;
			wr32(hw, I40E_PFINT_ICR0_ENA, reg);
		} else {
			ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
			set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
		}
	}

	if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
		if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
			set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
		ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
		val = rd32(hw, I40E_GLGEN_RSTAT);
		val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		       >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		if (val == I40E_RESET_CORER) {
			pf->corer_count++;
		} else if (val == I40E_RESET_GLOBR) {
			pf->globr_count++;
		} else if (val == I40E_RESET_EMPR) {
			pf->empr_count++;
			set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
		}
	}

	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
		icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
		dev_info(&pf->pdev->dev, "HMC error interrupt\n");
		dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
			 rd32(hw, I40E_PFHMC_ERRORINFO),
			 rd32(hw, I40E_PFHMC_ERRORDATA));
	}

	if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
		u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);

		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_EVENT0_MASK)
			schedule_work(&pf->ptp_extts0_work);

		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK)
			i40e_ptp_tx_hwtstamp(pf);

		icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
	}

	/* If a critical error is pending we have no choice but to reset the
	 * device.
	 * Report and mask out any remaining unexpected interrupts.
	 */
	icr0_remaining = icr0 & ena_mask;
	if (icr0_remaining) {
		dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
			 icr0_remaining);
		if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
			dev_info(&pf->pdev->dev, "device will be reset\n");
			set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
			i40e_service_event_schedule(pf);
		}
		ena_mask &= ~icr0_remaining;
	}
	ret = IRQ_HANDLED;

enable_intr:
	/* re-enable interrupt causes */
	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
	if (!test_bit(__I40E_DOWN, pf->state) ||
	    test_bit(__I40E_RECOVERY_MODE, pf->state)) {
		i40e_service_event_schedule(pf);
		i40e_irq_dynamic_enable_icr0(pf);
	}

	return ret;
}

/**
 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: tx ring to clean
 * @budget: how many cleans we're allowed
 *
 * Returns true if there's any budget left (i.e. the clean is finished)
 **/
static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
{
	struct i40e_vsi *vsi = tx_ring->vsi;
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_desc;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
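	/* bias the index negative so that ring wrap-around is detected
	 * simply by i reaching zero
	 */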
	i -= tx_ring->count;

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		tx_desc->buffer_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;
		/* move past filter desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}
		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);
		if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buf->raw_buf);

		tx_buf->raw_buf = NULL;
		tx_buf->tx_flags = 0;
		tx_buf->next_to_watch = NULL;
		dma_unmap_len_set(tx_buf, len, 0);
		tx_desc->buffer_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move us past the eop_desc for start of next FD desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);

	return budget > 0;
}

/**
 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
{
	struct i40e_q_vector *q_vector = data;
	struct i40e_vsi *vsi;

	if (!q_vector->tx.ring)
		return IRQ_HANDLED;

	vsi = q_vector->tx.ring->vsi;
	i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);

	return IRQ_HANDLED;
}

/**
 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
 * @vsi: the VSI being configured
 * @v_idx: vector index
 * @qp_idx: queue pair index
 **/
static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
{
	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
	struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
	struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;

	/* Place XDP Tx ring in the same q_vector ring list as regular Tx */
	if (i40e_enabled_xdp_vsi(vsi)) {
		struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];

		xdp_ring->q_vector = q_vector;
		xdp_ring->next = q_vector->tx.ring;
		q_vector->tx.ring = xdp_ring;
		q_vector->tx.count++;
	}

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
}

/**
 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code. Ideally, we'd have
 * one vector per queue pair, but on a constrained vector budget, we
 * group the queue pairs as "efficiently" as possible.
 **/
static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
{
	int qp_remaining = vsi->num_queue_pairs;
	int q_vectors = vsi->num_q_vectors;
	int num_ringpairs;
	int v_start = 0;
	int qp_idx = 0;

	/* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
	 * group them so there are multiple queues per vector.
	 * It is also important to go through all the vectors available to be
	 * sure that if we don't use all the vectors, that the remaining vectors
	 * are cleared. This is especially important when decreasing the
	 * number of queues in use.
	 */
	for (; v_start < q_vectors; v_start++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];

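		/* distribute the remaining queue pairs as evenly as
		 * possible across the remaining vectors
		 */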
		num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);

		q_vector->num_ringpairs = num_ringpairs;
		q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;

		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;

		while (num_ringpairs--) {
			i40e_map_vector_to_qp(vsi, v_start, qp_idx);
			qp_idx++;
			qp_remaining--;
		}
	}
}

/**
 * i40e_vsi_request_irq - Request IRQ from the OS
 * @vsi: the VSI being configured
 * @basename: name for the vector
 **/
static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		err = i40e_vsi_request_irq_msix(vsi, basename);
	else if (pf->flags & I40E_FLAG_MSI_ENABLED)
		err = request_irq(pf->pdev->irq, i40e_intr, 0,
				  pf->int_name, pf);
	else
		err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
				  pf->int_name, pf);

	if (err)
		dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);

	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * i40e_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts. It's not called while the normal interrupt routine is executing.
 **/
static void i40e_netpoll(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = 0; i < vsi->num_q_vectors; i++)
			i40e_msix_clean_rings(0, vsi->q_vectors[i]);
	} else {
		i40e_intr(pf->pdev->irq, netdev);
	}
}
#endif

#define I40E_QTX_ENA_WAIT_COUNT 50

/**
 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @enable: enable or disable state of the queue
 *
 * This routine will wait for the given Tx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT if the queue fails to reach the requested state after
 * multiple retries; otherwise returns 0 on success.
 **/
static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
{
	int i;
	u32 tx_reg;

	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
		tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
			break;

		usleep_range(10, 20);
	}
	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
		return -ETIMEDOUT;

	return 0;
}

/**
 * i40e_control_tx_q - Start or stop a particular Tx queue
 * @pf: the PF structure
 * @pf_q: the PF queue to configure
 * @enable: start or stop the queue
 *
 * This function enables or disables a single queue. Note that any delay
 * required after the operation is expected to be handled by the caller of
 * this function.
 **/
static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
{
	struct i40e_hw *hw = &pf->hw;
	u32 tx_reg;
	int i;

	/* warn the TX unit of coming changes */
	i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
	if (!enable)
		usleep_range(10, 20);

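	/* wait for any previous enable/disable request to settle (the
	 * REQ and STAT bits agree) before changing the queue state
	 */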
	for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
		tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
		if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
		    ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
			break;
		usleep_range(1000, 2000);
	}

	/* Skip if the queue is already in the requested state */
	if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
		return;

	/* turn on/off the queue */
	if (enable) {
		wr32(hw, I40E_QTX_HEAD(pf_q), 0);
		tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
	} else {
		tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
	}

	wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
}

/**
 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
 * @seid: VSI SEID
 * @pf: the PF structure
 * @pf_q: the PF queue to configure
 * @is_xdp: true if the queue is used for XDP
 * @enable: start or stop the queue
 **/
int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
			   bool is_xdp, bool enable)
{
	int ret;

	i40e_control_tx_q(pf, pf_q, enable);

	/* wait for the change to finish */
	ret = i40e_pf_txq_wait(pf, pf_q, enable);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "VSI seid %d %sTx ring %d %sable timeout\n",
			 seid, (is_xdp ? "XDP " : ""), pf_q,
			 (enable ? "en" : "dis"));
	}

	return ret;
}

/**
 * i40e_vsi_enable_tx - Start a VSI's Tx rings
 * @vsi: the VSI being configured
 **/
static int i40e_vsi_enable_tx(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q, ret = 0;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		ret = i40e_control_wait_tx_q(vsi->seid, pf,
					     pf_q,
					     false /*is xdp*/, true);
		if (ret)
			break;

		if (!i40e_enabled_xdp_vsi(vsi))
			continue;

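		/* the XDP Tx queue that shadows this pair sits
		 * alloc_queue_pairs above the regular queue range
		 */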
		ret = i40e_control_wait_tx_q(vsi->seid, pf,
					     pf_q + vsi->alloc_queue_pairs,
					     true /*is xdp*/, true);
		if (ret)
			break;
	}
	return ret;
}

/**
 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @enable: enable or disable state of the queue
 *
 * This routine will wait for the given Rx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT if the queue fails to reach the requested state after
 * multiple retries; otherwise returns 0 on success.
 **/
static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
{
	int i;
	u32 rx_reg;

	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
		rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
			break;

		usleep_range(10, 20);
	}
	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
		return -ETIMEDOUT;

	return 0;
}

/**
 * i40e_control_rx_q - Start or stop a particular Rx queue
 * @pf: the PF structure
 * @pf_q: the PF queue to configure
 * @enable: start or stop the queue
 *
 * This function enables or disables a single queue. Note that
 * any delay required after the operation is expected to be
 * handled by the caller of this function.
 **/
static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
{
	struct i40e_hw *hw = &pf->hw;
	u32 rx_reg;
	int i;

	for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
		rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
		if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
		    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
			break;
		usleep_range(1000, 2000);
	}

	/* Skip if the queue is already in the requested state */
	if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
		return;

	/* turn on/off the queue */
	if (enable)
		rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
	else
		rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;

	wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
}

/**
 * i40e_control_wait_rx_q - Start/stop Rx queue and wait for completion
 * @pf: the PF structure
 * @pf_q: queue being configured
 * @enable: start or stop the rings
 *
 * This function enables or disables a single queue along with waiting
 * for the change to finish. The caller of this function should handle
 * the delays needed in the case of disabling queues.
 **/
int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
{
	i40e_control_rx_q(pf, pf_q, enable);

	/* wait for the change to finish */
	return i40e_pf_rxq_wait(pf, pf_q, enable);
}

/**
 * i40e_vsi_enable_rx - Start a VSI's Rx rings
 * @vsi: the VSI being configured
 **/
static int i40e_vsi_enable_rx(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q, ret = 0;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		ret = i40e_control_wait_rx_q(pf, pf_q, true);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Rx ring %d enable timeout\n",
				 vsi->seid, pf_q);
			break;
		}
	}

	return ret;
}

/**
 * i40e_vsi_start_rings - Start a VSI's rings
 * @vsi: the VSI being configured
 **/
int i40e_vsi_start_rings(struct i40e_vsi *vsi)
{
	int ret = 0;

	/* do rx first for enable and last for disable */
	ret = i40e_vsi_enable_rx(vsi);
	if (ret)
		return ret;
	ret = i40e_vsi_enable_tx(vsi);

	return ret;
}

#define I40E_DISABLE_TX_GAP_MSEC 50

/**
 * i40e_vsi_stop_rings - Stop a VSI's rings
 * @vsi: the VSI being configured
 **/
void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int pf_q, err, q_end;

	/* When port TX is suspended, don't wait */
	if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
		return i40e_vsi_stop_rings_no_wait(vsi);

	q_end = vsi->base_queue + vsi->num_queue_pairs;
	for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
		i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false);

	for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) {
		err = i40e_control_wait_rx_q(pf, pf_q, false);
		if (err)
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Rx ring %d disable timeout\n",
				 vsi->seid, pf_q);
	}

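	/* give the hardware time to drain in-flight Tx before the enable
	 * bits are force-cleared below
	 */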
4927 msleep(I40E_DISABLE_TX_GAP_MSEC);
4928 pf_q = vsi->base_queue;
4929 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
4930 wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0);
4931
4932 i40e_vsi_wait_queues_disabled(vsi);
4933}
4934
4935/**
4936 * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
4937 * @vsi: the VSI being shutdown
4938 *
4939 * This function stops all the rings for a VSI but does not delay to verify
4940 * that rings have been disabled. It is expected that the caller is shutting
4941 * down multiple VSIs at once and will delay together for all the VSIs after
4942 * initiating the shutdown. This is particularly useful for shutting down lots
4943 * of VFs together. Otherwise, a large delay can be incurred while configuring
4944 * each VSI in serial.
4945 **/
4946void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
4947{
4948 struct i40e_pf *pf = vsi->back;
4949 int i, pf_q;
4950
4951 pf_q = vsi->base_queue;
4952 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4953 i40e_control_tx_q(pf, pf_q, enable: false);
4954 i40e_control_rx_q(pf, pf_q, enable: false);
4955 }
4956}
4957
4958/**
4959 * i40e_vsi_free_irq - Free the irq association with the OS
4960 * @vsi: the VSI being configured
4961 **/
4962static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4963{
4964 struct i40e_pf *pf = vsi->back;
4965 struct i40e_hw *hw = &pf->hw;
4966 int base = vsi->base_vector;
4967 u32 val, qp;
4968 int i;
4969
4970 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4971 if (!vsi->q_vectors)
4972 return;
4973
4974 if (!vsi->irqs_ready)
4975 return;
4976
4977 vsi->irqs_ready = false;
4978 for (i = 0; i < vsi->num_q_vectors; i++) {
4979 int irq_num;
4980 u16 vector;
4981
4982 vector = i + base;
4983 irq_num = pf->msix_entries[vector].vector;
4984
4985 /* free only the irqs that were actually requested */
4986 if (!vsi->q_vectors[i] ||
4987 !vsi->q_vectors[i]->num_ringpairs)
4988 continue;
4989
4990 /* clear the affinity notifier in the IRQ descriptor */
4991 irq_set_affinity_notifier(irq: irq_num, NULL);
4992 /* remove our suggested affinity mask for this IRQ */
4993 irq_update_affinity_hint(irq: irq_num, NULL);
4994 free_irq(irq_num, vsi->q_vectors[i]);
4995
4996 /* Tear down the interrupt queue link list
4997 *
4998 * We know that they come in pairs and always
4999 * the Rx first, then the Tx. To clear the
5000 * link list, stick the EOL value into the
5001 * next_q field of the registers.
5002 */
5003 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
5004 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
5005 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
5006 val |= I40E_QUEUE_END_OF_LIST
5007 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
5008 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
5009
5010 while (qp != I40E_QUEUE_END_OF_LIST) {
5011 u32 next;
5012
5013 val = rd32(hw, I40E_QINT_RQCTL(qp));
5014
5015 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
5016 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
5017 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
5018 I40E_QINT_RQCTL_INTEVENT_MASK);
5019
5020 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
5021 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
5022
5023 wr32(hw, I40E_QINT_RQCTL(qp), val);
5024
5025 val = rd32(hw, I40E_QINT_TQCTL(qp));
5026
5027 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
5028 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
5029
5030 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
5031 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
5032 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
5033 I40E_QINT_TQCTL_INTEVENT_MASK);
5034
5035 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
5036 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
5037
5038 wr32(hw, I40E_QINT_TQCTL(qp), val);
5039 qp = next;
5040 }
5041 }
5042 } else {
5043 free_irq(pf->pdev->irq, pf);
5044
5045 val = rd32(hw, I40E_PFINT_LNKLST0);
5046 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
5047 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
5048 val |= I40E_QUEUE_END_OF_LIST
5049 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
5050 wr32(hw, I40E_PFINT_LNKLST0, val);
5051
5052 val = rd32(hw, I40E_QINT_RQCTL(qp));
5053 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
5054 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
5055 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
5056 I40E_QINT_RQCTL_INTEVENT_MASK);
5057
5058 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
5059 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
5060
5061 wr32(hw, I40E_QINT_RQCTL(qp), val);
5062
5063 val = rd32(hw, I40E_QINT_TQCTL(qp));
5064
5065 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
5066 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
5067 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
5068 I40E_QINT_TQCTL_INTEVENT_MASK);
5069
5070 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
5071 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
5072
5073 wr32(hw, I40E_QINT_TQCTL(qp), val);
5074 }
5075}
5076
5077/**
5078 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
5079 * @vsi: the VSI being configured
5080 * @v_idx: Index of vector to be freed
5081 *
5082 * This function frees the memory allocated to the q_vector. In addition if
5083 * NAPI is enabled it will delete any references to the NAPI struct prior
5084 * to freeing the q_vector.
5085 **/
5086static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
5087{
5088 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
5089 struct i40e_ring *ring;
5090
5091 if (!q_vector)
5092 return;
5093
5094 /* disassociate q_vector from rings */
5095 i40e_for_each_ring(ring, q_vector->tx)
5096 ring->q_vector = NULL;
5097
5098 i40e_for_each_ring(ring, q_vector->rx)
5099 ring->q_vector = NULL;
5100
5101 /* only VSI w/ an associated netdev is set up w/ NAPI */
5102 if (vsi->netdev)
5103 netif_napi_del(napi: &q_vector->napi);
5104
5105 vsi->q_vectors[v_idx] = NULL;
5106
5107 kfree_rcu(q_vector, rcu);
5108}
5109
5110/**
5111 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
5112 * @vsi: the VSI being un-configured
5113 *
5114 * This frees the memory allocated to the q_vectors and
5115 * deletes references to the NAPI struct.
5116 **/
5117static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
5118{
5119 int v_idx;
5120
5121 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
5122 i40e_free_q_vector(vsi, v_idx);
5123}
5124
5125/**
5126 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
5127 * @pf: board private structure
5128 **/
5129static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
5130{
5131 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
5132 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
5133 pci_disable_msix(dev: pf->pdev);
5134 kfree(objp: pf->msix_entries);
5135 pf->msix_entries = NULL;
5136 kfree(objp: pf->irq_pile);
5137 pf->irq_pile = NULL;
5138 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
5139 pci_disable_msi(dev: pf->pdev);
5140 }
5141 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
5142}
5143
5144/**
5145 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
5146 * @pf: board private structure
5147 *
5148 * We go through and clear interrupt specific resources and reset the structure
5149 * to pre-load conditions
5150 **/
5151static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
5152{
5153 int i;
5154
5155 if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state))
5156 i40e_free_misc_vector(pf);
5157
	i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
		      I40E_IWARP_IRQ_PILE_ID);

	i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT - 1);
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i])
			i40e_vsi_free_q_vectors(pf->vsi[i]);
	i40e_reset_interrupt_capability(pf);
}

/**
 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
 * @vsi: the VSI being configured
 **/
static void i40e_napi_enable_all(struct i40e_vsi *vsi)
{
	int q_idx;

	if (!vsi->netdev)
		return;

	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];

		if (q_vector->rx.ring || q_vector->tx.ring)
			napi_enable(&q_vector->napi);
	}
}

/**
 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
 * @vsi: the VSI being configured
 **/
static void i40e_napi_disable_all(struct i40e_vsi *vsi)
{
	int q_idx;

	if (!vsi->netdev)
		return;

	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];

		if (q_vector->rx.ring || q_vector->tx.ring)
			napi_disable(&q_vector->napi);
	}
}

/**
 * i40e_vsi_close - Shut down a VSI
 * @vsi: the vsi to be quelled
 **/
static void i40e_vsi_close(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
		i40e_down(vsi);
	i40e_vsi_free_irq(vsi);
	i40e_vsi_free_tx_resources(vsi);
	i40e_vsi_free_rx_resources(vsi);
	vsi->current_netdev_flags = 0;
	set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		set_bit(__I40E_CLIENT_RESET, pf->state);
}

/**
 * i40e_quiesce_vsi - Pause a given VSI
 * @vsi: the VSI being paused
 **/
static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
{
	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
	if (vsi->netdev && netif_running(vsi->netdev))
		vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
	else
		i40e_vsi_close(vsi);
}

/**
 * i40e_unquiesce_vsi - Resume a given VSI
 * @vsi: the VSI being resumed
 **/
static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
{
	if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
		return;

	if (vsi->netdev && netif_running(vsi->netdev))
		vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
	else
		i40e_vsi_open(vsi); /* this clears the DOWN bit */
}

/**
 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 **/
static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
{
	int v;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			i40e_quiesce_vsi(pf->vsi[v]);
	}
}

/**
 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
 * @pf: the PF
 **/
static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
{
	int v;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			i40e_unquiesce_vsi(pf->vsi[v]);
	}
}

/**
 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
 * @vsi: the VSI being configured
 *
 * Wait until all queues on a given VSI have been disabled.
 **/
int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q, ret;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		/* Check and wait for the Tx queue */
		ret = i40e_pf_txq_wait(pf, pf_q, false);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Tx ring %d disable timeout\n",
				 vsi->seid, pf_q);
			return ret;
		}

		if (!i40e_enabled_xdp_vsi(vsi))
			goto wait_rx;

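		/* XDP Tx rings occupy the queue range directly above the
		 * regular Tx rings, hence the alloc_queue_pairs offset below.
		 */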
		/* Check and wait for the XDP Tx queue */
		ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
				       false);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d XDP Tx ring %d disable timeout\n",
				 vsi->seid, pf_q);
			return ret;
		}
wait_rx:
		/* Check and wait for the Rx queue */
		ret = i40e_pf_rxq_wait(pf, pf_q, false);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Rx ring %d disable timeout\n",
				 vsi->seid, pf_q);
			return ret;
		}
	}

	return 0;
}

#ifdef CONFIG_I40E_DCB
/**
 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
 * @pf: the PF
 *
 * This function waits for the queues to be in disabled state for all the
 * VSIs that are managed by this PF.
 **/
static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
{
	int v, ret = 0;

	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		if (pf->vsi[v]) {
			ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
			if (ret)
				break;
		}
	}

	return ret;
}

#endif

/**
 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
 * @pf: pointer to PF
 *
 * Get TC map for iSCSI PF type that will include iSCSI TC
 * and LAN TC.
 **/
static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
{
	struct i40e_dcb_app_priority_table app;
	struct i40e_hw *hw = &pf->hw;
	u8 enabled_tc = 1; /* TC0 is always enabled */
	u8 tc, i;
	/* Get the iSCSI APP TLV */
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	for (i = 0; i < dcbcfg->numapps; i++) {
		app = dcbcfg->app[i];
		if (app.selector == I40E_APP_SEL_TCPIP &&
		    app.protocolid == I40E_APP_PROTOID_ISCSI) {
			tc = dcbcfg->etscfg.prioritytable[app.priority];
			enabled_tc |= BIT(tc);
			break;
		}
	}

	return enabled_tc;
}

/**
 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
 * @dcbcfg: the corresponding DCBx configuration structure
 *
 * Return the number of TCs from given DCBx configuration
 **/
static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
{
	int i, tc_unused = 0;
	u8 num_tc = 0;
	u8 ret = 0;

	/* Scan the ETS Config Priority Table to find
	 * traffic class enabled for a given priority
	 * and create a bitmask of enabled TCs
	 */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
		num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
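	/* e.g. a priority table of {0, 0, 1, 1, 2, 0, 0, 0} maps the eight
	 * user priorities onto TCs 0-2, yielding the bitmask 0x07 here.
	 */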

	/* Now scan the bitmask to check for
	 * contiguous TCs starting with TC0
	 */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (num_tc & BIT(i)) {
			if (!tc_unused) {
				ret++;
			} else {
				pr_err("Non-contiguous TC - Disabling DCB\n");
				return 1;
			}
		} else {
			tc_unused = 1;
		}
	}

	/* There is always at least TC0 */
	if (!ret)
		ret = 1;

	return ret;
}

/**
 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
 * @dcbcfg: the corresponding DCBx configuration structure
 *
 * Query the current DCB configuration and return the number of
 * traffic classes enabled from the given DCBX config
 **/
static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
{
	u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
	u8 enabled_tc = 1;
	u8 i;

	for (i = 0; i < num_tc; i++)
		enabled_tc |= BIT(i);

	return enabled_tc;
}

/**
 * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
 * @pf: PF being queried
 *
 * Query the current MQPRIO configuration and return the number of
 * traffic classes enabled.
 **/
static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
	u8 enabled_tc = 1, i;

	for (i = 1; i < num_tc; i++)
		enabled_tc |= BIT(i);
	return enabled_tc;
}

/**
 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
 * @pf: PF being queried
 *
 * Return number of traffic classes enabled for the given PF
 **/
static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u8 i, enabled_tc = 1;
	u8 num_tc = 0;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	if (i40e_is_tc_mqprio_enabled(pf))
		return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;

	/* If neither MQPRIO nor DCB is enabled, then always use single TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
		return 1;

	/* SFP mode will be enabled for all TCs on port */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		return i40e_dcb_get_num_tc(dcbcfg);

	/* MFP mode return count of enabled TCs for this PF */
	if (pf->hw.func_caps.iscsi)
		enabled_tc = i40e_get_iscsi_tc_map(pf);
	else
		return 1; /* Only TC0 */

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			num_tc++;
	}
	return num_tc;
}

/**
 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
 * @pf: PF being queried
 *
 * Return a bitmap for enabled traffic classes for this PF.
 **/
static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
{
	if (i40e_is_tc_mqprio_enabled(pf))
		return i40e_mqprio_get_enabled_tc(pf);

	/* If neither MQPRIO nor DCB is enabled for this PF then just return
	 * default TC
	 */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
		return I40E_DEFAULT_TRAFFIC_CLASS;

	/* SFP mode we want PF to be enabled for all TCs */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);

	/* MFP enabled and iSCSI PF type */
	if (pf->hw.func_caps.iscsi)
		return i40e_get_iscsi_tc_map(pf);
	else
		return I40E_DEFAULT_TRAFFIC_CLASS;
}

/**
 * i40e_vsi_get_bw_info - Query VSI BW Information
 * @vsi: the VSI being queried
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
{
	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u32 tc_bw_max;
	int ret;
	int i;

	/* Get the VSI level BW configuration */
	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi bw config, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	/* Get the VSI level BW configuration per TC */
	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
					       NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi ets bw config, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
		dev_info(&pf->pdev->dev,
			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
			 bw_config.tc_valid_bits,
			 bw_ets_config.tc_valid_bits);
		/* Still continuing */
	}

	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
	vsi->bw_max_quanta = bw_config.max_bw;
	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
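	/* tc_bw_max arrives as two little-endian 16-bit words; stitched
	 * together they hold a 4-bit max-quanta field for each of the
	 * eight TCs, consumed in the loop below.
	 */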
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
		vsi->bw_ets_limit_credits[i] =
			le16_to_cpu(bw_ets_config.credits[i]);
		/* 3 bits out of 4 for each TC */
		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i * 4)) & 0x7);
	}

	return 0;
}

/**
 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
 * @vsi: the VSI being configured
 * @enabled_tc: TC bitmap
 * @bw_share: BW shared credits per TC
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
				       u8 *bw_share)
{
	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
	struct i40e_pf *pf = vsi->back;
	int ret;
	int i;

	/* There is no need to reset BW when mqprio mode is on. */
	if (i40e_is_tc_mqprio_enabled(pf))
		return 0;
	if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
		ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
		if (ret)
			dev_info(&pf->pdev->dev,
				 "Failed to reset tx rate for vsi->seid %u\n",
				 vsi->seid);
		return ret;
	}
	memset(&bw_data, 0, sizeof(bw_data));
	bw_data.tc_valid_bits = enabled_tc;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		bw_data.tc_bw_credits[i] = bw_share[i];

	ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "AQ command Config VSI BW allocation per TC failed = %d\n",
			 pf->hw.aq.asq_last_status);
		return -EINVAL;
	}

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		vsi->info.qs_handle[i] = bw_data.qs_handles[i];

	return 0;
}

/**
 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
 * @vsi: the VSI being configured
 * @enabled_tc: TC map to be enabled
 *
 **/
static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u8 netdev_tc = 0;
	int i;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	if (!netdev)
		return;

	if (!enabled_tc) {
		netdev_reset_tc(netdev);
		return;
	}

	/* Set up actual enabled TCs on the VSI */
	if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
		return;

	/* set per TC queues for the VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* Only set TC queues for enabled tcs
		 *
		 * e.g. For a VSI that has TC0 and TC3 enabled the
		 * enabled_tc bitmap would be 0x00001001; the driver
		 * will set the numtc for netdev as 2 that will be
		 * referenced by the netdev layer as TC 0 and 1.
		 */
		if (vsi->tc_config.enabled_tc & BIT(i))
			netdev_set_tc_queue(netdev,
					    vsi->tc_config.tc_info[i].netdev_tc,
					    vsi->tc_config.tc_info[i].qcount,
					    vsi->tc_config.tc_info[i].qoffset);
	}

	if (i40e_is_tc_mqprio_enabled(pf))
		return;

	/* Assign UP2TC map for the VSI */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		/* Get the actual TC# for the UP */
		u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
		/* Get the mapped netdev TC# for the UP */
		netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
		netdev_set_prio_tc_map(netdev, i, netdev_tc);
	}
}

/**
 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
 * @vsi: the VSI being configured
 * @ctxt: the ctxt buffer returned from AQ VSI update param command
 **/
static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
				      struct i40e_vsi_context *ctxt)
{
	/* copy just the sections touched not the entire info
	 * since not all sections are valid as returned by
	 * update vsi params
	 */
	vsi->info.mapping_flags = ctxt->info.mapping_flags;
	memcpy(&vsi->info.queue_mapping,
	       &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
	memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));
}

/**
 * i40e_update_adq_vsi_queues - update queue mapping for ADq VSI
 * @vsi: the VSI being reconfigured
 * @vsi_offset: offset from main VF VSI
 */
int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)
{
	struct i40e_vsi_context ctxt = {};
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	int ret;

	if (!vsi)
		return -EINVAL;
	pf = vsi->back;
	hw = &pf->hw;

	ctxt.seid = vsi->seid;
	ctxt.pf_num = hw->pf_id;
	ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id + vsi_offset;
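	/* vf_num above is the absolute VF number seen by the HW: the VF's
	 * relative id plus this function's VF base, plus the ADq VSI offset.
	 */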
	ctxt.uplink_seid = vsi->uplink_seid;
	ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
	ctxt.flags = I40E_AQ_VSI_TYPE_VF;
	ctxt.info = vsi->info;

	i40e_vsi_setup_queue_map(vsi, &ctxt, vsi->tc_config.enabled_tc,
				 false);
	if (vsi->reconfig_rss) {
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);
		ret = i40e_vsi_config_rss(vsi);
		if (ret) {
			dev_info(&pf->pdev->dev, "Failed to reconfig rss for num_queues\n");
			return ret;
		}
		vsi->reconfig_rss = false;
	}

	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev, "Update vsi config failed, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(hw, hw->aq.asq_last_status));
		return ret;
	}
	/* update the local VSI info with updated queue map */
	i40e_vsi_update_queue_map(vsi, &ctxt);
	vsi->info.valid_sections = 0;

	return ret;
}

/**
 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
 * @vsi: VSI to be configured
 * @enabled_tc: TC bitmap
 *
 * This configures a particular VSI for TCs that are mapped to the
 * given TC bitmap. It uses default bandwidth share for TCs across
 * VSIs to configure TC for a particular VSI.
 *
 * NOTE:
 * It is expected that the VSI queues have been quiesced before calling
 * this function.
 **/
static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	int ret = 0;
	int i;

	/* Check if enabled_tc is same as existing or new TCs */
	if (vsi->tc_config.enabled_tc == enabled_tc &&
	    vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
		return ret;

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			bw_share[i] = 1;
	}

	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
	if (ret) {
		struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};

		dev_info(&pf->pdev->dev,
			 "Failed configuring TC map %d for VSI %d\n",
			 enabled_tc, vsi->seid);
		ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
						  &bw_config, NULL);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed querying vsi bw info, err %pe aq_err %s\n",
				 ERR_PTR(ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			goto out;
		}
		if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
			u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;

			if (!valid_tc)
				valid_tc = bw_config.tc_valid_bits;
			/* Always enable TC0, no matter what */
			valid_tc |= 1;
			dev_info(&pf->pdev->dev,
				 "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
				 enabled_tc, bw_config.tc_valid_bits, valid_tc);
			enabled_tc = valid_tc;
		}

		ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
		if (ret) {
			dev_err(&pf->pdev->dev,
				"Unable to configure TC map %d for VSI %d\n",
				enabled_tc, vsi->seid);
			goto out;
		}
	}

	/* Update Queue Pairs Mapping for currently enabled UPs */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = vsi->back->hw.pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	ctxt.info = vsi->info;
	if (i40e_is_tc_mqprio_enabled(pf)) {
		ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
		if (ret)
			goto out;
	} else {
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
	}

	/* On destroying the qdisc, reset vsi->rss_size, as number of enabled
	 * queues changed.
	 */
	if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
		vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
				      vsi->num_queue_pairs);
		ret = i40e_vsi_config_rss(vsi);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "Failed to reconfig rss for num_queues\n");
			return ret;
		}
		vsi->reconfig_rss = false;
	}
	if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
		ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
	}

	/* Update the VSI after updating the VSI queue-mapping
	 * information
	 */
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Update vsi tc config failed, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(hw, hw->aq.asq_last_status));
		goto out;
	}
	/* update the local VSI info with updated queue map */
	i40e_vsi_update_queue_map(vsi, &ctxt);
	vsi->info.valid_sections = 0;

	/* Update current VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed updating vsi bw info, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(hw, hw->aq.asq_last_status));
		goto out;
	}

	/* Update the netdev TC setup */
	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
out:
	return ret;
}

/**
 * i40e_get_link_speed - Returns link speed for the interface
 * @vsi: VSI to be configured
 *
 **/
static int i40e_get_link_speed(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	switch (pf->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		return 40000;
	case I40E_LINK_SPEED_25GB:
		return 25000;
	case I40E_LINK_SPEED_20GB:
		return 20000;
	case I40E_LINK_SPEED_10GB:
		return 10000;
	case I40E_LINK_SPEED_1GB:
		return 1000;
	default:
		return -EINVAL;
	}
}

/**
 * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits
 * @vsi: Pointer to vsi structure
 * @max_tx_rate: max TX rate in bytes to be converted into Mbits
 *
 * Helper function to convert units before send to set BW limit
 **/
static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate)
{
	if (max_tx_rate < I40E_BW_MBPS_DIVISOR) {
		dev_warn(&vsi->back->pdev->dev,
			 "Setting max tx rate to minimum usable value of 50Mbps.\n");
		max_tx_rate = I40E_BW_CREDIT_DIVISOR;
	} else {
		do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
	}
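	/* e.g. assuming I40E_BW_MBPS_DIVISOR is 125000 (bytes/s per Mbit/s),
	 * a request of 12,500,000 bytes/s converts to 100 Mbit/s here.
	 */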

	return max_tx_rate;
}

/**
 * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
 * @vsi: VSI to be configured
 * @seid: seid of the channel/VSI
 * @max_tx_rate: max TX rate to be configured as BW limit
 *
 * Helper function to set BW limit for a given VSI
 **/
int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
{
	struct i40e_pf *pf = vsi->back;
	u64 credits = 0;
	int speed = 0;
	int ret = 0;

	speed = i40e_get_link_speed(vsi);
	if (max_tx_rate > speed) {
		dev_err(&pf->pdev->dev,
			"Invalid max tx rate %llu specified for VSI seid %d.",
			max_tx_rate, seid);
		return -EINVAL;
	}
	if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) {
		dev_warn(&pf->pdev->dev,
			 "Setting max tx rate to minimum usable value of 50Mbps.\n");
		max_tx_rate = I40E_BW_CREDIT_DIVISOR;
	}
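	/* e.g. max_tx_rate = 175 Mbit/s yields credits = 3 below, so the HW
	 * enforces 150 Mbit/s (assuming I40E_BW_CREDIT_DIVISOR is 50).
	 */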

	/* Tx rate credits are in values of 50Mbps, 0 is disabled */
	credits = max_tx_rate;
	do_div(credits, I40E_BW_CREDIT_DIVISOR);
	ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
					  I40E_MAX_BW_INACTIVE_ACCUM, NULL);
	if (ret)
		dev_err(&pf->pdev->dev,
			"Failed set tx rate (%llu Mbps) for vsi->seid %u, err %pe aq_err %s\n",
			max_tx_rate, seid, ERR_PTR(ret),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	return ret;
}

/**
 * i40e_remove_queue_channels - Remove queue channels for the TCs
 * @vsi: VSI to be configured
 *
 * Remove queue channels for the TCs
 **/
static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
{
	enum i40e_admin_queue_err last_aq_status;
	struct i40e_cloud_filter *cfilter;
	struct i40e_channel *ch, *ch_tmp;
	struct i40e_pf *pf = vsi->back;
	struct hlist_node *node;
	int ret, i;

	/* Reset rss size that was stored when reconfiguring rss for
	 * channel VSIs with non-power-of-2 queue count.
	 */
	vsi->current_rss_size = 0;

	/* perform cleanup for channels if they exist */
	if (list_empty(&vsi->ch_list))
		return;

	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		struct i40e_vsi *p_vsi;

		list_del(&ch->list);
		p_vsi = ch->parent_vsi;
		if (!p_vsi || !ch->initialized) {
			kfree(ch);
			continue;
		}
		/* Reset queue contexts */
		for (i = 0; i < ch->num_queue_pairs; i++) {
			struct i40e_ring *tx_ring, *rx_ring;
			u16 pf_q;

			pf_q = ch->base_queue + i;
			tx_ring = vsi->tx_rings[pf_q];
			tx_ring->ch = NULL;

			rx_ring = vsi->rx_rings[pf_q];
			rx_ring->ch = NULL;
		}

		/* Reset BW configured for this VSI via mqprio */
		ret = i40e_set_bw_limit(vsi, ch->seid, 0);
		if (ret)
			dev_info(&vsi->back->pdev->dev,
				 "Failed to reset tx rate for ch->seid %u\n",
				 ch->seid);

		/* delete cloud filters associated with this channel */
		hlist_for_each_entry_safe(cfilter, node,
					  &pf->cloud_filter_list, cloud_node) {
			if (cfilter->seid != ch->seid)
				continue;

			hash_del(&cfilter->cloud_node);
			if (cfilter->dst_port)
				ret = i40e_add_del_cloud_filter_big_buf(vsi,
									cfilter,
									false);
			else
				ret = i40e_add_del_cloud_filter(vsi, cfilter,
								false);
			last_aq_status = pf->hw.aq.asq_last_status;
			if (ret)
				dev_info(&pf->pdev->dev,
					 "Failed to delete cloud filter, err %pe aq_err %s\n",
					 ERR_PTR(ret),
					 i40e_aq_str(&pf->hw, last_aq_status));
			kfree(cfilter);
		}

		/* delete VSI from FW */
		ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
					     NULL);
		if (ret)
			dev_err(&vsi->back->pdev->dev,
				"unable to remove channel (%d) for parent VSI(%d)\n",
				ch->seid, p_vsi->seid);
		kfree(ch);
	}
	INIT_LIST_HEAD(&vsi->ch_list);
}

/**
 * i40e_get_max_queues_for_channel - Get max queues set on existing channels
 * @vsi: ptr to VSI to which channels are associated with
 *
 * Helper function which returns max value among the queue counts set on the
 * channels/TCs created.
 **/
static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch, *ch_tmp;
	int max = 0;

	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		if (!ch->initialized)
			continue;
		if (ch->num_queue_pairs > max)
			max = ch->num_queue_pairs;
	}

	return max;
}

/**
 * i40e_validate_num_queues - validate num_queues w.r.t channel
 * @pf: ptr to PF device
 * @num_queues: number of queues
 * @vsi: the parent VSI
 * @reconfig_rss: indicates should the RSS be reconfigured or not
 *
 * This function validates number of queues in the context of new channel
 * which is being established and determines if RSS should be reconfigured
 * or not for parent VSI.
 **/
static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
				    struct i40e_vsi *vsi, bool *reconfig_rss)
{
	int max_ch_queues;

	if (!reconfig_rss)
		return -EINVAL;

	*reconfig_rss = false;
	if (vsi->current_rss_size) {
		if (num_queues > vsi->current_rss_size) {
			dev_dbg(&pf->pdev->dev,
				"Error: num_queues (%d) > vsi's current_size(%d)\n",
				num_queues, vsi->current_rss_size);
			return -EINVAL;
		} else if ((num_queues < vsi->current_rss_size) &&
			   (!is_power_of_2(num_queues))) {
			dev_dbg(&pf->pdev->dev,
				"Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
				num_queues, vsi->current_rss_size);
			return -EINVAL;
		}
	}

	if (!is_power_of_2(num_queues)) {
		/* Find the max num_queues configured on existing channels,
		 * if any. If channels exist, enforce that 'num_queues' is
		 * at least the largest queue count configured on a channel.
		 */
		max_ch_queues = i40e_get_max_queues_for_channel(vsi);
		if (num_queues < max_ch_queues) {
			dev_dbg(&pf->pdev->dev,
				"Error: num_queues (%d) < max queues configured for channel(%d)\n",
				num_queues, max_ch_queues);
			return -EINVAL;
		}
		*reconfig_rss = true;
	}

	return 0;
}

/**
 * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
 * @vsi: the VSI being setup
 * @rss_size: size of RSS, accordingly LUT gets reprogrammed
 *
 * This function reconfigures RSS by reprogramming LUTs using 'rss_size'
 **/
static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
{
	struct i40e_pf *pf = vsi->back;
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	struct i40e_hw *hw = &pf->hw;
	int local_rss_size;
	u8 *lut;
	int ret;

	if (!vsi->rss_size)
		return -EINVAL;

	if (rss_size > vsi->rss_size)
		return -EINVAL;

	local_rss_size = min_t(int, vsi->rss_size, rss_size);
	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Ignoring user configured lut if there is one */
	i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);

	/* Use user configured hash key if there is one, otherwise
	 * use default.
	 */
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);

	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot set RSS lut, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(hw, hw->aq.asq_last_status));
		kfree(lut);
		return ret;
	}
	kfree(lut);

	/* Do the update w.r.t. storing rss_size */
	if (!vsi->orig_rss_size)
		vsi->orig_rss_size = vsi->rss_size;
	vsi->current_rss_size = local_rss_size;

	return ret;
}

/**
 * i40e_channel_setup_queue_map - Setup a channel queue map
 * @pf: ptr to PF device
 * @ctxt: VSI context structure
 * @ch: ptr to channel structure
 *
 * Setup queue map for a specific channel
 **/
static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
					 struct i40e_vsi_context *ctxt,
					 struct i40e_channel *ch)
{
	u16 qcount, qmap, sections = 0;
	u8 offset = 0;
	int pow;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

	qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
	ch->num_queue_pairs = qcount;

	/* find the next higher power-of-2 of num queue pairs */
	pow = ilog2(qcount);
	if (!is_power_of_2(qcount))
		pow++;
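	/* e.g. qcount = 6: ilog2(6) = 2, rounded up to pow = 3, so the
	 * queue map below advertises a 2^3 = 8 queue region for TC0.
	 */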

	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
	       (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

	/* Setup queue TC[0].qmap for given VSI context */
	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);

	ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
	ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}

/**
 * i40e_add_channel - add a channel by adding VSI
 * @pf: ptr to PF device
 * @uplink_seid: underlying HW switching element (VEB) ID
 * @ch: ptr to channel structure
 *
 * Add a channel (VSI) using add_vsi and queue_map
 **/
static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
			    struct i40e_channel *ch)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	u8 enabled_tc = 0x1; /* TC0 enabled */
	int ret;

	if (ch->type != I40E_VSI_VMDQ2) {
		dev_info(&pf->pdev->dev,
			 "add new vsi failed, ch->type %d\n", ch->type);
		return -EINVAL;
	}

	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.pf_num = hw->pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = uplink_seid;
	ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
	if (ch->type == I40E_VSI_VMDQ2)
		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;

	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
		ctxt.info.valid_sections |=
			cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
		ctxt.info.switch_id =
			cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
	}

	/* Set queue map for a given VSI context */
	i40e_channel_setup_queue_map(pf, &ctxt, ch);

	/* Now time to create VSI */
	ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "add new vsi failed, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(&pf->hw,
				     pf->hw.aq.asq_last_status));
		return -ENOENT;
	}

	/* Success, update channel, set enabled_tc only if the channel
	 * is not a macvlan
	 */
	ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc;
	ch->seid = ctxt.seid;
	ch->vsi_number = ctxt.vsi_number;
	ch->stat_counter_idx = le16_to_cpu(ctxt.info.stat_counter_idx);

	/* copy just the sections touched not the entire info
	 * since not all sections are valid as returned by
	 * update vsi params
	 */
	ch->info.mapping_flags = ctxt.info.mapping_flags;
	memcpy(&ch->info.queue_mapping,
	       &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
	memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
	       sizeof(ctxt.info.tc_mapping));

	return 0;
}

/**
 * i40e_channel_config_bw - configure BW for the channel's VSI
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 * @bw_share: BW shared credits per TC
 *
 * Configure the channel VSI's per-TC BW allocation via the AQ.
 **/
static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
				  u8 *bw_share)
{
	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
	int ret;
	int i;

	memset(&bw_data, 0, sizeof(bw_data));
	bw_data.tc_valid_bits = ch->enabled_tc;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		bw_data.tc_bw_credits[i] = bw_share[i];

	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
				       &bw_data, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
			 vsi->back->hw.aq.asq_last_status, ch->seid);
		return -EINVAL;
	}

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		ch->info.qs_handle[i] = bw_data.qs_handles[i];

	return 0;
}

/**
 * i40e_channel_config_tx_ring - config TX ring associated with new channel
 * @pf: ptr to PF device
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 *
 * Configure TX rings associated with channel (VSI) since the queues are
 * being taken from the parent VSI.
 **/
static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
				       struct i40e_vsi *vsi,
				       struct i40e_channel *ch)
{
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
	int ret;
	int i;

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (ch->enabled_tc & BIT(i))
			bw_share[i] = 1;
	}

	/* configure BW for new VSI */
	ret = i40e_channel_config_bw(vsi, ch, bw_share);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed configuring TC map %d for channel (seid %u)\n",
			 ch->enabled_tc, ch->seid);
		return ret;
	}

	for (i = 0; i < ch->num_queue_pairs; i++) {
		struct i40e_ring *tx_ring, *rx_ring;
		u16 pf_q;

		pf_q = ch->base_queue + i;

		/* Get to TX ring ptr of main VSI, for re-setup TX queue
		 * context
		 */
		tx_ring = vsi->tx_rings[pf_q];
		tx_ring->ch = ch;

		/* Get the RX ring ptr */
		rx_ring = vsi->rx_rings[pf_q];
		rx_ring->ch = ch;
	}

	return 0;
}

/**
 * i40e_setup_hw_channel - setup new channel
 * @pf: ptr to PF device
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 * @uplink_seid: underlying HW switching element (VEB) ID
 * @type: type of channel to be created (VMDq2/VF)
 *
 * Setup new channel (VSI) based on specified type (VMDq2/VF)
 * and configures TX rings accordingly
 **/
static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
					struct i40e_vsi *vsi,
					struct i40e_channel *ch,
					u16 uplink_seid, u8 type)
{
	int ret;

	ch->initialized = false;
	ch->base_queue = vsi->next_base_queue;
	ch->type = type;

	/* Proceed with creation of channel (VMDq2) VSI */
	ret = i40e_add_channel(pf, uplink_seid, ch);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "failed to add_channel using uplink_seid %u\n",
			 uplink_seid);
		return ret;
	}

	/* Mark the successful creation of channel */
	ch->initialized = true;

	/* Reconfigure TX queues using QTX_CTL register */
	ret = i40e_channel_config_tx_ring(pf, vsi, ch);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "failed to configure TX rings for channel %u\n",
			 ch->seid);
		return ret;
	}

	/* update 'next_base_queue' */
	vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
	dev_dbg(&pf->pdev->dev,
		"Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
		ch->seid, ch->vsi_number, ch->stat_counter_idx,
		ch->num_queue_pairs,
		vsi->next_base_queue);
	return ret;
}

/**
 * i40e_setup_channel - setup new channel using uplink element
 * @pf: ptr to PF device
 * @vsi: pointer to the VSI to set up the channel within
 * @ch: ptr to channel structure
 *
 * Setup new channel (VSI) based on specified type (VMDq2/VF)
 * and uplink switching element (uplink_seid)
 **/
static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
			       struct i40e_channel *ch)
{
	u8 vsi_type;
	u16 seid;
	int ret;

	if (vsi->type == I40E_VSI_MAIN) {
		vsi_type = I40E_VSI_VMDQ2;
	} else {
		dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
			vsi->type);
		return false;
	}

	/* underlying switching element */
	seid = pf->vsi[pf->lan_vsi]->uplink_seid;

	/* create channel (VSI), configure TX rings */
	ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
	if (ret) {
		dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
		return false;
	}

	return ch->initialized ? true : false;
}

/**
 * i40e_validate_and_set_switch_mode - sets up switch mode correctly
 * @vsi: ptr to VSI which has PF backing
 *
 * Sets up the switch mode if it needs to be changed, restricted to the
 * modes that are allowed.
 **/
static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
{
	u8 mode;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret;

	ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
	if (ret)
		return -EINVAL;

	if (hw->dev_caps.switch_mode) {
		/* if switch mode is set, support mode2 (non-tunneled for
		 * cloud filter) for now
		 */
		u32 switch_mode = hw->dev_caps.switch_mode &
				  I40E_SWITCH_MODE_MASK;

		if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
			if (switch_mode == I40E_CLOUD_FILTER_MODE2)
				return 0;
			dev_err(&pf->pdev->dev,
				"Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
				hw->dev_caps.switch_mode);
			return -EINVAL;
		}
	}

	/* Set Bit 7 to be valid */
	mode = I40E_AQ_SET_SWITCH_BIT7_VALID;

	/* Set L4type for TCP support */
	mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;

	/* Set cloud filter mode */
	mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;

	/* Prep mode field for set_switch_config */
	ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
					pf->last_sw_conf_valid_flags,
					mode, NULL);
	if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
		dev_err(&pf->pdev->dev,
			"couldn't set switch config bits, err %pe aq_err %s\n",
			ERR_PTR(ret),
			i40e_aq_str(hw,
				    hw->aq.asq_last_status));

	return ret;
}

/**
 * i40e_create_queue_channel - function to create channel
 * @vsi: VSI to be configured
 * @ch: ptr to channel (it contains channel specific params)
 *
 * This function creates channel (VSI) using num_queues specified by user,
 * reconfigs RSS if needed.
 **/
int i40e_create_queue_channel(struct i40e_vsi *vsi,
			      struct i40e_channel *ch)
{
	struct i40e_pf *pf = vsi->back;
	bool reconfig_rss;
	int err;

	if (!ch)
		return -EINVAL;

	if (!ch->num_queue_pairs) {
		dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
			ch->num_queue_pairs);
		return -EINVAL;
	}

	/* validate user requested num_queues for channel */
	err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
				       &reconfig_rss);
	if (err) {
		dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
			 ch->num_queue_pairs);
		return -EINVAL;
	}

	/* By default we are in VEPA mode, if this is the first VF/VMDq
	 * VSI to be added switch to VEB mode.
	 */
	if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
		pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;

		if (vsi->type == I40E_VSI_MAIN) {
			if (i40e_is_tc_mqprio_enabled(pf))
				i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
			else
				i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
		}
		/* From now on, the main VSI's queue count is TC0's
		 * queue count.
		 */
	}

	/* By this time, vsi->cnt_q_avail shall be set to non-zero and
	 * it should be at least num_queues
	 */
	if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
		dev_dbg(&pf->pdev->dev,
			"Error: cnt_q_avail (%u) less than num_queues %d\n",
			vsi->cnt_q_avail, ch->num_queue_pairs);
		return -EINVAL;
	}

	/* reconfig_rss only if vsi type is MAIN_VSI */
	if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
		err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "Error: unable to reconfig rss for num_queues (%u)\n",
				 ch->num_queue_pairs);
			return -EINVAL;
		}
	}

	if (!i40e_setup_channel(pf, vsi, ch)) {
		dev_info(&pf->pdev->dev, "Failed to setup channel\n");
		return -EINVAL;
	}

	dev_info(&pf->pdev->dev,
		 "Setup channel (id:%u) utilizing num_queues %d\n",
		 ch->seid, ch->num_queue_pairs);

	/* configure VSI for BW limit */
	if (ch->max_tx_rate) {
		u64 credits = ch->max_tx_rate;

		if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
			return -EINVAL;

		do_div(credits, I40E_BW_CREDIT_DIVISOR);
		dev_dbg(&pf->pdev->dev,
			"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
			ch->max_tx_rate,
			credits,
			ch->seid);
	}

	/* in case of VF, this will be main SRIOV VSI */
	ch->parent_vsi = vsi;

	/* and update main_vsi's count for queue_available to use */
	vsi->cnt_q_avail -= ch->num_queue_pairs;

	return 0;
}

/**
 * i40e_configure_queue_channels - Add queue channel for the given TCs
 * @vsi: VSI to be configured
 *
 * Configures queue channel mapping to the given TCs
 **/
static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch;
	u64 max_rate = 0;
	int ret = 0, i;

	/* Create app vsi with the TCs. Main VSI with TC0 is already set up */
	vsi->tc_seid_map[0] = vsi->seid;
	for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			ch = kzalloc(sizeof(*ch), GFP_KERNEL);
			if (!ch) {
				ret = -ENOMEM;
				goto err_free;
			}

			INIT_LIST_HEAD(&ch->list);
			ch->num_queue_pairs =
				vsi->tc_config.tc_info[i].qcount;
			ch->base_queue =
				vsi->tc_config.tc_info[i].qoffset;

			/* Bandwidth limit through tc interface is in bytes/s,
			 * change to Mbit/s
			 */
			max_rate = vsi->mqprio_qopt.max_rate[i];
			do_div(max_rate, I40E_BW_MBPS_DIVISOR);
			ch->max_tx_rate = max_rate;
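			/* e.g. a tc command rate of 625,000,000 bytes/s
			 * becomes 5000 Mbit/s here (625000000 / 125000).
			 */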

			list_add_tail(&ch->list, &vsi->ch_list);

			ret = i40e_create_queue_channel(vsi, ch);
			if (ret) {
				dev_err(&vsi->back->pdev->dev,
					"Failed creating queue channel with TC%d: queues %d\n",
					i, ch->num_queue_pairs);
				goto err_free;
			}
			vsi->tc_seid_map[i] = ch->seid;
		}
	}

	/* reset to reconfigure TX queue contexts */
	i40e_do_reset(vsi->back, I40E_PF_RESET_FLAG, true);
	return ret;

err_free:
	i40e_remove_queue_channels(vsi);
	return ret;
}

/**
 * i40e_veb_config_tc - Configure TCs for given VEB
 * @veb: given VEB
 * @enabled_tc: TC bitmap
 *
 * Configures given TC bitmap for VEB (switching) element
 **/
int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
{
	struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
	struct i40e_pf *pf = veb->pf;
	int ret = 0;
	int i;

	/* No TCs or already enabled TCs just return */
	if (!enabled_tc || veb->enabled_tc == enabled_tc)
		return ret;

	bw_data.tc_valid_bits = enabled_tc;
	/* bw_data.absolute_credits is not set (relative) */

	/* Enable ETS TCs with equal BW Share for now */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			bw_data.tc_bw_share_credits[i] = 1;
	}

	ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
						   &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "VEB bw config failed, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto out;
	}

	/* Update the BW information */
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed getting veb bw config, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}

out:
	return ret;
}

#ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
 * @pf: PF struct
 *
 * Reconfigure VEB/VSIs on a given PF; it is assumed that
 * the caller has quiesced all the VSIs before calling
 * this function
 **/
static void i40e_dcb_reconfigure(struct i40e_pf *pf)
{
	u8 tc_map = 0;
	int ret;
	u8 v;

	/* Enable the TCs available on PF to all VEBs */
	tc_map = i40e_pf_get_tc_map(pf);
	if (tc_map == I40E_DEFAULT_TRAFFIC_CLASS)
		return;

	for (v = 0; v < I40E_MAX_VEB; v++) {
		if (!pf->veb[v])
			continue;
		ret = i40e_veb_config_tc(pf->veb[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VEB seid=%d\n",
				 pf->veb[v]->seid);
			/* Will try to configure as many components */
		}
	}

	/* Update each VSI */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v])
			continue;

		/* - Enable all TCs for the LAN VSI
		 * - For all others keep them at TC0 for now
		 */
		if (v == pf->lan_vsi)
			tc_map = i40e_pf_get_tc_map(pf);
		else
			tc_map = I40E_DEFAULT_TRAFFIC_CLASS;

		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VSI seid=%d\n",
				 pf->vsi[v]->seid);
			/* Will try to configure as many components */
		} else {
			/* Re-configure VSI vectors based on updated TC map */
			i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
			if (pf->vsi[v]->netdev)
				i40e_dcbnl_set_all(pf->vsi[v]);
		}
	}
}

/**
 * i40e_resume_port_tx - Resume port Tx
 * @pf: PF struct
 *
 * Resume a port's Tx and issue a PF reset in case of failure to
 * resume.
 **/
static int i40e_resume_port_tx(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int ret;

	ret = i40e_aq_resume_port_tx(hw, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Resume Port Tx failed, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		/* Schedule PF reset to recover */
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		i40e_service_event_schedule(pf);
	}

	return ret;
}

/**
 * i40e_suspend_port_tx - Suspend port Tx
 * @pf: PF struct
 *
 * Suspend a port's Tx and issue a PF reset in case of failure.
 **/
static int i40e_suspend_port_tx(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int ret;

	ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Suspend Port Tx failed, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		/* Schedule PF reset to recover */
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		i40e_service_event_schedule(pf);
	}

	return ret;
}

/**
 * i40e_hw_set_dcb_config - Program new DCBX settings into HW
 * @pf: PF being configured
 * @new_cfg: New DCBX configuration
 *
 * Program DCB settings into HW and reconfigure VEB/VSIs on
 * given PF. Uses "Set LLDP MIB" AQC to program the hardware.
 **/
static int i40e_hw_set_dcb_config(struct i40e_pf *pf,
				  struct i40e_dcbx_config *new_cfg)
{
	struct i40e_dcbx_config *old_cfg = &pf->hw.local_dcbx_config;
	int ret;

	/* Check if reconfiguration is needed; compare the configurations
	 * themselves, not the pointers to them.
	 */
	if (!memcmp(new_cfg, old_cfg, sizeof(*new_cfg))) {
		dev_dbg(&pf->pdev->dev, "No Change in DCB Config required.\n");
		return 0;
	}

	/* Config change disable all VSIs */
	i40e_pf_quiesce_all_vsi(pf);

	/* Copy the new config to the current config */
	*old_cfg = *new_cfg;
	old_cfg->etsrec = old_cfg->etscfg;
	ret = i40e_set_dcb_config(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Set DCB Config failed, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto out;
	}

	/* Changes in configuration update VEB/VSI */
	i40e_dcb_reconfigure(pf);
out:
	/* In case of reset do not try to resume anything */
	if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) {
		/* Re-start the VSIs if disabled */
		ret = i40e_resume_port_tx(pf);
		/* In case of error no point in resuming VSIs */
		if (ret)
			goto err;
		i40e_pf_unquiesce_all_vsi(pf);
	}
err:
	return ret;
}

/**
 * i40e_hw_dcb_config - Program new DCBX settings into HW
 * @pf: PF being configured
 * @new_cfg: New DCBX configuration
 *
 * Program DCB settings into HW and reconfigure VEB/VSIs on
 * given PF
 **/
int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
{
	struct i40e_aqc_configure_switching_comp_ets_data ets_data;
	u8 prio_type[I40E_MAX_TRAFFIC_CLASS] = {0};
	u32 mfs_tc[I40E_MAX_TRAFFIC_CLASS];
	struct i40e_dcbx_config *old_cfg;
	u8 mode[I40E_MAX_TRAFFIC_CLASS];
	struct i40e_rx_pb_config pb_cfg;
	struct i40e_hw *hw = &pf->hw;
	u8 num_ports = hw->num_ports;
	bool need_reconfig;
	int ret = -EINVAL;
	u8 lltc_map = 0;
	u8 tc_map = 0;
	u8 new_numtc;
	u8 i;

	dev_dbg(&pf->pdev->dev, "Configuring DCB registers directly\n");
	/* Un-pack information to Program ETS HW via shared API
	 * numtc, tcmap
	 * LLTC map
	 * ETS/NON-ETS arbiter mode
	 * max exponent (credit refills)
	 * Total number of ports
	 * PFC priority bit-map
	 * Priority Table
	 * BW % per TC
	 * Arbiter mode between UPs sharing same TC
	 * TSA table (ETS or non-ETS)
	 * EEE enabled or not
	 * MFS TC table
	 */

	new_numtc = i40e_dcb_get_num_tc(new_cfg);

	memset(&ets_data, 0, sizeof(ets_data));
	for (i = 0; i < new_numtc; i++) {
		tc_map |= BIT(i);
		switch (new_cfg->etscfg.tsatable[i]) {
		case I40E_IEEE_TSA_ETS:
			prio_type[i] = I40E_DCB_PRIO_TYPE_ETS;
			ets_data.tc_bw_share_credits[i] =
					new_cfg->etscfg.tcbwtable[i];
			break;
		case I40E_IEEE_TSA_STRICT:
			prio_type[i] = I40E_DCB_PRIO_TYPE_STRICT;
			lltc_map |= BIT(i);
			ets_data.tc_bw_share_credits[i] =
					I40E_DCB_STRICT_PRIO_CREDITS;
			break;
		default:
			/* Invalid TSA type */
			need_reconfig = false;
			goto out;
		}
	}

	old_cfg = &hw->local_dcbx_config;
	/* Check if need reconfiguration */
	need_reconfig = i40e_dcb_need_reconfig(pf, old_cfg, new_cfg);

	/* If needed, enable/disable frame tagging, disable all VSIs
	 * and suspend port tx
	 */
	if (need_reconfig) {
		/* Enable DCB tagging only when more than one TC */
		if (new_numtc > 1)
			pf->flags |= I40E_FLAG_DCB_ENABLED;
		else
			pf->flags &= ~I40E_FLAG_DCB_ENABLED;

		set_bit(__I40E_PORT_SUSPENDED, pf->state);
		/* Reconfiguration needed quiesce all VSIs */
		i40e_pf_quiesce_all_vsi(pf);
		ret = i40e_suspend_port_tx(pf);
		if (ret)
			goto err;
	}

	/* Configure Port ETS Tx Scheduler */
	ets_data.tc_valid_bits = tc_map;
	ets_data.tc_strict_priority_flags = lltc_map;
	ret = i40e_aq_config_switch_comp_ets
		(hw, pf->mac_seid, &ets_data,
		 i40e_aqc_opc_modify_switching_comp_ets, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Modify Port ETS failed, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto out;
	}

	/* Configure Rx ETS HW */
	memset(&mode, I40E_DCB_ARB_MODE_ROUND_ROBIN, sizeof(mode));
	i40e_dcb_hw_set_num_tc(hw, new_numtc);
	i40e_dcb_hw_rx_fifo_config(hw, I40E_DCB_ARB_MODE_ROUND_ROBIN,
				   I40E_DCB_ARB_MODE_STRICT_PRIORITY,
				   I40E_DCB_DEFAULT_MAX_EXPONENT,
				   lltc_map);
	i40e_dcb_hw_rx_cmd_monitor_config(hw, new_numtc, num_ports);
	i40e_dcb_hw_rx_ets_bw_config(hw, new_cfg->etscfg.tcbwtable, mode,
				     prio_type);
	i40e_dcb_hw_pfc_config(hw, new_cfg->pfc.pfcenable,
			       new_cfg->etscfg.prioritytable);
	i40e_dcb_hw_rx_up2tc_config(hw, new_cfg->etscfg.prioritytable);

	/* Configure Rx Packet Buffers in HW */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		mfs_tc[i] = pf->vsi[pf->lan_vsi]->netdev->mtu;
		mfs_tc[i] += I40E_PACKET_HDR_PAD;
	}
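	/* The max frame size per TC is the port MTU plus the L2 header
	 * overhead accounted for by I40E_PACKET_HDR_PAD; every TC uses the
	 * same port MTU here.
	 */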
7036
7037 i40e_dcb_hw_calculate_pool_sizes(hw, num_ports,
7038 eee_enabled: false, pfc_en: new_cfg->pfc.pfcenable,
7039 mfs_tc, pb_cfg: &pb_cfg);
7040 i40e_dcb_hw_rx_pb_config(hw, old_pb_cfg: &pf->pb_cfg, new_pb_cfg: &pb_cfg);
7041
7042 /* Update the local Rx Packet buffer config */
7043 pf->pb_cfg = pb_cfg;
7044
7045 /* Inform the FW about changes to DCB configuration */
7046 ret = i40e_aq_dcb_updated(hw: &pf->hw, NULL);
7047 if (ret) {
7048 dev_info(&pf->pdev->dev,
7049 "DCB Updated failed, err %pe aq_err %s\n",
7050 ERR_PTR(ret),
7051 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7052 goto out;
7053 }
7054
7055 /* Update the port DCBx configuration */
7056 *old_cfg = *new_cfg;
7057
7058 /* Changes in configuration update VEB/VSI */
7059 i40e_dcb_reconfigure(pf);
7060out:
7061 /* Re-start the VSIs if disabled */
7062 if (need_reconfig) {
7063 ret = i40e_resume_port_tx(pf);
7064
7065 clear_bit(nr: __I40E_PORT_SUSPENDED, addr: pf->state);
7066 /* In case of error no point in resuming VSIs */
7067 if (ret)
7068 goto err;
7069
7070 /* Wait for the PF's queues to be disabled */
7071 ret = i40e_pf_wait_queues_disabled(pf);
7072 if (ret) {
7073 /* Schedule PF reset to recover */
7074 set_bit(nr: __I40E_PF_RESET_REQUESTED, addr: pf->state);
7075 i40e_service_event_schedule(pf);
7076 goto err;
7077 } else {
7078 i40e_pf_unquiesce_all_vsi(pf);
7079 set_bit(nr: __I40E_CLIENT_SERVICE_REQUESTED, addr: pf->state);
7080 set_bit(nr: __I40E_CLIENT_L2_CHANGE, addr: pf->state);
7081 }
7082 /* registers are set, lets apply */
7083 if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB)
7084 ret = i40e_hw_set_dcb_config(pf, new_cfg);
7085 }
7086
7087err:
7088 return ret;
7089}
7090
7091/**
7092 * i40e_dcb_sw_default_config - Set default DCB configuration when DCB in SW
7093 * @pf: PF being queried
7094 *
7095 * Set default DCB configuration in case DCB is to be done in SW.
7096 **/
int i40e_dcb_sw_default_config(struct i40e_pf *pf)
{
	struct i40e_dcbx_config *dcb_cfg = &pf->hw.local_dcbx_config;
	struct i40e_aqc_configure_switching_comp_ets_data ets_data;
	struct i40e_hw *hw = &pf->hw;
	int err;

	if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) {
		/* Update the local cached instance with TC0 ETS */
		memset(&pf->tmp_cfg, 0, sizeof(struct i40e_dcbx_config));
		pf->tmp_cfg.etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
		pf->tmp_cfg.etscfg.maxtcs = 0;
		pf->tmp_cfg.etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
		pf->tmp_cfg.etscfg.tsatable[0] = I40E_IEEE_TSA_ETS;
		pf->tmp_cfg.pfc.willing = I40E_IEEE_DEFAULT_PFC_WILLING;
		pf->tmp_cfg.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
		/* FW needs one App to configure HW */
		pf->tmp_cfg.numapps = I40E_IEEE_DEFAULT_NUM_APPS;
		pf->tmp_cfg.app[0].selector = I40E_APP_SEL_ETHTYPE;
		pf->tmp_cfg.app[0].priority = I40E_IEEE_DEFAULT_APP_PRIO;
		pf->tmp_cfg.app[0].protocolid = I40E_APP_PROTOID_FCOE;

		return i40e_hw_set_dcb_config(pf, &pf->tmp_cfg);
	}

	memset(&ets_data, 0, sizeof(ets_data));
	ets_data.tc_valid_bits = I40E_DEFAULT_TRAFFIC_CLASS; /* TC0 only */
	ets_data.tc_strict_priority_flags = 0; /* ETS */
	ets_data.tc_bw_share_credits[0] = I40E_IEEE_DEFAULT_ETS_TCBW; /* 100% to TC0 */

	/* Enable ETS on the Physical port */
	err = i40e_aq_config_switch_comp_ets
		(hw, pf->mac_seid, &ets_data,
		 i40e_aqc_opc_enable_switching_comp_ets, NULL);
	if (err) {
		dev_info(&pf->pdev->dev,
			 "Enable Port ETS failed, err %pe aq_err %s\n",
			 ERR_PTR(err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		err = -ENOENT;
		goto out;
	}

	/* Update the local cached instance with TC0 ETS */
	dcb_cfg->etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
	dcb_cfg->etscfg.cbs = 0;
	dcb_cfg->etscfg.maxtcs = I40E_MAX_TRAFFIC_CLASS;
	dcb_cfg->etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW;

out:
	return err;
}

/**
 * i40e_init_pf_dcb - Initialize DCB configuration
 * @pf: PF being configured
 *
 * Query the current DCB configuration and cache it
 * in the hardware structure
 **/
static int i40e_init_pf_dcb(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err;

	/* Do not enable DCB for SW1 and SW2 images even if the FW is capable.
	 * Also do not enable DCBX if the FW LLDP agent is disabled.
	 */
	if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) {
		dev_info(&pf->pdev->dev, "DCB is not supported.\n");
		err = -EOPNOTSUPP;
		goto out;
	}
	if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
		dev_info(&pf->pdev->dev, "FW LLDP is disabled, attempting SW DCB\n");
		err = i40e_dcb_sw_default_config(pf);
		if (err) {
			dev_info(&pf->pdev->dev, "Could not initialize SW DCB\n");
			goto out;
		}
		dev_info(&pf->pdev->dev, "SW DCB initialization succeeded.\n");
		pf->dcbx_cap = DCB_CAP_DCBX_HOST |
			       DCB_CAP_DCBX_VER_IEEE;
		/* at init capable but disabled */
		pf->flags |= I40E_FLAG_DCB_CAPABLE;
		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
		goto out;
	}
	err = i40e_init_dcb(hw, true);
	if (!err) {
		/* Device/Function is not DCBX capable */
		if ((!hw->func_caps.dcb) ||
		    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
			dev_info(&pf->pdev->dev,
				 "DCBX offload is not supported or is disabled for this PF.\n");
		} else {
			/* When status is not DISABLED then DCBX is in FW */
			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
				       DCB_CAP_DCBX_VER_IEEE;

			pf->flags |= I40E_FLAG_DCB_CAPABLE;
			/* Enable DCB tagging only when more than one TC
			 * or explicitly disable if only one TC
			 */
			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
				pf->flags |= I40E_FLAG_DCB_ENABLED;
			else
				pf->flags &= ~I40E_FLAG_DCB_ENABLED;
			dev_dbg(&pf->pdev->dev,
				"DCBX offload is supported for this PF.\n");
		}
	} else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
		dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
		pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
	} else {
		dev_info(&pf->pdev->dev,
			 "Query for DCB configuration failed, err %pe aq_err %s\n",
			 ERR_PTR(err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}

out:
	return err;
}
#endif /* CONFIG_I40E_DCB */

/**
 * i40e_print_link_message - print link up or down
 * @vsi: the VSI for which link needs a message
 * @isup: true if link is up, false otherwise
 */
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
{
	enum i40e_aq_link_speed new_speed;
	struct i40e_pf *pf = vsi->back;
	char *speed = "Unknown";
	char *fc = "Unknown";
	char *fec = "";
	char *req_fec = "";
	char *an = "";

	if (isup)
		new_speed = pf->hw.phy.link_info.link_speed;
	else
		new_speed = I40E_LINK_SPEED_UNKNOWN;

	if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
		return;
	vsi->current_isup = isup;
	vsi->current_speed = new_speed;
	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	/* Warn user if link speed on NPAR enabled partition is not at
	 * least 10GB
	 */
	if (pf->hw.func_caps.npar_enable &&
	    (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
	     pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
		netdev_warn(vsi->netdev,
			    "The partition detected link speed that is less than 10Gbps\n");

	switch (pf->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case I40E_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case I40E_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case I40E_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case I40E_LINK_SPEED_5GB:
		speed = "5 G";
		break;
	case I40E_LINK_SPEED_2_5GB:
		speed = "2.5 G";
		break;
	case I40E_LINK_SPEED_1GB:
		speed = "1000 M";
		break;
	case I40E_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		break;
	}

	switch (pf->hw.fc.current_mode) {
	case I40E_FC_FULL:
		fc = "RX/TX";
		break;
	case I40E_FC_TX_PAUSE:
		fc = "TX";
		break;
	case I40E_FC_RX_PAUSE:
		fc = "RX";
		break;
	default:
		fc = "None";
		break;
	}

	if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
		req_fec = "None";
		fec = "None";
		an = "False";

		if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
			an = "True";

		if (pf->hw.phy.link_info.fec_info &
		    I40E_AQ_CONFIG_FEC_KR_ENA)
			fec = "CL74 FC-FEC/BASE-R";
		else if (pf->hw.phy.link_info.fec_info &
			 I40E_AQ_CONFIG_FEC_RS_ENA)
			fec = "CL108 RS-FEC";

		/* 'CL108 RS-FEC' should be displayed when RS is requested, or
		 * both RS and FC are requested
		 */
		if (vsi->back->hw.phy.link_info.req_fec_info &
		    (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
			if (vsi->back->hw.phy.link_info.req_fec_info &
			    I40E_AQ_REQUEST_FEC_RS)
				req_fec = "CL108 RS-FEC";
			else
				req_fec = "CL74 FC-FEC/BASE-R";
		}
		netdev_info(vsi->netdev,
			    "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
			    speed, req_fec, fec, an, fc);
	} else if (pf->hw.device_id == I40E_DEV_ID_KX_X722) {
		req_fec = "None";
		fec = "None";
		an = "False";

		if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
			an = "True";

		if (pf->hw.phy.link_info.fec_info &
		    I40E_AQ_CONFIG_FEC_KR_ENA)
			fec = "CL74 FC-FEC/BASE-R";

		if (pf->hw.phy.link_info.req_fec_info &
		    I40E_AQ_REQUEST_FEC_KR)
			req_fec = "CL74 FC-FEC/BASE-R";

		netdev_info(vsi->netdev,
			    "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
			    speed, req_fec, fec, an, fc);
	} else {
		netdev_info(vsi->netdev,
			    "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
			    speed, fc);
	}
}

/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 **/
static int i40e_up_complete(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_vsi_configure_msix(vsi);
	else
		i40e_configure_msi_and_legacy(vsi);

	/* start rings */
	err = i40e_vsi_start_rings(vsi);
	if (err)
		return err;

	clear_bit(__I40E_VSI_DOWN, vsi->state);
	i40e_napi_enable_all(vsi);
	i40e_vsi_enable_irq(vsi);

	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
	    (vsi->netdev)) {
		i40e_print_link_message(vsi, true);
		netif_tx_start_all_queues(vsi->netdev);
		netif_carrier_on(vsi->netdev);
	}

	/* replay FDIR SB filters */
	if (vsi->type == I40E_VSI_FDIR) {
		/* reset fd counters */
		pf->fd_add_err = 0;
		pf->fd_atr_cnt = 0;
		i40e_fdir_filter_restore(vsi);
	}

	/* On the next run of the service_task, notify any clients of the new
	 * opened netdev
	 */
	set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
	i40e_service_event_schedule(pf);

	return 0;
}

/**
 * i40e_vsi_reinit_locked - Reset the VSI
 * @vsi: the VSI being configured
 *
 * Rebuild the ring structs after some configuration
 * has changed, e.g. MTU size.
 **/
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
		usleep_range(1000, 2000);
	i40e_down(vsi);

	i40e_up(vsi);
	clear_bit(__I40E_CONFIG_BUSY, pf->state);
}

/**
 * i40e_force_link_state - Force the link status
 * @pf: board private structure
 * @is_up: whether the link state should be forced up or down
 **/
static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_aq_set_phy_config config = {0};
	bool non_zero_phy_type = is_up;
	struct i40e_hw *hw = &pf->hw;
	u64 mask;
	u8 speed;
	int err;

	/* Card might've been put in an unstable state by other drivers
	 * and applications, which can cause incorrect speed values to be
	 * set on startup. To clear the speed registers, we call
	 * get_phy_capabilities twice: once to get the initial state of
	 * available speeds, and once to get the current PHY config.
	 */
	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
					   NULL);
	if (err) {
		dev_err(&pf->pdev->dev,
			"failed to get phy cap., ret = %pe last_status = %s\n",
			ERR_PTR(err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
		return err;
	}
	speed = abilities.link_speed;

	/* Get the current phy config */
	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
					   NULL);
	if (err) {
		dev_err(&pf->pdev->dev,
			"failed to get phy cap., ret = %pe last_status = %s\n",
			ERR_PTR(err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
		return err;
	}

	/* If the link needs to go up, but was not forced to go down,
	 * and its speed values are OK, there is no need for a flap.
	 * If non_zero_phy_type was set, we still need to force it up.
	 */
	if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)
		non_zero_phy_type = true;
	else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
		return 0;

	/* To force link we need to set bits for all supported PHY types,
	 * but there are now more than 32, so we need to split the bitmap
	 * across two fields.
	 */
	mask = I40E_PHY_TYPES_BITMASK;
	config.phy_type =
		non_zero_phy_type ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
	config.phy_type_ext =
		non_zero_phy_type ? (u8)((mask >> 32) & 0xff) : 0;
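	/* Illustration: for a hypothetical mask of 0x300000001ULL the low
	 * 32 bits (0x00000001) land in phy_type while bits 32..39 (0x03)
	 * land in phy_type_ext.
	 */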
	/* Copy the old settings, except for phy_type */
	config.abilities = abilities.abilities;
	if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) {
		if (is_up)
			config.abilities |= I40E_AQ_PHY_ENABLE_LINK;
		else
			config.abilities &= ~(I40E_AQ_PHY_ENABLE_LINK);
	}
	if (abilities.link_speed != 0)
		config.link_speed = abilities.link_speed;
	else
		config.link_speed = speed;
	config.eee_capability = abilities.eee_capability;
	config.eeer = abilities.eeer_val;
	config.low_power_ctrl = abilities.d3_lpan;
	config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
			    I40E_AQ_PHY_FEC_CONFIG_MASK;
	err = i40e_aq_set_phy_config(hw, &config, NULL);

	if (err) {
		dev_err(&pf->pdev->dev,
			"set phy config ret = %pe last_status = %s\n",
			ERR_PTR(err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return err;
	}

	/* Update the link info */
	err = i40e_update_link_info(hw);
	if (err) {
		/* Wait a little bit (on 40G cards it sometimes takes a really
		 * long time for link to come back from the atomic reset)
		 * and try once more
		 */
		msleep(1000);
		i40e_update_link_info(hw);
	}

	i40e_aq_set_link_restart_an(hw, is_up, NULL);

	return 0;
}

/**
 * i40e_up - Bring the connection back up after being down
 * @vsi: the VSI being configured
 **/
int i40e_up(struct i40e_vsi *vsi)
{
	int err;

	if (vsi->type == I40E_VSI_MAIN &&
	    (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
	     vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
		i40e_force_link_state(vsi->back, true);

	err = i40e_vsi_configure(vsi);
	if (!err)
		err = i40e_up_complete(vsi);

	return err;
}

/**
 * i40e_down - Shutdown the connection processing
 * @vsi: the VSI being stopped
 **/
void i40e_down(struct i40e_vsi *vsi)
{
	int i;

	/* It is assumed that the caller of this function
	 * sets the vsi->state __I40E_VSI_DOWN bit.
	 */
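	/* Illustrative call pattern, per the contract above:
	 *
	 *	set_bit(__I40E_VSI_DOWN, vsi->state);
	 *	i40e_down(vsi);
	 */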
	if (vsi->netdev) {
		netif_carrier_off(vsi->netdev);
		netif_tx_disable(vsi->netdev);
	}
	i40e_vsi_disable_irq(vsi);
	i40e_vsi_stop_rings(vsi);
	if (vsi->type == I40E_VSI_MAIN &&
	    (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
	     vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
		i40e_force_link_state(vsi->back, false);
	i40e_napi_disable_all(vsi);

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		i40e_clean_tx_ring(vsi->tx_rings[i]);
		if (i40e_enabled_xdp_vsi(vsi)) {
			/* Make sure that in-progress ndo_xdp_xmit and
			 * ndo_xsk_wakeup calls are completed.
			 */
			synchronize_rcu();
			i40e_clean_tx_ring(vsi->xdp_rings[i]);
		}
		i40e_clean_rx_ring(vsi->rx_rings[i]);
	}
}

/**
 * i40e_validate_mqprio_qopt - validate queue mapping info
 * @vsi: the VSI being configured
 * @mqprio_qopt: queue parameters
 **/
static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
				     struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	u64 sum_max_rate = 0;
	u64 max_rate = 0;
	int i;

	if (mqprio_qopt->qopt.offset[0] != 0 ||
	    mqprio_qopt->qopt.num_tc < 1 ||
	    mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
		return -EINVAL;
	for (i = 0; ; i++) {
		if (!mqprio_qopt->qopt.count[i])
			return -EINVAL;
		if (mqprio_qopt->min_rate[i]) {
			dev_err(&vsi->back->pdev->dev,
				"Invalid min tx rate (greater than 0) specified\n");
			return -EINVAL;
		}
		max_rate = mqprio_qopt->max_rate[i];
		do_div(max_rate, I40E_BW_MBPS_DIVISOR);
		sum_max_rate += max_rate;

		if (i >= mqprio_qopt->qopt.num_tc - 1)
			break;
		if (mqprio_qopt->qopt.offset[i + 1] !=
		    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
			return -EINVAL;
	}
	if (vsi->num_queue_pairs <
	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
		dev_err(&vsi->back->pdev->dev,
			"Failed to create traffic channel, insufficient number of queues.\n");
		return -EINVAL;
	}
	if (sum_max_rate > i40e_get_link_speed(vsi)) {
		dev_err(&vsi->back->pdev->dev,
			"Invalid max tx rate specified\n");
		return -EINVAL;
	}
	return 0;
}
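
/* For illustration, a layout the validator above accepts with num_tc = 2:
 *
 *	qopt.offset = { 0, 4 }, qopt.count = { 4, 4 }
 *
 * i.e. contiguous ranges where offset[i + 1] == offset[i] + count[i],
 * every min_rate entry is zero and vsi->num_queue_pairs >= 8.
 */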

/**
 * i40e_vsi_set_default_tc_config - set default values for tc configuration
 * @vsi: the VSI being configured
 **/
static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
{
	u16 qcount;
	int i;

	/* Only TC0 is enabled */
	vsi->tc_config.numtc = 1;
	vsi->tc_config.enabled_tc = 1;
	qcount = min_t(int, vsi->alloc_queue_pairs,
		       i40e_pf_get_max_q_per_tc(vsi->back));
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* For the TC that is not enabled set the offset to default
		 * queue and allocate one queue for the given TC.
		 */
		vsi->tc_config.tc_info[i].qoffset = 0;
		if (i == 0)
			vsi->tc_config.tc_info[i].qcount = qcount;
		else
			vsi->tc_config.tc_info[i].qcount = 1;
		vsi->tc_config.tc_info[i].netdev_tc = 0;
	}
}

/**
 * i40e_del_macvlan_filter
 * @hw: pointer to the HW structure
 * @seid: seid of the channel VSI
 * @macaddr: the mac address to apply as a filter
 * @aq_err: store the admin Q error
 *
 * This function deletes a mac filter on the channel VSI which serves as the
 * macvlan. Returns 0 on success.
 **/
static int i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
				   const u8 *macaddr, int *aq_err)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	int status;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	status = i40e_aq_remove_macvlan(hw, seid, &element, 1, NULL);
	*aq_err = hw->aq.asq_last_status;

	return status;
}

/**
 * i40e_add_macvlan_filter
 * @hw: pointer to the HW structure
 * @seid: seid of the channel VSI
 * @macaddr: the mac address to apply as a filter
 * @aq_err: store the admin Q error
 *
 * This function adds a mac filter on the channel VSI which serves as the
 * macvlan. Returns 0 on success.
 **/
static int i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
				   const u8 *macaddr, int *aq_err)
{
	struct i40e_aqc_add_macvlan_element_data element;
	u16 cmd_flags = 0;
	int status;

	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	element.queue_number = 0;
	element.match_method = I40E_AQC_MM_ERR_NO_RES;
	cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
	element.flags = cpu_to_le16(cmd_flags);
	status = i40e_aq_add_macvlan(hw, seid, &element, 1, NULL);
	*aq_err = hw->aq.asq_last_status;

	return status;
}

/**
 * i40e_reset_ch_rings - Reset the queue contexts in a channel
 * @vsi: the VSI we want to access
 * @ch: the channel we want to access
 */
static void i40e_reset_ch_rings(struct i40e_vsi *vsi, struct i40e_channel *ch)
{
	struct i40e_ring *tx_ring, *rx_ring;
	u16 pf_q;
	int i;

	for (i = 0; i < ch->num_queue_pairs; i++) {
		pf_q = ch->base_queue + i;
		tx_ring = vsi->tx_rings[pf_q];
		tx_ring->ch = NULL;
		rx_ring = vsi->rx_rings[pf_q];
		rx_ring->ch = NULL;
	}
}

/**
 * i40e_free_macvlan_channels
 * @vsi: the VSI we want to access
 *
 * This function frees the Qs of the channel VSI from
 * the stack and also deletes the channel VSIs which
 * serve as macvlans.
 */
static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch, *ch_tmp;
	int ret;

	if (list_empty(&vsi->macvlan_list))
		return;

	list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
		struct i40e_vsi *parent_vsi;

		if (i40e_is_channel_macvlan(ch)) {
			i40e_reset_ch_rings(vsi, ch);
			clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
			netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev);
			netdev_set_sb_channel(ch->fwd->netdev, 0);
			kfree(ch->fwd);
			ch->fwd = NULL;
		}

		list_del(&ch->list);
		parent_vsi = ch->parent_vsi;
		if (!parent_vsi || !ch->initialized) {
			kfree(ch);
			continue;
		}

		/* remove the VSI */
		ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
					     NULL);
		if (ret)
			dev_err(&vsi->back->pdev->dev,
				"unable to remove channel (%d) for parent VSI(%d)\n",
				ch->seid, parent_vsi->seid);
		kfree(ch);
	}
	vsi->macvlan_cnt = 0;
}

/**
 * i40e_fwd_ring_up - bring the macvlan device up
 * @vsi: the VSI we want to access
 * @vdev: macvlan netdevice
 * @fwd: the private fwd structure
 */
static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
			    struct i40e_fwd_adapter *fwd)
{
	struct i40e_channel *ch = NULL, *ch_tmp, *iter;
	int ret = 0, num_tc = 1, i, aq_err;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;

	/* Go through the list and find an available channel */
	list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) {
		if (!i40e_is_channel_macvlan(iter)) {
			iter->fwd = fwd;
			/* record configuration for macvlan interface in vdev */
			for (i = 0; i < num_tc; i++)
				netdev_bind_sb_channel_queue(vsi->netdev, vdev,
							     i,
							     iter->num_queue_pairs,
							     iter->base_queue);
			for (i = 0; i < iter->num_queue_pairs; i++) {
				struct i40e_ring *tx_ring, *rx_ring;
				u16 pf_q;

				pf_q = iter->base_queue + i;

				/* Get to TX ring ptr */
				tx_ring = vsi->tx_rings[pf_q];
				tx_ring->ch = iter;

				/* Get the RX ring ptr */
				rx_ring = vsi->rx_rings[pf_q];
				rx_ring->ch = iter;
			}
			ch = iter;
			break;
		}
	}

	if (!ch)
		return -EINVAL;

	/* Guarantee all rings are updated before we update the
	 * MAC address filter.
	 */
	wmb();

	/* Add a mac filter */
	ret = i40e_add_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err);
	if (ret) {
		/* if we cannot add the MAC rule then disable the offload */
		macvlan_release_l2fw_offload(vdev);
		for (i = 0; i < ch->num_queue_pairs; i++) {
			struct i40e_ring *rx_ring;
			u16 pf_q;

			pf_q = ch->base_queue + i;
			rx_ring = vsi->rx_rings[pf_q];
			rx_ring->netdev = NULL;
		}
		dev_info(&pf->pdev->dev,
			 "Error adding mac filter on macvlan err %pe, aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(hw, aq_err));
		netdev_err(vdev, "L2fwd offload disabled due to L2 filter error\n");
	}

	return ret;
}

/**
 * i40e_setup_macvlans - create the channels which will be macvlans
 * @vsi: the VSI we want to access
 * @macvlan_cnt: no. of macvlans to be setup
 * @qcnt: no. of Qs per macvlan
 * @vdev: macvlan netdevice
 */
static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
			       struct net_device *vdev)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	u16 sections, qmap, num_qps;
	struct i40e_channel *ch;
	int i, pow, ret = 0;
	u8 offset = 0;

	if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt)
		return -EINVAL;

	num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt);

	/* find the next higher power-of-2 of num queue pairs */
	pow = fls(roundup_pow_of_two(num_qps) - 1);
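	/* e.g. num_qps = 12: roundup_pow_of_two(12) = 16 and fls(15) = 4,
	 * so the TC queue count is encoded as 2^4 in qmap below
	 */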

	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
	       (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

	/* Setup context bits for the main VSI */
	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = vsi->seid;
	ctxt.pf_num = vsi->back->hw.pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	ctxt.info = vsi->info;
	ctxt.info.tc_mapping[0] = cpu_to_le16(qmap);
	ctxt.info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	ctxt.info.valid_sections |= cpu_to_le16(sections);

	/* Reconfigure RSS for main VSI with new max queue count */
	vsi->rss_size = max_t(u16, num_qps, qcnt);
	ret = i40e_vsi_config_rss(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed to reconfig RSS for num_queues (%u)\n",
			 vsi->rss_size);
		return ret;
	}
	vsi->reconfig_rss = true;
	dev_dbg(&vsi->back->pdev->dev,
		"Reconfigured RSS with num_queues (%u)\n", vsi->rss_size);
	vsi->next_base_queue = num_qps;
	vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps;

	/* Update the VSI after updating the VSI queue-mapping
	 * information
	 */
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Update vsi tc config failed, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(hw, hw->aq.asq_last_status));
		return ret;
	}
	/* update the local VSI info with updated queue map */
	i40e_vsi_update_queue_map(vsi, &ctxt);
	vsi->info.valid_sections = 0;

	/* Create channels for macvlans */
	INIT_LIST_HEAD(&vsi->macvlan_list);
	for (i = 0; i < macvlan_cnt; i++) {
		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
		if (!ch) {
			ret = -ENOMEM;
			goto err_free;
		}
		INIT_LIST_HEAD(&ch->list);
		ch->num_queue_pairs = qcnt;
		if (!i40e_setup_channel(pf, vsi, ch)) {
			ret = -EINVAL;
			kfree(ch);
			goto err_free;
		}
		ch->parent_vsi = vsi;
		vsi->cnt_q_avail -= ch->num_queue_pairs;
		vsi->macvlan_cnt++;
		list_add_tail(&ch->list, &vsi->macvlan_list);
	}

	return ret;

err_free:
	dev_info(&pf->pdev->dev, "Failed to setup macvlans\n");
	i40e_free_macvlan_channels(vsi);

	return ret;
}

/**
 * i40e_fwd_add - configure macvlans
 * @netdev: net device to configure
 * @vdev: macvlan netdevice
 **/
static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	u16 q_per_macvlan = 0, macvlan_cnt = 0, vectors;
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_fwd_adapter *fwd;
	int avail_macvlan, ret;

	if ((pf->flags & I40E_FLAG_DCB_ENABLED)) {
		netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n");
		return ERR_PTR(-EINVAL);
	}
	if (i40e_is_tc_mqprio_enabled(pf)) {
		netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n");
		return ERR_PTR(-EINVAL);
	}
	if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) {
		netdev_info(netdev, "Not enough vectors available to support macvlans\n");
		return ERR_PTR(-EINVAL);
	}

	/* The macvlan device has to be a single Q device so that the
	 * tc_to_txq field can be reused to pick the tx queue.
	 */
	if (netif_is_multiqueue(vdev))
		return ERR_PTR(-ERANGE);

	if (!vsi->macvlan_cnt) {
		/* reserve bit 0 for the pf device */
		set_bit(0, vsi->fwd_bitmask);

		/* Try to reserve as many queues as possible for macvlans:
		 * first reserve 3/4 of the max vectors, then half, then a
		 * quarter, computing the Qs per macvlan as we go
		 */
		vectors = pf->num_lan_msix;
		if (vectors <= I40E_MAX_MACVLANS && vectors > 64) {
			/* allocate 4 Qs per macvlan and 32 Qs to the PF */
			q_per_macvlan = 4;
			macvlan_cnt = (vectors - 32) / 4;
		} else if (vectors <= 64 && vectors > 32) {
			/* allocate 2 Qs per macvlan and 16 Qs to the PF */
			q_per_macvlan = 2;
			macvlan_cnt = (vectors - 16) / 2;
		} else if (vectors <= 32 && vectors > 16) {
			/* allocate 1 Q per macvlan and 16 Qs to the PF */
			q_per_macvlan = 1;
			macvlan_cnt = vectors - 16;
		} else if (vectors <= 16 && vectors > 8) {
			/* allocate 1 Q per macvlan and 8 Qs to the PF */
			q_per_macvlan = 1;
			macvlan_cnt = vectors - 8;
		} else {
			/* allocate 1 Q per macvlan and 1 Q to the PF */
			q_per_macvlan = 1;
			macvlan_cnt = vectors - 1;
		}
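		/* Worked example: vectors = 48 takes the "<= 64 && > 32"
		 * branch, giving q_per_macvlan = 2 and
		 * macvlan_cnt = (48 - 16) / 2 = 16.
		 */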

		if (macvlan_cnt == 0)
			return ERR_PTR(-EBUSY);

		/* Quiesce VSI queues */
		i40e_quiesce_vsi(vsi);

		/* sets up the macvlans but does not "enable" them */
		ret = i40e_setup_macvlans(vsi, macvlan_cnt, q_per_macvlan,
					  vdev);
		if (ret)
			return ERR_PTR(ret);

		/* Unquiesce VSI */
		i40e_unquiesce_vsi(vsi);
	}
	avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask,
					    vsi->macvlan_cnt);
	if (avail_macvlan >= I40E_MAX_MACVLANS)
		return ERR_PTR(-EBUSY);

	/* create the fwd struct */
	fwd = kzalloc(sizeof(*fwd), GFP_KERNEL);
	if (!fwd)
		return ERR_PTR(-ENOMEM);

	set_bit(avail_macvlan, vsi->fwd_bitmask);
	fwd->bit_no = avail_macvlan;
	netdev_set_sb_channel(vdev, avail_macvlan);
	fwd->netdev = vdev;

	if (!netif_running(netdev))
		return fwd;

	/* Set fwd ring up */
	ret = i40e_fwd_ring_up(vsi, vdev, fwd);
	if (ret) {
		/* unbind the queues and drop the subordinate channel config */
		netdev_unbind_sb_channel(netdev, vdev);
		netdev_set_sb_channel(vdev, 0);

		kfree(fwd);
		return ERR_PTR(-EINVAL);
	}

	return fwd;
}

/**
 * i40e_del_all_macvlans - Delete all the mac filters on the channels
 * @vsi: the VSI we want to access
 */
static void i40e_del_all_macvlans(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch, *ch_tmp;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int aq_err, ret = 0;

	if (list_empty(&vsi->macvlan_list))
		return;

	list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
		if (i40e_is_channel_macvlan(ch)) {
			ret = i40e_del_macvlan_filter(hw, ch->seid,
						      i40e_channel_mac(ch),
						      &aq_err);
			if (!ret) {
				/* Reset queue contexts */
				i40e_reset_ch_rings(vsi, ch);
				clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
				netdev_unbind_sb_channel(vsi->netdev,
							 ch->fwd->netdev);
				netdev_set_sb_channel(ch->fwd->netdev, 0);
				kfree(ch->fwd);
				ch->fwd = NULL;
			}
		}
	}
}

/**
 * i40e_fwd_del - delete macvlan interfaces
 * @netdev: net device to configure
 * @vdev: macvlan netdevice
 */
static void i40e_fwd_del(struct net_device *netdev, void *vdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_fwd_adapter *fwd = vdev;
	struct i40e_channel *ch, *ch_tmp;
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int aq_err, ret = 0;

	/* Find the channel associated with the macvlan and del mac filter */
	list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
		if (i40e_is_channel_macvlan(ch) &&
		    ether_addr_equal(i40e_channel_mac(ch),
				     fwd->netdev->dev_addr)) {
			ret = i40e_del_macvlan_filter(hw, ch->seid,
						      i40e_channel_mac(ch),
						      &aq_err);
			if (!ret) {
				/* Reset queue contexts */
				i40e_reset_ch_rings(vsi, ch);
				clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask);
				netdev_unbind_sb_channel(netdev, fwd->netdev);
				netdev_set_sb_channel(fwd->netdev, 0);
				kfree(ch->fwd);
				ch->fwd = NULL;
			} else {
				dev_info(&pf->pdev->dev,
					 "Error deleting mac filter on macvlan err %pe, aq_err %s\n",
					 ERR_PTR(ret),
					 i40e_aq_str(hw, aq_err));
			}
			break;
		}
	}
}

/**
 * i40e_setup_tc - configure multiple traffic classes
 * @netdev: net device to configure
 * @type_data: tc offload data
 **/
static int i40e_setup_tc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u8 enabled_tc = 0, num_tc, hw;
	bool need_reset = false;
	int old_queue_pairs;
	int ret = -EINVAL;
	u16 mode;
	int i;

	old_queue_pairs = vsi->num_queue_pairs;
	num_tc = mqprio_qopt->qopt.num_tc;
	hw = mqprio_qopt->qopt.hw;
	mode = mqprio_qopt->mode;
	if (!hw) {
		pf->flags &= ~I40E_FLAG_TC_MQPRIO;
		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
		goto config_tc;
	}

	/* Check if MFP enabled */
	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		netdev_info(netdev,
			    "Configuring TC not supported in MFP mode\n");
		return ret;
	}
	switch (mode) {
	case TC_MQPRIO_MODE_DCB:
		pf->flags &= ~I40E_FLAG_TC_MQPRIO;

		/* Check if DCB enabled to continue */
		if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
			netdev_info(netdev,
				    "DCB is not enabled for adapter\n");
			return ret;
		}

		/* Check whether tc count is within enabled limit */
		if (num_tc > i40e_pf_get_num_tc(pf)) {
			netdev_info(netdev,
				    "TC count greater than enabled on link for adapter\n");
			return ret;
		}
		break;
	case TC_MQPRIO_MODE_CHANNEL:
		if (pf->flags & I40E_FLAG_DCB_ENABLED) {
			netdev_info(netdev,
				    "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
			return ret;
		}
		if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
			return ret;
		ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
		if (ret)
			return ret;
		memcpy(&vsi->mqprio_qopt, mqprio_qopt,
		       sizeof(*mqprio_qopt));
		pf->flags |= I40E_FLAG_TC_MQPRIO;
		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
		break;
	default:
		return -EINVAL;
	}

config_tc:
	/* Generate TC map for number of tc requested */
	for (i = 0; i < num_tc; i++)
		enabled_tc |= BIT(i);
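	/* e.g. num_tc = 3 yields enabled_tc = 0x7 (TC0..TC2 enabled) */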

	/* Requesting same TC configuration as already enabled */
	if (enabled_tc == vsi->tc_config.enabled_tc &&
	    mode != TC_MQPRIO_MODE_CHANNEL)
		return 0;

	/* Quiesce VSI queues */
	i40e_quiesce_vsi(vsi);

	if (!hw && !i40e_is_tc_mqprio_enabled(pf))
		i40e_remove_queue_channels(vsi);

	/* Configure VSI for enabled TCs */
	ret = i40e_vsi_config_tc(vsi, enabled_tc);
	if (ret) {
		netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
			    vsi->seid);
		need_reset = true;
		goto exit;
	} else if (enabled_tc &&
		   (!is_power_of_2(vsi->tc_config.tc_info[0].qcount))) {
		netdev_info(netdev,
			    "Failed to create channel. Override queues (%u) not power of 2\n",
			    vsi->tc_config.tc_info[0].qcount);
		ret = -EINVAL;
		need_reset = true;
		goto exit;
	}

	dev_info(&vsi->back->pdev->dev,
		 "Setup channel (id:%u) utilizing num_queues %d\n",
		 vsi->seid, vsi->tc_config.tc_info[0].qcount);

	if (i40e_is_tc_mqprio_enabled(pf)) {
		if (vsi->mqprio_qopt.max_rate[0]) {
			u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
						vsi->mqprio_qopt.max_rate[0]);

			ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
			if (!ret) {
				u64 credits = max_tx_rate;

				do_div(credits, I40E_BW_CREDIT_DIVISOR);
				dev_dbg(&vsi->back->pdev->dev,
					"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
					max_tx_rate,
					credits,
					vsi->seid);
			} else {
				need_reset = true;
				goto exit;
			}
		}
		ret = i40e_configure_queue_channels(vsi);
		if (ret) {
			vsi->num_queue_pairs = old_queue_pairs;
			netdev_info(netdev,
				    "Failed configuring queue channels\n");
			need_reset = true;
			goto exit;
		}
	}

exit:
	/* Reset the configuration data to defaults, only TC0 is enabled */
	if (need_reset) {
		i40e_vsi_set_default_tc_config(vsi);
		need_reset = false;
	}

	/* Unquiesce VSI */
	i40e_unquiesce_vsi(vsi);
	return ret;
}

/**
 * i40e_set_cld_element - sets cloud filter element data
 * @filter: cloud filter rule
 * @cld: ptr to cloud filter element data
 *
 * This is a helper function to copy data into the cloud filter element.
 **/
static inline void
i40e_set_cld_element(struct i40e_cloud_filter *filter,
		     struct i40e_aqc_cloud_filters_element_data *cld)
{
	u32 ipa;
	int i;

	memset(cld, 0, sizeof(*cld));
	ether_addr_copy(cld->outer_mac, filter->dst_mac);
	ether_addr_copy(cld->inner_mac, filter->src_mac);

	if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
		return;

	if (filter->n_proto == ETH_P_IPV6) {
#define IPV6_MAX_INDEX	(ARRAY_SIZE(filter->dst_ipv6) - 1)
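		/* The loop below copies LSW-first: iteration i reads
		 * big-endian word IPV6_MAX_INDEX - i and stores it in
		 * little-endian slot i of the raw v6 buffer.
		 */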
		for (i = 0; i < ARRAY_SIZE(filter->dst_ipv6); i++) {
			ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);

			*(__le32 *)&cld->ipaddr.raw_v6.data[i * 2] = cpu_to_le32(ipa);
		}
	} else {
		ipa = be32_to_cpu(filter->dst_ipv4);

		memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
	}

	cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));

	/* tenant_id is not supported by FW now, once the support is enabled
	 * fill the cld->tenant_id with cpu_to_le32(filter->tenant_id)
	 */
	if (filter->tenant_id)
		return;
}

/**
 * i40e_add_del_cloud_filter - Add/del cloud filter
 * @vsi: pointer to VSI
 * @filter: cloud filter rule
 * @add: if true, add, if false, delete
 *
 * Add or delete a cloud filter for a specific flow spec.
 * Returns 0 if the filter was successfully added.
 **/
int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
			      struct i40e_cloud_filter *filter, bool add)
{
	struct i40e_aqc_cloud_filters_element_data cld_filter;
	struct i40e_pf *pf = vsi->back;
	int ret;
	static const u16 flag_table[128] = {
		[I40E_CLOUD_FILTER_FLAGS_OMAC] =
			I40E_AQC_ADD_CLOUD_FILTER_OMAC,
		[I40E_CLOUD_FILTER_FLAGS_IMAC] =
			I40E_AQC_ADD_CLOUD_FILTER_IMAC,
		[I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] =
			I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
		[I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
			I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
		[I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
			I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
		[I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
			I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
		[I40E_CLOUD_FILTER_FLAGS_IIP] =
			I40E_AQC_ADD_CLOUD_FILTER_IIP,
	};
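	/* flag_table maps each I40E_CLOUD_FILTER_FLAGS_* combination to the
	 * matching admin-queue ADD_CLOUD_FILTER encoding; indexes without an
	 * initializer above are simply zero.
	 */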

	if (filter->flags >= ARRAY_SIZE(flag_table))
		return -EIO;

	memset(&cld_filter, 0, sizeof(cld_filter));

	/* copy element needed to add cloud filter from filter */
	i40e_set_cld_element(filter, &cld_filter);

	if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
		cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
					       I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);

	if (filter->n_proto == ETH_P_IPV6)
		cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
						I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
	else
		cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
						I40E_AQC_ADD_CLOUD_FLAGS_IPV4);

	if (add)
		ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
						&cld_filter, 1);
	else
		ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
						&cld_filter, 1);
	if (ret)
		dev_dbg(&pf->pdev->dev,
			"Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
			add ? "add" : "delete", filter->dst_port, ret,
			pf->hw.aq.asq_last_status);
	else
		dev_info(&pf->pdev->dev,
			 "%s cloud filter for VSI: %d\n",
			 add ? "Added" : "Deleted", filter->seid);
	return ret;
}

/**
 * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
 * @vsi: pointer to VSI
 * @filter: cloud filter rule
 * @add: if true, add, if false, delete
 *
 * Add or delete a cloud filter for a specific flow spec using big buffer.
 * Returns 0 if the filter was successfully added.
 **/
int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
				      struct i40e_cloud_filter *filter,
				      bool add)
{
	struct i40e_aqc_cloud_filters_element_bb cld_filter;
	struct i40e_pf *pf = vsi->back;
	int ret;

	/* Both (src/dst) valid mac_addr are not supported */
	if ((is_valid_ether_addr(filter->dst_mac) &&
	     is_valid_ether_addr(filter->src_mac)) ||
	    (is_multicast_ether_addr(filter->dst_mac) &&
	     is_multicast_ether_addr(filter->src_mac)))
		return -EOPNOTSUPP;

	/* Big buffer cloud filter needs 'L4 port' to be non-zero. Also,
	 * UDP ports are not currently supported via the big buffer.
	 */
	if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
		return -EOPNOTSUPP;

	/* adding filter using src_port/src_ip is not supported at this stage */
	if (filter->src_port ||
	    (filter->src_ipv4 && filter->n_proto != ETH_P_IPV6) ||
	    !ipv6_addr_any(&filter->ip.v6.src_ip6))
		return -EOPNOTSUPP;

	memset(&cld_filter, 0, sizeof(cld_filter));

	/* copy element needed to add cloud filter from filter */
	i40e_set_cld_element(filter, &cld_filter.element);

	if (is_valid_ether_addr(filter->dst_mac) ||
	    is_valid_ether_addr(filter->src_mac) ||
	    is_multicast_ether_addr(filter->dst_mac) ||
	    is_multicast_ether_addr(filter->src_mac)) {
		/* MAC + IP : unsupported mode */
		if (filter->dst_ipv4)
			return -EOPNOTSUPP;

		/* since we validated that L4 port must be valid before
		 * we get here, start with respective "flags" value
		 * and update if vlan is present or not
		 */
		cld_filter.element.flags =
			cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);

		if (filter->vlan_id) {
			cld_filter.element.flags =
				cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
		}

	} else if ((filter->dst_ipv4 && filter->n_proto != ETH_P_IPV6) ||
		   !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
		cld_filter.element.flags =
			cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
		if (filter->n_proto == ETH_P_IPV6)
			cld_filter.element.flags |=
				cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
		else
			cld_filter.element.flags |=
				cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
	} else {
		dev_err(&pf->pdev->dev,
			"either mac or ip has to be valid for cloud filter\n");
		return -EINVAL;
	}

	/* Now copy L4 port in Byte 6..7 in general fields */
	cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
		be16_to_cpu(filter->dst_port);

	if (add) {
		/* Validate current device switch mode, change if necessary */
		ret = i40e_validate_and_set_switch_mode(vsi);
		if (ret) {
			dev_err(&pf->pdev->dev,
				"failed to set switch mode, ret %d\n",
				ret);
			return ret;
		}

		ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
						   &cld_filter, 1);
	} else {
		ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
						   &cld_filter, 1);
	}

	if (ret)
		dev_dbg(&pf->pdev->dev,
			"Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
			add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
	else
		dev_info(&pf->pdev->dev,
			 "%s cloud filter for VSI: %d, L4 port: %d\n",
			 add ? "add" : "delete", filter->seid,
			 ntohs(filter->dst_port));
	return ret;
}

/**
 * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
 * @vsi: Pointer to VSI
 * @f: Pointer to struct flow_cls_offload
 * @filter: Pointer to cloud filter structure
 *
 **/
static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
				 struct flow_cls_offload *f,
				 struct i40e_cloud_filter *filter)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
	struct i40e_pf *pf = vsi->back;
	u8 field_flags = 0;

	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
		dev_err(&pf->pdev->dev, "Unsupported key used: 0x%llx\n",
			dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match;

		flow_rule_match_enc_keyid(rule, &match);
		if (match.mask->keyid != 0)
			field_flags |= I40E_CLOUD_FIELD_TEN_ID;

		filter->tenant_id = be32_to_cpu(match.key->keyid);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		filter->n_proto = n_proto_key & n_proto_mask;
		filter->ip_proto = match.key->ip_proto;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);

		/* use is_broadcast and is_zero to check for all 0xf or 0 */
		if (!is_zero_ether_addr(match.mask->dst)) {
			if (is_broadcast_ether_addr(match.mask->dst)) {
				field_flags |= I40E_CLOUD_FIELD_OMAC;
			} else {
				dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
					match.mask->dst);
				return -EIO;
			}
		}

		if (!is_zero_ether_addr(match.mask->src)) {
			if (is_broadcast_ether_addr(match.mask->src)) {
				field_flags |= I40E_CLOUD_FIELD_IMAC;
			} else {
				dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
					match.mask->src);
				return -EIO;
			}
		}
		ether_addr_copy(filter->dst_mac, match.key->dst);
		ether_addr_copy(filter->src_mac, match.key->src);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (match.mask->vlan_id) {
			if (match.mask->vlan_id == VLAN_VID_MASK) {
				field_flags |= I40E_CLOUD_FIELD_IVLAN;

			} else {
				dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
					match.mask->vlan_id);
				return -EIO;
			}
		}

		filter->vlan_id = cpu_to_be16(match.key->vlan_id);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		if (match.mask->dst) {
			if (match.mask->dst == cpu_to_be32(0xffffffff)) {
				field_flags |= I40E_CLOUD_FIELD_IIP;
			} else {
				dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
					&match.mask->dst);
				return -EIO;
			}
		}

		if (match.mask->src) {
			if (match.mask->src == cpu_to_be32(0xffffffff)) {
				field_flags |= I40E_CLOUD_FIELD_IIP;
			} else {
				dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
					&match.mask->src);
				return -EIO;
			}
		}

		if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
			dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
			return -EIO;
		}
		filter->dst_ipv4 = match.key->dst;
		filter->src_ipv4 = match.key->src;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);

		/* src and dest IPV6 address should not be LOOPBACK
		 * (0:0:0:0:0:0:0:1), which can be represented as ::1
		 */
		if (ipv6_addr_loopback(&match.key->dst) ||
		    ipv6_addr_loopback(&match.key->src)) {
			dev_err(&pf->pdev->dev,
				"Bad ipv6, addr is LOOPBACK\n");
			return -EIO;
		}
		if (!ipv6_addr_any(&match.mask->dst) ||
		    !ipv6_addr_any(&match.mask->src))
			field_flags |= I40E_CLOUD_FIELD_IIP;

		memcpy(&filter->src_ipv6, &match.key->src.s6_addr32,
		       sizeof(filter->src_ipv6));
		memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32,
		       sizeof(filter->dst_ipv6));
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		if (match.mask->src) {
			if (match.mask->src == cpu_to_be16(0xffff)) {
				field_flags |= I40E_CLOUD_FIELD_IIP;
			} else {
				dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
					be16_to_cpu(match.mask->src));
				return -EIO;
			}
		}

		if (match.mask->dst) {
			if (match.mask->dst == cpu_to_be16(0xffff)) {
				field_flags |= I40E_CLOUD_FIELD_IIP;
			} else {
				dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
					be16_to_cpu(match.mask->dst));
				return -EIO;
			}
		}

		filter->dst_port = match.key->dst;
		filter->src_port = match.key->src;

		switch (filter->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
			break;
		default:
			dev_err(&pf->pdev->dev,
				"Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}
	}
	filter->flags = field_flags;
	return 0;
}
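
/* Illustrative (hypothetical) tc-flower rule the parser above can digest:
 *
 *	tc filter add dev ethX ingress protocol ip flower \
 *		dst_ip 192.168.1.1 ip_proto tcp dst_port 80 skip_sw hw_tc 1
 *
 * i.e. an exact IPv4 destination plus L4 port match directed at TC 1.
 */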

/**
 * i40e_handle_tclass: Forward to a traffic class on the device
 * @vsi: Pointer to VSI
 * @tc: traffic class index on the device
 * @filter: Pointer to cloud filter structure
 *
 **/
static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
			      struct i40e_cloud_filter *filter)
{
	struct i40e_channel *ch, *ch_tmp;

	/* direct to a traffic class on the same device */
	if (tc == 0) {
		filter->seid = vsi->seid;
		return 0;
	} else if (vsi->tc_config.enabled_tc & BIT(tc)) {
		if (!filter->dst_port) {
			dev_err(&vsi->back->pdev->dev,
				"Specify destination port to direct to traffic class that is not default\n");
			return -EINVAL;
		}
		if (list_empty(&vsi->ch_list))
			return -EINVAL;
		list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
					 list) {
			if (ch->seid == vsi->tc_seid_map[tc])
				filter->seid = ch->seid;
		}
		return 0;
	}
	dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
	return -EINVAL;
}

/**
 * i40e_configure_clsflower - Configure tc flower filters
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to struct flow_cls_offload
 *
 **/
static int i40e_configure_clsflower(struct i40e_vsi *vsi,
				    struct flow_cls_offload *cls_flower)
{
	int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
	struct i40e_cloud_filter *filter = NULL;
	struct i40e_pf *pf = vsi->back;
	int err = 0;

	if (tc < 0) {
		dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
		return -EOPNOTSUPP;
	}

	if (!tc) {
		dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination");
		return -EINVAL;
	}

	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
	    test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
		return -EBUSY;

	if (pf->fdir_pf_active_filters ||
	    (!hlist_empty(&pf->fdir_filter_list))) {
		dev_err(&vsi->back->pdev->dev,
8794 "Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n");
		return -EINVAL;
	}

	if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
		dev_err(&vsi->back->pdev->dev,
			"Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
		vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
		vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
	}

	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return -ENOMEM;

	filter->cookie = cls_flower->cookie;

	err = i40e_parse_cls_flower(vsi, cls_flower, filter);
	if (err < 0)
		goto err;

	err = i40e_handle_tclass(vsi, tc, filter);
	if (err < 0)
		goto err;

	/* Add cloud filter */
	if (filter->dst_port)
		err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
	else
		err = i40e_add_del_cloud_filter(vsi, filter, true);

	if (err) {
		dev_err(&pf->pdev->dev, "Failed to add cloud filter, err %d\n",
			err);
		goto err;
	}

	/* add filter to the ordered list */
	INIT_HLIST_NODE(&filter->cloud_node);

	hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);

	pf->num_cloud_filters++;

	return err;
err:
	kfree(filter);
	return err;
}

/**
 * i40e_find_cloud_filter - Find the cloud filter in the list
8846 * @vsi: Pointer to VSI
8847 * @cookie: filter specific cookie
8848 *
8849 **/
8850static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
8851 unsigned long *cookie)
8852{
8853 struct i40e_cloud_filter *filter = NULL;
8854 struct hlist_node *node2;
8855
8856 hlist_for_each_entry_safe(filter, node2,
8857 &vsi->back->cloud_filter_list, cloud_node)
8858 if (!memcmp(p: cookie, q: &filter->cookie, size: sizeof(filter->cookie)))
8859 return filter;
8860 return NULL;
8861}
8862
8863/**
8864 * i40e_delete_clsflower - Remove tc flower filters
8865 * @vsi: Pointer to VSI
8866 * @cls_flower: Pointer to struct flow_cls_offload
8867 *
8868 **/
8869static int i40e_delete_clsflower(struct i40e_vsi *vsi,
8870 struct flow_cls_offload *cls_flower)
8871{
8872 struct i40e_cloud_filter *filter = NULL;
8873 struct i40e_pf *pf = vsi->back;
8874 int err = 0;
8875
8876 filter = i40e_find_cloud_filter(vsi, cookie: &cls_flower->cookie);
8877
8878 if (!filter)
8879 return -EINVAL;
8880
8881 hash_del(node: &filter->cloud_node);
8882
8883 if (filter->dst_port)
8884 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, add: false);
8885 else
8886 err = i40e_add_del_cloud_filter(vsi, filter, add: false);
8887
8888 kfree(objp: filter);
8889 if (err) {
8890 dev_err(&pf->pdev->dev,
8891 "Failed to delete cloud filter, err %pe\n",
8892 ERR_PTR(err));
8893 return i40e_aq_rc_to_posix(aq_ret: err, aq_rc: pf->hw.aq.asq_last_status);
8894 }
8895
8896 pf->num_cloud_filters--;
8897 if (!pf->num_cloud_filters)
8898 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
8899 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
8900 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8901 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
8902 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
8903 }
8904 return 0;
8905}
8906
8907/**
8908 * i40e_setup_tc_cls_flower - flower classifier offloads
8909 * @np: net device to configure
8910 * @cls_flower: offload data
8911 **/
8912static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
8913 struct flow_cls_offload *cls_flower)
8914{
8915 struct i40e_vsi *vsi = np->vsi;
8916
8917 switch (cls_flower->command) {
8918 case FLOW_CLS_REPLACE:
8919 return i40e_configure_clsflower(vsi, cls_flower);
8920 case FLOW_CLS_DESTROY:
8921 return i40e_delete_clsflower(vsi, cls_flower);
8922 case FLOW_CLS_STATS:
8923 return -EOPNOTSUPP;
8924 default:
8925 return -EOPNOTSUPP;
8926 }
8927}
8928
8929static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
8930 void *cb_priv)
8931{
8932 struct i40e_netdev_priv *np = cb_priv;
8933
8934 if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
8935 return -EOPNOTSUPP;
8936
8937 switch (type) {
8938 case TC_SETUP_CLSFLOWER:
8939 return i40e_setup_tc_cls_flower(np, type_data);
8940
8941 default:
8942 return -EOPNOTSUPP;
8943 }
8944}
8945
8946static LIST_HEAD(i40e_block_cb_list);
8947
8948static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
8949 void *type_data)
8950{
8951 struct i40e_netdev_priv *np = netdev_priv(netdev);
8952
8953 switch (type) {
8954 case TC_SETUP_QDISC_MQPRIO:
8955 return i40e_setup_tc(netdev, type_data);
8956 case TC_SETUP_BLOCK:
8957 return flow_block_cb_setup_simple(type_data,
8958 &i40e_block_cb_list,
8959 i40e_setup_tc_block_cb,
8960 np, np, true);
8961 default:
8962 return -EOPNOTSUPP;
8963 }
8964}
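
/* Example configurations exercising the two offload entry points above
 * (illustrative only):
 *
 *   # TC_SETUP_QDISC_MQPRIO, ADQ channel mode
 *   tc qdisc add dev <iface> root mqprio num_tc 2 \
 *           map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 mode channel
 *
 *   # TC_SETUP_BLOCK, flower offload via clsact
 *   tc qdisc add dev <iface> clsact
 *
 * flow_block_cb_setup_simple() is passed ingress_only == true, so
 * egress filter blocks are refused with -EOPNOTSUPP.
 */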
8965
8966/**
8967 * i40e_open - Called when a network interface is made active
8968 * @netdev: network interface device structure
8969 *
8970 * The open entry point is called when a network interface is made
8971 * active by the system (IFF_UP). At this point all resources needed
8972 * for transmit and receive operations are allocated, the interrupt
8973 * handler is registered with the OS, the netdev watchdog subtask is
8974 * enabled, and the stack is notified that the interface is ready.
8975 *
8976 * Returns 0 on success, negative value on failure
8977 **/
8978int i40e_open(struct net_device *netdev)
8979{
8980 struct i40e_netdev_priv *np = netdev_priv(netdev);
8981 struct i40e_vsi *vsi = np->vsi;
8982 struct i40e_pf *pf = vsi->back;
8983 int err;
8984
8985 /* disallow open during test or if eeprom is broken */
8986 if (test_bit(__I40E_TESTING, pf->state) ||
8987 test_bit(__I40E_BAD_EEPROM, pf->state))
8988 return -EBUSY;
8989
8990 netif_carrier_off(netdev);
8991
8992 if (i40e_force_link_state(pf, true))
8993 return -EAGAIN;
8994
8995 err = i40e_vsi_open(vsi);
8996 if (err)
8997 return err;
8998
8999 /* configure global TSO hardware offload settings */
9000 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
9001 TCP_FLAG_FIN) >> 16);
9002 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
9003 TCP_FLAG_FIN |
9004 TCP_FLAG_CWR) >> 16);
9005 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
9006 udp_tunnel_get_rx_info(netdev);
9007
9008 return 0;
9009}
9010
9011/**
9012 * i40e_netif_set_realnum_tx_rx_queues - Update number of tx/rx queues
9013 * @vsi: vsi structure
9014 *
9015 * This updates netdev's number of tx/rx queues
9016 *
9017 * Returns status of setting tx/rx queues
9018 **/
9019static int i40e_netif_set_realnum_tx_rx_queues(struct i40e_vsi *vsi)
9020{
9021 int ret;
9022
9023 ret = netif_set_real_num_rx_queues(vsi->netdev,
9024 vsi->num_queue_pairs);
9025 if (ret)
9026 return ret;
9027
9028 return netif_set_real_num_tx_queues(vsi->netdev,
9029 vsi->num_queue_pairs);
9030}
9031
9032/**
9033 * i40e_vsi_open - bring up and start a VSI
9034 * @vsi: the VSI to open
9035 *
9036 * Finish initialization of the VSI.
9037 *
9038 * Returns 0 on success, negative value on failure
9039 *
9040 * Note: expects to be called while under rtnl_lock()
9041 **/
9042int i40e_vsi_open(struct i40e_vsi *vsi)
9043{
9044 struct i40e_pf *pf = vsi->back;
9045 char int_name[I40E_INT_NAME_STR_LEN];
9046 int err;
9047
9048 /* allocate descriptors */
9049 err = i40e_vsi_setup_tx_resources(vsi);
9050 if (err)
9051 goto err_setup_tx;
9052 err = i40e_vsi_setup_rx_resources(vsi);
9053 if (err)
9054 goto err_setup_rx;
9055
9056 err = i40e_vsi_configure(vsi);
9057 if (err)
9058 goto err_setup_rx;
9059
9060 if (vsi->netdev) {
9061 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
9062 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
9063 err = i40e_vsi_request_irq(vsi, int_name);
9064 if (err)
9065 goto err_setup_rx;
9066
9067 /* Notify the stack of the actual queue counts. */
9068 err = i40e_netif_set_realnum_tx_rx_queues(vsi);
9069 if (err)
9070 goto err_set_queues;
9071
9072 } else if (vsi->type == I40E_VSI_FDIR) {
9073 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
9074 dev_driver_string(&pf->pdev->dev),
9075 dev_name(&pf->pdev->dev));
9076 err = i40e_vsi_request_irq(vsi, int_name);
9077 if (err)
9078 goto err_setup_rx;
9079
9080 } else {
9081 err = -EINVAL;
9082 goto err_setup_rx;
9083 }
9084
9085 err = i40e_up_complete(vsi);
9086 if (err)
9087 goto err_up_complete;
9088
9089 return 0;
9090
9091err_up_complete:
9092 i40e_down(vsi);
9093err_set_queues:
9094 i40e_vsi_free_irq(vsi);
9095err_setup_rx:
9096 i40e_vsi_free_rx_resources(vsi);
9097err_setup_tx:
9098 i40e_vsi_free_tx_resources(vsi);
9099 if (vsi == pf->vsi[pf->lan_vsi])
9100 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
9101
9102 return err;
9103}
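
/* The labels above unwind in reverse order of setup: a failure in
 * i40e_up_complete() brings the VSI back down, then the IRQs are freed,
 * then the Rx and Tx descriptor memory; if the failing VSI is the main
 * LAN VSI, a PF reset is requested to return to a known state.
 */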
9104
9105/**
9106 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
9107 * @pf: Pointer to PF
9108 *
9109 * This function destroys the hlist where all the Flow Director
9110 * filters were saved.
9111 **/
9112static void i40e_fdir_filter_exit(struct i40e_pf *pf)
9113{
9114 struct i40e_fdir_filter *filter;
9115 struct i40e_flex_pit *pit_entry, *tmp;
9116 struct hlist_node *node2;
9117
9118 hlist_for_each_entry_safe(filter, node2,
9119 &pf->fdir_filter_list, fdir_node) {
9120 hlist_del(&filter->fdir_node);
9121 kfree(filter);
9122 }
9123
9124 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
9125 list_del(&pit_entry->list);
9126 kfree(pit_entry);
9127 }
9128 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
9129
9130 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
9131 list_del(&pit_entry->list);
9132 kfree(pit_entry);
9133 }
9134 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
9135
9136 pf->fdir_pf_active_filters = 0;
9137 i40e_reset_fdir_filter_cnt(pf);
9138
9139 /* Reprogram the default input set for TCP/IPv4 */
9140 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
9141 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
9142 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9143
9144 /* Reprogram the default input set for TCP/IPv6 */
9145 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
9146 I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
9147 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9148
9149 /* Reprogram the default input set for UDP/IPv4 */
9150 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
9151 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
9152 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9153
9154 /* Reprogram the default input set for UDP/IPv6 */
9155 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
9156 I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
9157 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9158
9159 /* Reprogram the default input set for SCTP/IPv4 */
9160 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
9161 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
9162 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9163
9164 /* Reprogram the default input set for SCTP/IPv6 */
9165 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
9166 I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
9167 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9168
9169 /* Reprogram the default input set for Other/IPv4 */
9170 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
9171 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
9172
9173 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
9174 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
9175
9176 /* Reprogram the default input set for Other/IPv6 */
9177 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
9178 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
9179
9180 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV6,
9181 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
9182}
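
/* An "input set" above is just an OR of the field masks the hardware
 * keys on for a flow type; for TCP/IPv4, for example:
 *
 *   I40E_L3_SRC_MASK | I40E_L3_DST_MASK    source/destination IP
 *   I40E_L4_SRC_MASK | I40E_L4_DST_MASK    source/destination port
 *
 * Restoring the defaults matters because installed ntuple rules may
 * have narrowed these masks while they were active.
 */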
9183
9184/**
9185 * i40e_cloud_filter_exit - Cleans up the cloud filters
9186 * @pf: Pointer to PF
9187 *
9188 * This function destroys the hlist where all the cloud filters
9189 * were saved.
9190 **/
9191static void i40e_cloud_filter_exit(struct i40e_pf *pf)
9192{
9193 struct i40e_cloud_filter *cfilter;
9194 struct hlist_node *node;
9195
9196 hlist_for_each_entry_safe(cfilter, node,
9197 &pf->cloud_filter_list, cloud_node) {
9198 hlist_del(&cfilter->cloud_node);
9199 kfree(cfilter);
9200 }
9201 pf->num_cloud_filters = 0;
9202
9203 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
9204 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
9205 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
9206 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
9207 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
9208 }
9209}
9210
9211/**
9212 * i40e_close - Disables a network interface
9213 * @netdev: network interface device structure
9214 *
9215 * The close entry point is called when an interface is de-activated
9216 * by the OS. The hardware is still under the driver's control, but
9217 * this netdev interface is disabled.
9218 *
9219 * Returns 0, this is not allowed to fail
9220 **/
9221int i40e_close(struct net_device *netdev)
9222{
9223 struct i40e_netdev_priv *np = netdev_priv(dev: netdev);
9224 struct i40e_vsi *vsi = np->vsi;
9225
9226 i40e_vsi_close(vsi);
9227
9228 return 0;
9229}
9230
9231/**
9232 * i40e_do_reset - Start a PF or Core Reset sequence
9233 * @pf: board private structure
9234 * @reset_flags: which reset is requested
9235 * @lock_acquired: indicates whether or not the lock has been acquired
9236 * before this function was called.
9237 *
9238 * The essential difference in resets is that the PF Reset
9239 * doesn't clear the packet buffers, doesn't reset the PE
9240 * firmware, and doesn't bother the other PFs on the chip.
9241 **/
9242void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
9243{
9244 u32 val;
9245
9246 /* do the biggest reset indicated */
9247 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
9248
9249 /* Request a Global Reset
9250 *
9251 * This will start the chip's countdown to the actual full
9252 * chip reset event, and a warning interrupt to be sent
9253 * to all PFs, including the requestor. Our handler
9254 * for the warning interrupt will deal with the shutdown
9255 * and recovery of the switch setup.
9256 */
9257 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
9258 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
9259 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
9260 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
9261
9262 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
9263
9264 /* Request a Core Reset
9265 *
9266 * Same as Global Reset, except does *not* include the MAC/PHY
9267 */
9268 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
9269 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
9270 val |= I40E_GLGEN_RTRIG_CORER_MASK;
9271 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
9272 i40e_flush(&pf->hw);
9273
9274 } else if (reset_flags & I40E_PF_RESET_FLAG) {
9275
9276 /* Request a PF Reset
9277 *
9278 * Resets only the PF-specific registers
9279 *
9280 * This goes directly to the tear-down and rebuild of
9281 * the switch, since we need to do all the recovery as
9282 * for the Core Reset.
9283 */
9284 dev_dbg(&pf->pdev->dev, "PFR requested\n");
9285 i40e_handle_reset_warning(pf, lock_acquired);
9286
9287 } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
9288 /* Request a PF Reset
9289 *
9290 * Resets PF and reinitializes PFs VSI.
9291 */
9292 i40e_prep_for_reset(pf);
9293 i40e_reset_and_rebuild(pf, true, lock_acquired);
9294 dev_info(&pf->pdev->dev,
9295 pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
9296 "FW LLDP is disabled\n" :
9297 "FW LLDP is enabled\n");
9298
9299 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
9300 int v;
9301
9302 /* Find the VSI(s) that requested a re-init */
9303 dev_info(&pf->pdev->dev,
9304 "VSI reinit requested\n");
9305 for (v = 0; v < pf->num_alloc_vsi; v++) {
9306 struct i40e_vsi *vsi = pf->vsi[v];
9307
9308 if (vsi != NULL &&
9309 test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
9310 vsi->state))
9311 i40e_vsi_reinit_locked(pf->vsi[v]);
9312 }
9313 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
9314 int v;
9315
9316 /* Find the VSI(s) that need to be brought down */
9317 dev_info(&pf->pdev->dev, "VSI down requested\n");
9318 for (v = 0; v < pf->num_alloc_vsi; v++) {
9319 struct i40e_vsi *vsi = pf->vsi[v];
9320
9321 if (vsi != NULL &&
9322 test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
9323 vsi->state)) {
9324 set_bit(__I40E_VSI_DOWN, vsi->state);
9325 i40e_down(vsi);
9326 }
9327 }
9328 } else {
9329 dev_info(&pf->pdev->dev,
9330 "bad reset request 0x%08x\n", reset_flags);
9331 }
9332}
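
/* Rough scope of the resets dispatched above, widest first (see the
 * comments above and the datasheet for the authoritative definitions):
 *
 *   GLOBR - whole chip including MAC/PHY; all PFs receive a warning
 *   CORER - like GLOBR but leaves the MAC/PHY alone
 *   PFR   - this PF's registers only; packet buffers, PE firmware and
 *           the other PFs are untouched
 *   VSI reinit/down - no hardware reset, per-VSI rebuild only
 */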
9333
9334#ifdef CONFIG_I40E_DCB
9335/**
9336 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
9337 * @pf: board private structure
9338 * @old_cfg: current DCB config
9339 * @new_cfg: new DCB config
9340 **/
9341bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
9342 struct i40e_dcbx_config *old_cfg,
9343 struct i40e_dcbx_config *new_cfg)
9344{
9345 bool need_reconfig = false;
9346
9347 /* Check if ETS configuration has changed */
9348 if (memcmp(&new_cfg->etscfg,
9349 &old_cfg->etscfg,
9350 sizeof(new_cfg->etscfg))) {
9351 /* If the Priority Table has changed, reconfig is needed */
9352 if (memcmp(&new_cfg->etscfg.prioritytable,
9353 &old_cfg->etscfg.prioritytable,
9354 sizeof(new_cfg->etscfg.prioritytable))) {
9355 need_reconfig = true;
9356 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
9357 }
9358
9359 if (memcmp(&new_cfg->etscfg.tcbwtable,
9360 &old_cfg->etscfg.tcbwtable,
9361 sizeof(new_cfg->etscfg.tcbwtable)))
9362 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
9363
9364 if (memcmp(&new_cfg->etscfg.tsatable,
9365 &old_cfg->etscfg.tsatable,
9366 sizeof(new_cfg->etscfg.tsatable)))
9367 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
9368 }
9369
9370 /* Check if PFC configuration has changed */
9371 if (memcmp(&new_cfg->pfc,
9372 &old_cfg->pfc,
9373 sizeof(new_cfg->pfc))) {
9374 need_reconfig = true;
9375 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
9376 }
9377
9378 /* Check if APP Table has changed */
9379 if (memcmp(&new_cfg->app,
9380 &old_cfg->app,
9381 sizeof(new_cfg->app))) {
9382 need_reconfig = true;
9383 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
9384 }
9385
9386 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
9387 return need_reconfig;
9388}
9389
9390/**
9391 * i40e_handle_lldp_event - Handle LLDP Change MIB event
9392 * @pf: board private structure
9393 * @e: event info posted on ARQ
9394 **/
9395static int i40e_handle_lldp_event(struct i40e_pf *pf,
9396 struct i40e_arq_event_info *e)
9397{
9398 struct i40e_aqc_lldp_get_mib *mib =
9399 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
9400 struct i40e_hw *hw = &pf->hw;
9401 struct i40e_dcbx_config tmp_dcbx_cfg;
9402 bool need_reconfig = false;
9403 int ret = 0;
9404 u8 type;
9405
9406 /* X710-T*L 2.5G and 5G speeds don't support DCB */
9407 if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
9408 (hw->phy.link_info.link_speed &
9409 ~(I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB)) &&
9410 !(pf->flags & I40E_FLAG_DCB_CAPABLE))
9411 /* let firmware decide if the DCB should be disabled */
9412 pf->flags |= I40E_FLAG_DCB_CAPABLE;
9413
9414 /* Not DCB capable or capability disabled */
9415 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
9416 return ret;
9417
9418 /* Ignore if event is not for Nearest Bridge */
9419 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
9420 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
9421 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
9422 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
9423 return ret;
9424
9425 /* Check MIB Type and return if event for Remote MIB update */
9426 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
9427 dev_dbg(&pf->pdev->dev,
9428 "LLDP event mib type %s\n", type ? "remote" : "local");
9429 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
9430 /* Update the remote cached instance and return */
9431 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
9432 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
9433 &hw->remote_dcbx_config);
9434 goto exit;
9435 }
9436
9437 /* Store the old configuration */
9438 tmp_dcbx_cfg = hw->local_dcbx_config;
9439
9440 /* Reset the old DCBx configuration data */
9441 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
9442 /* Get updated DCBX data from firmware */
9443 ret = i40e_get_dcb_config(&pf->hw);
9444 if (ret) {
9445 /* X710-T*L 2.5G and 5G speeds don't support DCB */
9446 if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
9447 (hw->phy.link_info.link_speed &
9448 (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
9449 dev_warn(&pf->pdev->dev,
9450 "DCB is not supported for X710-T*L 2.5/5G speeds\n");
9451 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9452 } else {
9453 dev_info(&pf->pdev->dev,
9454 "Failed querying DCB configuration data from firmware, err %pe aq_err %s\n",
9455 ERR_PTR(ret),
9456 i40e_aq_str(&pf->hw,
9457 pf->hw.aq.asq_last_status));
9458 }
9459 goto exit;
9460 }
9461
9462 /* No change detected in DCBX configs */
9463 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
9464 sizeof(tmp_dcbx_cfg))) {
9465 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
9466 goto exit;
9467 }
9468
9469 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
9470 &hw->local_dcbx_config);
9471
9472 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
9473
9474 if (!need_reconfig)
9475 goto exit;
9476
9477 /* Enable DCB tagging only when more than one TC */
9478 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
9479 pf->flags |= I40E_FLAG_DCB_ENABLED;
9480 else
9481 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
9482
9483 set_bit(__I40E_PORT_SUSPENDED, pf->state);
9484 /* Reconfiguration needed, quiesce all VSIs */
9485 i40e_pf_quiesce_all_vsi(pf);
9486
9487 /* Changes in configuration update VEB/VSI */
9488 i40e_dcb_reconfigure(pf);
9489
9490 ret = i40e_resume_port_tx(pf);
9491
9492 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
9493 /* In case of error no point in resuming VSIs */
9494 if (ret)
9495 goto exit;
9496
9497 /* Wait for the PF's queues to be disabled */
9498 ret = i40e_pf_wait_queues_disabled(pf);
9499 if (ret) {
9500 /* Schedule PF reset to recover */
9501 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9502 i40e_service_event_schedule(pf);
9503 } else {
9504 i40e_pf_unquiesce_all_vsi(pf);
9505 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
9506 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
9507 }
9508
9509exit:
9510 return ret;
9511}
9512#endif /* CONFIG_I40E_DCB */
9513
9514/**
9515 * i40e_do_reset_safe - Protected reset path for userland calls.
9516 * @pf: board private structure
9517 * @reset_flags: which reset is requested
9518 *
9519 **/
9520void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
9521{
9522 rtnl_lock();
9523 i40e_do_reset(pf, reset_flags, true);
9524 rtnl_unlock();
9525}
9526
9527/**
9528 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
9529 * @pf: board private structure
9530 * @e: event info posted on ARQ
9531 *
9532 * Handler for LAN Queue Overflow Event generated by the firmware for PF
9533 * and VF queues
9534 **/
9535static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
9536 struct i40e_arq_event_info *e)
9537{
9538 struct i40e_aqc_lan_overflow *data =
9539 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
9540 u32 queue = le32_to_cpu(data->prtdcb_rupto);
9541 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
9542 struct i40e_hw *hw = &pf->hw;
9543 struct i40e_vf *vf;
9544 u16 vf_id;
9545
9546 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
9547 queue, qtx_ctl);
9548
9549 /* Queue belongs to VF, find the VF and issue VF reset */
9550 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
9551 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
9552 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
9553 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
9554 vf_id -= hw->func_caps.vf_base_id;
9555 vf = &pf->vf[vf_id];
9556 i40e_vc_notify_vf_reset(vf);
9557 /* Allow VF to process pending reset notification */
9558 msleep(20);
9559 i40e_reset_vf(vf, false);
9560 }
9561}
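
/* Example of the QTX_CTL decode above: when the PFVF_Q field marks the
 * queue as a VF queue, the VFVM index field carries an absolute VF
 * number, so subtracting vf_base_id yields the index into pf->vf[].
 */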
9562
9563/**
9564 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
9565 * @pf: board private structure
9566 **/
9567u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
9568{
9569 u32 val, fcnt_prog;
9570
9571 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
9572 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
9573 return fcnt_prog;
9574}
9575
9576/**
9577 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
9578 * @pf: board private structure
9579 **/
9580u32 i40e_get_current_fd_count(struct i40e_pf *pf)
9581{
9582 u32 val, fcnt_prog;
9583
9584 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
9585 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
9586 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
9587 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
9588 return fcnt_prog;
9589}
9590
9591/**
9592 * i40e_get_global_fd_count - Get total FD filters programmed on device
9593 * @pf: board private structure
9594 **/
9595u32 i40e_get_global_fd_count(struct i40e_pf *pf)
9596{
9597 u32 val, fcnt_prog;
9598
9599 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
9600 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
9601 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
9602 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
9603 return fcnt_prog;
9604}
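
/* Of the counter registers read above, PFQF_FDSTAT is per-PF while
 * GLQF_FDCNT_0 is device-wide; each splits the total into a guaranteed
 * bucket and a best-effort bucket shared by all functions.
 */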
9605
9606/**
9607 * i40e_reenable_fdir_sb - Restore FDir SB capability
9608 * @pf: board private structure
9609 **/
9610static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
9611{
9612 if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
9613 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
9614 (I40E_DEBUG_FD & pf->hw.debug_mask))
9615 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
9616}
9617
9618/**
9619 * i40e_reenable_fdir_atr - Restore FDir ATR capability
9620 * @pf: board private structure
9621 **/
9622static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
9623{
9624 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
9625 /* ATR uses the same filtering logic as SB rules. It only
9626 * functions properly if the input set mask is at the default
9627 * settings. It is safe to restore the default input set
9628 * because there are no active TCPv4 filter rules.
9629 */
9630 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
9631 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
9632 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
9633
9634 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
9635 (I40E_DEBUG_FD & pf->hw.debug_mask))
9636 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
9637 }
9638}
9639
9640/**
9641 * i40e_delete_invalid_filter - Delete an invalid FDIR filter
9642 * @pf: board private structure
9643 * @filter: FDir filter to remove
9644 */
9645static void i40e_delete_invalid_filter(struct i40e_pf *pf,
9646 struct i40e_fdir_filter *filter)
9647{
9648 /* Update counters */
9649 pf->fdir_pf_active_filters--;
9650 pf->fd_inv = 0;
9651
9652 switch (filter->flow_type) {
9653 case TCP_V4_FLOW:
9654 pf->fd_tcp4_filter_cnt--;
9655 break;
9656 case UDP_V4_FLOW:
9657 pf->fd_udp4_filter_cnt--;
9658 break;
9659 case SCTP_V4_FLOW:
9660 pf->fd_sctp4_filter_cnt--;
9661 break;
9662 case TCP_V6_FLOW:
9663 pf->fd_tcp6_filter_cnt--;
9664 break;
9665 case UDP_V6_FLOW:
9666 pf->fd_udp6_filter_cnt--;
9667 break;
9668 case SCTP_V6_FLOW:
9669 pf->fd_sctp6_filter_cnt--;
9670 break;
9671 case IP_USER_FLOW:
9672 switch (filter->ipl4_proto) {
9673 case IPPROTO_TCP:
9674 pf->fd_tcp4_filter_cnt--;
9675 break;
9676 case IPPROTO_UDP:
9677 pf->fd_udp4_filter_cnt--;
9678 break;
9679 case IPPROTO_SCTP:
9680 pf->fd_sctp4_filter_cnt--;
9681 break;
9682 case IPPROTO_IP:
9683 pf->fd_ip4_filter_cnt--;
9684 break;
9685 }
9686 break;
9687 case IPV6_USER_FLOW:
9688 switch (filter->ipl4_proto) {
9689 case IPPROTO_TCP:
9690 pf->fd_tcp6_filter_cnt--;
9691 break;
9692 case IPPROTO_UDP:
9693 pf->fd_udp6_filter_cnt--;
9694 break;
9695 case IPPROTO_SCTP:
9696 pf->fd_sctp6_filter_cnt--;
9697 break;
9698 case IPPROTO_IP:
9699 pf->fd_ip6_filter_cnt--;
9700 break;
9701 }
9702 break;
9703 }
9704
9705 /* Remove the filter from the list and free memory */
9706 hlist_del(&filter->fdir_node);
9707 kfree(filter);
9708}
9709
9710/**
9711 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
9712 * @pf: board private structure
9713 **/
9714void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
9715{
9716 struct i40e_fdir_filter *filter;
9717 u32 fcnt_prog, fcnt_avail;
9718 struct hlist_node *node;
9719
9720 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
9721 return;
9722
9723 /* Check if we have enough room to re-enable FDir SB capability. */
9724 fcnt_prog = i40e_get_global_fd_count(pf);
9725 fcnt_avail = pf->fdir_pf_filter_count;
9726 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
9727 (pf->fd_add_err == 0) ||
9728 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
9729 i40e_reenable_fdir_sb(pf);
9730
9731 /* We should wait for even more space before re-enabling ATR.
9732 * Additionally, we cannot enable ATR as long as we still have TCP SB
9733 * rules active.
9734 */
9735 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
9736 pf->fd_tcp4_filter_cnt == 0 && pf->fd_tcp6_filter_cnt == 0)
9737 i40e_reenable_fdir_atr(pf);
9738
9739 /* if hw had a problem adding a filter, delete it */
9740 if (pf->fd_inv > 0) {
9741 hlist_for_each_entry_safe(filter, node,
9742 &pf->fdir_filter_list, fdir_node)
9743 if (filter->fd_id == pf->fd_inv)
9744 i40e_delete_invalid_filter(pf, filter);
9745 }
9746}
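
/* Worked example of the headroom checks above, assuming the upstream
 * values I40E_FDIR_BUFFER_HEAD_ROOM == 32 and four times that for the
 * ATR headroom: with an 8192-entry table, sideband can come back once
 * fewer than 8160 filters are programmed, while ATR also needs usage
 * below 8064 and no remaining TCP sideband rules.
 */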
9747
9748#define I40E_MIN_FD_FLUSH_INTERVAL 10
9749#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
9750/**
9751 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
9752 * @pf: board private structure
9753 **/
9754static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
9755{
9756 unsigned long min_flush_time;
9757 int flush_wait_retry = 50;
9758 bool disable_atr = false;
9759 int fd_room;
9760 int reg;
9761
9762 if (!time_after(jiffies, pf->fd_flush_timestamp +
9763 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
9764 return;
9765
9766 /* If the flush is happening too quickly and we have mostly SB rules,
9767 * we should not re-enable ATR for some time.
9768 */
9769 min_flush_time = pf->fd_flush_timestamp +
9770 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
9771 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
9772
9773 if (!(time_after(jiffies, min_flush_time)) &&
9774 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
9775 if (I40E_DEBUG_FD & pf->hw.debug_mask)
9776 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
9777 disable_atr = true;
9778 }
9779
9780 pf->fd_flush_timestamp = jiffies;
9781 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
9782 /* flush all filters */
9783 wr32(&pf->hw, I40E_PFQF_CTL_1,
9784 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
9785 i40e_flush(&pf->hw);
9786 pf->fd_flush_cnt++;
9787 pf->fd_add_err = 0;
9788 do {
9789 /* Check FD flush status every 5-6msec */
9790 usleep_range(5000, 6000);
9791 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
9792 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
9793 break;
9794 } while (flush_wait_retry--);
9795 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
9796 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
9797 } else {
9798 /* replay sideband filters */
9799 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
9800 if (!disable_atr && !pf->fd_tcp4_filter_cnt)
9801 clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
9802 clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
9803 if (I40E_DEBUG_FD & pf->hw.debug_mask)
9804 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
9805 }
9806}
9807
9808/**
9809 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
9810 * @pf: board private structure
9811 **/
9812u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
9813{
9814 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
9815}
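
/* ATR filters are not tracked one by one, so the count above is
 * derived by subtraction: e.g. 100 filters programmed in hardware with
 * 40 sideband rules accounted for leaves 60 attributed to ATR.
 */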
9816
9817/**
9818 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
9819 * @pf: board private structure
9820 **/
9821static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
9822{
9824 /* if interface is down do nothing */
9825 if (test_bit(__I40E_DOWN, pf->state))
9826 return;
9827
9828 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
9829 i40e_fdir_flush_and_replay(pf);
9830
9831 i40e_fdir_check_and_reenable(pf);
9833}
9834
9835/**
9836 * i40e_vsi_link_event - notify VSI of a link event
9837 * @vsi: vsi to be notified
9838 * @link_up: link up or down
9839 **/
9840static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
9841{
9842 if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
9843 return;
9844
9845 switch (vsi->type) {
9846 case I40E_VSI_MAIN:
9847 if (!vsi->netdev || !vsi->netdev_registered)
9848 break;
9849
9850 if (link_up) {
9851 netif_carrier_on(vsi->netdev);
9852 netif_tx_wake_all_queues(vsi->netdev);
9853 } else {
9854 netif_carrier_off(vsi->netdev);
9855 netif_tx_stop_all_queues(vsi->netdev);
9856 }
9857 break;
9858
9859 case I40E_VSI_SRIOV:
9860 case I40E_VSI_VMDQ2:
9861 case I40E_VSI_CTRL:
9862 case I40E_VSI_IWARP:
9863 case I40E_VSI_MIRROR:
9864 default:
9865 /* there is no notification for other VSIs */
9866 break;
9867 }
9868}
9869
9870/**
9871 * i40e_veb_link_event - notify elements on the veb of a link event
9872 * @veb: veb to be notified
9873 * @link_up: link up or down
9874 **/
9875static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
9876{
9877 struct i40e_pf *pf;
9878 int i;
9879
9880 if (!veb || !veb->pf)
9881 return;
9882 pf = veb->pf;
9883
9884 /* depth first... */
9885 for (i = 0; i < I40E_MAX_VEB; i++)
9886 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
9887 i40e_veb_link_event(pf->veb[i], link_up);
9888
9889 /* ... now the local VSIs */
9890 for (i = 0; i < pf->num_alloc_vsi; i++)
9891 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
9892 i40e_vsi_link_event(pf->vsi[i], link_up);
9893}
9894
9895/**
9896 * i40e_link_event - Update netif_carrier status
9897 * @pf: board private structure
9898 **/
9899static void i40e_link_event(struct i40e_pf *pf)
9900{
9901 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9902 u8 new_link_speed, old_link_speed;
9903 bool new_link, old_link;
9904 int status;
9905#ifdef CONFIG_I40E_DCB
9906 int err;
9907#endif /* CONFIG_I40E_DCB */
9908
9909 /* set this to force the get_link_status call to refresh state */
9910 pf->hw.phy.get_link_info = true;
9911 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
9912 status = i40e_get_link_status(&pf->hw, &new_link);
9913
9914 /* On success, disable temp link polling */
9915 if (status == 0) {
9916 clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9917 } else {
9918 /* Enable link polling temporarily until i40e_get_link_status
9919 * returns 0
9920 */
9921 set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
9922 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
9923 status);
9924 return;
9925 }
9926
9927 old_link_speed = pf->hw.phy.link_info_old.link_speed;
9928 new_link_speed = pf->hw.phy.link_info.link_speed;
9929
9930 if (new_link == old_link &&
9931 new_link_speed == old_link_speed &&
9932 (test_bit(__I40E_VSI_DOWN, vsi->state) ||
9933 new_link == netif_carrier_ok(vsi->netdev)))
9934 return;
9935
9936 i40e_print_link_message(vsi, new_link);
9937
9938 /* Notify the base of the switch tree connected to
9939 * the link. Floating VEBs are not notified.
9940 */
9941 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
9942 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
9943 else
9944 i40e_vsi_link_event(vsi, new_link);
9945
9946 if (pf->vf)
9947 i40e_vc_notify_link_state(pf);
9948
9949 if (pf->flags & I40E_FLAG_PTP)
9950 i40e_ptp_set_increment(pf);
9951#ifdef CONFIG_I40E_DCB
9952 if (new_link == old_link)
9953 return;
9954 /* Not SW DCB so firmware will take care of default settings */
9955 if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
9956 return;
9957
9958 /* We cover here only link down, as after link up in case of SW DCB
9959 * SW LLDP agent will take care of setting it up
9960 */
9961 if (!new_link) {
9962 dev_dbg(&pf->pdev->dev, "Reconfig DCB to single TC as result of Link Down\n");
9963 memset(&pf->tmp_cfg, 0, sizeof(pf->tmp_cfg));
9964 err = i40e_dcb_sw_default_config(pf);
9965 if (err) {
9966 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
9967 I40E_FLAG_DCB_ENABLED);
9968 } else {
9969 pf->dcbx_cap = DCB_CAP_DCBX_HOST |
9970 DCB_CAP_DCBX_VER_IEEE;
9971 pf->flags |= I40E_FLAG_DCB_CAPABLE;
9972 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
9973 }
9974 }
9975#endif /* CONFIG_I40E_DCB */
9976}
9977
9978/**
9979 * i40e_watchdog_subtask - periodic checks not using event driven response
9980 * @pf: board private structure
9981 **/
9982static void i40e_watchdog_subtask(struct i40e_pf *pf)
9983{
9984 int i;
9985
9986 /* if interface is down do nothing */
9987 if (test_bit(__I40E_DOWN, pf->state) ||
9988 test_bit(__I40E_CONFIG_BUSY, pf->state))
9989 return;
9990
9991 /* make sure we don't do these things too often */
9992 if (time_before(jiffies, (pf->service_timer_previous +
9993 pf->service_timer_period)))
9994 return;
9995 pf->service_timer_previous = jiffies;
9996
9997 if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
9998 test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
9999 i40e_link_event(pf);
10000
10001 /* Update the stats for active netdevs so the network stack
10002 * can look at updated numbers whenever it cares to
10003 */
10004 for (i = 0; i < pf->num_alloc_vsi; i++)
10005 if (pf->vsi[i] && pf->vsi[i]->netdev)
10006 i40e_update_stats(pf->vsi[i]);
10007
10008 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
10009 /* Update the stats for the active switching components */
10010 for (i = 0; i < I40E_MAX_VEB; i++)
10011 if (pf->veb[i])
10012 i40e_update_veb_stats(pf->veb[i]);
10013 }
10014
10015 i40e_ptp_rx_hang(pf);
10016 i40e_ptp_tx_hang(pf);
10017}
10018
10019/**
10020 * i40e_reset_subtask - Set up for resetting the device and driver
10021 * @pf: board private structure
10022 **/
10023static void i40e_reset_subtask(struct i40e_pf *pf)
10024{
10025 u32 reset_flags = 0;
10026
10027 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
10028 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
10029 clear_bit(__I40E_REINIT_REQUESTED, pf->state);
10030 }
10031 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
10032 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
10033 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
10034 }
10035 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
10036 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
10037 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
10038 }
10039 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
10040 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
10041 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
10042 }
10043 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
10044 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
10045 clear_bit(__I40E_DOWN_REQUESTED, pf->state);
10046 }
10047
10048 /* If there's a recovery already waiting, it takes
10049 * precedence over starting a new reset sequence.
10050 */
10051 if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
10052 i40e_prep_for_reset(pf);
10053 i40e_reset(pf);
10054 i40e_rebuild(pf, false, false);
10055 }
10056
10057 /* If we're already down or resetting, just bail */
10058 if (reset_flags &&
10059 !test_bit(__I40E_DOWN, pf->state) &&
10060 !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
10061 i40e_do_reset(pf, reset_flags, false);
10062 }
10063}
10064
10065/**
10066 * i40e_handle_link_event - Handle link event
10067 * @pf: board private structure
10068 * @e: event info posted on ARQ
10069 **/
10070static void i40e_handle_link_event(struct i40e_pf *pf,
10071 struct i40e_arq_event_info *e)
10072{
10073 struct i40e_aqc_get_link_status *status =
10074 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
10075
10076 /* Do a new status request to re-enable LSE reporting
10077 * and load new status information into the hw struct
10078 * This completely ignores any state information
10079 * in the ARQ event info, instead choosing to always
10080 * issue the AQ update link status command.
10081 */
10082 i40e_link_event(pf);
10083
10084 /* Check if module meets thermal requirements */
10085 if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
10086 dev_err(&pf->pdev->dev,
10087 "Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
10088 dev_err(&pf->pdev->dev,
10089 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
10090 } else {
10091 /* check for unqualified module, if link is down, suppress
10092 * the message if link was forced to be down.
10093 */
10094 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
10095 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
10096 (!(status->link_info & I40E_AQ_LINK_UP)) &&
10097 (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
10098 dev_err(&pf->pdev->dev,
10099 "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
10100 dev_err(&pf->pdev->dev,
10101 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
10102 }
10103 }
10104}
10105
10106/**
10107 * i40e_clean_adminq_subtask - Clean the AdminQ rings
10108 * @pf: board private structure
10109 **/
10110static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
10111{
10112 struct i40e_arq_event_info event;
10113 struct i40e_hw *hw = &pf->hw;
10114 u16 pending, i = 0;
10115 u16 opcode;
10116 u32 oldval;
10117 int ret;
10118 u32 val;
10119
10120 /* Do not run clean AQ when PF reset fails */
10121 if (test_bit(__I40E_RESET_FAILED, pf->state))
10122 return;
10123
10124 /* check for error indications */
10125 val = rd32(&pf->hw, pf->hw.aq.arq.len);
10126 oldval = val;
10127 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
10128 if (hw->debug_mask & I40E_DEBUG_AQ)
10129 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
10130 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
10131 }
10132 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
10133 if (hw->debug_mask & I40E_DEBUG_AQ)
10134 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
10135 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
10136 pf->arq_overflows++;
10137 }
10138 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
10139 if (hw->debug_mask & I40E_DEBUG_AQ)
10140 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
10141 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
10142 }
10143 if (oldval != val)
10144 wr32(&pf->hw, pf->hw.aq.arq.len, val);
10145
10146 val = rd32(&pf->hw, pf->hw.aq.asq.len);
10147 oldval = val;
10148 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
10149 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
10150 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
10151 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
10152 }
10153 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
10154 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
10155 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
10156 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
10157 }
10158 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
10159 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
10160 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
10161 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
10162 }
10163 if (oldval != val)
10164 wr32(&pf->hw, pf->hw.aq.asq.len, val);
10165
10166 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
10167 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
10168 if (!event.msg_buf)
10169 return;
10170
10171 do {
10172 ret = i40e_clean_arq_element(hw, &event, &pending);
10173 if (ret == -EALREADY)
10174 break;
10175 else if (ret) {
10176 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
10177 break;
10178 }
10179
10180 opcode = le16_to_cpu(event.desc.opcode);
10181 switch (opcode) {
10182
10183 case i40e_aqc_opc_get_link_status:
10184 rtnl_lock();
10185 i40e_handle_link_event(pf, &event);
10186 rtnl_unlock();
10187 break;
10188 case i40e_aqc_opc_send_msg_to_pf:
10189 ret = i40e_vc_process_vf_msg(pf,
10190 le16_to_cpu(event.desc.retval),
10191 le32_to_cpu(event.desc.cookie_high),
10192 le32_to_cpu(event.desc.cookie_low),
10193 event.msg_buf,
10194 event.msg_len);
10195 break;
10196 case i40e_aqc_opc_lldp_update_mib:
10197 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
10198#ifdef CONFIG_I40E_DCB
10199 rtnl_lock();
10200 i40e_handle_lldp_event(pf, &event);
10201 rtnl_unlock();
10202#endif /* CONFIG_I40E_DCB */
10203 break;
10204 case i40e_aqc_opc_event_lan_overflow:
10205 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
10206 i40e_handle_lan_overflow_event(pf, &event);
10207 break;
10208 case i40e_aqc_opc_send_msg_to_peer:
10209 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
10210 break;
10211 case i40e_aqc_opc_nvm_erase:
10212 case i40e_aqc_opc_nvm_update:
10213 case i40e_aqc_opc_oem_post_update:
10214 i40e_debug(&pf->hw, I40E_DEBUG_NVM,
10215 "ARQ NVM operation 0x%04x completed\n",
10216 opcode);
10217 break;
10218 default:
10219 dev_info(&pf->pdev->dev,
10220 "ARQ: Unknown event 0x%04x ignored\n",
10221 opcode);
10222 break;
10223 }
10224 } while (i++ < pf->adminq_work_limit);
10225
10226 if (i < pf->adminq_work_limit)
10227 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
10228
10229 /* re-enable Admin queue interrupt cause */
10230 val = rd32(hw, I40E_PFINT_ICR0_ENA);
10231 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
10232 wr32(hw, I40E_PFINT_ICR0_ENA, val);
10233 i40e_flush(hw);
10234
10235 kfree(event.msg_buf);
10236}
10237
10238/**
10239 * i40e_verify_eeprom - make sure eeprom is good to use
10240 * @pf: board private structure
10241 **/
10242static void i40e_verify_eeprom(struct i40e_pf *pf)
10243{
10244 int err;
10245
10246 err = i40e_diag_eeprom_test(&pf->hw);
10247 if (err) {
10248 /* retry in case of garbage read */
10249 err = i40e_diag_eeprom_test(&pf->hw);
10250 if (err) {
10251 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
10252 err);
10253 set_bit(__I40E_BAD_EEPROM, pf->state);
10254 }
10255 }
10256
10257 if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
10258 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
10259 clear_bit(__I40E_BAD_EEPROM, pf->state);
10260 }
10261}
10262
10263/**
10264 * i40e_enable_pf_switch_lb
10265 * @pf: pointer to the PF structure
10266 *
10267 * enable switch loop back or die - no point in a return value
10268 **/
10269static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
10270{
10271 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10272 struct i40e_vsi_context ctxt;
10273 int ret;
10274
10275 ctxt.seid = pf->main_vsi_seid;
10276 ctxt.pf_num = pf->hw.pf_id;
10277 ctxt.vf_num = 0;
10278 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
10279 if (ret) {
10280 dev_info(&pf->pdev->dev,
10281 "couldn't get PF vsi config, err %pe aq_err %s\n",
10282 ERR_PTR(ret),
10283 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10284 return;
10285 }
10286 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
10287 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
10288 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
10289
10290 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
10291 if (ret) {
10292 dev_info(&pf->pdev->dev,
10293 "update vsi switch failed, err %pe aq_err %s\n",
10294 ERR_PTR(ret),
10295 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10296 }
10297}
10298
10299/**
10300 * i40e_disable_pf_switch_lb
10301 * @pf: pointer to the PF structure
10302 *
10303 * disable switch loop back or die - no point in a return value
10304 **/
10305static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
10306{
10307 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10308 struct i40e_vsi_context ctxt;
10309 int ret;
10310
10311 ctxt.seid = pf->main_vsi_seid;
10312 ctxt.pf_num = pf->hw.pf_id;
10313 ctxt.vf_num = 0;
10314 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
10315 if (ret) {
10316 dev_info(&pf->pdev->dev,
10317 "couldn't get PF vsi config, err %pe aq_err %s\n",
10318 ERR_PTR(ret),
10319 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10320 return;
10321 }
10322 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
10323 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
10324 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
10325
10326 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
10327 if (ret) {
10328 dev_info(&pf->pdev->dev,
10329 "update vsi switch failed, err %pe aq_err %s\n",
10330 ERR_PTR(ret),
10331 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10332 }
10333}
10334
10335/**
10336 * i40e_config_bridge_mode - Configure the HW bridge mode
10337 * @veb: pointer to the bridge instance
10338 *
10339 * Configure the loop back mode for the LAN VSI that is downlink to the
10340 * specified HW bridge instance. It is expected this function is called
10341 * when a new HW bridge is instantiated.
10342 **/
10343static void i40e_config_bridge_mode(struct i40e_veb *veb)
10344{
10345 struct i40e_pf *pf = veb->pf;
10346
10347 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
10348 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
10349 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
10350 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
10351 i40e_disable_pf_switch_lb(pf);
10352 else
10353 i40e_enable_pf_switch_lb(pf);
10354}
10355
10356/**
10357 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
10358 * @veb: pointer to the VEB instance
10359 *
10360 * This is a recursive function that first builds the attached VSIs then
10361 * recurses in to build the next layer of VEB. We track the connections
10362 * through our own index numbers because the seid's from the HW could
10363 * change across the reset.
10364 **/
10365static int i40e_reconstitute_veb(struct i40e_veb *veb)
10366{
10367 struct i40e_vsi *ctl_vsi = NULL;
10368 struct i40e_pf *pf = veb->pf;
10369 int v, veb_idx;
10370 int ret;
10371
10372 /* build VSI that owns this VEB, temporarily attached to base VEB */
10373 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
10374 if (pf->vsi[v] &&
10375 pf->vsi[v]->veb_idx == veb->idx &&
10376 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
10377 ctl_vsi = pf->vsi[v];
10378 break;
10379 }
10380 }
10381 if (!ctl_vsi) {
10382 dev_info(&pf->pdev->dev,
10383 "missing owner VSI for veb_idx %d\n", veb->idx);
10384 ret = -ENOENT;
10385 goto end_reconstitute;
10386 }
10387 if (ctl_vsi != pf->vsi[pf->lan_vsi])
10388 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
10389 ret = i40e_add_vsi(ctl_vsi);
10390 if (ret) {
10391 dev_info(&pf->pdev->dev,
10392 "rebuild of veb_idx %d owner VSI failed: %d\n",
10393 veb->idx, ret);
10394 goto end_reconstitute;
10395 }
10396 i40e_vsi_reset_stats(ctl_vsi);
10397
10398 /* create the VEB in the switch and move the VSI onto the VEB */
10399 ret = i40e_add_veb(veb, ctl_vsi);
10400 if (ret)
10401 goto end_reconstitute;
10402
10403 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
10404 veb->bridge_mode = BRIDGE_MODE_VEB;
10405 else
10406 veb->bridge_mode = BRIDGE_MODE_VEPA;
10407 i40e_config_bridge_mode(veb);
10408
10409 /* create the remaining VSIs attached to this VEB */
10410 for (v = 0; v < pf->num_alloc_vsi; v++) {
10411 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
10412 continue;
10413
10414 if (pf->vsi[v]->veb_idx == veb->idx) {
10415 struct i40e_vsi *vsi = pf->vsi[v];
10416
10417 vsi->uplink_seid = veb->seid;
10418 ret = i40e_add_vsi(vsi);
10419 if (ret) {
10420 dev_info(&pf->pdev->dev,
10421 "rebuild of vsi_idx %d failed: %d\n",
10422 v, ret);
10423 goto end_reconstitute;
10424 }
10425 i40e_vsi_reset_stats(vsi);
10426 }
10427 }
10428
10429 /* create any VEBs attached to this VEB - RECURSION */
10430 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
10431 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
10432 pf->veb[veb_idx]->uplink_seid = veb->seid;
10433 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
10434 if (ret)
10435 break;
10436 }
10437 }
10438
10439end_reconstitute:
10440 return ret;
10441}
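
/* Rebuild order enforced above: the owner VSI first (temporarily hung
 * off the base VEB), then the VEB itself, then the remaining member
 * VSIs, and finally any child VEBs by recursion. Driver-side indices
 * are used throughout because firmware assigns new SEIDs after reset.
 */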
10442
10443/**
10444 * i40e_get_capabilities - get info about the HW
10445 * @pf: the PF struct
10446 * @list_type: AQ capability to be queried
10447 **/
10448static int i40e_get_capabilities(struct i40e_pf *pf,
10449 enum i40e_admin_queue_opc list_type)
10450{
10451 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
10452 u16 data_size;
10453 int buf_len;
10454 int err;
10455
10456 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
10457 do {
10458 cap_buf = kzalloc(buf_len, GFP_KERNEL);
10459 if (!cap_buf)
10460 return -ENOMEM;
10461
10462 /* this loads the data into the hw struct for us */
10463 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
10464 &data_size, list_type,
10465 NULL);
10466 /* data loaded, buffer no longer needed */
10467 kfree(cap_buf);
10468
10469 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
10470 /* retry with a larger buffer */
10471 buf_len = data_size;
10472 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
10473 dev_info(&pf->pdev->dev,
10474 "capability discovery failed, err %pe aq_err %s\n",
10475 ERR_PTR(err),
10476 i40e_aq_str(&pf->hw,
10477 pf->hw.aq.asq_last_status));
10478 return -ENODEV;
10479 }
10480 } while (err);
10481
10482 if (pf->hw.debug_mask & I40E_DEBUG_USER) {
10483 if (list_type == i40e_aqc_opc_list_func_capabilities) {
10484 dev_info(&pf->pdev->dev,
10485 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
10486 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
10487 pf->hw.func_caps.num_msix_vectors,
10488 pf->hw.func_caps.num_msix_vectors_vf,
10489 pf->hw.func_caps.fd_filters_guaranteed,
10490 pf->hw.func_caps.fd_filters_best_effort,
10491 pf->hw.func_caps.num_tx_qp,
10492 pf->hw.func_caps.num_vsis);
10493 } else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
10494 dev_info(&pf->pdev->dev,
10495 "switch_mode=0x%04x, function_valid=0x%08x\n",
10496 pf->hw.dev_caps.switch_mode,
10497 pf->hw.dev_caps.valid_functions);
10498 dev_info(&pf->pdev->dev,
10499 "SR-IOV=%d, num_vfs for all function=%u\n",
10500 pf->hw.dev_caps.sr_iov_1_1,
10501 pf->hw.dev_caps.num_vfs);
10502 dev_info(&pf->pdev->dev,
10503 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
10504 pf->hw.dev_caps.num_vsis,
10505 pf->hw.dev_caps.num_rx_qp,
10506 pf->hw.dev_caps.num_tx_qp);
10507 }
10508 }
10509 if (list_type == i40e_aqc_opc_list_func_capabilities) {
10510#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
10511 + pf->hw.func_caps.num_vfs)
10512 if (pf->hw.revision_id == 0 &&
10513 pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
10514 dev_info(&pf->pdev->dev,
10515 "got num_vsis %d, setting num_vsis to %d\n",
10516 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
10517 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
10518 }
10519 }
10520 return 0;
10521}
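
/* The loop above is the usual admin-queue buffer-sizing idiom: start
 * with room for 40 capability records; if firmware answers
 * I40E_AQ_RC_ENOMEM it also reports the size it needed in data_size,
 * which becomes the buffer length for the retry.
 */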
10522
10523static int i40e_vsi_clear(struct i40e_vsi *vsi);
10524
10525/**
10526 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
10527 * @pf: board private structure
10528 **/
10529static void i40e_fdir_sb_setup(struct i40e_pf *pf)
10530{
10531 struct i40e_vsi *vsi;
10532
10533 /* quick workaround for an NVM issue that leaves a critical register
10534 * uninitialized
10535 */
10536 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
10537 static const u32 hkey[] = {
10538 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
10539 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
10540 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
10541 0x95b3a76d};
10542 int i;
10543
10544 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
10545 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
10546 }
10547
10548 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
10549 return;
10550
10551 /* find existing VSI and see if it needs configuring */
10552 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
10553
10554 /* create a new VSI if none exists */
10555 if (!vsi) {
10556 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
10557 pf->vsi[pf->lan_vsi]->seid, 0);
10558 if (!vsi) {
10559 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
10560 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
10561 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
10562 return;
10563 }
10564 }
10565
10566 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
10567}
10568
10569/**
10570 * i40e_fdir_teardown - release the Flow Director resources
10571 * @pf: board private structure
10572 **/
10573static void i40e_fdir_teardown(struct i40e_pf *pf)
10574{
10575 struct i40e_vsi *vsi;
10576
10577 i40e_fdir_filter_exit(pf);
10578 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
10579 if (vsi)
10580 i40e_vsi_release(vsi);
10581}
10582
10583/**
10584 * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
10585 * @vsi: PF main vsi
10586 * @seid: seid of main or channel VSIs
10587 *
10588 * Rebuilds cloud filters associated with main VSI and channel VSIs if they
10589 * existed before reset
10590 **/
10591static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
10592{
10593 struct i40e_cloud_filter *cfilter;
10594 struct i40e_pf *pf = vsi->back;
10595 struct hlist_node *node;
10596 int ret;
10597
10598 /* Add cloud filters back if they exist */
10599 hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
10600 cloud_node) {
10601 if (cfilter->seid != seid)
10602 continue;
10603
10604 if (cfilter->dst_port)
10605 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
10606 true);
10607 else
10608 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
10609
10610 if (ret) {
10611 dev_dbg(&pf->pdev->dev,
10612 "Failed to rebuild cloud filter, err %pe aq_err %s\n",
10613 ERR_PTR(ret),
10614 i40e_aq_str(&pf->hw,
10615 pf->hw.aq.asq_last_status));
10616 return ret;
10617 }
10618 }
10619 return 0;
10620}
10621
10622/**
10623 * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
10624 * @vsi: PF main vsi
10625 *
10626 * Rebuilds channel VSIs if they existed before reset
10627 **/
10628static int i40e_rebuild_channels(struct i40e_vsi *vsi)
10629{
10630 struct i40e_channel *ch, *ch_tmp;
10631 int ret;
10632
10633 if (list_empty(&vsi->ch_list))
10634 return 0;
10635
10636 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
10637 if (!ch->initialized)
10638 break;
10639 /* Proceed with creation of channel (VMDq2) VSI */
10640 ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
10641 if (ret) {
10642 dev_info(&vsi->back->pdev->dev,
10643 "failed to rebuild channels using uplink_seid %u\n",
10644 vsi->uplink_seid);
10645 return ret;
10646 }
10647 /* Reconfigure TX queues using QTX_CTL register */
10648 ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
10649 if (ret) {
10650 dev_info(&vsi->back->pdev->dev,
10651 "failed to configure TX rings for channel %u\n",
10652 ch->seid);
10653 return ret;
10654 }
10655 /* update 'next_base_queue' */
10656 vsi->next_base_queue = vsi->next_base_queue +
10657 ch->num_queue_pairs;
10658 if (ch->max_tx_rate) {
10659 u64 credits = ch->max_tx_rate;
10660
10661 if (i40e_set_bw_limit(vsi, ch->seid,
10662 ch->max_tx_rate))
10663 return -EINVAL;
10664
10665 do_div(credits, I40E_BW_CREDIT_DIVISOR);
10666 dev_dbg(&vsi->back->pdev->dev,
10667 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
10668 ch->max_tx_rate,
10669 credits,
10670 ch->seid);
10671 }
10672 ret = i40e_rebuild_cloud_filters(vsi, seid: ch->seid);
10673 if (ret) {
10674 dev_dbg(&vsi->back->pdev->dev,
10675 "Failed to rebuild cloud filters for channel VSI %u\n",
10676 ch->seid);
10677 return ret;
10678 }
10679 }
10680 return 0;
10681}

/**
 * i40e_clean_xps_state - clean xps state for every tx_ring
 * @vsi: ptr to the VSI
 **/
static void i40e_clean_xps_state(struct i40e_vsi *vsi)
{
	int i;

	if (vsi->tx_rings)
		for (i = 0; i < vsi->num_queue_pairs; i++)
			if (vsi->tx_rings[i])
				clear_bit(__I40E_TX_XPS_INIT_DONE,
					  vsi->tx_rings[i]->state);
}

/**
 * i40e_prep_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Close up the VFs and other things in prep for PF Reset.
 **/
static void i40e_prep_for_reset(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;
	u32 v;

	clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return;
	if (i40e_check_asq_alive(&pf->hw))
		i40e_vc_notify_reset(pf);

	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");

	/* quiesce the VSIs and their queues that are not already DOWN */
	i40e_pf_quiesce_all_vsi(pf);

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v]) {
			i40e_clean_xps_state(pf->vsi[v]);
			pf->vsi[v]->seid = 0;
		}
	}

	i40e_shutdown_adminq(&pf->hw);

	/* call shutdown HMC */
	if (hw->hmc.hmc_obj) {
		ret = i40e_shutdown_lan_hmc(hw);
		if (ret)
			dev_warn(&pf->pdev->dev,
				 "shutdown_lan_hmc failed: %d\n", ret);
	}

	/* Save the current PTP time so that we can restore the time after the
	 * reset completes.
	 */
	i40e_ptp_save_hw_time(pf);
}

/**
 * i40e_send_version - update firmware with driver version
 * @pf: PF struct
 */
static void i40e_send_version(struct i40e_pf *pf)
{
	struct i40e_driver_version dv;

	dv.major_version = 0xff;
	dv.minor_version = 0xff;
	dv.build_version = 0xff;
	dv.subbuild_version = 0;
	strscpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string));
	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
}

/**
 * i40e_get_oem_version - get OEM specific version information
 * @hw: pointer to the hardware structure
 **/
static void i40e_get_oem_version(struct i40e_hw *hw)
{
	u16 block_offset = 0xffff;
	u16 block_length = 0;
	u16 capabilities = 0;
	u16 gen_snap = 0;
	u16 release = 0;

#define I40E_SR_NVM_OEM_VERSION_PTR		0x1B
#define I40E_NVM_OEM_LENGTH_OFFSET		0x00
#define I40E_NVM_OEM_CAPABILITIES_OFFSET	0x01
#define I40E_NVM_OEM_GEN_OFFSET			0x02
#define I40E_NVM_OEM_RELEASE_OFFSET		0x03
#define I40E_NVM_OEM_CAPABILITIES_MASK		0x000F
#define I40E_NVM_OEM_LENGTH			3
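
	/* Rough layout of the OEM version block in NVM, as decoded by the
	 * reads below (a sketch derived from the offsets above, not a spec
	 * excerpt):
	 *   word[ptr + 0x00]: block length  (must be >= I40E_NVM_OEM_LENGTH)
	 *   word[ptr + 0x01]: capabilities  (low 4 bits must be zero)
	 *   word[ptr + 0x02]: gen/snap
	 *   word[ptr + 0x03]: release
	 */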

	/* Check if pointer to OEM version block is valid. */
	i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
	if (block_offset == 0xffff)
		return;

	/* Check if OEM version block has correct length. */
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
			   &block_length);
	if (block_length < I40E_NVM_OEM_LENGTH)
		return;

	/* Check if OEM version format is as expected. */
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
			   &capabilities);
	if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
		return;

	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
			   &gen_snap);
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
			   &release);
	hw->nvm.oem_ver =
		FIELD_PREP(I40E_OEM_GEN_MASK | I40E_OEM_SNAP_MASK, gen_snap) |
		FIELD_PREP(I40E_OEM_RELEASE_MASK, release);
	hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
}

/**
 * i40e_reset - wait for core reset to finish, reset PF if CORER not seen
 * @pf: board private structure
 **/
static int i40e_reset(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int ret;

	ret = i40e_pf_reset(hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
		set_bit(__I40E_RESET_FAILED, pf->state);
		clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
	} else {
		pf->pfr_count++;
	}
	return ret;
}

/**
 * i40e_rebuild - rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 **/
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
	const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf);
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_hw *hw = &pf->hw;
	int ret;
	u32 val;
	int v;

	if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
	    is_recovery_mode_reported)
		i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);

	if (test_bit(__I40E_DOWN, pf->state) &&
	    !test_bit(__I40E_RECOVERY_MODE, pf->state))
		goto clear_recovery;
	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");

	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
	ret = i40e_init_adminq(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto clear_recovery;
	}
	i40e_get_oem_version(&pf->hw);

	if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) {
		/* The following delay is necessary for firmware update.
		 * Don't clear the flag yet; the EEPROM re-verify below
		 * checks it too.
		 */
		mdelay(1000);
	}

	/* re-verify the eeprom if we just had an EMP reset */
	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
		i40e_verify_eeprom(pf);

	/* if we are going out of or into recovery mode we have to act
	 * accordingly with regard to resource initialization
	 * and deinitialization
	 */
	if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
		if (i40e_get_capabilities(pf,
					  i40e_aqc_opc_list_func_capabilities))
			goto end_unlock;

		if (is_recovery_mode_reported) {
			/* we're staying in recovery mode so we'll reinitialize
			 * misc vector here
			 */
			if (i40e_setup_misc_vector_for_recovery_mode(pf))
				goto end_unlock;
		} else {
			if (!lock_acquired)
				rtnl_lock();
			/* we're going out of recovery mode so we'll free
			 * the IRQ allocated specifically for recovery mode
			 * and restore the interrupt scheme
			 */
			free_irq(pf->pdev->irq, pf);
			i40e_clear_interrupt_scheme(pf);
			if (i40e_restore_interrupt_scheme(pf))
				goto end_unlock;
		}

		/* tell the firmware that we're starting */
		i40e_send_version(pf);

		/* bail out in case recovery mode was detected, as there is
		 * no need for further configuration.
		 */
		goto end_unlock;
	}

	i40e_clear_pxe_mode(hw);
	ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
	if (ret)
		goto end_core_reset;

	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret) {
		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}

#ifdef CONFIG_I40E_DCB
	/* Enable FW to write a default DCB config on link-up
	 * unless I40E_FLAG_TC_MQPRIO was enabled or DCB
	 * is not supported with new link speed
	 */
	if (i40e_is_tc_mqprio_enabled(pf)) {
		i40e_aq_set_dcb_parameters(hw, false, NULL);
	} else {
		if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
		    (hw->phy.link_info.link_speed &
		     (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
			i40e_aq_set_dcb_parameters(hw, false, NULL);
			dev_warn(&pf->pdev->dev,
				 "DCB is not supported for X710-T*L 2.5/5G speeds\n");
			pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
		} else {
			i40e_aq_set_dcb_parameters(hw, true, NULL);
			ret = i40e_init_pf_dcb(pf);
			if (ret) {
				dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n",
					 ret);
				pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
				/* Continue without DCB enabled */
			}
		}
	}

#endif /* CONFIG_I40E_DCB */
	if (!lock_acquired)
		rtnl_lock();
	ret = i40e_setup_pf_switch(pf, reinit, true);
	if (ret)
		goto end_unlock;

	/* The driver only wants link up/down and module qualification
	 * reports from firmware. Note the negative logic.
	 */
	ret = i40e_aq_set_phy_int_mask(&pf->hw,
				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					 I40E_AQ_EVENT_MEDIA_NA |
					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (ret)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Rebuild the VSIs and VEBs that existed before reset.
	 * They are still in our local switch element arrays, so only
	 * need to rebuild the switch model in the HW.
	 *
	 * If there were VEBs but the reconstitution failed, we'll try
	 * to recover minimal use by getting the basic PF VSI working.
	 */
	if (vsi->uplink_seid != pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
		/* find the one VEB connected to the MAC, and find orphans */
		for (v = 0; v < I40E_MAX_VEB; v++) {
			if (!pf->veb[v])
				continue;

			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
			    pf->veb[v]->uplink_seid == 0) {
				ret = i40e_reconstitute_veb(pf->veb[v]);

				if (!ret)
					continue;

				/* If Main VEB failed, we're in deep doodoo,
				 * so give up rebuilding the switch and set up
				 * for minimal rebuild of PF VSI.
				 * If orphan failed, we'll report the error
				 * but try to keep going.
				 */
				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
					dev_info(&pf->pdev->dev,
						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
						 ret);
					vsi->uplink_seid = pf->mac_seid;
					break;
				} else if (pf->veb[v]->uplink_seid == 0) {
					dev_info(&pf->pdev->dev,
						 "rebuild of orphan VEB failed: %d\n",
						 ret);
				}
			}
		}
	}

	if (vsi->uplink_seid == pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
		/* no VEB, so rebuild only the Main VSI */
		ret = i40e_add_vsi(vsi);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "rebuild of Main VSI failed: %d\n", ret);
			goto end_unlock;
		}
	}

	if (vsi->mqprio_qopt.max_rate[0]) {
		u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
						  vsi->mqprio_qopt.max_rate[0]);
		u64 credits = 0;

		ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
		if (ret)
			goto end_unlock;

		credits = max_tx_rate;
		do_div(credits, I40E_BW_CREDIT_DIVISOR);
		dev_dbg(&vsi->back->pdev->dev,
			"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
			max_tx_rate,
			credits,
			vsi->seid);
	}

	ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
	if (ret)
		goto end_unlock;

	/* PF Main VSI is rebuilt by now, go ahead and rebuild channel VSIs
	 * for this main VSI if they exist
	 */
	ret = i40e_rebuild_channels(vsi);
	if (ret)
		goto end_unlock;

	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
#define I40E_REG_MSS          0x000E64DC
#define I40E_REG_MSS_MIN_MASK 0x3FF0000
#define I40E_64BYTE_MSS       0x400000
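	/* The minimum-MSS field lives in bits 25:16 of I40E_REG_MSS, so
	 * I40E_64BYTE_MSS (0x400000 == 64 << 16) encodes a 64-byte minimum;
	 * the check below lowers the field to 64 only if it is currently
	 * larger.
	 */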
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}

	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
		msleep(75);
		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (ret)
			dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n",
				 ERR_PTR(ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* reinit the misc interrupt */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		ret = i40e_setup_misc_vector(pf);
		if (ret)
			goto end_unlock;
	}

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	/* restart the VSIs that were rebuilt and running before the reset */
	i40e_pf_unquiesce_all_vsi(pf);

	/* Release the RTNL lock before we start resetting VFs */
	if (!lock_acquired)
		rtnl_unlock();

	/* Restore promiscuous settings */
	ret = i40e_set_promiscuous(pf, pf->cur_promisc);
	if (ret)
		dev_warn(&pf->pdev->dev,
			 "Failed to restore promiscuous setting: %s, err %pe aq_err %s\n",
			 pf->cur_promisc ? "on" : "off",
			 ERR_PTR(ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	i40e_reset_all_vfs(pf, true);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* We've already released the lock, so don't do it again */
	goto end_core_reset;

end_unlock:
	if (!lock_acquired)
		rtnl_unlock();
end_core_reset:
	clear_bit(__I40E_RESET_FAILED, pf->state);
clear_recovery:
	clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
	clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
}

/**
 * i40e_reset_and_rebuild - reset and rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 **/
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
				   bool lock_acquired)
{
	int ret;

	if (test_bit(__I40E_IN_REMOVE, pf->state))
		return;
	/* Now we wait for GRST to settle out.
	 * We don't have to delete the VEBs or VSIs from the hw switch
	 * because the reset will make them disappear.
	 */
	ret = i40e_reset(pf);
	if (!ret)
		i40e_rebuild(pf, reinit, lock_acquired);
}

/**
 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
 * @pf: board private structure
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
 * Close up the VFs and other things in prep for a Core Reset,
 * then get ready to rebuild the world.
 **/
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
{
	i40e_prep_for_reset(pf);
	i40e_reset_and_rebuild(pf, false, lock_acquired);
}

/**
 * i40e_handle_mdd_event - handle malicious driver detection events
 * @pf: pointer to the PF structure
 *
 * Called from the MDD irq handler to identify possibly malicious VFs
 **/
static void i40e_handle_mdd_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	bool mdd_detected = false;
	struct i40e_vf *vf;
	u32 reg;
	int i;

	if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
		return;

	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
				I40E_GL_MDET_TX_PF_NUM_SHIFT;
		u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
				I40E_GL_MDET_TX_VF_NUM_SHIFT;
		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
				I40E_GL_MDET_TX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
				I40E_GL_MDET_TX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
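		/* the MDET registers report absolute queue numbers; the
		 * base_queue subtraction above converts them to queue
		 * indices relative to this function
		 */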
		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}
	reg = rd32(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
				I40E_GL_MDET_RX_FUNCTION_SHIFT;
		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
				I40E_GL_MDET_RX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
				I40E_GL_MDET_RX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_rx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
				 event, queue, func);
		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	if (mdd_detected) {
		reg = rd32(hw, I40E_PF_MDET_TX);
		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
			dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n");
		}
		reg = rd32(hw, I40E_PF_MDET_RX);
		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
			dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n");
		}
	}

	/* see if one of the VFs needs its hand slapped */
	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
		vf = &(pf->vf[i]);
		reg = rd32(hw, I40E_VP_MDET_TX(i));
		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}

		reg = rd32(hw, I40E_VP_MDET_RX(i));
		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
				 i);
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}
	}

	/* re-enable mdd interrupt cause */
	clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);
}

/**
 * i40e_service_task - Run the driver's async subtasks
 * @work: pointer to work_struct containing our data
 **/
static void i40e_service_task(struct work_struct *work)
{
	struct i40e_pf *pf = container_of(work,
					  struct i40e_pf,
					  service_task);
	unsigned long start_time = jiffies;

	/* don't bother with service tasks if a reset is in progress */
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
	    test_bit(__I40E_SUSPENDED, pf->state))
		return;

	if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
		return;

	if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
		i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
		i40e_sync_filters_subtask(pf);
		i40e_reset_subtask(pf);
		i40e_handle_mdd_event(pf);
		i40e_vc_process_vflr_event(pf);
		i40e_watchdog_subtask(pf);
		i40e_fdir_reinit_subtask(pf);
		if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
			/* Client subtask will reopen next time through. */
			i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi],
							   true);
		} else {
			i40e_client_subtask(pf);
			if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
					       pf->state))
				i40e_notify_client_of_l2_param_changes(
							pf->vsi[pf->lan_vsi]);
		}
		i40e_sync_filters_subtask(pf);
	} else {
		i40e_reset_subtask(pf);
	}

	i40e_clean_adminq_subtask(pf);

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__I40E_SERVICE_SCHED, pf->state);

	/* If the tasks have taken longer than one timer cycle or there
	 * is more work to be done, reschedule the service task now
	 * rather than wait for the timer to tick again.
	 */
	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
	    test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
	    test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
	    test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
		i40e_service_event_schedule(pf);
}

/**
 * i40e_service_timer - timer callback
 * @t: timer list pointer
 **/
static void i40e_service_timer(struct timer_list *t)
{
	struct i40e_pf *pf = from_timer(pf, t, service_timer);

	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));
	i40e_service_event_schedule(pf);
}

/**
 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
 * @vsi: the VSI being configured
 **/
static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		vsi->alloc_queue_pairs = pf->num_lan_qps;
		if (!vsi->num_tx_desc)
			vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
						 I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (!vsi->num_rx_desc)
			vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
						 I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_q_vectors = pf->num_lan_msix;
		else
			vsi->num_q_vectors = 1;

		break;

	case I40E_VSI_FDIR:
		vsi->alloc_queue_pairs = 1;
		vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT,
					 I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT,
					 I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_fdsb_msix;
		break;

	case I40E_VSI_VMDQ2:
		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
		if (!vsi->num_tx_desc)
			vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
						 I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (!vsi->num_rx_desc)
			vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
						 I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_vmdq_msix;
		break;

	case I40E_VSI_SRIOV:
		vsi->alloc_queue_pairs = pf->num_vf_qps;
		if (!vsi->num_tx_desc)
			vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
						 I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (!vsi->num_rx_desc)
			vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
						 I40E_REQ_DESCRIPTOR_MULTIPLE);
		break;

	default:
		WARN_ON(1);
		return -ENODATA;
	}

	if (is_kdump_kernel()) {
		vsi->num_tx_desc = I40E_MIN_NUM_DESCRIPTORS;
		vsi->num_rx_desc = I40E_MIN_NUM_DESCRIPTORS;
	}

	return 0;
}

/**
 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
 * @vsi: VSI pointer
 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
 *
 * On error: returns error code (negative)
 * On success: returns 0
 **/
static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
{
	struct i40e_ring **next_rings;
	int size;
	int ret = 0;

	/* allocate memory for both Tx, XDP Tx and Rx ring pointers */
	size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
	       (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
	vsi->tx_rings = kzalloc(size, GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;
	next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
	if (i40e_enabled_xdp_vsi(vsi)) {
		vsi->xdp_rings = next_rings;
		next_rings += vsi->alloc_queue_pairs;
	}
	vsi->rx_rings = next_rings;
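
	/* Resulting layout of the single allocation (XDP case shown):
	 *   tx_rings[0 .. alloc_queue_pairs - 1]
	 *   xdp_rings[0 .. alloc_queue_pairs - 1]
	 *   rx_rings[0 .. alloc_queue_pairs - 1]
	 * so only tx_rings is ever passed to kfree().
	 */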

	if (alloc_qvectors) {
		/* allocate memory for q_vector pointers */
		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
		if (!vsi->q_vectors) {
			ret = -ENOMEM;
			goto err_vectors;
		}
	}
	return ret;

err_vectors:
	kfree(vsi->tx_rings);
	return ret;
}

/**
 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
	int ret = -ENODEV;
	struct i40e_vsi *vsi;
	int vsi_idx;
	int i;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VSI list may be fragmented if VSI creation/destruction has
	 * been happening. We can afford to do a quick scan to look
	 * for any free VSIs in the list.
	 *
	 * find next empty vsi slot, looping back around if necessary
	 */
	i = pf->next_vsi;
	while (i < pf->num_alloc_vsi && pf->vsi[i])
		i++;
	if (i >= pf->num_alloc_vsi) {
		i = 0;
		while (i < pf->next_vsi && pf->vsi[i])
			i++;
	}

	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
		vsi_idx = i;             /* Found one! */
	} else {
		ret = -ENODEV;
		goto unlock_pf;  /* out of VSI slots! */
	}
	pf->next_vsi = ++i;

	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
	if (!vsi) {
		ret = -ENOMEM;
		goto unlock_pf;
	}
	vsi->type = type;
	vsi->back = pf;
	set_bit(__I40E_VSI_DOWN, vsi->state);
	vsi->flags = 0;
	vsi->idx = vsi_idx;
	vsi->int_rate_limit = 0;
	vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
				pf->rss_table_size : 64;
	vsi->netdev_registered = false;
	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
	hash_init(vsi->mac_filter_hash);
	vsi->irqs_ready = false;

	if (type == I40E_VSI_MAIN) {
		vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
		if (!vsi->af_xdp_zc_qps)
			goto err_rings;
	}

	ret = i40e_set_num_rings_in_vsi(vsi);
	if (ret)
		goto err_rings;

	ret = i40e_vsi_alloc_arrays(vsi, true);
	if (ret)
		goto err_rings;

	/* Setup default MSIX irq handler for VSI */
	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);

	/* Initialize VSI lock */
	spin_lock_init(&vsi->mac_filter_hash_lock);
	pf->vsi[vsi_idx] = vsi;
	ret = vsi_idx;
	goto unlock_pf;

err_rings:
	bitmap_free(vsi->af_xdp_zc_qps);
	pf->next_vsi = i - 1;
	kfree(vsi);
unlock_pf:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}

/**
 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
 * @vsi: VSI pointer
 * @free_qvectors: a bool to specify if q_vectors need to be freed.
 **/
static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
{
	/* free the ring and vector containers */
	if (free_qvectors) {
		kfree(vsi->q_vectors);
		vsi->q_vectors = NULL;
	}
	kfree(vsi->tx_rings);
	vsi->tx_rings = NULL;
	vsi->rx_rings = NULL;
	vsi->xdp_rings = NULL;
}

/**
 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
 * and lookup table
 * @vsi: Pointer to VSI structure
 */
static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
{
	if (!vsi)
		return;

	kfree(vsi->rss_hkey_user);
	vsi->rss_hkey_user = NULL;

	kfree(vsi->rss_lut_user);
	vsi->rss_lut_user = NULL;
}

/**
 * i40e_vsi_clear - Deallocate the VSI provided
 * @vsi: the VSI being un-configured
 **/
static int i40e_vsi_clear(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf;

	if (!vsi)
		return 0;

	if (!vsi->back)
		goto free_vsi;
	pf = vsi->back;

	mutex_lock(&pf->switch_mutex);
	if (!pf->vsi[vsi->idx]) {
		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
			vsi->idx, vsi->idx, vsi->type);
		goto unlock_vsi;
	}

	if (pf->vsi[vsi->idx] != vsi) {
		dev_err(&pf->pdev->dev,
			"pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
			pf->vsi[vsi->idx]->idx,
			pf->vsi[vsi->idx]->type,
			vsi->idx, vsi->type);
		goto unlock_vsi;
	}

	/* updates the PF for this cleared vsi */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);

	bitmap_free(vsi->af_xdp_zc_qps);
	i40e_vsi_free_arrays(vsi, true);
	i40e_clear_rss_config_user(vsi);

	pf->vsi[vsi->idx] = NULL;
	if (vsi->idx < pf->next_vsi)
		pf->next_vsi = vsi->idx;

unlock_vsi:
	mutex_unlock(&pf->switch_mutex);
free_vsi:
	kfree(vsi);

	return 0;
}

/**
 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being cleaned
 **/
static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
{
	int i;

	if (vsi->tx_rings && vsi->tx_rings[0]) {
		for (i = 0; i < vsi->alloc_queue_pairs; i++) {
			kfree_rcu(vsi->tx_rings[i], rcu);
			WRITE_ONCE(vsi->tx_rings[i], NULL);
			WRITE_ONCE(vsi->rx_rings[i], NULL);
			if (vsi->xdp_rings)
				WRITE_ONCE(vsi->xdp_rings[i], NULL);
		}
	}
}

/**
 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being configured
 **/
static int i40e_alloc_rings(struct i40e_vsi *vsi)
{
	int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *ring;

	/* Set basic values in the rings to be used later during open() */
	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
		/* allocate space for both Tx and Rx in one shot */
		ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
		if (!ring)
			goto err_out;
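
		/* within this allocation, ring[0] is the Tx ring, ring[1]
		 * the optional XDP Tx ring, and the last entry the Rx ring;
		 * the post-incremented WRITE_ONCE()s below hand them out in
		 * that order
		 */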

		ring->queue_index = i;
		ring->reg_idx = vsi->base_queue + i;
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_tx_desc;
		ring->size = 0;
		ring->dcb_tc = 0;
		if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
		ring->itr_setting = pf->tx_itr_default;
		WRITE_ONCE(vsi->tx_rings[i], ring++);

		if (!i40e_enabled_xdp_vsi(vsi))
			goto setup_rx;

		ring->queue_index = vsi->alloc_queue_pairs + i;
		ring->reg_idx = vsi->base_queue + ring->queue_index;
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = NULL;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_tx_desc;
		ring->size = 0;
		ring->dcb_tc = 0;
		if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
		set_ring_xdp(ring);
		ring->itr_setting = pf->tx_itr_default;
		WRITE_ONCE(vsi->xdp_rings[i], ring++);

setup_rx:
		ring->queue_index = i;
		ring->reg_idx = vsi->base_queue + i;
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_rx_desc;
		ring->size = 0;
		ring->dcb_tc = 0;
		ring->itr_setting = pf->rx_itr_default;
		WRITE_ONCE(vsi->rx_rings[i], ring);
	}

	return 0;

err_out:
	i40e_vsi_clear_rings(vsi);
	return -ENOMEM;
}

/**
 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
 * @pf: board private structure
 * @vectors: the number of MSI-X vectors to request
 *
 * Returns the number of vectors reserved, or error
 **/
static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
{
	vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
					I40E_MIN_MSIX, vectors);
	if (vectors < 0) {
		dev_info(&pf->pdev->dev,
			 "MSI-X vector reservation failed: %d\n", vectors);
		vectors = 0;
	}

	return vectors;
}

/**
 * i40e_init_msix - Setup the MSIX capability
 * @pf: board private structure
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns the number of vectors reserved or negative on failure
 **/
static int i40e_init_msix(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int cpus, extra_vectors;
	int vectors_left;
	int v_budget, i;
	int v_actual;
	int iwarp_requested = 0;

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return -ENODEV;

	/* The number of vectors we'll request will be comprised of:
	 *   - Add 1 for "other" cause for Admin Queue events, etc.
	 *   - The number of LAN queue pairs
	 *     - Queues being used for RSS.
	 *       We don't need as many as max_rss_size vectors.
	 *       Use rss_size instead in the calculation since that
	 *       is governed by number of cpus in the system.
	 *     - assumes symmetric Tx/Rx pairing
	 *   - The number of VMDq pairs
	 *   - The CPU count within the NUMA node if iWARP is enabled
	 * Once we count this up, try the request.
	 *
	 * If we can't get what we want, we'll simplify to nearly nothing
	 * and try again. If that still fails, we punt.
	 */
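	/* Worked example with hypothetical numbers: given 10 vectors from
	 * the HW and 8 online CPUs, the code below takes 1 for misc (9
	 * left), caps LAN at min(8, 9 / 2) = 4 (5 left), gives 1 to FD
	 * sideband (4 left), then lets iWARP/VMDq claim from the rest
	 * before topping LAN back up toward the CPU count.
	 */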
	vectors_left = hw->func_caps.num_msix_vectors;
	v_budget = 0;

	/* reserve one vector for miscellaneous handler */
	if (vectors_left) {
		v_budget++;
		vectors_left--;
	}

	/* reserve some vectors for the main PF traffic queues. Initially we
	 * only reserve at most 50% of the available vectors, in the case that
	 * the number of online CPUs is large. This ensures that we can enable
	 * extra features as well. Once we've enabled the other features, we
	 * will use any remaining vectors to reach as close as we can to the
	 * number of online CPUs.
	 */
	cpus = num_online_cpus();
	pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
	vectors_left -= pf->num_lan_msix;

	/* reserve one vector for sideband flow director */
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (vectors_left) {
			pf->num_fdsb_msix = 1;
			v_budget++;
			vectors_left--;
		} else {
			pf->num_fdsb_msix = 0;
		}
	}

	/* can we reserve enough for iWARP? */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		iwarp_requested = pf->num_iwarp_msix;

		if (!vectors_left)
			pf->num_iwarp_msix = 0;
		else if (vectors_left < pf->num_iwarp_msix)
			pf->num_iwarp_msix = 1;
		v_budget += pf->num_iwarp_msix;
		vectors_left -= pf->num_iwarp_msix;
	}

	/* any vectors left over go for VMDq support */
	if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
		if (!vectors_left) {
			pf->num_vmdq_msix = 0;
			pf->num_vmdq_qps = 0;
		} else {
			int vmdq_vecs_wanted =
				pf->num_vmdq_vsis * pf->num_vmdq_qps;
			int vmdq_vecs =
				min_t(int, vectors_left, vmdq_vecs_wanted);

			/* if we're short on vectors for what's desired, we limit
			 * the queues per vmdq. If this is still more than are
			 * available, the user will need to change the number of
			 * queues/vectors used by the PF later with the ethtool
			 * channels command
			 */
			if (vectors_left < vmdq_vecs_wanted) {
				pf->num_vmdq_qps = 1;
				vmdq_vecs_wanted = pf->num_vmdq_vsis;
				vmdq_vecs = min_t(int,
						  vectors_left,
						  vmdq_vecs_wanted);
			}
			pf->num_vmdq_msix = pf->num_vmdq_qps;

			v_budget += vmdq_vecs;
			vectors_left -= vmdq_vecs;
		}
	}

	/* On systems with a large number of SMP cores, we previously limited
	 * the number of vectors for num_lan_msix to be at most 50% of the
	 * available vectors, to allow for other features. Now, we add back
	 * the remaining vectors. However, we ensure that the total
	 * num_lan_msix will not exceed num_online_cpus(). To do this, we
	 * calculate the number of vectors we can add without going over the
	 * cap of CPUs. For systems with a small number of CPUs this will be
	 * zero.
	 */
	extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
	pf->num_lan_msix += extra_vectors;
	vectors_left -= extra_vectors;

	WARN(vectors_left < 0,
	     "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");

	v_budget += pf->num_lan_msix;
	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
				   GFP_KERNEL);
	if (!pf->msix_entries)
		return -ENOMEM;

	for (i = 0; i < v_budget; i++)
		pf->msix_entries[i].entry = i;
	v_actual = i40e_reserve_msix_vectors(pf, v_budget);

	if (v_actual < I40E_MIN_MSIX) {
		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		pci_disable_msix(pf->pdev);
		return -ENODEV;

	} else if (v_actual == I40E_MIN_MSIX) {
		/* Adjust for minimal MSIX use */
		pf->num_vmdq_vsis = 0;
		pf->num_vmdq_qps = 0;
		pf->num_lan_qps = 1;
		pf->num_lan_msix = 1;

	} else if (v_actual != v_budget) {
		/* If we have limited resources, we will start with no vectors
		 * for the special features and then allocate vectors to some
		 * of these features based on the policy and at the end disable
		 * the features that did not get any vectors.
		 */
		int vec;

		dev_info(&pf->pdev->dev,
			 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
			 v_actual, v_budget);
		/* reserve the misc vector */
		vec = v_actual - 1;

		/* Scale vector usage down */
		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
		pf->num_vmdq_vsis = 1;
		pf->num_vmdq_qps = 1;

		/* partition out the remaining vectors */
		switch (vec) {
		case 2:
			pf->num_lan_msix = 1;
			break;
		case 3:
			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
				pf->num_lan_msix = 1;
				pf->num_iwarp_msix = 1;
			} else {
				pf->num_lan_msix = 2;
			}
			break;
		default:
			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
				pf->num_iwarp_msix = min_t(int, (vec / 3),
							   iwarp_requested);
				pf->num_vmdq_vsis = min_t(int, (vec / 3),
							  I40E_DEFAULT_NUM_VMDQ_VSI);
			} else {
				pf->num_vmdq_vsis = min_t(int, (vec / 2),
							  I40E_DEFAULT_NUM_VMDQ_VSI);
			}
			if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
				pf->num_fdsb_msix = 1;
				vec--;
			}
			pf->num_lan_msix = min_t(int,
				(vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
				pf->num_lan_msix);
			pf->num_lan_qps = pf->num_lan_msix;
			break;
		}
	}

	if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
	    (pf->num_fdsb_msix == 0)) {
		dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	}
	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    (pf->num_vmdq_msix == 0)) {
		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
	}

	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
	    (pf->num_iwarp_msix == 0)) {
		dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
	}
	i40e_debug(&pf->hw, I40E_DEBUG_INIT,
		   "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
		   pf->num_lan_msix,
		   pf->num_vmdq_msix * pf->num_vmdq_vsis,
		   pf->num_fdsb_msix,
		   pf->num_iwarp_msix);

	return v_actual;
}

/**
 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the vsi struct
 *
 * We allocate one q_vector. If allocation fails we return -ENOMEM.
 **/
static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
{
	struct i40e_q_vector *q_vector;

	/* allocate q_vector */
	q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	q_vector->vsi = vsi;
	q_vector->v_idx = v_idx;
	cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);

	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi, i40e_napi_poll);

	/* tie q_vector and vsi together */
	vsi->q_vectors[v_idx] = q_vector;

	return 0;
}

/**
 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err, v_idx, num_q_vectors;

	/* if not MSIX, give the one vector only to the LAN VSI */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		num_q_vectors = vsi->num_q_vectors;
	else if (vsi == pf->vsi[pf->lan_vsi])
		num_q_vectors = 1;
	else
		return -EINVAL;

	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
		err = i40e_vsi_alloc_q_vector(vsi, v_idx);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	while (v_idx--)
		i40e_free_q_vector(vsi, v_idx);

	return err;
}

/**
 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 **/
static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
{
	int vectors = 0;
	ssize_t size;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		vectors = i40e_init_msix(pf);
		if (vectors < 0) {
			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
				       I40E_FLAG_IWARP_ENABLED |
				       I40E_FLAG_RSS_ENABLED |
				       I40E_FLAG_DCB_CAPABLE |
				       I40E_FLAG_DCB_ENABLED |
				       I40E_FLAG_SRIOV_ENABLED |
				       I40E_FLAG_FD_SB_ENABLED |
				       I40E_FLAG_FD_ATR_ENABLED |
				       I40E_FLAG_VMDQ_ENABLED);
			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;

			/* rework the queue expectations without MSIX */
			i40e_determine_queue_usage(pf);
		}
	}

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
		dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
		vectors = pci_enable_msi(pf->pdev);
		if (vectors < 0) {
			dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
				 vectors);
			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
		}
		vectors = 1;  /* one MSI or Legacy vector */
	}

	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
		dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");

	/* set up vector assignment tracking */
	size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
	pf->irq_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->irq_pile)
		return -ENOMEM;

	pf->irq_pile->num_entries = vectors;

	/* track first vector for misc interrupts, ignore return */
	(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);

	return 0;
}

/**
 * i40e_restore_interrupt_scheme - Restore the interrupt scheme
 * @pf: private board data structure
 *
 * Restore the interrupt scheme that was cleared when we suspended the
 * device. This should be called during resume to re-allocate the q_vectors
 * and reacquire IRQs.
 */
static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
{
	int err, i;

	/* We cleared the MSI and MSI-X flags when disabling the old interrupt
	 * scheme. We need to re-enable them here in order to attempt to
	 * re-acquire the MSI or MSI-X vectors
	 */
	pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);

	err = i40e_init_interrupt_scheme(pf);
	if (err)
		return err;

	/* Now that we've re-acquired IRQs, we need to remap the vectors and
	 * rings together again.
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
			if (err)
				goto err_unwind;
			i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
		}
	}

	err = i40e_setup_misc_vector(pf);
	if (err)
		goto err_unwind;

	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
		i40e_client_update_msix_info(pf);

	return 0;

err_unwind:
	while (i--) {
		if (pf->vsi[i])
			i40e_vsi_free_q_vectors(pf->vsi[i]);
	}

	return err;
}

/**
 * i40e_setup_misc_vector_for_recovery_mode - Setup the misc vector to handle
 * non queue events in recovery mode
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0 or MSI/legacy, which is used to manage
 * the non-queue interrupts, e.g. AdminQ and errors in recovery mode.
 * This is handled differently than in normal mode since no Tx/Rx resources
 * are being allocated.
 **/
static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
{
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_setup_misc_vector(pf);

		if (err) {
			dev_info(&pf->pdev->dev,
				 "MSI-X misc vector request failed, error %d\n",
				 err);
			return err;
		}
	} else {
		u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED;

		err = request_irq(pf->pdev->irq, i40e_intr, flags,
				  pf->int_name, pf);

		if (err) {
			dev_info(&pf->pdev->dev,
				 "MSI/legacy misc vector request failed, error %d\n",
				 err);
			return err;
		}
		i40e_enable_misc_int_causes(pf);
		i40e_irq_dynamic_enable_icr0(pf);
	}

	return 0;
}

/**
 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors. This is not used
 * when in MSI or Legacy interrupt mode.
 **/
static int i40e_setup_misc_vector(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Only request the IRQ once, the first time through. */
	if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
		err = request_irq(pf->msix_entries[0].vector,
				  i40e_intr, 0, pf->int_name, pf);
		if (err) {
			clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
			dev_info(&pf->pdev->dev,
				 "request_irq for %s failed: %d\n",
				 pf->int_name, err);
			return -EFAULT;
		}
	}

	i40e_enable_misc_int_causes(pf);

	/* associate no queues to the misc vector */
	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1);

	i40e_flush(hw);

	i40e_irq_dynamic_enable_icr0(pf);

	return err;
}

/**
 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
 * @vsi: Pointer to vsi structure
 * @seed: Buffer to store the hash keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Return 0 on success, negative on failure
 */
static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			   u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		ret = i40e_aq_get_rss_key(hw, vsi->id,
			(struct i40e_aqc_get_set_rss_key_data *)seed);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot get RSS key, err %pe aq_err %s\n",
				 ERR_PTR(ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return ret;
		}
	}

	if (lut) {
		bool pf_lut = vsi->type == I40E_VSI_MAIN;

		ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot get RSS lut, err %pe aq_err %s\n",
				 ERR_PTR(ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return ret;
		}
	}

	return ret;
}

/**
 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
 * @vsi: Pointer to vsi structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
			       const u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vf_id = vsi->vf_id;
	u8 i;

	/* Fill out hash function seed */
	if (seed) {
		u32 *seed_dw = (u32 *)seed;

		if (vsi->type == I40E_VSI_MAIN) {
			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
				wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
		} else if (vsi->type == I40E_VSI_SRIOV) {
			for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
				wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
		} else {
			dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
		}
	}

	if (lut) {
		u32 *lut_dw = (u32 *)lut;

		if (vsi->type == I40E_VSI_MAIN) {
			if (lut_size != I40E_HLUT_ARRAY_SIZE)
				return -EINVAL;
			for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
				wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
		} else if (vsi->type == I40E_VSI_SRIOV) {
			if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
				return -EINVAL;
			for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
				wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
		} else {
			dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
		}
	}
	i40e_flush(hw);

	return 0;
}

/**
 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 */
static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
			    u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 i;

	if (seed) {
		u32 *seed_dw = (u32 *)seed;

		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
			seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
	}
	if (lut) {
		u32 *lut_dw = (u32 *)lut;

		if (lut_size != I40E_HLUT_ARRAY_SIZE)
			return -EINVAL;
		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
			lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
	}

	return 0;
}

/**
 * i40e_config_rss - Configure RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 */
int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;

	if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
		return i40e_config_rss_aq(vsi, seed, lut, lut_size);
	else
		return i40e_config_rss_reg(vsi, seed, lut, lut_size);
}

/**
 * i40e_get_rss - Get RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 */
int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;

	if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
		return i40e_get_rss_aq(vsi, seed, lut, lut_size);
	else
		return i40e_get_rss_reg(vsi, seed, lut, lut_size);
}

/**
 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
 * @pf: Pointer to board private structure
 * @lut: Lookup table
 * @rss_table_size: Lookup table size
 * @rss_size: Range of queue number for hashing
 */
void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
		       u16 rss_table_size, u16 rss_size)
{
	u16 i;

	for (i = 0; i < rss_table_size; i++)
		lut[i] = i % rss_size;
}
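
/* Example: with rss_table_size = 128 and rss_size = 4 the table becomes
 * 0, 1, 2, 3, 0, 1, 2, 3, ... so hash buckets are spread round-robin
 * across the first four queues.
 */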

/**
 * i40e_pf_config_rss - Prepare for RSS if used
 * @pf: board private structure
 **/
static int i40e_pf_config_rss(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_val;
	u64 hena;
	int ret;

	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
		((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= i40e_pf_get_default_rss_hena(pf);

	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
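
	/* hena is a 64-bit hash-enable bitmask (one bit per packet type)
	 * split across the two 32-bit PFQF_HENA registers written above
	 */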

	/* Determine the RSS table size based on the hardware capabilities */
	reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
	reg_val = (pf->rss_table_size == 512) ?
			(reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
			(reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);

	/* Determine the RSS size of the VSI */
	if (!vsi->rss_size) {
		u16 qcount;
		/* If the firmware does something weird during VSI init, we
		 * could end up with zero TCs. Check for that to avoid
		 * divide-by-zero. It probably won't pass traffic, but it also
		 * won't panic.
		 */
		qcount = vsi->num_queue_pairs /
			 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
	}
	if (!vsi->rss_size)
		return -EINVAL;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Use user configured lut if there is one, otherwise use default */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);

	/* Use user configured hash key if there is one, otherwise
	 * use default.
	 */
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);

	return ret;
}

/**
 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
 * @pf: board private structure
 * @queue_count: the requested queue count for rss.
 *
 * Returns 0 if RSS is not enabled; otherwise returns the final RSS queue
 * count, which may differ from the requested queue count.
 * Note: expects to be called while under rtnl_lock()
 **/
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	int new_rss_size;

	if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
		return 0;

	queue_count = min_t(int, queue_count, num_online_cpus());
	new_rss_size = min_t(int, queue_count, pf->rss_size_max);

	if (queue_count != vsi->num_queue_pairs) {
		u16 qcount;

		vsi->req_queue_pairs = queue_count;
		i40e_prep_for_reset(pf);
		if (test_bit(__I40E_IN_REMOVE, pf->state))
			return pf->alloc_rss_size;

		pf->alloc_rss_size = new_rss_size;

		i40e_reset_and_rebuild(pf, true, true);

		/* Discard the user configured hash keys and lut, if less
		 * queues are enabled.
		 */
		if (queue_count < vsi->rss_size) {
			i40e_clear_rss_config_user(vsi);
			dev_dbg(&pf->pdev->dev,
				"discard user configured hash keys and lut\n");
		}

		/* Reset vsi->rss_size, as number of enabled queues changed */
		qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);

		i40e_pf_config_rss(pf);
	}
	dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
		 vsi->req_queue_pairs, pf->rss_size_max);
	return pf->alloc_rss_size;
}
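
/* Usage sketch (illustrative only, assuming the usual ethtool plumbing):
 * this helper sits under the ethtool channel-count path, so a request like
 *
 *	# ethtool -L eth0 combined 8
 *
 * ends up here with queue_count = 8, clamped to num_online_cpus() and
 * pf->rss_size_max before the reset/rebuild is issued.
 */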

/**
 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
 * @pf: board private structure
 **/
int i40e_get_partition_bw_setting(struct i40e_pf *pf)
{
	bool min_valid, max_valid;
	u32 max_bw, min_bw;
	int status;

	status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
					   &min_valid, &max_valid);

	if (!status) {
		if (min_valid)
			pf->min_bw = min_bw;
		if (max_valid)
			pf->max_bw = max_bw;
	}

	return status;
}

/**
 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
 * @pf: board private structure
 **/
int i40e_set_partition_bw_setting(struct i40e_pf *pf)
{
	struct i40e_aqc_configure_partition_bw_data bw_data;
	int status;

	memset(&bw_data, 0, sizeof(bw_data));

	/* Set the valid bit for this PF */
	bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
	bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
	bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;

	/* Set the new bandwidths */
	status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);

	return status;
}

/**
 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
 * @pf: board private structure
 **/
int i40e_commit_partition_bw_setting(struct i40e_pf *pf)
{
	/* Commit temporary BW setting to permanent NVM image */
	enum i40e_admin_queue_err last_aq_status;
	u16 nvm_word;
	int ret;

	if (pf->hw.partition_id != 1) {
		dev_info(&pf->pdev->dev,
			 "Commit BW only works on partition 1! This is partition %d",
			 pf->hw.partition_id);
		ret = -EOPNOTSUPP;
		goto bw_commit_out;
	}

	/* Acquire NVM for read access */
	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for read access, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}

	/* Read word 0x10 of NVM - SW compatibility word 1 */
	ret = i40e_aq_read_nvm(&pf->hw,
			       I40E_SR_NVM_CONTROL_WORD,
			       0x10, sizeof(nvm_word), &nvm_word,
			       false, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "NVM read error, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}

	/* Wait a bit for NVM release to complete */
	msleep(50);

	/* Acquire NVM for write access */
	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for write access, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}
	/* Write it back out unchanged to initiate update NVM,
	 * which will force a write of the shadow (alt) RAM to
	 * the NVM - thus storing the bandwidth values permanently.
	 */
	ret = i40e_aq_update_nvm(&pf->hw,
				 I40E_SR_NVM_CONTROL_WORD,
				 0x10, sizeof(nvm_word),
				 &nvm_word, true, 0, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret)
		dev_info(&pf->pdev->dev,
			 "BW settings NOT SAVED, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
bw_commit_out:

	return ret;
}

/**
 * i40e_is_total_port_shutdown_enabled - read NVM and return value
 * if total port shutdown feature is enabled for this PF
 * @pf: board private structure
 **/
static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
{
#define I40E_TOTAL_PORT_SHUTDOWN_ENABLED	BIT(4)
#define I40E_FEATURES_ENABLE_PTR		0x2A
#define I40E_CURRENT_SETTING_PTR		0x2B
#define I40E_LINK_BEHAVIOR_WORD_OFFSET		0x2D
#define I40E_LINK_BEHAVIOR_WORD_LENGTH		0x1
#define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED	BIT(0)
#define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH	4
	u16 sr_emp_sr_settings_ptr = 0;
	u16 features_enable = 0;
	u16 link_behavior = 0;
	int read_status = 0;
	bool ret = false;

	read_status = i40e_read_nvm_word(&pf->hw,
					 I40E_SR_EMP_SR_SETTINGS_PTR,
					 &sr_emp_sr_settings_ptr);
	if (read_status)
		goto err_nvm;
	read_status = i40e_read_nvm_word(&pf->hw,
					 sr_emp_sr_settings_ptr +
					 I40E_FEATURES_ENABLE_PTR,
					 &features_enable);
	if (read_status)
		goto err_nvm;
	if (I40E_TOTAL_PORT_SHUTDOWN_ENABLED & features_enable) {
		read_status = i40e_read_nvm_module_data(&pf->hw,
							I40E_SR_EMP_SR_SETTINGS_PTR,
							I40E_CURRENT_SETTING_PTR,
							I40E_LINK_BEHAVIOR_WORD_OFFSET,
							I40E_LINK_BEHAVIOR_WORD_LENGTH,
							&link_behavior);
		if (read_status)
			goto err_nvm;
		link_behavior >>= (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH);
		ret = I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED & link_behavior;
	}
	return ret;

err_nvm:
	dev_warn(&pf->pdev->dev,
		 "total-port-shutdown feature is off due to read nvm error: %pe\n",
		 ERR_PTR(read_status));
	return ret;
}
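
/* Worked example (illustrative only): each port owns a 4-bit field in
 * link_behavior. For pf->hw.port == 2 the shift above moves bits 8..11
 * down to bits 0..3, so the BIT(0) test reads what was originally bit 8:
 *
 *	link_behavior = 0x0100;		// bit 8 set in the NVM word
 *	link_behavior >>= 2 * 4;	// now 0x0001
 *	ret = 0x0001 & BIT(0);		// true: OS-forced link behavior
 */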

/**
 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
 * @pf: board private structure to initialize
 *
 * i40e_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int i40e_sw_init(struct i40e_pf *pf)
{
	int err = 0;
	int size;
	u16 pow;

	/* Set default capability flags */
	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
		    I40E_FLAG_MSI_ENABLED |
		    I40E_FLAG_MSIX_ENABLED;

	/* Set default ITR */
	pf->rx_itr_default = I40E_ITR_RX_DEF;
	pf->tx_itr_default = I40E_ITR_TX_DEF;

	/* Depending on PF configurations, it is possible that the RSS
	 * maximum might end up larger than the available queues
	 */
	pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
	pf->alloc_rss_size = 1;
	pf->rss_table_size = pf->hw.func_caps.rss_table_size;
	pf->rss_size_max = min_t(int, pf->rss_size_max,
				 pf->hw.func_caps.num_tx_qp);

	/* find the next higher power-of-2 of num cpus */
	pow = roundup_pow_of_two(num_online_cpus());
	pf->rss_size_max = min_t(int, pf->rss_size_max, pow);

	if (pf->hw.func_caps.rss) {
		pf->flags |= I40E_FLAG_RSS_ENABLED;
		pf->alloc_rss_size = min_t(int, pf->rss_size_max,
					   num_online_cpus());
	}

	/* MFP mode enabled */
	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
		pf->flags |= I40E_FLAG_MFP_ENABLED;
		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
		if (i40e_get_partition_bw_setting(pf)) {
			dev_warn(&pf->pdev->dev,
				 "Could not get partition bw settings\n");
		} else {
			dev_info(&pf->pdev->dev,
				 "Partition BW Min = %8.8x, Max = %8.8x\n",
				 pf->min_bw, pf->max_bw);

			/* nudge the Tx scheduler */
			i40e_set_partition_bw_setting(pf);
		}
	}

	if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
		if (pf->flags & I40E_FLAG_MFP_ENABLED &&
		    pf->hw.num_partitions > 1)
			dev_info(&pf->pdev->dev,
				 "Flow Director Sideband mode Disabled in MFP mode\n");
		else
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
		pf->fdir_pf_filter_count =
				 pf->hw.func_caps.fd_filters_guaranteed;
		pf->hw.fdir_shared_filter_count =
				 pf->hw.func_caps.fd_filters_best_effort;
	}

	if (pf->hw.mac.type == I40E_MAC_X722) {
		pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
				    I40E_HW_128_QP_RSS_CAPABLE |
				    I40E_HW_ATR_EVICT_CAPABLE |
				    I40E_HW_WB_ON_ITR_CAPABLE |
				    I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
				    I40E_HW_NO_PCI_LINK_CHECK |
				    I40E_HW_USE_SET_LLDP_MIB |
				    I40E_HW_GENEVE_OFFLOAD_CAPABLE |
				    I40E_HW_PTP_L4_CAPABLE |
				    I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
				    I40E_HW_OUTER_UDP_CSUM_CAPABLE);

#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
		if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
		    I40E_FDEVICT_PCTYPE_DEFAULT) {
			dev_warn(&pf->pdev->dev,
				 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
			pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
		}
	} else if ((pf->hw.aq.api_maj_ver > 1) ||
		   ((pf->hw.aq.api_maj_ver == 1) &&
		    (pf->hw.aq.api_min_ver > 4))) {
		/* Supported in FW API version higher than 1.4 */
		pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
	}

	/* Enable HW ATR eviction if possible */
	if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
		pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;

	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
	     (pf->hw.aq.fw_maj_ver < 4))) {
		pf->hw_features |= I40E_HW_RESTART_AUTONEG;
		/* No DCB support for FW < v4.33 */
		pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
	}

	/* Disable FW LLDP if FW < v4.3 */
	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	     (pf->hw.aq.fw_maj_ver < 4)))
		pf->hw_features |= I40E_HW_STOP_FW_LLDP;

	/* Use the FW Set LLDP MIB API if FW > v4.40 */
	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
	     (pf->hw.aq.fw_maj_ver >= 5)))
		pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;

	/* Enable PTP L4 if FW > v6.0 */
	if (pf->hw.mac.type == I40E_MAC_XL710 &&
	    pf->hw.aq.fw_maj_ver >= 6)
		pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;

	if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
		pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
	}

	if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
		pf->flags |= I40E_FLAG_IWARP_ENABLED;
		/* IWARP needs one extra vector for CQP just like MISC. */
		pf->num_iwarp_msix = (int)num_online_cpus() + 1;
	}
	/* Stopping FW LLDP engine is supported on XL710 and X722
	 * starting from FW versions determined in i40e_init_adminq.
	 * Stopping the FW LLDP engine is not supported on XL710
	 * if NPAR is functioning so unset this hw flag in this case.
	 */
	if (pf->hw.mac.type == I40E_MAC_XL710 &&
	    pf->hw.func_caps.npar_enable &&
	    (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
		pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;

#ifdef CONFIG_PCI_IOV
	if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
		pf->flags |= I40E_FLAG_SRIOV_ENABLED;
		pf->num_req_vfs = min_t(int,
					pf->hw.func_caps.num_vfs,
					I40E_MAX_VF_COUNT);
	}
#endif /* CONFIG_PCI_IOV */
	pf->eeprom_version = 0xDEAD;
	pf->lan_veb = I40E_NO_VEB;
	pf->lan_vsi = I40E_NO_VSI;

	/* By default FW has this off for performance reasons */
	pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;

	/* set up queue assignment tracking */
	size = sizeof(struct i40e_lump_tracking)
		+ (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
	pf->qp_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->qp_pile) {
		err = -ENOMEM;
		goto sw_init_done;
	}
	pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;

	pf->tx_timeout_recovery_level = 1;

	if (pf->hw.mac.type != I40E_MAC_X722 &&
	    i40e_is_total_port_shutdown_enabled(pf)) {
		/* Link down on close must be on when total port shutdown
		 * is enabled for a given port
		 */
		pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED |
			      I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED);
		dev_info(&pf->pdev->dev,
			 "total-port-shutdown was enabled, link-down-on-close is forced on\n");
	}
	mutex_init(&pf->switch_mutex);

sw_init_done:
	return err;
}

/**
 * i40e_set_ntuple - set the ntuple feature flag and take action
 * @pf: board private structure to initialize
 * @features: the feature set that the stack is suggesting
 *
 * returns a bool to indicate if reset needs to happen
 **/
bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
{
	bool need_reset = false;

	/* Check if Flow Director n-tuple support was enabled or disabled. If
	 * the state changed, we need to reset.
	 */
	if (features & NETIF_F_NTUPLE) {
		/* Enable filters and mark for reset */
		if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
			need_reset = true;
		/* enable FD_SB only if there is MSI-X vector and no cloud
		 * filters exist
		 */
		if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
			pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
		}
	} else {
		/* turn off filters, mark for reset and clear SW filter list */
		if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
			need_reset = true;
			i40e_fdir_filter_exit(pf);
		}
		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
		clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;

		/* reset fd counters */
		pf->fd_add_err = 0;
		pf->fd_atr_cnt = 0;
		/* if ATR was auto disabled it can be re-enabled. */
		if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
			if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
			    (I40E_DEBUG_FD & pf->hw.debug_mask))
				dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
	}
	return need_reset;
}

/**
 * i40e_clear_rss_lut - clear the rx hash lookup table
 * @vsi: the VSI being configured
 **/
static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vf_id = vsi->vf_id;
	u8 i;

	if (vsi->type == I40E_VSI_MAIN) {
		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
			wr32(hw, I40E_PFQF_HLUT(i), 0);
	} else if (vsi->type == I40E_VSI_SRIOV) {
		for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
			i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
	} else {
		dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
	}
}

/**
 * i40e_set_loopback - turn on/off loopback mode on underlying PF
 * @vsi: ptr to VSI
 * @ena: flag to indicate the on/off setting
 */
static int i40e_set_loopback(struct i40e_vsi *vsi, bool ena)
{
	bool if_running = netif_running(vsi->netdev) &&
			  !test_and_set_bit(__I40E_VSI_DOWN, vsi->state);
	int ret;

	if (if_running)
		i40e_down(vsi);

	ret = i40e_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
	if (ret)
		netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
	if (if_running)
		i40e_up(vsi);

	return ret;
}

/**
 * i40e_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 * Note: expects to be called while under rtnl_lock()
 **/
static int i40e_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	bool need_reset;

	if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
		i40e_pf_config_rss(pf);
	else if (!(features & NETIF_F_RXHASH) &&
		 netdev->features & NETIF_F_RXHASH)
		i40e_clear_rss_lut(vsi);

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		i40e_vlan_stripping_enable(vsi);
	else
		i40e_vlan_stripping_disable(vsi);

	if (!(features & NETIF_F_HW_TC) &&
	    (netdev->features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
		dev_err(&pf->pdev->dev,
			"Offloaded tc filters active, can't turn hw_tc_offload off");
		return -EINVAL;
	}

	if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt)
		i40e_del_all_macvlans(vsi);

	need_reset = i40e_set_ntuple(pf, features);

	if (need_reset)
		i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);

	if ((features ^ netdev->features) & NETIF_F_LOOPBACK)
		return i40e_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));

	return 0;
}
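
/* Usage sketch (illustrative only): these toggles normally arrive via
 * ethtool feature flags, e.g. roughly
 *
 *	# ethtool -K eth0 rxhash off	(clears the RSS LUT above)
 *	# ethtool -K eth0 ntuple on	(may trigger a PF reset)
 *
 * The stack calls this ndo under rtnl_lock with the merged feature set.
 */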

static int i40e_udp_tunnel_set_port(struct net_device *netdev,
				    unsigned int table, unsigned int idx,
				    struct udp_tunnel_info *ti)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_hw *hw = &np->vsi->back->hw;
	u8 type, filter_index;
	int ret;

	type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? I40E_AQC_TUNNEL_TYPE_VXLAN :
						   I40E_AQC_TUNNEL_TYPE_NGE;

	ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index,
				     NULL);
	if (ret) {
		netdev_info(netdev, "add UDP port failed, err %pe aq_err %s\n",
			    ERR_PTR(ret),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
		return -EIO;
	}

	udp_tunnel_nic_set_port_priv(netdev, table, idx, filter_index);
	return 0;
}

static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
				      unsigned int table, unsigned int idx,
				      struct udp_tunnel_info *ti)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_hw *hw = &np->vsi->back->hw;
	int ret;

	ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
	if (ret) {
		netdev_info(netdev, "delete UDP port failed, err %pe aq_err %s\n",
			    ERR_PTR(ret),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
		return -EIO;
	}

	return 0;
}
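
/* Usage sketch (illustrative only): these callbacks are driven by the
 * udp_tunnel_nic infrastructure rather than called directly; creating a
 * VXLAN device, e.g. roughly
 *
 *	# ip link add vxlan0 type vxlan id 42 dev eth0 dstport 4789
 *
 * eventually invokes i40e_udp_tunnel_set_port() with the port to offload,
 * and the returned filter_index is stashed as ti->hw_priv for the unset.
 */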

static int i40e_get_phys_port_id(struct net_device *netdev,
				 struct netdev_phys_item_id *ppid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_hw *hw = &pf->hw;

	if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
		return -EOPNOTSUPP;

	ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
	memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);

	return 0;
}

/**
 * i40e_ndo_fdb_add - add an entry to the hardware database
 * @ndm: the input from the stack
 * @tb: pointer to array of nladdr (unused)
 * @dev: the net device pointer
 * @addr: the MAC address entry being added
 * @vid: VLAN ID
 * @flags: instructions from stack about fdb operation
 * @extack: netlink extended ack, unused currently
 */
static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr, u16 vid,
			    u16 flags,
			    struct netlink_ext_ack *extack)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_pf *pf = np->vsi->back;
	int err = 0;

	if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
		return -EOPNOTSUPP;

	if (vid) {
		pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
		return -EINVAL;
	}

	/* Hardware does not support aging addresses so if a
	 * ndm_state is given only allow permanent addresses
	 */
	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		netdev_info(dev, "FDB only supports static addresses\n");
		return -EINVAL;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_add_excl(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_add_excl(dev, addr);
	else
		err = -EINVAL;

	/* Only return duplicate errors if NLM_F_EXCL is set */
	if (err == -EEXIST && !(flags & NLM_F_EXCL))
		err = 0;

	return err;
}
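
/* Usage sketch (illustrative only): this ndo is reached from userspace via
 * the iproute2 fdb commands, e.g. something along the lines of
 *
 *	# bridge fdb add 00:11:22:33:44:55 dev eth0 permanent
 *
 * Entries with a non-permanent ndm_state are rejected above, since the
 * hardware cannot age addresses out.
 */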

/**
 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
 * @dev: the netdev being configured
 * @nlh: RTNL message
 * @flags: bridge flags
 * @extack: netlink extended ack
 *
 * Inserts a new hardware bridge if not already created and
 * enables the bridging mode requested (VEB or VEPA). If the
 * hardware bridge has already been inserted and the request
 * is to change the mode then that requires a PF reset to
 * allow rebuild of the components with required hardware
 * bridge mode enabled.
 *
 * Note: expects to be called while under rtnl_lock()
 **/
static int i40e_ndo_bridge_setlink(struct net_device *dev,
				   struct nlmsghdr *nlh,
				   u16 flags,
				   struct netlink_ext_ack *extack)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_veb *veb = NULL;
	struct nlattr *attr, *br_spec;
	int i, rem;

	/* Only for PF VSI for now */
	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
		return -EOPNOTSUPP;

	/* Find the HW bridge for PF VSI */
	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
			veb = pf->veb[i];
	}

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		__u16 mode;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		mode = nla_get_u16(attr);
		if ((mode != BRIDGE_MODE_VEPA) &&
		    (mode != BRIDGE_MODE_VEB))
			return -EINVAL;

		/* Insert a new HW bridge */
		if (!veb) {
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
			if (veb) {
				veb->bridge_mode = mode;
				i40e_config_bridge_mode(veb);
			} else {
				/* No Bridge HW offload available */
				return -ENOENT;
			}
			break;
		} else if (mode != veb->bridge_mode) {
			/* Existing HW bridge but different mode needs reset */
			veb->bridge_mode = mode;
			/* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
			if (mode == BRIDGE_MODE_VEB)
				pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			else
				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
			break;
		}
	}

	return 0;
}
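
/* Usage sketch (illustrative only): the bridge mode is normally driven from
 * userspace with iproute2, e.g. roughly
 *
 *	# bridge link set dev eth0 hwmode veb
 *
 * Switching an already-inserted bridge between VEB and VEPA takes the PF
 * reset path above so the switch components are rebuilt in the new mode.
 */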

/**
 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
 * @skb: skb buff
 * @pid: process id
 * @seq: RTNL message seq #
 * @dev: the netdev being configured
 * @filter_mask: unused
 * @nlflags: netlink flags passed in
 *
 * Return the mode in which the hardware bridge is operating,
 * i.e. VEB or VEPA.
 **/
static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				   struct net_device *dev,
				   u32 __always_unused filter_mask,
				   int nlflags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_veb *veb = NULL;
	int i;

	/* Only for PF VSI for now */
	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
		return -EOPNOTSUPP;

	/* Find the HW bridge for the PF VSI */
	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
			veb = pf->veb[i];
	}

	if (!veb)
		return 0;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
				       0, 0, nlflags, filter_mask, NULL);
}

/**
 * i40e_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buff
 * @dev: This physical port's netdev
 * @features: Offload features that the stack believes apply
 **/
static netdev_features_t i40e_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes. If it is then we need to drop support for GSO.
	 */
	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
		features &= ~NETIF_F_GSO_MASK;

	/* MACLEN can support at most 63 words */
	len = skb_network_header(skb) - skb->data;
	if (len & ~(63 * 2))
		goto out_err;

	/* IPLEN and EIPLEN can support at most 127 dwords */
	len = skb_transport_header(skb) - skb_network_header(skb);
	if (len & ~(127 * 4))
		goto out_err;

	if (skb->encapsulation) {
		/* L4TUNLEN can support 127 words */
		len = skb_inner_network_header(skb) - skb_transport_header(skb);
		if (len & ~(127 * 2))
			goto out_err;

		/* IPLEN can support at most 127 dwords */
		len = skb_inner_transport_header(skb) -
		      skb_inner_network_header(skb);
		if (len & ~(127 * 4))
			goto out_err;
	}

	/* No need to validate L4LEN as TCP is the only protocol with a
	 * flexible value and we support all possible values supported
	 * by TCP, which is at most 15 dwords
	 */

	return features;
out_err:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
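
/* Worked example (illustrative only): the "len & ~(63 * 2)" test accepts
 * only even MAC header lengths up to 126 bytes, since 63 * 2 == 0x7e and
 * the complement keeps bit 0 plus every bit above bit 6:
 *
 *	len = 14;	// plain Ethernet: 14 & ~0x7e == 0, accepted
 *	len = 18;	// VLAN tagged:    18 & ~0x7e == 0, accepted
 *	len = 130;	// too long:      130 & ~0x7e != 0, offloads stripped
 */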

/**
 * i40e_xdp_setup - add/remove an XDP program
 * @vsi: VSI to be changed
 * @prog: XDP program
 * @extack: netlink extended ack
 **/
static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
			  struct netlink_ext_ack *extack)
{
	int frame_size = i40e_max_vsi_frame_size(vsi, prog);
	struct i40e_pf *pf = vsi->back;
	struct bpf_prog *old_prog;
	bool need_reset;
	int i;

	/* Don't allow frames that span over multiple buffers */
	if (vsi->netdev->mtu > frame_size - I40E_PACKET_HDR_PAD) {
		NL_SET_ERR_MSG_MOD(extack, "MTU too large for linear frames and XDP prog does not support frags");
		return -EINVAL;
	}

	/* When turning XDP on->off/off->on we reset and rebuild the rings. */
	need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);

	if (need_reset)
		i40e_prep_for_reset(pf);

	/* VSI shall be deleted in a moment, just return EINVAL */
	if (test_bit(__I40E_IN_REMOVE, pf->state))
		return -EINVAL;

	old_prog = xchg(&vsi->xdp_prog, prog);

	if (need_reset) {
		if (!prog) {
			xdp_features_clear_redirect_target(vsi->netdev);
			/* Wait until ndo_xsk_wakeup completes. */
			synchronize_rcu();
		}
		i40e_reset_and_rebuild(pf, true, true);
	}

	if (!i40e_enabled_xdp_vsi(vsi) && prog) {
		if (i40e_realloc_rx_bi_zc(vsi, true))
			return -ENOMEM;
	} else if (i40e_enabled_xdp_vsi(vsi) && !prog) {
		if (i40e_realloc_rx_bi_zc(vsi, false))
			return -ENOMEM;
	}

	for (i = 0; i < vsi->num_queue_pairs; i++)
		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);

	if (old_prog)
		bpf_prog_put(old_prog);

	/* Kick start the NAPI context if there is an AF_XDP socket open
	 * on that queue id. This so that receiving will start.
	 */
	if (need_reset && prog) {
		for (i = 0; i < vsi->num_queue_pairs; i++)
			if (vsi->xdp_rings[i]->xsk_pool)
				(void)i40e_xsk_wakeup(vsi->netdev, i,
						      XDP_WAKEUP_RX);
		xdp_features_set_redirect_target(vsi->netdev, true);
	}

	return 0;
}
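
/* Usage sketch (illustrative only): attaching a program from userspace,
 * e.g. roughly
 *
 *	# ip link set dev eth0 xdp obj xdp_prog.o sec xdp
 *
 * reaches this function through i40e_xdp()/ndo_bpf below. Note that a
 * plain program swap (XDP already on) avoids the reset/rebuild path.
 */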

/**
 * i40e_enter_busy_conf - Enters busy config state
 * @vsi: vsi
 *
 * Returns 0 on success, <0 for failure.
 **/
static int i40e_enter_busy_conf(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int timeout = 50;

	while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
		timeout--;
		if (!timeout)
			return -EBUSY;
		usleep_range(1000, 2000);
	}

	return 0;
}

/**
 * i40e_exit_busy_conf - Exits busy config state
 * @vsi: vsi
 **/
static void i40e_exit_busy_conf(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	clear_bit(__I40E_CONFIG_BUSY, pf->state);
}

/**
 * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair
 * @vsi: vsi
 * @queue_pair: queue pair
 **/
static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
{
	memset(&vsi->rx_rings[queue_pair]->rx_stats, 0,
	       sizeof(vsi->rx_rings[queue_pair]->rx_stats));
	memset(&vsi->tx_rings[queue_pair]->stats, 0,
	       sizeof(vsi->tx_rings[queue_pair]->stats));
	if (i40e_enabled_xdp_vsi(vsi)) {
		memset(&vsi->xdp_rings[queue_pair]->stats, 0,
		       sizeof(vsi->xdp_rings[queue_pair]->stats));
	}
}

/**
 * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair
 * @vsi: vsi
 * @queue_pair: queue pair
 **/
static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
{
	i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
	if (i40e_enabled_xdp_vsi(vsi)) {
		/* Make sure that in-progress ndo_xdp_xmit calls are
		 * completed.
		 */
		synchronize_rcu();
		i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
	}
	i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
}

/**
 * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair
 * @vsi: vsi
 * @queue_pair: queue pair
 * @enable: true for enable, false for disable
 **/
static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair,
					bool enable)
{
	struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
	struct i40e_q_vector *q_vector = rxr->q_vector;

	if (!vsi->netdev)
		return;

	/* All rings in a qp belong to the same qvector. */
	if (q_vector->rx.ring || q_vector->tx.ring) {
		if (enable)
			napi_enable(&q_vector->napi);
		else
			napi_disable(&q_vector->napi);
	}
}

/**
 * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair
 * @vsi: vsi
 * @queue_pair: queue pair
 * @enable: true for enable, false for disable
 *
 * Returns 0 on success, <0 on failure.
 **/
static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair,
					bool enable)
{
	struct i40e_pf *pf = vsi->back;
	int pf_q, ret = 0;

	pf_q = vsi->base_queue + queue_pair;
	ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q,
				     false /*is xdp*/, enable);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "VSI seid %d Tx ring %d %sable timeout\n",
			 vsi->seid, pf_q, (enable ? "en" : "dis"));
		return ret;
	}

	i40e_control_rx_q(pf, pf_q, enable);
	ret = i40e_pf_rxq_wait(pf, pf_q, enable);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "VSI seid %d Rx ring %d %sable timeout\n",
			 vsi->seid, pf_q, (enable ? "en" : "dis"));
		return ret;
	}

	/* Due to HW errata, on Rx disable only, the register can
	 * indicate done before it really is. Needs 50ms to be sure
	 */
	if (!enable)
		mdelay(50);

	if (!i40e_enabled_xdp_vsi(vsi))
		return ret;

	ret = i40e_control_wait_tx_q(vsi->seid, pf,
				     pf_q + vsi->alloc_queue_pairs,
				     true /*is xdp*/, enable);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "VSI seid %d XDP Tx ring %d %sable timeout\n",
			 vsi->seid, pf_q, (enable ? "en" : "dis"));
	}

	return ret;
}

/**
 * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair
 * @vsi: vsi
 * @queue_pair: queue_pair
 **/
static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
{
	struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;

	/* All rings in a qp belong to the same qvector. */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
	else
		i40e_irq_dynamic_enable_icr0(pf);

	i40e_flush(hw);
}

/**
 * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair
 * @vsi: vsi
 * @queue_pair: queue_pair
 **/
static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
{
	struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;

	/* For simplicity, instead of removing the qp interrupt causes
	 * from the interrupt linked list, we simply disable the interrupt, and
	 * leave the list intact.
	 *
	 * All rings in a qp belong to the same qvector.
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;

		wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
		i40e_flush(hw);
		synchronize_irq(pf->msix_entries[intpf].vector);
	} else {
		/* Legacy and MSI mode - this stops all interrupt handling */
		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
		i40e_flush(hw);
		synchronize_irq(pf->pdev->irq);
	}
}

/**
 * i40e_queue_pair_disable - Disables a queue pair
 * @vsi: vsi
 * @queue_pair: queue pair
 *
 * Returns 0 on success, <0 on failure.
 **/
int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
{
	int err;

	err = i40e_enter_busy_conf(vsi);
	if (err)
		return err;

	i40e_queue_pair_disable_irq(vsi, queue_pair);
	err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
	i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
	i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
	i40e_queue_pair_clean_rings(vsi, queue_pair);
	i40e_queue_pair_reset_stats(vsi, queue_pair);

	return err;
}

/**
 * i40e_queue_pair_enable - Enables a queue pair
 * @vsi: vsi
 * @queue_pair: queue pair
 *
 * Returns 0 on success, <0 on failure.
 **/
int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair)
{
	int err;

	err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]);
	if (err)
		return err;

	if (i40e_enabled_xdp_vsi(vsi)) {
		err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]);
		if (err)
			return err;
	}

	err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]);
	if (err)
		return err;

	err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */);
	i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */);
	i40e_queue_pair_enable_irq(vsi, queue_pair);

	i40e_exit_busy_conf(vsi);

	return err;
}
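
/* Usage sketch (illustrative only): these two helpers are meant to bracket
 * a per-queue reconfiguration, as the AF_XDP pool setup path does, roughly:
 *
 *	err = i40e_queue_pair_disable(vsi, qid);
 *	if (err)
 *		return err;
 *	// attach or detach the xsk_buff_pool for this queue id here
 *	err = i40e_queue_pair_enable(vsi, qid);
 *
 * so that only one queue pair is quiesced instead of the whole VSI.
 */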

/**
 * i40e_xdp - implements ndo_bpf for i40e
 * @dev: netdevice
 * @xdp: XDP command
 **/
static int i40e_xdp(struct net_device *dev,
		    struct netdev_bpf *xdp)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return i40e_xdp_setup(vsi, xdp->prog, xdp->extack);
	case XDP_SETUP_XSK_POOL:
		return i40e_xsk_pool_setup(vsi, xdp->xsk.pool,
					   xdp->xsk.queue_id);
	default:
		return -EINVAL;
	}
}

static const struct net_device_ops i40e_netdev_ops = {
	.ndo_open		= i40e_open,
	.ndo_stop		= i40e_close,
	.ndo_start_xmit		= i40e_lan_xmit_frame,
	.ndo_get_stats64	= i40e_get_netdev_stats_struct,
	.ndo_set_rx_mode	= i40e_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= i40e_set_mac,
	.ndo_change_mtu		= i40e_change_mtu,
	.ndo_eth_ioctl		= i40e_ioctl,
	.ndo_tx_timeout		= i40e_tx_timeout,
	.ndo_vlan_rx_add_vid	= i40e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= i40e_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= i40e_netpoll,
#endif
	.ndo_setup_tc		= __i40e_setup_tc,
	.ndo_select_queue	= i40e_lan_select_queue,
	.ndo_set_features	= i40e_set_features,
	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
	.ndo_get_vf_stats	= i40e_get_vf_stats,
	.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,
	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofchk,
	.ndo_set_vf_trust	= i40e_ndo_set_vf_trust,
	.ndo_get_phys_port_id	= i40e_get_phys_port_id,
	.ndo_fdb_add		= i40e_ndo_fdb_add,
	.ndo_features_check	= i40e_features_check,
	.ndo_bridge_getlink	= i40e_ndo_bridge_getlink,
	.ndo_bridge_setlink	= i40e_ndo_bridge_setlink,
	.ndo_bpf		= i40e_xdp,
	.ndo_xdp_xmit		= i40e_xdp_xmit,
	.ndo_xsk_wakeup		= i40e_xsk_wakeup,
	.ndo_dfwd_add_station	= i40e_fwd_add,
	.ndo_dfwd_del_station	= i40e_fwd_del,
};

/**
 * i40e_config_netdev - Setup the netdev flags
 * @vsi: the VSI being configured
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_config_netdev(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_netdev_priv *np;
	struct net_device *netdev;
	u8 broadcast[ETH_ALEN];
	u8 mac_addr[ETH_ALEN];
	int etherdev_size;
	netdev_features_t hw_enc_features;
	netdev_features_t hw_features;

	etherdev_size = sizeof(struct i40e_netdev_priv);
	netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
	if (!netdev)
		return -ENOMEM;

	vsi->netdev = netdev;
	np = netdev_priv(netdev);
	np->vsi = vsi;

	hw_enc_features = NETIF_F_SG			|
			  NETIF_F_HW_CSUM		|
			  NETIF_F_HIGHDMA		|
			  NETIF_F_SOFT_FEATURES		|
			  NETIF_F_TSO			|
			  NETIF_F_TSO_ECN		|
			  NETIF_F_TSO6			|
			  NETIF_F_GSO_GRE		|
			  NETIF_F_GSO_GRE_CSUM		|
			  NETIF_F_GSO_PARTIAL		|
			  NETIF_F_GSO_IPXIP4		|
			  NETIF_F_GSO_IPXIP6		|
			  NETIF_F_GSO_UDP_TUNNEL	|
			  NETIF_F_GSO_UDP_TUNNEL_CSUM	|
			  NETIF_F_GSO_UDP_L4		|
			  NETIF_F_SCTP_CRC		|
			  NETIF_F_RXHASH		|
			  NETIF_F_RXCSUM		|
			  0;

	if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
		netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic;

	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;

	netdev->hw_enc_features |= hw_enc_features;

	/* record features VLANs can make use of */
	netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;

#define I40E_GSO_PARTIAL_FEATURES	(NETIF_F_GSO_GRE |		\
					 NETIF_F_GSO_GRE_CSUM |		\
					 NETIF_F_GSO_IPXIP4 |		\
					 NETIF_F_GSO_IPXIP6 |		\
					 NETIF_F_GSO_UDP_TUNNEL |	\
					 NETIF_F_GSO_UDP_TUNNEL_CSUM)

	netdev->gso_partial_features = I40E_GSO_PARTIAL_FEATURES;
	netdev->features |= NETIF_F_GSO_PARTIAL |
			    I40E_GSO_PARTIAL_FEATURES;

	netdev->mpls_features |= NETIF_F_SG;
	netdev->mpls_features |= NETIF_F_HW_CSUM;
	netdev->mpls_features |= NETIF_F_TSO;
	netdev->mpls_features |= NETIF_F_TSO6;
	netdev->mpls_features |= I40E_GSO_PARTIAL_FEATURES;

	/* enable macvlan offloads */
	netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;

	hw_features = hw_enc_features		|
		      NETIF_F_HW_VLAN_CTAG_TX	|
		      NETIF_F_HW_VLAN_CTAG_RX;

	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;

	netdev->hw_features |= hw_features | NETIF_F_LOOPBACK;

	netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

	netdev->features &= ~NETIF_F_HW_TC;

	if (vsi->type == I40E_VSI_MAIN) {
		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
		ether_addr_copy(mac_addr, hw->mac.perm_addr);
		/* The following steps are necessary for two reasons. First,
		 * some older NVM configurations load a default MAC-VLAN
		 * filter that will accept any tagged packet, and we want to
		 * replace this with a normal filter. Additionally, it is
		 * possible our MAC address was provided by the platform using
		 * Open Firmware or similar.
		 *
		 * Thus, we need to remove the default filter and install one
		 * specific to the MAC address.
		 */
		i40e_rm_default_mac_filter(vsi, mac_addr);
		spin_lock_bh(&vsi->mac_filter_hash_lock);
		i40e_add_mac_filter(vsi, mac_addr);
		spin_unlock_bh(&vsi->mac_filter_hash_lock);

		netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
				       NETDEV_XDP_ACT_REDIRECT |
				       NETDEV_XDP_ACT_XSK_ZEROCOPY |
				       NETDEV_XDP_ACT_RX_SG;
		netdev->xdp_zc_max_segs = I40E_MAX_BUFFER_TXD;
	} else {
		/* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
		 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
		 * the end, which is 4 bytes long, so force truncation of the
		 * original name by IFNAMSIZ - 4
		 */
		snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
			 IFNAMSIZ - 4,
			 pf->vsi[pf->lan_vsi]->netdev->name);
		eth_random_addr(mac_addr);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		i40e_add_mac_filter(vsi, mac_addr);
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
	}

	/* Add the broadcast filter so that we initially will receive
	 * broadcast packets. Note that when a new VLAN is first added the
	 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
	 * specific filters as part of transitioning into "vlan" operation.
	 * When more VLANs are added, the driver will copy each existing MAC
	 * filter and add it for the new VLAN.
	 *
	 * Broadcast filters are handled specially by
	 * i40e_sync_filters_subtask, as the driver must set the broadcast
	 * promiscuous bit instead of adding this directly as a MAC/VLAN
	 * filter. The subtask will update the correct broadcast promiscuous
	 * bits as VLANs become active or inactive.
	 */
	eth_broadcast_addr(broadcast);
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_add_mac_filter(vsi, broadcast);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	eth_hw_addr_set(netdev, mac_addr);
	ether_addr_copy(netdev->perm_addr, mac_addr);

	/* i40iw_net_event() reads 16 bytes from neigh->primary_key */
	netdev->neigh_priv_len = sizeof(u32) * 4;

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	/* Setup netdev TC information */
	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);

	netdev->netdev_ops = &i40e_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	i40e_set_ethtool_ops(netdev);

	/* MTU range: 68 - 9706 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;

	return 0;
}

/**
 * i40e_vsi_delete - Delete a VSI from the switch
 * @vsi: the VSI being removed
 **/
static void i40e_vsi_delete(struct i40e_vsi *vsi)
{
	/* remove default VSI is not allowed */
	if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
		return;

	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
}

/**
 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
 * @vsi: the VSI being queried
 *
 * Returns 1 if the HW bridge mode is VEB, 0 if it is VEPA (the default),
 * or -ENOENT if no VEB is associated with the bridge.
 **/
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
{
	struct i40e_veb *veb;
	struct i40e_pf *pf = vsi->back;

	/* Uplink is not a bridge so default to VEB */
	if (vsi->veb_idx >= I40E_MAX_VEB)
		return 1;

	veb = pf->veb[vsi->veb_idx];
	if (!veb) {
		dev_info(&pf->pdev->dev,
			 "There is no veb associated with the bridge\n");
		return -ENOENT;
	}

	/* Uplink is a bridge in VEPA mode */
	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
		return 0;

	/* Uplink is a bridge in VEB mode */
	return 1;
}
13938
13939/**
13940 * i40e_add_vsi - Add a VSI to the switch
13941 * @vsi: the VSI being configured
13942 *
13943 * This initializes a VSI context depending on the VSI type to be added and
13944 * passes it down to the add_vsi aq command.
13945 **/
13946static int i40e_add_vsi(struct i40e_vsi *vsi)
13947{
13948 int ret = -ENODEV;
13949 struct i40e_pf *pf = vsi->back;
13950 struct i40e_hw *hw = &pf->hw;
13951 struct i40e_vsi_context ctxt;
13952 struct i40e_mac_filter *f;
13953 struct hlist_node *h;
13954 int bkt;
13955
13956 u8 enabled_tc = 0x1; /* TC0 enabled */
13957 int f_count = 0;
13958
13959 memset(&ctxt, 0, sizeof(ctxt));
13960 switch (vsi->type) {
13961 case I40E_VSI_MAIN:
13962 /* The PF's main VSI is already setup as part of the
13963 * device initialization, so we'll not bother with
13964 * the add_vsi call, but we will retrieve the current
13965 * VSI context.
13966 */
13967 ctxt.seid = pf->main_vsi_seid;
13968 ctxt.pf_num = pf->hw.pf_id;
13969 ctxt.vf_num = 0;
13970 ret = i40e_aq_get_vsi_params(hw: &pf->hw, vsi_ctx: &ctxt, NULL);
13971 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
13972 if (ret) {
13973 dev_info(&pf->pdev->dev,
13974 "couldn't get PF vsi config, err %pe aq_err %s\n",
13975 ERR_PTR(ret),
13976 i40e_aq_str(&pf->hw,
13977 pf->hw.aq.asq_last_status));
13978 return -ENOENT;
13979 }
13980 vsi->info = ctxt.info;
13981 vsi->info.valid_sections = 0;
13982
13983 vsi->seid = ctxt.seid;
13984 vsi->id = ctxt.vsi_number;
13985
13986 enabled_tc = i40e_pf_get_tc_map(pf);
13987
13988 /* Source pruning is enabled by default, so the flag is
13989 * negative logic - if it's set, we need to fiddle with
13990 * the VSI to disable source pruning.
13991 */
13992 if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
13993 memset(&ctxt, 0, sizeof(ctxt));
13994 ctxt.seid = pf->main_vsi_seid;
13995 ctxt.pf_num = pf->hw.pf_id;
13996 ctxt.vf_num = 0;
13997 ctxt.info.valid_sections |=
13998 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
13999 ctxt.info.switch_id =
14000 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
14001 ret = i40e_aq_update_vsi_params(hw, vsi_ctx: &ctxt, NULL);
14002 if (ret) {
14003 dev_info(&pf->pdev->dev,
14004 "update vsi failed, err %d aq_err %s\n",
14005 ret,
14006 i40e_aq_str(&pf->hw,
14007 pf->hw.aq.asq_last_status));
14008 ret = -ENOENT;
14009 goto err;
14010 }
14011 }
14012
14013 /* MFP mode setup queue map and update VSI */
14014 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
14015 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
14016 memset(&ctxt, 0, sizeof(ctxt));
14017 ctxt.seid = pf->main_vsi_seid;
14018 ctxt.pf_num = pf->hw.pf_id;
14019 ctxt.vf_num = 0;
14020 i40e_vsi_setup_queue_map(vsi, ctxt: &ctxt, enabled_tc, is_add: false);
14021 ret = i40e_aq_update_vsi_params(hw, vsi_ctx: &ctxt, NULL);
14022 if (ret) {
14023 dev_info(&pf->pdev->dev,
				 "update vsi failed, err %pe aq_err %s\n",
				 ERR_PTR(ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			ret = -ENOENT;
			goto err;
		}
		/* update the local VSI info queue map */
		i40e_vsi_update_queue_map(vsi, &ctxt);
		vsi->info.valid_sections = 0;
	} else {
		/* Default/Main VSI is only enabled for TC0,
		 * reconfigure it to enable all TCs that are
		 * available on the port in SFP mode.
		 * For the MFP case the iSCSI PF would use this
		 * flow to enable LAN+iSCSI TC.
		 */
		ret = i40e_vsi_config_tc(vsi, enabled_tc);
		if (ret) {
			/* Single TC condition is not fatal,
			 * message and continue
			 */
			dev_info(&pf->pdev->dev,
				 "failed to configure TCs for main VSI tc_map 0x%08x, err %pe aq_err %s\n",
				 enabled_tc,
				 ERR_PTR(ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
		}
	}
	break;

	case I40E_VSI_FDIR:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
		    (i40e_is_vsi_uplink_mode_veb(vsi))) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_VMDQ2:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;

		/* This VSI is connected to VEB so the switch_id
		 * should be set to zero by default.
		 */
		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}

		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_SRIOV:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_VF;

		/* This VSI is connected to VEB so the switch_id
		 * should be set to zero by default.
		 */
		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}

		if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
			ctxt.info.queueing_opt_flags |=
				(I40E_AQ_VSI_QUE_OPT_TCP_ENA |
				 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
		}

		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
		if (pf->vf[vsi->vf_id].spoofchk) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
			ctxt.info.sec_flags |=
				(I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
				 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
		}
		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_IWARP:
		/* send down message to iWARP */
		break;

	default:
		return -ENODEV;
	}

	if (vsi->type != I40E_VSI_MAIN) {
		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add vsi failed, err %pe aq_err %s\n",
				 ERR_PTR(ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			ret = -ENOENT;
			goto err;
		}
		vsi->info = ctxt.info;
		vsi->info.valid_sections = 0;
		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;
	}

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	vsi->active_filters = 0;
	/* If macvlan filters already exist, force them to get loaded */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		f->state = I40E_FILTER_NEW;
		f_count++;
	}
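	/* Editorial note: resetting every entry to I40E_FILTER_NEW means the
	 * next i40e_sync_vsi_filters() pass treats each filter as not yet
	 * programmed and re-adds it to the freshly (re)created VSI in
	 * firmware, which is what "force them to get loaded" above refers to.
	 */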
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
	clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);

	if (f_count) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
	}

	/* Update VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get vsi bw info, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		/* VSI is already added so not tearing that up */
		ret = 0;
	}

err:
	return ret;
}

/**
 * i40e_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Returns 0 on success or < 0 on error
 **/
int i40e_vsi_release(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	struct i40e_veb *veb = NULL;
	struct i40e_pf *pf;
	u16 uplink_seid;
	int i, n, bkt;

	pf = vsi->back;

	/* release of a VEB-owner or last VSI is not allowed */
	if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
		dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
			 vsi->seid, vsi->uplink_seid);
		return -ENODEV;
	}
	if (vsi == pf->vsi[pf->lan_vsi] &&
	    !test_bit(__I40E_DOWN, pf->state)) {
		dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
		return -ENODEV;
	}
	set_bit(__I40E_VSI_RELEASING, vsi->state);
	uplink_seid = vsi->uplink_seid;
	if (vsi->type == I40E_VSI_MAIN)
		i40e_devlink_destroy_port(pf);
	if (vsi->type != I40E_VSI_SRIOV) {
		if (vsi->netdev_registered) {
			vsi->netdev_registered = false;
			if (vsi->netdev) {
				/* results in a call to i40e_close() */
				unregister_netdev(vsi->netdev);
			}
		} else {
			i40e_vsi_close(vsi);
		}
		i40e_vsi_disable_irq(vsi);
	}

	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* clear the sync flag on all filters */
	if (vsi->netdev) {
		__dev_uc_unsync(vsi->netdev, NULL);
		__dev_mc_unsync(vsi->netdev, NULL);
	}

	/* make sure any remaining filters are marked for deletion */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
		__i40e_del_filter(vsi, f);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	i40e_sync_vsi_filters(vsi);

	i40e_vsi_delete(vsi);
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev) {
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_vsi_clear_rings(vsi);
	i40e_vsi_clear(vsi);

	/* If this was the last thing on the VEB, except for the
	 * controlling VSI, remove the VEB, which puts the controlling
	 * VSI onto the next level down in the switch.
	 *
	 * Well, okay, there's one more exception here: don't remove
	 * the orphan VEBs yet. We'll wait for an explicit remove request
	 * from up the network stack.
	 */
	for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] &&
		    pf->vsi[i]->uplink_seid == uplink_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			n++; /* count the VSIs */
		}
	}
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == uplink_seid)
			n++; /* count the VEBs */
		if (pf->veb[i]->seid == uplink_seid)
			veb = pf->veb[i];
	}
	if (n == 0 && veb && veb->uplink_seid != 0)
		i40e_veb_release(veb);

	return 0;
}

/**
 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
 * @vsi: ptr to the VSI
 *
 * This should only be called after i40e_vsi_mem_alloc() which allocates the
 * corresponding SW VSI structure and initializes num_queue_pairs for the
 * newly allocated VSI.
 *
 * Returns 0 on success or negative on failure
 **/
static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
{
	int ret = -ENOENT;
	struct i40e_pf *pf = vsi->back;

	if (vsi->q_vectors[0]) {
		dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
			 vsi->seid);
		return -EEXIST;
	}

	if (vsi->base_vector) {
		dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
			 vsi->seid, vsi->base_vector);
		return -EEXIST;
	}

	ret = i40e_vsi_alloc_q_vectors(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
			 vsi->num_q_vectors, vsi->seid, ret);
		vsi->num_q_vectors = 0;
		goto vector_setup_out;
	}

	/* In Legacy mode, we do not have to get any other vector since we
	 * piggyback on the misc/ICR0 for queue interrupts.
	 */
	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return ret;
	if (vsi->num_q_vectors)
		vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
						 vsi->num_q_vectors, vsi->idx);
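	/* As elsewhere in this file, i40e_get_lump() hands back the base
	 * index of a contiguous block carved out of the pile, or a negative
	 * errno when no block of the requested size is free, which is what
	 * the check below catches.
	 */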
	if (vsi->base_vector < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
			 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
		i40e_vsi_free_q_vectors(vsi);
		ret = -ENOENT;
		goto vector_setup_out;
	}

vector_setup_out:
	return ret;
}

/**
 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
 * @vsi: pointer to the vsi.
 *
 * This re-allocates a vsi's queue resources.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
{
	u16 alloc_queue_pairs;
	struct i40e_pf *pf;
	u8 enabled_tc;
	int ret;

	if (!vsi)
		return NULL;

	pf = vsi->back;

	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_vsi_clear_rings(vsi);

	i40e_vsi_free_arrays(vsi, false);
	i40e_set_num_rings_in_vsi(vsi);
	ret = i40e_vsi_alloc_arrays(vsi, false);
	if (ret)
		goto err_vsi;

	alloc_queue_pairs = vsi->alloc_queue_pairs *
			    (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
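	/* Illustrative numbers: a VSI with alloc_queue_pairs == 8 asks the
	 * pile for 16 queues once XDP is enabled, because every queue pair
	 * gains a dedicated XDP Tx queue alongside its regular Tx/Rx pair.
	 */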

	ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err %d\n",
			 alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* Update the FW view of the VSI. Force a reset of TC and queue
	 * layout configurations.
	 */
	enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
	pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
	pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
	i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	if (vsi->type == I40E_VSI_MAIN)
		i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);

	/* assign it some queues */
	ret = i40e_alloc_rings(vsi);
	if (ret)
		goto err_rings;

	/* map all of the rings to the q_vectors */
	i40e_vsi_map_rings_to_vectors(vsi);
	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->type == I40E_VSI_MAIN)
		i40e_devlink_destroy_port(pf);
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
	return NULL;
}

/**
 * i40e_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @type: VSI type
 * @uplink_seid: the switch element to link to
 * @param1: usage depends upon VSI type. For VF types, indicates VF id
 *
 * This allocates the sw VSI structure and its queue resources, then adds a
 * VSI to the identified VEB.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
				u16 uplink_seid, u32 param1)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_veb *veb = NULL;
	u16 alloc_queue_pairs;
	int ret, i;
	int v_idx;

	/* The requested uplink_seid must be either
	 *     - the PF's port seid
	 *         no VEB is needed because this is the PF
	 *         or this is a Flow Director special case VSI
	 *     - seid of an existing VEB
	 *     - seid of a VSI that owns an existing VEB
	 *     - seid of a VSI that doesn't own a VEB
	 *         a new VEB is created and the VSI becomes the owner
	 *     - seid of the PF VSI, which is what creates the first VEB
	 *         this is a special case of the previous
	 *
	 * Find which uplink_seid we were given and create a new VEB if needed
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
			veb = pf->veb[i];
			break;
		}
	}

	if (!veb && uplink_seid != pf->mac_seid) {

		for (i = 0; i < pf->num_alloc_vsi; i++) {
			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
				vsi = pf->vsi[i];
				break;
			}
		}
		if (!vsi) {
			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
				 uplink_seid);
			return NULL;
		}

		if (vsi->uplink_seid == pf->mac_seid)
			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		if (veb) {
			if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
				dev_info(&vsi->back->pdev->dev,
					 "New VSI creation error, uplink seid of LAN VSI expected.\n");
				return NULL;
			}
			/* We come up by default in VEPA mode if SRIOV is not
			 * already enabled, in which case we can't force VEPA
			 * mode.
			 */
			if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
				veb->bridge_mode = BRIDGE_MODE_VEPA;
				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			}
			i40e_config_bridge_mode(veb);
		}
		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
			if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
				veb = pf->veb[i];
		}
		if (!veb) {
			dev_info(&pf->pdev->dev, "couldn't add VEB\n");
			return NULL;
		}

		vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
		uplink_seid = veb->seid;
	}

	/* get vsi sw struct */
	v_idx = i40e_vsi_mem_alloc(pf, type);
	if (v_idx < 0)
		goto err_alloc;
	vsi = pf->vsi[v_idx];
	if (!vsi)
		goto err_alloc;
	vsi->type = type;
	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);

	if (type == I40E_VSI_MAIN)
		pf->lan_vsi = v_idx;
	else if (type == I40E_VSI_SRIOV)
		vsi->vf_id = param1;
	/* assign it some queues */
	alloc_queue_pairs = vsi->alloc_queue_pairs *
			    (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);

	ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err=%d\n",
			 alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* get a VSI from the hardware */
	vsi->uplink_seid = uplink_seid;
	ret = i40e_add_vsi(vsi);
	if (ret)
		goto err_vsi;

	switch (vsi->type) {
	/* setup the netdev if needed */
	case I40E_VSI_MAIN:
	case I40E_VSI_VMDQ2:
		ret = i40e_config_netdev(vsi);
		if (ret)
			goto err_netdev;
		ret = i40e_netif_set_realnum_tx_rx_queues(vsi);
		if (ret)
			goto err_netdev;
		if (vsi->type == I40E_VSI_MAIN) {
			ret = i40e_devlink_create_port(pf);
			if (ret)
				goto err_netdev;
			SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
		}
		ret = register_netdev(vsi->netdev);
		if (ret)
			goto err_dl_port;
		vsi->netdev_registered = true;
		netif_carrier_off(vsi->netdev);
#ifdef CONFIG_I40E_DCB
		/* Setup DCB netlink interface */
		i40e_dcbnl_setup(vsi);
#endif /* CONFIG_I40E_DCB */
		fallthrough;
	case I40E_VSI_FDIR:
		/* set up vectors and rings if needed */
		ret = i40e_vsi_setup_vectors(vsi);
		if (ret)
			goto err_msix;

		ret = i40e_alloc_rings(vsi);
		if (ret)
			goto err_rings;

		/* map all of the rings to the q_vectors */
		i40e_vsi_map_rings_to_vectors(vsi);

		i40e_vsi_reset_stats(vsi);
		break;
	default:
		/* no netdev or rings for the other VSI types */
		break;
	}

	if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
	    (vsi->type == I40E_VSI_VMDQ2)) {
		ret = i40e_vsi_config_rss(vsi);
	}
	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
err_msix:
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
err_dl_port:
	if (vsi->type == I40E_VSI_MAIN)
		i40e_devlink_destroy_port(pf);
err_netdev:
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
err_alloc:
	return NULL;
}

/**
 * i40e_veb_get_bw_info - Query VEB BW information
 * @veb: the veb to query
 *
 * Query the Tx scheduler BW configuration data for given VEB
 **/
static int i40e_veb_get_bw_info(struct i40e_veb *veb)
{
	struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
	struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 tc_bw_max;
	int ret = 0;
	int i;

	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
						  &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw config failed, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
		goto out;
	}

	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
						   &ets_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw ets config failed, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
		goto out;
	}

	veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
	veb->bw_max_quanta = ets_data.tc_bw_max;
	veb->is_abs_credits = bw_data.absolute_credits_enable;
	veb->enabled_tc = ets_data.tc_valid_bits;
	tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
		    (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
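	/* Worked example with invented values: if tc_bw_max[0] == 0x4321 and
	 * tc_bw_max[1] == 0x0765, the combined word is 0x07654321 and the
	 * 3-bit max quanta extracted for TC2 below is
	 * (0x07654321 >> 8) & 0x7 == 0x3.
	 */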
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
		veb->bw_tc_limit_credits[i] =
			le16_to_cpu(bw_data.tc_bw_limits[i]);
		veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i * 4)) & 0x7);
	}

out:
	return ret;
}

/**
 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
 * @pf: board private structure
 *
 * On error: returns error code (negative)
 * On success: returns VEB index in PF (positive)
 **/
static int i40e_veb_mem_alloc(struct i40e_pf *pf)
{
	int ret = -ENOENT;
	struct i40e_veb *veb;
	int i;

	/* Need to protect the allocation of switch elements at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VEB list may be fragmented if VEB creation/destruction has
	 * been happening. We can afford to do a quick scan to look
	 * for any free slots in the list.
	 *
	 * find next empty veb slot, looping back around if necessary
	 */
	i = 0;
	while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
		i++;
	if (i >= I40E_MAX_VEB) {
		ret = -ENOMEM;
		goto err_alloc_veb; /* out of VEB slots! */
	}

	veb = kzalloc(sizeof(*veb), GFP_KERNEL);
	if (!veb) {
		ret = -ENOMEM;
		goto err_alloc_veb;
	}
	veb->pf = pf;
	veb->idx = i;
	veb->enabled_tc = 1;

	pf->veb[i] = veb;
	ret = i;
err_alloc_veb:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}

/**
 * i40e_switch_branch_release - Delete a branch of the switch tree
 * @branch: where to start deleting
 *
 * This uses recursion to find the tips of the branch to be
 * removed, deleting until we get back to and can delete this VEB.
 **/
static void i40e_switch_branch_release(struct i40e_veb *branch)
{
	struct i40e_pf *pf = branch->pf;
	u16 branch_seid = branch->seid;
	u16 veb_idx = branch->idx;
	int i;

	/* release any VEBs on this VEB - RECURSION */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == branch->seid)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Release the VSIs on this VEB, but not the owner VSI.
	 *
	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
	 *       the VEB itself, so don't use (*branch) after this loop.
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (!pf->vsi[i])
			continue;
		if (pf->vsi[i]->uplink_seid == branch_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			i40e_vsi_release(pf->vsi[i]);
		}
	}

	/* There's one corner case where the VEB might not have been
	 * removed, so double check it here and remove it if needed.
	 * This case happens if the veb was created from the debugfs
	 * commands and no VSIs were added to it.
	 */
	if (pf->veb[veb_idx])
		i40e_veb_release(pf->veb[veb_idx]);
}

/**
 * i40e_veb_clear - remove veb struct
 * @veb: the veb to remove
 **/
static void i40e_veb_clear(struct i40e_veb *veb)
{
	if (!veb)
		return;

	if (veb->pf) {
		struct i40e_pf *pf = veb->pf;

		mutex_lock(&pf->switch_mutex);
		if (pf->veb[veb->idx] == veb)
			pf->veb[veb->idx] = NULL;
		mutex_unlock(&pf->switch_mutex);
	}

	kfree(veb);
}

/**
 * i40e_veb_release - Delete a VEB and free its resources
 * @veb: the VEB being removed
 **/
void i40e_veb_release(struct i40e_veb *veb)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_pf *pf;
	int i, n = 0;

	pf = veb->pf;

	/* find the remaining VSI and check for extras */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
			n++;
			vsi = pf->vsi[i];
		}
	}
	if (n != 1) {
		dev_info(&pf->pdev->dev,
			 "can't remove VEB %d with %d VSIs left\n",
			 veb->seid, n);
		return;
	}

	/* move the remaining VSI to uplink veb */
	vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
	if (veb->uplink_seid) {
		vsi->uplink_seid = veb->uplink_seid;
		if (veb->uplink_seid == pf->mac_seid)
			vsi->veb_idx = I40E_NO_VEB;
		else
			vsi->veb_idx = veb->veb_idx;
	} else {
		/* floating VEB */
		vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
		vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
	}

	i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
	i40e_veb_clear(veb);
}

/**
 * i40e_add_veb - create the VEB in the switch
 * @veb: the VEB to be instantiated
 * @vsi: the controlling VSI
 **/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = veb->pf;
	bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
	int ret;

	ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
			      veb->enabled_tc, false,
			      &veb->seid, enable_stats, NULL);

	/* get a VEB from the hardware */
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't add VEB, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}

	/* get statistics counter */
	ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
					 &veb->stats_idx, NULL, NULL, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB statistics idx, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB bw info, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
		return -ENOENT;
	}

	vsi->uplink_seid = veb->seid;
	vsi->veb_idx = veb->idx;
	vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;

	return 0;
}

/**
 * i40e_veb_setup - Set up a VEB
 * @pf: board private structure
 * @flags: VEB setup flags
 * @uplink_seid: the switch element to link to
 * @vsi_seid: the initial VSI seid
 * @enabled_tc: Enabled TC bit-map
 *
 * This allocates the sw VEB structure and links it into the switch.
 * It is possible and legal for this to be a duplicate of an already
 * existing VEB. It is also possible for both uplink and vsi seids
 * to be zero, in order to create a floating VEB.
 *
 * Returns pointer to the successfully allocated VEB sw struct on
 * success, otherwise returns NULL on failure.
 **/
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
				u16 uplink_seid, u16 vsi_seid,
				u8 enabled_tc)
{
	struct i40e_veb *veb, *uplink_veb = NULL;
	int vsi_idx, veb_idx;
	int ret;

	/* if one seid is 0, the other must be 0 to create a floating relay */
	if ((uplink_seid == 0 || vsi_seid == 0) &&
	    (uplink_seid + vsi_seid != 0)) {
		dev_info(&pf->pdev->dev,
			 "one, not both, seids are 0: uplink=%d vsi=%d\n",
			 uplink_seid, vsi_seid);
		return NULL;
	}

	/* make sure there is such a vsi and uplink */
	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
			break;
	if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
			 vsi_seid);
		return NULL;
	}

	if (uplink_seid && uplink_seid != pf->mac_seid) {
		for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
			if (pf->veb[veb_idx] &&
			    pf->veb[veb_idx]->seid == uplink_seid) {
				uplink_veb = pf->veb[veb_idx];
				break;
			}
		}
		if (!uplink_veb) {
			dev_info(&pf->pdev->dev,
				 "uplink seid %d not found\n", uplink_seid);
			return NULL;
		}
	}

	/* get veb sw struct */
	veb_idx = i40e_veb_mem_alloc(pf);
	if (veb_idx < 0)
		goto err_alloc;
	veb = pf->veb[veb_idx];
	veb->flags = flags;
	veb->uplink_seid = uplink_seid;
	veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
	veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);

	/* create the VEB in the switch */
	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
	if (ret)
		goto err_veb;
	if (vsi_idx == pf->lan_vsi)
		pf->lan_veb = veb->idx;

	return veb;

err_veb:
	i40e_veb_clear(veb);
err_alloc:
	return NULL;
}

/**
 * i40e_setup_pf_switch_element - set PF vars based on switch type
 * @pf: board private structure
 * @ele: element we are building info from
 * @num_reported: total number of elements
 * @printconfig: should we print the contents
 *
 * Helper function to extract a few useful SEID values.
 **/
static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
					 struct i40e_aqc_switch_config_element_resp *ele,
					 u16 num_reported, bool printconfig)
{
	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
	u8 element_type = ele->element_type;
	u16 seid = le16_to_cpu(ele->seid);

	if (printconfig)
		dev_info(&pf->pdev->dev,
			 "type=%d seid=%d uplink=%d downlink=%d\n",
			 element_type, seid, uplink_seid, downlink_seid);

	switch (element_type) {
	case I40E_SWITCH_ELEMENT_TYPE_MAC:
		pf->mac_seid = seid;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VEB:
		/* Main VEB? */
		if (uplink_seid != pf->mac_seid)
			break;
		if (pf->lan_veb >= I40E_MAX_VEB) {
			int v;

			/* find existing or else empty VEB */
			for (v = 0; v < I40E_MAX_VEB; v++) {
				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
					pf->lan_veb = v;
					break;
				}
			}
			if (pf->lan_veb >= I40E_MAX_VEB) {
				v = i40e_veb_mem_alloc(pf);
				if (v < 0)
					break;
				pf->lan_veb = v;
			}
		}
		if (pf->lan_veb >= I40E_MAX_VEB)
			break;

		pf->veb[pf->lan_veb]->seid = seid;
		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
		pf->veb[pf->lan_veb]->pf = pf;
		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VSI:
		if (num_reported != 1)
			break;
		/* This is immediately after a reset so we can assume this is
		 * the PF's VSI
		 */
		pf->mac_seid = uplink_seid;
		pf->pf_seid = downlink_seid;
		pf->main_vsi_seid = seid;
		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "pf_seid=%d main_vsi_seid=%d\n",
				 pf->pf_seid, pf->main_vsi_seid);
		break;
	case I40E_SWITCH_ELEMENT_TYPE_PF:
	case I40E_SWITCH_ELEMENT_TYPE_VF:
	case I40E_SWITCH_ELEMENT_TYPE_EMP:
	case I40E_SWITCH_ELEMENT_TYPE_BMC:
	case I40E_SWITCH_ELEMENT_TYPE_PE:
	case I40E_SWITCH_ELEMENT_TYPE_PA:
		/* ignore these for now */
		break;
	default:
		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
			 element_type, seid);
		break;
	}
}

/**
 * i40e_fetch_switch_configuration - Get switch config from firmware
 * @pf: board private structure
 * @printconfig: should we print the contents
 *
 * Get the current switch configuration from the device and
 * extract a few useful SEID values.
 **/
int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
{
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u16 next_seid = 0;
	int ret = 0;
	u8 *aq_buf;
	int i;

	aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
	if (!aq_buf)
		return -ENOMEM;

	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
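	/* The firmware reports the switch configuration one buffer at a
	 * time: each i40e_aq_get_switch_config() call below refills aq_buf
	 * and updates next_seid, which acts as a continuation cookie, and
	 * the walk is complete once the firmware returns a next_seid of
	 * zero.
	 */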
	do {
		u16 num_reported, num_total;

		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
						I40E_AQ_LARGE_BUF,
						&next_seid, NULL);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "get switch config failed err %d aq_err %s\n",
				 ret,
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			kfree(aq_buf);
			return -ENOENT;
		}

		num_reported = le16_to_cpu(sw_config->header.num_reported);
		num_total = le16_to_cpu(sw_config->header.num_total);

		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "header: %d reported %d total\n",
				 num_reported, num_total);

		for (i = 0; i < num_reported; i++) {
			struct i40e_aqc_switch_config_element_resp *ele =
				&sw_config->element[i];

			i40e_setup_pf_switch_element(pf, ele, num_reported,
						     printconfig);
		}
	} while (next_seid != 0);

	kfree(aq_buf);
	return ret;
}

/**
 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized
 * @lock_acquired: indicates whether or not the lock has been acquired
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
	u16 flags = 0;
	int ret;

	/* find out what's out there already */
	ret = i40e_fetch_switch_configuration(pf, false);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't fetch switch config, err %pe aq_err %s\n",
			 ERR_PTR(ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return ret;
	}
	i40e_pf_reset_stats(pf);

	/* set the switch config bit for the whole device to
	 * support limited promisc or true promisc
	 * when user requests promisc. The default is limited
	 * promisc.
	 */

	if ((pf->hw.pf_id == 0) &&
	    !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
		flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
		pf->last_sw_conf_flags = flags;
	}

	if (pf->hw.pf_id == 0) {
		u16 valid_flags;

		valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
		ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
						NULL);
		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
			dev_info(&pf->pdev->dev,
				 "couldn't set switch config bits, err %pe aq_err %s\n",
				 ERR_PTR(ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			/* not a fatal problem, just keep going */
		}
		pf->last_sw_conf_valid_flags = valid_flags;
	}

	/* first time setup */
	if (pf->lan_vsi == I40E_NO_VSI || reinit) {
		struct i40e_vsi *vsi = NULL;
		u16 uplink_seid;

		/* Set up the PF VSI associated with the PF's main VSI
		 * that is already in the HW switch
		 */
		if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
			uplink_seid = pf->veb[pf->lan_veb]->seid;
		else
			uplink_seid = pf->mac_seid;
		if (pf->lan_vsi == I40E_NO_VSI)
			vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
		else if (reinit)
			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
			i40e_cloud_filter_exit(pf);
			i40e_fdir_teardown(pf);
			return -EAGAIN;
		}
	} else {
		/* force a reset of TC and queue layout configurations */
		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;

		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	}
	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);

	i40e_fdir_sb_setup(pf);

	/* Setup static PF queue filter control settings */
	ret = i40e_setup_pf_filter_control(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
			 ret);
		/* Failure here should not stop continuing other steps */
	}

	/* enable RSS in the HW, even for only one queue, as the stack can use
	 * the hash
	 */
	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
		i40e_pf_config_rss(pf);

	/* fill in link information and enable LSE reporting */
	i40e_link_event(pf);

	/* Initialize user-specific link properties */
	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
				  I40E_AQ_AN_COMPLETED) ? true : false);

	i40e_ptp_init(pf);

	if (!lock_acquired)
		rtnl_lock();

	/* repopulate tunnel port filters */
	udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev);

	if (!lock_acquired)
		rtnl_unlock();

	return ret;
}

/**
 * i40e_determine_queue_usage - Work out queue distribution
 * @pf: board private structure
 **/
static void i40e_determine_queue_usage(struct i40e_pf *pf)
{
	int queues_left;
	int q_max;

	pf->num_lan_qps = 0;

	/* Find the max queues to be put into basic use. We'll always be
	 * using TC0, whether or not DCB is running, and TC0 will get the
	 * big RSS set.
	 */
	queues_left = pf->hw.func_caps.num_tx_qp;

	if ((queues_left == 1) ||
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
		/* one qp for PF, no queues for anything else */
		queues_left = 0;
		pf->alloc_rss_size = pf->num_lan_qps = 1;

		/* make sure all the fancies are disabled */
		pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
			       I40E_FLAG_IWARP_ENABLED |
			       I40E_FLAG_FD_SB_ENABLED |
			       I40E_FLAG_FD_ATR_ENABLED |
			       I40E_FLAG_DCB_CAPABLE |
			       I40E_FLAG_DCB_ENABLED |
			       I40E_FLAG_SRIOV_ENABLED |
			       I40E_FLAG_VMDQ_ENABLED);
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
				  I40E_FLAG_FD_SB_ENABLED |
				  I40E_FLAG_FD_ATR_ENABLED |
				  I40E_FLAG_DCB_CAPABLE))) {
		/* one qp for PF */
		pf->alloc_rss_size = pf->num_lan_qps = 1;
		queues_left -= pf->num_lan_qps;

		pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
			       I40E_FLAG_IWARP_ENABLED |
			       I40E_FLAG_FD_SB_ENABLED |
			       I40E_FLAG_FD_ATR_ENABLED |
			       I40E_FLAG_DCB_ENABLED |
			       I40E_FLAG_VMDQ_ENABLED);
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	} else {
		/* Not enough queues for all TCs */
		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
			pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
				       I40E_FLAG_DCB_ENABLED);
			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
		}

		/* start from the larger of rss_size_max and the online CPU
		 * count, then clamp to the qp and MSI-X vector limits
		 */
		q_max = max_t(int, pf->rss_size_max, num_online_cpus());
		q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
		q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
		pf->num_lan_qps = q_max;
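		/* Example with invented capacities: rss_size_max = 64 on a
		 * 16-CPU host with 128 Tx queues and 129 MSI-X vectors gives
		 * q_max = min(max(64, 16), 128, 129) = 64 LAN queue pairs.
		 */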

		queues_left -= pf->num_lan_qps;
	}

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (queues_left > 1) {
			queues_left -= 1; /* save 1 queue for FD */
		} else {
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
			dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
		}
	}

	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
		pf->num_req_vfs = min_t(int, pf->num_req_vfs,
					(queues_left / pf->num_vf_qps));
		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
	}

	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
					  (queues_left / pf->num_vmdq_qps));
		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
	}

	pf->queues_left = queues_left;
	dev_dbg(&pf->pdev->dev,
		"qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
		pf->hw.func_caps.num_tx_qp,
		!!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
		pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
		pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
		queues_left);
}

/**
 * i40e_setup_pf_filter_control - Setup PF static filter control
 * @pf: PF to be setup
 *
 * i40e_setup_pf_filter_control sets up a PF's initial filter control
 * settings. If PE/FCoE are enabled then it will also set the per-PF
 * filter sizes required for them. It also enables Flow Director,
 * ethertype and macvlan type filter settings for the PF.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
{
	struct i40e_filter_control_settings *settings = &pf->filter_settings;

	settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;

	/* Flow Director is enabled */
	if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
		settings->enable_fdir = true;

	/* Ethtype and MACVLAN filters enabled for PF */
	settings->enable_ethtype = true;
	settings->enable_macvlan = true;

	if (i40e_set_filter_control(&pf->hw, settings))
		return -ENOENT;

	return 0;
}

#define INFO_STRING_LEN 255
#define REMAIN(__x) (INFO_STRING_LEN - (__x))
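/* For instance, after 10 characters have been written, REMAIN(10) leaves
 * 245 bytes of the 255-byte feature string; scnprintf() below guarantees
 * the accumulated output never runs past that budget.
 */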
static void i40e_print_features(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	char *buf;
	int i;

	buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
	if (!buf)
		return;

	i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
#ifdef CONFIG_PCI_IOV
	i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
#endif
	i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
		       pf->hw.func_caps.num_vsis,
		       pf->vsi[pf->lan_vsi]->num_queue_pairs);
	if (pf->flags & I40E_FLAG_RSS_ENABLED)
		i += scnprintf(&buf[i], REMAIN(i), " RSS");
	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
		i += scnprintf(&buf[i], REMAIN(i), " FD_ATR");
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		i += scnprintf(&buf[i], REMAIN(i), " FD_SB");
		i += scnprintf(&buf[i], REMAIN(i), " NTUPLE");
	}
	if (pf->flags & I40E_FLAG_DCB_CAPABLE)
		i += scnprintf(&buf[i], REMAIN(i), " DCB");
	i += scnprintf(&buf[i], REMAIN(i), " VxLAN");
	i += scnprintf(&buf[i], REMAIN(i), " Geneve");
	if (pf->flags & I40E_FLAG_PTP)
		i += scnprintf(&buf[i], REMAIN(i), " PTP");
	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
		i += scnprintf(&buf[i], REMAIN(i), " VEB");
	else
		i += scnprintf(&buf[i], REMAIN(i), " VEPA");

	dev_info(&pf->pdev->dev, "%s\n", buf);
	kfree(buf);
	WARN_ON(i > INFO_STRING_LEN);
}

/**
 * i40e_get_platform_mac_addr - get platform-specific MAC address
 * @pdev: PCI device information struct
 * @pf: board private structure
 *
 * Look up the MAC address for the device. First we'll try
 * eth_platform_get_mac_address, which checks Open Firmware or an
 * arch-specific fallback. Otherwise, we'll default to the value
 * stored in firmware.
 **/
static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
{
	if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
		i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
}

/**
 * i40e_set_fec_in_flags - helper function for setting FEC options in flags
 * @fec_cfg: FEC option to set in flags
 * @flags: ptr to flags in which we set FEC option
 **/
void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
{
	if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
		*flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
	if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
	    (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
		*flags |= I40E_FLAG_RS_FEC;
		*flags &= ~I40E_FLAG_BASE_R_FEC;
	}
	if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
	    (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
		*flags |= I40E_FLAG_BASE_R_FEC;
		*flags &= ~I40E_FLAG_RS_FEC;
	}
	if (fec_cfg == 0)
		*flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
}
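
/* Net effect of the helper above: AUTO advertises both RS and BASE-R FEC,
 * an RS request/ability leaves only I40E_FLAG_RS_FEC set, a KR
 * request/ability leaves only I40E_FLAG_BASE_R_FEC set (KR wins when both
 * are requested, being applied last), and a fec_cfg of zero clears both.
 */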

/**
 * i40e_check_recovery_mode - check if we are running transition firmware
 * @pf: board private structure
 *
 * Check registers indicating the firmware runs in recovery mode. Sets the
 * appropriate driver state.
 *
 * Returns true if the recovery mode was detected, false otherwise
 **/
static bool i40e_check_recovery_mode(struct i40e_pf *pf)
{
	u32 val = rd32(&pf->hw, I40E_GL_FWSTS);

	if (val & I40E_GL_FWSTS_FWS1B_MASK) {
		dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
		dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
		set_bit(__I40E_RECOVERY_MODE, pf->state);

		return true;
	}
	if (test_bit(__I40E_RECOVERY_MODE, pf->state))
		dev_info(&pf->pdev->dev, "Please do Power-On Reset to initialize adapter in normal mode with full functionality.\n");

	return false;
}

/**
 * i40e_pf_loop_reset - perform reset in a loop.
 * @pf: board private structure
 *
 * This function is useful when a NIC is about to enter recovery mode.
 * When a NIC's internal data structures are corrupted its firmware enters
 * recovery mode. Right after a POR it takes about 7 minutes for the
 * firmware to enter recovery mode; until then the NIC is in an
 * intermediate state, after which it almost surely enters recovery mode.
 * The only way for a driver to detect the intermediate state is to issue
 * a series of PF resets and check the return values. If a PF reset
 * returns success the firmware could be in recovery mode, so the caller
 * of this code needs to check for recovery mode if this function returns
 * success. There is a small chance that the firmware hangs in the
 * intermediate state forever. Since waiting the full 7 minutes is quite
 * a long time, this function waits 10 seconds and then gives up by
 * returning an error.
 *
 * Return 0 on success, negative on failure.
 **/
static int i40e_pf_loop_reset(struct i40e_pf *pf)
{
	/* wait max 10 seconds for PF reset to succeed */
	const unsigned long time_end = jiffies + 10 * HZ;
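	/* time_before() compares jiffies values with wraparound in mind, so
	 * the 10 second deadline below stays correct even if the jiffies
	 * counter rolls over while we are polling.
	 */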
	struct i40e_hw *hw = &pf->hw;
	int ret;

	ret = i40e_pf_reset(hw);
	while (ret != 0 && time_before(jiffies, time_end)) {
		usleep_range(10000, 20000);
		ret = i40e_pf_reset(hw);
	}

	if (ret == 0)
		pf->pfr_count++;
	else
		dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);

	return ret;
}

/**
 * i40e_check_fw_empr - check if FW issued unexpected EMP Reset
 * @pf: board private structure
 *
 * Check FW registers to determine if FW issued unexpected EMP Reset.
 * Each time an unexpected EMP Reset occurs, the FW increments a counter
 * of unexpected EMP Resets. When the counter reaches 10 the FW should
 * enter recovery mode.
 *
 * Returns true if FW issued unexpected EMP Reset
 **/
static bool i40e_check_fw_empr(struct i40e_pf *pf)
{
	const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) &
			   I40E_GL_FWSTS_FWS1B_MASK;
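	/* Reading of the range check below: FWS1B values strictly above
	 * EMPR_0 and up to EMPR_10 record how many unexpected EMP resets
	 * have occurred, so any value in that window flags the condition.
	 */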
	return (fw_sts > I40E_GL_FWSTS_FWS1B_EMPR_0) &&
	       (fw_sts <= I40E_GL_FWSTS_FWS1B_EMPR_10);
}

/**
 * i40e_handle_resets - handle EMP resets and PF resets
 * @pf: board private structure
 *
 * Handle both EMP resets and PF resets and conclude whether there are
 * any issues regarding these resets. If there are any issues then
 * generate a log entry.
 *
 * Returns 0 if the NIC is healthy, or a negative value when there are
 * issues with resets
 **/
static int i40e_handle_resets(struct i40e_pf *pf)
{
	const int pfr = i40e_pf_loop_reset(pf);
	const bool is_empr = i40e_check_fw_empr(pf);

	if (is_empr || pfr != 0)
		dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");

	return is_empr ? -EIO : pfr;
}

/**
 * i40e_init_recovery_mode - initialize subsystems needed in recovery mode
 * @pf: board private structure
 * @hw: ptr to the hardware info
 *
 * This function does a minimal setup of all subsystems needed for running
 * recovery mode.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
{
	struct i40e_vsi *vsi;
	int err;
	int v_idx;

	pci_set_drvdata(pf->pdev, pf);
	pci_save_state(pf->pdev);

	/* set up periodic task facility */
	timer_setup(&pf->service_timer, i40e_service_timer, 0);
	pf->service_timer_period = HZ;

	INIT_WORK(&pf->service_task, i40e_service_task);
	clear_bit(__I40E_SERVICE_SCHED, pf->state);

	err = i40e_init_interrupt_scheme(pf);
	if (err)
		goto err_switch_setup;

	/* The number of VSIs reported by the FW is the minimum guaranteed
	 * to us; HW supports far more and we share the remaining pool with
	 * the other PFs. We allocate space for more than the guarantee with
	 * the understanding that we might not get them all later.
	 */
	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
	else
		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;

	/* Set up the vsi struct and our local tracking of the MAIN PF vsi. */
	pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
			  GFP_KERNEL);
	if (!pf->vsi) {
		err = -ENOMEM;
		goto err_switch_setup;
	}

	/* We allocate one VSI, which is the absolute minimum needed
	 * in order to register the netdev
	 */
	v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
	if (v_idx < 0) {
		err = v_idx;
		goto err_switch_setup;
	}
	pf->lan_vsi = v_idx;
	vsi = pf->vsi[v_idx];
	if (!vsi) {
		err = -EFAULT;
		goto err_switch_setup;
	}
	vsi->alloc_queue_pairs = 1;
	err = i40e_config_netdev(vsi);
	if (err)
		goto err_switch_setup;
	err = register_netdev(vsi->netdev);
	if (err)
		goto err_switch_setup;
	vsi->netdev_registered = true;
	i40e_dbg_pf_init(pf);

	err = i40e_setup_misc_vector_for_recovery_mode(pf);
	if (err)
		goto err_switch_setup;

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* since everything's happy, start the service_task timer */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	return 0;

err_switch_setup:
	i40e_reset_interrupt_capability(pf);
	timer_shutdown_sync(&pf->service_timer);
	i40e_shutdown_adminq(hw);
	iounmap(hw->hw_addr);
	pci_release_mem_regions(pf->pdev);
	pci_disable_device(pf->pdev);
	i40e_free_pf(pf);

	return err;
}

/**
 * i40e_set_subsystem_device_id - set subsystem device id
 * @hw: pointer to the hardware info
 *
 * Set PCI subsystem device id either from a pci_dev structure or
 * a specific FW register.
 **/
static inline void i40e_set_subsystem_device_id(struct i40e_hw *hw)
{
	struct i40e_pf *pf = i40e_hw_to_pf(hw);

	hw->subsystem_device_id = pf->pdev->subsystem_device ?
		pf->pdev->subsystem_device :
		(ushort)(rd32(hw, I40E_PFPCI_SUBSYSID) & USHRT_MAX);
}

/**
 * i40e_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in i40e_pci_tbl
 *
 * i40e_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
#ifdef CONFIG_I40E_DCB
	enum i40e_get_fw_lldp_status_resp lldp_status;
#endif /* CONFIG_I40E_DCB */
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	static u16 pfs_found;
	u16 wol_nvm_bits;
	char nvm_ver[32];
	u16 link_status;
#ifdef CONFIG_I40E_DCB
	int status;
#endif /* CONFIG_I40E_DCB */
	int err;
	u32 val;
	u32 i;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* set up for high or low dma */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_err(&pdev->dev,
			"DMA configuration failed: 0x%x\n", err);
		goto err_dma;
	}

	/* set up pci connections */
	err = pci_request_mem_regions(pdev, i40e_driver_name);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_request_selected_regions failed %d\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);

	/* Now that we have a PCI connection, we need to do the
	 * low level device setup. This is primarily setting up
	 * the Admin Queue structures and then querying for the
	 * device's current profile information.
	 */
	pf = i40e_alloc_pf(&pdev->dev);
	if (!pf) {
		err = -ENOMEM;
		goto err_pf_alloc;
	}
	pf->next_vsi = 0;
	pf->pdev = pdev;
	set_bit(__I40E_DOWN, pf->state);

	hw = &pf->hw;

	pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
				I40E_MAX_CSR_SPACE);
	/* We believe that the highest register to read is
	 * I40E_GLGEN_STAT_CLEAR, so we check if the BAR size
	 * is not less than that before mapping to prevent a
	 * kernel panic.
	 */
	if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) {
		dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n",
			pf->ioremap_len);
		err = -ENOMEM;
		goto err_ioremap;
	}
	hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
	if (!hw->hw_addr) {
		err = -EIO;
		dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
			 (unsigned int)pci_resource_start(pdev, 0),
			 pf->ioremap_len, err);
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	i40e_set_subsystem_device_id(hw);
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	hw->bus.bus_id = pdev->bus->number;
	pf->instance = pfs_found;

	/* Select something other than the 802.1ad ethertype for the
	 * switch to use internally and drop on ingress.
	 */
	hw->switch_tag = 0xffff;
	hw->first_tag = ETH_P_8021AD;
	hw->second_tag = ETH_P_8021Q;
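	/* For reference: ETH_P_8021AD is 0x88a8 and ETH_P_8021Q is 0x8100,
	 * both real on-the-wire ethertypes, while 0xffff is a reserved
	 * ethertype that should never appear in a valid frame, so internal
	 * traffic tagged with it is dropped on ingress as intended above.
	 */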
15763
15764 INIT_LIST_HEAD(list: &pf->l3_flex_pit_list);
15765 INIT_LIST_HEAD(list: &pf->l4_flex_pit_list);
15766 INIT_LIST_HEAD(list: &pf->ddp_old_prof);
15767
15768 /* set up the locks for the AQ, do this only once in probe
15769 * and destroy them only once in remove
15770 */
15771 mutex_init(&hw->aq.asq_mutex);
15772 mutex_init(&hw->aq.arq_mutex);
15773
15774 pf->msg_enable = netif_msg_init(debug_value: debug,
15775 NETIF_MSG_DRV |
15776 NETIF_MSG_PROBE |
15777 NETIF_MSG_LINK);
15778 if (debug < -1)
15779 pf->hw.debug_mask = debug;
15780
15781 /* do a special CORER for clearing PXE mode once at init */
15782 if (hw->revision_id == 0 &&
15783 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
15784 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
15785 i40e_flush(hw);
15786 msleep(msecs: 200);
15787 pf->corer_count++;
15788
15789 i40e_clear_pxe_mode(hw);
15790 }
15791
15792 /* Reset here to make sure all is clean and to define PF 'n' */
15793 i40e_clear_hw(hw);
15794
15795 err = i40e_set_mac_type(hw);
15796 if (err) {
15797 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
15798 err);
15799 goto err_pf_reset;
15800 }
15801
15802 err = i40e_handle_resets(pf);
15803 if (err)
15804 goto err_pf_reset;
15805
15806 i40e_check_recovery_mode(pf);
15807
15808 if (is_kdump_kernel()) {
15809 hw->aq.num_arq_entries = I40E_MIN_ARQ_LEN;
15810 hw->aq.num_asq_entries = I40E_MIN_ASQ_LEN;
15811 } else {
15812 hw->aq.num_arq_entries = I40E_AQ_LEN;
15813 hw->aq.num_asq_entries = I40E_AQ_LEN;
15814 }
15815 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
15816 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
15817 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
15818
15819 snprintf(buf: pf->int_name, size: sizeof(pf->int_name) - 1,
15820 fmt: "%s-%s:misc",
15821 dev_driver_string(dev: &pf->pdev->dev), dev_name(dev: &pdev->dev));
15822
15823 err = i40e_init_shared_code(hw);
15824 if (err) {
15825 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
15826 err);
15827 goto err_pf_reset;
15828 }
15829
15830 /* set up a default setting for link flow control */
15831 pf->hw.fc.requested_mode = I40E_FC_NONE;
15832
	err = i40e_init_adminq(hw);
	if (err) {
		if (err == -EIO)
			dev_info(&pdev->dev,
				 "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
				 hw->aq.api_maj_ver,
				 hw->aq.api_min_ver,
				 I40E_FW_API_VERSION_MAJOR,
				 I40E_FW_MINOR_VERSION(hw));
		else
			dev_info(&pdev->dev,
				 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");

		goto err_pf_reset;
	}
	i40e_get_oem_version(hw);
	i40e_get_pba_string(hw);

	/* provide nvm, fw, api versions, vendor:device id, subsys vendor:device id */
	i40e_nvm_version_str(hw, nvm_ver, sizeof(nvm_ver));
	dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n",
		 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
		 hw->aq.api_maj_ver, hw->aq.api_min_ver, nvm_ver,
		 hw->vendor_id, hw->device_id, hw->subsystem_vendor_id,
		 hw->subsystem_device_id);

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
		dev_dbg(&pdev->dev,
			"The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n",
			hw->aq.api_maj_ver,
			hw->aq.api_min_ver,
			I40E_FW_API_VERSION_MAJOR,
			I40E_FW_MINOR_VERSION(hw));
	else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
		dev_info(&pdev->dev,
			 "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
			 hw->aq.api_maj_ver,
			 hw->aq.api_min_ver,
			 I40E_FW_API_VERSION_MAJOR,
			 I40E_FW_MINOR_VERSION(hw));

	i40e_verify_eeprom(pf);

	/* Rev 0 hardware was never productized */
	if (hw->revision_id < 1)
		dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");

	i40e_clear_pxe_mode(hw);

	err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
	if (err)
		goto err_adminq_setup;

	err = i40e_sw_init(pf);
	if (err) {
		dev_info(&pdev->dev, "sw_init failed: %d\n", err);
		goto err_sw_init;
	}

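	/* Firmware is in recovery mode: do a minimal init (bare netdev plus
	 * admin queue) so NVM update tools can repair the image, and skip
	 * the rest of probe.
	 */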
	if (test_bit(__I40E_RECOVERY_MODE, pf->state))
		return i40e_init_recovery_mode(pf, hw);

	err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (err) {
		dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
		goto err_init_lan_hmc;
	}

	err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (err) {
		dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
		err = -ENOENT;
		goto err_configure_lan_hmc;
	}

	/* Disable LLDP for NICs that have firmware versions lower than v4.3.
	 * Ignore error return codes because if it was already disabled via
	 * hardware settings this will fail
	 */
	if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
		dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
		i40e_aq_stop_lldp(hw, true, false, NULL);
	}

	/* allow a platform config to override the HW addr */
	i40e_get_platform_mac_addr(pdev, pf);

	if (!is_valid_ether_addr(hw->mac.addr)) {
		dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
		err = -EIO;
		goto err_mac_addr;
	}
	dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
	ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
	if (is_valid_ether_addr(hw->mac.port_addr))
		pf->hw_features |= I40E_HW_PORT_ID_VALID;

	i40e_ptp_alloc_pins(pf);
	pci_set_drvdata(pdev, pf);
	pci_save_state(pdev);

#ifdef CONFIG_I40E_DCB
	status = i40e_get_fw_lldp_status(&pf->hw, &lldp_status);
	if (!status && lldp_status == I40E_GET_FW_LLDP_STATUS_ENABLED)
		pf->flags &= ~I40E_FLAG_DISABLE_FW_LLDP;
	else
		pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
	dev_info(&pdev->dev,
		 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ?
			"FW LLDP is disabled\n" :
			"FW LLDP is enabled\n");

	/* Enable FW to write default DCB config on link-up */
	i40e_aq_set_dcb_parameters(hw, true, NULL);

	err = i40e_init_pf_dcb(pf);
	if (err) {
		dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
		pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */

	/* set up periodic task facility */
	timer_setup(&pf->service_timer, i40e_service_timer, 0);
	pf->service_timer_period = HZ;

	INIT_WORK(&pf->service_task, i40e_service_task);
	clear_bit(__I40E_SERVICE_SCHED, pf->state);

	/* NVM bit on means WoL disabled for the port */
	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
	if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
		pf->wol_en = false;
	else
		pf->wol_en = true;
	device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);

	/* set up the main switch operations */
	i40e_determine_queue_usage(pf);
	err = i40e_init_interrupt_scheme(pf);
	if (err)
		goto err_switch_setup;

	/* Reduce Tx and Rx pairs for kdump.
	 * When MSI-X is enabled, we cannot use more TC queue pairs than
	 * there are MSI-X vectors (pf->num_lan_msix). Thus
	 * vsi->num_queue_pairs will be equal to pf->num_lan_msix, i.e., 1.
	 */
	if (is_kdump_kernel())
		pf->num_lan_msix = 1;

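	/* Describe the VXLAN/GENEVE port table to the udp_tunnel_nic core,
	 * which will call set_port/unset_port as tunnels come and go.
	 */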
	pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port;
	pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port;
	pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
	pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared;
	pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS;
	pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
						    UDP_TUNNEL_TYPE_GENEVE;

	/* The number of VSIs reported by the FW is the minimum guaranteed
	 * to us; HW supports far more and we share the remaining pool with
	 * the other PFs. We allocate space for more than the guarantee with
	 * the understanding that we might not get them all later.
	 */
	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
	else
		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
		dev_warn(&pf->pdev->dev,
			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
	}

	/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
	pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
			  GFP_KERNEL);
	if (!pf->vsi) {
		err = -ENOMEM;
		goto err_switch_setup;
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
		if (pci_num_vf(pdev))
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
	}
#endif
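	/* build the software switch and create the main LAN VSI */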
	err = i40e_setup_pf_switch(pf, false, false);
	if (err) {
		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
		goto err_vsis;
	}
	INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);

	/* if FDIR VSI was set up, start it now */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			i40e_vsi_open(pf->vsi[i]);
			break;
		}
	}

	/* The driver only wants link up/down and module qualification
	 * reports from firmware. Note the negative logic.
	 */
	err = i40e_aq_set_phy_int_mask(&pf->hw,
				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					 I40E_AQ_EVENT_MEDIA_NA |
					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (err)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n",
			 ERR_PTR(err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}

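	/* on parts flagged as needing it, restart autonegotiation so the
	 * link reliably comes up after the reset above
	 */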
	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
		msleep(75);
		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (err)
			dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n",
				 ERR_PTR(err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* The main driver is (mostly) up and happy. We need to set this state
	 * before setting up the misc vector or we get a race and the vector
	 * ends up disabled forever.
	 */
	clear_bit(__I40E_DOWN, pf->state);

	/* In case of MSIX we are going to setup the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing is combined in
	 * the same vector and that gets setup at open.
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_setup_misc_vector(pf);
		if (err) {
			dev_info(&pdev->dev,
				 "setup of misc vector failed: %d\n", err);
			i40e_cloud_filter_exit(pf);
			i40e_fdir_teardown(pf);
			goto err_vsis;
		}
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
		/* disable link interrupts for VFs */
		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
		i40e_flush(hw);

		if (pci_num_vf(pdev)) {
			dev_info(&pdev->dev,
				 "Active VFs found, allocating resources.\n");
			err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
			if (err)
				dev_info(&pdev->dev,
					 "Error %d allocating resources for existing VFs\n",
					 err);
		}
	}
#endif /* CONFIG_PCI_IOV */

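	/* carve the iWARP client's MSI-X vectors out of the PF's
	 * interrupt pile
	 */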
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
						      pf->num_iwarp_msix,
						      I40E_IWARP_IRQ_PILE_ID);
		if (pf->iwarp_base_vector < 0) {
			dev_info(&pdev->dev,
				 "failed to get tracking for %d vectors for IWARP err=%d\n",
				 pf->num_iwarp_msix, pf->iwarp_base_vector);
			pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
		}
	}

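	/* set up this PF's debugfs entries */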
	i40e_dbg_pf_init(pf);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* since everything's happy, start the service_task timer */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	/* add this PF to client device list and launch a client service task */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		err = i40e_lan_add_device(pf);
		if (err)
			dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
				 err);
	}

#define PCI_SPEED_SIZE 8
#define PCI_WIDTH_SIZE 8
	/* Devices on the IOSF bus do not have this information
	 * and will report PCI Gen 1 x 1 by default so don't bother
	 * checking them.
	 */
	if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
		char speed[PCI_SPEED_SIZE] = "Unknown";
		char width[PCI_WIDTH_SIZE] = "Unknown";

		/* Get the negotiated link width and speed from PCI config
		 * space
		 */
		pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
					  &link_status);

		i40e_set_pci_config_data(hw, link_status);

		switch (hw->bus.speed) {
		case i40e_bus_speed_8000:
			strscpy(speed, "8.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_5000:
			strscpy(speed, "5.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_2500:
			strscpy(speed, "2.5", PCI_SPEED_SIZE); break;
		default:
			break;
		}
		switch (hw->bus.width) {
		case i40e_bus_width_pcie_x8:
			strscpy(width, "8", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x4:
			strscpy(width, "4", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x2:
			strscpy(width, "2", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x1:
			strscpy(width, "1", PCI_WIDTH_SIZE); break;
		default:
			break;
		}

		dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
			 speed, width);

		if (hw->bus.width < i40e_bus_width_pcie_x8 ||
		    hw->bus.speed < i40e_bus_speed_8000) {
			dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
			dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
		}
	}

	/* get the requested speeds from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get requested speeds ret = %pe last_status = %s\n",
			ERR_PTR(err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;

	/* set the FEC config due to the board capabilities */
	i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);

	/* get the supported phy types from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get supported phy types ret = %pe last_status = %s\n",
			ERR_PTR(err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* make sure the MFS hasn't been set lower than the default */
#define MAX_FRAME_SIZE_DEFAULT 0x2600
	val = (rd32(&pf->hw, I40E_PRTGL_SAH) &
	       I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
	if (val < MAX_FRAME_SIZE_DEFAULT)
		dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
			 pf->hw.port, val);

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
	    (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
		pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
	if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
		pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
	/* print a string summarizing features */
	i40e_print_features(pf);

	i40e_devlink_register(pf);

	return 0;

	/* Unwind what we've done if something failed in the setup */
err_vsis:
	set_bit(__I40E_DOWN, pf->state);
	i40e_clear_interrupt_scheme(pf);
	kfree(pf->vsi);
err_switch_setup:
	i40e_reset_interrupt_capability(pf);
	timer_shutdown_sync(&pf->service_timer);
err_mac_addr:
err_configure_lan_hmc:
	(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
	kfree(pf->qp_pile);
err_sw_init:
err_adminq_setup:
err_pf_reset:
	iounmap(hw->hw_addr);
err_ioremap:
	i40e_free_pf(pf);
err_pf_alloc:
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void i40e_remove(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;
	int ret_code;
	int i;

	i40e_devlink_unregister(pf);

	i40e_dbg_pf_exit(pf);

	i40e_ptp_stop(pf);

	/* Disable RSS in hw */
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);

	/* Grab __I40E_RESET_RECOVERY_PENDING and set __I40E_IN_REMOVE
	 * flags, once they are set, i40e_rebuild should not be called as
	 * i40e_prep_for_reset always returns early.
	 */
	while (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		usleep_range(1000, 2000);
	set_bit(__I40E_IN_REMOVE, pf->state);

	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
		set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
	}
	/* no more scheduling of any task */
	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);
	if (pf->service_timer.function)
		timer_shutdown_sync(&pf->service_timer);
	if (pf->service_task.func)
		cancel_work_sync(&pf->service_task);

	if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
		struct i40e_vsi *vsi = pf->vsi[0];

		/* We know that we have allocated only one VSI for this PF;
		 * it was only for registering a netdevice, so that the
		 * interface could be visible in the 'ifconfig' output
		 */
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);

		goto unmap;
	}

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	i40e_fdir_teardown(pf);

	/* If there is a switch structure or any orphans, remove them.
	 * This will leave only the PF's VSI remaining.
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;

		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
		    pf->veb[i]->uplink_seid == 0)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Now we can shutdown the PF's VSIs, just before we kill
	 * adminq and hmc.
	 */
	for (i = pf->num_alloc_vsi; i--;)
		if (pf->vsi[i]) {
			i40e_vsi_close(pf->vsi[i]);
			i40e_vsi_release(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}

	i40e_cloud_filter_exit(pf);

	/* remove attached clients */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		ret_code = i40e_lan_del_device(pf);
		if (ret_code)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 ret_code);
	}

	/* shutdown and destroy the HMC */
	if (hw->hmc.hmc_obj) {
		ret_code = i40e_shutdown_lan_hmc(hw);
		if (ret_code)
			dev_warn(&pdev->dev,
				 "Failed to destroy the HMC resources: %d\n",
				 ret_code);
	}

unmap:
	/* Free MSI/legacy interrupt 0 when in recovery mode. */
	if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED))
		free_irq(pf->pdev->irq, pf);

	/* shutdown the adminq */
	i40e_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	rtnl_lock();
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
				i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}
	rtnl_unlock();

	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	kfree(pf->qp_pile);
	kfree(pf->vsi);

	iounmap(hw->hw_addr);
	i40e_free_pf(pf);
	pci_release_mem_regions(pdev);

	pci_disable_device(pdev);
}

/**
 * i40e_pci_error_detected - warning that something funky happened in PCI land
 * @pdev: PCI device information struct
 * @error: the type of PCI error
 *
 * Called to warn that something happened and the error handling steps
 * are in progress. Allows the driver to quiesce things and be ready
 * for remediation.
 **/
static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
						pci_channel_state_t error)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);

	if (!pf) {
		dev_info(&pdev->dev,
			 "Cannot recover - error happened during device probe\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* shutdown all operations */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		i40e_prep_for_reset(pf);

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * i40e_pci_error_slot_reset - a PCI slot reset just happened
 * @pdev: PCI device information struct
 *
 * Called to find if the driver can work with the device now that
 * the pci slot has been reset. If a basic connection seems good
 * (registers are readable and have sane content) then return a
 * happy little PCI_ERS_RESULT_xxx.
 **/
static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	u32 reg;

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (pci_enable_device_mem(pdev)) {
		dev_info(&pdev->dev,
			 "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);

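		/* a clear global reset trigger register means no reset is
		 * pending, so the device registers are accessible again
		 */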
		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		if (reg == 0)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	return result;
}

/**
 * i40e_pci_error_reset_prepare - prepare device driver for pci reset
 * @pdev: PCI device information struct
 */
static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	i40e_prep_for_reset(pf);
}

/**
 * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
 * @pdev: PCI device information struct
 */
static void i40e_pci_error_reset_done(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	if (test_bit(__I40E_IN_REMOVE, pf->state))
		return;

	i40e_reset_and_rebuild(pf, false, false);
}

/**
 * i40e_pci_error_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error
 * and/or reset recovery has finished.
 **/
static void i40e_pci_error_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (test_bit(__I40E_SUSPENDED, pf->state))
		return;

	i40e_handle_reset_warning(pf, false);
}

/**
 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
 * using the mac_address_write admin q function
 * @pf: pointer to i40e_pf struct
 **/
static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u8 mac_addr[ETH_ALEN];
	u16 flags = 0;
	int ret;

	/* Get current MAC address in case it's an LAA */
	if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
		ether_addr_copy(mac_addr,
				pf->vsi[pf->lan_vsi]->netdev->dev_addr);
	} else {
		dev_err(&pf->pdev->dev,
			"Failed to retrieve MAC address; using default\n");
		ether_addr_copy(mac_addr, hw->mac.addr);
	}

	/* The FW expects the mac address write cmd to first be called with
	 * one of these flags before calling it again with the multicast
	 * enable flags.
	 */
	flags = I40E_AQC_WRITE_TYPE_LAA_WOL;

	if (hw->func_caps.flex10_enable && hw->partition_id != 1)
		flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;

	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
		return;
	}

	flags = I40E_AQC_MC_MAG_EN
			| I40E_AQC_WOL_PRESERVE_ON_PFR
			| I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret)
		dev_err(&pf->pdev->dev,
			"Failed to enable Multicast Magic Packet wake up\n");
}

/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 **/
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);

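	/* stop the service timer and any queued service work */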
	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);
	i40e_cloud_filter_exit(pf);
	i40e_fdir_teardown(pf);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	i40e_prep_for_reset(pf);

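	/* program the wake-up registers per the user's WoL setting:
	 * APME arms APM wake, WUFC_MAG arms magic-packet wake
	 */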
	wr32(hw, I40E_PFPM_APM,
	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC,
	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	/* Free MSI/legacy interrupt 0 when in recovery mode. */
	if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED))
		free_irq(pf->pdev->irq, pf);

	/* Since we're going to destroy queues during the
	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
	 * whole section
	 */
	rtnl_lock();
	i40e_clear_interrupt_scheme(pf);
	rtnl_unlock();

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

/**
 * i40e_suspend - PM callback for moving to D3
 * @dev: generic device information structure
 **/
static int __maybe_unused i40e_suspend(struct device *dev)
{
	struct i40e_pf *pf = dev_get_drvdata(dev);
	struct i40e_hw *hw = &pf->hw;

	/* If we're already suspended, then there is nothing to do */
	if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	set_bit(__I40E_DOWN, pf->state);

	/* Ensure service task will not be running */
	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	/* Since we're going to destroy queues during the
	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
	 * whole section
	 */
	rtnl_lock();

	i40e_prep_for_reset(pf);

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	/* Clear the interrupt scheme and release our IRQs so that the system
	 * can safely hibernate even when there are a large number of CPUs.
	 * Otherwise hibernation might fail when mapping all the vectors back
	 * to CPU0.
	 */
	i40e_clear_interrupt_scheme(pf);

	rtnl_unlock();

	return 0;
}

/**
 * i40e_resume - PM callback for waking up from D3
 * @dev: generic device information structure
 **/
static int __maybe_unused i40e_resume(struct device *dev)
{
	struct i40e_pf *pf = dev_get_drvdata(dev);
	int err;

	/* If we're not suspended, then there is nothing to do */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	/* We need to hold the RTNL lock prior to restoring interrupt schemes,
	 * since we're going to be restoring queues
	 */
	rtnl_lock();

	/* We cleared the interrupt scheme when we suspended, so we need to
	 * restore it now to resume device functionality.
	 */
	err = i40e_restore_interrupt_scheme(pf);
	if (err)
		dev_err(dev, "Cannot restore interrupt scheme: %d\n", err);

	clear_bit(__I40E_DOWN, pf->state);
	i40e_reset_and_rebuild(pf, false, true);

	rtnl_unlock();

	/* Clear suspended state last after everything is recovered */
	clear_bit(__I40E_SUSPENDED, pf->state);

	/* Restart the service task */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	return 0;
}

static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.reset_prepare = i40e_pci_error_reset_prepare,
	.reset_done = i40e_pci_error_reset_done,
	.resume = i40e_pci_error_resume,
};

static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);

static struct pci_driver i40e_driver = {
	.name = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe = i40e_probe,
	.remove = i40e_remove,
	.driver = {
		.pm = &i40e_pm_ops,
	},
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};

/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init i40e_init_module(void)
{
	int err;

	pr_info("%s: %s\n", i40e_driver_name, i40e_driver_string);
	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);

	/* There is no need to throttle the number of active tasks because
	 * each device limits its own task using a state bit for scheduling
	 * the service task, and the device tasks do not interfere with each
	 * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM
	 * since we need to be able to guarantee forward progress even under
	 * memory pressure.
	 */
	i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
	if (!i40e_wq) {
		pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
		return -ENOMEM;
	}

	i40e_dbg_init();
	err = pci_register_driver(&i40e_driver);
	if (err) {
		destroy_workqueue(i40e_wq);
		i40e_dbg_exit();
		return err;
	}

	return 0;
}
module_init(i40e_init_module);

/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	destroy_workqueue(i40e_wq);
	ida_destroy(&i40e_client_ida);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);