1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Copyright(c) 1999 - 2018 Intel Corporation. */ |
3 | |
4 | #include <linux/types.h> |
5 | #include <linux/module.h> |
6 | #include <linux/pci.h> |
7 | #include <linux/netdevice.h> |
8 | #include <linux/vmalloc.h> |
9 | #include <linux/string.h> |
10 | #include <linux/in.h> |
11 | #include <linux/ip.h> |
12 | #include <linux/tcp.h> |
13 | #include <linux/ipv6.h> |
14 | #include <linux/if_bridge.h> |
15 | #ifdef NETIF_F_HW_VLAN_CTAG_TX |
16 | #include <linux/if_vlan.h> |
17 | #endif |
18 | |
19 | #include "ixgbe.h" |
20 | #include "ixgbe_type.h" |
21 | #include "ixgbe_sriov.h" |
22 | |
23 | #ifdef CONFIG_PCI_IOV |
24 | static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter, |
25 | unsigned int num_vfs) |
26 | { |
27 | struct ixgbe_hw *hw = &adapter->hw; |
28 | struct vf_macvlans *mv_list; |
29 | int num_vf_macvlans, i; |
30 | |
31 | /* Initialize list of VF macvlans */ |
32 | INIT_LIST_HEAD(&adapter->vf_mvs.l); |
33 | |
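/* RAR entries are shared: reserve room for the PF MACVLANs, the PF's own
 * MAC and one MAC per VF; whatever is left over can be handed out as
 * VF MACVLAN filters.
 */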
34 | num_vf_macvlans = hw->mac.num_rar_entries - |
35 | (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs); |
36 | if (!num_vf_macvlans) |
37 | return; |
38 | |
39 | mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans), |
40 | GFP_KERNEL); |
41 | if (mv_list) { |
42 | for (i = 0; i < num_vf_macvlans; i++) { |
43 | mv_list[i].vf = -1; |
44 | mv_list[i].free = true; |
45 | list_add(&mv_list[i].l, &adapter->vf_mvs.l); |
46 | } |
47 | adapter->mv_list = mv_list; |
48 | } |
49 | } |
50 | |
51 | static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter, |
52 | unsigned int num_vfs) |
53 | { |
54 | struct ixgbe_hw *hw = &adapter->hw; |
55 | int i; |
56 | |
57 | if (adapter->xdp_prog) { |
58 | e_warn(probe, "SRIOV is not supported with XDP\n"); |
59 | return -EINVAL; |
60 | } |
61 | |
62 | /* Enable VMDq flag so device will be set in VM mode */ |
63 | adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED | |
64 | IXGBE_FLAG_VMDQ_ENABLED; |
65 | |
66 | /* Allocate memory for per VF control structures */ |
67 | adapter->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage), |
68 | GFP_KERNEL); |
69 | if (!adapter->vfinfo) |
70 | return -ENOMEM; |
71 | |
72 | adapter->num_vfs = num_vfs; |
73 | |
74 | ixgbe_alloc_vf_macvlans(adapter, num_vfs); |
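/* Pools 0..num_vfs-1 belong to the VFs; the PF's default pool (VMDQ_P(0))
 * and any macvlan offload pools start right after them.
 */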
75 | adapter->ring_feature[RING_F_VMDQ].offset = num_vfs; |
76 | |
77 | /* Initialize default switching mode VEB */ |
78 | IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); |
79 | adapter->bridge_mode = BRIDGE_MODE_VEB; |
80 | |
81 | /* limit traffic classes based on VFs enabled */ |
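/* More enabled pools means fewer traffic classes are available per pool,
 * so the DCB configuration is scaled down as the VF count grows.
 */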
82 | if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && (num_vfs < 16)) { |
83 | adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; |
84 | adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; |
85 | } else if (num_vfs < 32) { |
86 | adapter->dcb_cfg.num_tcs.pg_tcs = 4; |
87 | adapter->dcb_cfg.num_tcs.pfc_tcs = 4; |
88 | } else { |
89 | adapter->dcb_cfg.num_tcs.pg_tcs = 1; |
90 | adapter->dcb_cfg.num_tcs.pfc_tcs = 1; |
91 | } |
92 | |
93 | /* Disable RSC when in SR-IOV mode */ |
94 | adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | |
95 | IXGBE_FLAG2_RSC_ENABLED); |
96 | |
97 | for (i = 0; i < num_vfs; i++) { |
98 | /* enable spoof checking for all VFs */ |
99 | adapter->vfinfo[i].spoofchk_enabled = true; |
100 | adapter->vfinfo[i].link_enable = true; |
101 | |
102 | /* We support VF RSS querying only for 82599 and x540 |
103 | * devices at the moment. These devices share RSS |
104 | * indirection table and RSS hash key with PF therefore |
105 | * we want to disable the querying by default. |
106 | */ |
107 | adapter->vfinfo[i].rss_query_enabled = false; |
108 | |
109 | /* Untrust all VFs */ |
110 | adapter->vfinfo[i].trusted = false; |
111 | |
112 | /* set the default xcast mode */ |
113 | adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE; |
114 | } |
115 | |
116 | e_info(probe, "SR-IOV enabled with %d VFs\n", num_vfs); |
117 | return 0; |
118 | } |
119 | |
120 | /** |
121 | * ixgbe_get_vfs - Find and take references to all vf devices |
122 | * @adapter: Pointer to adapter struct |
123 | */ |
124 | static void ixgbe_get_vfs(struct ixgbe_adapter *adapter) |
125 | { |
126 | struct pci_dev *pdev = adapter->pdev; |
127 | u16 vendor = pdev->vendor; |
128 | struct pci_dev *vfdev; |
129 | int vf = 0; |
130 | u16 vf_id; |
131 | int pos; |
132 | |
133 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); |
134 | if (!pos) |
135 | return; |
136 | pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id); |
137 | |
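/* Walk every PCI device matching the VF device ID and keep a reference to
 * those whose physfn points back at this PF.
 */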
138 | vfdev = pci_get_device(vendor, vf_id, NULL); |
139 | for (; vfdev; vfdev = pci_get_device(vendor, vf_id, vfdev)) { |
140 | if (!vfdev->is_virtfn) |
141 | continue; |
142 | if (vfdev->physfn != pdev) |
143 | continue; |
144 | if (vf >= adapter->num_vfs) |
145 | continue; |
146 | pci_dev_get(vfdev); |
147 | adapter->vfinfo[vf].vfdev = vfdev; |
148 | ++vf; |
149 | } |
150 | } |
151 | |
152 | /* Note this function is called when the user wants to enable SR-IOV |
153 | * VFs using the now deprecated module parameter |
154 | */ |
155 | void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs) |
156 | { |
157 | int pre_existing_vfs = 0; |
158 | unsigned int num_vfs; |
159 | |
160 | pre_existing_vfs = pci_num_vf(adapter->pdev); |
161 | if (!pre_existing_vfs && !max_vfs) |
162 | return; |
163 | |
164 | /* If there are pre-existing VFs then we have to force |
165 | * use of that many - override any module parameter value. |
166 | * This may result from the user unloading the PF driver |
167 | * while VFs were assigned to guest VMs or because the VFs |
168 | * have been created via the new PCI SR-IOV sysfs interface. |
169 | */ |
170 | if (pre_existing_vfs) { |
171 | num_vfs = pre_existing_vfs; |
172 | dev_warn(&adapter->pdev->dev, |
173 | "Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n"); |
174 | } else { |
175 | int err; |
176 | /* |
177 | * The 82599 supports up to 64 VFs per physical function |
178 | * but this implementation limits allocation to 63 so that |
179 | * basic networking resources are still available to the |
180 | * physical function. If the user requests greater than |
181 | * 63 VFs then it is an error - reset to default of zero. |
182 | */ |
183 | num_vfs = min_t(unsigned int, max_vfs, IXGBE_MAX_VFS_DRV_LIMIT); |
184 | |
185 | err = pci_enable_sriov(adapter->pdev, num_vfs); |
186 | if (err) { |
187 | e_err(probe, "Failed to enable PCI sriov: %d\n", err); |
188 | return; |
189 | } |
190 | } |
191 | |
192 | if (!__ixgbe_enable_sriov(adapter, num_vfs)) { |
193 | ixgbe_get_vfs(adapter); |
194 | return; |
195 | } |
196 | |
197 | /* If we have gotten to this point then there is no memory available |
198 | * to manage the VF devices - print message and bail. |
199 | */ |
200 | e_err(probe, "Unable to allocate memory for VF Data Storage - " |
201 | "SRIOV disabled\n"); |
202 | ixgbe_disable_sriov(adapter); |
203 | } |
204 | |
205 | #endif /* #ifdef CONFIG_PCI_IOV */ |
206 | int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) |
207 | { |
208 | unsigned int num_vfs = adapter->num_vfs, vf; |
209 | unsigned long flags; |
210 | int rss; |
211 | |
212 | spin_lock_irqsave(&adapter->vfs_lock, flags); |
213 | /* set num VFs to 0 to prevent access to vfinfo */ |
214 | adapter->num_vfs = 0; |
215 | spin_unlock_irqrestore(&adapter->vfs_lock, flags); |
216 | |
217 | /* put the reference to all of the vf devices */ |
218 | for (vf = 0; vf < num_vfs; ++vf) { |
219 | struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev; |
220 | |
221 | if (!vfdev) |
222 | continue; |
223 | adapter->vfinfo[vf].vfdev = NULL; |
224 | pci_dev_put(vfdev); |
225 | } |
226 | |
227 | /* free VF control structures */ |
228 | kfree(adapter->vfinfo); |
229 | adapter->vfinfo = NULL; |
230 | |
231 | /* free macvlan list */ |
232 | kfree(adapter->mv_list); |
233 | adapter->mv_list = NULL; |
234 | |
235 | /* if SR-IOV is already disabled then there is nothing to do */ |
236 | if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) |
237 | return 0; |
238 | |
239 | #ifdef CONFIG_PCI_IOV |
240 | /* |
241 | * If our VFs are assigned we cannot shut down SR-IOV |
242 | * without causing issues, so just leave the hardware |
243 | * available but disabled |
244 | */ |
245 | if (pci_vfs_assigned(adapter->pdev)) { |
246 | e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n"); |
247 | return -EPERM; |
248 | } |
249 | /* disable iov and allow time for transactions to clear */ |
250 | pci_disable_sriov(adapter->pdev); |
251 | #endif |
252 | |
253 | /* Disable VMDq flag so device will be set in VM mode */ |
254 | if (bitmap_weight(adapter->fwd_bitmask, adapter->num_rx_pools) == 1) { |
255 | adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; |
256 | adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; |
257 | rss = min_t(int, ixgbe_max_rss_indices(adapter), |
258 | num_online_cpus()); |
259 | } else { |
260 | rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus()); |
261 | } |
262 | |
263 | adapter->ring_feature[RING_F_VMDQ].offset = 0; |
264 | adapter->ring_feature[RING_F_RSS].limit = rss; |
265 | |
266 | /* take a breather then clean up driver data */ |
267 | msleep(100); |
268 | return 0; |
269 | } |
270 | |
271 | static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs) |
272 | { |
273 | #ifdef CONFIG_PCI_IOV |
274 | struct ixgbe_adapter *adapter = pci_get_drvdata(dev); |
275 | int pre_existing_vfs = pci_num_vf(dev); |
276 | int err = 0, num_rx_pools, i, limit; |
277 | u8 num_tc; |
278 | |
279 | if (pre_existing_vfs && pre_existing_vfs != num_vfs) |
280 | err = ixgbe_disable_sriov(adapter); |
281 | else if (pre_existing_vfs && pre_existing_vfs == num_vfs) |
282 | return num_vfs; |
283 | |
284 | if (err) |
285 | return err; |
286 | |
287 | /* While the SR-IOV capability structure reports total VFs to be 64, |
288 | * we limit the actual number allocated as below based on two factors. |
289 | * Num_TCs MAX_VFs |
290 | * 1 63 |
291 | * <=4 31 |
292 | * >4 15 |
293 | * First, we reserve some transmit/receive resources for the PF. |
294 | * Second, VMDQ also uses the same pools that SR-IOV does. We need to |
295 | * account for this, so that we don't accidentally allocate more VFs |
296 | * than we have available pools. The PCI bus driver already checks for |
297 | * other values out of range. |
298 | */ |
299 | num_tc = adapter->hw_tcs; |
300 | num_rx_pools = bitmap_weight(adapter->fwd_bitmask, |
301 | adapter->num_rx_pools); |
302 | limit = (num_tc > 4) ? IXGBE_MAX_VFS_8TC : |
303 | (num_tc > 1) ? IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC; |
304 | |
305 | if (num_vfs > (limit - num_rx_pools)) { |
306 | e_dev_err("Currently configured with %d TCs, and %d offloaded macvlans. Creating more than %d VFs is not allowed\n", |
307 | num_tc, num_rx_pools - 1, limit - num_rx_pools); |
308 | return -EPERM; |
309 | } |
310 | |
311 | err = __ixgbe_enable_sriov(adapter, num_vfs); |
312 | if (err) |
313 | return err; |
314 | |
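/* Bit 28 of the event mask marks the VF as enabled; the low bits carry
 * the VF index (see ixgbe_vf_configuration()).
 */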
315 | for (i = 0; i < num_vfs; i++) |
316 | ixgbe_vf_configuration(dev, (i | 0x10000000)); |
317 | |
318 | /* reset before enabling SRIOV to avoid mailbox issues */ |
319 | ixgbe_sriov_reinit(adapter); |
320 | |
321 | err = pci_enable_sriov(dev, num_vfs); |
322 | if (err) { |
323 | e_dev_warn("Failed to enable PCI sriov: %d\n", err); |
324 | return err; |
325 | } |
326 | ixgbe_get_vfs(adapter); |
327 | |
328 | return num_vfs; |
329 | #else |
330 | return 0; |
331 | #endif |
332 | } |
333 | |
334 | static int ixgbe_pci_sriov_disable(struct pci_dev *dev) |
335 | { |
336 | struct ixgbe_adapter *adapter = pci_get_drvdata(dev); |
337 | int err; |
338 | #ifdef CONFIG_PCI_IOV |
339 | u32 current_flags = adapter->flags; |
340 | int prev_num_vf = pci_num_vf(dev); |
341 | #endif |
342 | |
343 | err = ixgbe_disable_sriov(adapter); |
344 | |
345 | /* Only reinit if no error and state changed */ |
346 | #ifdef CONFIG_PCI_IOV |
347 | if (!err && (current_flags != adapter->flags || |
348 | prev_num_vf != pci_num_vf(dev))) |
349 | ixgbe_sriov_reinit(adapter); |
350 | #endif |
351 | |
352 | return err; |
353 | } |
354 | |
355 | int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs) |
356 | { |
357 | if (num_vfs == 0) |
358 | return ixgbe_pci_sriov_disable(dev); |
359 | else |
360 | return ixgbe_pci_sriov_enable(dev, num_vfs); |
361 | } |
362 | |
363 | static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, |
364 | u32 *msgbuf, u32 vf) |
365 | { |
366 | int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) |
367 | >> IXGBE_VT_MSGINFO_SHIFT; |
368 | u16 *hash_list = (u16 *)&msgbuf[1]; |
369 | struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; |
370 | struct ixgbe_hw *hw = &adapter->hw; |
371 | int i; |
372 | u32 vector_bit; |
373 | u32 vector_reg; |
374 | u32 mta_reg; |
375 | u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); |
376 | |
377 | /* only so many hash values supported */ |
378 | entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES); |
379 | |
380 | /* |
381 | * salt away the number of multicast addresses assigned |
382 | * to this VF for later use to restore when the PF multicast |
383 | * list changes |
384 | */ |
385 | vfinfo->num_vf_mc_hashes = entries; |
386 | |
387 | /* |
388 | * VFs are limited to using the MTA hash table for their multicast |
389 | * addresses |
390 | */ |
391 | for (i = 0; i < entries; i++) { |
392 | vfinfo->vf_mc_hashes[i] = hash_list[i]; |
393 | } |
394 | |
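/* Bits [11:5] of the 12-bit hash select one of the 128 MTA registers,
 * bits [4:0] select the bit within that register.
 */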
395 | for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { |
396 | vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F; |
397 | vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F; |
398 | mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); |
399 | mta_reg |= BIT(vector_bit); |
400 | IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); |
401 | } |
402 | vmolr |= IXGBE_VMOLR_ROMPE; |
403 | IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); |
404 | |
405 | return 0; |
406 | } |
407 | |
408 | #ifdef CONFIG_PCI_IOV |
409 | void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) |
410 | { |
411 | struct ixgbe_hw *hw = &adapter->hw; |
412 | struct vf_data_storage *vfinfo; |
413 | int i, j; |
414 | u32 vector_bit; |
415 | u32 vector_reg; |
416 | u32 mta_reg; |
417 | |
418 | for (i = 0; i < adapter->num_vfs; i++) { |
419 | u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i)); |
420 | vfinfo = &adapter->vfinfo[i]; |
421 | for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) { |
422 | hw->addr_ctrl.mta_in_use++; |
423 | vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F; |
424 | vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F; |
425 | mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); |
426 | mta_reg |= BIT(vector_bit); |
427 | IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); |
428 | } |
429 | |
430 | if (vfinfo->num_vf_mc_hashes) |
431 | vmolr |= IXGBE_VMOLR_ROMPE; |
432 | else |
433 | vmolr &= ~IXGBE_VMOLR_ROMPE; |
434 | IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr); |
435 | } |
436 | |
437 | /* Restore any VF macvlans */ |
438 | ixgbe_full_sync_mac_table(adapter); |
439 | } |
440 | #endif |
441 | |
442 | static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, |
443 | u32 vf) |
444 | { |
445 | struct ixgbe_hw *hw = &adapter->hw; |
446 | int err; |
447 | |
448 | /* If the VLAN overlaps with one that the PF is currently monitoring, make |
449 | * sure that we are able to allocate a VLVF entry. This may be |
450 | * redundant but it guarantees PF will maintain visibility to |
451 | * the VLAN. |
452 | */ |
453 | if (add && test_bit(vid, adapter->active_vlans)) { |
454 | err = hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), true, false); |
455 | if (err) |
456 | return err; |
457 | } |
458 | |
459 | err = hw->mac.ops.set_vfta(hw, vid, vf, !!add, false); |
460 | |
461 | if (add && !err) |
462 | return err; |
463 | |
464 | /* If we failed to add the VF VLAN or we are removing the VF VLAN |
465 | * we may need to drop the PF pool bit in order to allow us to free |
466 | * up the VLVF resources. |
467 | */ |
468 | if (test_bit(vid, adapter->active_vlans) || |
469 | (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) |
470 | ixgbe_update_pf_promisc_vlvf(adapter, vid); |
471 | |
472 | return err; |
473 | } |
474 | |
475 | static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf) |
476 | { |
477 | struct ixgbe_hw *hw = &adapter->hw; |
478 | u32 max_frs; |
479 | |
480 | if (max_frame < ETH_MIN_MTU || max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) { |
481 | e_err(drv, "VF max_frame %d out of range\n", max_frame); |
482 | return -EINVAL; |
483 | } |
484 | |
485 | /* |
486 | * For 82599EB we have to keep all PFs and VFs operating with |
487 | * the same max_frame value in order to avoid sending an oversize |
488 | * frame to a VF. In order to guarantee this is handled correctly |
489 | * for all cases we have several special exceptions to take into |
490 | * account before we can enable the VF for receive |
491 | */ |
492 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) { |
493 | struct net_device *dev = adapter->netdev; |
494 | int pf_max_frame = dev->mtu + ETH_HLEN; |
495 | u32 reg_offset, vf_shift, vfre; |
496 | s32 err = 0; |
497 | |
498 | #ifdef CONFIG_FCOE |
499 | if (dev->features & NETIF_F_FCOE_MTU) |
500 | pf_max_frame = max_t(int, pf_max_frame, |
501 | IXGBE_FCOE_JUMBO_FRAME_SIZE); |
502 | |
503 | #endif /* CONFIG_FCOE */ |
504 | switch (adapter->vfinfo[vf].vf_api) { |
505 | case ixgbe_mbox_api_11: |
506 | case ixgbe_mbox_api_12: |
507 | case ixgbe_mbox_api_13: |
508 | case ixgbe_mbox_api_14: |
509 | /* Version 1.1 supports jumbo frames on VFs if PF has |
510 | * jumbo frames enabled which means legacy VFs are |
511 | * disabled |
512 | */ |
513 | if (pf_max_frame > ETH_FRAME_LEN) |
514 | break; |
515 | fallthrough; |
516 | default: |
517 | /* If the PF or VF are running w/ jumbo frames enabled |
518 | * we need to shut down the VF Rx path as we cannot |
519 | * support jumbo frames on legacy VFs |
520 | */ |
521 | if ((pf_max_frame > ETH_FRAME_LEN) || |
522 | (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) |
523 | err = -EINVAL; |
524 | break; |
525 | } |
526 | |
527 | /* determine VF receive enable location */ |
528 | vf_shift = vf % 32; |
529 | reg_offset = vf / 32; |
530 | |
531 | /* enable or disable receive depending on error */ |
532 | vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); |
533 | if (err) |
534 | vfre &= ~BIT(vf_shift); |
535 | else |
536 | vfre |= BIT(vf_shift); |
537 | IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre); |
538 | |
539 | if (err) { |
540 | e_err(drv, "VF max_frame %d out of range\n", max_frame); |
541 | return err; |
542 | } |
543 | } |
544 | |
545 | /* pull current max frame size from hardware */ |
546 | max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); |
547 | max_frs &= IXGBE_MHADD_MFS_MASK; |
548 | max_frs >>= IXGBE_MHADD_MFS_SHIFT; |
549 | |
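/* MAXFRS is global for the port, so only ever grow it; shrinking it here
 * could break other pools already using a larger frame size.
 */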
550 | if (max_frs < max_frame) { |
551 | max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT; |
552 | IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs); |
553 | } |
554 | |
555 | e_info(hw, "VF requests change max MTU to %d\n", max_frame); |
556 | |
557 | return 0; |
558 | } |
559 | |
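/* VMOLR controls per-pool Rx filtering: BAM accepts broadcast frames and
 * AUPE accepts untagged frames (cleared when a port VLAN is enforced).
 */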
560 | static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) |
561 | { |
562 | u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); |
563 | vmolr |= IXGBE_VMOLR_BAM; |
564 | if (aupe) |
565 | vmolr |= IXGBE_VMOLR_AUPE; |
566 | else |
567 | vmolr &= ~IXGBE_VMOLR_AUPE; |
568 | IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); |
569 | } |
570 | |
571 | static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf) |
572 | { |
573 | struct ixgbe_hw *hw = &adapter->hw; |
574 | |
575 | IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0); |
576 | } |
577 | |
578 | static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf) |
579 | { |
580 | struct ixgbe_hw *hw = &adapter->hw; |
581 | u32 vlvfb_mask, pool_mask, i; |
582 | |
583 | /* create mask for VF and other pools */ |
584 | pool_mask = ~BIT(VMDQ_P(0) % 32); |
585 | vlvfb_mask = BIT(vf % 32); |
586 | |
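/* Each VLVF entry has a pair of VLVFB registers forming a 64-bit bitmap
 * of the pools that are members of that VLAN.
 */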
587 | /* post increment loop, covers VLVF_ENTRIES - 1 to 0 */ |
588 | for (i = IXGBE_VLVF_ENTRIES; i--;) { |
589 | u32 bits[2], vlvfb, vid, vfta, vlvf; |
590 | u32 word = i * 2 + vf / 32; |
591 | u32 mask; |
592 | |
593 | vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); |
594 | |
595 | /* if our bit isn't set we can skip it */ |
596 | if (!(vlvfb & vlvfb_mask)) |
597 | continue; |
598 | |
599 | /* clear our bit from vlvfb */ |
600 | vlvfb ^= vlvfb_mask; |
601 | |
602 | /* create 64b mask to check to see if we should clear VLVF */ |
603 | bits[word % 2] = vlvfb; |
604 | bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1)); |
605 | |
606 | /* if other pools are present, just remove ourselves */ |
607 | if (bits[(VMDQ_P(0) / 32) ^ 1] || |
608 | (bits[VMDQ_P(0) / 32] & pool_mask)) |
609 | goto update_vlvfb; |
610 | |
611 | /* if PF is present, leave VFTA */ |
612 | if (bits[0] || bits[1]) |
613 | goto update_vlvf; |
614 | |
615 | /* if we cannot determine VLAN just remove ourselves */ |
616 | vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i)); |
617 | if (!vlvf) |
618 | goto update_vlvfb; |
619 | |
620 | vid = vlvf & VLAN_VID_MASK; |
621 | mask = BIT(vid % 32); |
622 | |
623 | /* clear bit from VFTA */ |
624 | vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32)); |
625 | if (vfta & mask) |
626 | IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid / 32), vfta ^ mask); |
627 | update_vlvf: |
628 | /* clear POOL selection enable */ |
629 | IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), 0); |
630 | |
631 | if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) |
632 | vlvfb = 0; |
633 | update_vlvfb: |
634 | /* clear pool bits */ |
635 | IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), vlvfb); |
636 | } |
637 | } |
638 | |
639 | static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, |
640 | int vf, int index, unsigned char *mac_addr) |
641 | { |
642 | struct vf_macvlans *entry; |
643 | bool found = false; |
644 | int retval = 0; |
645 | |
646 | if (index <= 1) { |
647 | list_for_each_entry(entry, &adapter->vf_mvs.l, l) { |
648 | if (entry->vf == vf) { |
649 | entry->vf = -1; |
650 | entry->free = true; |
651 | entry->is_macvlan = false; |
652 | ixgbe_del_mac_filter(adapter, |
653 | entry->vf_macvlan, vf); |
654 | } |
655 | } |
656 | } |
657 | |
658 | /* |
659 | * If index was zero then we were asked to clear the uc list |
660 | * for the VF. We're done. |
661 | */ |
662 | if (!index) |
663 | return 0; |
664 | |
665 | list_for_each_entry(entry, &adapter->vf_mvs.l, l) { |
666 | if (entry->free) { |
667 | found = true; |
668 | break; |
669 | } |
670 | } |
671 | |
672 | /* |
673 | * If we traversed the entire list and didn't find a free entry |
674 | * then we're out of space on the RAR table. It's also possible |
675 | * for the &adapter->vf_mvs.l list to be empty because the original |
676 | * memory allocation for the list failed, which is not fatal but does |
677 | * mean we can't support VF requests for MACVLAN because we couldn't |
678 | * allocate memory for the list management required. |
679 | */ |
680 | if (!found) |
681 | return -ENOSPC; |
682 | |
683 | retval = ixgbe_add_mac_filter(adapter, mac_addr, vf); |
684 | if (retval < 0) |
685 | return retval; |
686 | |
687 | entry->free = false; |
688 | entry->is_macvlan = true; |
689 | entry->vf = vf; |
690 | memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); |
691 | |
692 | return 0; |
693 | } |
694 | |
695 | static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) |
696 | { |
697 | struct ixgbe_hw *hw = &adapter->hw; |
698 | struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; |
699 | struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; |
700 | u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); |
701 | u8 num_tcs = adapter->hw_tcs; |
702 | u32 reg_val; |
703 | u32 queue; |
704 | |
705 | /* remove VLAN filters belonging to this VF */ |
706 | ixgbe_clear_vf_vlans(adapter, vf); |
707 | |
708 | /* add back PF assigned VLAN or VLAN 0 */ |
709 | ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf); |
710 | |
711 | /* reset offloads to defaults */ |
712 | ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan); |
713 | |
714 | /* set outgoing tags for VFs */ |
715 | if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) { |
716 | ixgbe_clear_vmvir(adapter, vf); |
717 | } else { |
718 | if (vfinfo->pf_qos || !num_tcs) |
719 | ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, |
720 | vfinfo->pf_qos, vf); |
721 | else |
722 | ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, |
723 | adapter->default_up, vf); |
724 | |
725 | if (vfinfo->spoofchk_enabled) { |
726 | hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); |
727 | hw->mac.ops.set_mac_anti_spoofing(hw, true, vf); |
728 | } |
729 | } |
730 | |
731 | /* reset multicast table array for vf */ |
732 | adapter->vfinfo[vf].num_vf_mc_hashes = 0; |
733 | |
734 | /* clear any ipsec table info */ |
735 | ixgbe_ipsec_vf_clear(adapter, vf); |
736 | |
737 | /* Flush and reset the mta with the new values */ |
738 | ixgbe_set_rx_mode(adapter->netdev); |
739 | |
740 | ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); |
741 | ixgbe_set_vf_macvlan(adapter, vf, 0, NULL); |
742 | |
743 | /* reset VF api back to unknown */ |
744 | adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10; |
745 | |
746 | /* Restart each queue for given VF */ |
747 | for (queue = 0; queue < q_per_pool; queue++) { |
748 | unsigned int reg_idx = (vf * q_per_pool) + queue; |
749 | |
750 | reg_val = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(reg_idx)); |
751 | |
752 | /* Re-enabling only configured queues */ |
753 | if (reg_val) { |
754 | reg_val |= IXGBE_TXDCTL_ENABLE; |
755 | IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val); |
756 | reg_val &= ~IXGBE_TXDCTL_ENABLE; |
757 | IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val); |
758 | } |
759 | } |
760 | |
761 | IXGBE_WRITE_FLUSH(hw); |
762 | } |
763 | |
764 | static void ixgbe_vf_clear_mbx(struct ixgbe_adapter *adapter, u32 vf) |
765 | { |
766 | struct ixgbe_hw *hw = &adapter->hw; |
767 | u32 word; |
768 | |
769 | /* Clear VF's mailbox memory */ |
770 | for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++) |
771 | IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0); |
772 | |
773 | IXGBE_WRITE_FLUSH(hw); |
774 | } |
775 | |
776 | static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, |
777 | int vf, unsigned char *mac_addr) |
778 | { |
779 | s32 retval; |
780 | |
781 | ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); |
782 | retval = ixgbe_add_mac_filter(adapter, mac_addr, vf); |
783 | if (retval >= 0) |
784 | memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, |
785 | ETH_ALEN); |
786 | else |
787 | eth_zero_addr(adapter->vfinfo[vf].vf_mac_addresses); |
788 | |
789 | return retval; |
790 | } |
791 | |
792 | int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) |
793 | { |
794 | struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); |
795 | unsigned int vfn = (event_mask & 0x3f); |
796 | |
797 | bool enable = ((event_mask & 0x10000000U) != 0); |
798 | |
799 | if (enable) |
800 | eth_zero_addr(adapter->vfinfo[vfn].vf_mac_addresses); |
801 | |
802 | return 0; |
803 | } |
804 | |
805 | static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf, |
806 | u32 qde) |
807 | { |
808 | struct ixgbe_hw *hw = &adapter->hw; |
809 | struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; |
810 | u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); |
811 | int i; |
812 | |
813 | for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) { |
814 | u32 reg; |
815 | |
816 | /* flush previous write */ |
817 | IXGBE_WRITE_FLUSH(hw); |
818 | |
819 | /* indicate to hardware that we want to set drop enable */ |
820 | reg = IXGBE_QDE_WRITE | qde; |
821 | reg |= i << IXGBE_QDE_IDX_SHIFT; |
822 | IXGBE_WRITE_REG(hw, IXGBE_QDE, reg); |
823 | } |
824 | } |
825 | |
826 | /** |
827 | * ixgbe_set_vf_rx_tx - Set VF rx tx |
828 | * @adapter: Pointer to adapter struct |
829 | * @vf: VF identifier |
830 | * |
831 | * Set or reset correct transmit and receive for vf |
832 | **/ |
833 | static void ixgbe_set_vf_rx_tx(struct ixgbe_adapter *adapter, int vf) |
834 | { |
835 | u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx; |
836 | struct ixgbe_hw *hw = &adapter->hw; |
837 | u32 reg_offset, vf_shift; |
838 | |
839 | vf_shift = vf % 32; |
840 | reg_offset = vf / 32; |
841 | |
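/* VFRE/VFTE hold one Rx/Tx enable bit per VF, spread across 32-bit
 * registers indexed by vf / 32.
 */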
842 | reg_cur_tx = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset)); |
843 | reg_cur_rx = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); |
844 | |
845 | if (adapter->vfinfo[vf].link_enable) { |
846 | reg_req_tx = reg_cur_tx | 1 << vf_shift; |
847 | reg_req_rx = reg_cur_rx | 1 << vf_shift; |
848 | } else { |
849 | reg_req_tx = reg_cur_tx & ~(1 << vf_shift); |
850 | reg_req_rx = reg_cur_rx & ~(1 << vf_shift); |
851 | } |
852 | |
853 | /* The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs. |
854 | * For more info take a look at ixgbe_set_vf_lpe |
855 | */ |
856 | if (adapter->hw.mac.type == ixgbe_mac_82599EB) { |
857 | struct net_device *dev = adapter->netdev; |
858 | int pf_max_frame = dev->mtu + ETH_HLEN; |
859 | |
860 | #if IS_ENABLED(CONFIG_FCOE) |
861 | if (dev->features & NETIF_F_FCOE_MTU) |
862 | pf_max_frame = max_t(int, pf_max_frame, |
863 | IXGBE_FCOE_JUMBO_FRAME_SIZE); |
864 | #endif /* CONFIG_FCOE */ |
865 | |
866 | if (pf_max_frame > ETH_FRAME_LEN) |
867 | reg_req_rx = reg_cur_rx & ~(1 << vf_shift); |
868 | } |
869 | |
870 | /* Enable/Disable particular VF */ |
871 | if (reg_cur_tx != reg_req_tx) |
872 | IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg_req_tx); |
873 | if (reg_cur_rx != reg_req_rx) |
874 | IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg_req_rx); |
875 | } |
876 | |
877 | static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) |
878 | { |
879 | struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; |
880 | struct ixgbe_hw *hw = &adapter->hw; |
881 | unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; |
882 | u32 reg, reg_offset, vf_shift; |
883 | u32 msgbuf[4] = {0, 0, 0, 0}; |
884 | u8 *addr = (u8 *)(&msgbuf[1]); |
885 | u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); |
886 | int i; |
887 | |
888 | e_info(probe, "VF Reset msg received from vf %d\n", vf); |
889 | |
890 | /* reset the filters for the device */ |
891 | ixgbe_vf_reset_event(adapter, vf); |
892 | |
893 | ixgbe_vf_clear_mbx(adapter, vf); |
894 | |
895 | /* set vf mac address */ |
896 | if (!is_zero_ether_addr(vf_mac)) |
897 | ixgbe_set_vf_mac(adapter, vf, vf_mac); |
898 | |
899 | vf_shift = vf % 32; |
900 | reg_offset = vf / 32; |
901 | |
902 | /* force drop enable for all VF Rx queues */ |
903 | reg = IXGBE_QDE_ENABLE; |
904 | if (adapter->vfinfo[vf].pf_vlan) |
905 | reg |= IXGBE_QDE_HIDE_VLAN; |
906 | |
907 | ixgbe_write_qde(adapter, vf, reg); |
908 | |
909 | ixgbe_set_vf_rx_tx(adapter, vf); |
910 | |
911 | /* enable VF mailbox for further messages */ |
912 | adapter->vfinfo[vf].clear_to_send = true; |
913 | |
914 | /* Enable counting of spoofed packets in the SSVPC register */ |
915 | reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset)); |
916 | reg |= BIT(vf_shift); |
917 | IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg); |
918 | |
919 | /* |
920 | * Reset the VFs TDWBAL and TDWBAH registers |
921 | * which are not cleared by an FLR |
922 | */ |
923 | for (i = 0; i < q_per_pool; i++) { |
924 | IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBAHn(q_per_pool, vf, i), 0); |
925 | IXGBE_WRITE_REG(hw, IXGBE_PVFTDWBALn(q_per_pool, vf, i), 0); |
926 | } |
927 | |
928 | /* reply to reset with ack and vf mac address */ |
929 | msgbuf[0] = IXGBE_VF_RESET; |
930 | if (!is_zero_ether_addr(vf_mac) && adapter->vfinfo[vf].pf_set_mac) { |
931 | msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK; |
932 | memcpy(addr, vf_mac, ETH_ALEN); |
933 | } else { |
934 | msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; |
935 | } |
936 | |
937 | /* |
938 | * Piggyback the multicast filter type so VF can compute the |
939 | * correct vectors |
940 | */ |
941 | msgbuf[3] = hw->mac.mc_filter_type; |
942 | ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf); |
943 | |
944 | return 0; |
945 | } |
946 | |
947 | static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter, |
948 | u32 *msgbuf, u32 vf) |
949 | { |
950 | u8 *new_mac = ((u8 *)(&msgbuf[1])); |
951 | |
952 | if (!is_valid_ether_addr(new_mac)) { |
953 | e_warn(drv, "VF %d attempted to set invalid mac\n", vf); |
954 | return -1; |
955 | } |
956 | |
957 | if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted && |
958 | !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) { |
959 | e_warn(drv, |
960 | "VF %d attempted to override administratively set MAC address\n" |
961 | "Reload the VF driver to resume operations\n", |
962 | vf); |
963 | return -1; |
964 | } |
965 | |
966 | return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0; |
967 | } |
968 | |
969 | static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, |
970 | u32 *msgbuf, u32 vf) |
971 | { |
972 | u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; |
973 | u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); |
974 | u8 tcs = adapter->hw_tcs; |
975 | |
976 | if (adapter->vfinfo[vf].pf_vlan || tcs) { |
977 | e_warn(drv, |
978 | "VF %d attempted to override administratively set VLAN configuration\n" |
979 | "Reload the VF driver to resume operations\n", |
980 | vf); |
981 | return -1; |
982 | } |
983 | |
984 | /* VLAN 0 is a special case, don't allow it to be removed */ |
985 | if (!vid && !add) |
986 | return 0; |
987 | |
988 | return ixgbe_set_vf_vlan(adapter, add, vid, vf); |
989 | } |
990 | |
991 | static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter, |
992 | u32 *msgbuf, u32 vf) |
993 | { |
994 | u8 *new_mac = ((u8 *)(&msgbuf[1])); |
995 | int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> |
996 | IXGBE_VT_MSGINFO_SHIFT; |
997 | int err; |
998 | |
999 | if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted && |
1000 | index > 0) { |
1001 | e_warn(drv, |
1002 | "VF %d requested MACVLAN filter but is administratively denied\n", |
1003 | vf); |
1004 | return -1; |
1005 | } |
1006 | |
1007 | /* A non-zero index indicates the VF is setting a filter */ |
1008 | if (index) { |
1009 | if (!is_valid_ether_addr(new_mac)) { |
1010 | e_warn(drv, "VF %d attempted to set invalid mac\n", vf); |
1011 | return -1; |
1012 | } |
1013 | |
1014 | /* |
1015 | * If the VF is allowed to set MAC filters then turn off |
1016 | * anti-spoofing to avoid false positives. |
1017 | */ |
1018 | if (adapter->vfinfo[vf].spoofchk_enabled) { |
1019 | struct ixgbe_hw *hw = &adapter->hw; |
1020 | |
1021 | hw->mac.ops.set_mac_anti_spoofing(hw, false, vf); |
1022 | hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); |
1023 | } |
1024 | } |
1025 | |
1026 | err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac); |
1027 | if (err == -ENOSPC) |
1028 | e_warn(drv, |
1029 | "VF %d has requested a MACVLAN filter but there is no space for it\n", |
1030 | vf); |
1031 | |
1032 | return err < 0; |
1033 | } |
1034 | |
1035 | static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter, |
1036 | u32 *msgbuf, u32 vf) |
1037 | { |
1038 | int api = msgbuf[1]; |
1039 | |
1040 | switch (api) { |
1041 | case ixgbe_mbox_api_10: |
1042 | case ixgbe_mbox_api_11: |
1043 | case ixgbe_mbox_api_12: |
1044 | case ixgbe_mbox_api_13: |
1045 | case ixgbe_mbox_api_14: |
1046 | adapter->vfinfo[vf].vf_api = api; |
1047 | return 0; |
1048 | default: |
1049 | break; |
1050 | } |
1051 | |
1052 | e_info(drv, "VF %d requested invalid api version %u\n", vf, api); |
1053 | |
1054 | return -1; |
1055 | } |
1056 | |
1057 | static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter, |
1058 | u32 *msgbuf, u32 vf) |
1059 | { |
1060 | struct net_device *dev = adapter->netdev; |
1061 | struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; |
1062 | unsigned int default_tc = 0; |
1063 | u8 num_tcs = adapter->hw_tcs; |
1064 | |
1065 | /* verify the PF is supporting the correct APIs */ |
1066 | switch (adapter->vfinfo[vf].vf_api) { |
1067 | case ixgbe_mbox_api_20: |
1068 | case ixgbe_mbox_api_11: |
1069 | case ixgbe_mbox_api_12: |
1070 | case ixgbe_mbox_api_13: |
1071 | case ixgbe_mbox_api_14: |
1072 | break; |
1073 | default: |
1074 | return -1; |
1075 | } |
1076 | |
1077 | /* only allow 1 Tx queue for bandwidth limiting */ |
1078 | msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); |
1079 | msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); |
1080 | |
1081 | /* if TCs > 1 determine which TC belongs to default user priority */ |
1082 | if (num_tcs > 1) |
1083 | default_tc = netdev_get_prio_tc_map(dev, adapter->default_up); |
1084 | |
1085 | /* notify VF of need for VLAN tag stripping, and correct queue */ |
1086 | if (num_tcs) |
1087 | msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs; |
1088 | else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos) |
1089 | msgbuf[IXGBE_VF_TRANS_VLAN] = 1; |
1090 | else |
1091 | msgbuf[IXGBE_VF_TRANS_VLAN] = 0; |
1092 | |
1093 | /* notify VF of default queue */ |
1094 | msgbuf[IXGBE_VF_DEF_QUEUE] = default_tc; |
1095 | |
1096 | return 0; |
1097 | } |
1098 | |
1099 | static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) |
1100 | { |
1101 | u32 i, j; |
1102 | u32 *out_buf = &msgbuf[1]; |
1103 | const u8 *reta = adapter->rss_indir_tbl; |
1104 | u32 reta_size = ixgbe_rss_indir_tbl_entries(adapter); |
1105 | |
1106 | /* Check if operation is permitted */ |
1107 | if (!adapter->vfinfo[vf].rss_query_enabled) |
1108 | return -EPERM; |
1109 | |
1110 | /* verify the PF is supporting the correct API */ |
1111 | switch (adapter->vfinfo[vf].vf_api) { |
1112 | case ixgbe_mbox_api_14: |
1113 | case ixgbe_mbox_api_13: |
1114 | case ixgbe_mbox_api_12: |
1115 | break; |
1116 | default: |
1117 | return -EOPNOTSUPP; |
1118 | } |
1119 | |
1120 | /* This mailbox command is supported (required) only for 82599 and x540 |
1121 | * VFs which support up to 4 RSS queues. Therefore we will compress the |
1122 | * RETA by saving only 2 bits from each entry. This way we will be able |
1123 | * to transfer the whole RETA in a single mailbox operation. |
1124 | */ |
1125 | for (i = 0; i < reta_size / 16; i++) { |
1126 | out_buf[i] = 0; |
1127 | for (j = 0; j < 16; j++) |
1128 | out_buf[i] |= (u32)(reta[16 * i + j] & 0x3) << (2 * j); |
1129 | } |
1130 | |
1131 | return 0; |
1132 | } |
1133 | |
1134 | static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter, |
1135 | u32 *msgbuf, u32 vf) |
1136 | { |
1137 | u32 *rss_key = &msgbuf[1]; |
1138 | |
1139 | /* Check if the operation is permitted */ |
1140 | if (!adapter->vfinfo[vf].rss_query_enabled) |
1141 | return -EPERM; |
1142 | |
1143 | /* verify the PF is supporting the correct API */ |
1144 | switch (adapter->vfinfo[vf].vf_api) { |
1145 | case ixgbe_mbox_api_14: |
1146 | case ixgbe_mbox_api_13: |
1147 | case ixgbe_mbox_api_12: |
1148 | break; |
1149 | default: |
1150 | return -EOPNOTSUPP; |
1151 | } |
1152 | |
1153 | memcpy(rss_key, adapter->rss_key, IXGBE_RSS_KEY_SIZE); |
1154 | |
1155 | return 0; |
1156 | } |
1157 | |
1158 | static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, |
1159 | u32 *msgbuf, u32 vf) |
1160 | { |
1161 | struct ixgbe_hw *hw = &adapter->hw; |
1162 | int xcast_mode = msgbuf[1]; |
1163 | u32 vmolr, fctrl, disable, enable; |
1164 | |
1165 | /* verify the PF is supporting the correct APIs */ |
1166 | switch (adapter->vfinfo[vf].vf_api) { |
1167 | case ixgbe_mbox_api_12: |
1168 | /* promisc introduced in 1.3 version */ |
1169 | if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC) |
1170 | return -EOPNOTSUPP; |
1171 | fallthrough; |
1172 | case ixgbe_mbox_api_13: |
1173 | case ixgbe_mbox_api_14: |
1174 | break; |
1175 | default: |
1176 | return -EOPNOTSUPP; |
1177 | } |
1178 | |
1179 | if (xcast_mode > IXGBEVF_XCAST_MODE_MULTI && |
1180 | !adapter->vfinfo[vf].trusted) { |
1181 | xcast_mode = IXGBEVF_XCAST_MODE_MULTI; |
1182 | } |
1183 | |
1184 | if (adapter->vfinfo[vf].xcast_mode == xcast_mode) |
1185 | goto out; |
1186 | |
1187 | switch (xcast_mode) { |
1188 | case IXGBEVF_XCAST_MODE_NONE: |
1189 | disable = IXGBE_VMOLR_ROMPE | |
1190 | IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; |
1191 | enable = IXGBE_VMOLR_BAM; |
1192 | break; |
1193 | case IXGBEVF_XCAST_MODE_MULTI: |
1194 | disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; |
1195 | enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE; |
1196 | break; |
1197 | case IXGBEVF_XCAST_MODE_ALLMULTI: |
1198 | disable = IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; |
1199 | enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE; |
1200 | break; |
1201 | case IXGBEVF_XCAST_MODE_PROMISC: |
1202 | if (hw->mac.type <= ixgbe_mac_82599EB) |
1203 | return -EOPNOTSUPP; |
1204 | |
1205 | fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); |
1206 | if (!(fctrl & IXGBE_FCTRL_UPE)) { |
1207 | /* VF promisc requires PF in promisc */ |
1208 | e_warn(drv, |
1209 | "Enabling VF promisc requires PF in promisc\n"); |
1210 | return -EPERM; |
1211 | } |
1212 | |
1213 | disable = IXGBE_VMOLR_VPE; |
1214 | enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | |
1215 | IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE; |
1216 | break; |
1217 | default: |
1218 | return -EOPNOTSUPP; |
1219 | } |
1220 | |
1221 | vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); |
1222 | vmolr &= ~disable; |
1223 | vmolr |= enable; |
1224 | IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); |
1225 | |
1226 | adapter->vfinfo[vf].xcast_mode = xcast_mode; |
1227 | |
1228 | out: |
1229 | msgbuf[1] = xcast_mode; |
1230 | |
1231 | return 0; |
1232 | } |
1233 | |
1234 | static int ixgbe_get_vf_link_state(struct ixgbe_adapter *adapter, |
1235 | u32 *msgbuf, u32 vf) |
1236 | { |
1237 | u32 *link_state = &msgbuf[1]; |
1238 | |
1239 | /* verify the PF is supporting the correct API */ |
1240 | switch (adapter->vfinfo[vf].vf_api) { |
1241 | case ixgbe_mbox_api_12: |
1242 | case ixgbe_mbox_api_13: |
1243 | case ixgbe_mbox_api_14: |
1244 | break; |
1245 | default: |
1246 | return -EOPNOTSUPP; |
1247 | } |
1248 | |
1249 | *link_state = adapter->vfinfo[vf].link_enable; |
1250 | |
1251 | return 0; |
1252 | } |
1253 | |
1254 | static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) |
1255 | { |
1256 | u32 mbx_size = IXGBE_VFMAILBOX_SIZE; |
1257 | u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; |
1258 | struct ixgbe_hw *hw = &adapter->hw; |
1259 | s32 retval; |
1260 | |
1261 | retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); |
1262 | |
1263 | if (retval) { |
1264 | pr_err("Error receiving message from VF\n"); |
1265 | return retval; |
1266 | } |
1267 | |
1268 | /* this is a message we already processed, do nothing */ |
1269 | if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK)) |
1270 | return 0; |
1271 | |
1272 | /* flush the ack before we write any messages back */ |
1273 | IXGBE_WRITE_FLUSH(hw); |
1274 | |
1275 | if (msgbuf[0] == IXGBE_VF_RESET) |
1276 | return ixgbe_vf_reset_msg(adapter, vf); |
1277 | |
1278 | /* |
1279 | * until the vf completes a virtual function reset it should not be |
1280 | * allowed to start any configuration. |
1281 | */ |
1282 | if (!adapter->vfinfo[vf].clear_to_send) { |
1283 | msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; |
1284 | ixgbe_write_mbx(hw, msgbuf, 1, vf); |
1285 | return 0; |
1286 | } |
1287 | |
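/* The low 16 bits of the first mailbox word carry the message opcode. */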
1288 | switch ((msgbuf[0] & 0xFFFF)) { |
1289 | case IXGBE_VF_SET_MAC_ADDR: |
1290 | retval = ixgbe_set_vf_mac_addr(adapter, msgbuf, vf); |
1291 | break; |
1292 | case IXGBE_VF_SET_MULTICAST: |
1293 | retval = ixgbe_set_vf_multicasts(adapter, msgbuf, vf); |
1294 | break; |
1295 | case IXGBE_VF_SET_VLAN: |
1296 | retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf); |
1297 | break; |
1298 | case IXGBE_VF_SET_LPE: |
1299 | retval = ixgbe_set_vf_lpe(adapter, msgbuf[1], vf); |
1300 | break; |
1301 | case IXGBE_VF_SET_MACVLAN: |
1302 | retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf); |
1303 | break; |
1304 | case IXGBE_VF_API_NEGOTIATE: |
1305 | retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf); |
1306 | break; |
1307 | case IXGBE_VF_GET_QUEUES: |
1308 | retval = ixgbe_get_vf_queues(adapter, msgbuf, vf); |
1309 | break; |
1310 | case IXGBE_VF_GET_RETA: |
1311 | retval = ixgbe_get_vf_reta(adapter, msgbuf, vf); |
1312 | break; |
1313 | case IXGBE_VF_GET_RSS_KEY: |
1314 | retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf); |
1315 | break; |
1316 | case IXGBE_VF_UPDATE_XCAST_MODE: |
1317 | retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf); |
1318 | break; |
1319 | case IXGBE_VF_GET_LINK_STATE: |
1320 | retval = ixgbe_get_vf_link_state(adapter, msgbuf, vf); |
1321 | break; |
1322 | case IXGBE_VF_IPSEC_ADD: |
1323 | retval = ixgbe_ipsec_vf_add_sa(adapter, msgbuf, vf); |
1324 | break; |
1325 | case IXGBE_VF_IPSEC_DEL: |
1326 | retval = ixgbe_ipsec_vf_del_sa(adapter, msgbuf, vf); |
1327 | break; |
1328 | default: |
1329 | e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); |
1330 | retval = IXGBE_ERR_MBX; |
1331 | break; |
1332 | } |
1333 | |
1334 | /* notify the VF of the results of what it sent us */ |
1335 | if (retval) |
1336 | msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; |
1337 | else |
1338 | msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK; |
1339 | |
1340 | msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS; |
1341 | |
1342 | ixgbe_write_mbx(hw, msgbuf, mbx_size, vf); |
1343 | |
1344 | return retval; |
1345 | } |
1346 | |
1347 | static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf) |
1348 | { |
1349 | struct ixgbe_hw *hw = &adapter->hw; |
1350 | u32 msg = IXGBE_VT_MSGTYPE_NACK; |
1351 | |
1352 | /* if device isn't clear to send it shouldn't be reading either */ |
1353 | if (!adapter->vfinfo[vf].clear_to_send) |
1354 | ixgbe_write_mbx(hw, &msg, 1, vf); |
1355 | } |
1356 | |
1357 | void ixgbe_msg_task(struct ixgbe_adapter *adapter) |
1358 | { |
1359 | struct ixgbe_hw *hw = &adapter->hw; |
1360 | unsigned long flags; |
1361 | u32 vf; |
1362 | |
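/* vfs_lock keeps adapter->vfinfo from being freed by ixgbe_disable_sriov()
 * while mailbox events are being serviced.
 */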
1363 | spin_lock_irqsave(&adapter->vfs_lock, flags); |
1364 | for (vf = 0; vf < adapter->num_vfs; vf++) { |
1365 | /* process any reset requests */ |
1366 | if (!ixgbe_check_for_rst(hw, vf)) |
1367 | ixgbe_vf_reset_event(adapter, vf); |
1368 | |
1369 | /* process any messages pending */ |
1370 | if (!ixgbe_check_for_msg(hw, vf)) |
1371 | ixgbe_rcv_msg_from_vf(adapter, vf); |
1372 | |
1373 | /* process any acks */ |
1374 | if (!ixgbe_check_for_ack(hw, vf)) |
1375 | ixgbe_rcv_ack_from_vf(adapter, vf); |
1376 | } |
1377 | spin_unlock_irqrestore(&adapter->vfs_lock, flags); |
1378 | } |
1379 | |
1380 | static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf) |
1381 | { |
1382 | struct ixgbe_hw *hw = &adapter->hw; |
1383 | u32 ping; |
1384 | |
1385 | ping = IXGBE_PF_CONTROL_MSG; |
1386 | if (adapter->vfinfo[vf].clear_to_send) |
1387 | ping |= IXGBE_VT_MSGTYPE_CTS; |
1388 | ixgbe_write_mbx(hw, &ping, 1, vf); |
1389 | } |
1390 | |
1391 | void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter) |
1392 | { |
1393 | struct ixgbe_hw *hw = &adapter->hw; |
1394 | u32 ping; |
1395 | int i; |
1396 | |
1397 | for (i = 0 ; i < adapter->num_vfs; i++) { |
1398 | ping = IXGBE_PF_CONTROL_MSG; |
1399 | if (adapter->vfinfo[i].clear_to_send) |
1400 | ping |= IXGBE_VT_MSGTYPE_CTS; |
1401 | ixgbe_write_mbx(hw, &ping, 1, i); |
1402 | } |
1403 | } |
1404 | |
1405 | /** |
1406 | * ixgbe_set_all_vfs - update vfs queues |
1407 | * @adapter: Pointer to adapter struct |
1408 | * |
1409 | * Update setting transmit and receive queues for all vfs |
1410 | **/ |
1411 | void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter) |
1412 | { |
1413 | int i; |
1414 | |
1415 | for (i = 0 ; i < adapter->num_vfs; i++) |
1416 | ixgbe_set_vf_link_state(adapter, i, |
1417 | adapter->vfinfo[i].link_state); |
1418 | } |
1419 | |
1420 | int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) |
1421 | { |
1422 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
1423 | s32 retval; |
1424 | |
1425 | if (vf >= adapter->num_vfs) |
1426 | return -EINVAL; |
1427 | |
1428 | if (is_valid_ether_addr(mac)) { |
1429 | dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", |
1430 | mac, vf); |
1431 | dev_info(&adapter->pdev->dev, "Reload the VF driver to make this change effective."); |
1432 | |
1433 | retval = ixgbe_set_vf_mac(adapter, vf, mac); |
1434 | if (retval >= 0) { |
1435 | adapter->vfinfo[vf].pf_set_mac = true; |
1436 | |
1437 | if (test_bit(__IXGBE_DOWN, &adapter->state)) { |
1438 | dev_warn(&adapter->pdev->dev, "The VF MAC address has been set, but the PF device is not up.\n"); |
1439 | dev_warn(&adapter->pdev->dev, "Bring the PF device up before attempting to use the VF device.\n"); |
1440 | } |
1441 | } else { |
1442 | dev_warn(&adapter->pdev->dev, "The VF MAC address was NOT set due to invalid or duplicate MAC address.\n"); |
1443 | } |
1444 | } else if (is_zero_ether_addr(mac)) { |
1445 | unsigned char *vf_mac_addr = |
1446 | adapter->vfinfo[vf].vf_mac_addresses; |
1447 | |
1448 | /* nothing to do */ |
1449 | if (is_zero_ether_addr(vf_mac_addr)) |
1450 | return 0; |
1451 | |
1452 | dev_info(&adapter->pdev->dev, "removing MAC on VF %d\n", vf); |
1453 | |
1454 | retval = ixgbe_del_mac_filter(adapter, vf_mac_addr, vf); |
1455 | if (retval >= 0) { |
1456 | adapter->vfinfo[vf].pf_set_mac = false; |
1457 | memcpy(vf_mac_addr, mac, ETH_ALEN); |
1458 | } else { |
1459 | dev_warn(&adapter->pdev->dev, "Could NOT remove the VF MAC address.\n"); |
1460 | } |
1461 | } else { |
1462 | retval = -EINVAL; |
1463 | } |
1464 | |
1465 | return retval; |
1466 | } |
1467 | |
1468 | static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf, |
1469 | u16 vlan, u8 qos) |
1470 | { |
1471 | struct ixgbe_hw *hw = &adapter->hw; |
1472 | int err; |
1473 | |
1474 | err = ixgbe_set_vf_vlan(adapter, true, vlan, vf); |
1475 | if (err) |
1476 | goto out; |
1477 | |
1478 | /* Revoke tagless access via VLAN 0 */ |
1479 | ixgbe_set_vf_vlan(adapter, false, 0, vf); |
1480 | |
1481 | ixgbe_set_vmvir(adapter, vlan, qos, vf); |
1482 | ixgbe_set_vmolr(hw, vf, false); |
1483 | |
1484 | /* enable hide vlan on X550 */ |
1485 | if (hw->mac.type >= ixgbe_mac_X550) |
1486 | ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE | |
1487 | IXGBE_QDE_HIDE_VLAN); |
1488 | |
1489 | adapter->vfinfo[vf].pf_vlan = vlan; |
1490 | adapter->vfinfo[vf].pf_qos = qos; |
1491 | dev_info(&adapter->pdev->dev, |
1492 | "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); |
1493 | if (test_bit(__IXGBE_DOWN, &adapter->state)) { |
1494 | dev_warn(&adapter->pdev->dev, |
1495 | "The VF VLAN has been set, but the PF device is not up.\n"); |
1496 | dev_warn(&adapter->pdev->dev, |
1497 | "Bring the PF device up before attempting to use the VF device.\n"); |
1498 | } |
1499 | |
1500 | out: |
1501 | return err; |
1502 | } |
1503 | |
1504 | static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf) |
1505 | { |
1506 | struct ixgbe_hw *hw = &adapter->hw; |
1507 | int err; |
1508 | |
1509 | err = ixgbe_set_vf_vlan(adapter, false, |
1510 | adapter->vfinfo[vf].pf_vlan, vf); |
1511 | /* Restore tagless access via VLAN 0 */ |
1512 | ixgbe_set_vf_vlan(adapter, true, 0, vf); |
1513 | ixgbe_clear_vmvir(adapter, vf); |
1514 | ixgbe_set_vmolr(hw, vf, true); |
1515 | |
1516 | /* disable hide VLAN on X550 */ |
1517 | if (hw->mac.type >= ixgbe_mac_X550) |
1518 | ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE); |
1519 | |
1520 | adapter->vfinfo[vf].pf_vlan = 0; |
1521 | adapter->vfinfo[vf].pf_qos = 0; |
1522 | |
1523 | return err; |
1524 | } |
1525 | |
1526 | int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, |
1527 | u8 qos, __be16 vlan_proto) |
1528 | { |
1529 | int err = 0; |
1530 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
1531 | |
1532 | if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7)) |
1533 | return -EINVAL; |
1534 | if (vlan_proto != htons(ETH_P_8021Q)) |
1535 | return -EPROTONOSUPPORT; |
1536 | if (vlan || qos) { |
1537 | /* Check if there is already a port VLAN set, if so |
1538 | * we have to delete the old one first before we |
1539 | * can set the new one. The usage model had |
1540 | * previously assumed the user would delete the |
1541 | * old port VLAN before setting a new one but this |
1542 | * is not necessarily the case. |
1543 | */ |
1544 | if (adapter->vfinfo[vf].pf_vlan) |
1545 | err = ixgbe_disable_port_vlan(adapter, vf); |
1546 | if (err) |
1547 | goto out; |
1548 | err = ixgbe_enable_port_vlan(adapter, vf, vlan, qos); |
1549 | } else { |
1550 | err = ixgbe_disable_port_vlan(adapter, vf); |
1551 | } |
1552 | |
1553 | out: |
1554 | return err; |
1555 | } |
1556 | |
1557 | int ixgbe_link_mbps(struct ixgbe_adapter *adapter) |
1558 | { |
1559 | switch (adapter->link_speed) { |
1560 | case IXGBE_LINK_SPEED_100_FULL: |
1561 | return 100; |
1562 | case IXGBE_LINK_SPEED_1GB_FULL: |
1563 | return 1000; |
1564 | case IXGBE_LINK_SPEED_10GB_FULL: |
1565 | return 10000; |
1566 | default: |
1567 | return 0; |
1568 | } |
1569 | } |
1570 | |
1571 | static void ixgbe_set_vf_rate_limit(struct ixgbe_adapter *adapter, int vf) |
1572 | { |
1573 | struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; |
1574 | struct ixgbe_hw *hw = &adapter->hw; |
1575 | u32 bcnrc_val = 0; |
1576 | u16 queue, queues_per_pool; |
1577 | u16 tx_rate = adapter->vfinfo[vf].tx_rate; |
1578 | |
1579 | if (tx_rate) { |
1580 | /* start with base link speed value */ |
1581 | bcnrc_val = adapter->vf_rate_link_speed; |
1582 | |
1583 | /* Calculate the rate factor values to set */ |
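/* The rate factor programmed into RTTBCNRC is the ratio
 * link_speed / tx_rate in fixed point, with RF_INT_SHIFT fractional bits.
 */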
1584 | bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT; |
1585 | bcnrc_val /= tx_rate; |
1586 | |
1587 | /* clear everything but the rate factor */ |
1588 | bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK | |
1589 | IXGBE_RTTBCNRC_RF_DEC_MASK; |
1590 | |
1591 | /* enable the rate scheduler */ |
1592 | bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA; |
1593 | } |
1594 | |
1595 | /* |
1596 | * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM |
1597 | * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported |
1598 | * and 0x004 otherwise. |
1599 | */ |
1600 | switch (hw->mac.type) { |
1601 | case ixgbe_mac_82599EB: |
1602 | IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4); |
1603 | break; |
1604 | case ixgbe_mac_X540: |
1605 | IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14); |
1606 | break; |
1607 | default: |
1608 | break; |
1609 | } |
1610 | |
1611 | /* determine how many queues per pool based on VMDq mask */ |
1612 | queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); |
1613 | |
1614 | /* write value for all Tx queues belonging to VF */ |
1615 | for (queue = 0; queue < queues_per_pool; queue++) { |
1616 | unsigned int reg_idx = (vf * queues_per_pool) + queue; |
1617 | |
1618 | IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, reg_idx); |
1619 | IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); |
1620 | } |
1621 | } |
1622 | |
1623 | void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter) |
1624 | { |
1625 | int i; |
1626 | |
1627 | /* VF Tx rate limit was not set */ |
1628 | if (!adapter->vf_rate_link_speed) |
1629 | return; |
1630 | |
1631 | if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) { |
1632 | adapter->vf_rate_link_speed = 0; |
1633 | dev_info(&adapter->pdev->dev, |
1634 | "Link speed has been changed. VF Transmit rate is disabled\n"); |
1635 | } |
1636 | |
1637 | for (i = 0; i < adapter->num_vfs; i++) { |
1638 | if (!adapter->vf_rate_link_speed) |
1639 | adapter->vfinfo[i].tx_rate = 0; |
1640 | |
1641 | ixgbe_set_vf_rate_limit(adapter, i); |
1642 | } |
1643 | } |
1644 | |
1645 | int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, |
1646 | int max_tx_rate) |
1647 | { |
1648 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
1649 | int link_speed; |
1650 | |
1651 | /* verify VF is active */ |
1652 | if (vf >= adapter->num_vfs) |
1653 | return -EINVAL; |
1654 | |
1655 | /* verify link is up */ |
1656 | if (!adapter->link_up) |
1657 | return -EINVAL; |
1658 | |
1659 | /* verify we are linked at 10Gbps */ |
1660 | link_speed = ixgbe_link_mbps(adapter); |
1661 | if (link_speed != 10000) |
1662 | return -EINVAL; |
1663 | |
1664 | if (min_tx_rate) |
1665 | return -EINVAL; |
1666 | |
1667 | /* rate limit cannot be less than 10Mbs or greater than link speed */ |
1668 | if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed))) |
1669 | return -EINVAL; |
1670 | |
1671 | /* store values */ |
1672 | adapter->vf_rate_link_speed = link_speed; |
1673 | adapter->vfinfo[vf].tx_rate = max_tx_rate; |
1674 | |
1675 | /* update hardware configuration */ |
1676 | ixgbe_set_vf_rate_limit(adapter, vf); |
1677 | |
1678 | return 0; |
1679 | } |
1680 | |
1681 | int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) |
1682 | { |
1683 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
1684 | struct ixgbe_hw *hw = &adapter->hw; |
1685 | |
1686 | if (vf >= adapter->num_vfs) |
1687 | return -EINVAL; |
1688 | |
1689 | adapter->vfinfo[vf].spoofchk_enabled = setting; |
1690 | |
1691 | /* configure MAC spoofing */ |
1692 | hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf); |
1693 | |
1694 | /* configure VLAN spoofing */ |
1695 | hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf); |
1696 | |
1697 | /* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be |
1698 | * calling set_ethertype_anti_spoofing for each VF in loop below |
1699 | */ |
1700 | if (hw->mac.ops.set_ethertype_anti_spoofing) { |
1701 | IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP), |
1702 | (IXGBE_ETQF_FILTER_EN | |
1703 | IXGBE_ETQF_TX_ANTISPOOF | |
1704 | ETH_P_LLDP)); |
1705 | |
1706 | IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC), |
1707 | (IXGBE_ETQF_FILTER_EN | |
1708 | IXGBE_ETQF_TX_ANTISPOOF | |
1709 | ETH_P_PAUSE)); |
1710 | |
1711 | hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf); |
1712 | } |
1713 | |
1714 | return 0; |
1715 | } |
1716 | |
1717 | /** |
1718 | * ixgbe_set_vf_link_state - Set link state |
1719 | * @adapter: Pointer to adapter struct |
1720 | * @vf: VF identifier |
1721 | * @state: required link state |
1722 | * |
1723 | * Set a link force state on/off a single vf |
1724 | **/ |
1725 | void ixgbe_set_vf_link_state(struct ixgbe_adapter *adapter, int vf, int state) |
1726 | { |
1727 | adapter->vfinfo[vf].link_state = state; |
1728 | |
1729 | switch (state) { |
1730 | case IFLA_VF_LINK_STATE_AUTO: |
1731 | if (test_bit(__IXGBE_DOWN, &adapter->state)) |
1732 | adapter->vfinfo[vf].link_enable = false; |
1733 | else |
1734 | adapter->vfinfo[vf].link_enable = true; |
1735 | break; |
1736 | case IFLA_VF_LINK_STATE_ENABLE: |
1737 | adapter->vfinfo[vf].link_enable = true; |
1738 | break; |
1739 | case IFLA_VF_LINK_STATE_DISABLE: |
1740 | adapter->vfinfo[vf].link_enable = false; |
1741 | break; |
1742 | } |
1743 | |
1744 | ixgbe_set_vf_rx_tx(adapter, vf); |
1745 | |
1746 | /* restart the VF */ |
1747 | adapter->vfinfo[vf].clear_to_send = false; |
1748 | ixgbe_ping_vf(adapter, vf); |
1749 | } |
1750 | |
1751 | /** |
1752 | * ixgbe_ndo_set_vf_link_state - Set link state |
1753 | * @netdev: network interface device structure |
1754 | * @vf: VF identifier |
1755 | * @state: required link state |
1756 | * |
1757 | * Set the link state of a specified VF, regardless of physical link state |
1758 | **/ |
1759 | int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state) |
1760 | { |
1761 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
1762 | int ret = 0; |
1763 | |
1764 | if (vf < 0 || vf >= adapter->num_vfs) { |
1765 | dev_err(&adapter->pdev->dev, |
1766 | "NDO set VF link - invalid VF identifier %d\n", vf); |
1767 | return -EINVAL; |
1768 | } |
1769 | |
1770 | switch (state) { |
1771 | case IFLA_VF_LINK_STATE_ENABLE: |
1772 | dev_info(&adapter->pdev->dev, |
1773 | "NDO set VF %d link state %d - not supported\n", |
1774 | vf, state); |
1775 | break; |
1776 | case IFLA_VF_LINK_STATE_DISABLE: |
1777 | dev_info(&adapter->pdev->dev, |
1778 | "NDO set VF %d link state disable\n", vf); |
1779 | ixgbe_set_vf_link_state(adapter, vf, state); |
1780 | break; |
1781 | case IFLA_VF_LINK_STATE_AUTO: |
1782 | dev_info(&adapter->pdev->dev, |
1783 | "NDO set VF %d link state auto\n", vf); |
1784 | ixgbe_set_vf_link_state(adapter, vf, state); |
1785 | break; |
1786 | default: |
1787 | dev_err(&adapter->pdev->dev, |
1788 | "NDO set VF %d - invalid link state %d\n", vf, state); |
1789 | ret = -EINVAL; |
1790 | } |
1791 | |
1792 | return ret; |
1793 | } |
1794 | |
1795 | int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, |
1796 | bool setting) |
1797 | { |
1798 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
1799 | |
1800 | /* This operation is currently supported only for 82599 and x540 |
1801 | * devices. |
1802 | */ |
1803 | if (adapter->hw.mac.type < ixgbe_mac_82599EB || |
1804 | adapter->hw.mac.type >= ixgbe_mac_X550) |
1805 | return -EOPNOTSUPP; |
1806 | |
1807 | if (vf >= adapter->num_vfs) |
1808 | return -EINVAL; |
1809 | |
1810 | adapter->vfinfo[vf].rss_query_enabled = setting; |
1811 | |
1812 | return 0; |
1813 | } |
1814 | |
1815 | int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) |
1816 | { |
1817 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
1818 | |
1819 | if (vf >= adapter->num_vfs) |
1820 | return -EINVAL; |
1821 | |
1822 | /* nothing to do */ |
1823 | if (adapter->vfinfo[vf].trusted == setting) |
1824 | return 0; |
1825 | |
1826 | adapter->vfinfo[vf].trusted = setting; |
1827 | |
1828 | /* reset VF to reconfigure features */ |
1829 | adapter->vfinfo[vf].clear_to_send = false; |
1830 | ixgbe_ping_vf(adapter, vf); |
1831 | |
1832 | e_info(drv, "VF %u is %strusted\n", vf, setting ? "" : "not "); |
1833 | |
1834 | return 0; |
1835 | } |
1836 | |
1837 | int ixgbe_ndo_get_vf_config(struct net_device *netdev, |
1838 | int vf, struct ifla_vf_info *ivi) |
1839 | { |
1840 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
1841 | if (vf >= adapter->num_vfs) |
1842 | return -EINVAL; |
1843 | ivi->vf = vf; |
1844 | memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); |
1845 | ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate; |
1846 | ivi->min_tx_rate = 0; |
1847 | ivi->vlan = adapter->vfinfo[vf].pf_vlan; |
1848 | ivi->qos = adapter->vfinfo[vf].pf_qos; |
1849 | ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; |
1850 | ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled; |
1851 | ivi->trusted = adapter->vfinfo[vf].trusted; |
1852 | return 0; |
1853 | } |
1854 | |