// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2019 Intel Corporation. */

#include "fm10k.h"
#include "fm10k_vf.h"
#include "fm10k_pf.h"

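/* Error handler for unrecognized mailbox messages from a VF: log the
 * offending message ID and VF index, then defer to the generic TLV error
 * handler.
 */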
static s32 fm10k_iov_msg_error(struct fm10k_hw *hw, u32 **results,
			       struct fm10k_mbx_info *mbx)
{
	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
	struct fm10k_intfc *interface = hw->back;
	struct pci_dev *pdev = interface->pdev;

	dev_err(&pdev->dev, "Unknown message ID %u on VF %d\n",
		**results & FM10K_TLV_ID_MASK, vf_info->vf_idx);

	return fm10k_tlv_msg_error(hw, results, mbx);
}

/**
 * fm10k_iov_msg_queue_mac_vlan - Message handler for MAC/VLAN request from VF
 * @hw: Pointer to hardware structure
 * @results: Pointer array to message, results[0] is pointer to message
 * @mbx: Pointer to mailbox information structure
 *
 * This function is a custom handler for MAC/VLAN requests from the VF. The
 * assumption is that it is acceptable to directly hand off the message from
 * the VF to the PF's switch manager. However, we use a MAC/VLAN message
 * queue to avoid overloading the mailbox when a large number of requests
 * come in.
 **/
static s32 fm10k_iov_msg_queue_mac_vlan(struct fm10k_hw *hw, u32 **results,
					struct fm10k_mbx_info *mbx)
{
	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
	struct fm10k_intfc *interface = hw->back;
	u8 mac[ETH_ALEN];
	u32 *result;
	int err = 0;
	bool set;
	u16 vlan;
	u32 vid;

	/* we shouldn't be updating rules on a disabled interface */
	if (!FM10K_VF_FLAG_ENABLED(vf_info))
		err = FM10K_ERR_PARAM;

	if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) {
		result = results[FM10K_MAC_VLAN_MSG_VLAN];

		/* record VLAN id requested */
		err = fm10k_tlv_attr_get_u32(result, &vid);
		if (err)
			return err;

		set = !(vid & FM10K_VLAN_CLEAR);
		vid &= ~FM10K_VLAN_CLEAR;

		/* if the length field has been set, this is a multi-bit
		 * update request. For multi-bit requests, simply disallow
		 * them when the pf_vid has been set. In this case, the PF
		 * should have already cleared the VLAN_TABLE, and if we
		 * allowed them, it could allow a rogue VF to receive traffic
		 * on a VLAN it was not assigned. In the single-bit case, we
		 * need to modify requests for VLAN 0 to use the default PF or
		 * SW vid when assigned.
		 */

		if (vid >> 16) {
			/* prevent multi-bit requests when PF has
			 * administratively set the VLAN for this VF
			 */
			if (vf_info->pf_vid)
				return FM10K_ERR_PARAM;
		} else {
			err = fm10k_iov_select_vid(vf_info, (u16)vid);
			if (err < 0)
				return err;

			vid = err;
		}

		/* update VSI info for VF in regards to VLAN table */
		err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
	}

	if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
		result = results[FM10K_MAC_VLAN_MSG_MAC];

		/* record unicast MAC address requested */
		err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
		if (err)
			return err;

		/* block attempts to set MAC for a locked device */
		if (is_valid_ether_addr(vf_info->mac) &&
		    !ether_addr_equal(mac, vf_info->mac))
			return FM10K_ERR_PARAM;

		set = !(vlan & FM10K_VLAN_CLEAR);
		vlan &= ~FM10K_VLAN_CLEAR;

		err = fm10k_iov_select_vid(vf_info, vlan);
		if (err < 0)
			return err;

		vlan = (u16)err;

		/* Add this request to the MAC/VLAN queue */
		err = fm10k_queue_mac_request(interface, vf_info->glort,
					      mac, vlan, set);
	}

	if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
		result = results[FM10K_MAC_VLAN_MSG_MULTICAST];

		/* record multicast MAC address requested */
		err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
		if (err)
			return err;

		/* verify that the VF is allowed to request multicast */
		if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
			return FM10K_ERR_PARAM;

		set = !(vlan & FM10K_VLAN_CLEAR);
		vlan &= ~FM10K_VLAN_CLEAR;

		err = fm10k_iov_select_vid(vf_info, vlan);
		if (err < 0)
			return err;

		vlan = (u16)err;

		/* Add this request to the MAC/VLAN queue */
		err = fm10k_queue_mac_request(interface, vf_info->glort,
					      mac, vlan, set);
	}

	return err;
}

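/* Table of handlers for messages received from a VF over the PF/VF mailbox.
 * Message IDs without a dedicated handler are routed to fm10k_iov_msg_error
 * above.
 */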
static const struct fm10k_msg_data iov_mbx_data[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_queue_mac_vlan),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_iov_msg_error),
};

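/**
 * fm10k_iov_event - Handle VF FLR (Function Level Reset) events
 * @interface: interface private structure
 *
 * Checks the VFLR indication in EICR and, for every VF whose PFVFLRE bit is
 * set, resets that VF's hardware resources and re-connects its mailbox.
 * Expected to be called when the PF notices a VFLR event, e.g. from its
 * mailbox interrupt handling path.
 **/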
s32 fm10k_iov_event(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	s64 vflre;
	int i;

	/* if there is no iov_data then there is no mailbox to process */
	if (!READ_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are in the RCU block */
	if (!iov_data)
		goto read_unlock;

	if (!(fm10k_read_reg(hw, FM10K_EICR) & FM10K_EICR_VFLR))
		goto read_unlock;

	/* read VFLRE to determine if any VFs have been reset */
	vflre = fm10k_read_reg(hw, FM10K_PFVFLRE(1));
	vflre <<= 32;
	vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(0));

	i = iov_data->num_vfs;

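	/* Walk VFs from the highest index down: the initial shift places the
	 * bit for VF (num_vfs - 1) in the sign bit, and "vflre += vflre"
	 * shifts left by one each iteration, so a negative value means the
	 * current VF's VFLRE bit is set.
	 */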
	for (vflre <<= 64 - i; vflre && i--; vflre += vflre) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		if (vflre >= 0)
			continue;

		hw->iov.ops.reset_resources(hw, vf_info);
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}

read_unlock:
	rcu_read_unlock();

	return 0;
}

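/**
 * fm10k_iov_mbx - Process mailbox traffic for all VFs
 * @interface: interface private structure
 *
 * Services each VF mailbox while keeping the SM (switch manager) mailbox
 * drained, since most VF requests trigger a follow-up PF-to-SM message.
 * Processing resumes at next_vf_mbx so that no VF is starved if the loop
 * has to stop early when the SM mailbox fills up.
 **/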
s32 fm10k_iov_mbx(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	int i;

	/* if there is no iov_data then there is no mailbox to process */
	if (!READ_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are in the RCU block */
	if (!iov_data)
		goto read_unlock;

	/* lock the mailbox for transmit and receive */
	fm10k_mbx_lock(interface);

	/* Most VF messages sent to the PF cause the PF to respond by
	 * requesting from the SM mailbox. This means that too many VF
	 * messages processed at once could cause a mailbox timeout on the PF.
	 * To prevent this, store a pointer to the next VF mbx to process. Use
	 * that as the start of the loop so that we don't starve whichever VF
	 * got ignored on the previous run.
	 */
process_mbx:
	for (i = iov_data->next_vf_mbx ? : iov_data->num_vfs; i--;) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
		struct fm10k_mbx_info *mbx = &vf_info->mbx;
		u16 glort = vf_info->glort;

		/* process the SM mailbox first to drain outgoing messages */
		hw->mbx.ops.process(hw, &hw->mbx);

		/* verify port mapping is valid, if not reset port */
		if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort)) {
			hw->iov.ops.reset_lport(hw, vf_info);
			fm10k_clear_macvlan_queue(interface, glort, false);
		}

		/* reset VFs whose mailbox has timed out */
		if (!mbx->timeout) {
			hw->iov.ops.reset_resources(hw, vf_info);
			mbx->ops.connect(hw, mbx);
		}

		/* guarantee we have free space in the SM mailbox */
		if (hw->mbx.state == FM10K_STATE_OPEN &&
		    !hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) {
			/* keep track of how many times this occurs */
			interface->hw_sm_mbx_full++;

			/* make sure we try again momentarily */
			fm10k_service_event_schedule(interface);

			break;
		}

		/* cleanup mailbox and process received messages */
		mbx->ops.process(hw, mbx);
	}

	/* if we stopped processing mailboxes early, update next_vf_mbx.
	 * Otherwise, reset next_vf_mbx, and restart loop so that we process
	 * the remaining mailboxes we skipped at the start.
	 */
	if (i >= 0) {
		iov_data->next_vf_mbx = i + 1;
	} else if (iov_data->next_vf_mbx) {
		iov_data->next_vf_mbx = 0;
		goto process_mbx;
	}

	/* free the lock */
	fm10k_mbx_unlock(interface);

read_unlock:
	rcu_read_unlock();

	return 0;
}

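/**
 * fm10k_iov_suspend - Stop all VFs and reclaim their hardware resources
 * @pdev: PCI device structure
 *
 * Clears the VF RSS DGLORT mapping and resets the resources and logical
 * port of every active VF. Called before SR-IOV state is torn down, and by
 * other parts of the driver around PF resets.
 **/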
void fm10k_iov_suspend(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	int num_vfs, i;

	/* pull out num_vfs from iov_data */
	num_vfs = iov_data ? iov_data->num_vfs : 0;

	/* shut down queue mapping for VFs */
	fm10k_write_reg(hw, FM10K_DGLORTMAP(fm10k_dglort_vf_rss),
			FM10K_DGLORTMAP_NONE);

	/* Stop any active VFs and reset their resources */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		hw->iov.ops.reset_resources(hw, vf_info);
		hw->iov.ops.reset_lport(hw, vf_info);
		fm10k_clear_macvlan_queue(interface, vf_info->glort, false);
	}
}

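/* Called from fm10k_iov_resume() before VF resources are assigned; see the
 * comment below for why completer aborts are expected once VFs are active.
 */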
static void fm10k_mask_aer_comp_abort(struct pci_dev *pdev)
{
	u32 err_mask;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	/* Mask the completion abort bit in the ERR_UNCOR_MASK register,
	 * preventing the device from reporting these errors to the upstream
	 * PCIe root device. This avoids bringing down platforms which upgrade
	 * non-fatal completer aborts into machine check exceptions. Completer
	 * aborts can occur whenever a VF reads a queue it doesn't own.
	 */
	pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, &err_mask);
	err_mask |= PCI_ERR_UNC_COMP_ABORT;
	pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, err_mask);
}

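/**
 * fm10k_iov_resume - Restore hardware resources for all allocated VFs
 * @pdev: PCI device structure
 *
 * Reassigns queue resources, rebuilds the VF RSS DGLORT mapping, and brings
 * each VF's logical port and mailbox back up. Counterpart to
 * fm10k_iov_suspend(), and also called when iov_data is first allocated.
 *
 * Returns 0 on success, or -ENOMEM if iov_data has not been allocated.
 **/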
int fm10k_iov_resume(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	int num_vfs, i;

	/* pull out num_vfs from iov_data */
	num_vfs = iov_data ? iov_data->num_vfs : 0;

	/* return error if iov_data is not already populated */
	if (!iov_data)
		return -ENOMEM;

	/* Lower severity of completer abort error reporting as
	 * the VFs can trigger this any time they read a queue
	 * that they don't own.
	 */
	fm10k_mask_aer_comp_abort(pdev);

	/* allocate hardware resources for the VFs */
	hw->iov.ops.assign_resources(hw, num_vfs, num_vfs);

	/* configure DGLORT mapping for RSS */
	dglort.glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE;
	dglort.idx = fm10k_dglort_vf_rss;
	dglort.inner_rss = 1;
	dglort.rss_l = fls(fm10k_queues_per_pool(hw) - 1);
	dglort.queue_b = fm10k_vf_queue_index(hw, 0);
	dglort.vsi_l = fls(hw->iov.total_vfs - 1);
	dglort.vsi_b = 1;

	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* assign resources to the device */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		/* allocate all but the last GLORT to the VFs */
		if (i == (~hw->mac.dglort_map >> FM10K_DGLORTMAP_MASK_SHIFT))
			break;

		/* assign GLORT to VF, and restrict it to multicast */
		hw->iov.ops.set_lport(hw, vf_info, i,
				      FM10K_VF_FLAG_MULTI_CAPABLE);

		/* mailbox is disconnected so we don't send a message */
		hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

		/* now we are ready so we can connect */
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}

	return 0;
}

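/**
 * fm10k_iov_update_pvid - Record a switch-assigned default VID for a VF
 * @interface: interface private structure
 * @glort: glort identifying which VF the update is for
 * @pvid: new default VLAN ID reported by the switch
 *
 * Maps the glort back to a VF index and, if the switch-assigned VID has
 * changed, stores it and pushes a new default MAC/VLAN to the VF.
 **/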
s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid)
{
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_vf_info *vf_info;
	u16 vf_idx = (glort - hw->mac.dglort_map) & FM10K_DGLORTMAP_NONE;

	/* no IOV support, not our message to process */
	if (!iov_data)
		return FM10K_ERR_PARAM;

	/* glort outside our range, not our message to process */
	if (vf_idx >= iov_data->num_vfs)
		return FM10K_ERR_PARAM;

	/* determine if an update has occurred and if so notify the VF */
	vf_info = &iov_data->vf_info[vf_idx];
	if (vf_info->sw_vid != pvid) {
		vf_info->sw_vid = pvid;
		hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
	}

	return 0;
}

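/* Tear down SR-IOV state: reclaim hardware resources via fm10k_iov_suspend()
 * and release iov_data. kfree_rcu() is used because the mailbox and event
 * paths read iov_data under rcu_read_lock().
 */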
static void fm10k_iov_free_data(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);

	if (!interface->iov_data)
		return;

	/* reclaim hardware resources */
	fm10k_iov_suspend(pdev);

	/* drop iov_data from interface */
	kfree_rcu(interface->iov_data, rcu);
	interface->iov_data = NULL;
}

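/* Allocate and initialize iov_data for num_vfs VFs, set up each VF's
 * mailbox, and hand out the hardware resources via fm10k_iov_resume().
 */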
static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	size_t size;
	int i;

	/* return error if iov_data is already populated */
	if (iov_data)
		return -EBUSY;

	/* The PF should always be able to assign resources */
	if (!hw->iov.ops.assign_resources)
		return -ENODEV;

	/* nothing to do if no VFs are requested */
	if (!num_vfs)
		return 0;

	/* allocate memory for VF storage */
	size = offsetof(struct fm10k_iov_data, vf_info[num_vfs]);
	iov_data = kzalloc(size, GFP_KERNEL);
	if (!iov_data)
		return -ENOMEM;

	/* record number of VFs */
	iov_data->num_vfs = num_vfs;

	/* loop through vf_info structures initializing each entry */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
		int err;

		/* Record VF VSI value */
		vf_info->vsi = i + 1;
		vf_info->vf_idx = i;

		/* initialize mailbox memory */
		err = fm10k_pfvf_mbx_init(hw, &vf_info->mbx, iov_mbx_data, i);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to initialize SR-IOV mailbox\n");
			kfree(iov_data);
			return err;
		}
	}

	/* assign iov_data to interface */
	interface->iov_data = iov_data;

	/* allocate hardware resources for the VFs */
	fm10k_iov_resume(pdev);

	return 0;
}

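/* Disable SR-IOV: skip pci_disable_sriov() (logging an error) if VFs are
 * still assigned to guests, then free the driver's per-VF state.
 */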
void fm10k_iov_disable(struct pci_dev *pdev)
{
	if (pci_num_vf(pdev) && pci_vfs_assigned(pdev))
		dev_err(&pdev->dev,
			"Cannot disable SR-IOV while VFs are assigned\n");
	else
		pci_disable_sriov(pdev);

	fm10k_iov_free_data(pdev);
}

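/**
 * fm10k_iov_configure - Enable or disable SR-IOV VFs for this device
 * @pdev: PCI device structure
 * @num_vfs: number of VFs requested, 0 to disable
 *
 * Typically wired up as the driver's PCI .sriov_configure callback, so a
 * request such as the following (hypothetical device address) ends up here:
 *
 *   echo 4 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *
 * Returns the number of VFs now enabled, or a negative errno on failure.
 **/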
int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
{
	int current_vfs = pci_num_vf(pdev);
	int err = 0;

	if (current_vfs && pci_vfs_assigned(pdev)) {
		dev_err(&pdev->dev,
			"Cannot modify SR-IOV while VFs are assigned\n");
		num_vfs = current_vfs;
	} else {
		pci_disable_sriov(pdev);
		fm10k_iov_free_data(pdev);
	}

	/* allocate resources for the VFs */
	err = fm10k_iov_alloc_data(pdev, num_vfs);
	if (err)
		return err;

	/* allocate VFs if not already allocated */
	if (num_vfs && num_vfs != current_vfs) {
		err = pci_enable_sriov(pdev, num_vfs);
		if (err) {
			dev_err(&pdev->dev,
				"Enable PCI SR-IOV failed: %d\n", err);
			return err;
		}
	}

	return num_vfs;
}

/**
 * fm10k_iov_update_stats - Update stats for all VFs
 * @interface: device private structure
 *
 * Updates the VF statistics for all enabled VFs. Expects to be called by
 * fm10k_update_stats and assumes that locking via the __FM10K_UPDATING_STATS
 * bit is already handled.
 */
void fm10k_iov_update_stats(struct fm10k_intfc *interface)
{
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	int i;

	if (!iov_data)
		return;

	for (i = 0; i < iov_data->num_vfs; i++)
		hw->iov.ops.update_stats(hw, iov_data->vf_info[i].stats, i);
}

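/* Re-initialize a single VF after an administrative change (MAC or VLAN):
 * drop its logical port and queued MAC/VLAN requests, push the new default
 * MAC/VLAN, and re-enable the port. Takes the mailbox lock itself, so the
 * caller must not already hold it.
 */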
static inline void fm10k_reset_vf_info(struct fm10k_intfc *interface,
				       struct fm10k_vf_info *vf_info)
{
	struct fm10k_hw *hw = &interface->hw;

	/* assigning the MAC address will send a mailbox message */
	fm10k_mbx_lock(interface);

	/* disable LPORT for this VF which clears switch rules */
	hw->iov.ops.reset_lport(hw, vf_info);

	fm10k_clear_macvlan_queue(interface, vf_info->glort, false);

	/* assign new MAC+VLAN for this VF */
	hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

	/* re-enable the LPORT for this VF */
	hw->iov.ops.set_lport(hw, vf_info, vf_info->vf_idx,
			      FM10K_VF_FLAG_MULTI_CAPABLE);

	fm10k_mbx_unlock(interface);
}

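/* The remaining functions back the ndo_set_vf_* / ndo_get_vf_* netdev ops,
 * i.e. the per-VF controls exposed through tools such as "ip link set ... vf".
 */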
int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_vf_info *vf_info;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* verify MAC addr is valid */
	if (!is_zero_ether_addr(mac) && !is_valid_ether_addr(mac))
		return -EINVAL;

	/* record new MAC address */
	vf_info = &iov_data->vf_info[vf_idx];
	ether_addr_copy(vf_info->mac, mac);

	fm10k_reset_vf_info(interface, vf_info);

	return 0;
}

int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
			  u8 qos, __be16 vlan_proto)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_vf_info *vf_info;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* QoS is unsupported and VLAN IDs are limited to the range 0-4094 */
	if (qos || (vid > (VLAN_VID_MASK - 1)))
		return -EINVAL;

	/* only the default 802.1Q VLAN protocol is supported */
	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	vf_info = &iov_data->vf_info[vf_idx];

	/* exit if there is nothing to do */
	if (vf_info->pf_vid == vid)
		return 0;

	/* record default VLAN ID for VF */
	vf_info->pf_vid = vid;

	/* Clear the VLAN table for the VF */
	hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, vf_info->vsi, false);

	fm10k_reset_vf_info(interface, vf_info);

	return 0;
}

int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx,
			int __always_unused min_rate, int max_rate)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* rate limit cannot be less than 10 Mbps or greater than link speed */
	if (max_rate &&
	    (max_rate < FM10K_VF_TC_MIN || max_rate > FM10K_VF_TC_MAX))
		return -EINVAL;

	/* store values */
	iov_data->vf_info[vf_idx].rate = max_rate;

	/* update hardware configuration */
	hw->iov.ops.configure_tc(hw, vf_idx, max_rate);

	return 0;
}

int fm10k_ndo_get_vf_config(struct net_device *netdev,
			    int vf_idx, struct ifla_vf_info *ivi)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_vf_info *vf_info;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	vf_info = &iov_data->vf_info[vf_idx];

	ivi->vf = vf_idx;
	ivi->max_tx_rate = vf_info->rate;
	ivi->min_tx_rate = 0;
	ether_addr_copy(ivi->mac, vf_info->mac);
	ivi->vlan = vf_info->pf_vid;
	ivi->qos = 0;

	return 0;
}

int fm10k_ndo_get_vf_stats(struct net_device *netdev,
			   int vf_idx, struct ifla_vf_stats *stats)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_hw_stats_q *hw_stats;
	u32 idx, qpp;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	qpp = fm10k_queues_per_pool(hw);
	hw_stats = iov_data->vf_info[vf_idx].stats;

	for (idx = 0; idx < qpp; idx++) {
		stats->rx_packets += hw_stats[idx].rx_packets.count;
		stats->tx_packets += hw_stats[idx].tx_packets.count;
		stats->rx_bytes += hw_stats[idx].rx_bytes.count;
		stats->tx_bytes += hw_stats[idx].tx_bytes.count;
		stats->rx_dropped += hw_stats[idx].rx_drops.count;
	}

	return 0;
}