1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3
4#include "iavf.h"
5#include "iavf_prototype.h"
6
7/**
8 * iavf_send_pf_msg
9 * @adapter: adapter structure
10 * @op: virtual channel opcode
11 * @msg: pointer to message buffer
12 * @len: message length
13 *
14 * Send message to PF and print status if failure.
15 **/
16static int iavf_send_pf_msg(struct iavf_adapter *adapter,
17 enum virtchnl_ops op, u8 *msg, u16 len)
18{
19 struct iavf_hw *hw = &adapter->hw;
20 enum iavf_status status;
21
22 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
23 return 0; /* nothing to see here, move along */
24
25 status = iavf_aq_send_msg_to_pf(hw, v_opcode: op, v_retval: 0, msg, msglen: len, NULL);
26 if (status)
27 dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, status %s, aq_err %s\n",
28 op, iavf_stat_str(hw, status),
29 iavf_aq_str(hw, hw->aq.asq_last_status));
30 return iavf_status_to_errno(status);
31}
32
33/**
34 * iavf_send_api_ver
35 * @adapter: adapter structure
36 *
37 * Send API version admin queue message to the PF. The reply is not checked
38 * in this function. Returns 0 if the message was successfully
39 * sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not.
40 **/
41int iavf_send_api_ver(struct iavf_adapter *adapter)
42{
43 struct virtchnl_version_info vvi;
44
45 vvi.major = VIRTCHNL_VERSION_MAJOR;
46 vvi.minor = VIRTCHNL_VERSION_MINOR;
47
48 return iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_VERSION, msg: (u8 *)&vvi,
49 len: sizeof(vvi));
50}
51
52/**
53 * iavf_poll_virtchnl_msg
54 * @hw: HW configuration structure
55 * @event: event to populate on success
56 * @op_to_poll: requested virtchnl op to poll for
57 *
58 * Initialize poll for virtchnl msg matching the requested_op. Returns 0
59 * if a message of the correct opcode is in the queue or an error code
60 * if no message matching the op code is waiting and other failures.
61 */
62static int
63iavf_poll_virtchnl_msg(struct iavf_hw *hw, struct iavf_arq_event_info *event,
64 enum virtchnl_ops op_to_poll)
65{
66 enum virtchnl_ops received_op;
67 enum iavf_status status;
68 u32 v_retval;
69
70 while (1) {
71 /* When the AQ is empty, iavf_clean_arq_element will return
72 * nonzero and this loop will terminate.
73 */
74 status = iavf_clean_arq_element(hw, e: event, NULL);
75 if (status != IAVF_SUCCESS)
76 return iavf_status_to_errno(status);
77 received_op =
78 (enum virtchnl_ops)le32_to_cpu(event->desc.cookie_high);
79 if (op_to_poll == received_op)
80 break;
81 }
82
83 v_retval = le32_to_cpu(event->desc.cookie_low);
84 return virtchnl_status_to_errno(v_status: (enum virtchnl_status_code)v_retval);
85}
86
87/**
88 * iavf_verify_api_ver
89 * @adapter: adapter structure
90 *
91 * Compare API versions with the PF. Must be called after admin queue is
92 * initialized. Returns 0 if API versions match, -EIO if they do not,
93 * IAVF_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty, and any errors
94 * from the firmware are propagated.
95 **/
96int iavf_verify_api_ver(struct iavf_adapter *adapter)
97{
98 struct iavf_arq_event_info event;
99 int err;
100
101 event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
102 event.msg_buf = kzalloc(IAVF_MAX_AQ_BUF_SIZE, GFP_KERNEL);
103 if (!event.msg_buf)
104 return -ENOMEM;
105
106 err = iavf_poll_virtchnl_msg(hw: &adapter->hw, event: &event, op_to_poll: VIRTCHNL_OP_VERSION);
107 if (!err) {
108 struct virtchnl_version_info *pf_vvi =
109 (struct virtchnl_version_info *)event.msg_buf;
110 adapter->pf_version = *pf_vvi;
111
112 if (pf_vvi->major > VIRTCHNL_VERSION_MAJOR ||
113 (pf_vvi->major == VIRTCHNL_VERSION_MAJOR &&
114 pf_vvi->minor > VIRTCHNL_VERSION_MINOR))
115 err = -EIO;
116 }
117
118 kfree(objp: event.msg_buf);
119
120 return err;
121}
122
123/**
124 * iavf_send_vf_config_msg
125 * @adapter: adapter structure
126 *
127 * Send VF configuration request admin queue message to the PF. The reply
128 * is not checked in this function. Returns 0 if the message was
129 * successfully sent, or one of the IAVF_ADMIN_QUEUE_ERROR_ statuses if not.
130 **/
131int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
132{
133 u32 caps;
134
135 caps = VIRTCHNL_VF_OFFLOAD_L2 |
136 VIRTCHNL_VF_OFFLOAD_RSS_PF |
137 VIRTCHNL_VF_OFFLOAD_RSS_AQ |
138 VIRTCHNL_VF_OFFLOAD_RSS_REG |
139 VIRTCHNL_VF_OFFLOAD_VLAN |
140 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
141 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
142 VIRTCHNL_VF_OFFLOAD_ENCAP |
143 VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
144 VIRTCHNL_VF_OFFLOAD_CRC |
145 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
146 VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
147 VIRTCHNL_VF_OFFLOAD_ADQ |
148 VIRTCHNL_VF_OFFLOAD_USO |
149 VIRTCHNL_VF_OFFLOAD_FDIR_PF |
150 VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
151 VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
152
153 adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
154 adapter->aq_required &= ~IAVF_FLAG_AQ_GET_CONFIG;
155 if (PF_IS_V11(adapter))
156 return iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_GET_VF_RESOURCES,
157 msg: (u8 *)&caps, len: sizeof(caps));
158 else
159 return iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_GET_VF_RESOURCES,
160 NULL, len: 0);
161}
162
163int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter)
164{
165 adapter->aq_required &= ~IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
166
167 if (!VLAN_V2_ALLOWED(adapter))
168 return -EOPNOTSUPP;
169
170 adapter->current_op = VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS;
171
172 return iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
173 NULL, len: 0);
174}
175
176/**
177 * iavf_validate_num_queues
178 * @adapter: adapter structure
179 *
180 * Validate that the number of queues the PF has sent in
181 * VIRTCHNL_OP_GET_VF_RESOURCES is not larger than the VF can handle.
182 **/
183static void iavf_validate_num_queues(struct iavf_adapter *adapter)
184{
185 if (adapter->vf_res->num_queue_pairs > IAVF_MAX_REQ_QUEUES) {
186 struct virtchnl_vsi_resource *vsi_res;
187 int i;
188
189 dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n",
190 adapter->vf_res->num_queue_pairs,
191 IAVF_MAX_REQ_QUEUES);
192 dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n",
193 IAVF_MAX_REQ_QUEUES);
194 adapter->vf_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
195 for (i = 0; i < adapter->vf_res->num_vsis; i++) {
196 vsi_res = &adapter->vf_res->vsi_res[i];
197 vsi_res->num_queue_pairs = IAVF_MAX_REQ_QUEUES;
198 }
199 }
200}
201
202/**
203 * iavf_get_vf_config
204 * @adapter: private adapter structure
205 *
206 * Get VF configuration from PF and populate hw structure. Must be called after
207 * admin queue is initialized. Busy waits until response is received from PF,
208 * with maximum timeout. Response from PF is returned in the buffer for further
209 * processing by the caller.
210 **/
211int iavf_get_vf_config(struct iavf_adapter *adapter)
212{
213 struct iavf_hw *hw = &adapter->hw;
214 struct iavf_arq_event_info event;
215 u16 len;
216 int err;
217
218 len = IAVF_VIRTCHNL_VF_RESOURCE_SIZE;
219 event.buf_len = len;
220 event.msg_buf = kzalloc(size: len, GFP_KERNEL);
221 if (!event.msg_buf)
222 return -ENOMEM;
223
224 err = iavf_poll_virtchnl_msg(hw, event: &event, op_to_poll: VIRTCHNL_OP_GET_VF_RESOURCES);
225 memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));
226
227 /* some PFs send more queues than we should have so validate that
228 * we aren't getting too many queues
229 */
230 if (!err)
231 iavf_validate_num_queues(adapter);
232 iavf_vf_parse_hw_config(hw, msg: adapter->vf_res);
233
234 kfree(objp: event.msg_buf);
235
236 return err;
237}
238
239int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
240{
241 struct iavf_arq_event_info event;
242 int err;
243 u16 len;
244
245 len = sizeof(struct virtchnl_vlan_caps);
246 event.buf_len = len;
247 event.msg_buf = kzalloc(size: len, GFP_KERNEL);
248 if (!event.msg_buf)
249 return -ENOMEM;
250
251 err = iavf_poll_virtchnl_msg(hw: &adapter->hw, event: &event,
252 op_to_poll: VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS);
253 if (!err)
254 memcpy(&adapter->vlan_v2_caps, event.msg_buf,
255 min(event.msg_len, len));
256
257 kfree(objp: event.msg_buf);
258
259 return err;
260}
261
262/**
263 * iavf_configure_queues
264 * @adapter: adapter structure
265 *
266 * Request that the PF set up our (previously allocated) queues.
267 **/
268void iavf_configure_queues(struct iavf_adapter *adapter)
269{
270 struct virtchnl_vsi_queue_config_info *vqci;
271 int i, max_frame = adapter->vf_res->max_mtu;
272 int pairs = adapter->num_active_queues;
273 struct virtchnl_queue_pair_info *vqpi;
274 size_t len;
275
276 if (max_frame > IAVF_MAX_RXBUFFER || !max_frame)
277 max_frame = IAVF_MAX_RXBUFFER;
278
279 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
280 /* bail because we already have a command pending */
281 dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
282 adapter->current_op);
283 return;
284 }
285 adapter->current_op = VIRTCHNL_OP_CONFIG_VSI_QUEUES;
286 len = virtchnl_struct_size(vqci, qpair, pairs);
287 vqci = kzalloc(size: len, GFP_KERNEL);
288 if (!vqci)
289 return;
290
291 /* Limit maximum frame size when jumbo frames is not enabled */
292 if (!(adapter->flags & IAVF_FLAG_LEGACY_RX) &&
293 (adapter->netdev->mtu <= ETH_DATA_LEN))
294 max_frame = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
295
296 vqci->vsi_id = adapter->vsi_res->vsi_id;
297 vqci->num_queue_pairs = pairs;
298 vqpi = vqci->qpair;
299 /* Size check is not needed here - HW max is 16 queue pairs, and we
300 * can fit info for 31 of them into the AQ buffer before it overflows.
301 */
302 for (i = 0; i < pairs; i++) {
303 vqpi->txq.vsi_id = vqci->vsi_id;
304 vqpi->txq.queue_id = i;
305 vqpi->txq.ring_len = adapter->tx_rings[i].count;
306 vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
307 vqpi->rxq.vsi_id = vqci->vsi_id;
308 vqpi->rxq.queue_id = i;
309 vqpi->rxq.ring_len = adapter->rx_rings[i].count;
310 vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
311 vqpi->rxq.max_pkt_size = max_frame;
312 vqpi->rxq.databuffer_size =
313 ALIGN(adapter->rx_rings[i].rx_buf_len,
314 BIT_ULL(IAVF_RXQ_CTX_DBUFF_SHIFT));
315 if (CRC_OFFLOAD_ALLOWED(adapter))
316 vqpi->rxq.crc_disable = !!(adapter->netdev->features &
317 NETIF_F_RXFCS);
318 vqpi++;
319 }
320
321 adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_QUEUES;
322 iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_CONFIG_VSI_QUEUES,
323 msg: (u8 *)vqci, len);
324 kfree(objp: vqci);
325}
326
327/**
328 * iavf_enable_queues
329 * @adapter: adapter structure
330 *
331 * Request that the PF enable all of our queues.
332 **/
333void iavf_enable_queues(struct iavf_adapter *adapter)
334{
335 struct virtchnl_queue_select vqs;
336
337 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
338 /* bail because we already have a command pending */
339 dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
340 adapter->current_op);
341 return;
342 }
343 adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES;
344 vqs.vsi_id = adapter->vsi_res->vsi_id;
345 vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
346 vqs.rx_queues = vqs.tx_queues;
347 adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_QUEUES;
348 iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_ENABLE_QUEUES,
349 msg: (u8 *)&vqs, len: sizeof(vqs));
350}
351
352/**
353 * iavf_disable_queues
354 * @adapter: adapter structure
355 *
356 * Request that the PF disable all of our queues.
357 **/
358void iavf_disable_queues(struct iavf_adapter *adapter)
359{
360 struct virtchnl_queue_select vqs;
361
362 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
363 /* bail because we already have a command pending */
364 dev_err(&adapter->pdev->dev, "Cannot disable queues, command %d pending\n",
365 adapter->current_op);
366 return;
367 }
368 adapter->current_op = VIRTCHNL_OP_DISABLE_QUEUES;
369 vqs.vsi_id = adapter->vsi_res->vsi_id;
370 vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
371 vqs.rx_queues = vqs.tx_queues;
372 adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_QUEUES;
373 iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_DISABLE_QUEUES,
374 msg: (u8 *)&vqs, len: sizeof(vqs));
375}
376
377/**
378 * iavf_map_queues
379 * @adapter: adapter structure
380 *
381 * Request that the PF map queues to interrupt vectors. Misc causes, including
382 * admin queue, are always mapped to vector 0.
383 **/
384void iavf_map_queues(struct iavf_adapter *adapter)
385{
386 struct virtchnl_irq_map_info *vimi;
387 struct virtchnl_vector_map *vecmap;
388 struct iavf_q_vector *q_vector;
389 int v_idx, q_vectors;
390 size_t len;
391
392 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
393 /* bail because we already have a command pending */
394 dev_err(&adapter->pdev->dev, "Cannot map queues to vectors, command %d pending\n",
395 adapter->current_op);
396 return;
397 }
398 adapter->current_op = VIRTCHNL_OP_CONFIG_IRQ_MAP;
399
400 q_vectors = adapter->num_msix_vectors - NONQ_VECS;
401
402 len = virtchnl_struct_size(vimi, vecmap, adapter->num_msix_vectors);
403 vimi = kzalloc(size: len, GFP_KERNEL);
404 if (!vimi)
405 return;
406
407 vimi->num_vectors = adapter->num_msix_vectors;
408 /* Queue vectors first */
409 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
410 q_vector = &adapter->q_vectors[v_idx];
411 vecmap = &vimi->vecmap[v_idx];
412
413 vecmap->vsi_id = adapter->vsi_res->vsi_id;
414 vecmap->vector_id = v_idx + NONQ_VECS;
415 vecmap->txq_map = q_vector->ring_mask;
416 vecmap->rxq_map = q_vector->ring_mask;
417 vecmap->rxitr_idx = IAVF_RX_ITR;
418 vecmap->txitr_idx = IAVF_TX_ITR;
419 }
420 /* Misc vector last - this is only for AdminQ messages */
421 vecmap = &vimi->vecmap[v_idx];
422 vecmap->vsi_id = adapter->vsi_res->vsi_id;
423 vecmap->vector_id = 0;
424 vecmap->txq_map = 0;
425 vecmap->rxq_map = 0;
426
427 adapter->aq_required &= ~IAVF_FLAG_AQ_MAP_VECTORS;
428 iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_CONFIG_IRQ_MAP,
429 msg: (u8 *)vimi, len);
430 kfree(objp: vimi);
431}
432
433/**
434 * iavf_set_mac_addr_type - Set the correct request type from the filter type
435 * @virtchnl_ether_addr: pointer to requested list element
436 * @filter: pointer to requested filter
437 **/
438static void
439iavf_set_mac_addr_type(struct virtchnl_ether_addr *virtchnl_ether_addr,
440 const struct iavf_mac_filter *filter)
441{
442 virtchnl_ether_addr->type = filter->is_primary ?
443 VIRTCHNL_ETHER_ADDR_PRIMARY :
444 VIRTCHNL_ETHER_ADDR_EXTRA;
445}
446
447/**
448 * iavf_add_ether_addrs
449 * @adapter: adapter structure
450 *
451 * Request that the PF add one or more addresses to our filters.
452 **/
453void iavf_add_ether_addrs(struct iavf_adapter *adapter)
454{
455 struct virtchnl_ether_addr_list *veal;
456 struct iavf_mac_filter *f;
457 int i = 0, count = 0;
458 bool more = false;
459 size_t len;
460
461 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
462 /* bail because we already have a command pending */
463 dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
464 adapter->current_op);
465 return;
466 }
467
468 spin_lock_bh(lock: &adapter->mac_vlan_list_lock);
469
470 list_for_each_entry(f, &adapter->mac_filter_list, list) {
471 if (f->add)
472 count++;
473 }
474 if (!count) {
475 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;
476 spin_unlock_bh(lock: &adapter->mac_vlan_list_lock);
477 return;
478 }
479 adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;
480
481 len = virtchnl_struct_size(veal, list, count);
482 if (len > IAVF_MAX_AQ_BUF_SIZE) {
483 dev_warn(&adapter->pdev->dev, "Too many add MAC changes in one request\n");
484 while (len > IAVF_MAX_AQ_BUF_SIZE)
485 len = virtchnl_struct_size(veal, list, --count);
486 more = true;
487 }
488
489 veal = kzalloc(size: len, GFP_ATOMIC);
490 if (!veal) {
491 spin_unlock_bh(lock: &adapter->mac_vlan_list_lock);
492 return;
493 }
494
495 veal->vsi_id = adapter->vsi_res->vsi_id;
496 veal->num_elements = count;
497 list_for_each_entry(f, &adapter->mac_filter_list, list) {
498 if (f->add) {
499 ether_addr_copy(dst: veal->list[i].addr, src: f->macaddr);
500 iavf_set_mac_addr_type(virtchnl_ether_addr: &veal->list[i], filter: f);
501 i++;
502 f->add = false;
503 if (i == count)
504 break;
505 }
506 }
507 if (!more)
508 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_MAC_FILTER;
509
510 spin_unlock_bh(lock: &adapter->mac_vlan_list_lock);
511
512 iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_ADD_ETH_ADDR, msg: (u8 *)veal, len);
513 kfree(objp: veal);
514}
515
516/**
517 * iavf_del_ether_addrs
518 * @adapter: adapter structure
519 *
520 * Request that the PF remove one or more addresses from our filters.
521 **/
522void iavf_del_ether_addrs(struct iavf_adapter *adapter)
523{
524 struct virtchnl_ether_addr_list *veal;
525 struct iavf_mac_filter *f, *ftmp;
526 int i = 0, count = 0;
527 bool more = false;
528 size_t len;
529
530 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
531 /* bail because we already have a command pending */
532 dev_err(&adapter->pdev->dev, "Cannot remove filters, command %d pending\n",
533 adapter->current_op);
534 return;
535 }
536
537 spin_lock_bh(lock: &adapter->mac_vlan_list_lock);
538
539 list_for_each_entry(f, &adapter->mac_filter_list, list) {
540 if (f->remove)
541 count++;
542 }
543 if (!count) {
544 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
545 spin_unlock_bh(lock: &adapter->mac_vlan_list_lock);
546 return;
547 }
548 adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;
549
550 len = virtchnl_struct_size(veal, list, count);
551 if (len > IAVF_MAX_AQ_BUF_SIZE) {
552 dev_warn(&adapter->pdev->dev, "Too many delete MAC changes in one request\n");
553 while (len > IAVF_MAX_AQ_BUF_SIZE)
554 len = virtchnl_struct_size(veal, list, --count);
555 more = true;
556 }
557 veal = kzalloc(size: len, GFP_ATOMIC);
558 if (!veal) {
559 spin_unlock_bh(lock: &adapter->mac_vlan_list_lock);
560 return;
561 }
562
563 veal->vsi_id = adapter->vsi_res->vsi_id;
564 veal->num_elements = count;
565 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
566 if (f->remove) {
567 ether_addr_copy(dst: veal->list[i].addr, src: f->macaddr);
568 iavf_set_mac_addr_type(virtchnl_ether_addr: &veal->list[i], filter: f);
569 i++;
570 list_del(entry: &f->list);
571 kfree(objp: f);
572 if (i == count)
573 break;
574 }
575 }
576 if (!more)
577 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_MAC_FILTER;
578
579 spin_unlock_bh(lock: &adapter->mac_vlan_list_lock);
580
581 iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_DEL_ETH_ADDR, msg: (u8 *)veal, len);
582 kfree(objp: veal);
583}
584
585/**
586 * iavf_mac_add_ok
587 * @adapter: adapter structure
588 *
589 * Submit list of filters based on PF response.
590 **/
591static void iavf_mac_add_ok(struct iavf_adapter *adapter)
592{
593 struct iavf_mac_filter *f, *ftmp;
594
595 spin_lock_bh(lock: &adapter->mac_vlan_list_lock);
596 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
597 f->is_new_mac = false;
598 if (!f->add && !f->add_handled)
599 f->add_handled = true;
600 }
601 spin_unlock_bh(lock: &adapter->mac_vlan_list_lock);
602}
603
604/**
605 * iavf_mac_add_reject
606 * @adapter: adapter structure
607 *
608 * Remove filters from list based on PF response.
609 **/
610static void iavf_mac_add_reject(struct iavf_adapter *adapter)
611{
612 struct net_device *netdev = adapter->netdev;
613 struct iavf_mac_filter *f, *ftmp;
614
615 spin_lock_bh(lock: &adapter->mac_vlan_list_lock);
616 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
617 if (f->remove && ether_addr_equal(addr1: f->macaddr, addr2: netdev->dev_addr))
618 f->remove = false;
619
620 if (!f->add && !f->add_handled)
621 f->add_handled = true;
622
623 if (f->is_new_mac) {
624 list_del(entry: &f->list);
625 kfree(objp: f);
626 }
627 }
628 spin_unlock_bh(lock: &adapter->mac_vlan_list_lock);
629}
630
631/**
632 * iavf_vlan_add_reject
633 * @adapter: adapter structure
634 *
635 * Remove VLAN filters from list based on PF response.
636 **/
637static void iavf_vlan_add_reject(struct iavf_adapter *adapter)
638{
639 struct iavf_vlan_filter *f, *ftmp;
640
641 spin_lock_bh(lock: &adapter->mac_vlan_list_lock);
642 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
643 if (f->state == IAVF_VLAN_IS_NEW) {
644 list_del(entry: &f->list);
645 kfree(objp: f);
646 adapter->num_vlan_filters--;
647 }
648 }
649 spin_unlock_bh(lock: &adapter->mac_vlan_list_lock);
650}
651
652/**
653 * iavf_add_vlans
654 * @adapter: adapter structure
655 *
656 * Request that the PF add one or more VLAN filters to our VSI.
657 **/
658void iavf_add_vlans(struct iavf_adapter *adapter)
659{
660 int len, i = 0, count = 0;
661 struct iavf_vlan_filter *f;
662 bool more = false;
663
664 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
665 /* bail because we already have a command pending */
666 dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n",
667 adapter->current_op);
668 return;
669 }
670
671 spin_lock_bh(lock: &adapter->mac_vlan_list_lock);
672
673 list_for_each_entry(f, &adapter->vlan_filter_list, list) {
674 if (f->state == IAVF_VLAN_ADD)
675 count++;
676 }
677 if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
678 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
679 spin_unlock_bh(lock: &adapter->mac_vlan_list_lock);
680 return;
681 }
682
683 if (VLAN_ALLOWED(adapter)) {
684 struct virtchnl_vlan_filter_list *vvfl;
685
686 adapter->current_op = VIRTCHNL_OP_ADD_VLAN;
687
688 len = virtchnl_struct_size(vvfl, vlan_id, count);
689 if (len > IAVF_MAX_AQ_BUF_SIZE) {
690 dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
691 while (len > IAVF_MAX_AQ_BUF_SIZE)
692 len = virtchnl_struct_size(vvfl, vlan_id,
693 --count);
694 more = true;
695 }
696 vvfl = kzalloc(size: len, GFP_ATOMIC);
697 if (!vvfl) {
698 spin_unlock_bh(lock: &adapter->mac_vlan_list_lock);
699 return;
700 }
701
702 vvfl->vsi_id = adapter->vsi_res->vsi_id;
703 vvfl->num_elements = count;
704 list_for_each_entry(f, &adapter->vlan_filter_list, list) {
705 if (f->state == IAVF_VLAN_ADD) {
706 vvfl->vlan_id[i] = f->vlan.vid;
707 i++;
708 f->state = IAVF_VLAN_IS_NEW;
709 if (i == count)
710 break;
711 }
712 }
713 if (!more)
714 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
715
716 spin_unlock_bh(lock: &adapter->mac_vlan_list_lock);
717
718 iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_ADD_VLAN, msg: (u8 *)vvfl, len);
719 kfree(objp: vvfl);
720 } else {
721 u16 max_vlans = adapter->vlan_v2_caps.filtering.max_filters;
722 u16 current_vlans = iavf_get_num_vlans_added(adapter);
723 struct virtchnl_vlan_filter_list_v2 *vvfl_v2;
724
725 adapter->current_op = VIRTCHNL_OP_ADD_VLAN_V2;
726
727 if ((count + current_vlans) > max_vlans &&
728 current_vlans < max_vlans) {
729 count = max_vlans - iavf_get_num_vlans_added(adapter);
730 more = true;
731 }
732
733 len = virtchnl_struct_size(vvfl_v2, filters, count);
734 if (len > IAVF_MAX_AQ_BUF_SIZE) {
735 dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
736 while (len > IAVF_MAX_AQ_BUF_SIZE)
737 len = virtchnl_struct_size(vvfl_v2, filters,
738 --count);
739 more = true;
740 }
741
742 vvfl_v2 = kzalloc(size: len, GFP_ATOMIC);
743 if (!vvfl_v2) {
744 spin_unlock_bh(lock: &adapter->mac_vlan_list_lock);
745 return;
746 }
747
748 vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
749 vvfl_v2->num_elements = count;
750 list_for_each_entry(f, &adapter->vlan_filter_list, list) {
751 if (f->state == IAVF_VLAN_ADD) {
752 struct virtchnl_vlan_supported_caps *filtering_support =
753 &adapter->vlan_v2_caps.filtering.filtering_support;
754 struct virtchnl_vlan *vlan;
755
756 if (i == count)
757 break;
758
759 /* give priority over outer if it's enabled */
760 if (filtering_support->outer)
761 vlan = &vvfl_v2->filters[i].outer;
762 else
763 vlan = &vvfl_v2->filters[i].inner;
764
765 vlan->tci = f->vlan.vid;
766 vlan->tpid = f->vlan.tpid;
767
768 i++;
769 f->state = IAVF_VLAN_IS_NEW;
770 }
771 }
772
773 if (!more)
774 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
775
776 spin_unlock_bh(lock: &adapter->mac_vlan_list_lock);
777
778 iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_ADD_VLAN_V2,
779 msg: (u8 *)vvfl_v2, len);
780 kfree(objp: vvfl_v2);
781 }
782}
783
784/**
785 * iavf_del_vlans
786 * @adapter: adapter structure
787 *
788 * Request that the PF remove one or more VLAN filters from our VSI.
789 **/
790void iavf_del_vlans(struct iavf_adapter *adapter)
791{
792 struct iavf_vlan_filter *f, *ftmp;
793 int len, i = 0, count = 0;
794 bool more = false;
795
796 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
797 /* bail because we already have a command pending */
798 dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n",
799 adapter->current_op);
800 return;
801 }
802
803 spin_lock_bh(lock: &adapter->mac_vlan_list_lock);
804
805 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
806 /* since VLAN capabilities are not allowed, we dont want to send
807 * a VLAN delete request because it will most likely fail and
808 * create unnecessary errors/noise, so just free the VLAN
809 * filters marked for removal to enable bailing out before
810 * sending a virtchnl message
811 */
812 if (f->state == IAVF_VLAN_REMOVE &&
813 !VLAN_FILTERING_ALLOWED(adapter)) {
814 list_del(entry: &f->list);
815 kfree(objp: f);
816 adapter->num_vlan_filters--;
817 } else if (f->state == IAVF_VLAN_DISABLE &&
818 !VLAN_FILTERING_ALLOWED(adapter)) {
819 f->state = IAVF_VLAN_INACTIVE;
820 } else if (f->state == IAVF_VLAN_REMOVE ||
821 f->state == IAVF_VLAN_DISABLE) {
822 count++;
823 }
824 }
825 if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
826 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
827 spin_unlock_bh(lock: &adapter->mac_vlan_list_lock);
828 return;
829 }
830
831 if (VLAN_ALLOWED(adapter)) {
832 struct virtchnl_vlan_filter_list *vvfl;
833
834 adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
835
836 len = virtchnl_struct_size(vvfl, vlan_id, count);
837 if (len > IAVF_MAX_AQ_BUF_SIZE) {
838 dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
839 while (len > IAVF_MAX_AQ_BUF_SIZE)
840 len = virtchnl_struct_size(vvfl, vlan_id,
841 --count);
842 more = true;
843 }
844 vvfl = kzalloc(size: len, GFP_ATOMIC);
845 if (!vvfl) {
846 spin_unlock_bh(lock: &adapter->mac_vlan_list_lock);
847 return;
848 }
849
850 vvfl->vsi_id = adapter->vsi_res->vsi_id;
851 vvfl->num_elements = count;
852 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
853 if (f->state == IAVF_VLAN_DISABLE) {
854 vvfl->vlan_id[i] = f->vlan.vid;
855 f->state = IAVF_VLAN_INACTIVE;
856 i++;
857 if (i == count)
858 break;
859 } else if (f->state == IAVF_VLAN_REMOVE) {
860 vvfl->vlan_id[i] = f->vlan.vid;
861 list_del(entry: &f->list);
862 kfree(objp: f);
863 adapter->num_vlan_filters--;
864 i++;
865 if (i == count)
866 break;
867 }
868 }
869
870 if (!more)
871 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
872
873 spin_unlock_bh(lock: &adapter->mac_vlan_list_lock);
874
875 iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_DEL_VLAN, msg: (u8 *)vvfl, len);
876 kfree(objp: vvfl);
877 } else {
878 struct virtchnl_vlan_filter_list_v2 *vvfl_v2;
879
880 adapter->current_op = VIRTCHNL_OP_DEL_VLAN_V2;
881
882 len = virtchnl_struct_size(vvfl_v2, filters, count);
883 if (len > IAVF_MAX_AQ_BUF_SIZE) {
884 dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
885 while (len > IAVF_MAX_AQ_BUF_SIZE)
886 len = virtchnl_struct_size(vvfl_v2, filters,
887 --count);
888 more = true;
889 }
890
891 vvfl_v2 = kzalloc(size: len, GFP_ATOMIC);
892 if (!vvfl_v2) {
893 spin_unlock_bh(lock: &adapter->mac_vlan_list_lock);
894 return;
895 }
896
897 vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
898 vvfl_v2->num_elements = count;
899 list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
900 if (f->state == IAVF_VLAN_DISABLE ||
901 f->state == IAVF_VLAN_REMOVE) {
902 struct virtchnl_vlan_supported_caps *filtering_support =
903 &adapter->vlan_v2_caps.filtering.filtering_support;
904 struct virtchnl_vlan *vlan;
905
906 /* give priority over outer if it's enabled */
907 if (filtering_support->outer)
908 vlan = &vvfl_v2->filters[i].outer;
909 else
910 vlan = &vvfl_v2->filters[i].inner;
911
912 vlan->tci = f->vlan.vid;
913 vlan->tpid = f->vlan.tpid;
914
915 if (f->state == IAVF_VLAN_DISABLE) {
916 f->state = IAVF_VLAN_INACTIVE;
917 } else {
918 list_del(entry: &f->list);
919 kfree(objp: f);
920 adapter->num_vlan_filters--;
921 }
922 i++;
923 if (i == count)
924 break;
925 }
926 }
927
928 if (!more)
929 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
930
931 spin_unlock_bh(lock: &adapter->mac_vlan_list_lock);
932
933 iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_DEL_VLAN_V2,
934 msg: (u8 *)vvfl_v2, len);
935 kfree(objp: vvfl_v2);
936 }
937}
938
939/**
940 * iavf_set_promiscuous
941 * @adapter: adapter structure
942 *
943 * Request that the PF enable promiscuous mode for our VSI.
944 **/
945void iavf_set_promiscuous(struct iavf_adapter *adapter)
946{
947 struct net_device *netdev = adapter->netdev;
948 struct virtchnl_promisc_info vpi;
949 unsigned int flags;
950
951 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
952 /* bail because we already have a command pending */
953 dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n",
954 adapter->current_op);
955 return;
956 }
957
958 /* prevent changes to promiscuous flags */
959 spin_lock_bh(lock: &adapter->current_netdev_promisc_flags_lock);
960
961 /* sanity check to prevent duplicate AQ calls */
962 if (!iavf_promiscuous_mode_changed(adapter)) {
963 adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
964 dev_dbg(&adapter->pdev->dev, "No change in promiscuous mode\n");
965 /* allow changes to promiscuous flags */
966 spin_unlock_bh(lock: &adapter->current_netdev_promisc_flags_lock);
967 return;
968 }
969
970 /* there are 2 bits, but only 3 states */
971 if (!(netdev->flags & IFF_PROMISC) &&
972 netdev->flags & IFF_ALLMULTI) {
973 /* State 1 - only multicast promiscuous mode enabled
974 * - !IFF_PROMISC && IFF_ALLMULTI
975 */
976 flags = FLAG_VF_MULTICAST_PROMISC;
977 adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
978 adapter->current_netdev_promisc_flags &= ~IFF_PROMISC;
979 dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
980 } else if (!(netdev->flags & IFF_PROMISC) &&
981 !(netdev->flags & IFF_ALLMULTI)) {
982 /* State 2 - unicast/multicast promiscuous mode disabled
983 * - !IFF_PROMISC && !IFF_ALLMULTI
984 */
985 flags = 0;
986 adapter->current_netdev_promisc_flags &=
987 ~(IFF_PROMISC | IFF_ALLMULTI);
988 dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
989 } else {
990 /* State 3 - unicast/multicast promiscuous mode enabled
991 * - IFF_PROMISC && IFF_ALLMULTI
992 * - IFF_PROMISC && !IFF_ALLMULTI
993 */
994 flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
995 adapter->current_netdev_promisc_flags |= IFF_PROMISC;
996 if (netdev->flags & IFF_ALLMULTI)
997 adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
998 else
999 adapter->current_netdev_promisc_flags &= ~IFF_ALLMULTI;
1000
1001 dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
1002 }
1003
1004 adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
1005
1006 /* allow changes to promiscuous flags */
1007 spin_unlock_bh(lock: &adapter->current_netdev_promisc_flags_lock);
1008
1009 adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
1010 vpi.vsi_id = adapter->vsi_res->vsi_id;
1011 vpi.flags = flags;
1012 iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
1013 msg: (u8 *)&vpi, len: sizeof(vpi));
1014}
1015
1016/**
1017 * iavf_request_stats
1018 * @adapter: adapter structure
1019 *
1020 * Request VSI statistics from PF.
1021 **/
1022void iavf_request_stats(struct iavf_adapter *adapter)
1023{
1024 struct virtchnl_queue_select vqs;
1025
1026 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1027 /* no error message, this isn't crucial */
1028 return;
1029 }
1030
1031 adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_STATS;
1032 adapter->current_op = VIRTCHNL_OP_GET_STATS;
1033 vqs.vsi_id = adapter->vsi_res->vsi_id;
1034 /* queue maps are ignored for this message - only the vsi is used */
1035 if (iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_GET_STATS, msg: (u8 *)&vqs,
1036 len: sizeof(vqs)))
1037 /* if the request failed, don't lock out others */
1038 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1039}
1040
1041/**
1042 * iavf_get_hena
1043 * @adapter: adapter structure
1044 *
1045 * Request hash enable capabilities from PF
1046 **/
1047void iavf_get_hena(struct iavf_adapter *adapter)
1048{
1049 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1050 /* bail because we already have a command pending */
1051 dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
1052 adapter->current_op);
1053 return;
1054 }
1055 adapter->current_op = VIRTCHNL_OP_GET_RSS_HENA_CAPS;
1056 adapter->aq_required &= ~IAVF_FLAG_AQ_GET_HENA;
1057 iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_GET_RSS_HENA_CAPS, NULL, len: 0);
1058}
1059
1060/**
1061 * iavf_set_hena
1062 * @adapter: adapter structure
1063 *
1064 * Request the PF to set our RSS hash capabilities
1065 **/
1066void iavf_set_hena(struct iavf_adapter *adapter)
1067{
1068 struct virtchnl_rss_hena vrh;
1069
1070 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1071 /* bail because we already have a command pending */
1072 dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
1073 adapter->current_op);
1074 return;
1075 }
1076 vrh.hena = adapter->hena;
1077 adapter->current_op = VIRTCHNL_OP_SET_RSS_HENA;
1078 adapter->aq_required &= ~IAVF_FLAG_AQ_SET_HENA;
1079 iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_SET_RSS_HENA, msg: (u8 *)&vrh,
1080 len: sizeof(vrh));
1081}
1082
1083/**
1084 * iavf_set_rss_key
1085 * @adapter: adapter structure
1086 *
1087 * Request the PF to set our RSS hash key
1088 **/
1089void iavf_set_rss_key(struct iavf_adapter *adapter)
1090{
1091 struct virtchnl_rss_key *vrk;
1092 int len;
1093
1094 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1095 /* bail because we already have a command pending */
1096 dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
1097 adapter->current_op);
1098 return;
1099 }
1100 len = virtchnl_struct_size(vrk, key, adapter->rss_key_size);
1101 vrk = kzalloc(size: len, GFP_KERNEL);
1102 if (!vrk)
1103 return;
1104 vrk->vsi_id = adapter->vsi.id;
1105 vrk->key_len = adapter->rss_key_size;
1106 memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);
1107
1108 adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_KEY;
1109 adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_KEY;
1110 iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_CONFIG_RSS_KEY, msg: (u8 *)vrk, len);
1111 kfree(objp: vrk);
1112}
1113
1114/**
1115 * iavf_set_rss_lut
1116 * @adapter: adapter structure
1117 *
1118 * Request the PF to set our RSS lookup table
1119 **/
1120void iavf_set_rss_lut(struct iavf_adapter *adapter)
1121{
1122 struct virtchnl_rss_lut *vrl;
1123 int len;
1124
1125 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1126 /* bail because we already have a command pending */
1127 dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
1128 adapter->current_op);
1129 return;
1130 }
1131 len = virtchnl_struct_size(vrl, lut, adapter->rss_lut_size);
1132 vrl = kzalloc(size: len, GFP_KERNEL);
1133 if (!vrl)
1134 return;
1135 vrl->vsi_id = adapter->vsi.id;
1136 vrl->lut_entries = adapter->rss_lut_size;
1137 memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
1138 adapter->current_op = VIRTCHNL_OP_CONFIG_RSS_LUT;
1139 adapter->aq_required &= ~IAVF_FLAG_AQ_SET_RSS_LUT;
1140 iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_CONFIG_RSS_LUT, msg: (u8 *)vrl, len);
1141 kfree(objp: vrl);
1142}
1143
1144/**
1145 * iavf_enable_vlan_stripping
1146 * @adapter: adapter structure
1147 *
1148 * Request VLAN header stripping to be enabled
1149 **/
1150void iavf_enable_vlan_stripping(struct iavf_adapter *adapter)
1151{
1152 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1153 /* bail because we already have a command pending */
1154 dev_err(&adapter->pdev->dev, "Cannot enable stripping, command %d pending\n",
1155 adapter->current_op);
1156 return;
1157 }
1158 adapter->current_op = VIRTCHNL_OP_ENABLE_VLAN_STRIPPING;
1159 adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
1160 iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, NULL, len: 0);
1161}
1162
1163/**
1164 * iavf_disable_vlan_stripping
1165 * @adapter: adapter structure
1166 *
1167 * Request VLAN header stripping to be disabled
1168 **/
1169void iavf_disable_vlan_stripping(struct iavf_adapter *adapter)
1170{
1171 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1172 /* bail because we already have a command pending */
1173 dev_err(&adapter->pdev->dev, "Cannot disable stripping, command %d pending\n",
1174 adapter->current_op);
1175 return;
1176 }
1177 adapter->current_op = VIRTCHNL_OP_DISABLE_VLAN_STRIPPING;
1178 adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
1179 iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, len: 0);
1180}
1181
1182/**
1183 * iavf_tpid_to_vc_ethertype - transform from VLAN TPID to virtchnl ethertype
1184 * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.)
1185 */
1186static u32 iavf_tpid_to_vc_ethertype(u16 tpid)
1187{
1188 switch (tpid) {
1189 case ETH_P_8021Q:
1190 return VIRTCHNL_VLAN_ETHERTYPE_8100;
1191 case ETH_P_8021AD:
1192 return VIRTCHNL_VLAN_ETHERTYPE_88A8;
1193 }
1194
1195 return 0;
1196}
1197
1198/**
1199 * iavf_set_vc_offload_ethertype - set virtchnl ethertype for offload message
1200 * @adapter: adapter structure
1201 * @msg: message structure used for updating offloads over virtchnl to update
1202 * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.)
1203 * @offload_op: opcode used to determine which support structure to check
1204 */
1205static int
1206iavf_set_vc_offload_ethertype(struct iavf_adapter *adapter,
1207 struct virtchnl_vlan_setting *msg, u16 tpid,
1208 enum virtchnl_ops offload_op)
1209{
1210 struct virtchnl_vlan_supported_caps *offload_support;
1211 u16 vc_ethertype = iavf_tpid_to_vc_ethertype(tpid);
1212
1213 /* reference the correct offload support structure */
1214 switch (offload_op) {
1215 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
1216 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
1217 offload_support =
1218 &adapter->vlan_v2_caps.offloads.stripping_support;
1219 break;
1220 case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
1221 case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
1222 offload_support =
1223 &adapter->vlan_v2_caps.offloads.insertion_support;
1224 break;
1225 default:
1226 dev_err(&adapter->pdev->dev, "Invalid opcode %d for setting virtchnl ethertype to enable/disable VLAN offloads\n",
1227 offload_op);
1228 return -EINVAL;
1229 }
1230
1231 /* make sure ethertype is supported */
1232 if (offload_support->outer & vc_ethertype &&
1233 offload_support->outer & VIRTCHNL_VLAN_TOGGLE) {
1234 msg->outer_ethertype_setting = vc_ethertype;
1235 } else if (offload_support->inner & vc_ethertype &&
1236 offload_support->inner & VIRTCHNL_VLAN_TOGGLE) {
1237 msg->inner_ethertype_setting = vc_ethertype;
1238 } else {
1239 dev_dbg(&adapter->pdev->dev, "opcode %d unsupported for VLAN TPID 0x%04x\n",
1240 offload_op, tpid);
1241 return -EINVAL;
1242 }
1243
1244 return 0;
1245}
1246
1247/**
1248 * iavf_clear_offload_v2_aq_required - clear AQ required bit for offload request
1249 * @adapter: adapter structure
1250 * @tpid: VLAN TPID
1251 * @offload_op: opcode used to determine which AQ required bit to clear
1252 */
1253static void
1254iavf_clear_offload_v2_aq_required(struct iavf_adapter *adapter, u16 tpid,
1255 enum virtchnl_ops offload_op)
1256{
1257 switch (offload_op) {
1258 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
1259 if (tpid == ETH_P_8021Q)
1260 adapter->aq_required &=
1261 ~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
1262 else if (tpid == ETH_P_8021AD)
1263 adapter->aq_required &=
1264 ~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
1265 break;
1266 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
1267 if (tpid == ETH_P_8021Q)
1268 adapter->aq_required &=
1269 ~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;
1270 else if (tpid == ETH_P_8021AD)
1271 adapter->aq_required &=
1272 ~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;
1273 break;
1274 case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
1275 if (tpid == ETH_P_8021Q)
1276 adapter->aq_required &=
1277 ~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
1278 else if (tpid == ETH_P_8021AD)
1279 adapter->aq_required &=
1280 ~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
1281 break;
1282 case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
1283 if (tpid == ETH_P_8021Q)
1284 adapter->aq_required &=
1285 ~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
1286 else if (tpid == ETH_P_8021AD)
1287 adapter->aq_required &=
1288 ~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
1289 break;
1290 default:
1291 dev_err(&adapter->pdev->dev, "Unsupported opcode %d specified for clearing aq_required bits for VIRTCHNL_VF_OFFLOAD_VLAN_V2 offload request\n",
1292 offload_op);
1293 }
1294}
1295
1296/**
1297 * iavf_send_vlan_offload_v2 - send offload enable/disable over virtchnl
1298 * @adapter: adapter structure
1299 * @tpid: VLAN TPID used for the command (i.e. 0x8100 or 0x88a8)
1300 * @offload_op: offload_op used to make the request over virtchnl
1301 */
1302static void
1303iavf_send_vlan_offload_v2(struct iavf_adapter *adapter, u16 tpid,
1304 enum virtchnl_ops offload_op)
1305{
1306 struct virtchnl_vlan_setting *msg;
1307 int len = sizeof(*msg);
1308
1309 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1310 /* bail because we already have a command pending */
1311 dev_err(&adapter->pdev->dev, "Cannot send %d, command %d pending\n",
1312 offload_op, adapter->current_op);
1313 return;
1314 }
1315
1316 adapter->current_op = offload_op;
1317
1318 msg = kzalloc(size: len, GFP_KERNEL);
1319 if (!msg)
1320 return;
1321
1322 msg->vport_id = adapter->vsi_res->vsi_id;
1323
1324 /* always clear to prevent unsupported and endless requests */
1325 iavf_clear_offload_v2_aq_required(adapter, tpid, offload_op);
1326
1327 /* only send valid offload requests */
1328 if (!iavf_set_vc_offload_ethertype(adapter, msg, tpid, offload_op))
1329 iavf_send_pf_msg(adapter, op: offload_op, msg: (u8 *)msg, len);
1330 else
1331 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1332
1333 kfree(objp: msg);
1334}
1335
1336/**
1337 * iavf_enable_vlan_stripping_v2 - enable VLAN stripping
1338 * @adapter: adapter structure
1339 * @tpid: VLAN TPID used to enable VLAN stripping
1340 */
1341void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid)
1342{
1343 iavf_send_vlan_offload_v2(adapter, tpid,
1344 offload_op: VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2);
1345}
1346
1347/**
1348 * iavf_disable_vlan_stripping_v2 - disable VLAN stripping
1349 * @adapter: adapter structure
1350 * @tpid: VLAN TPID used to disable VLAN stripping
1351 */
1352void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid)
1353{
1354 iavf_send_vlan_offload_v2(adapter, tpid,
1355 offload_op: VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2);
1356}
1357
1358/**
1359 * iavf_enable_vlan_insertion_v2 - enable VLAN insertion
1360 * @adapter: adapter structure
1361 * @tpid: VLAN TPID used to enable VLAN insertion
1362 */
1363void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
1364{
1365 iavf_send_vlan_offload_v2(adapter, tpid,
1366 offload_op: VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2);
1367}
1368
1369/**
1370 * iavf_disable_vlan_insertion_v2 - disable VLAN insertion
1371 * @adapter: adapter structure
1372 * @tpid: VLAN TPID used to disable VLAN insertion
1373 */
1374void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
1375{
1376 iavf_send_vlan_offload_v2(adapter, tpid,
1377 offload_op: VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2);
1378}
1379
1380/**
1381 * iavf_print_link_message - print link up or down
1382 * @adapter: adapter structure
1383 *
1384 * Log a message telling the world of our wonderous link status
1385 */
1386static void iavf_print_link_message(struct iavf_adapter *adapter)
1387{
1388 struct net_device *netdev = adapter->netdev;
1389 int link_speed_mbps;
1390 char *speed;
1391
1392 if (!adapter->link_up) {
1393 netdev_info(dev: netdev, format: "NIC Link is Down\n");
1394 return;
1395 }
1396
1397 if (ADV_LINK_SUPPORT(adapter)) {
1398 link_speed_mbps = adapter->link_speed_mbps;
1399 goto print_link_msg;
1400 }
1401
1402 switch (adapter->link_speed) {
1403 case VIRTCHNL_LINK_SPEED_40GB:
1404 link_speed_mbps = SPEED_40000;
1405 break;
1406 case VIRTCHNL_LINK_SPEED_25GB:
1407 link_speed_mbps = SPEED_25000;
1408 break;
1409 case VIRTCHNL_LINK_SPEED_20GB:
1410 link_speed_mbps = SPEED_20000;
1411 break;
1412 case VIRTCHNL_LINK_SPEED_10GB:
1413 link_speed_mbps = SPEED_10000;
1414 break;
1415 case VIRTCHNL_LINK_SPEED_5GB:
1416 link_speed_mbps = SPEED_5000;
1417 break;
1418 case VIRTCHNL_LINK_SPEED_2_5GB:
1419 link_speed_mbps = SPEED_2500;
1420 break;
1421 case VIRTCHNL_LINK_SPEED_1GB:
1422 link_speed_mbps = SPEED_1000;
1423 break;
1424 case VIRTCHNL_LINK_SPEED_100MB:
1425 link_speed_mbps = SPEED_100;
1426 break;
1427 default:
1428 link_speed_mbps = SPEED_UNKNOWN;
1429 break;
1430 }
1431
1432print_link_msg:
1433 if (link_speed_mbps > SPEED_1000) {
1434 if (link_speed_mbps == SPEED_2500) {
1435 speed = kasprintf(GFP_KERNEL, fmt: "%s", "2.5 Gbps");
1436 } else {
1437 /* convert to Gbps inline */
1438 speed = kasprintf(GFP_KERNEL, fmt: "%d Gbps",
1439 link_speed_mbps / 1000);
1440 }
1441 } else if (link_speed_mbps == SPEED_UNKNOWN) {
1442 speed = kasprintf(GFP_KERNEL, fmt: "%s", "Unknown Mbps");
1443 } else {
1444 speed = kasprintf(GFP_KERNEL, fmt: "%d Mbps", link_speed_mbps);
1445 }
1446
1447 netdev_info(dev: netdev, format: "NIC Link is Up Speed is %s Full Duplex\n", speed);
1448 kfree(objp: speed);
1449}
1450
1451/**
1452 * iavf_get_vpe_link_status
1453 * @adapter: adapter structure
1454 * @vpe: virtchnl_pf_event structure
1455 *
1456 * Helper function for determining the link status
1457 **/
1458static bool
1459iavf_get_vpe_link_status(struct iavf_adapter *adapter,
1460 struct virtchnl_pf_event *vpe)
1461{
1462 if (ADV_LINK_SUPPORT(adapter))
1463 return vpe->event_data.link_event_adv.link_status;
1464 else
1465 return vpe->event_data.link_event.link_status;
1466}
1467
1468/**
1469 * iavf_set_adapter_link_speed_from_vpe
1470 * @adapter: adapter structure for which we are setting the link speed
1471 * @vpe: virtchnl_pf_event structure that contains the link speed we are setting
1472 *
1473 * Helper function for setting iavf_adapter link speed
1474 **/
1475static void
1476iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter,
1477 struct virtchnl_pf_event *vpe)
1478{
1479 if (ADV_LINK_SUPPORT(adapter))
1480 adapter->link_speed_mbps =
1481 vpe->event_data.link_event_adv.link_speed;
1482 else
1483 adapter->link_speed = vpe->event_data.link_event.link_speed;
1484}
1485
1486/**
1487 * iavf_enable_channels
1488 * @adapter: adapter structure
1489 *
1490 * Request that the PF enable channels as specified by
1491 * the user via tc tool.
1492 **/
1493void iavf_enable_channels(struct iavf_adapter *adapter)
1494{
1495 struct virtchnl_tc_info *vti = NULL;
1496 size_t len;
1497 int i;
1498
1499 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1500 /* bail because we already have a command pending */
1501 dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
1502 adapter->current_op);
1503 return;
1504 }
1505
1506 len = virtchnl_struct_size(vti, list, adapter->num_tc);
1507 vti = kzalloc(size: len, GFP_KERNEL);
1508 if (!vti)
1509 return;
1510 vti->num_tc = adapter->num_tc;
1511 for (i = 0; i < vti->num_tc; i++) {
1512 vti->list[i].count = adapter->ch_config.ch_info[i].count;
1513 vti->list[i].offset = adapter->ch_config.ch_info[i].offset;
1514 vti->list[i].pad = 0;
1515 vti->list[i].max_tx_rate =
1516 adapter->ch_config.ch_info[i].max_tx_rate;
1517 }
1518
1519 adapter->ch_config.state = __IAVF_TC_RUNNING;
1520 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
1521 adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS;
1522 adapter->aq_required &= ~IAVF_FLAG_AQ_ENABLE_CHANNELS;
1523 iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_ENABLE_CHANNELS, msg: (u8 *)vti, len);
1524 kfree(objp: vti);
1525}
1526
1527/**
1528 * iavf_disable_channels
1529 * @adapter: adapter structure
1530 *
1531 * Request that the PF disable channels that are configured
1532 **/
1533void iavf_disable_channels(struct iavf_adapter *adapter)
1534{
1535 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1536 /* bail because we already have a command pending */
1537 dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
1538 adapter->current_op);
1539 return;
1540 }
1541
1542 adapter->ch_config.state = __IAVF_TC_INVALID;
1543 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
1544 adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS;
1545 adapter->aq_required &= ~IAVF_FLAG_AQ_DISABLE_CHANNELS;
1546 iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_DISABLE_CHANNELS, NULL, len: 0);
1547}
1548
1549/**
1550 * iavf_print_cloud_filter
1551 * @adapter: adapter structure
1552 * @f: cloud filter to print
1553 *
1554 * Print the cloud filter
1555 **/
1556static void iavf_print_cloud_filter(struct iavf_adapter *adapter,
1557 struct virtchnl_filter *f)
1558{
1559 switch (f->flow_type) {
1560 case VIRTCHNL_TCP_V4_FLOW:
1561 dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n",
1562 &f->data.tcp_spec.dst_mac,
1563 &f->data.tcp_spec.src_mac,
1564 ntohs(f->data.tcp_spec.vlan_id),
1565 &f->data.tcp_spec.dst_ip[0],
1566 &f->data.tcp_spec.src_ip[0],
1567 ntohs(f->data.tcp_spec.dst_port),
1568 ntohs(f->data.tcp_spec.src_port));
1569 break;
1570 case VIRTCHNL_TCP_V6_FLOW:
1571 dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n",
1572 &f->data.tcp_spec.dst_mac,
1573 &f->data.tcp_spec.src_mac,
1574 ntohs(f->data.tcp_spec.vlan_id),
1575 &f->data.tcp_spec.dst_ip,
1576 &f->data.tcp_spec.src_ip,
1577 ntohs(f->data.tcp_spec.dst_port),
1578 ntohs(f->data.tcp_spec.src_port));
1579 break;
1580 }
1581}
1582
1583/**
1584 * iavf_add_cloud_filter
1585 * @adapter: adapter structure
1586 *
1587 * Request that the PF add cloud filters as specified
1588 * by the user via tc tool.
1589 **/
1590void iavf_add_cloud_filter(struct iavf_adapter *adapter)
1591{
1592 struct iavf_cloud_filter *cf;
1593 struct virtchnl_filter *f;
1594 int len = 0, count = 0;
1595
1596 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1597 /* bail because we already have a command pending */
1598 dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n",
1599 adapter->current_op);
1600 return;
1601 }
1602 list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1603 if (cf->add) {
1604 count++;
1605 break;
1606 }
1607 }
1608 if (!count) {
1609 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
1610 return;
1611 }
1612 adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER;
1613
1614 len = sizeof(struct virtchnl_filter);
1615 f = kzalloc(size: len, GFP_KERNEL);
1616 if (!f)
1617 return;
1618
1619 list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1620 if (cf->add) {
1621 memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
1622 cf->add = false;
1623 cf->state = __IAVF_CF_ADD_PENDING;
1624 iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_ADD_CLOUD_FILTER,
1625 msg: (u8 *)f, len);
1626 }
1627 }
1628 kfree(objp: f);
1629}
1630
1631/**
1632 * iavf_del_cloud_filter
1633 * @adapter: adapter structure
1634 *
1635 * Request that the PF delete cloud filters as specified
1636 * by the user via tc tool.
1637 **/
1638void iavf_del_cloud_filter(struct iavf_adapter *adapter)
1639{
1640 struct iavf_cloud_filter *cf, *cftmp;
1641 struct virtchnl_filter *f;
1642 int len = 0, count = 0;
1643
1644 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1645 /* bail because we already have a command pending */
1646 dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n",
1647 adapter->current_op);
1648 return;
1649 }
1650 list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
1651 if (cf->del) {
1652 count++;
1653 break;
1654 }
1655 }
1656 if (!count) {
1657 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
1658 return;
1659 }
1660 adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER;
1661
1662 len = sizeof(struct virtchnl_filter);
1663 f = kzalloc(size: len, GFP_KERNEL);
1664 if (!f)
1665 return;
1666
1667 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
1668 if (cf->del) {
1669 memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
1670 cf->del = false;
1671 cf->state = __IAVF_CF_DEL_PENDING;
1672 iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_DEL_CLOUD_FILTER,
1673 msg: (u8 *)f, len);
1674 }
1675 }
1676 kfree(objp: f);
1677}
1678
1679/**
1680 * iavf_add_fdir_filter
1681 * @adapter: the VF adapter structure
1682 *
1683 * Request that the PF add Flow Director filters as specified
1684 * by the user via ethtool.
1685 **/
1686void iavf_add_fdir_filter(struct iavf_adapter *adapter)
1687{
1688 struct iavf_fdir_fltr *fdir;
1689 struct virtchnl_fdir_add *f;
1690 bool process_fltr = false;
1691 int len;
1692
1693 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1694 /* bail because we already have a command pending */
1695 dev_err(&adapter->pdev->dev, "Cannot add Flow Director filter, command %d pending\n",
1696 adapter->current_op);
1697 return;
1698 }
1699
1700 len = sizeof(struct virtchnl_fdir_add);
1701 f = kzalloc(size: len, GFP_KERNEL);
1702 if (!f)
1703 return;
1704
1705 spin_lock_bh(lock: &adapter->fdir_fltr_lock);
1706 list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
1707 if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
1708 process_fltr = true;
1709 fdir->state = IAVF_FDIR_FLTR_ADD_PENDING;
1710 memcpy(f, &fdir->vc_add_msg, len);
1711 break;
1712 }
1713 }
1714 spin_unlock_bh(lock: &adapter->fdir_fltr_lock);
1715
1716 if (!process_fltr) {
1717 /* prevent iavf_add_fdir_filter() from being called when there
1718 * are no filters to add
1719 */
1720 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_FDIR_FILTER;
1721 kfree(objp: f);
1722 return;
1723 }
1724 adapter->current_op = VIRTCHNL_OP_ADD_FDIR_FILTER;
1725 iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_ADD_FDIR_FILTER, msg: (u8 *)f, len);
1726 kfree(objp: f);
1727}
1728
1729/**
1730 * iavf_del_fdir_filter
1731 * @adapter: the VF adapter structure
1732 *
1733 * Request that the PF delete Flow Director filters as specified
1734 * by the user via ethtool.
1735 **/
1736void iavf_del_fdir_filter(struct iavf_adapter *adapter)
1737{
1738 struct iavf_fdir_fltr *fdir;
1739 struct virtchnl_fdir_del f;
1740 bool process_fltr = false;
1741 int len;
1742
1743 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1744 /* bail because we already have a command pending */
1745 dev_err(&adapter->pdev->dev, "Cannot remove Flow Director filter, command %d pending\n",
1746 adapter->current_op);
1747 return;
1748 }
1749
1750 len = sizeof(struct virtchnl_fdir_del);
1751
1752 spin_lock_bh(lock: &adapter->fdir_fltr_lock);
1753 list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
1754 if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) {
1755 process_fltr = true;
1756 memset(&f, 0, len);
1757 f.vsi_id = fdir->vc_add_msg.vsi_id;
1758 f.flow_id = fdir->flow_id;
1759 fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
1760 break;
1761 }
1762 }
1763 spin_unlock_bh(lock: &adapter->fdir_fltr_lock);
1764
1765 if (!process_fltr) {
1766 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_FDIR_FILTER;
1767 return;
1768 }
1769
1770 adapter->current_op = VIRTCHNL_OP_DEL_FDIR_FILTER;
1771 iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_DEL_FDIR_FILTER, msg: (u8 *)&f, len);
1772}
1773
1774/**
1775 * iavf_add_adv_rss_cfg
1776 * @adapter: the VF adapter structure
1777 *
1778 * Request that the PF add RSS configuration as specified
1779 * by the user via ethtool.
1780 **/
1781void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter)
1782{
1783 struct virtchnl_rss_cfg *rss_cfg;
1784 struct iavf_adv_rss *rss;
1785 bool process_rss = false;
1786 int len;
1787
1788 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1789 /* bail because we already have a command pending */
1790 dev_err(&adapter->pdev->dev, "Cannot add RSS configuration, command %d pending\n",
1791 adapter->current_op);
1792 return;
1793 }
1794
1795 len = sizeof(struct virtchnl_rss_cfg);
1796 rss_cfg = kzalloc(size: len, GFP_KERNEL);
1797 if (!rss_cfg)
1798 return;
1799
1800 spin_lock_bh(lock: &adapter->adv_rss_lock);
1801 list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
1802 if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
1803 process_rss = true;
1804 rss->state = IAVF_ADV_RSS_ADD_PENDING;
1805 memcpy(rss_cfg, &rss->cfg_msg, len);
1806 iavf_print_adv_rss_cfg(adapter, rss,
1807 action: "Input set change for",
1808 result: "is pending");
1809 break;
1810 }
1811 }
1812 spin_unlock_bh(lock: &adapter->adv_rss_lock);
1813
1814 if (process_rss) {
1815 adapter->current_op = VIRTCHNL_OP_ADD_RSS_CFG;
1816 iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_ADD_RSS_CFG,
1817 msg: (u8 *)rss_cfg, len);
1818 } else {
1819 adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
1820 }
1821
1822 kfree(objp: rss_cfg);
1823}
1824
1825/**
1826 * iavf_del_adv_rss_cfg
1827 * @adapter: the VF adapter structure
1828 *
1829 * Request that the PF delete RSS configuration as specified
1830 * by the user via ethtool.
1831 **/
1832void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter)
1833{
1834 struct virtchnl_rss_cfg *rss_cfg;
1835 struct iavf_adv_rss *rss;
1836 bool process_rss = false;
1837 int len;
1838
1839 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1840 /* bail because we already have a command pending */
1841 dev_err(&adapter->pdev->dev, "Cannot remove RSS configuration, command %d pending\n",
1842 adapter->current_op);
1843 return;
1844 }
1845
1846 len = sizeof(struct virtchnl_rss_cfg);
1847 rss_cfg = kzalloc(size: len, GFP_KERNEL);
1848 if (!rss_cfg)
1849 return;
1850
1851 spin_lock_bh(lock: &adapter->adv_rss_lock);
1852 list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
1853 if (rss->state == IAVF_ADV_RSS_DEL_REQUEST) {
1854 process_rss = true;
1855 rss->state = IAVF_ADV_RSS_DEL_PENDING;
1856 memcpy(rss_cfg, &rss->cfg_msg, len);
1857 break;
1858 }
1859 }
1860 spin_unlock_bh(lock: &adapter->adv_rss_lock);
1861
1862 if (process_rss) {
1863 adapter->current_op = VIRTCHNL_OP_DEL_RSS_CFG;
1864 iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_DEL_RSS_CFG,
1865 msg: (u8 *)rss_cfg, len);
1866 } else {
1867 adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
1868 }
1869
1870 kfree(objp: rss_cfg);
1871}
1872
1873/**
1874 * iavf_request_reset
1875 * @adapter: adapter structure
1876 *
1877 * Request that the PF reset this VF. No response is expected.
1878 **/
1879int iavf_request_reset(struct iavf_adapter *adapter)
1880{
1881 int err;
1882 /* Don't check CURRENT_OP - this is always higher priority */
1883 err = iavf_send_pf_msg(adapter, op: VIRTCHNL_OP_RESET_VF, NULL, len: 0);
1884 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1885 return err;
1886}
1887
1888/**
1889 * iavf_netdev_features_vlan_strip_set - update vlan strip status
1890 * @netdev: ptr to netdev being adjusted
1891 * @enable: enable or disable vlan strip
1892 *
1893 * Helper function to change vlan strip status in netdev->features.
1894 */
1895static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev,
1896 const bool enable)
1897{
1898 if (enable)
1899 netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1900 else
1901 netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
1902}
1903
1904/**
1905 * iavf_virtchnl_completion
1906 * @adapter: adapter structure
1907 * @v_opcode: opcode sent by PF
1908 * @v_retval: retval sent by PF
1909 * @msg: message sent by PF
1910 * @msglen: message length
1911 *
1912 * Asynchronous completion function for admin queue messages. Rather than busy
1913 * wait, we fire off our requests and assume that no errors will be returned.
1914 * This function handles the reply messages.
1915 **/
1916void iavf_virtchnl_completion(struct iavf_adapter *adapter,
1917 enum virtchnl_ops v_opcode,
1918 enum iavf_status v_retval, u8 *msg, u16 msglen)
1919{
1920 struct net_device *netdev = adapter->netdev;
1921
1922 if (v_opcode == VIRTCHNL_OP_EVENT) {
1923 struct virtchnl_pf_event *vpe =
1924 (struct virtchnl_pf_event *)msg;
1925 bool link_up = iavf_get_vpe_link_status(adapter, vpe);
1926
1927 switch (vpe->event) {
1928 case VIRTCHNL_EVENT_LINK_CHANGE:
1929 iavf_set_adapter_link_speed_from_vpe(adapter, vpe);
1930
1931 /* we've already got the right link status, bail */
1932 if (adapter->link_up == link_up)
1933 break;
1934
1935 if (link_up) {
1936 /* If we get link up message and start queues
1937 * before our queues are configured it will
1938 * trigger a TX hang. In that case, just ignore
1939 * the link status message,we'll get another one
1940 * after we enable queues and actually prepared
1941 * to send traffic.
1942 */
1943 if (adapter->state != __IAVF_RUNNING)
1944 break;
1945
1946 /* For ADq enabled VF, we reconfigure VSIs and
1947 * re-allocate queues. Hence wait till all
1948 * queues are enabled.
1949 */
1950 if (adapter->flags &
1951 IAVF_FLAG_QUEUES_DISABLED)
1952 break;
1953 }
1954
1955 adapter->link_up = link_up;
1956 if (link_up) {
1957 netif_tx_start_all_queues(dev: netdev);
1958 netif_carrier_on(dev: netdev);
1959 } else {
1960 netif_tx_stop_all_queues(dev: netdev);
1961 netif_carrier_off(dev: netdev);
1962 }
1963 iavf_print_link_message(adapter);
1964 break;
1965 case VIRTCHNL_EVENT_RESET_IMPENDING:
1966 dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n");
1967 if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {
1968 dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
1969 iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
1970 }
1971 break;
1972 default:
1973 dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n",
1974 vpe->event);
1975 break;
1976 }
1977 return;
1978 }
1979 if (v_retval) {
1980 switch (v_opcode) {
1981 case VIRTCHNL_OP_ADD_VLAN:
1982 dev_err(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
1983 iavf_stat_str(&adapter->hw, v_retval));
1984 break;
1985 case VIRTCHNL_OP_ADD_ETH_ADDR:
1986 dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
1987 iavf_stat_str(&adapter->hw, v_retval));
1988 iavf_mac_add_reject(adapter);
1989 /* restore administratively set MAC address */
1990 ether_addr_copy(dst: adapter->hw.mac.addr, src: netdev->dev_addr);
1991 wake_up(&adapter->vc_waitqueue);
1992 break;
1993 case VIRTCHNL_OP_DEL_VLAN:
1994 dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n",
1995 iavf_stat_str(&adapter->hw, v_retval));
1996 break;
1997 case VIRTCHNL_OP_DEL_ETH_ADDR:
1998 dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n",
1999 iavf_stat_str(&adapter->hw, v_retval));
2000 break;
2001 case VIRTCHNL_OP_ENABLE_CHANNELS:
2002 dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n",
2003 iavf_stat_str(&adapter->hw, v_retval));
2004 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
2005 adapter->ch_config.state = __IAVF_TC_INVALID;
2006 netdev_reset_tc(dev: netdev);
2007 netif_tx_start_all_queues(dev: netdev);
2008 break;
2009 case VIRTCHNL_OP_DISABLE_CHANNELS:
2010 dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n",
2011 iavf_stat_str(&adapter->hw, v_retval));
2012 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
2013 adapter->ch_config.state = __IAVF_TC_RUNNING;
2014 netif_tx_start_all_queues(dev: netdev);
2015 break;
2016 case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
2017 struct iavf_cloud_filter *cf, *cftmp;
2018
2019 list_for_each_entry_safe(cf, cftmp,
2020 &adapter->cloud_filter_list,
2021 list) {
2022 if (cf->state == __IAVF_CF_ADD_PENDING) {
2023 cf->state = __IAVF_CF_INVALID;
2024 dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n",
2025 iavf_stat_str(&adapter->hw,
2026 v_retval));
2027 iavf_print_cloud_filter(adapter,
2028 f: &cf->f);
2029 list_del(entry: &cf->list);
2030 kfree(objp: cf);
2031 adapter->num_cloud_filters--;
2032 }
2033 }
2034 }
2035 break;
2036 case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
2037 struct iavf_cloud_filter *cf;
2038
2039 list_for_each_entry(cf, &adapter->cloud_filter_list,
2040 list) {
2041 if (cf->state == __IAVF_CF_DEL_PENDING) {
2042 cf->state = __IAVF_CF_ACTIVE;
2043 dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n",
2044 iavf_stat_str(&adapter->hw,
2045 v_retval));
2046 iavf_print_cloud_filter(adapter,
2047 f: &cf->f);
2048 }
2049 }
2050 }
2051 break;
2052 case VIRTCHNL_OP_ADD_FDIR_FILTER: {
2053 struct iavf_fdir_fltr *fdir, *fdir_tmp;
2054
2055 spin_lock_bh(lock: &adapter->fdir_fltr_lock);
2056 list_for_each_entry_safe(fdir, fdir_tmp,
2057 &adapter->fdir_list_head,
2058 list) {
2059 if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
2060 dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter, error %s\n",
2061 iavf_stat_str(&adapter->hw,
2062 v_retval));
2063 iavf_print_fdir_fltr(adapter, fltr: fdir);
2064 if (msglen)
2065 dev_err(&adapter->pdev->dev,
2066 "%s\n", msg);
2067 list_del(entry: &fdir->list);
2068 kfree(objp: fdir);
2069 adapter->fdir_active_fltr--;
2070 }
2071 }
2072 spin_unlock_bh(lock: &adapter->fdir_fltr_lock);
2073 }
2074 break;
2075 case VIRTCHNL_OP_DEL_FDIR_FILTER: {
2076 struct iavf_fdir_fltr *fdir;
2077
2078 spin_lock_bh(lock: &adapter->fdir_fltr_lock);
2079 list_for_each_entry(fdir, &adapter->fdir_list_head,
2080 list) {
2081 if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
2082 fdir->state = IAVF_FDIR_FLTR_ACTIVE;
2083 dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n",
2084 iavf_stat_str(&adapter->hw,
2085 v_retval));
2086 iavf_print_fdir_fltr(adapter, fltr: fdir);
2087 }
2088 }
2089 spin_unlock_bh(lock: &adapter->fdir_fltr_lock);
2090 }
2091 break;
2092 case VIRTCHNL_OP_ADD_RSS_CFG: {
2093 struct iavf_adv_rss *rss, *rss_tmp;
2094
2095 spin_lock_bh(lock: &adapter->adv_rss_lock);
2096 list_for_each_entry_safe(rss, rss_tmp,
2097 &adapter->adv_rss_list_head,
2098 list) {
2099 if (rss->state == IAVF_ADV_RSS_ADD_PENDING) {
2100 iavf_print_adv_rss_cfg(adapter, rss,
2101 action: "Failed to change the input set for",
2102 NULL);
2103 list_del(entry: &rss->list);
2104 kfree(objp: rss);
2105 }
2106 }
2107 spin_unlock_bh(lock: &adapter->adv_rss_lock);
2108 }
2109 break;
2110 case VIRTCHNL_OP_DEL_RSS_CFG: {
2111 struct iavf_adv_rss *rss;
2112
2113 spin_lock_bh(lock: &adapter->adv_rss_lock);
2114 list_for_each_entry(rss, &adapter->adv_rss_list_head,
2115 list) {
2116 if (rss->state == IAVF_ADV_RSS_DEL_PENDING) {
2117 rss->state = IAVF_ADV_RSS_ACTIVE;
2118 dev_err(&adapter->pdev->dev, "Failed to delete RSS configuration, error %s\n",
2119 iavf_stat_str(&adapter->hw,
2120 v_retval));
2121 }
2122 }
2123 spin_unlock_bh(lock: &adapter->adv_rss_lock);
2124 }
2125 break;
2126 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
2127 dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
2128 /* Vlan stripping could not be enabled by ethtool.
2129 * Disable it in netdev->features.
2130 */
2131 iavf_netdev_features_vlan_strip_set(netdev, enable: false);
2132 break;
2133 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
2134 dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
2135 /* Vlan stripping could not be disabled by ethtool.
2136 * Enable it in netdev->features.
2137 */
2138 iavf_netdev_features_vlan_strip_set(netdev, enable: true);
2139 break;
2140 case VIRTCHNL_OP_ADD_VLAN_V2:
2141 iavf_vlan_add_reject(adapter);
2142 dev_warn(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
2143 iavf_stat_str(&adapter->hw, v_retval));
2144 break;
2145 default:
2146 dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
2147 v_retval, iavf_stat_str(&adapter->hw, v_retval),
2148 v_opcode);
2149 }
2150 }
2151 switch (v_opcode) {
2152 case VIRTCHNL_OP_ADD_ETH_ADDR:
2153 if (!v_retval)
2154 iavf_mac_add_ok(adapter);
2155 if (!ether_addr_equal(addr1: netdev->dev_addr, addr2: adapter->hw.mac.addr))
2156 if (!ether_addr_equal(addr1: netdev->dev_addr,
2157 addr2: adapter->hw.mac.addr)) {
2158 netif_addr_lock_bh(dev: netdev);
2159 eth_hw_addr_set(dev: netdev, addr: adapter->hw.mac.addr);
2160 netif_addr_unlock_bh(dev: netdev);
2161 }
2162 wake_up(&adapter->vc_waitqueue);
2163 break;
2164 case VIRTCHNL_OP_GET_STATS: {
2165 struct iavf_eth_stats *stats =
2166 (struct iavf_eth_stats *)msg;
2167 netdev->stats.rx_packets = stats->rx_unicast +
2168 stats->rx_multicast +
2169 stats->rx_broadcast;
2170 netdev->stats.tx_packets = stats->tx_unicast +
2171 stats->tx_multicast +
2172 stats->tx_broadcast;
2173 netdev->stats.rx_bytes = stats->rx_bytes;
2174 netdev->stats.tx_bytes = stats->tx_bytes;
2175 netdev->stats.tx_errors = stats->tx_errors;
2176 netdev->stats.rx_dropped = stats->rx_discards;
2177 netdev->stats.tx_dropped = stats->tx_discards;
2178 adapter->current_stats = *stats;
2179 }
2180 break;
2181 case VIRTCHNL_OP_GET_VF_RESOURCES: {
2182 u16 len = IAVF_VIRTCHNL_VF_RESOURCE_SIZE;
2183
2184 memcpy(adapter->vf_res, msg, min(msglen, len));
2185 iavf_validate_num_queues(adapter);
2186 iavf_vf_parse_hw_config(hw: &adapter->hw, msg: adapter->vf_res);
2187 if (is_zero_ether_addr(addr: adapter->hw.mac.addr)) {
2188 /* restore current mac address */
2189 ether_addr_copy(dst: adapter->hw.mac.addr, src: netdev->dev_addr);
2190 } else {
2191 netif_addr_lock_bh(dev: netdev);
2192 /* refresh current mac address if changed */
2193 ether_addr_copy(dst: netdev->perm_addr,
2194 src: adapter->hw.mac.addr);
2195 netif_addr_unlock_bh(dev: netdev);
2196 }
2197 spin_lock_bh(lock: &adapter->mac_vlan_list_lock);
2198 iavf_add_filter(adapter, macaddr: adapter->hw.mac.addr);
2199
2200 if (VLAN_ALLOWED(adapter)) {
2201 if (!list_empty(head: &adapter->vlan_filter_list)) {
2202 struct iavf_vlan_filter *vlf;
2203
2204 /* re-add all VLAN filters over virtchnl */
2205 list_for_each_entry(vlf,
2206 &adapter->vlan_filter_list,
2207 list)
2208 vlf->state = IAVF_VLAN_ADD;
2209
2210 adapter->aq_required |=
2211 IAVF_FLAG_AQ_ADD_VLAN_FILTER;
2212 }
2213 }
2214
2215 spin_unlock_bh(lock: &adapter->mac_vlan_list_lock);
2216
2217 iavf_parse_vf_resource_msg(adapter);
2218
2219 /* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the
2220 * response to VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS to finish
2221 * configuration
2222 */
2223 if (VLAN_V2_ALLOWED(adapter))
2224 break;
2225 /* fallthrough and finish config if VIRTCHNL_VF_OFFLOAD_VLAN_V2
2226 * wasn't successfully negotiated with the PF
2227 */
2228 }
2229 fallthrough;
2230 case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: {
2231 struct iavf_mac_filter *f;
2232 bool was_mac_changed;
2233 u64 aq_required = 0;
2234
2235 if (v_opcode == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS)
2236 memcpy(&adapter->vlan_v2_caps, msg,
2237 min_t(u16, msglen,
2238 sizeof(adapter->vlan_v2_caps)));
2239
2240 iavf_process_config(adapter);
2241 adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES;
2242 iavf_schedule_finish_config(adapter);
2243
2244 iavf_set_queue_vlan_tag_loc(adapter);
2245
2246 was_mac_changed = !ether_addr_equal(addr1: netdev->dev_addr,
2247 addr2: adapter->hw.mac.addr);
2248
2249 spin_lock_bh(lock: &adapter->mac_vlan_list_lock);
2250
2251 /* re-add all MAC filters */
2252 list_for_each_entry(f, &adapter->mac_filter_list, list) {
2253 if (was_mac_changed &&
2254 ether_addr_equal(addr1: netdev->dev_addr, addr2: f->macaddr))
2255 ether_addr_copy(dst: f->macaddr,
2256 src: adapter->hw.mac.addr);
2257
2258 f->is_new_mac = true;
2259 f->add = true;
2260 f->add_handled = false;
2261 f->remove = false;
2262 }
2263
2264 /* re-add all VLAN filters */
2265 if (VLAN_FILTERING_ALLOWED(adapter)) {
2266 struct iavf_vlan_filter *vlf;
2267
2268 if (!list_empty(head: &adapter->vlan_filter_list)) {
2269 list_for_each_entry(vlf,
2270 &adapter->vlan_filter_list,
2271 list)
2272 vlf->state = IAVF_VLAN_ADD;
2273
2274 aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
2275 }
2276 }
2277
2278 spin_unlock_bh(lock: &adapter->mac_vlan_list_lock);
2279
2280 netif_addr_lock_bh(dev: netdev);
2281 eth_hw_addr_set(dev: netdev, addr: adapter->hw.mac.addr);
2282 netif_addr_unlock_bh(dev: netdev);
2283
2284 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER |
2285 aq_required;
2286 }
2287 break;
2288 case VIRTCHNL_OP_ENABLE_QUEUES:
2289 /* enable transmits */
2290 iavf_irq_enable(adapter, flush: true);
2291 wake_up(&adapter->reset_waitqueue);
2292 adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED;
2293 break;
2294 case VIRTCHNL_OP_DISABLE_QUEUES:
2295 iavf_free_all_tx_resources(adapter);
2296 iavf_free_all_rx_resources(adapter);
2297 if (adapter->state == __IAVF_DOWN_PENDING) {
2298 iavf_change_state(adapter, state: __IAVF_DOWN);
2299 wake_up(&adapter->down_waitqueue);
2300 }
2301 break;
2302 case VIRTCHNL_OP_VERSION:
2303 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
2304 /* Don't display an error if we get these out of sequence.
2305 * If the firmware needed to get kicked, we'll get these and
2306 * it's no problem.
2307 */
2308 if (v_opcode != adapter->current_op)
2309 return;
2310 break;
2311 case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
2312 struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
2313
2314 if (msglen == sizeof(*vrh))
2315 adapter->hena = vrh->hena;
2316 else
2317 dev_warn(&adapter->pdev->dev,
2318 "Invalid message %d from PF\n", v_opcode);
2319 }
2320 break;
2321 case VIRTCHNL_OP_REQUEST_QUEUES: {
2322 struct virtchnl_vf_res_request *vfres =
2323 (struct virtchnl_vf_res_request *)msg;
2324
2325 if (vfres->num_queue_pairs != adapter->num_req_queues) {
2326 dev_info(&adapter->pdev->dev,
2327 "Requested %d queues, PF can support %d\n",
2328 adapter->num_req_queues,
2329 vfres->num_queue_pairs);
2330 adapter->num_req_queues = 0;
2331 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
2332 }
2333 }
2334 break;
2335 case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
2336 struct iavf_cloud_filter *cf;
2337
2338 list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
2339 if (cf->state == __IAVF_CF_ADD_PENDING)
2340 cf->state = __IAVF_CF_ACTIVE;
2341 }
2342 }
2343 break;
2344 case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
2345 struct iavf_cloud_filter *cf, *cftmp;
2346
2347 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
2348 list) {
2349 if (cf->state == __IAVF_CF_DEL_PENDING) {
2350 cf->state = __IAVF_CF_INVALID;
2351 list_del(entry: &cf->list);
2352 kfree(objp: cf);
2353 adapter->num_cloud_filters--;
2354 }
2355 }
2356 }
2357 break;
2358 case VIRTCHNL_OP_ADD_FDIR_FILTER: {
2359 struct virtchnl_fdir_add *add_fltr = (struct virtchnl_fdir_add *)msg;
2360 struct iavf_fdir_fltr *fdir, *fdir_tmp;
2361
2362 spin_lock_bh(lock: &adapter->fdir_fltr_lock);
2363 list_for_each_entry_safe(fdir, fdir_tmp,
2364 &adapter->fdir_list_head,
2365 list) {
2366 if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
2367 if (add_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
2368 dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n",
2369 fdir->loc);
2370 fdir->state = IAVF_FDIR_FLTR_ACTIVE;
2371 fdir->flow_id = add_fltr->flow_id;
2372 } else {
2373 dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter with status: %d\n",
2374 add_fltr->status);
2375 iavf_print_fdir_fltr(adapter, fltr: fdir);
2376 list_del(entry: &fdir->list);
2377 kfree(objp: fdir);
2378 adapter->fdir_active_fltr--;
2379 }
2380 }
2381 }
2382 spin_unlock_bh(lock: &adapter->fdir_fltr_lock);
2383 }
2384 break;
2385 case VIRTCHNL_OP_DEL_FDIR_FILTER: {
2386 struct virtchnl_fdir_del *del_fltr = (struct virtchnl_fdir_del *)msg;
2387 struct iavf_fdir_fltr *fdir, *fdir_tmp;
2388
2389 spin_lock_bh(lock: &adapter->fdir_fltr_lock);
2390 list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head,
2391 list) {
2392 if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
2393 if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
2394 dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
2395 fdir->loc);
2396 list_del(entry: &fdir->list);
2397 kfree(objp: fdir);
2398 adapter->fdir_active_fltr--;
2399 } else {
2400 fdir->state = IAVF_FDIR_FLTR_ACTIVE;
2401 dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n",
2402 del_fltr->status);
2403 iavf_print_fdir_fltr(adapter, fltr: fdir);
2404 }
2405 }
2406 }
2407 spin_unlock_bh(lock: &adapter->fdir_fltr_lock);
2408 }
2409 break;
2410 case VIRTCHNL_OP_ADD_RSS_CFG: {
2411 struct iavf_adv_rss *rss;
2412
2413 spin_lock_bh(lock: &adapter->adv_rss_lock);
2414 list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
2415 if (rss->state == IAVF_ADV_RSS_ADD_PENDING) {
2416 iavf_print_adv_rss_cfg(adapter, rss,
2417 action: "Input set change for",
2418 result: "successful");
2419 rss->state = IAVF_ADV_RSS_ACTIVE;
2420 }
2421 }
2422 spin_unlock_bh(lock: &adapter->adv_rss_lock);
2423 }
2424 break;
2425 case VIRTCHNL_OP_DEL_RSS_CFG: {
2426 struct iavf_adv_rss *rss, *rss_tmp;
2427
2428 spin_lock_bh(lock: &adapter->adv_rss_lock);
2429 list_for_each_entry_safe(rss, rss_tmp,
2430 &adapter->adv_rss_list_head, list) {
2431 if (rss->state == IAVF_ADV_RSS_DEL_PENDING) {
2432 list_del(entry: &rss->list);
2433 kfree(objp: rss);
2434 }
2435 }
2436 spin_unlock_bh(lock: &adapter->adv_rss_lock);
2437 }
2438 break;
2439 case VIRTCHNL_OP_ADD_VLAN_V2: {
2440 struct iavf_vlan_filter *f;
2441
2442 spin_lock_bh(lock: &adapter->mac_vlan_list_lock);
2443 list_for_each_entry(f, &adapter->vlan_filter_list, list) {
2444 if (f->state == IAVF_VLAN_IS_NEW)
2445 f->state = IAVF_VLAN_ACTIVE;
2446 }
2447 spin_unlock_bh(lock: &adapter->mac_vlan_list_lock);
2448 }
2449 break;
2450 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
2451 /* PF enabled vlan strip on this VF.
2452 * Update netdev->features if needed to be in sync with ethtool.
2453 */
2454 if (!v_retval)
2455 iavf_netdev_features_vlan_strip_set(netdev, enable: true);
2456 break;
2457 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
2458 /* PF disabled vlan strip on this VF.
2459 * Update netdev->features if needed to be in sync with ethtool.
2460 */
2461 if (!v_retval)
2462 iavf_netdev_features_vlan_strip_set(netdev, enable: false);
2463 break;
2464 default:
2465 if (adapter->current_op && (v_opcode != adapter->current_op))
2466 dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
2467 adapter->current_op, v_opcode);
2468 break;
2469 } /* switch v_opcode */
2470 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2471}
2472

/* source: linux/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c */