1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3
4/* ethtool support for iavf */
5#include "iavf.h"
6
7#include <linux/uaccess.h>
8
9/* ethtool statistics helpers */
10
11/**
12 * struct iavf_stats - definition for an ethtool statistic
13 * @stat_string: statistic name to display in ethtool -S output
14 * @sizeof_stat: the sizeof() of the stat, must be no greater than sizeof(u64)
15 * @stat_offset: offsetof() the stat from a base pointer
16 *
17 * This structure defines a statistic to be added to the ethtool stats buffer.
18 * It defines a statistic as an offset from a common base pointer. Stats should
19 * be defined in constant arrays using the IAVF_STAT macro, with every element
20 * of the array using the same _type for calculating the sizeof_stat and
21 * stat_offset.
22 *
23 * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or
24 * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from
25 * the iavf_add_ethtool_stat() helper function.
26 *
27 * The @stat_string is interpreted as a format string, allowing formatted
28 * values to be inserted while looping over multiple structures for a given
29 * statistics array. Thus, every statistic string in an array should have the
30 * same type and number of format specifiers, to be formatted by variadic
31 * arguments to the iavf_add_stat_string() helper function.
32 **/
33struct iavf_stats {
34 char stat_string[ETH_GSTRING_LEN];
35 int sizeof_stat;
36 int stat_offset;
37};
38
39/* Helper macro to define an iavf_stat structure with proper size and type.
40 * Use this when defining constant statistics arrays. Note that @_type expects
41 * only a type name and is used multiple times.
42 */
43#define IAVF_STAT(_type, _name, _stat) { \
44 .stat_string = _name, \
45 .sizeof_stat = sizeof_field(_type, _stat), \
46 .stat_offset = offsetof(_type, _stat) \
47}
48
49/* Helper macro for defining some statistics related to queues */
50#define IAVF_QUEUE_STAT(_name, _stat) \
51 IAVF_STAT(struct iavf_ring, _name, _stat)
52
53/* Stats associated with a Tx or Rx ring */
54static const struct iavf_stats iavf_gstrings_queue_stats[] = {
55 IAVF_QUEUE_STAT("%s-%u.packets", stats.packets),
56 IAVF_QUEUE_STAT("%s-%u.bytes", stats.bytes),
57};
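/* iavf_get_stat_strings() formats the "%s-%u" prefix above with "tx" or "rx"
 * and the queue index, so these entries show up in "ethtool -S" output as,
 * e.g., "tx-0.packets" and "rx-0.bytes".
 */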
58
59/**
60 * iavf_add_one_ethtool_stat - copy the stat into the supplied buffer
61 * @data: location to store the stat value
62 * @pointer: basis for where to copy from
63 * @stat: the stat definition
64 *
65 * Copies the stat data defined by the pointer and stat structure pair into
66 * the memory supplied as data. Used to implement iavf_add_ethtool_stats and
67 * iavf_add_queue_stats. If the pointer is null, data will be zero'd.
68 */
69static void
70iavf_add_one_ethtool_stat(u64 *data, void *pointer,
71 const struct iavf_stats *stat)
72{
73 char *p;
74
75 if (!pointer) {
76 /* ensure that the ethtool data buffer is zero'd for any stats
77 * which don't have a valid pointer.
78 */
79 *data = 0;
80 return;
81 }
82
83 p = (char *)pointer + stat->stat_offset;
84 switch (stat->sizeof_stat) {
85 case sizeof(u64):
86 *data = *((u64 *)p);
87 break;
88 case sizeof(u32):
89 *data = *((u32 *)p);
90 break;
91 case sizeof(u16):
92 *data = *((u16 *)p);
93 break;
94 case sizeof(u8):
95 *data = *((u8 *)p);
96 break;
97 default:
98 WARN_ONCE(1, "unexpected stat size for %s",
99 stat->stat_string);
100 *data = 0;
101 }
102}
103
104/**
105 * __iavf_add_ethtool_stats - copy stats into the ethtool supplied buffer
106 * @data: ethtool stats buffer
107 * @pointer: location to copy stats from
108 * @stats: array of stats to copy
109 * @size: the size of the stats definition
110 *
111 * Copy the stats defined by the stats array using the pointer as a base into
112 * the data buffer supplied by ethtool. Updates the data pointer to point to
113 * the next empty location for successive calls to __iavf_add_ethtool_stats.
114 * If pointer is null, set the data values to zero and update the pointer to
115 * skip these stats.
116 **/
117static void
118__iavf_add_ethtool_stats(u64 **data, void *pointer,
119 const struct iavf_stats stats[],
120 const unsigned int size)
121{
122 unsigned int i;
123
124 for (i = 0; i < size; i++)
125 iavf_add_one_ethtool_stat((*data)++, pointer, &stats[i]);
126}
127
128/**
129 * iavf_add_ethtool_stats - copy stats into ethtool supplied buffer
130 * @data: ethtool stats buffer
131 * @pointer: location where stats are stored
132 * @stats: static const array of stat definitions
133 *
134 * Macro to ease the use of __iavf_add_ethtool_stats by taking a static
135 * constant stats array and passing the ARRAY_SIZE(). This avoids typos by
136 * ensuring that we pass the size associated with the given stats array.
137 *
138 * The parameter @stats is evaluated twice, so parameters with side effects
139 * should be avoided.
140 **/
141#define iavf_add_ethtool_stats(data, pointer, stats) \
142 __iavf_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats))
143
144/**
145 * iavf_add_queue_stats - copy queue statistics into supplied buffer
146 * @data: ethtool stats buffer
147 * @ring: the ring to copy
148 *
149 * Queue statistics must be copied while protected by
150 * u64_stats_fetch_begin, so we can't directly use iavf_add_ethtool_stats.
151 * Assumes that queue stats are defined in iavf_gstrings_queue_stats. If the
152 * ring pointer is null, zero out the queue stat values and update the data
153 * pointer. Otherwise safely copy the stats from the ring into the supplied
154 * buffer and update the data pointer when finished.
155 *
156 * This function expects to be called while under rcu_read_lock().
157 **/
158static void
159iavf_add_queue_stats(u64 **data, struct iavf_ring *ring)
160{
161 const unsigned int size = ARRAY_SIZE(iavf_gstrings_queue_stats);
162 const struct iavf_stats *stats = iavf_gstrings_queue_stats;
163 unsigned int start;
164 unsigned int i;
165
166 /* To avoid invalid statistics values, ensure that we keep retrying
167 * the copy until we get a consistent value according to
168 * u64_stats_fetch_retry. But first, make sure our ring is
169 * non-null before attempting to access its syncp.
170 */
171 do {
172 start = !ring ? 0 : u64_stats_fetch_begin(&ring->syncp);
173 for (i = 0; i < size; i++)
174 iavf_add_one_ethtool_stat(&(*data)[i], ring, &stats[i]);
175 } while (ring && u64_stats_fetch_retry(&ring->syncp, start));
176
177 /* Once we successfully copy the stats in, update the data pointer */
178 *data += size;
179}
180
181/**
182 * __iavf_add_stat_strings - copy stat strings into ethtool buffer
183 * @p: ethtool supplied buffer
184 * @stats: stat definitions array
185 * @size: size of the stats array
186 *
187 * Format and copy the strings described by stats into the buffer pointed at
188 * by p.
189 **/
190static void __iavf_add_stat_strings(u8 **p, const struct iavf_stats stats[],
191 const unsigned int size, ...)
192{
193 unsigned int i;
194
195 for (i = 0; i < size; i++) {
196 va_list args;
197
198 va_start(args, size);
199 vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args);
200 *p += ETH_GSTRING_LEN;
201 va_end(args);
202 }
203}
204
205/**
206 * iavf_add_stat_strings - copy stat strings into ethtool buffer
207 * @p: ethtool supplied buffer
208 * @stats: stat definitions array
209 *
210 * Format and copy the strings described by the const static stats value into
211 * the buffer pointed at by p.
212 *
213 * The parameter @stats is evaluated twice, so parameters with side effects
214 * should be avoided. Additionally, stats must be an array such that
215 * ARRAY_SIZE can be called on it.
216 **/
217#define iavf_add_stat_strings(p, stats, ...) \
218 __iavf_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)
219
220#define VF_STAT(_name, _stat) \
221 IAVF_STAT(struct iavf_adapter, _name, _stat)
222
223static const struct iavf_stats iavf_gstrings_stats[] = {
224 VF_STAT("rx_bytes", current_stats.rx_bytes),
225 VF_STAT("rx_unicast", current_stats.rx_unicast),
226 VF_STAT("rx_multicast", current_stats.rx_multicast),
227 VF_STAT("rx_broadcast", current_stats.rx_broadcast),
228 VF_STAT("rx_discards", current_stats.rx_discards),
229 VF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol),
230 VF_STAT("tx_bytes", current_stats.tx_bytes),
231 VF_STAT("tx_unicast", current_stats.tx_unicast),
232 VF_STAT("tx_multicast", current_stats.tx_multicast),
233 VF_STAT("tx_broadcast", current_stats.tx_broadcast),
234 VF_STAT("tx_discards", current_stats.tx_discards),
235 VF_STAT("tx_errors", current_stats.tx_errors),
236};
237
238#define IAVF_STATS_LEN ARRAY_SIZE(iavf_gstrings_stats)
239
240#define IAVF_QUEUE_STATS_LEN ARRAY_SIZE(iavf_gstrings_queue_stats)
241
242/* For now we have one and only one private flag and it is only defined
243 * when we have support for the SKIP_CPU_SYNC DMA attribute. Instead
244 * of leaving all this code sitting around empty we will strip it unless
245 * our one private flag is actually available.
246 */
247struct iavf_priv_flags {
248 char flag_string[ETH_GSTRING_LEN];
249 u32 flag;
250 bool read_only;
251};
252
253#define IAVF_PRIV_FLAG(_name, _flag, _read_only) { \
254 .flag_string = _name, \
255 .flag = _flag, \
256 .read_only = _read_only, \
257}
258
259static const struct iavf_priv_flags iavf_gstrings_priv_flags[] = {
260 IAVF_PRIV_FLAG("legacy-rx", IAVF_FLAG_LEGACY_RX, 0),
261};
262
263#define IAVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(iavf_gstrings_priv_flags)
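/* The single flag above maps to bit 0 of the ethtool private flags bitmap.
 * From user space it is toggled with, e.g.,
 * "ethtool --set-priv-flags <ifname> legacy-rx on" and read back with
 * "ethtool --show-priv-flags <ifname>".
 */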
264
265/**
266 * iavf_get_link_ksettings - Get Link Speed and Duplex settings
267 * @netdev: network interface device structure
268 * @cmd: ethtool command
269 *
270 * Reports speed/duplex settings. Because this is a VF, we don't know what
271 * kind of link we really have, so we fake it.
272 **/
273static int iavf_get_link_ksettings(struct net_device *netdev,
274 struct ethtool_link_ksettings *cmd)
275{
276 struct iavf_adapter *adapter = netdev_priv(netdev);
277
278 ethtool_link_ksettings_zero_link_mode(cmd, supported);
279 cmd->base.autoneg = AUTONEG_DISABLE;
280 cmd->base.port = PORT_NONE;
281 cmd->base.duplex = DUPLEX_FULL;
282
283 if (ADV_LINK_SUPPORT(adapter)) {
284 if (adapter->link_speed_mbps &&
285 adapter->link_speed_mbps < U32_MAX)
286 cmd->base.speed = adapter->link_speed_mbps;
287 else
288 cmd->base.speed = SPEED_UNKNOWN;
289
290 return 0;
291 }
292
293 switch (adapter->link_speed) {
294 case VIRTCHNL_LINK_SPEED_40GB:
295 cmd->base.speed = SPEED_40000;
296 break;
297 case VIRTCHNL_LINK_SPEED_25GB:
298 cmd->base.speed = SPEED_25000;
299 break;
300 case VIRTCHNL_LINK_SPEED_20GB:
301 cmd->base.speed = SPEED_20000;
302 break;
303 case VIRTCHNL_LINK_SPEED_10GB:
304 cmd->base.speed = SPEED_10000;
305 break;
306 case VIRTCHNL_LINK_SPEED_5GB:
307 cmd->base.speed = SPEED_5000;
308 break;
309 case VIRTCHNL_LINK_SPEED_2_5GB:
310 cmd->base.speed = SPEED_2500;
311 break;
312 case VIRTCHNL_LINK_SPEED_1GB:
313 cmd->base.speed = SPEED_1000;
314 break;
315 case VIRTCHNL_LINK_SPEED_100MB:
316 cmd->base.speed = SPEED_100;
317 break;
318 default:
319 break;
320 }
321
322 return 0;
323}
324
325/**
326 * iavf_get_sset_count - Get length of string set
327 * @netdev: network interface device structure
328 * @sset: id of string set
329 *
330 * Reports size of various string tables.
331 **/
332static int iavf_get_sset_count(struct net_device *netdev, int sset)
333{
334 /* Report the maximum number of queues, even if not every queue is
335 * currently configured. Since allocation of queues is in pairs,
336 * use netdev->real_num_tx_queues * 2. The real_num_tx_queues is set
337 * at device creation and never changes.
338 */
339
340 if (sset == ETH_SS_STATS)
341 return IAVF_STATS_LEN +
342 (IAVF_QUEUE_STATS_LEN * 2 *
343 netdev->real_num_tx_queues);
344 else if (sset == ETH_SS_PRIV_FLAGS)
345 return IAVF_PRIV_FLAGS_STR_LEN;
346 else
347 return -EINVAL;
348}
349
350/**
351 * iavf_get_ethtool_stats - report device statistics
352 * @netdev: network interface device structure
353 * @stats: ethtool statistics structure
354 * @data: pointer to data buffer
355 *
356 * All statistics are added to the data buffer as an array of u64.
357 **/
358static void iavf_get_ethtool_stats(struct net_device *netdev,
359 struct ethtool_stats *stats, u64 *data)
360{
361 struct iavf_adapter *adapter = netdev_priv(netdev);
362 unsigned int i;
363
364 /* Explicitly request stats refresh */
365 iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_REQUEST_STATS);
366
367 iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats);
368
369 rcu_read_lock();
370 /* As num_active_queues describe both tx and rx queues, we can use
371 * it to iterate over rings' stats.
372 */
373 for (i = 0; i < adapter->num_active_queues; i++) {
374 struct iavf_ring *ring;
375
376 /* Tx rings stats */
377 ring = &adapter->tx_rings[i];
378 iavf_add_queue_stats(&data, ring);
379
380 /* Rx rings stats */
381 ring = &adapter->rx_rings[i];
382 iavf_add_queue_stats(&data, ring);
383 }
384 rcu_read_unlock();
385}
386
387/**
388 * iavf_get_priv_flag_strings - Get private flag strings
389 * @netdev: network interface device structure
390 * @data: buffer for string data
391 *
392 * Builds the private flags string table
393 **/
394static void iavf_get_priv_flag_strings(struct net_device *netdev, u8 *data)
395{
396 unsigned int i;
397
398 for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++)
399 ethtool_sprintf(&data, "%s",
400 iavf_gstrings_priv_flags[i].flag_string);
401}
402
403/**
404 * iavf_get_stat_strings - Get stat strings
405 * @netdev: network interface device structure
406 * @data: buffer for string data
407 *
408 * Builds the statistics string table
409 **/
410static void iavf_get_stat_strings(struct net_device *netdev, u8 *data)
411{
412 unsigned int i;
413
414 iavf_add_stat_strings(&data, iavf_gstrings_stats);
415
416 /* Queues are always allocated in pairs, so we just use
417 * real_num_tx_queues for both Tx and Rx queues.
418 */
419 for (i = 0; i < netdev->real_num_tx_queues; i++) {
420 iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
421 "tx", i);
422 iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
423 "rx", i);
424 }
425}
426
427/**
428 * iavf_get_strings - Get string set
429 * @netdev: network interface device structure
430 * @sset: id of string set
431 * @data: buffer for string data
432 *
433 * Builds string tables for various string sets
434 **/
435static void iavf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
436{
437 switch (sset) {
438 case ETH_SS_STATS:
439 iavf_get_stat_strings(netdev, data);
440 break;
441 case ETH_SS_PRIV_FLAGS:
442 iavf_get_priv_flag_strings(netdev, data);
443 break;
444 default:
445 break;
446 }
447}
448
449/**
450 * iavf_get_priv_flags - report device private flags
451 * @netdev: network interface device structure
452 *
453 * The string set count and the string set itself must stay in sync with the
454 * flags returned here. Add a new string to the iavf_gstrings_priv_flags array
455 * for every new flag.
456 *
457 * Returns a u32 bitmap of flags.
458 **/
459static u32 iavf_get_priv_flags(struct net_device *netdev)
460{
461 struct iavf_adapter *adapter = netdev_priv(netdev);
462 u32 i, ret_flags = 0;
463
464 for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) {
465 const struct iavf_priv_flags *priv_flags;
466
467 priv_flags = &iavf_gstrings_priv_flags[i];
468
469 if (priv_flags->flag & adapter->flags)
470 ret_flags |= BIT(i);
471 }
472
473 return ret_flags;
474}
475
476/**
477 * iavf_set_priv_flags - set private flags
478 * @netdev: network interface device structure
479 * @flags: bit flags to be set
480 **/
481static int iavf_set_priv_flags(struct net_device *netdev, u32 flags)
482{
483 struct iavf_adapter *adapter = netdev_priv(netdev);
484 u32 orig_flags, new_flags, changed_flags;
485 int ret = 0;
486 u32 i;
487
488 orig_flags = READ_ONCE(adapter->flags);
489 new_flags = orig_flags;
490
491 for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) {
492 const struct iavf_priv_flags *priv_flags;
493
494 priv_flags = &iavf_gstrings_priv_flags[i];
495
496 if (flags & BIT(i))
497 new_flags |= priv_flags->flag;
498 else
499 new_flags &= ~(priv_flags->flag);
500
501 if (priv_flags->read_only &&
502 ((orig_flags ^ new_flags) & ~BIT(i)))
503 return -EOPNOTSUPP;
504 }
505
506 /* Before we finalize any flag changes, any checks which we need to
507 * perform to determine if the new flags will be supported should go
508 * here...
509 */
510
511 /* Compare and exchange the new flags into place. If cmpxchg returns
512 * anything other than the old value, something else must have
513 * modified the flags variable since we copied it. In that case we
514 * punt with an error and log a message in the kernel message
515 * buffer.
516 */
517 if (cmpxchg(&adapter->flags, orig_flags, new_flags) != orig_flags) {
518 dev_warn(&adapter->pdev->dev,
519 "Unable to update adapter->flags as it was modified by another thread...\n");
520 return -EAGAIN;
521 }
522
523 changed_flags = orig_flags ^ new_flags;
524
525 /* Process any additional changes needed as a result of flag changes.
526 * The changed_flags value reflects the list of bits that were changed
527 * in the code above.
528 */
529
530 /* issue a reset to force legacy-rx change to take effect */
531 if (changed_flags & IAVF_FLAG_LEGACY_RX) {
532 if (netif_running(netdev)) {
533 iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
534 ret = iavf_wait_for_reset(adapter);
535 if (ret)
536 netdev_warn(netdev, "Changing private flags timeout or interrupted waiting for reset");
537 }
538 }
539
540 return ret;
541}
542
543/**
544 * iavf_get_msglevel - Get debug message level
545 * @netdev: network interface device structure
546 *
547 * Returns current debug message level.
548 **/
549static u32 iavf_get_msglevel(struct net_device *netdev)
550{
551 struct iavf_adapter *adapter = netdev_priv(netdev);
552
553 return adapter->msg_enable;
554}
555
556/**
557 * iavf_set_msglevel - Set debug message level
558 * @netdev: network interface device structure
559 * @data: message level
560 *
561 * Set current debug message level. Higher values cause the driver to
562 * be noisier.
563 **/
564static void iavf_set_msglevel(struct net_device *netdev, u32 data)
565{
566 struct iavf_adapter *adapter = netdev_priv(netdev);
567
568 if (IAVF_DEBUG_USER & data)
569 adapter->hw.debug_mask = data;
570 adapter->msg_enable = data;
571}
572
573/**
574 * iavf_get_drvinfo - Get driver info
575 * @netdev: network interface device structure
576 * @drvinfo: ethtool driver info structure
577 *
578 * Returns information about the driver and device for display to the user.
579 **/
580static void iavf_get_drvinfo(struct net_device *netdev,
581 struct ethtool_drvinfo *drvinfo)
582{
583 struct iavf_adapter *adapter = netdev_priv(netdev);
584
585 strscpy(drvinfo->driver, iavf_driver_name, 32);
586 strscpy(drvinfo->fw_version, "N/A", 4);
587 strscpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
588 drvinfo->n_priv_flags = IAVF_PRIV_FLAGS_STR_LEN;
589}
590
591/**
592 * iavf_get_ringparam - Get ring parameters
593 * @netdev: network interface device structure
594 * @ring: ethtool ringparam structure
595 * @kernel_ring: ethtool external ringparam structure
596 * @extack: netlink extended ACK report struct
597 *
598 * Returns current ring parameters. TX and RX rings are reported separately,
599 * but the number of rings is not reported.
600 **/
601static void iavf_get_ringparam(struct net_device *netdev,
602 struct ethtool_ringparam *ring,
603 struct kernel_ethtool_ringparam *kernel_ring,
604 struct netlink_ext_ack *extack)
605{
606 struct iavf_adapter *adapter = netdev_priv(netdev);
607
608 ring->rx_max_pending = IAVF_MAX_RXD;
609 ring->tx_max_pending = IAVF_MAX_TXD;
610 ring->rx_pending = adapter->rx_desc_count;
611 ring->tx_pending = adapter->tx_desc_count;
612}
613
614/**
615 * iavf_set_ringparam - Set ring parameters
616 * @netdev: network interface device structure
617 * @ring: ethtool ringparam structure
618 * @kernel_ring: ethtool external ringparam structure
619 * @extack: netlink extended ACK report struct
620 *
621 * Sets ring parameters. TX and RX rings are controlled separately, but the
622 * number of rings is not specified, so all rings get the same settings.
623 **/
624static int iavf_set_ringparam(struct net_device *netdev,
625 struct ethtool_ringparam *ring,
626 struct kernel_ethtool_ringparam *kernel_ring,
627 struct netlink_ext_ack *extack)
628{
629 struct iavf_adapter *adapter = netdev_priv(netdev);
630 u32 new_rx_count, new_tx_count;
631 int ret = 0;
632
633 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
634 return -EINVAL;
635
636 if (ring->tx_pending > IAVF_MAX_TXD ||
637 ring->tx_pending < IAVF_MIN_TXD ||
638 ring->rx_pending > IAVF_MAX_RXD ||
639 ring->rx_pending < IAVF_MIN_RXD) {
640 netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
641 ring->tx_pending, ring->rx_pending, IAVF_MIN_TXD,
642 IAVF_MAX_RXD, IAVF_REQ_DESCRIPTOR_MULTIPLE);
643 return -EINVAL;
644 }
645
646 new_tx_count = ALIGN(ring->tx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
647 if (new_tx_count != ring->tx_pending)
648 netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n",
649 new_tx_count);
650
651 new_rx_count = ALIGN(ring->rx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
652 if (new_rx_count != ring->rx_pending)
653 netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n",
654 new_rx_count);
655
656 /* if nothing to do return success */
657 if ((new_tx_count == adapter->tx_desc_count) &&
658 (new_rx_count == adapter->rx_desc_count)) {
659 netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
660 return 0;
661 }
662
663 if (new_tx_count != adapter->tx_desc_count) {
664 netdev_dbg(netdev, "Changing Tx descriptor count from %d to %d\n",
665 adapter->tx_desc_count, new_tx_count);
666 adapter->tx_desc_count = new_tx_count;
667 }
668
669 if (new_rx_count != adapter->rx_desc_count) {
670 netdev_dbg(netdev, "Changing Rx descriptor count from %d to %d\n",
671 adapter->rx_desc_count, new_rx_count);
672 adapter->rx_desc_count = new_rx_count;
673 }
674
675 if (netif_running(netdev)) {
676 iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
677 ret = iavf_wait_for_reset(adapter);
678 if (ret)
679 netdev_warn(netdev, "Changing ring parameters timeout or interrupted waiting for reset");
680 }
681
682 return ret;
683}
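/* The get/set ringparam callbacks above back "ethtool -g <ifname>" and, e.g.,
 * "ethtool -G <ifname> tx 4096 rx 4096". Requested counts are rounded up to a
 * multiple of IAVF_REQ_DESCRIPTOR_MULTIPLE and take effect through a VF reset
 * when the interface is running.
 */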
684
685/**
686 * __iavf_get_coalesce - get per-queue coalesce settings
687 * @netdev: the netdev to check
688 * @ec: ethtool coalesce data structure
689 * @queue: which queue to pick
690 *
691 * Gets the per-queue settings for coalescence. Specifically Rx and Tx usecs
692 * are per queue. If queue is <0 then we default to queue 0 as the
693 * representative value.
694 **/
695static int __iavf_get_coalesce(struct net_device *netdev,
696 struct ethtool_coalesce *ec, int queue)
697{
698 struct iavf_adapter *adapter = netdev_priv(netdev);
699 struct iavf_ring *rx_ring, *tx_ring;
700
701 /* Rx and Tx usecs are per-queue values. If the user doesn't specify
702 * a queue, return queue 0's value as the representative one.
703 */
704 if (queue < 0)
705 queue = 0;
706 else if (queue >= adapter->num_active_queues)
707 return -EINVAL;
708
709 rx_ring = &adapter->rx_rings[queue];
710 tx_ring = &adapter->tx_rings[queue];
711
712 if (ITR_IS_DYNAMIC(rx_ring->itr_setting))
713 ec->use_adaptive_rx_coalesce = 1;
714
715 if (ITR_IS_DYNAMIC(tx_ring->itr_setting))
716 ec->use_adaptive_tx_coalesce = 1;
717
718 ec->rx_coalesce_usecs = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
719 ec->tx_coalesce_usecs = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
720
721 return 0;
722}
723
724/**
725 * iavf_get_coalesce - Get interrupt coalescing settings
726 * @netdev: network interface device structure
727 * @ec: ethtool coalesce structure
728 * @kernel_coal: ethtool CQE mode setting structure
729 * @extack: extack for reporting error messages
730 *
731 * Returns current coalescing settings. This is referred to elsewhere in the
732 * driver as Interrupt Throttle Rate, as this is how the hardware describes
733 * this functionality. Note that if per-queue settings have been modified this
734 * only represents the settings of queue 0.
735 **/
736static int iavf_get_coalesce(struct net_device *netdev,
737 struct ethtool_coalesce *ec,
738 struct kernel_ethtool_coalesce *kernel_coal,
739 struct netlink_ext_ack *extack)
740{
741 return __iavf_get_coalesce(netdev, ec, -1);
742}
743
744/**
745 * iavf_get_per_queue_coalesce - get coalesce values for specific queue
746 * @netdev: netdev to read
747 * @ec: coalesce settings from ethtool
748 * @queue: the queue to read
749 *
750 * Read specific queue's coalesce settings.
751 **/
752static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
753 struct ethtool_coalesce *ec)
754{
755 return __iavf_get_coalesce(netdev, ec, queue);
756}
757
758/**
759 * iavf_set_itr_per_queue - set ITR values for specific queue
760 * @adapter: the VF adapter struct to set values for
761 * @ec: coalesce settings from ethtool
762 * @queue: the queue to modify
763 *
764 * Change the ITR settings for a specific queue.
765 **/
766static int iavf_set_itr_per_queue(struct iavf_adapter *adapter,
767 struct ethtool_coalesce *ec, int queue)
768{
769 struct iavf_ring *rx_ring = &adapter->rx_rings[queue];
770 struct iavf_ring *tx_ring = &adapter->tx_rings[queue];
771 struct iavf_q_vector *q_vector;
772 u16 itr_setting;
773
774 itr_setting = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
775
776 if (ec->rx_coalesce_usecs != itr_setting &&
777 ec->use_adaptive_rx_coalesce) {
778 netif_info(adapter, drv, adapter->netdev,
779 "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n");
780 return -EINVAL;
781 }
782
783 itr_setting = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
784
785 if (ec->tx_coalesce_usecs != itr_setting &&
786 ec->use_adaptive_tx_coalesce) {
787 netif_info(adapter, drv, adapter->netdev,
788 "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n");
789 return -EINVAL;
790 }
791
792 rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
793 tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
794
795 rx_ring->itr_setting |= IAVF_ITR_DYNAMIC;
796 if (!ec->use_adaptive_rx_coalesce)
797 rx_ring->itr_setting ^= IAVF_ITR_DYNAMIC;
798
799 tx_ring->itr_setting |= IAVF_ITR_DYNAMIC;
800 if (!ec->use_adaptive_tx_coalesce)
801 tx_ring->itr_setting ^= IAVF_ITR_DYNAMIC;
802
803 q_vector = rx_ring->q_vector;
804 q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
805
806 q_vector = tx_ring->q_vector;
807 q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
808
809 /* The interrupt handler itself will take care of programming
810 * the Tx and Rx ITR values based on the values we have entered
811 * into the q_vector, no need to write the values now.
812 */
813 return 0;
814}
815
816/**
817 * __iavf_set_coalesce - set coalesce settings for particular queue
818 * @netdev: the netdev to change
819 * @ec: ethtool coalesce settings
820 * @queue: the queue to change
821 *
822 * Sets the coalesce settings for a particular queue.
823 **/
824static int __iavf_set_coalesce(struct net_device *netdev,
825 struct ethtool_coalesce *ec, int queue)
826{
827 struct iavf_adapter *adapter = netdev_priv(netdev);
828 int i;
829
830 if (ec->rx_coalesce_usecs == 0) {
831 if (ec->use_adaptive_rx_coalesce)
832 netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
833 } else if ((ec->rx_coalesce_usecs < IAVF_MIN_ITR) ||
834 (ec->rx_coalesce_usecs > IAVF_MAX_ITR)) {
835 netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
836 return -EINVAL;
837 } else if (ec->tx_coalesce_usecs == 0) {
838 if (ec->use_adaptive_tx_coalesce)
839 netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
840 } else if ((ec->tx_coalesce_usecs < IAVF_MIN_ITR) ||
841 (ec->tx_coalesce_usecs > IAVF_MAX_ITR)) {
842 netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
843 return -EINVAL;
844 }
845
846 /* Rx and Tx usecs have a per-queue value. If the user doesn't specify
847 * a queue, apply the settings to all queues.
848 */
849 if (queue < 0) {
850 for (i = 0; i < adapter->num_active_queues; i++)
851 if (iavf_set_itr_per_queue(adapter, ec, i))
852 return -EINVAL;
853 } else if (queue < adapter->num_active_queues) {
854 if (iavf_set_itr_per_queue(adapter, ec, queue))
855 return -EINVAL;
856 } else {
857 netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
858 adapter->num_active_queues - 1);
859 return -EINVAL;
860 }
861
862 return 0;
863}
864
865/**
866 * iavf_set_coalesce - Set interrupt coalescing settings
867 * @netdev: network interface device structure
868 * @ec: ethtool coalesce structure
869 * @kernel_coal: ethtool CQE mode setting structure
870 * @extack: extack for reporting error messages
871 *
872 * Change current coalescing settings for every queue.
873 **/
874static int iavf_set_coalesce(struct net_device *netdev,
875 struct ethtool_coalesce *ec,
876 struct kernel_ethtool_coalesce *kernel_coal,
877 struct netlink_ext_ack *extack)
878{
879 return __iavf_set_coalesce(netdev, ec, -1);
880}
881
882/**
883 * iavf_set_per_queue_coalesce - set specific queue's coalesce settings
884 * @netdev: the netdev to change
885 * @ec: ethtool's coalesce settings
886 * @queue: the queue to modify
887 *
888 * Modifies a specific queue's coalesce settings.
889 */
890static int iavf_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
891 struct ethtool_coalesce *ec)
892{
893 return __iavf_set_coalesce(netdev, ec, queue);
894}
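/* From user space the coalesce callbacks above correspond to, e.g.,
 * "ethtool -C <ifname> rx-usecs 50 adaptive-rx off" for all queues and
 * "ethtool --per-queue <ifname> queue_mask 0x1 --coalesce tx-usecs 84" for a
 * single queue.
 */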
895
896/**
897 * iavf_fltr_to_ethtool_flow - convert filter type values to ethtool
898 * flow type values
899 * @flow: filter type to be converted
900 *
901 * Returns the corresponding ethtool flow type.
902 */
903static int iavf_fltr_to_ethtool_flow(enum iavf_fdir_flow_type flow)
904{
905 switch (flow) {
906 case IAVF_FDIR_FLOW_IPV4_TCP:
907 return TCP_V4_FLOW;
908 case IAVF_FDIR_FLOW_IPV4_UDP:
909 return UDP_V4_FLOW;
910 case IAVF_FDIR_FLOW_IPV4_SCTP:
911 return SCTP_V4_FLOW;
912 case IAVF_FDIR_FLOW_IPV4_AH:
913 return AH_V4_FLOW;
914 case IAVF_FDIR_FLOW_IPV4_ESP:
915 return ESP_V4_FLOW;
916 case IAVF_FDIR_FLOW_IPV4_OTHER:
917 return IPV4_USER_FLOW;
918 case IAVF_FDIR_FLOW_IPV6_TCP:
919 return TCP_V6_FLOW;
920 case IAVF_FDIR_FLOW_IPV6_UDP:
921 return UDP_V6_FLOW;
922 case IAVF_FDIR_FLOW_IPV6_SCTP:
923 return SCTP_V6_FLOW;
924 case IAVF_FDIR_FLOW_IPV6_AH:
925 return AH_V6_FLOW;
926 case IAVF_FDIR_FLOW_IPV6_ESP:
927 return ESP_V6_FLOW;
928 case IAVF_FDIR_FLOW_IPV6_OTHER:
929 return IPV6_USER_FLOW;
930 case IAVF_FDIR_FLOW_NON_IP_L2:
931 return ETHER_FLOW;
932 default:
933 /* 0 is undefined ethtool flow */
934 return 0;
935 }
936}
937
938/**
939 * iavf_ethtool_flow_to_fltr - convert ethtool flow type to filter enum
940 * @eth: Ethtool flow type to be converted
941 *
942 * Returns flow enum
943 */
944static enum iavf_fdir_flow_type iavf_ethtool_flow_to_fltr(int eth)
945{
946 switch (eth) {
947 case TCP_V4_FLOW:
948 return IAVF_FDIR_FLOW_IPV4_TCP;
949 case UDP_V4_FLOW:
950 return IAVF_FDIR_FLOW_IPV4_UDP;
951 case SCTP_V4_FLOW:
952 return IAVF_FDIR_FLOW_IPV4_SCTP;
953 case AH_V4_FLOW:
954 return IAVF_FDIR_FLOW_IPV4_AH;
955 case ESP_V4_FLOW:
956 return IAVF_FDIR_FLOW_IPV4_ESP;
957 case IPV4_USER_FLOW:
958 return IAVF_FDIR_FLOW_IPV4_OTHER;
959 case TCP_V6_FLOW:
960 return IAVF_FDIR_FLOW_IPV6_TCP;
961 case UDP_V6_FLOW:
962 return IAVF_FDIR_FLOW_IPV6_UDP;
963 case SCTP_V6_FLOW:
964 return IAVF_FDIR_FLOW_IPV6_SCTP;
965 case AH_V6_FLOW:
966 return IAVF_FDIR_FLOW_IPV6_AH;
967 case ESP_V6_FLOW:
968 return IAVF_FDIR_FLOW_IPV6_ESP;
969 case IPV6_USER_FLOW:
970 return IAVF_FDIR_FLOW_IPV6_OTHER;
971 case ETHER_FLOW:
972 return IAVF_FDIR_FLOW_NON_IP_L2;
973 default:
974 return IAVF_FDIR_FLOW_NONE;
975 }
976}
977
978/**
979 * iavf_is_mask_valid - check mask field set
980 * @mask: full mask to check
981 * @field: field for which mask should be valid
982 *
983 * If the mask is fully set return true. If it is not valid for field return
984 * false.
985 */
986static bool iavf_is_mask_valid(u64 mask, u64 field)
987{
988 return (mask & field) == field;
989}
990
991/**
992 * iavf_parse_rx_flow_user_data - deconstruct user-defined data
993 * @fsp: pointer to ethtool Rx flow specification
994 * @fltr: pointer to Flow Director filter for userdef data storage
995 *
996 * Returns 0 on success, negative error value on failure
997 */
998static int
999iavf_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
1000 struct iavf_fdir_fltr *fltr)
1001{
1002 struct iavf_flex_word *flex;
1003 int i, cnt = 0;
1004
1005 if (!(fsp->flow_type & FLOW_EXT))
1006 return 0;
1007
1008 for (i = 0; i < IAVF_FLEX_WORD_NUM; i++) {
1009#define IAVF_USERDEF_FLEX_WORD_M GENMASK(15, 0)
1010#define IAVF_USERDEF_FLEX_OFFS_S 16
1011#define IAVF_USERDEF_FLEX_OFFS_M GENMASK(31, IAVF_USERDEF_FLEX_OFFS_S)
1012#define IAVF_USERDEF_FLEX_FLTR_M GENMASK(31, 0)
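/* Layout of each 32-bit user-def word, per the masks above: the 16-bit
 * flex word value sits in bits 15:0 and its offset in bits 31:16.
 */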
1013 u32 value = be32_to_cpu(fsp->h_ext.data[i]);
1014 u32 mask = be32_to_cpu(fsp->m_ext.data[i]);
1015
1016 if (!value || !mask)
1017 continue;
1018
1019 if (!iavf_is_mask_valid(mask, IAVF_USERDEF_FLEX_FLTR_M))
1020 return -EINVAL;
1021
1022 /* 504 is the maximum value for offsets, and offset is measured
1023 * from the start of the MAC address.
1024 */
1025#define IAVF_USERDEF_FLEX_MAX_OFFS_VAL 504
1026 flex = &fltr->flex_words[cnt++];
1027 flex->word = value & IAVF_USERDEF_FLEX_WORD_M;
1028 flex->offset = (value & IAVF_USERDEF_FLEX_OFFS_M) >>
1029 IAVF_USERDEF_FLEX_OFFS_S;
1030 if (flex->offset > IAVF_USERDEF_FLEX_MAX_OFFS_VAL)
1031 return -EINVAL;
1032 }
1033
1034 fltr->flex_cnt = cnt;
1035
1036 return 0;
1037}
1038
1039/**
1040 * iavf_fill_rx_flow_ext_data - fill the additional data
1041 * @fsp: pointer to ethtool Rx flow specification
1042 * @fltr: pointer to Flow Director filter to get additional data
1043 */
1044static void
1045iavf_fill_rx_flow_ext_data(struct ethtool_rx_flow_spec *fsp,
1046 struct iavf_fdir_fltr *fltr)
1047{
1048 if (!fltr->ext_mask.usr_def[0] && !fltr->ext_mask.usr_def[1])
1049 return;
1050
1051 fsp->flow_type |= FLOW_EXT;
1052
1053 memcpy(fsp->h_ext.data, fltr->ext_data.usr_def, sizeof(fsp->h_ext.data));
1054 memcpy(fsp->m_ext.data, fltr->ext_mask.usr_def, sizeof(fsp->m_ext.data));
1055}
1056
1057/**
1058 * iavf_get_ethtool_fdir_entry - fill ethtool structure with Flow Director filter data
1059 * @adapter: the VF adapter structure that contains filter list
1060 * @cmd: ethtool command data structure to receive the filter data
1061 *
1062 * Returns 0 as expected for success by ethtool
1063 */
1064static int
1065iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter,
1066 struct ethtool_rxnfc *cmd)
1067{
1068 struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
1069 struct iavf_fdir_fltr *rule = NULL;
1070 int ret = 0;
1071
1072 if (!FDIR_FLTR_SUPPORT(adapter))
1073 return -EOPNOTSUPP;
1074
1075 spin_lock_bh(&adapter->fdir_fltr_lock);
1076
1077 rule = iavf_find_fdir_fltr_by_loc(adapter, fsp->location);
1078 if (!rule) {
1079 ret = -EINVAL;
1080 goto release_lock;
1081 }
1082
1083 fsp->flow_type = iavf_fltr_to_ethtool_flow(rule->flow_type);
1084
1085 memset(&fsp->m_u, 0, sizeof(fsp->m_u));
1086 memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
1087
1088 switch (fsp->flow_type) {
1089 case TCP_V4_FLOW:
1090 case UDP_V4_FLOW:
1091 case SCTP_V4_FLOW:
1092 fsp->h_u.tcp_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
1093 fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
1094 fsp->h_u.tcp_ip4_spec.psrc = rule->ip_data.src_port;
1095 fsp->h_u.tcp_ip4_spec.pdst = rule->ip_data.dst_port;
1096 fsp->h_u.tcp_ip4_spec.tos = rule->ip_data.tos;
1097 fsp->m_u.tcp_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
1098 fsp->m_u.tcp_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
1099 fsp->m_u.tcp_ip4_spec.psrc = rule->ip_mask.src_port;
1100 fsp->m_u.tcp_ip4_spec.pdst = rule->ip_mask.dst_port;
1101 fsp->m_u.tcp_ip4_spec.tos = rule->ip_mask.tos;
1102 break;
1103 case AH_V4_FLOW:
1104 case ESP_V4_FLOW:
1105 fsp->h_u.ah_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
1106 fsp->h_u.ah_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
1107 fsp->h_u.ah_ip4_spec.spi = rule->ip_data.spi;
1108 fsp->h_u.ah_ip4_spec.tos = rule->ip_data.tos;
1109 fsp->m_u.ah_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
1110 fsp->m_u.ah_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
1111 fsp->m_u.ah_ip4_spec.spi = rule->ip_mask.spi;
1112 fsp->m_u.ah_ip4_spec.tos = rule->ip_mask.tos;
1113 break;
1114 case IPV4_USER_FLOW:
1115 fsp->h_u.usr_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
1116 fsp->h_u.usr_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
1117 fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip_data.l4_header;
1118 fsp->h_u.usr_ip4_spec.tos = rule->ip_data.tos;
1119 fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
1120 fsp->h_u.usr_ip4_spec.proto = rule->ip_data.proto;
1121 fsp->m_u.usr_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
1122 fsp->m_u.usr_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
1123 fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->ip_mask.l4_header;
1124 fsp->m_u.usr_ip4_spec.tos = rule->ip_mask.tos;
1125 fsp->m_u.usr_ip4_spec.ip_ver = 0xFF;
1126 fsp->m_u.usr_ip4_spec.proto = rule->ip_mask.proto;
1127 break;
1128 case TCP_V6_FLOW:
1129 case UDP_V6_FLOW:
1130 case SCTP_V6_FLOW:
1131 memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
1132 sizeof(struct in6_addr));
1133 memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
1134 sizeof(struct in6_addr));
1135 fsp->h_u.tcp_ip6_spec.psrc = rule->ip_data.src_port;
1136 fsp->h_u.tcp_ip6_spec.pdst = rule->ip_data.dst_port;
1137 fsp->h_u.tcp_ip6_spec.tclass = rule->ip_data.tclass;
1138 memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
1139 sizeof(struct in6_addr));
1140 memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
1141 sizeof(struct in6_addr));
1142 fsp->m_u.tcp_ip6_spec.psrc = rule->ip_mask.src_port;
1143 fsp->m_u.tcp_ip6_spec.pdst = rule->ip_mask.dst_port;
1144 fsp->m_u.tcp_ip6_spec.tclass = rule->ip_mask.tclass;
1145 break;
1146 case AH_V6_FLOW:
1147 case ESP_V6_FLOW:
1148 memcpy(fsp->h_u.ah_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
1149 sizeof(struct in6_addr));
1150 memcpy(fsp->h_u.ah_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
1151 sizeof(struct in6_addr));
1152 fsp->h_u.ah_ip6_spec.spi = rule->ip_data.spi;
1153 fsp->h_u.ah_ip6_spec.tclass = rule->ip_data.tclass;
1154 memcpy(fsp->m_u.ah_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
1155 sizeof(struct in6_addr));
1156 memcpy(fsp->m_u.ah_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
1157 sizeof(struct in6_addr));
1158 fsp->m_u.ah_ip6_spec.spi = rule->ip_mask.spi;
1159 fsp->m_u.ah_ip6_spec.tclass = rule->ip_mask.tclass;
1160 break;
1161 case IPV6_USER_FLOW:
1162 memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
1163 sizeof(struct in6_addr));
1164 memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
1165 sizeof(struct in6_addr));
1166 fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip_data.l4_header;
1167 fsp->h_u.usr_ip6_spec.tclass = rule->ip_data.tclass;
1168 fsp->h_u.usr_ip6_spec.l4_proto = rule->ip_data.proto;
1169 memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
1170 sizeof(struct in6_addr));
1171 memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
1172 sizeof(struct in6_addr));
1173 fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->ip_mask.l4_header;
1174 fsp->m_u.usr_ip6_spec.tclass = rule->ip_mask.tclass;
1175 fsp->m_u.usr_ip6_spec.l4_proto = rule->ip_mask.proto;
1176 break;
1177 case ETHER_FLOW:
1178 fsp->h_u.ether_spec.h_proto = rule->eth_data.etype;
1179 fsp->m_u.ether_spec.h_proto = rule->eth_mask.etype;
1180 break;
1181 default:
1182 ret = -EINVAL;
1183 break;
1184 }
1185
1186 iavf_fill_rx_flow_ext_data(fsp, rule);
1187
1188 if (rule->action == VIRTCHNL_ACTION_DROP)
1189 fsp->ring_cookie = RX_CLS_FLOW_DISC;
1190 else
1191 fsp->ring_cookie = rule->q_index;
1192
1193release_lock:
1194 spin_unlock_bh(&adapter->fdir_fltr_lock);
1195 return ret;
1196}
1197
1198/**
1199 * iavf_get_fdir_fltr_ids - fill buffer with filter IDs of active filters
1200 * @adapter: the VF adapter structure containing the filter list
1201 * @cmd: ethtool command data structure
1202 * @rule_locs: ethtool array passed in from OS to receive filter IDs
1203 *
1204 * Returns 0 as expected for success by ethtool
1205 */
1206static int
1207iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd,
1208 u32 *rule_locs)
1209{
1210 struct iavf_fdir_fltr *fltr;
1211 unsigned int cnt = 0;
1212 int val = 0;
1213
1214 if (!FDIR_FLTR_SUPPORT(adapter))
1215 return -EOPNOTSUPP;
1216
1217 cmd->data = IAVF_MAX_FDIR_FILTERS;
1218
1219 spin_lock_bh(&adapter->fdir_fltr_lock);
1220
1221 list_for_each_entry(fltr, &adapter->fdir_list_head, list) {
1222 if (cnt == cmd->rule_cnt) {
1223 val = -EMSGSIZE;
1224 goto release_lock;
1225 }
1226 rule_locs[cnt] = fltr->loc;
1227 cnt++;
1228 }
1229
1230release_lock:
1231 spin_unlock_bh(&adapter->fdir_fltr_lock);
1232 if (!val)
1233 cmd->rule_cnt = cnt;
1234
1235 return val;
1236}
1237
1238/**
1239 * iavf_add_fdir_fltr_info - Set the input set for Flow Director filter
1240 * @adapter: pointer to the VF adapter structure
1241 * @fsp: pointer to ethtool Rx flow specification
1242 * @fltr: filter structure
1243 */
1244static int
1245iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spec *fsp,
1246 struct iavf_fdir_fltr *fltr)
1247{
1248 u32 flow_type, q_index = 0;
1249 enum virtchnl_action act;
1250 int err;
1251
1252 if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
1253 act = VIRTCHNL_ACTION_DROP;
1254 } else {
1255 q_index = fsp->ring_cookie;
1256 if (q_index >= adapter->num_active_queues)
1257 return -EINVAL;
1258
1259 act = VIRTCHNL_ACTION_QUEUE;
1260 }
1261
1262 fltr->action = act;
1263 fltr->loc = fsp->location;
1264 fltr->q_index = q_index;
1265
1266 if (fsp->flow_type & FLOW_EXT) {
1267 memcpy(fltr->ext_data.usr_def, fsp->h_ext.data,
1268 sizeof(fltr->ext_data.usr_def));
1269 memcpy(fltr->ext_mask.usr_def, fsp->m_ext.data,
1270 sizeof(fltr->ext_mask.usr_def));
1271 }
1272
1273 flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
1274 fltr->flow_type = iavf_ethtool_flow_to_fltr(flow_type);
1275
1276 switch (flow_type) {
1277 case TCP_V4_FLOW:
1278 case UDP_V4_FLOW:
1279 case SCTP_V4_FLOW:
1280 fltr->ip_data.v4_addrs.src_ip = fsp->h_u.tcp_ip4_spec.ip4src;
1281 fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
1282 fltr->ip_data.src_port = fsp->h_u.tcp_ip4_spec.psrc;
1283 fltr->ip_data.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
1284 fltr->ip_data.tos = fsp->h_u.tcp_ip4_spec.tos;
1285 fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.tcp_ip4_spec.ip4src;
1286 fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst;
1287 fltr->ip_mask.src_port = fsp->m_u.tcp_ip4_spec.psrc;
1288 fltr->ip_mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
1289 fltr->ip_mask.tos = fsp->m_u.tcp_ip4_spec.tos;
1290 fltr->ip_ver = 4;
1291 break;
1292 case AH_V4_FLOW:
1293 case ESP_V4_FLOW:
1294 fltr->ip_data.v4_addrs.src_ip = fsp->h_u.ah_ip4_spec.ip4src;
1295 fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.ah_ip4_spec.ip4dst;
1296 fltr->ip_data.spi = fsp->h_u.ah_ip4_spec.spi;
1297 fltr->ip_data.tos = fsp->h_u.ah_ip4_spec.tos;
1298 fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.ah_ip4_spec.ip4src;
1299 fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.ah_ip4_spec.ip4dst;
1300 fltr->ip_mask.spi = fsp->m_u.ah_ip4_spec.spi;
1301 fltr->ip_mask.tos = fsp->m_u.ah_ip4_spec.tos;
1302 fltr->ip_ver = 4;
1303 break;
1304 case IPV4_USER_FLOW:
1305 fltr->ip_data.v4_addrs.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
1306 fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst;
1307 fltr->ip_data.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes;
1308 fltr->ip_data.tos = fsp->h_u.usr_ip4_spec.tos;
1309 fltr->ip_data.proto = fsp->h_u.usr_ip4_spec.proto;
1310 fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.usr_ip4_spec.ip4src;
1311 fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst;
1312 fltr->ip_mask.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
1313 fltr->ip_mask.tos = fsp->m_u.usr_ip4_spec.tos;
1314 fltr->ip_mask.proto = fsp->m_u.usr_ip4_spec.proto;
1315 fltr->ip_ver = 4;
1316 break;
1317 case TCP_V6_FLOW:
1318 case UDP_V6_FLOW:
1319 case SCTP_V6_FLOW:
1320 memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
1321 sizeof(struct in6_addr));
1322 memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
1323 sizeof(struct in6_addr));
1324 fltr->ip_data.src_port = fsp->h_u.tcp_ip6_spec.psrc;
1325 fltr->ip_data.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
1326 fltr->ip_data.tclass = fsp->h_u.tcp_ip6_spec.tclass;
1327 memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
1328 sizeof(struct in6_addr));
1329 memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
1330 sizeof(struct in6_addr));
1331 fltr->ip_mask.src_port = fsp->m_u.tcp_ip6_spec.psrc;
1332 fltr->ip_mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
1333 fltr->ip_mask.tclass = fsp->m_u.tcp_ip6_spec.tclass;
1334 fltr->ip_ver = 6;
1335 break;
1336 case AH_V6_FLOW:
1337 case ESP_V6_FLOW:
1338 memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.ah_ip6_spec.ip6src,
1339 sizeof(struct in6_addr));
1340 memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.ah_ip6_spec.ip6dst,
1341 sizeof(struct in6_addr));
1342 fltr->ip_data.spi = fsp->h_u.ah_ip6_spec.spi;
1343 fltr->ip_data.tclass = fsp->h_u.ah_ip6_spec.tclass;
1344 memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.ah_ip6_spec.ip6src,
1345 sizeof(struct in6_addr));
1346 memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.ah_ip6_spec.ip6dst,
1347 sizeof(struct in6_addr));
1348 fltr->ip_mask.spi = fsp->m_u.ah_ip6_spec.spi;
1349 fltr->ip_mask.tclass = fsp->m_u.ah_ip6_spec.tclass;
1350 fltr->ip_ver = 6;
1351 break;
1352 case IPV6_USER_FLOW:
1353 memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
1354 sizeof(struct in6_addr));
1355 memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
1356 sizeof(struct in6_addr));
1357 fltr->ip_data.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
1358 fltr->ip_data.tclass = fsp->h_u.usr_ip6_spec.tclass;
1359 fltr->ip_data.proto = fsp->h_u.usr_ip6_spec.l4_proto;
1360 memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
1361 sizeof(struct in6_addr));
1362 memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
1363 sizeof(struct in6_addr));
1364 fltr->ip_mask.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
1365 fltr->ip_mask.tclass = fsp->m_u.usr_ip6_spec.tclass;
1366 fltr->ip_mask.proto = fsp->m_u.usr_ip6_spec.l4_proto;
1367 fltr->ip_ver = 6;
1368 break;
1369 case ETHER_FLOW:
1370 fltr->eth_data.etype = fsp->h_u.ether_spec.h_proto;
1371 fltr->eth_mask.etype = fsp->m_u.ether_spec.h_proto;
1372 break;
1373 default:
1374 /* not doing un-parsed flow types */
1375 return -EINVAL;
1376 }
1377
1378 err = iavf_validate_fdir_fltr_masks(adapter, fltr);
1379 if (err)
1380 return err;
1381
1382 if (iavf_fdir_is_dup_fltr(adapter, fltr))
1383 return -EEXIST;
1384
1385 err = iavf_parse_rx_flow_user_data(fsp, fltr);
1386 if (err)
1387 return err;
1388
1389 return iavf_fill_fdir_add_msg(adapter, fltr);
1390}
1391
1392/**
1393 * iavf_add_fdir_ethtool - add Flow Director filter
1394 * @adapter: pointer to the VF adapter structure
1395 * @cmd: command to add Flow Director filter
1396 *
1397 * Returns 0 on success and negative values for failure
1398 */
1399static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
1400{
1401 struct ethtool_rx_flow_spec *fsp = &cmd->fs;
1402 struct iavf_fdir_fltr *fltr;
1403 int count = 50;
1404 int err;
1405
1406 if (!FDIR_FLTR_SUPPORT(adapter))
1407 return -EOPNOTSUPP;
1408
1409 if (fsp->flow_type & FLOW_MAC_EXT)
1410 return -EINVAL;
1411
1412 spin_lock_bh(&adapter->fdir_fltr_lock);
1413 if (adapter->fdir_active_fltr >= IAVF_MAX_FDIR_FILTERS) {
1414 spin_unlock_bh(&adapter->fdir_fltr_lock);
1415 dev_err(&adapter->pdev->dev,
1416 "Unable to add Flow Director filter because VF reached the limit of max allowed filters (%u)\n",
1417 IAVF_MAX_FDIR_FILTERS);
1418 return -ENOSPC;
1419 }
1420
1421 if (iavf_find_fdir_fltr_by_loc(adapter, fsp->location)) {
1422 dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, it already exists\n");
1423 spin_unlock_bh(&adapter->fdir_fltr_lock);
1424 return -EEXIST;
1425 }
1426 spin_unlock_bh(&adapter->fdir_fltr_lock);
1427
1428 fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
1429 if (!fltr)
1430 return -ENOMEM;
1431
1432 while (!mutex_trylock(&adapter->crit_lock)) {
1433 if (--count == 0) {
1434 kfree(fltr);
1435 return -EINVAL;
1436 }
1437 udelay(1);
1438 }
1439
1440 err = iavf_add_fdir_fltr_info(adapter, fsp, fltr);
1441 if (err)
1442 goto ret;
1443
1444 spin_lock_bh(&adapter->fdir_fltr_lock);
1445 iavf_fdir_list_add_fltr(adapter, fltr);
1446 adapter->fdir_active_fltr++;
1447 fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
1448 adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
1449 spin_unlock_bh(&adapter->fdir_fltr_lock);
1450
1451 mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
1452
1453ret:
1454 if (err && fltr)
1455 kfree(fltr);
1456
1457 mutex_unlock(&adapter->crit_lock);
1458 return err;
1459}
1460
1461/**
1462 * iavf_del_fdir_ethtool - delete Flow Director filter
1463 * @adapter: pointer to the VF adapter structure
1464 * @cmd: command to delete Flow Director filter
1465 *
1466 * Returns 0 on success and negative values for failure
1467 */
1468static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
1469{
1470 struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
1471 struct iavf_fdir_fltr *fltr = NULL;
1472 int err = 0;
1473
1474 if (!FDIR_FLTR_SUPPORT(adapter))
1475 return -EOPNOTSUPP;
1476
1477 spin_lock_bh(&adapter->fdir_fltr_lock);
1478 fltr = iavf_find_fdir_fltr_by_loc(adapter, fsp->location);
1479 if (fltr) {
1480 if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) {
1481 fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
1482 adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
1483 } else {
1484 err = -EBUSY;
1485 }
1486 } else if (adapter->fdir_active_fltr) {
1487 err = -EINVAL;
1488 }
1489 spin_unlock_bh(&adapter->fdir_fltr_lock);
1490
1491 if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
1492 mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
1493
1494 return err;
1495}
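/* Flow Director filters are managed through ethtool's ntuple interface,
 * e.g. "ethtool -N <ifname> flow-type tcp4 dst-port 80 action 2 loc 1" to
 * add a rule and "ethtool -N <ifname> delete 1" to remove it again.
 */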
1496
1497/**
1498 * iavf_adv_rss_parse_hdrs - parses headers from RSS hash input
1499 * @cmd: ethtool rxnfc command
1500 *
1501 * This function parses the rxnfc command and returns intended
1502 * header types for RSS configuration
1503 */
1504static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd)
1505{
1506 u32 hdrs = IAVF_ADV_RSS_FLOW_SEG_HDR_NONE;
1507
1508 switch (cmd->flow_type) {
1509 case TCP_V4_FLOW:
1510 hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_TCP |
1511 IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
1512 break;
1513 case UDP_V4_FLOW:
1514 hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
1515 IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
1516 break;
1517 case SCTP_V4_FLOW:
1518 hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP |
1519 IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
1520 break;
1521 case TCP_V6_FLOW:
1522 hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_TCP |
1523 IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
1524 break;
1525 case UDP_V6_FLOW:
1526 hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
1527 IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
1528 break;
1529 case SCTP_V6_FLOW:
1530 hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP |
1531 IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
1532 break;
1533 default:
1534 break;
1535 }
1536
1537 return hdrs;
1538}
1539
1540/**
1541 * iavf_adv_rss_parse_hash_flds - parses hash fields from RSS hash input
1542 * @cmd: ethtool rxnfc command
1543 *
1544 * This function parses the rxnfc command and returns intended hash fields for
1545 * RSS configuration
1546 */
1547static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd)
1548{
1549 u64 hfld = IAVF_ADV_RSS_HASH_INVALID;
1550
1551 if (cmd->data & RXH_IP_SRC || cmd->data & RXH_IP_DST) {
1552 switch (cmd->flow_type) {
1553 case TCP_V4_FLOW:
1554 case UDP_V4_FLOW:
1555 case SCTP_V4_FLOW:
1556 if (cmd->data & RXH_IP_SRC)
1557 hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_SA;
1558 if (cmd->data & RXH_IP_DST)
1559 hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_DA;
1560 break;
1561 case TCP_V6_FLOW:
1562 case UDP_V6_FLOW:
1563 case SCTP_V6_FLOW:
1564 if (cmd->data & RXH_IP_SRC)
1565 hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_SA;
1566 if (cmd->data & RXH_IP_DST)
1567 hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_DA;
1568 break;
1569 default:
1570 break;
1571 }
1572 }
1573
1574 if (cmd->data & RXH_L4_B_0_1 || cmd->data & RXH_L4_B_2_3) {
1575 switch (cmd->flow_type) {
1576 case TCP_V4_FLOW:
1577 case TCP_V6_FLOW:
1578 if (cmd->data & RXH_L4_B_0_1)
1579 hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT;
1580 if (cmd->data & RXH_L4_B_2_3)
1581 hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT;
1582 break;
1583 case UDP_V4_FLOW:
1584 case UDP_V6_FLOW:
1585 if (cmd->data & RXH_L4_B_0_1)
1586 hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT;
1587 if (cmd->data & RXH_L4_B_2_3)
1588 hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT;
1589 break;
1590 case SCTP_V4_FLOW:
1591 case SCTP_V6_FLOW:
1592 if (cmd->data & RXH_L4_B_0_1)
1593 hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT;
1594 if (cmd->data & RXH_L4_B_2_3)
1595 hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT;
1596 break;
1597 default:
1598 break;
1599 }
1600 }
1601
1602 return hfld;
1603}
1604
1605/**
1606 * iavf_set_adv_rss_hash_opt - Enable/Disable flow types for RSS hash
1607 * @adapter: pointer to the VF adapter structure
1608 * @cmd: ethtool rxnfc command
1609 *
1610 * Returns Success if the flow input set is supported.
1611 */
1612static int
1613iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
1614 struct ethtool_rxnfc *cmd)
1615{
1616 struct iavf_adv_rss *rss_old, *rss_new;
1617 bool rss_new_add = false;
1618 int count = 50, err = 0;
1619 u64 hash_flds;
1620 u32 hdrs;
1621
1622 if (!ADV_RSS_SUPPORT(adapter))
1623 return -EOPNOTSUPP;
1624
1625 hdrs = iavf_adv_rss_parse_hdrs(cmd);
1626 if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE)
1627 return -EINVAL;
1628
1629 hash_flds = iavf_adv_rss_parse_hash_flds(cmd);
1630 if (hash_flds == IAVF_ADV_RSS_HASH_INVALID)
1631 return -EINVAL;
1632
1633 rss_new = kzalloc(sizeof(*rss_new), GFP_KERNEL);
1634 if (!rss_new)
1635 return -ENOMEM;
1636
1637 if (iavf_fill_adv_rss_cfg_msg(&rss_new->cfg_msg, hdrs, hash_flds)) {
1638 kfree(rss_new);
1639 return -EINVAL;
1640 }
1641
1642 while (!mutex_trylock(&adapter->crit_lock)) {
1643 if (--count == 0) {
1644 kfree(rss_new);
1645 return -EINVAL;
1646 }
1647
1648 udelay(1);
1649 }
1650
1651 spin_lock_bh(&adapter->adv_rss_lock);
1652 rss_old = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
1653 if (rss_old) {
1654 if (rss_old->state != IAVF_ADV_RSS_ACTIVE) {
1655 err = -EBUSY;
1656 } else if (rss_old->hash_flds != hash_flds) {
1657 rss_old->state = IAVF_ADV_RSS_ADD_REQUEST;
1658 rss_old->hash_flds = hash_flds;
1659 memcpy(&rss_old->cfg_msg, &rss_new->cfg_msg,
1660 sizeof(rss_new->cfg_msg));
1661 adapter->aq_required |= IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
1662 } else {
1663 err = -EEXIST;
1664 }
1665 } else {
1666 rss_new_add = true;
1667 rss_new->state = IAVF_ADV_RSS_ADD_REQUEST;
1668 rss_new->packet_hdrs = hdrs;
1669 rss_new->hash_flds = hash_flds;
1670 list_add_tail(&rss_new->list, &adapter->adv_rss_list_head);
1671 adapter->aq_required |= IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
1672 }
1673 spin_unlock_bh(&adapter->adv_rss_lock);
1674
1675 if (!err)
1676 mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
1677
1678 mutex_unlock(&adapter->crit_lock);
1679
1680 if (!rss_new_add)
1681 kfree(rss_new);
1682
1683 return err;
1684}
1685
1686/**
1687 * iavf_get_adv_rss_hash_opt - Retrieve hash fields for a given flow-type
1688 * @adapter: pointer to the VF adapter structure
1689 * @cmd: ethtool rxnfc command
1690 *
1691 * Returns Success if the flow input set is supported.
1692 */
static int
iavf_get_adv_rss_hash_opt(struct iavf_adapter *adapter,
			  struct ethtool_rxnfc *cmd)
{
	struct iavf_adv_rss *rss;
	u64 hash_flds;
	u32 hdrs;

	if (!ADV_RSS_SUPPORT(adapter))
		return -EOPNOTSUPP;

	cmd->data = 0;

	hdrs = iavf_adv_rss_parse_hdrs(cmd);
	if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE)
		return -EINVAL;

	spin_lock_bh(&adapter->adv_rss_lock);
	rss = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
	if (rss)
		hash_flds = rss->hash_flds;
	else
		hash_flds = IAVF_ADV_RSS_HASH_INVALID;
	spin_unlock_bh(&adapter->adv_rss_lock);

	if (hash_flds == IAVF_ADV_RSS_HASH_INVALID)
		return -EINVAL;

	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_SA |
			 IAVF_ADV_RSS_HASH_FLD_IPV6_SA))
		cmd->data |= (u64)RXH_IP_SRC;

	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_DA |
			 IAVF_ADV_RSS_HASH_FLD_IPV6_DA))
		cmd->data |= (u64)RXH_IP_DST;

	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT |
			 IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT |
			 IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT))
		cmd->data |= (u64)RXH_L4_B_0_1;

	if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT |
			 IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT |
			 IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT))
		cmd->data |= (u64)RXH_L4_B_2_3;

	return 0;
}

/**
 * iavf_set_rxnfc - command to set Rx flow rules.
 * @netdev: network interface device structure
 * @cmd: ethtool rxnfc command
 *
 * Returns 0 for success and negative values for errors.
 */
static int iavf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = iavf_add_fdir_ethtool(adapter, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = iavf_del_fdir_ethtool(adapter, cmd);
		break;
	case ETHTOOL_SRXFH:
		ret = iavf_set_adv_rss_hash_opt(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}
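
/* Examples (illustrative only; interface name and rule values are assumed)
 * of requests that land in iavf_set_rxnfc():
 *
 *	ethtool -N ens1f0v0 flow-type tcp4 dst-port 80 action 2
 *		-> ETHTOOL_SRXCLSRLINS (add a Flow Director rule)
 *	ethtool -N ens1f0v0 delete <loc>
 *		-> ETHTOOL_SRXCLSRLDEL (remove the rule at that location)
 *	ethtool -N ens1f0v0 rx-flow-hash udp4 sd
 *		-> ETHTOOL_SRXFH (set the RSS hash fields for a flow type)
 */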

/**
 * iavf_get_rxnfc - command to get RX flow classification rules
 * @netdev: network interface device structure
 * @cmd: ethtool rxnfc command
 * @rule_locs: pointer to store rule locations
 *
 * Returns 0 if the command is supported, otherwise a negative error code.
 **/
static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
			  u32 *rule_locs)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = adapter->num_active_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		if (!FDIR_FLTR_SUPPORT(adapter))
			break;
		spin_lock_bh(&adapter->fdir_fltr_lock);
		cmd->rule_cnt = adapter->fdir_active_fltr;
		spin_unlock_bh(&adapter->fdir_fltr_lock);
		cmd->data = IAVF_MAX_FDIR_FILTERS;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = iavf_get_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = iavf_get_fdir_fltr_ids(adapter, cmd, (u32 *)rule_locs);
		break;
	case ETHTOOL_GRXFH:
		ret = iavf_get_adv_rss_hash_opt(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}
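
/* Examples (illustrative only; the interface name is assumed) of requests
 * served by iavf_get_rxnfc():
 *
 *	ethtool -n ens1f0v0
 *		-> rule count and locations (ETHTOOL_GRXCLSRLCNT/GRXCLSRLALL)
 *	ethtool -n ens1f0v0 rule <loc>
 *		-> a single rule (ETHTOOL_GRXCLSRULE)
 *	ethtool -n ens1f0v0 rx-flow-hash tcp4
 *		-> configured hash fields (ETHTOOL_GRXFH)
 */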

/**
 * iavf_get_channels: get the number of channels supported by the device
 * @netdev: network interface device structure
 * @ch: channel information structure
 *
 * For the purposes of our device, we only use combined channels, i.e. a tx/rx
 * queue pair. Report one extra channel to match our "other" MSI-X vector.
 **/
static void iavf_get_channels(struct net_device *netdev,
			      struct ethtool_channels *ch)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	/* Report maximum channels */
	ch->max_combined = adapter->vsi_res->num_queue_pairs;

	ch->max_other = NONQ_VECS;
	ch->other_count = NONQ_VECS;

	ch->combined_count = adapter->num_active_queues;
}

/**
 * iavf_set_channels: set the new channel count
 * @netdev: network interface device structure
 * @ch: channel information structure
 *
 * Negotiate a new number of channels with the PF then do a reset. During
 * reset we'll realloc queues and fix the RSS table. Returns 0 on success,
 * negative on failure.
 **/
static int iavf_set_channels(struct net_device *netdev,
			     struct ethtool_channels *ch)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	u32 num_req = ch->combined_count;
	int ret = 0;

	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
	    adapter->num_tc) {
		dev_info(&adapter->pdev->dev, "Cannot set channels since ADq is enabled.\n");
		return -EINVAL;
	}

	/* All of these should have already been checked by ethtool before this
	 * even gets to us, but just to be sure.
	 */
	if (num_req == 0 || num_req > adapter->vsi_res->num_queue_pairs)
		return -EINVAL;

	if (num_req == adapter->num_active_queues)
		return 0;
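
	/* Only combined (paired Tx/Rx) channels can be reconfigured; reject
	 * requests for separate rx/tx channels or for a different "other"
	 * (non-queue) vector count.
	 */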
	if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS)
		return -EINVAL;

	adapter->num_req_queues = num_req;
	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
	iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);

	ret = iavf_wait_for_reset(adapter);
	if (ret)
		netdev_warn(netdev, "Timed out or was interrupted while waiting for reset after changing channel count");

	return ret;
}
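
/* Example (illustrative only; the interface name is assumed):
 *
 *	ethtool -L ens1f0v0 combined 8
 *
 * requests eight Tx/Rx queue pairs; the new count takes effect after the VF
 * negotiates the queues with the PF and the reset scheduled above completes.
 */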

/**
 * iavf_get_rxfh_key_size - get the RSS hash key size
 * @netdev: network interface device structure
 *
 * Returns the RSS hash key size.
 **/
static u32 iavf_get_rxfh_key_size(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	return adapter->rss_key_size;
}

/**
 * iavf_get_rxfh_indir_size - get the rx flow hash indirection table size
 * @netdev: network interface device structure
 *
 * Returns the table size.
 **/
static u32 iavf_get_rxfh_indir_size(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	return adapter->rss_lut_size;
}

/**
 * iavf_get_rxfh - get the rx flow hash indirection table
 * @netdev: network interface device structure
 * @indir: indirection table
 * @key: hash key
 * @hfunc: hash function in use
 *
 * Copies the adapter's cached RSS hash key and indirection table into the
 * supplied buffers. Always returns 0.
 **/
static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			 u8 *hfunc)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	u16 i;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;
	if (key)
		memcpy(key, adapter->rss_key, adapter->rss_key_size);

	if (indir)
		/* Each 32-bit word of 'indir' holds one LUT entry */
		for (i = 0; i < adapter->rss_lut_size; i++)
			indir[i] = (u32)adapter->rss_lut[i];

	return 0;
}

/**
 * iavf_set_rxfh - set the rx flow hash indirection table
 * @netdev: network interface device structure
 * @indir: indirection table
 * @key: hash key
 * @hfunc: hash function to use
 *
 * Returns 0 after updating the adapter's RSS key and indirection table and
 * requesting reconfiguration, otherwise a negative error code.
 **/
static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	u16 i;

	/* Only the Toeplitz hash function is supported */
	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (!key && !indir)
		return 0;

	if (key)
		memcpy(adapter->rss_key, key, adapter->rss_key_size);

	if (indir) {
		/* Each 32-bit word of 'indir' holds one LUT entry */
		for (i = 0; i < adapter->rss_lut_size; i++)
			adapter->rss_lut[i] = (u8)(indir[i]);
	}

	return iavf_config_rss(adapter);
}
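
/* Example (illustrative only; the interface name is assumed):
 *
 *	ethtool -X ens1f0v0 hfunc toeplitz equal 4
 *
 * spreads the indirection table evenly over the first four queues, and
 *
 *	ethtool -x ens1f0v0
 *
 * reads the table and key back through iavf_get_rxfh().
 */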

static const struct ethtool_ops iavf_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE,
	.get_drvinfo = iavf_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = iavf_get_ringparam,
	.set_ringparam = iavf_set_ringparam,
	.get_strings = iavf_get_strings,
	.get_ethtool_stats = iavf_get_ethtool_stats,
	.get_sset_count = iavf_get_sset_count,
	.get_priv_flags = iavf_get_priv_flags,
	.set_priv_flags = iavf_set_priv_flags,
	.get_msglevel = iavf_get_msglevel,
	.set_msglevel = iavf_set_msglevel,
	.get_coalesce = iavf_get_coalesce,
	.set_coalesce = iavf_set_coalesce,
	.get_per_queue_coalesce = iavf_get_per_queue_coalesce,
	.set_per_queue_coalesce = iavf_set_per_queue_coalesce,
	.set_rxnfc = iavf_set_rxnfc,
	.get_rxnfc = iavf_get_rxnfc,
	.get_rxfh_indir_size = iavf_get_rxfh_indir_size,
	.get_rxfh = iavf_get_rxfh,
	.set_rxfh = iavf_set_rxfh,
	.get_channels = iavf_get_channels,
	.set_channels = iavf_set_channels,
	.get_rxfh_key_size = iavf_get_rxfh_key_size,
	.get_link_ksettings = iavf_get_link_ksettings,
};

/**
 * iavf_set_ethtool_ops - Initialize ethtool ops struct
 * @netdev: network interface device structure
 *
 * Sets ethtool ops struct in our netdev so that ethtool can call
 * our functions.
 **/
void iavf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &iavf_ethtool_ops;
}