1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2018-2023, Intel Corporation. */
3
4/* flow director ethtool support for ice */
5
6#include "ice.h"
7#include "ice_lib.h"
8#include "ice_fdir.h"
9#include "ice_flow.h"
10
11static struct in6_addr full_ipv6_addr_mask = {
12 .in6_u = {
13 .u6_addr8 = {
14 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
15 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
16 }
17 }
18};
19
20static struct in6_addr zero_ipv6_addr_mask = {
21 .in6_u = {
22 .u6_addr8 = {
23 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
24 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
25 }
26 }
27};
28
/* Calls to ice_flow_add_prof() take the number of segments in the segs
 * array as segs_cnt. In this code that is one more than the tunnel-level
 * index (ICE_FD_HW_SEG_*), hence the +1.
 */
#define TNL_SEG_CNT(_TNL_) ((_TNL_) + 1)
33
34/**
35 * ice_fltr_to_ethtool_flow - convert filter type values to ethtool
36 * flow type values
37 * @flow: filter type to be converted
38 *
39 * Returns the corresponding ethtool flow type.
40 */
41static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow)
42{
43 switch (flow) {
44 case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
45 return TCP_V4_FLOW;
46 case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
47 return UDP_V4_FLOW;
48 case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
49 return SCTP_V4_FLOW;
50 case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
51 return IPV4_USER_FLOW;
52 case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
53 return TCP_V6_FLOW;
54 case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
55 return UDP_V6_FLOW;
56 case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
57 return SCTP_V6_FLOW;
58 case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
59 return IPV6_USER_FLOW;
60 default:
61 /* 0 is undefined ethtool flow */
62 return 0;
63 }
64}
65
66/**
67 * ice_ethtool_flow_to_fltr - convert ethtool flow type to filter enum
68 * @eth: Ethtool flow type to be converted
69 *
70 * Returns flow enum
71 */
72static enum ice_fltr_ptype ice_ethtool_flow_to_fltr(int eth)
73{
74 switch (eth) {
75 case TCP_V4_FLOW:
76 return ICE_FLTR_PTYPE_NONF_IPV4_TCP;
77 case UDP_V4_FLOW:
78 return ICE_FLTR_PTYPE_NONF_IPV4_UDP;
79 case SCTP_V4_FLOW:
80 return ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
81 case IPV4_USER_FLOW:
82 return ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
83 case TCP_V6_FLOW:
84 return ICE_FLTR_PTYPE_NONF_IPV6_TCP;
85 case UDP_V6_FLOW:
86 return ICE_FLTR_PTYPE_NONF_IPV6_UDP;
87 case SCTP_V6_FLOW:
88 return ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
89 case IPV6_USER_FLOW:
90 return ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
91 default:
92 return ICE_FLTR_PTYPE_NONF_NONE;
93 }
94}
95
96/**
97 * ice_is_mask_valid - check mask field set
98 * @mask: full mask to check
99 * @field: field for which mask should be valid
100 *
101 * If the mask is fully set return true. If it is not valid for field return
102 * false.
103 */
104static bool ice_is_mask_valid(u64 mask, u64 field)
105{
106 return (mask & field) == field;
107}
108
/**
 * ice_get_ethtool_fdir_entry - fill ethtool structure with fdir filter data
 * @hw: hardware structure that contains filter list
 * @cmd: ethtool command data structure to receive the filter data
 *
 * Look up the rule with ID cmd->fs.location and translate it back into the
 * ethtool_rx_flow_spec representation (match values plus masks) for
 * reporting to user space.
 *
 * Returns 0 on success and -EINVAL on failure
 */
int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp;
	struct ice_fdir_fltr *rule;
	int ret = 0;
	u16 idx;

	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;

	/* hold the filter-list lock while reading so the rule cannot be
	 * deleted or modified underneath us
	 */
	mutex_lock(&hw->fdir_fltr_lock);

	rule = ice_fdir_find_fltr_by_idx(hw, fltr_idx: fsp->location);

	/* no rule, or a rule with a different ID, means the requested
	 * entry does not exist
	 */
	if (!rule || fsp->location != rule->fltr_id) {
		ret = -EINVAL;
		goto release_lock;
	}

	fsp->flow_type = ice_fltr_to_ethtool_flow(flow: rule->flow_type);

	/* start from all-zero masks; only fields set below take part in
	 * the match
	 */
	memset(&fsp->m_u, 0, sizeof(fsp->m_u));
	memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));

	switch (fsp->flow_type) {
	case IPV4_USER_FLOW:
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = 0;
		fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip.v4.l4_header;
		fsp->h_u.usr_ip4_spec.tos = rule->ip.v4.tos;
		fsp->h_u.usr_ip4_spec.ip4src = rule->ip.v4.src_ip;
		fsp->h_u.usr_ip4_spec.ip4dst = rule->ip.v4.dst_ip;
		fsp->m_u.usr_ip4_spec.ip4src = rule->mask.v4.src_ip;
		fsp->m_u.usr_ip4_spec.ip4dst = rule->mask.v4.dst_ip;
		fsp->m_u.usr_ip4_spec.ip_ver = 0xFF;
		fsp->m_u.usr_ip4_spec.proto = 0;
		fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->mask.v4.l4_header;
		fsp->m_u.usr_ip4_spec.tos = rule->mask.v4.tos;
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		fsp->h_u.tcp_ip4_spec.psrc = rule->ip.v4.src_port;
		fsp->h_u.tcp_ip4_spec.pdst = rule->ip.v4.dst_port;
		fsp->h_u.tcp_ip4_spec.ip4src = rule->ip.v4.src_ip;
		fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip.v4.dst_ip;
		fsp->m_u.tcp_ip4_spec.psrc = rule->mask.v4.src_port;
		fsp->m_u.tcp_ip4_spec.pdst = rule->mask.v4.dst_port;
		fsp->m_u.tcp_ip4_spec.ip4src = rule->mask.v4.src_ip;
		fsp->m_u.tcp_ip4_spec.ip4dst = rule->mask.v4.dst_ip;
		break;
	case IPV6_USER_FLOW:
		fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip.v6.l4_header;
		fsp->h_u.usr_ip6_spec.tclass = rule->ip.v6.tc;
		fsp->h_u.usr_ip6_spec.l4_proto = rule->ip.v6.proto;
		memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->m_u.tcp_ip6_spec.ip6src, rule->mask.v6.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->m_u.tcp_ip6_spec.ip6dst, rule->mask.v6.dst_ip,
		       sizeof(struct in6_addr));
		fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->mask.v6.l4_header;
		fsp->m_u.usr_ip6_spec.tclass = rule->mask.v6.tc;
		fsp->m_u.usr_ip6_spec.l4_proto = rule->mask.v6.proto;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->ip.v6.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->ip.v6.dst_ip,
		       sizeof(struct in6_addr));
		fsp->h_u.tcp_ip6_spec.psrc = rule->ip.v6.src_port;
		fsp->h_u.tcp_ip6_spec.pdst = rule->ip.v6.dst_port;
		memcpy(fsp->m_u.tcp_ip6_spec.ip6src,
		       rule->mask.v6.src_ip,
		       sizeof(struct in6_addr));
		memcpy(fsp->m_u.tcp_ip6_spec.ip6dst,
		       rule->mask.v6.dst_ip,
		       sizeof(struct in6_addr));
		fsp->m_u.tcp_ip6_spec.psrc = rule->mask.v6.src_port;
		fsp->m_u.tcp_ip6_spec.pdst = rule->mask.v6.dst_port;
		fsp->h_u.tcp_ip6_spec.tclass = rule->ip.v6.tc;
		fsp->m_u.tcp_ip6_spec.tclass = rule->mask.v6.tc;
		break;
	default:
		break;
	}

	/* dropped packets are reported with the special "discard" cookie;
	 * otherwise report the originally requested queue index
	 */
	if (rule->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->orig_q_index;

	/* sanity check: the reverse mapping must resolve to a known type */
	idx = ice_ethtool_flow_to_fltr(eth: fsp->flow_type);
	if (idx == ICE_FLTR_PTYPE_NONF_NONE) {
		dev_err(ice_hw_to_dev(hw), "Missing input index for flow_type %d\n",
			rule->flow_type);
		ret = -EINVAL;
	}

release_lock:
	mutex_unlock(lock: &hw->fdir_fltr_lock);
	return ret;
}
222
/**
 * ice_get_fdir_fltr_ids - fill buffer with filter IDs of active filters
 * @hw: hardware structure containing the filter list
 * @cmd: ethtool command data structure
 * @rule_locs: ethtool array passed in from OS to receive filter IDs
 *
 * cmd->data is set to the total rule count and rule_locs is filled with up
 * to cmd->rule_cnt filter IDs. On success cmd->rule_cnt is updated to the
 * number of IDs actually written; -EMSGSIZE is returned when the caller's
 * buffer is too small.
 *
 * Returns 0 as expected for success by ethtool
 */
int
ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd,
		      u32 *rule_locs)
{
	struct ice_fdir_fltr *f_rule;
	unsigned int cnt = 0;
	int val = 0;

	/* report total rule count */
	cmd->data = ice_get_fdir_cnt_all(hw);

	mutex_lock(&hw->fdir_fltr_lock);

	list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) {
		/* caller's buffer is full; signal that more space is needed */
		if (cnt == cmd->rule_cnt) {
			val = -EMSGSIZE;
			goto release_lock;
		}
		rule_locs[cnt] = f_rule->fltr_id;
		cnt++;
	}

release_lock:
	mutex_unlock(lock: &hw->fdir_fltr_lock);
	if (!val)
		cmd->rule_cnt = cnt;
	return val;
}
259
260/**
261 * ice_fdir_remap_entries - update the FDir entries in profile
262 * @prof: FDir structure pointer
263 * @tun: tunneled or non-tunneled packet
264 * @idx: FDir entry index
265 */
266static void
267ice_fdir_remap_entries(struct ice_fd_hw_prof *prof, int tun, int idx)
268{
269 if (idx != prof->cnt && tun < ICE_FD_HW_SEG_MAX) {
270 int i;
271
272 for (i = idx; i < (prof->cnt - 1); i++) {
273 u64 old_entry_h;
274
275 old_entry_h = prof->entry_h[i + 1][tun];
276 prof->entry_h[i][tun] = old_entry_h;
277 prof->vsi_h[i] = prof->vsi_h[i + 1];
278 }
279
280 prof->entry_h[i][tun] = 0;
281 prof->vsi_h[i] = 0;
282 }
283}
284
/**
 * ice_fdir_rem_adq_chnl - remove an ADQ channel from HW filter rules
 * @hw: hardware structure containing filter list
 * @vsi_idx: VSI handle
 *
 * For every flow-type profile, clear the entry belonging to @vsi_idx in
 * each tunnel segment, compact the remaining entries, and remove the VSI
 * from the corresponding flow profile.
 */
void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx)
{
	int status, flow;

	/* nothing to do when no FDir profiles were ever allocated */
	if (!hw->fdir_prof)
		return;

	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
		struct ice_fd_hw_prof *prof = hw->fdir_prof[flow];
		int tun, i;

		if (!prof || !prof->cnt)
			continue;

		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
			u64 prof_id;

			/* profile IDs are partitioned per tunnel level */
			prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;

			for (i = 0; i < prof->cnt; i++) {
				if (prof->vsi_h[i] != vsi_idx)
					continue;

				prof->entry_h[i][tun] = 0;
				prof->vsi_h[i] = 0;
				break;
			}

			/* after clearing FDir entries update the remaining */
			ice_fdir_remap_entries(prof, tun, idx: i);

			/* find flow profile corresponding to prof_id and clear
			 * vsi_idx from bitmap.
			 */
			status = ice_flow_rem_vsi_prof(hw, vsi_handle: vsi_idx, prof_id);
			if (status) {
				dev_err(ice_hw_to_dev(hw), "ice_flow_rem_vsi_prof() failed status=%d\n",
					status);
			}
		}
		/* NOTE(review): cnt is decremented once per flow even if
		 * vsi_idx was not found in this profile - confirm callers
		 * only pass handles of previously added channel VSIs.
		 */
		prof->cnt--;
	}
}
333
334/**
335 * ice_fdir_get_hw_prof - return the ice_fd_hw_proc associated with a flow
336 * @hw: hardware structure containing the filter list
337 * @blk: hardware block
338 * @flow: FDir flow type to release
339 */
340static struct ice_fd_hw_prof *
341ice_fdir_get_hw_prof(struct ice_hw *hw, enum ice_block blk, int flow)
342{
343 if (blk == ICE_BLK_FD && hw->fdir_prof)
344 return hw->fdir_prof[flow];
345
346 return NULL;
347}
348
/**
 * ice_fdir_erase_flow_from_hw - remove a flow from the HW profile tables
 * @hw: hardware structure containing the filter list
 * @blk: hardware block
 * @flow: FDir flow type to release
 *
 * Remove all programmed flow entries and the flow profile itself from the
 * HW tables for every tunnel segment of @flow. Software state other than
 * entry_h is left intact so the flow can be replayed later.
 */
static void
ice_fdir_erase_flow_from_hw(struct ice_hw *hw, enum ice_block blk, int flow)
{
	struct ice_fd_hw_prof *prof = ice_fdir_get_hw_prof(hw, blk, flow);
	int tun;

	if (!prof)
		return;

	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
		u64 prof_id;
		int j;

		/* profile IDs are partitioned per tunnel level */
		prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
		for (j = 0; j < prof->cnt; j++) {
			u16 vsi_num;

			/* skip slots that never had an entry programmed */
			if (!prof->entry_h[j][tun] || !prof->vsi_h[j])
				continue;
			vsi_num = ice_get_hw_vsi_num(hw, vsi_handle: prof->vsi_h[j]);
			ice_rem_prof_id_flow(hw, blk, vsi: vsi_num, hdl: prof_id);
			ice_flow_rem_entry(hw, blk, entry_h: prof->entry_h[j][tun]);
			prof->entry_h[j][tun] = 0;
		}
		ice_flow_rem_prof(hw, blk, prof_id);
	}
}
382
/**
 * ice_fdir_rem_flow - release the ice_flow structures for a filter type
 * @hw: hardware structure containing the filter list
 * @blk: hardware block
 * @flow_type: FDir flow type to release
 *
 * In addition to erasing the HW tables, this frees the cached flow segment
 * descriptions and resets the software VSI bookkeeping for @flow_type.
 */
static void
ice_fdir_rem_flow(struct ice_hw *hw, enum ice_block blk,
		  enum ice_fltr_ptype flow_type)
{
	/* strip the FLOW_EXT bit; it does not select a separate profile */
	int flow = (int)flow_type & ~FLOW_EXT;
	struct ice_fd_hw_prof *prof;
	int tun, i;

	prof = ice_fdir_get_hw_prof(hw, blk, flow);
	if (!prof)
		return;

	ice_fdir_erase_flow_from_hw(hw, blk, flow);
	for (i = 0; i < prof->cnt; i++)
		prof->vsi_h[i] = 0;
	/* free the cached segment descriptions for every tunnel level */
	for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
		if (!prof->fdir_seg[tun])
			continue;
		devm_kfree(dev: ice_hw_to_dev(hw), p: prof->fdir_seg[tun]);
		prof->fdir_seg[tun] = NULL;
	}
	prof->cnt = 0;
}
412
413/**
414 * ice_fdir_release_flows - release all flows in use for later replay
415 * @hw: pointer to HW instance
416 */
417void ice_fdir_release_flows(struct ice_hw *hw)
418{
419 int flow;
420
421 /* release Flow Director HW table entries */
422 for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++)
423 ice_fdir_erase_flow_from_hw(hw, blk: ICE_BLK_FD, flow);
424}
425
/**
 * ice_fdir_replay_flows - replay HW Flow Director filter info
 * @hw: pointer to HW instance
 *
 * Re-add the flow profiles and per-VSI entries previously recorded in
 * hw->fdir_prof, e.g. after a reset has wiped the HW tables.
 */
void ice_fdir_replay_flows(struct ice_hw *hw)
{
	int flow;

	for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
		int tun;

		/* only replay flow types that had a populated profile */
		if (!hw->fdir_prof[flow] || !hw->fdir_prof[flow]->cnt)
			continue;
		for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
			struct ice_flow_prof *hw_prof;
			struct ice_fd_hw_prof *prof;
			u64 prof_id;
			int j;

			prof = hw->fdir_prof[flow];
			prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
			/* NOTE(review): the return value is ignored here; a
			 * failed profile add surfaces as entry-add errors
			 * below - confirm this is intentional.
			 */
			ice_flow_add_prof(hw, blk: ICE_BLK_FD, dir: ICE_FLOW_RX, prof_id,
					  segs: prof->fdir_seg[tun], TNL_SEG_CNT(tun),
					  prof: &hw_prof);
			for (j = 0; j < prof->cnt; j++) {
				enum ice_flow_priority prio;
				u64 entry_h = 0;
				int err;

				prio = ICE_FLOW_PRIO_NORMAL;
				err = ice_flow_add_entry(hw, blk: ICE_BLK_FD,
							 prof_id,
							 entry_id: prof->vsi_h[0],
							 vsi: prof->vsi_h[j],
							 prio, data: prof->fdir_seg,
							 entry_h: &entry_h);
				if (err) {
					dev_err(ice_hw_to_dev(hw), "Could not replay Flow Director, flow type %d\n",
						flow);
					continue;
				}
				prof->entry_h[j][tun] = entry_h;
			}
		}
	}
}
472
473/**
474 * ice_parse_rx_flow_user_data - deconstruct user-defined data
475 * @fsp: pointer to ethtool Rx flow specification
476 * @data: pointer to userdef data structure for storage
477 *
478 * Returns 0 on success, negative error value on failure
479 */
480static int
481ice_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
482 struct ice_rx_flow_userdef *data)
483{
484 u64 value, mask;
485
486 memset(data, 0, sizeof(*data));
487 if (!(fsp->flow_type & FLOW_EXT))
488 return 0;
489
490 value = be64_to_cpu(*((__force __be64 *)fsp->h_ext.data));
491 mask = be64_to_cpu(*((__force __be64 *)fsp->m_ext.data));
492 if (!mask)
493 return 0;
494
495#define ICE_USERDEF_FLEX_WORD_M GENMASK_ULL(15, 0)
496#define ICE_USERDEF_FLEX_OFFS_S 16
497#define ICE_USERDEF_FLEX_OFFS_M GENMASK_ULL(31, ICE_USERDEF_FLEX_OFFS_S)
498#define ICE_USERDEF_FLEX_FLTR_M GENMASK_ULL(31, 0)
499
500 /* 0x1fe is the maximum value for offsets stored in the internal
501 * filtering tables.
502 */
503#define ICE_USERDEF_FLEX_MAX_OFFS_VAL 0x1fe
504
505 if (!ice_is_mask_valid(mask, ICE_USERDEF_FLEX_FLTR_M) ||
506 value > ICE_USERDEF_FLEX_FLTR_M)
507 return -EINVAL;
508
509 data->flex_word = value & ICE_USERDEF_FLEX_WORD_M;
510 data->flex_offset = (value & ICE_USERDEF_FLEX_OFFS_M) >>
511 ICE_USERDEF_FLEX_OFFS_S;
512 if (data->flex_offset > ICE_USERDEF_FLEX_MAX_OFFS_VAL)
513 return -EINVAL;
514
515 data->flex_fltr = true;
516
517 return 0;
518}
519
/**
 * ice_fdir_num_avail_fltr - return the number of unused flow director filters
 * @hw: pointer to hardware structure
 * @vsi: software VSI structure
 *
 * There are 2 filter pools: guaranteed and best effort(shared). Each VSI can
 * use filters from either pool. The guaranteed pool is divided between VSIs.
 * The best effort filter pool is common to all VSIs and is a device shared
 * resource pool. The number of filters available to this VSI is the sum of
 * the VSIs guaranteed filter pool and the global available best effort
 * filter pool.
 *
 * Returns the number of available flow director filters to this VSI
 */
static int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
{
	u16 vsi_num = ice_get_hw_vsi_num(hw, vsi_handle: vsi->idx);
	u16 num_guar;
	u16 num_be;

	/* total guaranteed filters assigned to this VSI */
	num_guar = vsi->num_gfltr;

	/* total global best effort filters */
	num_be = hw->func_caps.fd_fltr_best_effort;

	/* Subtract the number of programmed filters from the global values.
	 * The in-use counter register layout differs per MAC generation;
	 * E810 shares the default (E800) layout.
	 */
	switch (hw->mac_type) {
	case ICE_MAC_E830:
		num_guar -= FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M,
				      rd32(hw, VSIQF_FD_CNT(vsi_num)));
		num_be -= FIELD_GET(E830_GLQF_FD_CNT_FD_BCNT_M,
				    rd32(hw, GLQF_FD_CNT));
		break;
	case ICE_MAC_E810:
	default:
		num_guar -= FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M,
				      rd32(hw, VSIQF_FD_CNT(vsi_num)));
		num_be -= FIELD_GET(E800_GLQF_FD_CNT_FD_BCNT_M,
				    rd32(hw, GLQF_FD_CNT));
	}

	return num_guar + num_be;
}
564
/**
 * ice_fdir_alloc_flow_prof - allocate FDir flow profile structure(s)
 * @hw: HW structure containing the FDir flow profile structure(s)
 * @flow: flow type to allocate the flow profile for
 *
 * Allocate the fdir_prof and fdir_prof[flow] if not already created. Return 0
 * on success and negative on error. Allocations are device-managed and are
 * released automatically with the device.
 */
static int
ice_fdir_alloc_flow_prof(struct ice_hw *hw, enum ice_fltr_ptype flow)
{
	if (!hw)
		return -EINVAL;

	/* lazily allocate the per-flow-type pointer table */
	if (!hw->fdir_prof) {
		hw->fdir_prof = devm_kcalloc(dev: ice_hw_to_dev(hw),
					     n: ICE_FLTR_PTYPE_MAX,
					     size: sizeof(*hw->fdir_prof),
					     GFP_KERNEL);
		if (!hw->fdir_prof)
			return -ENOMEM;
	}

	/* lazily allocate the profile for this specific flow type */
	if (!hw->fdir_prof[flow]) {
		hw->fdir_prof[flow] = devm_kzalloc(dev: ice_hw_to_dev(hw),
						   size: sizeof(**hw->fdir_prof),
						   GFP_KERNEL);
		if (!hw->fdir_prof[flow])
			return -ENOMEM;
	}

	return 0;
}
598
599/**
600 * ice_fdir_prof_vsi_idx - find or insert a vsi_idx in structure
601 * @prof: pointer to flow director HW profile
602 * @vsi_idx: vsi_idx to locate
603 *
604 * return the index of the vsi_idx. if vsi_idx is not found insert it
605 * into the vsi_h table.
606 */
607static u16
608ice_fdir_prof_vsi_idx(struct ice_fd_hw_prof *prof, int vsi_idx)
609{
610 u16 idx = 0;
611
612 for (idx = 0; idx < prof->cnt; idx++)
613 if (prof->vsi_h[idx] == vsi_idx)
614 return idx;
615
616 if (idx == prof->cnt)
617 prof->vsi_h[prof->cnt++] = vsi_idx;
618 return idx;
619}
620
/**
 * ice_fdir_set_hw_fltr_rule - Configure HW tables to generate a FDir rule
 * @pf: pointer to the PF structure
 * @seg: protocol header description pointer
 * @flow: filter enum
 * @tun: FDir segment to program
 *
 * Add the flow profile and one flow entry per participating VSI (main,
 * control, and any active ADQ channel VSIs) for the given tunnel segment.
 * On success the profile takes ownership of @seg. On failure all partially
 * programmed entries are rolled back.
 *
 * Returns 0 on success, -EEXIST when the identical input set is already
 * programmed, or another negative error code on failure.
 */
static int
ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
			  enum ice_fltr_ptype flow, enum ice_fd_hw_seg tun)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *main_vsi, *ctrl_vsi;
	struct ice_flow_seg_info *old_seg;
	struct ice_flow_prof *prof = NULL;
	struct ice_fd_hw_prof *hw_prof;
	struct ice_hw *hw = &pf->hw;
	u64 entry1_h = 0;
	u64 entry2_h = 0;
	bool del_last;
	u64 prof_id;
	int err;
	int idx;

	main_vsi = ice_get_main_vsi(pf);
	if (!main_vsi)
		return -EINVAL;

	ctrl_vsi = ice_get_ctrl_vsi(pf);
	if (!ctrl_vsi)
		return -EINVAL;

	err = ice_fdir_alloc_flow_prof(hw, flow);
	if (err)
		return err;

	hw_prof = hw->fdir_prof[flow];
	old_seg = hw_prof->fdir_seg[tun];
	if (old_seg) {
		/* This flow_type already has a changed input set.
		 * If it matches the requested input set then we are
		 * done. Or, if it's different then it's an error.
		 */
		if (!memcmp(p: old_seg, q: seg, size: sizeof(*seg)))
			return -EEXIST;

		/* if there are FDir filters using this flow,
		 * then return error.
		 */
		if (hw->fdir_fltr_cnt[flow]) {
			dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");
			return -EINVAL;
		}

		if (ice_is_arfs_using_perfect_flow(hw, flow_type: flow)) {
			dev_err(dev, "aRFS using perfect flow type %d, cannot change input set\n",
				flow);
			return -EINVAL;
		}

		/* remove HW filter definition */
		ice_fdir_rem_flow(hw, blk: ICE_BLK_FD, flow_type: flow);
	}

	/* Adding a profile, but there is only one header supported.
	 * That is the final parameters are 1 header (segment), no
	 * actions (NULL) and zero actions 0.
	 */
	prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
	err = ice_flow_add_prof(hw, blk: ICE_BLK_FD, dir: ICE_FLOW_RX, prof_id, segs: seg,
				TNL_SEG_CNT(tun), prof: &prof);
	if (err)
		return err;
	err = ice_flow_add_entry(hw, blk: ICE_BLK_FD, prof_id, entry_id: main_vsi->idx,
				 vsi: main_vsi->idx, prio: ICE_FLOW_PRIO_NORMAL,
				 data: seg, entry_h: &entry1_h);
	if (err)
		goto err_prof;
	err = ice_flow_add_entry(hw, blk: ICE_BLK_FD, prof_id, entry_id: main_vsi->idx,
				 vsi: ctrl_vsi->idx, prio: ICE_FLOW_PRIO_NORMAL,
				 data: seg, entry_h: &entry2_h);
	if (err)
		goto err_entry;

	/* slots 0 and 1 are reserved for the main and control VSIs */
	hw_prof->fdir_seg[tun] = seg;
	hw_prof->entry_h[0][tun] = entry1_h;
	hw_prof->entry_h[1][tun] = entry2_h;
	hw_prof->vsi_h[0] = main_vsi->idx;
	hw_prof->vsi_h[1] = ctrl_vsi->idx;
	if (!hw_prof->cnt)
		hw_prof->cnt = 2;

	/* when ADQ is active, also add an entry for each channel VSI */
	for (idx = 1; idx < ICE_CHNL_MAX_TC; idx++) {
		u16 vsi_idx;
		u16 vsi_h;

		if (!ice_is_adq_active(pf) || !main_vsi->tc_map_vsi[idx])
			continue;

		entry1_h = 0;
		vsi_h = main_vsi->tc_map_vsi[idx]->idx;
		err = ice_flow_add_entry(hw, blk: ICE_BLK_FD, prof_id,
					 entry_id: main_vsi->idx, vsi: vsi_h,
					 prio: ICE_FLOW_PRIO_NORMAL, data: seg,
					 entry_h: &entry1_h);
		if (err) {
			dev_err(dev, "Could not add Channel VSI %d to flow group\n",
				idx);
			goto err_unroll;
		}

		vsi_idx = ice_fdir_prof_vsi_idx(prof: hw_prof,
						vsi_idx: main_vsi->tc_map_vsi[idx]->idx);
		hw_prof->entry_h[vsi_idx][tun] = entry1_h;
	}

	return 0;

err_unroll:
	entry1_h = 0;
	hw_prof->fdir_seg[tun] = NULL;

	/* The variable del_last will be used to determine when to clean up
	 * the VSI group data. The VSI data is not needed if there are no
	 * segments.
	 */
	del_last = true;
	for (idx = 0; idx < ICE_FD_HW_SEG_MAX; idx++)
		if (hw_prof->fdir_seg[idx]) {
			del_last = false;
			break;
		}

	/* unwind every entry programmed for this tunnel segment */
	for (idx = 0; idx < hw_prof->cnt; idx++) {
		u16 vsi_num = ice_get_hw_vsi_num(hw, vsi_handle: hw_prof->vsi_h[idx]);

		if (!hw_prof->entry_h[idx][tun])
			continue;
		ice_rem_prof_id_flow(hw, blk: ICE_BLK_FD, vsi: vsi_num, hdl: prof_id);
		ice_flow_rem_entry(hw, blk: ICE_BLK_FD, entry_h: hw_prof->entry_h[idx][tun]);
		hw_prof->entry_h[idx][tun] = 0;
		if (del_last)
			hw_prof->vsi_h[idx] = 0;
	}
	if (del_last)
		hw_prof->cnt = 0;
err_entry:
	ice_rem_prof_id_flow(hw, blk: ICE_BLK_FD,
			     vsi: ice_get_hw_vsi_num(hw, vsi_handle: main_vsi->idx), hdl: prof_id);
	ice_flow_rem_entry(hw, blk: ICE_BLK_FD, entry_h: entry1_h);
err_prof:
	ice_flow_rem_prof(hw, blk: ICE_BLK_FD, prof_id);
	dev_err(dev, "Failed to add filter. Flow director filters on each port must have the same input set.\n");

	return err;
}
777
/**
 * ice_set_init_fdir_seg
 * @seg: flow segment for programming
 * @l3_proto: ICE_FLOW_SEG_HDR_IPV4 or ICE_FLOW_SEG_HDR_IPV6
 * @l4_proto: ICE_FLOW_SEG_HDR_TCP or ICE_FLOW_SEG_HDR_UDP
 *
 * Set the configuration for perfect filters to the provided flow segment for
 * programming the HW filter. This is to be called only when initializing
 * filters as this function assumes no filters exist.
 *
 * Returns 0 on success, -EINVAL for a NULL segment or an unsupported
 * L3/L4 protocol combination.
 */
static int
ice_set_init_fdir_seg(struct ice_flow_seg_info *seg,
		      enum ice_flow_seg_hdr l3_proto,
		      enum ice_flow_seg_hdr l4_proto)
{
	enum ice_flow_field src_addr, dst_addr, src_port, dst_port;

	if (!seg)
		return -EINVAL;

	/* select the address fields matching the L3 protocol */
	if (l3_proto == ICE_FLOW_SEG_HDR_IPV4) {
		src_addr = ICE_FLOW_FIELD_IDX_IPV4_SA;
		dst_addr = ICE_FLOW_FIELD_IDX_IPV4_DA;
	} else if (l3_proto == ICE_FLOW_SEG_HDR_IPV6) {
		src_addr = ICE_FLOW_FIELD_IDX_IPV6_SA;
		dst_addr = ICE_FLOW_FIELD_IDX_IPV6_DA;
	} else {
		return -EINVAL;
	}

	/* select the port fields matching the L4 protocol */
	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
	} else {
		return -EINVAL;
	}

	ICE_FLOW_SET_HDRS(seg, l3_proto | l4_proto);

	/* IP source address */
	ice_flow_set_fld(seg, fld: src_addr, ICE_FLOW_FLD_OFF_INVAL,
			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, range: false);

	/* IP destination address */
	ice_flow_set_fld(seg, fld: dst_addr, ICE_FLOW_FLD_OFF_INVAL,
			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, range: false);

	/* Layer 4 source port */
	ice_flow_set_fld(seg, fld: src_port, ICE_FLOW_FLD_OFF_INVAL,
			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, range: false);

	/* Layer 4 destination port */
	ice_flow_set_fld(seg, fld: dst_port, ICE_FLOW_FLD_OFF_INVAL,
			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, range: false);

	return 0;
}
838
/**
 * ice_create_init_fdir_rule
 * @pf: PF structure
 * @flow: filter enum
 *
 * Build the default (perfect-filter) input set for @flow and program it
 * into the HW tables for both the non-tunneled and the tunneled segment.
 *
 * Return error value or 0 on success.
 */
static int
ice_create_init_fdir_rule(struct ice_pf *pf, enum ice_fltr_ptype flow)
{
	struct ice_flow_seg_info *seg, *tun_seg;
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int ret;

	/* if there is already a filter rule for kind return -EINVAL */
	if (hw->fdir_prof && hw->fdir_prof[flow] &&
	    hw->fdir_prof[flow]->fdir_seg[0])
		return -EINVAL;

	seg = devm_kzalloc(dev, size: sizeof(*seg), GFP_KERNEL);
	if (!seg)
		return -ENOMEM;

	/* tunneled filters need one segment per tunnel level */
	tun_seg = devm_kcalloc(dev, n: ICE_FD_HW_SEG_MAX, size: sizeof(*tun_seg),
			       GFP_KERNEL);
	if (!tun_seg) {
		devm_kfree(dev, p: seg);
		return -ENOMEM;
	}

	if (flow == ICE_FLTR_PTYPE_NONF_IPV4_TCP)
		ret = ice_set_init_fdir_seg(seg, l3_proto: ICE_FLOW_SEG_HDR_IPV4,
					    l4_proto: ICE_FLOW_SEG_HDR_TCP);
	else if (flow == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
		ret = ice_set_init_fdir_seg(seg, l3_proto: ICE_FLOW_SEG_HDR_IPV4,
					    l4_proto: ICE_FLOW_SEG_HDR_UDP);
	else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_TCP)
		ret = ice_set_init_fdir_seg(seg, l3_proto: ICE_FLOW_SEG_HDR_IPV6,
					    l4_proto: ICE_FLOW_SEG_HDR_TCP);
	else if (flow == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
		ret = ice_set_init_fdir_seg(seg, l3_proto: ICE_FLOW_SEG_HDR_IPV6,
					    l4_proto: ICE_FLOW_SEG_HDR_UDP);
	else
		ret = -EINVAL;
	if (ret)
		goto err_exit;

	/* add filter for outer headers */
	ret = ice_fdir_set_hw_fltr_rule(pf, seg, flow, tun: ICE_FD_HW_SEG_NON_TUN);
	if (ret)
		/* could not write filter, free memory */
		goto err_exit;

	/* make tunneled filter HW entries if possible; the inner-header
	 * segment (index 1) reuses the same input set
	 */
	memcpy(&tun_seg[1], seg, sizeof(*seg));
	ret = ice_fdir_set_hw_fltr_rule(pf, seg: tun_seg, flow, tun: ICE_FD_HW_SEG_TUN);
	if (ret)
		/* could not write tunnel filter, but outer header filter
		 * exists
		 */
		devm_kfree(dev, p: tun_seg);

	set_bit(nr: flow, addr: hw->fdir_perfect_fltr);
	return ret;
err_exit:
	devm_kfree(dev, p: tun_seg);
	devm_kfree(dev, p: seg);

	/* NOTE(review): the specific ret value is collapsed to -EOPNOTSUPP
	 * on this path - confirm callers do not need the original code.
	 */
	return -EOPNOTSUPP;
}
910
/**
 * ice_set_fdir_ip4_seg
 * @seg: flow segment for programming
 * @tcp_ip4_spec: mask data from ethtool
 * @l4_proto: Layer 4 protocol to program
 * @perfect_fltr: only valid on success; returns true if perfect filter,
 *		  false if not
 *
 * Set the mask data into the flow segment to be used to program HW
 * table based on provided L4 protocol for IPv4. Each field mask must be
 * either all-ones (field is matched) or all-zero (field is wildcarded);
 * partial masks are rejected with -EOPNOTSUPP.
 */
static int
ice_set_fdir_ip4_seg(struct ice_flow_seg_info *seg,
		     struct ethtool_tcpip4_spec *tcp_ip4_spec,
		     enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr)
{
	enum ice_flow_field src_port, dst_port;

	/* make sure we don't have any empty rule */
	if (!tcp_ip4_spec->psrc && !tcp_ip4_spec->ip4src &&
	    !tcp_ip4_spec->pdst && !tcp_ip4_spec->ip4dst)
		return -EINVAL;

	/* filtering on TOS not supported */
	if (tcp_ip4_spec->tos)
		return -EOPNOTSUPP;

	/* select the port fields matching the L4 protocol */
	if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
		src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
	} else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
		src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
	} else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) {
		src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT;
		dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT;
	} else {
		return -EOPNOTSUPP;
	}

	/* a filter is "perfect" until any field turns out wildcarded */
	*perfect_fltr = true;
	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 | l4_proto);

	/* IP source address */
	if (tcp_ip4_spec->ip4src == htonl(0xFFFFFFFF))
		ice_flow_set_fld(seg, fld: ICE_FLOW_FIELD_IDX_IPV4_SA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, range: false);
	else if (!tcp_ip4_spec->ip4src)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* IP destination address */
	if (tcp_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
		ice_flow_set_fld(seg, fld: ICE_FLOW_FIELD_IDX_IPV4_DA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, range: false);
	else if (!tcp_ip4_spec->ip4dst)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* Layer 4 source port */
	if (tcp_ip4_spec->psrc == htons(0xFFFF))
		ice_flow_set_fld(seg, fld: src_port, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 range: false);
	else if (!tcp_ip4_spec->psrc)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* Layer 4 destination port */
	if (tcp_ip4_spec->pdst == htons(0xFFFF))
		ice_flow_set_fld(seg, fld: dst_port, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 range: false);
	else if (!tcp_ip4_spec->pdst)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	return 0;
}
996
/**
 * ice_set_fdir_ip4_usr_seg
 * @seg: flow segment for programming
 * @usr_ip4_spec: ethtool userdef packet offset
 * @perfect_fltr: only valid on success; returns true if perfect filter,
 *		  false if not
 *
 * Set the offset data into the flow segment to be used to program HW
 * table for IPv4. Only the source/destination address fields are
 * supported, each fully masked or fully wildcarded.
 */
static int
ice_set_fdir_ip4_usr_seg(struct ice_flow_seg_info *seg,
			 struct ethtool_usrip4_spec *usr_ip4_spec,
			 bool *perfect_fltr)
{
	/* first 4 bytes of Layer 4 header */
	if (usr_ip4_spec->l4_4_bytes)
		return -EINVAL;
	if (usr_ip4_spec->tos)
		return -EINVAL;
	if (usr_ip4_spec->ip_ver)
		return -EINVAL;
	/* Filtering on Layer 4 protocol not supported */
	if (usr_ip4_spec->proto)
		return -EOPNOTSUPP;
	/* empty rules are not valid */
	if (!usr_ip4_spec->ip4src && !usr_ip4_spec->ip4dst)
		return -EINVAL;

	/* a filter is "perfect" until any field turns out wildcarded */
	*perfect_fltr = true;
	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);

	/* IP source address */
	if (usr_ip4_spec->ip4src == htonl(0xFFFFFFFF))
		ice_flow_set_fld(seg, fld: ICE_FLOW_FIELD_IDX_IPV4_SA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, range: false);
	else if (!usr_ip4_spec->ip4src)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	/* IP destination address */
	if (usr_ip4_spec->ip4dst == htonl(0xFFFFFFFF))
		ice_flow_set_fld(seg, fld: ICE_FLOW_FIELD_IDX_IPV4_DA,
				 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, range: false);
	else if (!usr_ip4_spec->ip4dst)
		*perfect_fltr = false;
	else
		return -EOPNOTSUPP;

	return 0;
}
1051
1052/**
1053 * ice_set_fdir_ip6_seg
1054 * @seg: flow segment for programming
1055 * @tcp_ip6_spec: mask data from ethtool
1056 * @l4_proto: Layer 4 protocol to program
1057 * @perfect_fltr: only valid on success; returns true if perfect filter,
1058 * false if not
1059 *
1060 * Set the mask data into the flow segment to be used to program HW
1061 * table based on provided L4 protocol for IPv6
1062 */
1063static int
1064ice_set_fdir_ip6_seg(struct ice_flow_seg_info *seg,
1065 struct ethtool_tcpip6_spec *tcp_ip6_spec,
1066 enum ice_flow_seg_hdr l4_proto, bool *perfect_fltr)
1067{
1068 enum ice_flow_field src_port, dst_port;
1069
1070 /* make sure we don't have any empty rule */
1071 if (!memcmp(p: tcp_ip6_spec->ip6src, q: &zero_ipv6_addr_mask,
1072 size: sizeof(struct in6_addr)) &&
1073 !memcmp(p: tcp_ip6_spec->ip6dst, q: &zero_ipv6_addr_mask,
1074 size: sizeof(struct in6_addr)) &&
1075 !tcp_ip6_spec->psrc && !tcp_ip6_spec->pdst)
1076 return -EINVAL;
1077
1078 /* filtering on TC not supported */
1079 if (tcp_ip6_spec->tclass)
1080 return -EOPNOTSUPP;
1081
1082 if (l4_proto == ICE_FLOW_SEG_HDR_TCP) {
1083 src_port = ICE_FLOW_FIELD_IDX_TCP_SRC_PORT;
1084 dst_port = ICE_FLOW_FIELD_IDX_TCP_DST_PORT;
1085 } else if (l4_proto == ICE_FLOW_SEG_HDR_UDP) {
1086 src_port = ICE_FLOW_FIELD_IDX_UDP_SRC_PORT;
1087 dst_port = ICE_FLOW_FIELD_IDX_UDP_DST_PORT;
1088 } else if (l4_proto == ICE_FLOW_SEG_HDR_SCTP) {
1089 src_port = ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT;
1090 dst_port = ICE_FLOW_FIELD_IDX_SCTP_DST_PORT;
1091 } else {
1092 return -EINVAL;
1093 }
1094
1095 *perfect_fltr = true;
1096 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 | l4_proto);
1097
1098 if (!memcmp(p: tcp_ip6_spec->ip6src, q: &full_ipv6_addr_mask,
1099 size: sizeof(struct in6_addr)))
1100 ice_flow_set_fld(seg, fld: ICE_FLOW_FIELD_IDX_IPV6_SA,
1101 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1102 ICE_FLOW_FLD_OFF_INVAL, range: false);
1103 else if (!memcmp(p: tcp_ip6_spec->ip6src, q: &zero_ipv6_addr_mask,
1104 size: sizeof(struct in6_addr)))
1105 *perfect_fltr = false;
1106 else
1107 return -EOPNOTSUPP;
1108
1109 if (!memcmp(p: tcp_ip6_spec->ip6dst, q: &full_ipv6_addr_mask,
1110 size: sizeof(struct in6_addr)))
1111 ice_flow_set_fld(seg, fld: ICE_FLOW_FIELD_IDX_IPV6_DA,
1112 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1113 ICE_FLOW_FLD_OFF_INVAL, range: false);
1114 else if (!memcmp(p: tcp_ip6_spec->ip6dst, q: &zero_ipv6_addr_mask,
1115 size: sizeof(struct in6_addr)))
1116 *perfect_fltr = false;
1117 else
1118 return -EOPNOTSUPP;
1119
1120 /* Layer 4 source port */
1121 if (tcp_ip6_spec->psrc == htons(0xFFFF))
1122 ice_flow_set_fld(seg, fld: src_port, ICE_FLOW_FLD_OFF_INVAL,
1123 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1124 range: false);
1125 else if (!tcp_ip6_spec->psrc)
1126 *perfect_fltr = false;
1127 else
1128 return -EOPNOTSUPP;
1129
1130 /* Layer 4 destination port */
1131 if (tcp_ip6_spec->pdst == htons(0xFFFF))
1132 ice_flow_set_fld(seg, fld: dst_port, ICE_FLOW_FLD_OFF_INVAL,
1133 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1134 range: false);
1135 else if (!tcp_ip6_spec->pdst)
1136 *perfect_fltr = false;
1137 else
1138 return -EOPNOTSUPP;
1139
1140 return 0;
1141}
1142
1143/**
1144 * ice_set_fdir_ip6_usr_seg
1145 * @seg: flow segment for programming
1146 * @usr_ip6_spec: ethtool userdef packet offset
1147 * @perfect_fltr: only valid on success; returns true if perfect filter,
1148 * false if not
1149 *
1150 * Set the offset data into the flow segment to be used to program HW
1151 * table for IPv6
1152 */
1153static int
1154ice_set_fdir_ip6_usr_seg(struct ice_flow_seg_info *seg,
1155 struct ethtool_usrip6_spec *usr_ip6_spec,
1156 bool *perfect_fltr)
1157{
1158 /* filtering on Layer 4 bytes not supported */
1159 if (usr_ip6_spec->l4_4_bytes)
1160 return -EOPNOTSUPP;
1161 /* filtering on TC not supported */
1162 if (usr_ip6_spec->tclass)
1163 return -EOPNOTSUPP;
1164 /* filtering on Layer 4 protocol not supported */
1165 if (usr_ip6_spec->l4_proto)
1166 return -EOPNOTSUPP;
1167 /* empty rules are not valid */
1168 if (!memcmp(p: usr_ip6_spec->ip6src, q: &zero_ipv6_addr_mask,
1169 size: sizeof(struct in6_addr)) &&
1170 !memcmp(p: usr_ip6_spec->ip6dst, q: &zero_ipv6_addr_mask,
1171 size: sizeof(struct in6_addr)))
1172 return -EINVAL;
1173
1174 *perfect_fltr = true;
1175 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
1176
1177 if (!memcmp(p: usr_ip6_spec->ip6src, q: &full_ipv6_addr_mask,
1178 size: sizeof(struct in6_addr)))
1179 ice_flow_set_fld(seg, fld: ICE_FLOW_FIELD_IDX_IPV6_SA,
1180 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1181 ICE_FLOW_FLD_OFF_INVAL, range: false);
1182 else if (!memcmp(p: usr_ip6_spec->ip6src, q: &zero_ipv6_addr_mask,
1183 size: sizeof(struct in6_addr)))
1184 *perfect_fltr = false;
1185 else
1186 return -EOPNOTSUPP;
1187
1188 if (!memcmp(p: usr_ip6_spec->ip6dst, q: &full_ipv6_addr_mask,
1189 size: sizeof(struct in6_addr)))
1190 ice_flow_set_fld(seg, fld: ICE_FLOW_FIELD_IDX_IPV6_DA,
1191 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
1192 ICE_FLOW_FLD_OFF_INVAL, range: false);
1193 else if (!memcmp(p: usr_ip6_spec->ip6dst, q: &zero_ipv6_addr_mask,
1194 size: sizeof(struct in6_addr)))
1195 *perfect_fltr = false;
1196 else
1197 return -EOPNOTSUPP;
1198
1199 return 0;
1200}
1201
1202/**
1203 * ice_cfg_fdir_xtrct_seq - Configure extraction sequence for the given filter
1204 * @pf: PF structure
1205 * @fsp: pointer to ethtool Rx flow specification
1206 * @user: user defined data from flow specification
1207 *
1208 * Returns 0 on success.
1209 */
1210static int
1211ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
1212 struct ice_rx_flow_userdef *user)
1213{
1214 struct ice_flow_seg_info *seg, *tun_seg;
1215 struct device *dev = ice_pf_to_dev(pf);
1216 enum ice_fltr_ptype fltr_idx;
1217 struct ice_hw *hw = &pf->hw;
1218 bool perfect_filter;
1219 int ret;
1220
1221 seg = devm_kzalloc(dev, size: sizeof(*seg), GFP_KERNEL);
1222 if (!seg)
1223 return -ENOMEM;
1224
1225 tun_seg = devm_kcalloc(dev, n: ICE_FD_HW_SEG_MAX, size: sizeof(*tun_seg),
1226 GFP_KERNEL);
1227 if (!tun_seg) {
1228 devm_kfree(dev, p: seg);
1229 return -ENOMEM;
1230 }
1231
1232 switch (fsp->flow_type & ~FLOW_EXT) {
1233 case TCP_V4_FLOW:
1234 ret = ice_set_fdir_ip4_seg(seg, tcp_ip4_spec: &fsp->m_u.tcp_ip4_spec,
1235 l4_proto: ICE_FLOW_SEG_HDR_TCP,
1236 perfect_fltr: &perfect_filter);
1237 break;
1238 case UDP_V4_FLOW:
1239 ret = ice_set_fdir_ip4_seg(seg, tcp_ip4_spec: &fsp->m_u.tcp_ip4_spec,
1240 l4_proto: ICE_FLOW_SEG_HDR_UDP,
1241 perfect_fltr: &perfect_filter);
1242 break;
1243 case SCTP_V4_FLOW:
1244 ret = ice_set_fdir_ip4_seg(seg, tcp_ip4_spec: &fsp->m_u.tcp_ip4_spec,
1245 l4_proto: ICE_FLOW_SEG_HDR_SCTP,
1246 perfect_fltr: &perfect_filter);
1247 break;
1248 case IPV4_USER_FLOW:
1249 ret = ice_set_fdir_ip4_usr_seg(seg, usr_ip4_spec: &fsp->m_u.usr_ip4_spec,
1250 perfect_fltr: &perfect_filter);
1251 break;
1252 case TCP_V6_FLOW:
1253 ret = ice_set_fdir_ip6_seg(seg, tcp_ip6_spec: &fsp->m_u.tcp_ip6_spec,
1254 l4_proto: ICE_FLOW_SEG_HDR_TCP,
1255 perfect_fltr: &perfect_filter);
1256 break;
1257 case UDP_V6_FLOW:
1258 ret = ice_set_fdir_ip6_seg(seg, tcp_ip6_spec: &fsp->m_u.tcp_ip6_spec,
1259 l4_proto: ICE_FLOW_SEG_HDR_UDP,
1260 perfect_fltr: &perfect_filter);
1261 break;
1262 case SCTP_V6_FLOW:
1263 ret = ice_set_fdir_ip6_seg(seg, tcp_ip6_spec: &fsp->m_u.tcp_ip6_spec,
1264 l4_proto: ICE_FLOW_SEG_HDR_SCTP,
1265 perfect_fltr: &perfect_filter);
1266 break;
1267 case IPV6_USER_FLOW:
1268 ret = ice_set_fdir_ip6_usr_seg(seg, usr_ip6_spec: &fsp->m_u.usr_ip6_spec,
1269 perfect_fltr: &perfect_filter);
1270 break;
1271 default:
1272 ret = -EINVAL;
1273 }
1274 if (ret)
1275 goto err_exit;
1276
1277 /* tunnel segments are shifted up one. */
1278 memcpy(&tun_seg[1], seg, sizeof(*seg));
1279
1280 if (user && user->flex_fltr) {
1281 perfect_filter = false;
1282 ice_flow_add_fld_raw(seg, off: user->flex_offset,
1283 ICE_FLTR_PRGM_FLEX_WORD_SIZE,
1284 ICE_FLOW_FLD_OFF_INVAL,
1285 ICE_FLOW_FLD_OFF_INVAL);
1286 ice_flow_add_fld_raw(seg: &tun_seg[1], off: user->flex_offset,
1287 ICE_FLTR_PRGM_FLEX_WORD_SIZE,
1288 ICE_FLOW_FLD_OFF_INVAL,
1289 ICE_FLOW_FLD_OFF_INVAL);
1290 }
1291
1292 fltr_idx = ice_ethtool_flow_to_fltr(eth: fsp->flow_type & ~FLOW_EXT);
1293
1294 assign_bit(nr: fltr_idx, addr: hw->fdir_perfect_fltr, value: perfect_filter);
1295
1296 /* add filter for outer headers */
1297 ret = ice_fdir_set_hw_fltr_rule(pf, seg, flow: fltr_idx,
1298 tun: ICE_FD_HW_SEG_NON_TUN);
1299 if (ret == -EEXIST) {
1300 /* Rule already exists, free memory and count as success */
1301 ret = 0;
1302 goto err_exit;
1303 } else if (ret) {
1304 /* could not write filter, free memory */
1305 goto err_exit;
1306 }
1307
1308 /* make tunneled filter HW entries if possible */
1309 memcpy(&tun_seg[1], seg, sizeof(*seg));
1310 ret = ice_fdir_set_hw_fltr_rule(pf, seg: tun_seg, flow: fltr_idx,
1311 tun: ICE_FD_HW_SEG_TUN);
1312 if (ret == -EEXIST) {
1313 /* Rule already exists, free memory and count as success */
1314 devm_kfree(dev, p: tun_seg);
1315 ret = 0;
1316 } else if (ret) {
1317 /* could not write tunnel filter, but outer filter exists */
1318 devm_kfree(dev, p: tun_seg);
1319 }
1320
1321 return ret;
1322
1323err_exit:
1324 devm_kfree(dev, p: tun_seg);
1325 devm_kfree(dev, p: seg);
1326
1327 return ret;
1328}
1329
1330/**
1331 * ice_update_per_q_fltr
1332 * @vsi: ptr to VSI
1333 * @q_index: queue index
1334 * @inc: true to increment or false to decrement per queue filter count
1335 *
1336 * This function is used to keep track of per queue sideband filters
1337 */
1338static void ice_update_per_q_fltr(struct ice_vsi *vsi, u32 q_index, bool inc)
1339{
1340 struct ice_rx_ring *rx_ring;
1341
1342 if (!vsi->num_rxq || q_index >= vsi->num_rxq)
1343 return;
1344
1345 rx_ring = vsi->rx_rings[q_index];
1346 if (!rx_ring || !rx_ring->ch)
1347 return;
1348
1349 if (inc)
1350 atomic_inc(v: &rx_ring->ch->num_sb_fltr);
1351 else
1352 atomic_dec_if_positive(v: &rx_ring->ch->num_sb_fltr);
1353}
1354
1355/**
1356 * ice_fdir_write_fltr - send a flow director filter to the hardware
1357 * @pf: PF data structure
1358 * @input: filter structure
1359 * @add: true adds filter and false removed filter
1360 * @is_tun: true adds inner filter on tunnel and false outer headers
1361 *
1362 * returns 0 on success and negative value on error
1363 */
1364int
1365ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
1366 bool is_tun)
1367{
1368 struct device *dev = ice_pf_to_dev(pf);
1369 struct ice_hw *hw = &pf->hw;
1370 struct ice_fltr_desc desc;
1371 struct ice_vsi *ctrl_vsi;
1372 u8 *pkt, *frag_pkt;
1373 bool has_frag;
1374 int err;
1375
1376 ctrl_vsi = ice_get_ctrl_vsi(pf);
1377 if (!ctrl_vsi)
1378 return -EINVAL;
1379
1380 pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
1381 if (!pkt)
1382 return -ENOMEM;
1383 frag_pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
1384 if (!frag_pkt) {
1385 err = -ENOMEM;
1386 goto err_free;
1387 }
1388
1389 ice_fdir_get_prgm_desc(hw, input, fdesc: &desc, add);
1390 err = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, frag: false, tun: is_tun);
1391 if (err)
1392 goto err_free_all;
1393 err = ice_prgm_fdir_fltr(vsi: ctrl_vsi, fdir_desc: &desc, raw_packet: pkt);
1394 if (err)
1395 goto err_free_all;
1396
1397 /* repeat for fragment packet */
1398 has_frag = ice_fdir_has_frag(flow: input->flow_type);
1399 if (has_frag) {
1400 /* does not return error */
1401 ice_fdir_get_prgm_desc(hw, input, fdesc: &desc, add);
1402 err = ice_fdir_get_gen_prgm_pkt(hw, input, pkt: frag_pkt, frag: true,
1403 tun: is_tun);
1404 if (err)
1405 goto err_frag;
1406 err = ice_prgm_fdir_fltr(vsi: ctrl_vsi, fdir_desc: &desc, raw_packet: frag_pkt);
1407 if (err)
1408 goto err_frag;
1409 } else {
1410 devm_kfree(dev, p: frag_pkt);
1411 }
1412
1413 return 0;
1414
1415err_free_all:
1416 devm_kfree(dev, p: frag_pkt);
1417err_free:
1418 devm_kfree(dev, p: pkt);
1419 return err;
1420
1421err_frag:
1422 devm_kfree(dev, p: frag_pkt);
1423 return err;
1424}
1425
1426/**
1427 * ice_fdir_write_all_fltr - send a flow director filter to the hardware
1428 * @pf: PF data structure
1429 * @input: filter structure
1430 * @add: true adds filter and false removed filter
1431 *
1432 * returns 0 on success and negative value on error
1433 */
1434static int
1435ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input,
1436 bool add)
1437{
1438 u16 port_num;
1439 int tun;
1440
1441 for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
1442 bool is_tun = tun == ICE_FD_HW_SEG_TUN;
1443 int err;
1444
1445 if (is_tun && !ice_get_open_tunnel_port(hw: &pf->hw, port: &port_num, type: TNL_ALL))
1446 continue;
1447 err = ice_fdir_write_fltr(pf, input, add, is_tun);
1448 if (err)
1449 return err;
1450 }
1451 return 0;
1452}
1453
1454/**
1455 * ice_fdir_replay_fltrs - replay filters from the HW filter list
1456 * @pf: board private structure
1457 */
1458void ice_fdir_replay_fltrs(struct ice_pf *pf)
1459{
1460 struct ice_fdir_fltr *f_rule;
1461 struct ice_hw *hw = &pf->hw;
1462
1463 list_for_each_entry(f_rule, &hw->fdir_list_head, fltr_node) {
1464 int err = ice_fdir_write_all_fltr(pf, input: f_rule, add: true);
1465
1466 if (err)
1467 dev_dbg(ice_pf_to_dev(pf), "Flow Director error %d, could not reprogram filter %d\n",
1468 err, f_rule->fltr_id);
1469 }
1470}
1471
1472/**
1473 * ice_fdir_create_dflt_rules - create default perfect filters
1474 * @pf: PF data structure
1475 *
1476 * Returns 0 for success or error.
1477 */
1478int ice_fdir_create_dflt_rules(struct ice_pf *pf)
1479{
1480 int err;
1481
1482 /* Create perfect TCP and UDP rules in hardware. */
1483 err = ice_create_init_fdir_rule(pf, flow: ICE_FLTR_PTYPE_NONF_IPV4_TCP);
1484 if (err)
1485 return err;
1486
1487 err = ice_create_init_fdir_rule(pf, flow: ICE_FLTR_PTYPE_NONF_IPV4_UDP);
1488 if (err)
1489 return err;
1490
1491 err = ice_create_init_fdir_rule(pf, flow: ICE_FLTR_PTYPE_NONF_IPV6_TCP);
1492 if (err)
1493 return err;
1494
1495 err = ice_create_init_fdir_rule(pf, flow: ICE_FLTR_PTYPE_NONF_IPV6_UDP);
1496
1497 return err;
1498}
1499
1500/**
1501 * ice_fdir_del_all_fltrs - Delete all flow director filters
1502 * @vsi: the VSI being changed
1503 *
1504 * This function needs to be called while holding hw->fdir_fltr_lock
1505 */
1506void ice_fdir_del_all_fltrs(struct ice_vsi *vsi)
1507{
1508 struct ice_fdir_fltr *f_rule, *tmp;
1509 struct ice_pf *pf = vsi->back;
1510 struct ice_hw *hw = &pf->hw;
1511
1512 list_for_each_entry_safe(f_rule, tmp, &hw->fdir_list_head, fltr_node) {
1513 ice_fdir_write_all_fltr(pf, input: f_rule, add: false);
1514 ice_fdir_update_cntrs(hw, flow: f_rule->flow_type, add: false);
1515 list_del(entry: &f_rule->fltr_node);
1516 devm_kfree(ice_pf_to_dev(pf), p: f_rule);
1517 }
1518}
1519
1520/**
1521 * ice_vsi_manage_fdir - turn on/off flow director
1522 * @vsi: the VSI being changed
1523 * @ena: boolean value indicating if this is an enable or disable request
1524 */
1525void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena)
1526{
1527 struct ice_pf *pf = vsi->back;
1528 struct ice_hw *hw = &pf->hw;
1529 enum ice_fltr_ptype flow;
1530
1531 if (ena) {
1532 set_bit(nr: ICE_FLAG_FD_ENA, addr: pf->flags);
1533 ice_fdir_create_dflt_rules(pf);
1534 return;
1535 }
1536
1537 mutex_lock(&hw->fdir_fltr_lock);
1538 if (!test_and_clear_bit(nr: ICE_FLAG_FD_ENA, addr: pf->flags))
1539 goto release_lock;
1540
1541 ice_fdir_del_all_fltrs(vsi);
1542
1543 if (hw->fdir_prof)
1544 for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX;
1545 flow++)
1546 if (hw->fdir_prof[flow])
1547 ice_fdir_rem_flow(hw, blk: ICE_BLK_FD, flow_type: flow);
1548
1549release_lock:
1550 mutex_unlock(lock: &hw->fdir_fltr_lock);
1551}
1552
1553/**
1554 * ice_fdir_do_rem_flow - delete flow and possibly add perfect flow
1555 * @pf: PF structure
1556 * @flow_type: FDir flow type to release
1557 */
1558static void
1559ice_fdir_do_rem_flow(struct ice_pf *pf, enum ice_fltr_ptype flow_type)
1560{
1561 struct ice_hw *hw = &pf->hw;
1562 bool need_perfect = false;
1563
1564 if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
1565 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
1566 flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
1567 flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
1568 need_perfect = true;
1569
1570 if (need_perfect && test_bit(flow_type, hw->fdir_perfect_fltr))
1571 return;
1572
1573 ice_fdir_rem_flow(hw, blk: ICE_BLK_FD, flow_type);
1574 if (need_perfect)
1575 ice_create_init_fdir_rule(pf, flow: flow_type);
1576}
1577
1578/**
1579 * ice_fdir_update_list_entry - add or delete a filter from the filter list
1580 * @pf: PF structure
1581 * @input: filter structure
1582 * @fltr_idx: ethtool index of filter to modify
1583 *
1584 * returns 0 on success and negative on errors
1585 */
1586static int
1587ice_fdir_update_list_entry(struct ice_pf *pf, struct ice_fdir_fltr *input,
1588 int fltr_idx)
1589{
1590 struct ice_fdir_fltr *old_fltr;
1591 struct ice_hw *hw = &pf->hw;
1592 struct ice_vsi *vsi;
1593 int err = -ENOENT;
1594
1595 /* Do not update filters during reset */
1596 if (ice_is_reset_in_progress(state: pf->state))
1597 return -EBUSY;
1598
1599 vsi = ice_get_main_vsi(pf);
1600 if (!vsi)
1601 return -EINVAL;
1602
1603 old_fltr = ice_fdir_find_fltr_by_idx(hw, fltr_idx);
1604 if (old_fltr) {
1605 err = ice_fdir_write_all_fltr(pf, input: old_fltr, add: false);
1606 if (err)
1607 return err;
1608 ice_fdir_update_cntrs(hw, flow: old_fltr->flow_type, add: false);
1609 /* update sb-filters count, specific to ring->channel */
1610 ice_update_per_q_fltr(vsi, q_index: old_fltr->orig_q_index, inc: false);
1611 if (!input && !hw->fdir_fltr_cnt[old_fltr->flow_type])
1612 /* we just deleted the last filter of flow_type so we
1613 * should also delete the HW filter info.
1614 */
1615 ice_fdir_do_rem_flow(pf, flow_type: old_fltr->flow_type);
1616 list_del(entry: &old_fltr->fltr_node);
1617 devm_kfree(dev: ice_hw_to_dev(hw), p: old_fltr);
1618 }
1619 if (!input)
1620 return err;
1621 ice_fdir_list_add_fltr(hw, input);
1622 /* update sb-filters count, specific to ring->channel */
1623 ice_update_per_q_fltr(vsi, q_index: input->orig_q_index, inc: true);
1624 ice_fdir_update_cntrs(hw, flow: input->flow_type, add: true);
1625 return 0;
1626}
1627
1628/**
1629 * ice_del_fdir_ethtool - delete Flow Director filter
1630 * @vsi: pointer to target VSI
1631 * @cmd: command to add or delete Flow Director filter
1632 *
1633 * Returns 0 on success and negative values for failure
1634 */
1635int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
1636{
1637 struct ethtool_rx_flow_spec *fsp =
1638 (struct ethtool_rx_flow_spec *)&cmd->fs;
1639 struct ice_pf *pf = vsi->back;
1640 struct ice_hw *hw = &pf->hw;
1641 int val;
1642
1643 if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
1644 return -EOPNOTSUPP;
1645
1646 /* Do not delete filters during reset */
1647 if (ice_is_reset_in_progress(state: pf->state)) {
1648 dev_err(ice_pf_to_dev(pf), "Device is resetting - deleting Flow Director filters not supported during reset\n");
1649 return -EBUSY;
1650 }
1651
1652 if (test_bit(ICE_FD_FLUSH_REQ, pf->state))
1653 return -EBUSY;
1654
1655 mutex_lock(&hw->fdir_fltr_lock);
1656 val = ice_fdir_update_list_entry(pf, NULL, fltr_idx: fsp->location);
1657 mutex_unlock(lock: &hw->fdir_fltr_lock);
1658
1659 return val;
1660}
1661
1662/**
1663 * ice_update_ring_dest_vsi - update dest ring and dest VSI
1664 * @vsi: pointer to target VSI
1665 * @dest_vsi: ptr to dest VSI index
1666 * @ring: ptr to dest ring
1667 *
1668 * This function updates destination VSI and queue if user specifies
1669 * target queue which falls in channel's (aka ADQ) queue region
1670 */
1671static void
1672ice_update_ring_dest_vsi(struct ice_vsi *vsi, u16 *dest_vsi, u32 *ring)
1673{
1674 struct ice_channel *ch;
1675
1676 list_for_each_entry(ch, &vsi->ch_list, list) {
1677 if (!ch->ch_vsi)
1678 continue;
1679
1680 /* make sure to locate corresponding channel based on "queue"
1681 * specified
1682 */
1683 if ((*ring < ch->base_q) ||
1684 (*ring >= (ch->base_q + ch->num_rxq)))
1685 continue;
1686
1687 /* update the dest_vsi based on channel */
1688 *dest_vsi = ch->ch_vsi->idx;
1689
1690 /* update the "ring" to be correct based on channel */
1691 *ring -= ch->base_q;
1692 }
1693}
1694
1695/**
1696 * ice_set_fdir_input_set - Set the input set for Flow Director
1697 * @vsi: pointer to target VSI
1698 * @fsp: pointer to ethtool Rx flow specification
1699 * @input: filter structure
1700 */
1701static int
1702ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
1703 struct ice_fdir_fltr *input)
1704{
1705 u16 dest_vsi, q_index = 0;
1706 u16 orig_q_index = 0;
1707 struct ice_pf *pf;
1708 struct ice_hw *hw;
1709 int flow_type;
1710 u8 dest_ctl;
1711
1712 if (!vsi || !fsp || !input)
1713 return -EINVAL;
1714
1715 pf = vsi->back;
1716 hw = &pf->hw;
1717
1718 dest_vsi = vsi->idx;
1719 if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
1720 dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1721 } else {
1722 u32 ring = ethtool_get_flow_spec_ring(ring_cookie: fsp->ring_cookie);
1723 u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie: fsp->ring_cookie);
1724
1725 if (vf) {
1726 dev_err(ice_pf_to_dev(pf), "Failed to add filter. Flow director filters are not supported on VF queues.\n");
1727 return -EINVAL;
1728 }
1729
1730 if (ring >= vsi->num_rxq)
1731 return -EINVAL;
1732
1733 orig_q_index = ring;
1734 ice_update_ring_dest_vsi(vsi, dest_vsi: &dest_vsi, ring: &ring);
1735 dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1736 q_index = ring;
1737 }
1738
1739 input->fltr_id = fsp->location;
1740 input->q_index = q_index;
1741 flow_type = fsp->flow_type & ~FLOW_EXT;
1742
1743 /* Record the original queue index as specified by user.
1744 * with channel configuration 'q_index' becomes relative
1745 * to TC (channel).
1746 */
1747 input->orig_q_index = orig_q_index;
1748 input->dest_vsi = dest_vsi;
1749 input->dest_ctl = dest_ctl;
1750 input->fltr_status = ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID;
1751 input->cnt_index = ICE_FD_SB_STAT_IDX(hw->fd_ctr_base);
1752 input->flow_type = ice_ethtool_flow_to_fltr(eth: flow_type);
1753
1754 if (fsp->flow_type & FLOW_EXT) {
1755 memcpy(input->ext_data.usr_def, fsp->h_ext.data,
1756 sizeof(input->ext_data.usr_def));
1757 input->ext_data.vlan_type = fsp->h_ext.vlan_etype;
1758 input->ext_data.vlan_tag = fsp->h_ext.vlan_tci;
1759 memcpy(input->ext_mask.usr_def, fsp->m_ext.data,
1760 sizeof(input->ext_mask.usr_def));
1761 input->ext_mask.vlan_type = fsp->m_ext.vlan_etype;
1762 input->ext_mask.vlan_tag = fsp->m_ext.vlan_tci;
1763 }
1764
1765 switch (flow_type) {
1766 case TCP_V4_FLOW:
1767 case UDP_V4_FLOW:
1768 case SCTP_V4_FLOW:
1769 input->ip.v4.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
1770 input->ip.v4.src_port = fsp->h_u.tcp_ip4_spec.psrc;
1771 input->ip.v4.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
1772 input->ip.v4.src_ip = fsp->h_u.tcp_ip4_spec.ip4src;
1773 input->mask.v4.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
1774 input->mask.v4.src_port = fsp->m_u.tcp_ip4_spec.psrc;
1775 input->mask.v4.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst;
1776 input->mask.v4.src_ip = fsp->m_u.tcp_ip4_spec.ip4src;
1777 break;
1778 case IPV4_USER_FLOW:
1779 input->ip.v4.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst;
1780 input->ip.v4.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
1781 input->ip.v4.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes;
1782 input->ip.v4.proto = fsp->h_u.usr_ip4_spec.proto;
1783 input->ip.v4.ip_ver = fsp->h_u.usr_ip4_spec.ip_ver;
1784 input->ip.v4.tos = fsp->h_u.usr_ip4_spec.tos;
1785 input->mask.v4.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst;
1786 input->mask.v4.src_ip = fsp->m_u.usr_ip4_spec.ip4src;
1787 input->mask.v4.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
1788 input->mask.v4.proto = fsp->m_u.usr_ip4_spec.proto;
1789 input->mask.v4.ip_ver = fsp->m_u.usr_ip4_spec.ip_ver;
1790 input->mask.v4.tos = fsp->m_u.usr_ip4_spec.tos;
1791 break;
1792 case TCP_V6_FLOW:
1793 case UDP_V6_FLOW:
1794 case SCTP_V6_FLOW:
1795 memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
1796 sizeof(struct in6_addr));
1797 memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
1798 sizeof(struct in6_addr));
1799 input->ip.v6.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
1800 input->ip.v6.src_port = fsp->h_u.tcp_ip6_spec.psrc;
1801 input->ip.v6.tc = fsp->h_u.tcp_ip6_spec.tclass;
1802 memcpy(input->mask.v6.dst_ip, fsp->m_u.tcp_ip6_spec.ip6dst,
1803 sizeof(struct in6_addr));
1804 memcpy(input->mask.v6.src_ip, fsp->m_u.tcp_ip6_spec.ip6src,
1805 sizeof(struct in6_addr));
1806 input->mask.v6.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
1807 input->mask.v6.src_port = fsp->m_u.tcp_ip6_spec.psrc;
1808 input->mask.v6.tc = fsp->m_u.tcp_ip6_spec.tclass;
1809 break;
1810 case IPV6_USER_FLOW:
1811 memcpy(input->ip.v6.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
1812 sizeof(struct in6_addr));
1813 memcpy(input->ip.v6.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
1814 sizeof(struct in6_addr));
1815 input->ip.v6.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
1816 input->ip.v6.tc = fsp->h_u.usr_ip6_spec.tclass;
1817
1818 /* if no protocol requested, use IPPROTO_NONE */
1819 if (!fsp->m_u.usr_ip6_spec.l4_proto)
1820 input->ip.v6.proto = IPPROTO_NONE;
1821 else
1822 input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;
1823
1824 memcpy(input->mask.v6.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
1825 sizeof(struct in6_addr));
1826 memcpy(input->mask.v6.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
1827 sizeof(struct in6_addr));
1828 input->mask.v6.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
1829 input->mask.v6.tc = fsp->m_u.usr_ip6_spec.tclass;
1830 input->mask.v6.proto = fsp->m_u.usr_ip6_spec.l4_proto;
1831 break;
1832 default:
1833 /* not doing un-parsed flow types */
1834 return -EINVAL;
1835 }
1836
1837 return 0;
1838}
1839
1840/**
1841 * ice_add_fdir_ethtool - Add/Remove Flow Director filter
1842 * @vsi: pointer to target VSI
1843 * @cmd: command to add or delete Flow Director filter
1844 *
1845 * Returns 0 on success and negative values for failure
1846 */
1847int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
1848{
1849 struct ice_rx_flow_userdef userdata;
1850 struct ethtool_rx_flow_spec *fsp;
1851 struct ice_fdir_fltr *input;
1852 struct device *dev;
1853 struct ice_pf *pf;
1854 struct ice_hw *hw;
1855 int fltrs_needed;
1856 u16 tunnel_port;
1857 int ret;
1858
1859 if (!vsi)
1860 return -EINVAL;
1861
1862 pf = vsi->back;
1863 hw = &pf->hw;
1864 dev = ice_pf_to_dev(pf);
1865
1866 if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
1867 return -EOPNOTSUPP;
1868
1869 /* Do not program filters during reset */
1870 if (ice_is_reset_in_progress(state: pf->state)) {
1871 dev_err(dev, "Device is resetting - adding Flow Director filters not supported during reset\n");
1872 return -EBUSY;
1873 }
1874
1875 fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
1876
1877 if (ice_parse_rx_flow_user_data(fsp, data: &userdata))
1878 return -EINVAL;
1879
1880 if (fsp->flow_type & FLOW_MAC_EXT)
1881 return -EINVAL;
1882
1883 ret = ice_cfg_fdir_xtrct_seq(pf, fsp, user: &userdata);
1884 if (ret)
1885 return ret;
1886
1887 if (fsp->location >= ice_get_fdir_cnt_all(hw)) {
1888 dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
1889 return -ENOSPC;
1890 }
1891
1892 /* return error if not an update and no available filters */
1893 fltrs_needed = ice_get_open_tunnel_port(hw, port: &tunnel_port, type: TNL_ALL) ? 2 : 1;
1894 if (!ice_fdir_find_fltr_by_idx(hw, fltr_idx: fsp->location) &&
1895 ice_fdir_num_avail_fltr(hw, vsi: pf->vsi[vsi->idx]) < fltrs_needed) {
1896 dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
1897 return -ENOSPC;
1898 }
1899
1900 input = devm_kzalloc(dev, size: sizeof(*input), GFP_KERNEL);
1901 if (!input)
1902 return -ENOMEM;
1903
1904 ret = ice_set_fdir_input_set(vsi, fsp, input);
1905 if (ret)
1906 goto free_input;
1907
1908 mutex_lock(&hw->fdir_fltr_lock);
1909 if (ice_fdir_is_dup_fltr(hw, input)) {
1910 ret = -EINVAL;
1911 goto release_lock;
1912 }
1913
1914 if (userdata.flex_fltr) {
1915 input->flex_fltr = true;
1916 input->flex_word = cpu_to_be16(userdata.flex_word);
1917 input->flex_offset = userdata.flex_offset;
1918 }
1919
1920 input->cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
1921 input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
1922 input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;
1923
1924 /* input struct is added to the HW filter list */
1925 ret = ice_fdir_update_list_entry(pf, input, fltr_idx: fsp->location);
1926 if (ret)
1927 goto release_lock;
1928
1929 ret = ice_fdir_write_all_fltr(pf, input, add: true);
1930 if (ret)
1931 goto remove_sw_rule;
1932
1933 goto release_lock;
1934
1935remove_sw_rule:
1936 ice_fdir_update_cntrs(hw, flow: input->flow_type, add: false);
1937 /* update sb-filters count, specific to ring->channel */
1938 ice_update_per_q_fltr(vsi, q_index: input->orig_q_index, inc: false);
1939 list_del(entry: &input->fltr_node);
1940release_lock:
1941 mutex_unlock(lock: &hw->fdir_fltr_lock);
1942free_input:
1943 if (ret)
1944 devm_kfree(dev, p: input);
1945
1946 return ret;
1947}
1948

/* source code of linux/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c */