// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019-2021, Intel Corporation. */

#include "ice.h"
#include "ice_tc_lib.h"
#include "ice_fltr.h"
#include "ice_lib.h"
#include "ice_protocol_type.h"

#define ICE_TC_METADATA_LKUP_IDX 0

/**
 * ice_tc_count_lkups - determine lookup count for switch filter
 * @flags: TC-flower flags
 * @headers: Pointer to TC flower filter header structure
 * @fltr: Pointer to outer TC filter structure
 *
 * Determine lookup count based on TC flower input for switch filter.
 */
static int
ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
		   struct ice_tc_flower_fltr *fltr)
{
	int lkups_cnt = 1; /* 0th lookup is metadata */

	/* Always add metadata as the 0th lookup. Included elements:
	 * - Direction flag (always present)
	 * - ICE_TC_FLWR_FIELD_VLAN_TPID (present if specified)
	 * - Tunnel flag (present if tunnel)
	 */

	if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC)
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS)
		lkups_cnt++;

	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
		     ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV6))
		lkups_cnt++;

	if (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
		     ICE_TC_FLWR_FIELD_ENC_IP_TTL))
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT)
		lkups_cnt++;

	if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID)
		lkups_cnt++;

	/* are MAC fields specified? */
	if (flags & (ICE_TC_FLWR_FIELD_DST_MAC | ICE_TC_FLWR_FIELD_SRC_MAC))
		lkups_cnt++;

	/* is VLAN specified? */
	if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO))
		lkups_cnt++;

	/* is CVLAN specified? */
	if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO))
		lkups_cnt++;

	/* are PPPoE options specified? */
	if (flags & (ICE_TC_FLWR_FIELD_PPPOE_SESSID |
		     ICE_TC_FLWR_FIELD_PPP_PROTO))
		lkups_cnt++;

	/* are IPv[4|6] fields specified? */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 | ICE_TC_FLWR_FIELD_SRC_IPV4 |
		     ICE_TC_FLWR_FIELD_DEST_IPV6 | ICE_TC_FLWR_FIELD_SRC_IPV6))
		lkups_cnt++;

	if (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))
		lkups_cnt++;

	/* are L2TPv3 options specified? */
	if (flags & ICE_TC_FLWR_FIELD_L2TPV3_SESSID)
		lkups_cnt++;

	/* is L4 (TCP/UDP/any other L4 protocol fields) specified? */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
		     ICE_TC_FLWR_FIELD_SRC_L4_PORT))
		lkups_cnt++;

	return lkups_cnt;
}

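/* Small helpers that pick the outer (OFOS/OL) or inner (IL) variant of a
 * protocol lookup type depending on whether the match is on tunnel inner
 * headers, plus mappings from IP protocol and tunnel type to lookup types.
 */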
static enum ice_protocol_type ice_proto_type_from_mac(bool inner)
{
	return inner ? ICE_MAC_IL : ICE_MAC_OFOS;
}

static enum ice_protocol_type ice_proto_type_from_etype(bool inner)
{
	return inner ? ICE_ETYPE_IL : ICE_ETYPE_OL;
}

static enum ice_protocol_type ice_proto_type_from_ipv4(bool inner)
{
	return inner ? ICE_IPV4_IL : ICE_IPV4_OFOS;
}

static enum ice_protocol_type ice_proto_type_from_ipv6(bool inner)
{
	return inner ? ICE_IPV6_IL : ICE_IPV6_OFOS;
}

static enum ice_protocol_type ice_proto_type_from_l4_port(u16 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_TCP:
		return ICE_TCP_IL;
	case IPPROTO_UDP:
		return ICE_UDP_ILOS;
	}

	return 0;
}

static enum ice_protocol_type
ice_proto_type_from_tunnel(enum ice_tunnel_type type)
{
	switch (type) {
	case TNL_VXLAN:
		return ICE_VXLAN;
	case TNL_GENEVE:
		return ICE_GENEVE;
	case TNL_GRETAP:
		return ICE_NVGRE;
	case TNL_GTPU:
		/* NO_PAY profiles will not work with GTP-U */
		return ICE_GTP;
	case TNL_GTPC:
		return ICE_GTP_NO_PAY;
	default:
		return 0;
	}
}

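/* Map the TC tunnel type to the switch tunnel type used when programming the
 * advanced rule; unknown types fall back to ICE_NON_TUN.
 */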
static enum ice_sw_tunnel_type
ice_sw_type_from_tunnel(enum ice_tunnel_type type)
{
	switch (type) {
	case TNL_VXLAN:
		return ICE_SW_TUN_VXLAN;
	case TNL_GENEVE:
		return ICE_SW_TUN_GENEVE;
	case TNL_GRETAP:
		return ICE_SW_TUN_NVGRE;
	case TNL_GTPU:
		return ICE_SW_TUN_GTPU;
	case TNL_GTPC:
		return ICE_SW_TUN_GTPC;
	default:
		return ICE_NON_TUN;
	}
}

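/* Return the VLAN TPID if it is one of the supported values (802.1Q,
 * 802.1ad or QinQ), otherwise return 0.
 */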
static u16 ice_check_supported_vlan_tpid(u16 vlan_tpid)
{
	switch (vlan_tpid) {
	case ETH_P_8021Q:
	case ETH_P_8021AD:
	case ETH_P_QINQ1:
		return vlan_tpid;
	default:
		return 0;
	}
}

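/**
 * ice_tc_fill_tunnel_outer - fill lookups that match on tunnel outer headers
 * @flags: TC-flower flags
 * @fltr: Pointer to TC flower filter structure
 * @list: list of advanced lookup elements to fill
 * @i: index of the next free lookup element
 *
 * Fill the lookup elements that match on the tunnel (outer) headers and
 * return the updated index.
 */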
static int
ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
			 struct ice_adv_lkup_elem *list, int i)
{
	struct ice_tc_flower_lyr_2_4_hdrs *hdr = &fltr->outer_headers;

	if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) {
		u32 tenant_id;

		list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
		switch (fltr->tunnel_type) {
		case TNL_VXLAN:
		case TNL_GENEVE:
			tenant_id = be32_to_cpu(fltr->tenant_id) << 8;
			list[i].h_u.tnl_hdr.vni = cpu_to_be32(tenant_id);
			memcpy(&list[i].m_u.tnl_hdr.vni, "\xff\xff\xff\x00", 4);
			i++;
			break;
		case TNL_GRETAP:
			list[i].h_u.nvgre_hdr.tni_flow = fltr->tenant_id;
			memcpy(&list[i].m_u.nvgre_hdr.tni_flow,
			       "\xff\xff\xff\xff", 4);
			i++;
			break;
		case TNL_GTPC:
		case TNL_GTPU:
			list[i].h_u.gtp_hdr.teid = fltr->tenant_id;
			memcpy(&list[i].m_u.gtp_hdr.teid,
			       "\xff\xff\xff\xff", 4);
			i++;
			break;
		default:
			break;
		}
	}

	if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC) {
		list[i].type = ice_proto_type_from_mac(false);
		ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
				hdr->l2_key.dst_mac);
		ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
				hdr->l2_mask.dst_mac);
		i++;
	}

	if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS &&
	    (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) {
		list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);

		if (fltr->gtp_pdu_info_masks.pdu_type) {
			list[i].h_u.gtp_hdr.pdu_type =
				fltr->gtp_pdu_info_keys.pdu_type << 4;
			memcpy(&list[i].m_u.gtp_hdr.pdu_type, "\xf0", 1);
		}

		if (fltr->gtp_pdu_info_masks.qfi) {
			list[i].h_u.gtp_hdr.qfi = fltr->gtp_pdu_info_keys.qfi;
			memcpy(&list[i].m_u.gtp_hdr.qfi, "\x3f", 1);
		}

		i++;
	}

	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) {
		list[i].type = ice_proto_type_from_ipv4(false);

		if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV4) {
			list[i].h_u.ipv4_hdr.src_addr = hdr->l3_key.src_ipv4;
			list[i].m_u.ipv4_hdr.src_addr = hdr->l3_mask.src_ipv4;
		}
		if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV4) {
			list[i].h_u.ipv4_hdr.dst_addr = hdr->l3_key.dst_ipv4;
			list[i].m_u.ipv4_hdr.dst_addr = hdr->l3_mask.dst_ipv4;
		}
		i++;
	}

	if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
		     ICE_TC_FLWR_FIELD_ENC_DEST_IPV6)) {
		list[i].type = ice_proto_type_from_ipv6(false);

		if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV6) {
			memcpy(&list[i].h_u.ipv6_hdr.src_addr,
			       &hdr->l3_key.src_ipv6_addr,
			       sizeof(hdr->l3_key.src_ipv6_addr));
			memcpy(&list[i].m_u.ipv6_hdr.src_addr,
			       &hdr->l3_mask.src_ipv6_addr,
			       sizeof(hdr->l3_mask.src_ipv6_addr));
		}
		if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV6) {
			memcpy(&list[i].h_u.ipv6_hdr.dst_addr,
			       &hdr->l3_key.dst_ipv6_addr,
			       sizeof(hdr->l3_key.dst_ipv6_addr));
			memcpy(&list[i].m_u.ipv6_hdr.dst_addr,
			       &hdr->l3_mask.dst_ipv6_addr,
			       sizeof(hdr->l3_mask.dst_ipv6_addr));
		}
		i++;
	}

	if (fltr->inner_headers.l2_key.n_proto == htons(ETH_P_IP) &&
	    (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
		      ICE_TC_FLWR_FIELD_ENC_IP_TTL))) {
		list[i].type = ice_proto_type_from_ipv4(false);

		if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TOS) {
			list[i].h_u.ipv4_hdr.tos = hdr->l3_key.tos;
			list[i].m_u.ipv4_hdr.tos = hdr->l3_mask.tos;
		}

		if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TTL) {
			list[i].h_u.ipv4_hdr.time_to_live = hdr->l3_key.ttl;
			list[i].m_u.ipv4_hdr.time_to_live = hdr->l3_mask.ttl;
		}

		i++;
	}

	if (fltr->inner_headers.l2_key.n_proto == htons(ETH_P_IPV6) &&
	    (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
		      ICE_TC_FLWR_FIELD_ENC_IP_TTL))) {
		struct ice_ipv6_hdr *hdr_h, *hdr_m;

		hdr_h = &list[i].h_u.ipv6_hdr;
		hdr_m = &list[i].m_u.ipv6_hdr;
		list[i].type = ice_proto_type_from_ipv6(false);

		if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TOS) {
			be32p_replace_bits(&hdr_h->be_ver_tc_flow,
					   hdr->l3_key.tos,
					   ICE_IPV6_HDR_TC_MASK);
			be32p_replace_bits(&hdr_m->be_ver_tc_flow,
					   hdr->l3_mask.tos,
					   ICE_IPV6_HDR_TC_MASK);
		}

		if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TTL) {
			hdr_h->hop_limit = hdr->l3_key.ttl;
			hdr_m->hop_limit = hdr->l3_mask.ttl;
		}

		i++;
	}

	if ((flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) &&
	    hdr->l3_key.ip_proto == IPPROTO_UDP) {
		list[i].type = ICE_UDP_OF;
		list[i].h_u.l4_hdr.dst_port = hdr->l4_key.dst_port;
		list[i].m_u.l4_hdr.dst_port = hdr->l4_mask.dst_port;
		i++;
	}

	/* always fill matching on tunneled packets in metadata */
	ice_rule_add_tunnel_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);

	return i;
}

/**
 * ice_tc_fill_rules - fill filter rules based on TC fltr
 * @hw: pointer to HW structure
 * @flags: tc flower field flags
 * @tc_fltr: pointer to TC flower filter
 * @list: list of advanced rule elements
 * @rule_info: pointer to information about rule
 * @l4_proto: pointer to information such as L4 proto type
 *
 * Fill ice_adv_lkup_elem list based on TC flower flags and
 * TC flower headers. This list should be used to add an
 * advanced filter in hardware.
 */
static int
ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
		  struct ice_tc_flower_fltr *tc_fltr,
		  struct ice_adv_lkup_elem *list,
		  struct ice_adv_rule_info *rule_info,
		  u16 *l4_proto)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
	bool inner = false;
	u16 vlan_tpid = 0;
	int i = 1; /* 0th lookup is metadata */

	rule_info->vlan_type = vlan_tpid;

	/* Always add direction metadata */
	ice_rule_add_direction_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);

	rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
	if (tc_fltr->tunnel_type != TNL_LAST) {
		i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i);

		headers = &tc_fltr->inner_headers;
		inner = true;
	}

	if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
		list[i].type = ice_proto_type_from_etype(inner);
		list[i].h_u.ethertype.ethtype_id = headers->l2_key.n_proto;
		list[i].m_u.ethertype.ethtype_id = headers->l2_mask.n_proto;
		i++;
	}

	if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
		     ICE_TC_FLWR_FIELD_SRC_MAC)) {
		struct ice_tc_l2_hdr *l2_key, *l2_mask;

		l2_key = &headers->l2_key;
		l2_mask = &headers->l2_mask;

		list[i].type = ice_proto_type_from_mac(inner);
		if (flags & ICE_TC_FLWR_FIELD_DST_MAC) {
			ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
					l2_key->dst_mac);
			ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
					l2_mask->dst_mac);
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_MAC) {
			ether_addr_copy(list[i].h_u.eth_hdr.src_addr,
					l2_key->src_mac);
			ether_addr_copy(list[i].m_u.eth_hdr.src_addr,
					l2_mask->src_mac);
		}
		i++;
	}

	/* copy VLAN info */
	if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO)) {
		if (flags & ICE_TC_FLWR_FIELD_CVLAN)
			list[i].type = ICE_VLAN_EX;
		else
			list[i].type = ICE_VLAN_OFOS;

		if (flags & ICE_TC_FLWR_FIELD_VLAN) {
			list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
			list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0x0FFF);
		}

		if (flags & ICE_TC_FLWR_FIELD_VLAN_PRIO) {
			if (flags & ICE_TC_FLWR_FIELD_VLAN) {
				list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xEFFF);
			} else {
				list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xE000);
				list[i].h_u.vlan_hdr.vlan = 0;
			}
			list[i].h_u.vlan_hdr.vlan |=
				headers->vlan_hdr.vlan_prio;
		}

		i++;
	}

	if (flags & ICE_TC_FLWR_FIELD_VLAN_TPID) {
		vlan_tpid = be16_to_cpu(headers->vlan_hdr.vlan_tpid);
		rule_info->vlan_type =
			ice_check_supported_vlan_tpid(vlan_tpid);

		ice_rule_add_vlan_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
	}

	if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO)) {
		list[i].type = ICE_VLAN_IN;

		if (flags & ICE_TC_FLWR_FIELD_CVLAN) {
			list[i].h_u.vlan_hdr.vlan = headers->cvlan_hdr.vlan_id;
			list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0x0FFF);
		}

		if (flags & ICE_TC_FLWR_FIELD_CVLAN_PRIO) {
			if (flags & ICE_TC_FLWR_FIELD_CVLAN) {
				list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xEFFF);
			} else {
				list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xE000);
				list[i].h_u.vlan_hdr.vlan = 0;
			}
			list[i].h_u.vlan_hdr.vlan |=
				headers->cvlan_hdr.vlan_prio;
		}

		i++;
	}

	if (flags & (ICE_TC_FLWR_FIELD_PPPOE_SESSID |
		     ICE_TC_FLWR_FIELD_PPP_PROTO)) {
		struct ice_pppoe_hdr *vals, *masks;

		vals = &list[i].h_u.pppoe_hdr;
		masks = &list[i].m_u.pppoe_hdr;

		list[i].type = ICE_PPPOE;

		if (flags & ICE_TC_FLWR_FIELD_PPPOE_SESSID) {
			vals->session_id = headers->pppoe_hdr.session_id;
			masks->session_id = cpu_to_be16(0xFFFF);
		}

		if (flags & ICE_TC_FLWR_FIELD_PPP_PROTO) {
			vals->ppp_prot_id = headers->pppoe_hdr.ppp_proto;
			masks->ppp_prot_id = cpu_to_be16(0xFFFF);
		}

		i++;
	}

	/* copy L3 (IPv[4|6]: src, dest) address */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 |
		     ICE_TC_FLWR_FIELD_SRC_IPV4)) {
		struct ice_tc_l3_hdr *l3_key, *l3_mask;

		list[i].type = ice_proto_type_from_ipv4(inner);
		l3_key = &headers->l3_key;
		l3_mask = &headers->l3_mask;
		if (flags & ICE_TC_FLWR_FIELD_DEST_IPV4) {
			list[i].h_u.ipv4_hdr.dst_addr = l3_key->dst_ipv4;
			list[i].m_u.ipv4_hdr.dst_addr = l3_mask->dst_ipv4;
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_IPV4) {
			list[i].h_u.ipv4_hdr.src_addr = l3_key->src_ipv4;
			list[i].m_u.ipv4_hdr.src_addr = l3_mask->src_ipv4;
		}
		i++;
	} else if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV6 |
			    ICE_TC_FLWR_FIELD_SRC_IPV6)) {
		struct ice_ipv6_hdr *ipv6_hdr, *ipv6_mask;
		struct ice_tc_l3_hdr *l3_key, *l3_mask;

		list[i].type = ice_proto_type_from_ipv6(inner);
		ipv6_hdr = &list[i].h_u.ipv6_hdr;
		ipv6_mask = &list[i].m_u.ipv6_hdr;
		l3_key = &headers->l3_key;
		l3_mask = &headers->l3_mask;

		if (flags & ICE_TC_FLWR_FIELD_DEST_IPV6) {
			memcpy(&ipv6_hdr->dst_addr, &l3_key->dst_ipv6_addr,
			       sizeof(l3_key->dst_ipv6_addr));
			memcpy(&ipv6_mask->dst_addr, &l3_mask->dst_ipv6_addr,
			       sizeof(l3_mask->dst_ipv6_addr));
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_IPV6) {
			memcpy(&ipv6_hdr->src_addr, &l3_key->src_ipv6_addr,
			       sizeof(l3_key->src_ipv6_addr));
			memcpy(&ipv6_mask->src_addr, &l3_mask->src_ipv6_addr,
			       sizeof(l3_mask->src_ipv6_addr));
		}
		i++;
	}

	if (headers->l2_key.n_proto == htons(ETH_P_IP) &&
	    (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))) {
		list[i].type = ice_proto_type_from_ipv4(inner);

		if (flags & ICE_TC_FLWR_FIELD_IP_TOS) {
			list[i].h_u.ipv4_hdr.tos = headers->l3_key.tos;
			list[i].m_u.ipv4_hdr.tos = headers->l3_mask.tos;
		}

		if (flags & ICE_TC_FLWR_FIELD_IP_TTL) {
			list[i].h_u.ipv4_hdr.time_to_live =
				headers->l3_key.ttl;
			list[i].m_u.ipv4_hdr.time_to_live =
				headers->l3_mask.ttl;
		}

		i++;
	}

	if (headers->l2_key.n_proto == htons(ETH_P_IPV6) &&
	    (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))) {
		struct ice_ipv6_hdr *hdr_h, *hdr_m;

		hdr_h = &list[i].h_u.ipv6_hdr;
		hdr_m = &list[i].m_u.ipv6_hdr;
		list[i].type = ice_proto_type_from_ipv6(inner);

		if (flags & ICE_TC_FLWR_FIELD_IP_TOS) {
			be32p_replace_bits(&hdr_h->be_ver_tc_flow,
					   headers->l3_key.tos,
					   ICE_IPV6_HDR_TC_MASK);
			be32p_replace_bits(&hdr_m->be_ver_tc_flow,
					   headers->l3_mask.tos,
					   ICE_IPV6_HDR_TC_MASK);
		}

		if (flags & ICE_TC_FLWR_FIELD_IP_TTL) {
			hdr_h->hop_limit = headers->l3_key.ttl;
			hdr_m->hop_limit = headers->l3_mask.ttl;
		}

		i++;
	}

	if (flags & ICE_TC_FLWR_FIELD_L2TPV3_SESSID) {
		list[i].type = ICE_L2TPV3;

		list[i].h_u.l2tpv3_sess_hdr.session_id =
			headers->l2tpv3_hdr.session_id;
		list[i].m_u.l2tpv3_sess_hdr.session_id =
			cpu_to_be32(0xFFFFFFFF);

		i++;
	}

	/* copy L4 (src, dest) port */
	if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
		     ICE_TC_FLWR_FIELD_SRC_L4_PORT)) {
		struct ice_tc_l4_hdr *l4_key, *l4_mask;

		list[i].type = ice_proto_type_from_l4_port(headers->l3_key.ip_proto);
		l4_key = &headers->l4_key;
		l4_mask = &headers->l4_mask;

		if (flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) {
			list[i].h_u.l4_hdr.dst_port = l4_key->dst_port;
			list[i].m_u.l4_hdr.dst_port = l4_mask->dst_port;
		}
		if (flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT) {
			list[i].h_u.l4_hdr.src_port = l4_key->src_port;
			list[i].m_u.l4_hdr.src_port = l4_mask->src_port;
		}
		i++;
	}

	return i;
}

/**
 * ice_tc_tun_get_type - get the tunnel type
 * @tunnel_dev: ptr to tunnel device
 *
 * This function detects the appropriate tunnel_type if the specified device
 * is a tunnel device such as VXLAN/Geneve
 */
static int ice_tc_tun_get_type(struct net_device *tunnel_dev)
{
	if (netif_is_vxlan(tunnel_dev))
		return TNL_VXLAN;
	if (netif_is_geneve(tunnel_dev))
		return TNL_GENEVE;
	if (netif_is_gretap(tunnel_dev) ||
	    netif_is_ip6gretap(tunnel_dev))
		return TNL_GRETAP;

	/* Assume GTP-U by default in case of GTP netdev.
	 * GTP-C may be selected later, based on enc_dst_port.
	 */
	if (netif_is_gtp(tunnel_dev))
		return TNL_GTPU;
	return TNL_LAST;
}

bool ice_is_tunnel_supported(struct net_device *dev)
{
	return ice_tc_tun_get_type(dev) != TNL_LAST;
}

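/* Parse a single TC action for a switchdev (eswitch) filter: only drop and
 * redirect to a port representor, an ice uplink netdev or a supported tunnel
 * device are accepted; anything else is rejected via extack.
 */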
static int
ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr,
			    struct flow_action_entry *act)
{
	struct ice_repr *repr;

	switch (act->id) {
	case FLOW_ACTION_DROP:
		fltr->action.fltr_act = ICE_DROP_PACKET;
		break;

	case FLOW_ACTION_REDIRECT:
		fltr->action.fltr_act = ICE_FWD_TO_VSI;

		if (ice_is_port_repr_netdev(act->dev)) {
			repr = ice_netdev_to_repr(act->dev);

			fltr->dest_vsi = repr->src_vsi;
			fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
		} else if (netif_is_ice(act->dev) ||
			   ice_is_tunnel_supported(act->dev)) {
			fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
		} else {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode");
			return -EINVAL;
		}

		break;

	default:
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action in switchdev mode");
		return -EINVAL;
	}

	return 0;
}

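/**
 * ice_eswitch_add_tc_fltr - add a TC flower filter in switchdev mode
 * @vsi: Pointer to VSI
 * @fltr: Pointer to TC flower filter structure
 *
 * Build the lookup list for the filter and program the advanced switch rule
 * for the eswitch, storing the returned rule IDs in the filter.
 */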
static int
ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
	struct ice_adv_rule_info rule_info = { 0 };
	struct ice_rule_query_data rule_added;
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_adv_lkup_elem *list;
	u32 flags = fltr->flags;
	int lkups_cnt;
	int ret;
	int i;

	if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
		return -EOPNOTSUPP;
	}

	lkups_cnt = ice_tc_count_lkups(flags, headers, fltr);
	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	i = ice_tc_fill_rules(hw, flags, fltr, list, &rule_info, NULL);
	if (i != lkups_cnt) {
		ret = -EINVAL;
		goto exit;
	}

	/* egress traffic is always redirected to the uplink */
	if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
		fltr->dest_vsi = vsi->back->switchdev.uplink_vsi;

	rule_info.sw_act.fltr_act = fltr->action.fltr_act;
	if (fltr->action.fltr_act != ICE_DROP_PACKET)
		rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx;
	/* For now, make the priority the highest; it also becomes the
	 * priority of the recipe which gets created as a result of the
	 * new extraction sequence based on the input set.
	 * Priority '7' is the max value for a switch recipe; the higher
	 * the number, the earlier the rule is evaluated.
	 */
	rule_info.priority = 7;
	rule_info.flags_info.act_valid = true;

	if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
		rule_info.sw_act.flag |= ICE_FLTR_RX;
		rule_info.sw_act.src = hw->pf_id;
		rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
	} else {
		rule_info.sw_act.flag |= ICE_FLTR_TX;
		rule_info.sw_act.src = vsi->idx;
		rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
	}

	/* specify the cookie as filter_rule_id */
	rule_info.fltr_rule_id = fltr->cookie;

	ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
	if (ret == -EEXIST) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exists");
		ret = -EINVAL;
		goto exit;
	} else if (ret) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error");
		goto exit;
	}

	/* store the output params, which are needed later for removing
	 * advanced switch filter
	 */
	fltr->rid = rule_added.rid;
	fltr->rule_id = rule_added.rule_id;
	fltr->dest_vsi_handle = rule_added.vsi_handle;

exit:
	kfree(list);
	return ret;
}

/**
 * ice_locate_vsi_using_queue - locate VSI using queue (forward to queue action)
 * @vsi: Pointer to VSI
 * @queue: Queue index
 *
 * Locate the VSI using the specified "queue". When ADQ is not enabled,
 * always return the input VSI, otherwise locate the corresponding
 * VSI based on the per channel "offset" and "qcount"
 */
struct ice_vsi *
ice_locate_vsi_using_queue(struct ice_vsi *vsi, int queue)
{
	int num_tc, tc;

	/* if ADQ is not active, passed VSI is the candidate VSI */
	if (!ice_is_adq_active(vsi->back))
		return vsi;

	/* Locate the VSI (it could still be main PF VSI or CHNL_VSI depending
	 * upon queue number)
	 */
	num_tc = vsi->mqprio_qopt.qopt.num_tc;

	for (tc = 0; tc < num_tc; tc++) {
		int qcount = vsi->mqprio_qopt.qopt.count[tc];
		int offset = vsi->mqprio_qopt.qopt.offset[tc];

		if (queue >= offset && queue < offset + qcount) {
			/* for non-ADQ TCs, passed VSI is the candidate VSI */
			if (tc < ICE_CHNL_START_TC)
				return vsi;
			else
				return vsi->tc_map_vsi[tc];
		}
	}
	return NULL;
}

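/* Return the Rx ring backing the queue index requested by the fwd-to-queue
 * action, or NULL if the index is out of range for this VSI.
 */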
static struct ice_rx_ring *
ice_locate_rx_ring_using_queue(struct ice_vsi *vsi,
			       struct ice_tc_flower_fltr *tc_fltr)
{
	u16 queue = tc_fltr->action.fwd.q.queue;

	return queue < vsi->num_rxq ? vsi->rx_rings[queue] : NULL;
}

/**
 * ice_tc_forward_action - Determine destination VSI and queue for the action
 * @vsi: Pointer to VSI
 * @tc_fltr: Pointer to TC flower filter structure
 *
 * Validates the tc forward action and determines the destination VSI and queue
 * for the forward action.
 */
static struct ice_vsi *
ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr)
{
	struct ice_rx_ring *ring = NULL;
	struct ice_vsi *dest_vsi = NULL;
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	u32 tc_class;
	int q;

	dev = ice_pf_to_dev(pf);

	/* Get the destination VSI and/or destination queue and validate them */
	switch (tc_fltr->action.fltr_act) {
	case ICE_FWD_TO_VSI:
		tc_class = tc_fltr->action.fwd.tc.tc_class;
		/* Select the destination VSI */
		if (tc_class < ICE_CHNL_START_TC) {
			NL_SET_ERR_MSG_MOD(tc_fltr->extack,
					   "Unable to add filter because of unsupported destination");
			return ERR_PTR(-EOPNOTSUPP);
		}
		/* Locate ADQ VSI depending on hw_tc number */
		dest_vsi = vsi->tc_map_vsi[tc_class];
		break;
	case ICE_FWD_TO_Q:
		/* Locate the Rx queue */
		ring = ice_locate_rx_ring_using_queue(vsi, tc_fltr);
		if (!ring) {
			dev_err(dev,
				"Unable to locate Rx queue for action fwd_to_queue: %u\n",
				tc_fltr->action.fwd.q.queue);
			return ERR_PTR(-EINVAL);
		}
		/* Determine destination VSI even though the action is
		 * FWD_TO_QUEUE, because QUEUE is associated with VSI
		 */
		q = tc_fltr->action.fwd.q.queue;
		dest_vsi = ice_locate_vsi_using_queue(vsi, q);
		break;
	default:
		dev_err(dev,
			"Unable to add filter because of unsupported action %u (supported actions: fwd to tc, fwd to queue)\n",
			tc_fltr->action.fltr_act);
		return ERR_PTR(-EINVAL);
	}
	/* Must have valid dest_vsi (it could be main VSI or ADQ VSI) */
	if (!dest_vsi) {
		dev_err(dev,
			"Unable to add filter because specified destination VSI doesn't exist\n");
		return ERR_PTR(-EINVAL);
	}
	return dest_vsi;
}

/**
 * ice_add_tc_flower_adv_fltr - add appropriate filter rules
 * @vsi: Pointer to VSI
 * @tc_fltr: Pointer to TC flower filter structure
 *
 * Add filter rules based on the TC flower filter parameters, using advanced
 * recipes supported by the OS package.
 */
static int
ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
			   struct ice_tc_flower_fltr *tc_fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
	struct ice_adv_rule_info rule_info = {0};
	struct ice_rule_query_data rule_added;
	struct ice_adv_lkup_elem *list;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 flags = tc_fltr->flags;
	struct ice_vsi *dest_vsi;
	struct device *dev;
	u16 lkups_cnt = 0;
	u16 l4_proto = 0;
	int ret = 0;
	u16 i = 0;

	dev = ice_pf_to_dev(pf);
	if (ice_is_safe_mode(pf)) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because driver is in safe mode");
		return -EOPNOTSUPP;
	}

	if (!flags || (flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
				ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
				ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
				ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
				ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT))) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unsupported encap field(s)");
		return -EOPNOTSUPP;
	}

	/* validate forwarding action VSI and queue */
	if (ice_is_forward_action(tc_fltr->action.fltr_act)) {
		dest_vsi = ice_tc_forward_action(vsi, tc_fltr);
		if (IS_ERR(dest_vsi))
			return PTR_ERR(dest_vsi);
	}

	lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);
	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	i = ice_tc_fill_rules(hw, flags, tc_fltr, list, &rule_info, &l4_proto);
	if (i != lkups_cnt) {
		ret = -EINVAL;
		goto exit;
	}

	rule_info.sw_act.fltr_act = tc_fltr->action.fltr_act;
	/* specify the cookie as filter_rule_id */
	rule_info.fltr_rule_id = tc_fltr->cookie;

	switch (tc_fltr->action.fltr_act) {
	case ICE_FWD_TO_VSI:
		rule_info.sw_act.vsi_handle = dest_vsi->idx;
		rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
		rule_info.sw_act.src = hw->pf_id;
		dev_dbg(dev, "add switch rule for TC:%u vsi_idx:%u, lkups_cnt:%u\n",
			tc_fltr->action.fwd.tc.tc_class,
			rule_info.sw_act.vsi_handle, lkups_cnt);
		break;
	case ICE_FWD_TO_Q:
		/* HW queue number in global space */
		rule_info.sw_act.fwd_id.q_id = tc_fltr->action.fwd.q.hw_queue;
		rule_info.sw_act.vsi_handle = dest_vsi->idx;
		rule_info.priority = ICE_SWITCH_FLTR_PRIO_QUEUE;
		rule_info.sw_act.src = hw->pf_id;
		dev_dbg(dev, "add switch rule action to forward to queue:%u (HW queue %u), lkups_cnt:%u\n",
			tc_fltr->action.fwd.q.queue,
			tc_fltr->action.fwd.q.hw_queue, lkups_cnt);
		break;
	case ICE_DROP_PACKET:
		rule_info.sw_act.flag |= ICE_FLTR_RX;
		rule_info.sw_act.src = hw->pf_id;
		rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
		break;
	default:
		ret = -EOPNOTSUPP;
		goto exit;
	}

	ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
	if (ret == -EEXIST) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack,
				   "Unable to add filter because it already exists");
		ret = -EINVAL;
		goto exit;
	} else if (ret) {
		NL_SET_ERR_MSG_MOD(tc_fltr->extack,
				   "Unable to add filter due to error");
		goto exit;
	}

	/* store the output params, which are needed later for removing
	 * advanced switch filter
	 */
	tc_fltr->rid = rule_added.rid;
	tc_fltr->rule_id = rule_added.rule_id;
	tc_fltr->dest_vsi_handle = rule_added.vsi_handle;
	if (tc_fltr->action.fltr_act == ICE_FWD_TO_VSI ||
	    tc_fltr->action.fltr_act == ICE_FWD_TO_Q) {
		tc_fltr->dest_vsi = dest_vsi;
		/* keep track of advanced switch filter for
		 * destination VSI
		 */
		dest_vsi->num_chnl_fltr++;

		/* keeps track of channel filters for PF VSI */
		if (vsi->type == ICE_VSI_PF &&
		    (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
			      ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
			pf->num_dmac_chnl_fltrs++;
	}
	switch (tc_fltr->action.fltr_act) {
	case ICE_FWD_TO_VSI:
		dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is forward to TC %u, rid %u, rule_id %u, vsi_idx %u\n",
			lkups_cnt, flags,
			tc_fltr->action.fwd.tc.tc_class, rule_added.rid,
			rule_added.rule_id, rule_added.vsi_handle);
		break;
	case ICE_FWD_TO_Q:
		dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is forward to queue: %u (HW queue %u), rid %u, rule_id %u\n",
			lkups_cnt, flags, tc_fltr->action.fwd.q.queue,
			tc_fltr->action.fwd.q.hw_queue, rule_added.rid,
			rule_added.rule_id);
		break;
	case ICE_DROP_PACKET:
		dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is drop, rid %u, rule_id %u\n",
			lkups_cnt, flags, rule_added.rid, rule_added.rule_id);
		break;
	default:
		break;
	}
exit:
	kfree(list);
	return ret;
}

/**
 * ice_tc_set_pppoe - Parse PPPoE fields from TC flower filter
 * @match: Pointer to flow match structure
 * @fltr: Pointer to filter structure
 * @headers: Pointer to outer header fields
 *
 * Return: PPP protocol used in filter (ppp_ses or ppp_disc)
 */
static u16
ice_tc_set_pppoe(struct flow_match_pppoe *match,
		 struct ice_tc_flower_fltr *fltr,
		 struct ice_tc_flower_lyr_2_4_hdrs *headers)
{
	if (match->mask->session_id) {
		fltr->flags |= ICE_TC_FLWR_FIELD_PPPOE_SESSID;
		headers->pppoe_hdr.session_id = match->key->session_id;
	}

	if (match->mask->ppp_proto) {
		fltr->flags |= ICE_TC_FLWR_FIELD_PPP_PROTO;
		headers->pppoe_hdr.ppp_proto = match->key->ppp_proto;
	}

	return be16_to_cpu(match->key->type);
}

/**
 * ice_tc_set_ipv4 - Parse IPv4 addresses from TC flower filter
 * @match: Pointer to flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 * @is_encap: set true for tunnel IPv4 address
 */
static int
ice_tc_set_ipv4(struct flow_match_ipv4_addrs *match,
		struct ice_tc_flower_fltr *fltr,
		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
{
	if (match->key->dst) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV4;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV4;
		headers->l3_key.dst_ipv4 = match->key->dst;
		headers->l3_mask.dst_ipv4 = match->mask->dst;
	}
	if (match->key->src) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV4;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV4;
		headers->l3_key.src_ipv4 = match->key->src;
		headers->l3_mask.src_ipv4 = match->mask->src;
	}
	return 0;
}

/**
 * ice_tc_set_ipv6 - Parse IPv6 addresses from TC flower filter
 * @match: Pointer to flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 * @is_encap: set true for tunnel IPv6 address
 */
static int
ice_tc_set_ipv6(struct flow_match_ipv6_addrs *match,
		struct ice_tc_flower_fltr *fltr,
		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
{
	struct ice_tc_l3_hdr *l3_key, *l3_mask;

	/* src and dest IPV6 address should not be LOOPBACK
	 * (0:0:0:0:0:0:0:1), which can be represented as ::1
	 */
	if (ipv6_addr_loopback(&match->key->dst) ||
	    ipv6_addr_loopback(&match->key->src)) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Bad IPv6, addr is LOOPBACK");
		return -EINVAL;
	}
	/* if src/dest IPv6 address is *,* error */
	if (ipv6_addr_any(&match->mask->dst) &&
	    ipv6_addr_any(&match->mask->src)) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Bad src/dest IPv6, addr is any");
		return -EINVAL;
	}
	if (!ipv6_addr_any(&match->mask->dst)) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV6;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV6;
	}
	if (!ipv6_addr_any(&match->mask->src)) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV6;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV6;
	}

	l3_key = &headers->l3_key;
	l3_mask = &headers->l3_mask;

	if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
			   ICE_TC_FLWR_FIELD_SRC_IPV6)) {
		memcpy(&l3_key->src_ipv6_addr, &match->key->src.s6_addr,
		       sizeof(match->key->src.s6_addr));
		memcpy(&l3_mask->src_ipv6_addr, &match->mask->src.s6_addr,
		       sizeof(match->mask->src.s6_addr));
	}
	if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
			   ICE_TC_FLWR_FIELD_DEST_IPV6)) {
		memcpy(&l3_key->dst_ipv6_addr, &match->key->dst.s6_addr,
		       sizeof(match->key->dst.s6_addr));
		memcpy(&l3_mask->dst_ipv6_addr, &match->mask->dst.s6_addr,
		       sizeof(match->mask->dst.s6_addr));
	}

	return 0;
}

/**
 * ice_tc_set_tos_ttl - Parse IP ToS/TTL from TC flower filter
 * @match: Pointer to flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 * @is_encap: set true for tunnel
 */
static void
ice_tc_set_tos_ttl(struct flow_match_ip *match,
		   struct ice_tc_flower_fltr *fltr,
		   struct ice_tc_flower_lyr_2_4_hdrs *headers,
		   bool is_encap)
{
	if (match->mask->tos) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_IP_TOS;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_IP_TOS;

		headers->l3_key.tos = match->key->tos;
		headers->l3_mask.tos = match->mask->tos;
	}

	if (match->mask->ttl) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_IP_TTL;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_IP_TTL;

		headers->l3_key.ttl = match->key->ttl;
		headers->l3_mask.ttl = match->mask->ttl;
	}
}

/**
 * ice_tc_set_port - Parse ports from TC flower filter
 * @match: Flow match structure
 * @fltr: Pointer to filter structure
 * @headers: inner or outer header fields
 * @is_encap: set true for tunnel port
 */
static int
ice_tc_set_port(struct flow_match_ports match,
		struct ice_tc_flower_fltr *fltr,
		struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
{
	if (match.key->dst) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;

		headers->l4_key.dst_port = match.key->dst;
		headers->l4_mask.dst_port = match.mask->dst;
	}
	if (match.key->src) {
		if (is_encap)
			fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT;
		else
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;

		headers->l4_key.src_port = match.key->src;
		headers->l4_mask.src_port = match.mask->src;
	}
	return 0;
}

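/* Return the tunnel netdev relevant for this rule: either the device the
 * filter is attached to (if it is itself a supported tunnel) or a supported
 * tunnel device used as a redirect target, otherwise NULL.
 */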
static struct net_device *
ice_get_tunnel_device(struct net_device *dev, struct flow_rule *rule)
{
	struct flow_action_entry *act;
	int i;

	if (ice_is_tunnel_supported(dev))
		return dev;

	flow_action_for_each(i, act, &rule->action) {
		if (act->id == FLOW_ACTION_REDIRECT &&
		    ice_is_tunnel_supported(act->dev))
			return act->dev;
	}

	return NULL;
}

/**
 * ice_parse_gtp_type - Sets GTP tunnel type to GTP-U or GTP-C
 * @match: Flow match structure
 * @fltr: Pointer to filter structure
 *
 * GTP-C/GTP-U is selected based on destination port number (enc_dst_port).
 * Before calling this function, fltr->tunnel_type should be set to TNL_GTPU,
 * therefore making GTP-U the default choice (when destination port number is
 * not specified).
 */
static int
ice_parse_gtp_type(struct flow_match_ports match,
		   struct ice_tc_flower_fltr *fltr)
{
	u16 dst_port;

	if (match.key->dst) {
		dst_port = be16_to_cpu(match.key->dst);

		switch (dst_port) {
		case 2152:
			break;
		case 2123:
			fltr->tunnel_type = TNL_GTPC;
			break;
		default:
			NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported GTP port number");
			return -EINVAL;
		}
	}

	return 0;
}

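/**
 * ice_parse_tunnel_attr - Parse tunnel (encap) attributes from TC flower filter
 * @dev: Pointer to tunnel device
 * @rule: Pointer to flow rule
 * @fltr: Pointer to filter structure
 *
 * Parse the enc_* keys (key ID, outer IP addresses, ToS/TTL, destination port
 * and GTP options) into the outer headers of the filter.
 */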
static int
ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
		      struct ice_tc_flower_fltr *fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
	struct flow_match_control enc_control;

	fltr->tunnel_type = ice_tc_tun_get_type(dev);
	headers->l3_key.ip_proto = IPPROTO_UDP;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid enc_keyid;

		flow_rule_match_enc_keyid(rule, &enc_keyid);

		if (!enc_keyid.mask->keyid ||
		    enc_keyid.mask->keyid != cpu_to_be32(ICE_TC_FLOWER_MASK_32))
			return -EINVAL;

		fltr->flags |= ICE_TC_FLWR_FIELD_TENANT_ID;
		fltr->tenant_id = enc_keyid.key->keyid;
	}

	flow_rule_match_enc_control(rule, &enc_control);

	if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_enc_ipv4_addrs(rule, &match);
		if (ice_tc_set_ipv4(&match, fltr, headers, true))
			return -EINVAL;
	} else if (enc_control.key->addr_type ==
					FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_enc_ipv6_addrs(rule, &match);
		if (ice_tc_set_ipv6(&match, fltr, headers, true))
			return -EINVAL;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_match_ip match;

		flow_rule_match_enc_ip(rule, &match);
		ice_tc_set_tos_ttl(&match, fltr, headers, true);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) &&
	    fltr->tunnel_type != TNL_VXLAN && fltr->tunnel_type != TNL_GENEVE) {
		struct flow_match_ports match;

		flow_rule_match_enc_ports(rule, &match);

		if (fltr->tunnel_type != TNL_GTPU) {
			if (ice_tc_set_port(match, fltr, headers, true))
				return -EINVAL;
		} else {
			if (ice_parse_gtp_type(match, fltr))
				return -EINVAL;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
		struct flow_match_enc_opts match;

		flow_rule_match_enc_opts(rule, &match);

		memcpy(&fltr->gtp_pdu_info_keys, &match.key->data[0],
		       sizeof(struct gtp_pdu_session_info));

		memcpy(&fltr->gtp_pdu_info_masks, &match.mask->data[0],
		       sizeof(struct gtp_pdu_session_info));

		fltr->flags |= ICE_TC_FLWR_FIELD_ENC_OPTS;
	}

	return 0;
}

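/* As an illustration (the command below is an example only, not taken from
 * this driver's documentation), a rule such as:
 *   tc filter add dev <pf-netdev> ingress protocol ip flower \
 *     dst_ip 192.168.0.2 ip_proto tcp dst_port 80 skip_sw hw_tc 1
 * arrives in ice_parse_cls_flower() below as dissector keys (BASIC, CONTROL,
 * IPV4_ADDRS, PORTS) and is converted into ICE_TC_FLWR_FIELD_* flags plus
 * key/mask values in the filter headers.
 */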
/**
 * ice_parse_cls_flower - Parse TC flower filters provided by kernel
 * @filter_dev: Pointer to device on which filter is being added
 * @vsi: Pointer to the VSI
 * @f: Pointer to struct flow_cls_offload
 * @fltr: Pointer to filter structure
 */
static int
ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
		     struct flow_cls_offload *f,
		     struct ice_tc_flower_fltr *fltr)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
	struct flow_dissector *dissector;
	struct net_device *tunnel_dev;

	dissector = rule->match.dissector;

	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IP) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_PPPOE) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_L2TPV3))) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
		return -EOPNOTSUPP;
	}

	tunnel_dev = ice_get_tunnel_device(filter_dev, rule);
	if (tunnel_dev) {
		int err;

		filter_dev = tunnel_dev;

		err = ice_parse_tunnel_attr(filter_dev, rule, fltr);
		if (err) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to parse TC flower tunnel attributes");
			return err;
		}

		/* header pointers should point to the inner headers, outer
		 * headers were already set by ice_parse_tunnel_attr
		 */
		headers = &fltr->inner_headers;
	} else if (dissector->used_keys &
		   (BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
		    BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
		    BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
		    BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
		NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel");
		return -EOPNOTSUPP;
	} else {
		fltr->tunnel_type = TNL_LAST;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);

		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL || n_proto_key == 0 ||
		    fltr->tunnel_type == TNL_GTPU ||
		    fltr->tunnel_type == TNL_GTPC) {
			n_proto_key = 0;
			n_proto_mask = 0;
		} else {
			fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
		}

		headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
		headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask);
		headers->l3_key.ip_proto = match.key->ip_proto;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);

		if (!is_zero_ether_addr(match.key->dst)) {
			ether_addr_copy(headers->l2_key.dst_mac,
					match.key->dst);
			ether_addr_copy(headers->l2_mask.dst_mac,
					match.mask->dst);
			fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
		}

		if (!is_zero_ether_addr(match.key->src)) {
			ether_addr_copy(headers->l2_key.src_mac,
					match.key->src);
			ether_addr_copy(headers->l2_mask.src_mac,
					match.mask->src);
			fltr->flags |= ICE_TC_FLWR_FIELD_SRC_MAC;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
	    is_vlan_dev(filter_dev)) {
		struct flow_dissector_key_vlan mask;
		struct flow_dissector_key_vlan key;
		struct flow_match_vlan match;

		if (is_vlan_dev(filter_dev)) {
			match.key = &key;
			match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
			match.key->vlan_priority = 0;
			match.mask = &mask;
			memset(match.mask, 0xff, sizeof(*match.mask));
			match.mask->vlan_priority = 0;
		} else {
			flow_rule_match_vlan(rule, &match);
		}

		if (match.mask->vlan_id) {
			if (match.mask->vlan_id == VLAN_VID_MASK) {
				fltr->flags |= ICE_TC_FLWR_FIELD_VLAN;
				headers->vlan_hdr.vlan_id =
					cpu_to_be16(match.key->vlan_id &
						    VLAN_VID_MASK);
			} else {
				NL_SET_ERR_MSG_MOD(fltr->extack, "Bad VLAN mask");
				return -EINVAL;
			}
		}

		if (match.mask->vlan_priority) {
			fltr->flags |= ICE_TC_FLWR_FIELD_VLAN_PRIO;
			headers->vlan_hdr.vlan_prio =
				be16_encode_bits(match.key->vlan_priority,
						 VLAN_PRIO_MASK);
		}

		if (match.mask->vlan_tpid) {
			headers->vlan_hdr.vlan_tpid = match.key->vlan_tpid;
			fltr->flags |= ICE_TC_FLWR_FIELD_VLAN_TPID;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		struct flow_match_vlan match;

		if (!ice_is_dvm_ena(&vsi->back->hw)) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Double VLAN mode is not enabled");
			return -EINVAL;
		}

		flow_rule_match_cvlan(rule, &match);

		if (match.mask->vlan_id) {
			if (match.mask->vlan_id == VLAN_VID_MASK) {
				fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN;
				headers->cvlan_hdr.vlan_id =
					cpu_to_be16(match.key->vlan_id &
						    VLAN_VID_MASK);
			} else {
				NL_SET_ERR_MSG_MOD(fltr->extack,
						   "Bad CVLAN mask");
				return -EINVAL;
			}
		}

		if (match.mask->vlan_priority) {
			fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN_PRIO;
			headers->cvlan_hdr.vlan_prio =
				be16_encode_bits(match.key->vlan_priority,
						 VLAN_PRIO_MASK);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PPPOE)) {
		struct flow_match_pppoe match;

		flow_rule_match_pppoe(rule, &match);
		n_proto_key = ice_tc_set_pppoe(&match, fltr, headers);

		/* If ethertype equals ETH_P_PPP_SES, n_proto might be
		 * overwritten by encapsulated protocol (ppp_proto field) or set
		 * to 0. To correct this, flow_match_pppoe provides the type
		 * field, which contains the actual ethertype (ETH_P_PPP_SES).
		 */
		headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
		headers->l2_mask.n_proto = cpu_to_be16(0xFFFF);
		fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);

		addr_type = match.key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		if (ice_tc_set_ipv4(&match, fltr, headers, false))
			return -EINVAL;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		if (ice_tc_set_ipv6(&match, fltr, headers, false))
			return -EINVAL;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		ice_tc_set_tos_ttl(&match, fltr, headers, false);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_L2TPV3)) {
		struct flow_match_l2tpv3 match;

		flow_rule_match_l2tpv3(rule, &match);

		fltr->flags |= ICE_TC_FLWR_FIELD_L2TPV3_SESSID;
		headers->l2tpv3_hdr.session_id = match.key->session_id;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		if (ice_tc_set_port(match, fltr, headers, false))
			return -EINVAL;
		switch (headers->l3_key.ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
			break;
		default:
			NL_SET_ERR_MSG_MOD(fltr->extack, "Only UDP and TCP transport are supported");
			return -EINVAL;
		}
	}
	return 0;
}
1585 | |
1586 | /** |
1587 | * ice_add_switch_fltr - Add TC flower filters |
1588 | * @vsi: Pointer to VSI |
1589 | * @fltr: Pointer to struct ice_tc_flower_fltr |
1590 | * |
1591 | * Add filter in HW switch block |
1592 | */ |
1593 | static int |
1594 | ice_add_switch_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) |
1595 | { |
1596 | if (fltr->action.fltr_act == ICE_FWD_TO_QGRP) |
1597 | return -EOPNOTSUPP; |
1598 | |
1599 | if (ice_is_eswitch_mode_switchdev(pf: vsi->back)) |
1600 | return ice_eswitch_add_tc_fltr(vsi, fltr); |
1601 | |
1602 | return ice_add_tc_flower_adv_fltr(vsi, tc_fltr: fltr); |
1603 | } |
1604 | |
/**
 * ice_prep_adq_filter - Prepare ADQ filter with the required additional headers
 * @vsi: Pointer to VSI
 * @fltr: Pointer to TC flower filter structure
 *
 * Prepare ADQ filter with the required additional header fields
 */
static int
ice_prep_adq_filter(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	if ((fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) &&
	    (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
			    ICE_TC_FLWR_FIELD_SRC_MAC))) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because filter using tunnel key and inner MAC is unsupported combination");
		return -EOPNOTSUPP;
	}

	/* For ADQ, the filter must include a destination MAC address;
	 * otherwise packets with unrelated MAC addresses are delivered to
	 * ADQ VSIs as long as the remaining filter criteria (such as the
	 * destination IP address and destination/source L4 port) are
	 * satisfied. The code below handles the following cases:
	 * 1. Non-tunnel: if the user specified MAC addresses, use them.
	 * 2. Non-tunnel: if the user did not specify a MAC address, add an
	 *    implicit destination MAC equal to the lower netdev's active
	 *    unicast MAC address.
	 * 3. Tunnel: the TC flower classifier currently has no provision
	 *    for specifying the outer destination MAC, so the driver
	 *    implicitly adds an outer destination MAC equal to the lower
	 *    netdev's active unicast MAC address.
	 */
	if (fltr->tunnel_type != TNL_LAST &&
	    !(fltr->flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC))
		fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DST_MAC;

	if (fltr->tunnel_type == TNL_LAST &&
	    !(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC))
		fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;

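	/* Use the lower netdev's own unicast MAC as the (outer) destination
	 * MAC and match it exactly (all-ones mask).
	 */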
	if (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
			   ICE_TC_FLWR_FIELD_ENC_DST_MAC)) {
		ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
				vsi->netdev->dev_addr);
		eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac);
	}

	/* Make sure the VLAN is already added to the main VSI before
	 * allowing ADQ to add a VLAN-based filter such as MAC + VLAN + L4 port.
	 */
	if (fltr->flags & ICE_TC_FLWR_FIELD_VLAN) {
		u16 vlan_id = be16_to_cpu(fltr->outer_headers.vlan_hdr.vlan_id);

		if (!ice_vlan_fltr_exist(&vsi->back->hw, vlan_id, vsi->idx)) {
			NL_SET_ERR_MSG_MOD(fltr->extack,
					   "Unable to add filter because legacy VLAN filter for specified destination doesn't exist");
			return -EINVAL;
		}
	}
	return 0;
}

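/* Illustrative example (not taken from this driver): an ADQ filter that
 * steers TCP port 80 traffic for one destination IP to traffic class 1
 * could be configured roughly as:
 *
 *   tc filter add dev <pf-netdev> protocol ip ingress prio 1 flower \
 *       dst_ip 192.168.1.10 ip_proto tcp dst_port 80 skip_sw hw_tc 1
 *
 * The device name, address and port are placeholders. The hw_tc argument
 * sets cls_flower->classid, which is what routes the request through
 * ice_handle_tclass_action() below.
 */
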
/**
 * ice_handle_tclass_action - Support directing to a traffic class
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to TC flower offload structure
 * @fltr: Pointer to TC flower filter structure
 *
 * Support directing traffic to a traffic class/queue-set
 */
static int
ice_handle_tclass_action(struct ice_vsi *vsi,
			 struct flow_cls_offload *cls_flower,
			 struct ice_tc_flower_fltr *fltr)
{
	int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);

	/* The user specified hw_tc (which must be non-zero for an ADQ TC);
	 * the action is to forward to that hw_tc (i.e. the ADQ channel
	 * number).
	 */
	if (tc < ICE_CHNL_START_TC) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because of unsupported destination");
		return -EOPNOTSUPP;
	}
	if (!(vsi->all_enatc & BIT(tc))) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because of non-existent destination");
		return -EINVAL;
	}
	fltr->action.fltr_act = ICE_FWD_TO_VSI;
	fltr->action.fwd.tc.tc_class = tc;

	return ice_prep_adq_filter(vsi, fltr);
}

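/**
 * ice_tc_forward_to_queue - Forward a matched flow to a specific Rx queue
 * @vsi: Pointer to VSI
 * @fltr: Pointer to TC flower filter structure
 * @act: Pointer to the flow action entry carrying the Rx queue index
 *
 * Validate the requested Rx queue, record the corresponding HW queue, and
 * prepare an ADQ switch filter when the queue belongs to an ADQ channel VSI.
 */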
static int
ice_tc_forward_to_queue(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
			struct flow_action_entry *act)
{
	struct ice_vsi *ch_vsi = NULL;
	u16 queue = act->rx_queue;

	if (queue >= vsi->num_rxq) {
		NL_SET_ERR_MSG_MOD(fltr->extack,
				   "Unable to add filter because specified queue is invalid");
		return -EINVAL;
	}
	fltr->action.fltr_act = ICE_FWD_TO_Q;
	fltr->action.fwd.q.queue = queue;
	/* determine corresponding HW queue */
	fltr->action.fwd.q.hw_queue = vsi->rxq_map[queue];

	/* If ADQ is configured, and the queue belongs to an ADQ VSI, then
	 * prepare the ADQ switch filter
	 */
	ch_vsi = ice_locate_vsi_using_queue(vsi, fltr->action.fwd.q.queue);
	if (!ch_vsi)
		return -EINVAL;
	fltr->dest_vsi = ch_vsi;
	if (!ice_is_chnl_fltr(fltr))
		return 0;

	return ice_prep_adq_filter(vsi, fltr);
}

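/**
 * ice_tc_parse_action - Parse a single TC action in the non-switchdev path
 * @vsi: Pointer to VSI
 * @fltr: Pointer to TC flower filter structure
 * @act: Pointer to the flow action entry to parse
 *
 * Only forwarding to an Rx queue and dropping the packet are supported;
 * any other action is rejected with -EOPNOTSUPP.
 */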
static int
ice_tc_parse_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
		    struct flow_action_entry *act)
{
	switch (act->id) {
	case FLOW_ACTION_RX_QUEUE_MAPPING:
		/* forward to queue */
		return ice_tc_forward_to_queue(vsi, fltr, act);
	case FLOW_ACTION_DROP:
		fltr->action.fltr_act = ICE_DROP_PACKET;
		return 0;
	default:
		NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported TC action");
		return -EOPNOTSUPP;
	}
}

/**
 * ice_parse_tc_flower_actions - Parse the actions for a TC filter
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to TC flower offload structure
 * @fltr: Pointer to TC flower filter structure
 *
 * Parse the actions for a TC filter
 */
static int
ice_parse_tc_flower_actions(struct ice_vsi *vsi,
			    struct flow_cls_offload *cls_flower,
			    struct ice_tc_flower_fltr *fltr)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
	struct flow_action *flow_action = &rule->action;
	struct flow_action_entry *act;
	int i, err;

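	/* A non-zero classid means the user requested forwarding to a
	 * traffic class (hw_tc); that takes precedence over any explicit
	 * actions in the rule.
	 */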
	if (cls_flower->classid)
		return ice_handle_tclass_action(vsi, cls_flower, fltr);

	if (!flow_action_has_entries(flow_action))
		return -EINVAL;

	flow_action_for_each(i, act, flow_action) {
		if (ice_is_eswitch_mode_switchdev(vsi->back))
			err = ice_eswitch_tc_parse_action(fltr, act);
		else
			err = ice_tc_parse_action(vsi, fltr, act);
		if (err)
			return err;
	}
	return 0;
}

/**
 * ice_del_tc_fltr - deletes a filter from HW table
 * @vsi: Pointer to VSI
 * @fltr: Pointer to struct ice_tc_flower_fltr
 *
 * This function deletes a filter from the HW table and manages book-keeping
 */
static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
	struct ice_rule_query_data rule_rem;
	struct ice_pf *pf = vsi->back;
	int err;

	rule_rem.rid = fltr->rid;
	rule_rem.rule_id = fltr->rule_id;
	rule_rem.vsi_handle = fltr->dest_vsi_handle;
	err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem);
	if (err) {
		if (err == -ENOENT) {
			NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist");
			return -ENOENT;
		}
		NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to delete TC flower filter");
		return -EIO;
	}

	/* update advanced switch filter count for destination
	 * VSI if filter destination was VSI
	 */
	if (fltr->dest_vsi) {
		if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
			fltr->dest_vsi->num_chnl_fltr--;

			/* keeps track of channel filters for PF VSI */
			if (vsi->type == ICE_VSI_PF &&
			    (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
					    ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
				pf->num_dmac_chnl_fltrs--;
		}
	}
	return 0;
}

/**
 * ice_add_tc_fltr - adds a TC flower filter
 * @netdev: Pointer to netdev
 * @vsi: Pointer to VSI
 * @f: Pointer to flower offload structure
 * @__fltr: Pointer to pointer of struct ice_tc_flower_fltr
 *
 * This function parses TC-flower input fields, parses action,
 * and adds a filter.
 */
static int
ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
		struct flow_cls_offload *f,
		struct ice_tc_flower_fltr **__fltr)
{
	struct ice_tc_flower_fltr *fltr;
	int err;

	/* by default, set output to be INVALID */
	*__fltr = NULL;

	fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
	if (!fltr)
		return -ENOMEM;

	fltr->cookie = f->cookie;
	fltr->extack = f->common.extack;
	fltr->src_vsi = vsi;
	INIT_HLIST_NODE(&fltr->tc_flower_node);

	err = ice_parse_cls_flower(netdev, vsi, f, fltr);
	if (err < 0)
		goto err;

	err = ice_parse_tc_flower_actions(vsi, f, fltr);
	if (err < 0)
		goto err;

	err = ice_add_switch_fltr(vsi, fltr);
	if (err < 0)
		goto err;

	/* return the newly created filter */
	*__fltr = fltr;

	return 0;
err:
	kfree(fltr);
	return err;
}

/**
 * ice_find_tc_flower_fltr - Find the TC flower filter in the list
 * @pf: Pointer to PF
 * @cookie: filter specific cookie
 */
static struct ice_tc_flower_fltr *
ice_find_tc_flower_fltr(struct ice_pf *pf, unsigned long cookie)
{
	struct ice_tc_flower_fltr *fltr;

	hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node)
		if (cookie == fltr->cookie)
			return fltr;

	return NULL;
}

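/* Illustrative usage (not taken from this driver): offloading flower
 * filters requires hw-tc-offload to be enabled on the netdev, e.g.:
 *
 *   ethtool -K <netdev> hw-tc-offload on
 *   tc qdisc add dev <netdev> clsact
 *   tc filter add dev <netdev> ingress flower <match> <action>
 *
 * The device name, match and action are placeholders. Without
 * hw-tc-offload (and without the PF's TC flower flag otherwise set),
 * ice_add_cls_flower() below rejects the request.
 */
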
/**
 * ice_add_cls_flower - add TC flower filters
 * @netdev: Pointer to filter device
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to flower offload structure
 */
int
ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
		   struct flow_cls_offload *cls_flower)
{
	struct netlink_ext_ack *extack = cls_flower->common.extack;
	struct net_device *vsi_netdev = vsi->netdev;
	struct ice_tc_flower_fltr *fltr;
	struct ice_pf *pf = vsi->back;
	int err;

	if (ice_is_reset_in_progress(pf->state))
		return -EBUSY;
	if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
		return -EINVAL;

	if (ice_is_port_repr_netdev(netdev))
		vsi_netdev = netdev;

	if (!(vsi_netdev->features & NETIF_F_HW_TC) &&
	    !test_bit(ICE_FLAG_CLS_FLOWER, pf->flags)) {
		/* Based on TC indirect notifications from the kernel, all
		 * ice devices get an instance of the rule from the
		 * higher-level device. Avoid triggering an explicit error
		 * in that case.
		 */
		if (netdev == vsi_netdev)
			NL_SET_ERR_MSG_MOD(extack, "can't apply TC flower filters, turn ON hw-tc-offload and try again");
		return -EINVAL;
	}

	/* avoid duplicate entries; if one exists, return an error */
	fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
	if (fltr) {
		NL_SET_ERR_MSG_MOD(extack, "filter cookie already exists, ignoring");
		return -EEXIST;
	}

	/* prep and add TC-flower filter in HW */
	err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr);
	if (err)
		return err;

	/* add filter into an ordered list */
	hlist_add_head(&fltr->tc_flower_node, &pf->tc_flower_fltr_list);
	return 0;
}

/**
 * ice_del_cls_flower - delete TC flower filters
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to struct flow_cls_offload
 */
int
ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower)
{
	struct ice_tc_flower_fltr *fltr;
	struct ice_pf *pf = vsi->back;
	int err;

	/* find filter */
	fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
	if (!fltr) {
		if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) &&
		    hlist_empty(&pf->tc_flower_fltr_list))
			return 0;

		NL_SET_ERR_MSG_MOD(cls_flower->common.extack, "failed to delete TC flower filter because unable to find it");
		return -EINVAL;
	}

	fltr->extack = cls_flower->common.extack;
	/* delete filter from HW */
	err = ice_del_tc_fltr(vsi, fltr);
	if (err)
		return err;

	/* delete filter from an ordered list */
	hlist_del(&fltr->tc_flower_node);

	/* free the filter node */
	kfree(fltr);

	return 0;
}

/**
 * ice_replay_tc_fltrs - replay TC filters
 * @pf: pointer to PF struct
 */
void ice_replay_tc_fltrs(struct ice_pf *pf)
{
	struct ice_tc_flower_fltr *fltr;
	struct hlist_node *node;

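	/* Re-program every tracked filter into HW, e.g. after a reset has
	 * cleared the HW tables. extack is cleared because there is no
	 * netlink request context during replay.
	 */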
	hlist_for_each_entry_safe(fltr, node,
				  &pf->tc_flower_fltr_list,
				  tc_flower_node) {
		fltr->extack = NULL;
		ice_add_switch_fltr(fltr->src_vsi, fltr);
	}
}