// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */

/* flow director ethtool support for iavf */

#include "iavf.h"

#define GTPU_PORT	2152
#define NAT_T_ESP_PORT	4500
#define PFCP_PORT	8805
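
/*
 * These are the well-known UDP destination ports used to recognize tunnelled
 * payloads behind a UDP header: 2152 carries GTP-U user-plane traffic, 4500
 * carries UDP-encapsulated ESP (IPsec NAT traversal, RFC 3948) and 8805
 * carries PFCP. iavf_fill_fdir_udp_flex_pay_hdr() uses them to pick the
 * parser for the flex words describing the UDP payload.
 */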

static const struct in6_addr ipv6_addr_full_mask = {
	.in6_u = {
		.u6_addr8 = {
			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
			0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		}
	}
};

static const struct in6_addr ipv6_addr_zero_mask = {
	.in6_u = {
		.u6_addr8 = {
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		}
	}
};

/**
 * iavf_validate_fdir_fltr_masks - validate Flow Director filter fields masks
 * @adapter: pointer to the VF adapter structure
 * @fltr: Flow Director filter data structure
 *
 * Returns 0 if all masks of packet fields are either full or empty. Returns
 * an error if at least one mask is partial (for example, an IPv4 source mask
 * of 255.255.255.0).
 */
int iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter,
				  struct iavf_fdir_fltr *fltr)
{
	if (fltr->eth_mask.etype && fltr->eth_mask.etype != htons(U16_MAX))
		goto partial_mask;

	if (fltr->ip_ver == 4) {
		if (fltr->ip_mask.v4_addrs.src_ip &&
		    fltr->ip_mask.v4_addrs.src_ip != htonl(U32_MAX))
			goto partial_mask;

		if (fltr->ip_mask.v4_addrs.dst_ip &&
		    fltr->ip_mask.v4_addrs.dst_ip != htonl(U32_MAX))
			goto partial_mask;

		if (fltr->ip_mask.tos && fltr->ip_mask.tos != U8_MAX)
			goto partial_mask;
	} else if (fltr->ip_ver == 6) {
		if (memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_zero_mask,
			   sizeof(struct in6_addr)) &&
		    memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_full_mask,
			   sizeof(struct in6_addr)))
			goto partial_mask;

		if (memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_zero_mask,
			   sizeof(struct in6_addr)) &&
		    memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_full_mask,
			   sizeof(struct in6_addr)))
			goto partial_mask;

		if (fltr->ip_mask.tclass && fltr->ip_mask.tclass != U8_MAX)
			goto partial_mask;
	}

	if (fltr->ip_mask.proto && fltr->ip_mask.proto != U8_MAX)
		goto partial_mask;

	if (fltr->ip_mask.src_port && fltr->ip_mask.src_port != htons(U16_MAX))
		goto partial_mask;

	if (fltr->ip_mask.dst_port && fltr->ip_mask.dst_port != htons(U16_MAX))
		goto partial_mask;

	if (fltr->ip_mask.spi && fltr->ip_mask.spi != htonl(U32_MAX))
		goto partial_mask;

	if (fltr->ip_mask.l4_header &&
	    fltr->ip_mask.l4_header != htonl(U32_MAX))
		goto partial_mask;

	return 0;

partial_mask:
	dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, partial masks are not supported\n");
	return -EOPNOTSUPP;
}

/**
 * iavf_pkt_udp_no_pay_len - the length of UDP packet without payload
 * @fltr: Flow Director filter data structure
 *
 * Returns the number of bytes occupied by the Ethernet, IPv4/IPv6 and UDP
 * headers, i.e. the offset of the UDP payload from the start of the packet.
 */
static u16 iavf_pkt_udp_no_pay_len(struct iavf_fdir_fltr *fltr)
{
	return sizeof(struct ethhdr) +
	       (fltr->ip_ver == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
	       sizeof(struct udphdr);
}
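
/*
 * The flex words are supplied through ethtool's user-def interface and their
 * offsets are counted from the start of the packet. The helpers below
 * subtract iavf_pkt_udp_no_pay_len() first, so the remaining offset indexes
 * into the UDP payload (the GTP-U, PFCP or NAT-T ESP header).
 */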

/**
 * iavf_fill_fdir_gtpu_hdr - fill the GTP-U protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the GTP-U protocol header is set successfully
 */
static int
iavf_fill_fdir_gtpu_hdr(struct iavf_fdir_fltr *fltr,
			struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1];
	struct virtchnl_proto_hdr *ghdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	struct virtchnl_proto_hdr *ehdr = NULL; /* Extension Header if it exists */
	u16 adj_offs, hdr_offs;
	int i;

	VIRTCHNL_SET_PROTO_HDR_TYPE(ghdr, GTPU_IP);

	adj_offs = iavf_pkt_udp_no_pay_len(fltr);

	for (i = 0; i < fltr->flex_cnt; i++) {
#define IAVF_GTPU_HDR_TEID_OFFS0	4
#define IAVF_GTPU_HDR_TEID_OFFS1	6
#define IAVF_GTPU_HDR_N_PDU_AND_NEXT_EXTHDR_OFFS	10
#define IAVF_GTPU_HDR_NEXT_EXTHDR_TYPE_MASK		0x00FF /* skip N_PDU */
/* PDU Session Container Extension Header (PSC) */
#define IAVF_GTPU_PSC_EXTHDR_TYPE	0x85
#define IAVF_GTPU_HDR_PSC_PDU_TYPE_AND_QFI_OFFS		13
#define IAVF_GTPU_HDR_PSC_PDU_QFI_MASK	0x3F /* skip Type */
#define IAVF_GTPU_EH_QFI_IDX	1

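		/*
		 * The offsets above index into the GTP-U header that starts
		 * right after the UDP header: the 32-bit TEID occupies
		 * payload bytes 4-7, so matching the whole TEID requires
		 * both flex words (offsets 4 and 6). Matching on the QFI
		 * additionally requires the word at offset 10 to announce
		 * the PSC extension header type 0x85.
		 */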
		if (fltr->flex_words[i].offset < adj_offs)
			return -EINVAL;

		hdr_offs = fltr->flex_words[i].offset - adj_offs;

		switch (hdr_offs) {
		case IAVF_GTPU_HDR_TEID_OFFS0:
		case IAVF_GTPU_HDR_TEID_OFFS1: {
			__be16 *pay_word = (__be16 *)ghdr->buffer;

			pay_word[hdr_offs >> 1] = htons(fltr->flex_words[i].word);
			VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(ghdr, GTPU_IP, TEID);
		}
			break;
		case IAVF_GTPU_HDR_N_PDU_AND_NEXT_EXTHDR_OFFS:
			if ((fltr->flex_words[i].word &
			     IAVF_GTPU_HDR_NEXT_EXTHDR_TYPE_MASK) !=
			    IAVF_GTPU_PSC_EXTHDR_TYPE)
				return -EOPNOTSUPP;
			if (!ehdr)
				ehdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
			VIRTCHNL_SET_PROTO_HDR_TYPE(ehdr, GTPU_EH);
			break;
		case IAVF_GTPU_HDR_PSC_PDU_TYPE_AND_QFI_OFFS:
			if (!ehdr)
				return -EINVAL;
			ehdr->buffer[IAVF_GTPU_EH_QFI_IDX] =
				fltr->flex_words[i].word &
				IAVF_GTPU_HDR_PSC_PDU_QFI_MASK;
			VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(ehdr, GTPU_EH, QFI);
			break;
		default:
			return -EINVAL;
		}
	}

	uhdr->field_selector = 0; /* The PF ignores the UDP header fields */

	return 0;
}

/**
 * iavf_fill_fdir_pfcp_hdr - fill the PFCP protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the PFCP protocol header is set successfully
 */
static int
iavf_fill_fdir_pfcp_hdr(struct iavf_fdir_fltr *fltr,
			struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1];
	struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	u16 adj_offs, hdr_offs;
	int i;

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);

	adj_offs = iavf_pkt_udp_no_pay_len(fltr);

	for (i = 0; i < fltr->flex_cnt; i++) {
#define IAVF_PFCP_HDR_SFIELD_AND_MSG_TYPE_OFFS	0
		if (fltr->flex_words[i].offset < adj_offs)
			return -EINVAL;

		hdr_offs = fltr->flex_words[i].offset - adj_offs;

		switch (hdr_offs) {
		case IAVF_PFCP_HDR_SFIELD_AND_MSG_TYPE_OFFS:
			hdr->buffer[0] = (fltr->flex_words[i].word >> 8) & 0xff;
			VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD);
			break;
		default:
			return -EINVAL;
		}
	}

	uhdr->field_selector = 0; /* The PF ignores the UDP header fields */

	return 0;
}
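
/*
 * In UDP-encapsulated ESP (IPsec NAT traversal, RFC 3948) the ESP header
 * directly follows the UDP header, so the 32-bit SPI sits in the first two
 * flex words of the payload. A value of zero is the non-ESP marker used by
 * IKE packets on port 4500 and therefore cannot be matched as an ESP SPI.
 */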

/**
 * iavf_fill_fdir_nat_t_esp_hdr - fill the NAT-T-ESP protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the NAT-T-ESP protocol header is set successfully
 */
static int
iavf_fill_fdir_nat_t_esp_hdr(struct iavf_fdir_fltr *fltr,
			     struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1];
	struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	u16 adj_offs, hdr_offs;
	u32 spi = 0;
	int i;

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);

	adj_offs = iavf_pkt_udp_no_pay_len(fltr);

	for (i = 0; i < fltr->flex_cnt; i++) {
#define IAVF_NAT_T_ESP_SPI_OFFS0	0
#define IAVF_NAT_T_ESP_SPI_OFFS1	2
		if (fltr->flex_words[i].offset < adj_offs)
			return -EINVAL;

		hdr_offs = fltr->flex_words[i].offset - adj_offs;

		switch (hdr_offs) {
		case IAVF_NAT_T_ESP_SPI_OFFS0:
			spi |= fltr->flex_words[i].word << 16;
			break;
		case IAVF_NAT_T_ESP_SPI_OFFS1:
			spi |= fltr->flex_words[i].word;
			break;
		default:
			return -EINVAL;
		}
	}

	if (!spi)
		return -EOPNOTSUPP; /* the IKE header format (SPI 0) is not supported */

	*(__be32 *)hdr->buffer = htonl(spi);
	VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);

	uhdr->field_selector = 0; /* The PF ignores the UDP header fields */

	return 0;
}

/**
 * iavf_fill_fdir_udp_flex_pay_hdr - fill the UDP payload header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the UDP payload defined protocol header is set successfully
 */
static int
iavf_fill_fdir_udp_flex_pay_hdr(struct iavf_fdir_fltr *fltr,
				struct virtchnl_proto_hdrs *proto_hdrs)
{
	int err;

	switch (ntohs(fltr->ip_data.dst_port)) {
	case GTPU_PORT:
		err = iavf_fill_fdir_gtpu_hdr(fltr, proto_hdrs);
		break;
	case NAT_T_ESP_PORT:
		err = iavf_fill_fdir_nat_t_esp_hdr(fltr, proto_hdrs);
		break;
	case PFCP_PORT:
		err = iavf_fill_fdir_pfcp_hdr(fltr, proto_hdrs);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

/**
 * iavf_fill_fdir_ip4_hdr - fill the IPv4 protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the IPv4 protocol header is set successfully
 */
static int
iavf_fill_fdir_ip4_hdr(struct iavf_fdir_fltr *fltr,
		       struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	struct iphdr *iph = (struct iphdr *)hdr->buffer;

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);

	if (fltr->ip_mask.tos == U8_MAX) {
		iph->tos = fltr->ip_data.tos;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
	}

	if (fltr->ip_mask.proto == U8_MAX) {
		iph->protocol = fltr->ip_data.proto;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
	}

	if (fltr->ip_mask.v4_addrs.src_ip == htonl(U32_MAX)) {
		iph->saddr = fltr->ip_data.v4_addrs.src_ip;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
	}

	if (fltr->ip_mask.v4_addrs.dst_ip == htonl(U32_MAX)) {
		iph->daddr = fltr->ip_data.v4_addrs.dst_ip;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
	}

	return 0;
}

/**
 * iavf_fill_fdir_ip6_hdr - fill the IPv6 protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the IPv6 protocol header is set successfully
 */
static int
iavf_fill_fdir_ip6_hdr(struct iavf_fdir_fltr *fltr,
		       struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	struct ipv6hdr *iph = (struct ipv6hdr *)hdr->buffer;

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);

	if (fltr->ip_mask.tclass == U8_MAX) {
		iph->priority = (fltr->ip_data.tclass >> 4) & 0xF;
		iph->flow_lbl[0] = (fltr->ip_data.tclass << 4) & 0xF0;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
	}

	if (fltr->ip_mask.proto == U8_MAX) {
		iph->nexthdr = fltr->ip_data.proto;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
	}

	if (!memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_full_mask,
		    sizeof(struct in6_addr))) {
		memcpy(&iph->saddr, &fltr->ip_data.v6_addrs.src_ip,
		       sizeof(struct in6_addr));
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
	}

	if (!memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_full_mask,
		    sizeof(struct in6_addr))) {
		memcpy(&iph->daddr, &fltr->ip_data.v6_addrs.dst_ip,
		       sizeof(struct in6_addr));
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
	}

	return 0;
}

/**
 * iavf_fill_fdir_tcp_hdr - fill the TCP protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the TCP protocol header is set successfully
 */
static int
iavf_fill_fdir_tcp_hdr(struct iavf_fdir_fltr *fltr,
		       struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	struct tcphdr *tcph = (struct tcphdr *)hdr->buffer;

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);

	if (fltr->ip_mask.src_port == htons(U16_MAX)) {
		tcph->source = fltr->ip_data.src_port;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
	}

	if (fltr->ip_mask.dst_port == htons(U16_MAX)) {
		tcph->dest = fltr->ip_data.dst_port;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
	}

	return 0;
}

/**
 * iavf_fill_fdir_udp_hdr - fill the UDP protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the UDP protocol header is set successfully
 */
static int
iavf_fill_fdir_udp_hdr(struct iavf_fdir_fltr *fltr,
		       struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	struct udphdr *udph = (struct udphdr *)hdr->buffer;

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);

	if (fltr->ip_mask.src_port == htons(U16_MAX)) {
		udph->source = fltr->ip_data.src_port;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
	}

	if (fltr->ip_mask.dst_port == htons(U16_MAX)) {
		udph->dest = fltr->ip_data.dst_port;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
	}

	if (!fltr->flex_cnt)
		return 0;

	return iavf_fill_fdir_udp_flex_pay_hdr(fltr, proto_hdrs);
}

/**
 * iavf_fill_fdir_sctp_hdr - fill the SCTP protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the SCTP protocol header is set successfully
 */
static int
iavf_fill_fdir_sctp_hdr(struct iavf_fdir_fltr *fltr,
			struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	struct sctphdr *sctph = (struct sctphdr *)hdr->buffer;

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);

	if (fltr->ip_mask.src_port == htons(U16_MAX)) {
		sctph->source = fltr->ip_data.src_port;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
	}

	if (fltr->ip_mask.dst_port == htons(U16_MAX)) {
		sctph->dest = fltr->ip_data.dst_port;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
	}

	return 0;
}

/**
 * iavf_fill_fdir_ah_hdr - fill the AH protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the AH protocol header is set successfully
 */
static int
iavf_fill_fdir_ah_hdr(struct iavf_fdir_fltr *fltr,
		      struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	struct ip_auth_hdr *ah = (struct ip_auth_hdr *)hdr->buffer;

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);

	if (fltr->ip_mask.spi == htonl(U32_MAX)) {
		ah->spi = fltr->ip_data.spi;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
	}

	return 0;
}

/**
 * iavf_fill_fdir_esp_hdr - fill the ESP protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the ESP protocol header is set successfully
 */
static int
iavf_fill_fdir_esp_hdr(struct iavf_fdir_fltr *fltr,
		       struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)hdr->buffer;

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);

	if (fltr->ip_mask.spi == htonl(U32_MAX)) {
		esph->spi = fltr->ip_data.spi;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
	}

	return 0;
}
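
/*
 * For the "IPv4/IPv6 other" flow types the only L4 payload match supported
 * below is L2TPv3 over IP (IP protocol 115), whose header starts with a
 * 32-bit session ID; any other 4-byte L4 header match is rejected with
 * -EOPNOTSUPP.
 */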

/**
 * iavf_fill_fdir_l4_hdr - fill the L4 protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the L4 protocol header is set successfully
 */
static int
iavf_fill_fdir_l4_hdr(struct iavf_fdir_fltr *fltr,
		      struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *hdr;
	__be32 *l4_4_data;

	if (!fltr->ip_mask.proto) /* IPv4/IPv6 header only */
		return 0;

	hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	l4_4_data = (__be32 *)hdr->buffer;

	/* L2TPv3 over IP with 'Session ID' */
	if (fltr->ip_data.proto == 115 && fltr->ip_mask.l4_header == htonl(U32_MAX)) {
		VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);

		*l4_4_data = fltr->ip_data.l4_header;
	} else {
		return -EOPNOTSUPP;
	}

	return 0;
}

/**
 * iavf_fill_fdir_eth_hdr - fill the Ethernet protocol header
 * @fltr: Flow Director filter data structure
 * @proto_hdrs: Flow Director protocol headers data structure
 *
 * Returns 0 if the Ethernet protocol header is set successfully
 */
static int
iavf_fill_fdir_eth_hdr(struct iavf_fdir_fltr *fltr,
		       struct virtchnl_proto_hdrs *proto_hdrs)
{
	struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
	struct ethhdr *ehdr = (struct ethhdr *)hdr->buffer;

	VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);

	if (fltr->eth_mask.etype == htons(U16_MAX)) {
		if (fltr->eth_data.etype == htons(ETH_P_IP) ||
		    fltr->eth_data.etype == htons(ETH_P_IPV6))
			return -EOPNOTSUPP;

		ehdr->h_proto = fltr->eth_data.etype;
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE);
	}

	return 0;
}
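
/*
 * The virtchnl rule is built as an ordered list of protocol headers in
 * proto_hdrs: the Ethernet header first, then IPv4/IPv6, then the L4 or
 * tunnelled header, with each fill helper claiming the next slot via
 * proto_hdrs->count. The rule carries a single action taken from
 * fltr->action together with the target queue index.
 */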

/**
 * iavf_fill_fdir_add_msg - fill the Flow Director filter into virtchnl message
 * @adapter: pointer to the VF adapter structure
 * @fltr: Flow Director filter data structure
 *
 * Returns 0 if the add Flow Director virtchnl message is filled successfully
 */
int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
{
	struct virtchnl_fdir_add *vc_msg = &fltr->vc_add_msg;
	struct virtchnl_proto_hdrs *proto_hdrs;
	int err;

	proto_hdrs = &vc_msg->rule_cfg.proto_hdrs;

	err = iavf_fill_fdir_eth_hdr(fltr, proto_hdrs); /* L2 always exists */
	if (err)
		return err;

	switch (fltr->flow_type) {
	case IAVF_FDIR_FLOW_IPV4_TCP:
		err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_tcp_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV4_UDP:
		err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_udp_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV4_SCTP:
		err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_sctp_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV4_AH:
		err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_ah_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV4_ESP:
		err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_esp_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV4_OTHER:
		err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_l4_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV6_TCP:
		err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_tcp_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV6_UDP:
		err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_udp_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV6_SCTP:
		err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_sctp_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV6_AH:
		err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_ah_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV6_ESP:
		err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_esp_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_IPV6_OTHER:
		err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
		      iavf_fill_fdir_l4_hdr(fltr, proto_hdrs);
		break;
	case IAVF_FDIR_FLOW_NON_IP_L2:
		break;
	default:
		err = -EINVAL;
		break;
	}

	if (err)
		return err;

	vc_msg->vsi_id = adapter->vsi.id;
	vc_msg->rule_cfg.action_set.count = 1;
	vc_msg->rule_cfg.action_set.actions[0].type = fltr->action;
	vc_msg->rule_cfg.action_set.actions[0].act_conf.queue.index = fltr->q_index;

	return 0;
}

/**
 * iavf_fdir_flow_proto_name - get the flow protocol name
 * @flow_type: Flow Director filter flow type
 **/
static const char *iavf_fdir_flow_proto_name(enum iavf_fdir_flow_type flow_type)
{
	switch (flow_type) {
	case IAVF_FDIR_FLOW_IPV4_TCP:
	case IAVF_FDIR_FLOW_IPV6_TCP:
		return "TCP";
	case IAVF_FDIR_FLOW_IPV4_UDP:
	case IAVF_FDIR_FLOW_IPV6_UDP:
		return "UDP";
	case IAVF_FDIR_FLOW_IPV4_SCTP:
	case IAVF_FDIR_FLOW_IPV6_SCTP:
		return "SCTP";
	case IAVF_FDIR_FLOW_IPV4_AH:
	case IAVF_FDIR_FLOW_IPV6_AH:
		return "AH";
	case IAVF_FDIR_FLOW_IPV4_ESP:
	case IAVF_FDIR_FLOW_IPV6_ESP:
		return "ESP";
	case IAVF_FDIR_FLOW_IPV4_OTHER:
	case IAVF_FDIR_FLOW_IPV6_OTHER:
		return "Other";
	case IAVF_FDIR_FLOW_NON_IP_L2:
		return "Ethernet";
	default:
		return NULL;
	}
}

/**
 * iavf_print_fdir_fltr
 * @adapter: adapter structure
 * @fltr: Flow Director filter to print
 *
 * Print the Flow Director filter
 **/
void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
{
	const char *proto = iavf_fdir_flow_proto_name(fltr->flow_type);

	if (!proto)
		return;

	switch (fltr->flow_type) {
	case IAVF_FDIR_FLOW_IPV4_TCP:
	case IAVF_FDIR_FLOW_IPV4_UDP:
	case IAVF_FDIR_FLOW_IPV4_SCTP:
		dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 %s: dst_port %hu src_port %hu\n",
			 fltr->loc,
			 &fltr->ip_data.v4_addrs.dst_ip,
			 &fltr->ip_data.v4_addrs.src_ip,
			 proto,
			 ntohs(fltr->ip_data.dst_port),
			 ntohs(fltr->ip_data.src_port));
		break;
	case IAVF_FDIR_FLOW_IPV4_AH:
	case IAVF_FDIR_FLOW_IPV4_ESP:
		dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 %s: SPI %u\n",
			 fltr->loc,
			 &fltr->ip_data.v4_addrs.dst_ip,
			 &fltr->ip_data.v4_addrs.src_ip,
			 proto,
			 ntohl(fltr->ip_data.spi));
		break;
	case IAVF_FDIR_FLOW_IPV4_OTHER:
		dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 proto: %u L4_bytes: 0x%x\n",
			 fltr->loc,
			 &fltr->ip_data.v4_addrs.dst_ip,
			 &fltr->ip_data.v4_addrs.src_ip,
			 fltr->ip_data.proto,
			 ntohl(fltr->ip_data.l4_header));
		break;
	case IAVF_FDIR_FLOW_IPV6_TCP:
	case IAVF_FDIR_FLOW_IPV6_UDP:
	case IAVF_FDIR_FLOW_IPV6_SCTP:
		dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 %s: dst_port %hu src_port %hu\n",
			 fltr->loc,
			 &fltr->ip_data.v6_addrs.dst_ip,
			 &fltr->ip_data.v6_addrs.src_ip,
			 proto,
			 ntohs(fltr->ip_data.dst_port),
			 ntohs(fltr->ip_data.src_port));
		break;
	case IAVF_FDIR_FLOW_IPV6_AH:
	case IAVF_FDIR_FLOW_IPV6_ESP:
		dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 %s: SPI %u\n",
			 fltr->loc,
			 &fltr->ip_data.v6_addrs.dst_ip,
			 &fltr->ip_data.v6_addrs.src_ip,
			 proto,
			 ntohl(fltr->ip_data.spi));
		break;
	case IAVF_FDIR_FLOW_IPV6_OTHER:
		dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 proto: %u L4_bytes: 0x%x\n",
			 fltr->loc,
			 &fltr->ip_data.v6_addrs.dst_ip,
			 &fltr->ip_data.v6_addrs.src_ip,
			 fltr->ip_data.proto,
			 ntohl(fltr->ip_data.l4_header));
		break;
	case IAVF_FDIR_FLOW_NON_IP_L2:
		dev_info(&adapter->pdev->dev, "Rule ID: %u eth_type: 0x%x\n",
			 fltr->loc,
			 ntohs(fltr->eth_data.etype));
		break;
	default:
		break;
	}
}

/**
 * iavf_fdir_is_dup_fltr - test if filter is already in list
 * @adapter: pointer to the VF adapter structure
 * @fltr: Flow Director filter data structure
 *
 * Returns true if the filter is found in the list
 */
bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
{
	struct iavf_fdir_fltr *tmp;
	bool ret = false;

	spin_lock_bh(&adapter->fdir_fltr_lock);
	list_for_each_entry(tmp, &adapter->fdir_list_head, list) {
		if (tmp->flow_type != fltr->flow_type)
			continue;

		if (!memcmp(&tmp->eth_data, &fltr->eth_data,
			    sizeof(fltr->eth_data)) &&
		    !memcmp(&tmp->ip_data, &fltr->ip_data,
			    sizeof(fltr->ip_data)) &&
		    !memcmp(&tmp->ext_data, &fltr->ext_data,
			    sizeof(fltr->ext_data))) {
			ret = true;
			break;
		}
	}
	spin_unlock_bh(&adapter->fdir_fltr_lock);

	return ret;
}

/**
 * iavf_find_fdir_fltr_by_loc - find filter with location
 * @adapter: pointer to the VF adapter structure
 * @loc: location to find.
 *
 * Returns pointer to Flow Director filter if found or null
 */
struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc)
{
	struct iavf_fdir_fltr *rule;

	list_for_each_entry(rule, &adapter->fdir_list_head, list)
		if (rule->loc == loc)
			return rule;

	return NULL;
}
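
/*
 * The filter list is kept sorted by ->loc: iavf_fdir_list_add_fltr() walks
 * until it reaches the first rule whose location is greater than or equal to
 * the new one and inserts the new filter just before it, so traversals of
 * fdir_list_head see the rules in ascending location order.
 */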

/**
 * iavf_fdir_list_add_fltr - add a new node to the flow director filter list
 * @adapter: pointer to the VF adapter structure
 * @fltr: filter node to add to structure
 */
void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
{
	struct iavf_fdir_fltr *rule, *parent = NULL;

	list_for_each_entry(rule, &adapter->fdir_list_head, list) {
		if (rule->loc >= fltr->loc)
			break;
		parent = rule;
	}

	if (parent)
		list_add(&fltr->list, &parent->list);
	else
		list_add(&fltr->list, &adapter->fdir_list_head);
}