// SPDX-License-Identifier: GPL-2.0
/*
 * RSS and Classifier helpers for Marvell PPv2 Network Controller
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */

#include "mvpp2.h"
#include "mvpp2_cls.h"
#include "mvpp2_prs.h"

#define MVPP2_DEF_FLOW(_type, _id, _opts, _ri, _ri_mask)	\
{								\
	.flow_type = _type,					\
	.flow_id = _id,						\
	.supported_hash_opts = _opts,				\
	.prs_ri = {						\
		.ri = _ri,					\
		.ri_mask = _ri_mask				\
	}							\
}
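
/* Table of the supported classifier flows. Each ethtool flow type is
 * covered by several entries, one per combination of parser Result Info
 * bits (L3 protocol variant, VLAN tag presence, fragmentation state).
 */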
static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
	/* TCP over IPv4 flows, not fragmented, no vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG,
		       MVPP22_CLS_HEK_IP4_5T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
		       MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG,
		       MVPP22_CLS_HEK_IP4_5T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
		       MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG,
		       MVPP22_CLS_HEK_IP4_5T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
		       MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	/* TCP over IPv4 flows, not fragmented, with vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
		       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
		       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
		       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK),

	/* TCP over IPv4 flows, fragmented, no vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
		       MVPP22_CLS_HEK_IP4_2T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
		       MVPP22_CLS_HEK_IP4_2T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
		       MVPP22_CLS_HEK_IP4_2T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	/* TCP over IPv4 flows, fragmented, with vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_IP_FRAG_TRUE |
		       MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_IP_FRAG_TRUE |
		       MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_IP_FRAG_TRUE |
		       MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK),

	/* UDP over IPv4 flows, not fragmented, no vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG,
		       MVPP22_CLS_HEK_IP4_5T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
		       MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG,
		       MVPP22_CLS_HEK_IP4_5T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
		       MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG,
		       MVPP22_CLS_HEK_IP4_5T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
		       MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	/* UDP over IPv4 flows, not fragmented, with vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
		       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
		       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
		       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK),

	/* UDP over IPv4 flows, fragmented, no vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
		       MVPP22_CLS_HEK_IP4_2T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
		       MVPP22_CLS_HEK_IP4_2T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
		       MVPP22_CLS_HEK_IP4_2T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	/* UDP over IPv4 flows, fragmented, with vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_IP_FRAG_TRUE |
		       MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_IP_FRAG_TRUE |
		       MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_IP_FRAG_TRUE |
		       MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK),

	/* TCP over IPv6 flows, not fragmented, no vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_UNTAG,
		       MVPP22_CLS_HEK_IP6_5T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
		       MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_UNTAG,
		       MVPP22_CLS_HEK_IP6_5T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
		       MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	/* TCP over IPv6 flows, not fragmented, with vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG,
		       MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG,
		       MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK),

	/* TCP over IPv6 flows, fragmented, no vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
		       MVPP22_CLS_HEK_IP6_2T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
		       MVPP22_CLS_HEK_IP6_2T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	/* TCP over IPv6 flows, fragmented, with vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_TAG,
		       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
		       MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_TAG,
		       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
		       MVPP2_PRS_RI_L4_TCP,
		       MVPP2_PRS_IP_MASK),

	/* UDP over IPv6 flows, not fragmented, no vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_UNTAG,
		       MVPP22_CLS_HEK_IP6_5T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
		       MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_UNTAG,
		       MVPP22_CLS_HEK_IP6_5T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
		       MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	/* UDP over IPv6 flows, not fragmented, with vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG,
		       MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG,
		       MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK),

	/* UDP over IPv6 flows, fragmented, no vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
		       MVPP22_CLS_HEK_IP6_2T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
		       MVPP22_CLS_HEK_IP6_2T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
		       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

	/* UDP over IPv6 flows, fragmented, with vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_TAG,
		       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
		       MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK),

	MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_TAG,
		       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
		       MVPP2_PRS_RI_L4_UDP,
		       MVPP2_PRS_IP_MASK),

	/* IPv4 flows, no vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG,
		       MVPP22_CLS_HEK_IP4_2T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4,
		       MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
	MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG,
		       MVPP22_CLS_HEK_IP4_2T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT,
		       MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
	MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG,
		       MVPP22_CLS_HEK_IP4_2T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER,
		       MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),

	/* IPv4 flows, with vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP4,
		       MVPP2_PRS_RI_L3_PROTO_MASK),
	MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP4_OPT,
		       MVPP2_PRS_RI_L3_PROTO_MASK),
	MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
		       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP4_OTHER,
		       MVPP2_PRS_RI_L3_PROTO_MASK),

	/* IPv6 flows, no vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_UNTAG,
		       MVPP22_CLS_HEK_IP6_2T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
		       MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
	MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_UNTAG,
		       MVPP22_CLS_HEK_IP6_2T,
		       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT,
		       MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),

	/* IPv6 flows, with vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_TAG,
		       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP6,
		       MVPP2_PRS_RI_L3_PROTO_MASK),
	MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_TAG,
		       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
		       MVPP2_PRS_RI_L3_IP6_EXT,
		       MVPP2_PRS_RI_L3_PROTO_MASK),

	/* Non IP flow, no vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_UNTAG,
		       0,
		       MVPP2_PRS_RI_VLAN_NONE,
		       MVPP2_PRS_RI_VLAN_MASK),
	/* Non IP flow, with vlan tag */
	MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG,
		       MVPP22_CLS_HEK_OPT_VLAN,
		       0, 0),
};
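
/* Read the hit counter of the given flow table entry */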
u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index)
{
	mvpp2_write(priv, MVPP2_CTRS_IDX, index);

	return mvpp2_read(priv, MVPP2_CLS_FLOW_TBL_HIT_CTR);
}
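
/* Read a classification flow table entry from the hardware */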
void mvpp2_cls_flow_read(struct mvpp2 *priv, int index,
			 struct mvpp2_cls_flow_entry *fe)
{
	fe->index = index;
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, index);
	fe->data[0] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL0_REG);
	fe->data[1] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL1_REG);
	fe->data[2] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL2_REG);
}

/* Update classification flow table registers */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
				 struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}
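
/* Read the hit counter of the given lookup ID table entry */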
u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index)
{
	mvpp2_write(priv, MVPP2_CTRS_IDX, index);

	return mvpp2_read(priv, MVPP2_CLS_DEC_TBL_HIT_CTR);
}
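
/* Read a classification lookup table entry from the hardware */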
void mvpp2_cls_lookup_read(struct mvpp2 *priv, int lkpid, int way,
			   struct mvpp2_cls_lookup_entry *le)
{
	u32 val;

	val = (way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | lkpid;
	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
	le->way = way;
	le->lkpid = lkpid;
	le->data = mvpp2_read(priv, MVPP2_CLS_LKP_TBL_REG);
}

/* Update classification lookup table register */
static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
				   struct mvpp2_cls_lookup_entry *le)
{
	u32 val;

	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
	mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
}

/* Operations on flow entry */
static int mvpp2_cls_flow_hek_num_get(struct mvpp2_cls_flow_entry *fe)
{
	return fe->data[1] & MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK;
}

static void mvpp2_cls_flow_hek_num_set(struct mvpp2_cls_flow_entry *fe,
				       int num_of_fields)
{
	fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK;
	fe->data[1] |= MVPP2_CLS_FLOW_TBL1_N_FIELDS(num_of_fields);
}

static int mvpp2_cls_flow_hek_get(struct mvpp2_cls_flow_entry *fe,
				  int field_index)
{
	return (fe->data[2] >> MVPP2_CLS_FLOW_TBL2_FLD_OFFS(field_index)) &
		MVPP2_CLS_FLOW_TBL2_FLD_MASK;
}

static void mvpp2_cls_flow_hek_set(struct mvpp2_cls_flow_entry *fe,
				   int field_index, int field_id)
{
	fe->data[2] &= ~MVPP2_CLS_FLOW_TBL2_FLD(field_index,
						MVPP2_CLS_FLOW_TBL2_FLD_MASK);
	fe->data[2] |= MVPP2_CLS_FLOW_TBL2_FLD(field_index, field_id);
}

static void mvpp2_cls_flow_eng_set(struct mvpp2_cls_flow_entry *fe,
				   int engine)
{
	fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_ENG(MVPP2_CLS_FLOW_TBL0_ENG_MASK);
	fe->data[0] |= MVPP2_CLS_FLOW_TBL0_ENG(engine);
}

int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe)
{
	return (fe->data[0] >> MVPP2_CLS_FLOW_TBL0_OFFS) &
		MVPP2_CLS_FLOW_TBL0_ENG_MASK;
}

static void mvpp2_cls_flow_port_id_sel(struct mvpp2_cls_flow_entry *fe,
				       bool from_packet)
{
	if (from_packet)
		fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
	else
		fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
}

static void mvpp2_cls_flow_last_set(struct mvpp2_cls_flow_entry *fe,
				    bool is_last)
{
	fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_LAST;
	fe->data[0] |= !!is_last;
}

static void mvpp2_cls_flow_pri_set(struct mvpp2_cls_flow_entry *fe, int prio)
{
	fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_PRIO(MVPP2_CLS_FLOW_TBL1_PRIO_MASK);
	fe->data[1] |= MVPP2_CLS_FLOW_TBL1_PRIO(prio);
}

static void mvpp2_cls_flow_port_add(struct mvpp2_cls_flow_entry *fe,
				    u32 port)
{
	fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
}

static void mvpp2_cls_flow_port_remove(struct mvpp2_cls_flow_entry *fe,
				       u32 port)
{
	fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
}

static void mvpp2_cls_flow_lu_type_set(struct mvpp2_cls_flow_entry *fe,
				       u8 lu_type)
{
	fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK);
	fe->data[1] |= MVPP2_CLS_FLOW_TBL1_LU_TYPE(lu_type);
}

/* Initialize the parser entry for the given flow */
static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv,
				    const struct mvpp2_cls_flow *flow)
{
	mvpp2_prs_add_flow(priv, flow->flow_id, flow->prs_ri.ri,
			   flow->prs_ri.ri_mask);
}

/* Initialize the Lookup Id table entry for the given flow */
static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
				    const struct mvpp2_cls_flow *flow)
{
	struct mvpp2_cls_lookup_entry le;

	le.way = 0;
	le.lkpid = flow->flow_id;

	/* The default RxQ for this port is set in the C2 lookup */
	le.data = 0;

	/* We point on the first lookup in the sequence for the flow, that is
	 * the C2 lookup.
	 */
	le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_CLS_FLT_FIRST(flow->flow_id));

	/* CLS is always enabled, RSS is enabled/disabled in C2 lookup */
	le.data |= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	mvpp2_cls_lookup_write(priv, &le);
}
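
/* Write a C2 TCAM entry, along with its action and attributes */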
static void mvpp2_cls_c2_write(struct mvpp2 *priv,
			       struct mvpp2_cls_c2_entry *c2)
{
	u32 val;

	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index);

	val = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV);
	if (c2->valid)
		val &= ~MVPP22_CLS_C2_TCAM_INV_BIT;
	else
		val |= MVPP22_CLS_C2_TCAM_INV_BIT;
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_INV, val);

	mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act);

	mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]);
	mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]);
	mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]);
	mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]);

	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]);
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]);
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]);
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]);
	/* Writing TCAM_DATA4 flushes writes to TCAM_DATA0-4 and INV to HW */
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]);
}
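
/* Read a C2 TCAM entry, along with its action and attributes */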
void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
		       struct mvpp2_cls_c2_entry *c2)
{
	u32 val;

	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);

	c2->index = index;

	c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0);
	c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1);
	c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2);
	c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3);
	c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4);

	c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT);

	c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0);
	c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1);
	c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2);
	c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3);

	val = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV);
	c2->valid = !(val & MVPP22_CLS_C2_TCAM_INV_BIT);
}
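
/* Map an ethtool flow type onto the driver's internal flow type, ignoring
 * the FLOW_EXT / FLOW_MAC_EXT / FLOW_RSS modifier flags
 */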
static int mvpp2_cls_ethtool_flow_to_type(int flow_type)
{
	switch (flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
	case ETHER_FLOW:
		return MVPP22_FLOW_ETHERNET;
	case TCP_V4_FLOW:
		return MVPP22_FLOW_TCP4;
	case TCP_V6_FLOW:
		return MVPP22_FLOW_TCP6;
	case UDP_V4_FLOW:
		return MVPP22_FLOW_UDP4;
	case UDP_V6_FLOW:
		return MVPP22_FLOW_UDP6;
	case IPV4_FLOW:
		return MVPP22_FLOW_IP4;
	case IPV6_FLOW:
		return MVPP22_FLOW_IP6;
	default:
		return -EOPNOTSUPP;
	}
}
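
/* Get the C2 TCAM index backing the ethtool rule at location @loc on this
 * port
 */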
static int mvpp2_cls_c2_port_flow_index(struct mvpp2_port *port, int loc)
{
	return MVPP22_CLS_C2_RFS_LOC(port->id, loc);
}

/* Initialize the flow table entries for the given flow */
static void mvpp2_cls_flow_init(struct mvpp2 *priv,
				const struct mvpp2_cls_flow *flow)
{
	struct mvpp2_cls_flow_entry fe;
	int i, pri = 0;

	/* Assign default values to all entries in the flow */
	for (i = MVPP2_CLS_FLT_FIRST(flow->flow_id);
	     i <= MVPP2_CLS_FLT_LAST(flow->flow_id); i++) {
		memset(&fe, 0, sizeof(fe));
		fe.index = i;
		mvpp2_cls_flow_pri_set(&fe, pri++);

		if (i == MVPP2_CLS_FLT_LAST(flow->flow_id))
			mvpp2_cls_flow_last_set(&fe, 1);

		mvpp2_cls_flow_write(priv, &fe);
	}

	/* RSS config C2 lookup */
	mvpp2_cls_flow_read(priv, MVPP2_CLS_FLT_C2_RSS_ENTRY(flow->flow_id),
			    &fe);

	mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2);
	mvpp2_cls_flow_port_id_sel(&fe, true);
	mvpp2_cls_flow_lu_type_set(&fe, MVPP22_CLS_LU_TYPE_ALL);

	/* Add all ports */
	for (i = 0; i < MVPP2_MAX_PORTS; i++)
		mvpp2_cls_flow_port_add(&fe, BIT(i));

	mvpp2_cls_flow_write(priv, &fe);

	/* C3Hx lookups */
	for (i = 0; i < MVPP2_MAX_PORTS; i++) {
		mvpp2_cls_flow_read(priv,
				    MVPP2_CLS_FLT_HASH_ENTRY(i, flow->flow_id),
				    &fe);

		/* Set a default engine. Will be overwritten when setting the
		 * real HEK parameters
		 */
		mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C3HA);
		mvpp2_cls_flow_port_id_sel(&fe, true);
		mvpp2_cls_flow_port_add(&fe, BIT(i));

		mvpp2_cls_flow_write(priv, &fe);
	}
}

/* Adds a field to the Header Extracted Key generation parameters */
static int mvpp2_flow_add_hek_field(struct mvpp2_cls_flow_entry *fe,
				    u32 field_id)
{
	int nb_fields = mvpp2_cls_flow_hek_num_get(fe);

	if (nb_fields == MVPP2_FLOW_N_FIELDS)
		return -EINVAL;

	mvpp2_cls_flow_hek_set(fe, nb_fields, field_id);

	mvpp2_cls_flow_hek_num_set(fe, nb_fields + 1);

	return 0;
}
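
/* Replace the HEK fields of a flow table entry with the fields matching
 * the given hash options
 */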
static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe,
				     unsigned long hash_opts)
{
	u32 field_id;
	int i;

	/* Clear old fields */
	mvpp2_cls_flow_hek_num_set(fe, 0);
	fe->data[2] = 0;

	for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
		switch (BIT(i)) {
		case MVPP22_CLS_HEK_OPT_MAC_DA:
			field_id = MVPP22_CLS_FIELD_MAC_DA;
			break;
		case MVPP22_CLS_HEK_OPT_VLAN:
			field_id = MVPP22_CLS_FIELD_VLAN;
			break;
		case MVPP22_CLS_HEK_OPT_VLAN_PRI:
			field_id = MVPP22_CLS_FIELD_VLAN_PRI;
			break;
		case MVPP22_CLS_HEK_OPT_IP4SA:
			field_id = MVPP22_CLS_FIELD_IP4SA;
			break;
		case MVPP22_CLS_HEK_OPT_IP4DA:
			field_id = MVPP22_CLS_FIELD_IP4DA;
			break;
		case MVPP22_CLS_HEK_OPT_IP6SA:
			field_id = MVPP22_CLS_FIELD_IP6SA;
			break;
		case MVPP22_CLS_HEK_OPT_IP6DA:
			field_id = MVPP22_CLS_FIELD_IP6DA;
			break;
		case MVPP22_CLS_HEK_OPT_L4SIP:
			field_id = MVPP22_CLS_FIELD_L4SIP;
			break;
		case MVPP22_CLS_HEK_OPT_L4DIP:
			field_id = MVPP22_CLS_FIELD_L4DIP;
			break;
		default:
			return -EINVAL;
		}
		if (mvpp2_flow_add_hek_field(fe, field_id))
			return -EINVAL;
	}

	return 0;
}

/* Returns the size, in bits, of the corresponding HEK field */
static int mvpp2_cls_hek_field_size(u32 field)
{
	switch (field) {
	case MVPP22_CLS_HEK_OPT_MAC_DA:
		return 48;
	case MVPP22_CLS_HEK_OPT_VLAN:
		return 12;
	case MVPP22_CLS_HEK_OPT_VLAN_PRI:
		return 3;
	case MVPP22_CLS_HEK_OPT_IP4SA:
	case MVPP22_CLS_HEK_OPT_IP4DA:
		return 32;
	case MVPP22_CLS_HEK_OPT_IP6SA:
	case MVPP22_CLS_HEK_OPT_IP6DA:
		return 128;
	case MVPP22_CLS_HEK_OPT_L4SIP:
	case MVPP22_CLS_HEK_OPT_L4DIP:
		return 16;
	default:
		return -1;
	}
}
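
/* Get the description of the given classifier flow */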
const struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
{
	if (flow >= MVPP2_N_PRS_FLOWS)
		return NULL;

	return &cls_flows[flow];
}

/* Set the hash generation options for the given traffic flow.
 * One traffic flow (in the ethtool sense) has multiple classification flows,
 * to handle specific cases such as fragmentation, or the presence of a
 * VLAN / DSA Tag.
 *
 * Each of these individual flows has different constraints, for example we
 * can't hash fragmented packets on L4 data (else we would risk packet
 * re-ordering), so each classification flow masks the requested options
 * with the ones it supports.
 */
static int mvpp2_port_rss_hash_opts_set(struct mvpp2_port *port, int flow_type,
					u16 requested_opts)
{
	const struct mvpp2_cls_flow *flow;
	struct mvpp2_cls_flow_entry fe;
	int i, engine, flow_index;
	u16 hash_opts;

	for_each_cls_flow_id_with_type(i, flow_type) {
		flow = mvpp2_cls_flow_get(i);
		if (!flow)
			return -EINVAL;

		flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id);

		mvpp2_cls_flow_read(port->priv, flow_index, &fe);

		hash_opts = flow->supported_hash_opts & requested_opts;

		/* Use C3HB engine to access L4 infos. This adds L4 infos to the
		 * hash parameters
		 */
		if (hash_opts & MVPP22_CLS_HEK_L4_OPTS)
			engine = MVPP22_CLS_ENGINE_C3HB;
		else
			engine = MVPP22_CLS_ENGINE_C3HA;

		if (mvpp2_flow_set_hek_fields(&fe, hash_opts))
			return -EINVAL;

		mvpp2_cls_flow_eng_set(&fe, engine);

		mvpp2_cls_flow_write(port->priv, &fe);
	}

	return 0;
}
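
/* Build the bitmap of hash options matching the HEK fields currently
 * programmed in the given flow table entry
 */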
u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe)
{
	u16 hash_opts = 0;
	int n_fields, i, field;

	n_fields = mvpp2_cls_flow_hek_num_get(fe);

	for (i = 0; i < n_fields; i++) {
		field = mvpp2_cls_flow_hek_get(fe, i);

		switch (field) {
		case MVPP22_CLS_FIELD_MAC_DA:
			hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
			break;
		case MVPP22_CLS_FIELD_VLAN:
			hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
			break;
		case MVPP22_CLS_FIELD_VLAN_PRI:
			hash_opts |= MVPP22_CLS_HEK_OPT_VLAN_PRI;
			break;
		case MVPP22_CLS_FIELD_L3_PROTO:
			hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
			break;
		case MVPP22_CLS_FIELD_IP4SA:
			hash_opts |= MVPP22_CLS_HEK_OPT_IP4SA;
			break;
		case MVPP22_CLS_FIELD_IP4DA:
			hash_opts |= MVPP22_CLS_HEK_OPT_IP4DA;
			break;
		case MVPP22_CLS_FIELD_IP6SA:
			hash_opts |= MVPP22_CLS_HEK_OPT_IP6SA;
			break;
		case MVPP22_CLS_FIELD_IP6DA:
			hash_opts |= MVPP22_CLS_HEK_OPT_IP6DA;
			break;
		case MVPP22_CLS_FIELD_L4SIP:
			hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
			break;
		case MVPP22_CLS_FIELD_L4DIP:
			hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
			break;
		default:
			break;
		}
	}
	return hash_opts;
}

/* Returns the hash opts for this flow. There are several classifier flows
 * for one traffic flow, this returns an aggregation of all configurations.
 */
static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type)
{
	const struct mvpp2_cls_flow *flow;
	struct mvpp2_cls_flow_entry fe;
	int i, flow_index;
	u16 hash_opts = 0;

	for_each_cls_flow_id_with_type(i, flow_type) {
		flow = mvpp2_cls_flow_get(i);
		if (!flow)
			return 0;

		flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id);

		mvpp2_cls_flow_read(port->priv, flow_index, &fe);

		hash_opts |= mvpp2_flow_get_hek_fields(&fe);
	}

	return hash_opts;
}
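
/* Initialize the parser, lookup and flow table entries for all known
 * classifier flows
 */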
static void mvpp2_cls_port_init_flows(struct mvpp2 *priv)
{
	const struct mvpp2_cls_flow *flow;
	int i;

	for (i = 0; i < MVPP2_N_PRS_FLOWS; i++) {
		flow = mvpp2_cls_flow_get(i);
		if (!flow)
			break;

		mvpp2_cls_flow_prs_init(priv, flow);
		mvpp2_cls_flow_lkp_init(priv, flow);
		mvpp2_cls_flow_init(priv, flow);
	}
}
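
/* Install the per-port C2 entry taking the RSS decision and setting the
 * default rx queue
 */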
static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
{
	struct mvpp2_cls_c2_entry c2;
	u8 qh, ql, pmap;

	memset(&c2, 0, sizeof(c2));

	c2.index = MVPP22_CLS_C2_RSS_ENTRY(port->id);

	pmap = BIT(port->id);
	c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
	c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));

	/* Match on Lookup Type */
	c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK));
	c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(MVPP22_CLS_LU_TYPE_ALL);

	/* Update RSS status after matching this entry */
	c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);

	/* Mark packet as "forwarded to software", needed for RSS */
	c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK);

	/* Configure the default rx queue: Update Queue Low and Queue High, but
	 * don't lock, since the rx queue selection might be overridden by RSS
	 */
	c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD) |
		  MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD);

	qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
	ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK;

	c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
		     MVPP22_CLS_C2_ATTR0_QLOW(ql);

	c2.valid = true;

	mvpp2_cls_c2_write(port->priv, &c2);
}

/* Classifier default initialization */
void mvpp2_cls_init(struct mvpp2 *priv)
{
	struct mvpp2_cls_lookup_entry le;
	struct mvpp2_cls_flow_entry fe;
	struct mvpp2_cls_c2_entry c2;
	int index;

	/* Enable classifier */
	mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);

	/* Clear classifier flow table */
	memset(&fe.data, 0, sizeof(fe.data));
	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
		fe.index = index;
		mvpp2_cls_flow_write(priv, &fe);
	}

	/* Clear classifier lookup table */
	le.data = 0;
	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
		le.lkpid = index;
		le.way = 0;
		mvpp2_cls_lookup_write(priv, &le);

		le.way = 1;
		mvpp2_cls_lookup_write(priv, &le);
	}

	/* Clear C2 TCAM engine table */
	memset(&c2, 0, sizeof(c2));
	c2.valid = false;
	for (index = 0; index < MVPP22_CLS_C2_N_ENTRIES; index++) {
		c2.index = index;
		mvpp2_cls_c2_write(priv, &c2);
	}

	/* Disable the FIFO stages in C2 engine, which are only used in BIST
	 * mode
	 */
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_CTRL,
		    MVPP22_CLS_C2_TCAM_BYPASS_FIFO);

	mvpp2_cls_port_init_flows(priv);
}

void mvpp2_cls_port_config(struct mvpp2_port *port)
{
	struct mvpp2_cls_lookup_entry le;
	u32 val;

	/* Set way for the port */
	val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);

	/* Pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->id;
	le.way = 0;
	le.data = 0;

	/* Set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= port->first_rxq;

	/* Disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* Update lookup ID table entry */
	mvpp2_cls_lookup_write(port->priv, &le);

	mvpp2_port_c2_cls_init(port);
}
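
/* Read the hit counter of the given C2 entry */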
u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index)
{
	mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2_index);

	return mvpp2_read(priv, MVPP22_CLS_C2_HIT_CTR);
}
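
/* Enable RSS on the port's C2 entry, steering packets to the RSS table
 * selected by @ctx
 */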
static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port, u32 ctx)
{
	struct mvpp2_cls_c2_entry c2;
	u8 qh, ql;

	mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);

	/* The RxQ number is used to select the RSS table. In that case, we set
	 * it to be the ctx number.
	 */
	qh = (ctx >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
	ql = ctx & MVPP22_CLS_C2_ATTR0_QLOW_MASK;

	c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
		     MVPP22_CLS_C2_ATTR0_QLOW(ql);

	c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;

	mvpp2_cls_c2_write(port->priv, &c2);
}
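
/* Disable RSS on the port's C2 entry and restore its default rx queue */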
static void mvpp2_rss_port_c2_disable(struct mvpp2_port *port)
{
	struct mvpp2_cls_c2_entry c2;
	u8 qh, ql;

	mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);

	/* Reset the default destination RxQ to the port's first rx queue. */
	qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
	ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK;

	c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
		     MVPP22_CLS_C2_ATTR0_QLOW(ql);

	c2.attr[2] &= ~MVPP22_CLS_C2_ATTR2_RSS_EN;

	mvpp2_cls_c2_write(port->priv, &c2);
}
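
/* Translate a per-port RSS context number into a global RSS table index */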
static inline int mvpp22_rss_ctx(struct mvpp2_port *port, int port_rss_ctx)
{
	return port->rss_ctx[port_rss_ctx];
}

int mvpp22_port_rss_enable(struct mvpp2_port *port)
{
	if (mvpp22_rss_ctx(port, 0) < 0)
		return -EINVAL;

	mvpp2_rss_port_c2_enable(port, mvpp22_rss_ctx(port, 0));

	return 0;
}

int mvpp22_port_rss_disable(struct mvpp2_port *port)
{
	if (mvpp22_rss_ctx(port, 0) < 0)
		return -EINVAL;

	mvpp2_rss_port_c2_disable(port);

	return 0;
}

static void mvpp22_port_c2_lookup_disable(struct mvpp2_port *port, int entry)
{
	struct mvpp2_cls_c2_entry c2;

	mvpp2_cls_c2_read(port->priv, entry, &c2);

	/* Clear the port map so that the entry doesn't match anymore */
	c2.tcam[4] &= ~(MVPP22_CLS_C2_PORT_ID(BIT(port->id)));

	mvpp2_cls_c2_write(port->priv, &c2);
}

/* Set CPU queue number for oversize packets */
void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);

	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));

	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
	val &= ~MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}
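
/* Program a C2 TCAM entry implementing the given ethtool flow steering
 * rule (drop, steer to queue, or steer to an RSS context)
 */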
static int mvpp2_port_c2_tcam_rule_add(struct mvpp2_port *port,
				       struct mvpp2_rfs_rule *rule)
{
	struct flow_action_entry *act;
	struct mvpp2_cls_c2_entry c2;
	u8 qh, ql, pmap;
	int index, ctx;

	if (!flow_action_basic_hw_stats_check(&rule->flow->action, NULL))
		return -EOPNOTSUPP;

	memset(&c2, 0, sizeof(c2));

	index = mvpp2_cls_c2_port_flow_index(port, rule->loc);
	if (index < 0)
		return -EINVAL;
	c2.index = index;

	act = &rule->flow->action.entries[0];

	rule->c2_index = c2.index;

	c2.tcam[3] = (rule->c2_tcam & 0xffff) |
		     ((rule->c2_tcam_mask & 0xffff) << 16);
	c2.tcam[2] = ((rule->c2_tcam >> 16) & 0xffff) |
		     (((rule->c2_tcam_mask >> 16) & 0xffff) << 16);
	c2.tcam[1] = ((rule->c2_tcam >> 32) & 0xffff) |
		     (((rule->c2_tcam_mask >> 32) & 0xffff) << 16);
	c2.tcam[0] = ((rule->c2_tcam >> 48) & 0xffff) |
		     (((rule->c2_tcam_mask >> 48) & 0xffff) << 16);

	pmap = BIT(port->id);
	c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
	c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));

	/* Match on Lookup Type */
	c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK));
	c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(rule->loc);

	if (act->id == FLOW_ACTION_DROP) {
		c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_RED_LOCK);
	} else {
		/* We want to keep the default color derived from the Header
		 * Parser drop entries, for VLAN and MAC filtering. This will
		 * assign a default color of Green or Red, and we want matches
		 * with a non-drop action to keep that color.
		 */
		c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_NO_UPD_LOCK);

		/* Update RSS status after matching this entry */
		if (act->queue.ctx)
			c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;

		/* Always lock the RSS_EN decision. We might have high prio
		 * rules steering to an RXQ, and a lower one steering to RSS,
		 * we don't want the low prio RSS rule overwriting this flag.
		 */
		c2.act |= MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);

		/* Mark packet as "forwarded to software", needed for RSS */
		c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK);

		c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD_LOCK) |
			  MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD_LOCK);

		if (act->queue.ctx) {
			/* Get the global ctx number */
			ctx = mvpp22_rss_ctx(port, act->queue.ctx);
			if (ctx < 0)
				return -EINVAL;

			qh = (ctx >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
			ql = ctx & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
		} else {
			qh = ((act->queue.index + port->first_rxq) >> 3) &
			     MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
			ql = (act->queue.index + port->first_rxq) &
			     MVPP22_CLS_C2_ATTR0_QLOW_MASK;
		}

		c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
			     MVPP22_CLS_C2_ATTR0_QLOW(ql);
	}

	c2.valid = true;

	mvpp2_cls_c2_write(port->priv, &c2);

	return 0;
}

static int mvpp2_port_c2_rfs_rule_insert(struct mvpp2_port *port,
					 struct mvpp2_rfs_rule *rule)
{
	return mvpp2_port_c2_tcam_rule_add(port, rule);
}

static int mvpp2_port_cls_rfs_rule_remove(struct mvpp2_port *port,
					  struct mvpp2_rfs_rule *rule)
{
	const struct mvpp2_cls_flow *flow;
	struct mvpp2_cls_flow_entry fe;
	int index, i;

	for_each_cls_flow_id_containing_type(i, rule->flow_type) {
		flow = mvpp2_cls_flow_get(i);
		if (!flow)
			return 0;

		index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc);

		mvpp2_cls_flow_read(port->priv, index, &fe);
		mvpp2_cls_flow_port_remove(&fe, BIT(port->id));
		mvpp2_cls_flow_write(port->priv, &fe);
	}

	if (rule->c2_index >= 0)
		mvpp22_port_c2_lookup_disable(port, rule->c2_index);

	return 0;
}
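
/* Insert an ethtool flow steering rule: program its C2 entry, then plug
 * that entry into every classifier flow matching the rule's flow type
 */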
static int mvpp2_port_flt_rfs_rule_insert(struct mvpp2_port *port,
					  struct mvpp2_rfs_rule *rule)
{
	const struct mvpp2_cls_flow *flow;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_cls_flow_entry fe;
	int index, ret, i;

	if (rule->engine != MVPP22_CLS_ENGINE_C2)
		return -EOPNOTSUPP;

	ret = mvpp2_port_c2_rfs_rule_insert(port, rule);
	if (ret)
		return ret;

	for_each_cls_flow_id_containing_type(i, rule->flow_type) {
		flow = mvpp2_cls_flow_get(i);
		if (!flow)
			return 0;

		if ((rule->hek_fields & flow->supported_hash_opts) != rule->hek_fields)
			continue;

		index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc);

		mvpp2_cls_flow_read(priv, index, &fe);
		mvpp2_cls_flow_eng_set(&fe, rule->engine);
		mvpp2_cls_flow_port_id_sel(&fe, true);
		mvpp2_flow_set_hek_fields(&fe, rule->hek_fields);
		mvpp2_cls_flow_lu_type_set(&fe, rule->loc);
		mvpp2_cls_flow_port_add(&fe, 0xf);

		mvpp2_cls_flow_write(priv, &fe);
	}

	return 0;
}
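
/* Build the 64-bit C2 TCAM key and mask from the dissector keys of the
 * flow rule
 */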
static int mvpp2_cls_c2_build_match(struct mvpp2_rfs_rule *rule)
{
	struct flow_rule *flow = rule->flow;
	int offs = 0;

	/* The order of insertion in C2 tcam must match the order in which
	 * the fields are found in the header
	 */
	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(flow, &match);
		if (match.mask->vlan_id) {
			rule->hek_fields |= MVPP22_CLS_HEK_OPT_VLAN;

			rule->c2_tcam |= ((u64)match.key->vlan_id) << offs;
			rule->c2_tcam_mask |= ((u64)match.mask->vlan_id) << offs;

			/* Don't update the offset yet */
		}

		if (match.mask->vlan_priority) {
			rule->hek_fields |= MVPP22_CLS_HEK_OPT_VLAN_PRI;

			/* VLAN pri is always at offset 13 relative to the
			 * current offset
			 */
			rule->c2_tcam |= ((u64)match.key->vlan_priority) <<
				(offs + 13);
			rule->c2_tcam_mask |= ((u64)match.mask->vlan_priority) <<
				(offs + 13);
		}

		if (match.mask->vlan_dei)
			return -EOPNOTSUPP;

		/* vlan id and prio always seem to take a full 16-bit slot in
		 * the Header Extracted Key.
		 */
		offs += 16;
	}

	if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(flow, &match);
		if (match.mask->src) {
			rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4SIP;

			rule->c2_tcam |= ((u64)ntohs(match.key->src)) << offs;
			rule->c2_tcam_mask |= ((u64)ntohs(match.mask->src)) << offs;
			offs += mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4SIP);
		}

		if (match.mask->dst) {
			rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4DIP;

			rule->c2_tcam |= ((u64)ntohs(match.key->dst)) << offs;
			rule->c2_tcam_mask |= ((u64)ntohs(match.mask->dst)) << offs;
			offs += mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4DIP);
		}
	}

	if (hweight16(rule->hek_fields) > MVPP2_FLOW_N_FIELDS)
		return -EOPNOTSUPP;

	return 0;
}
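
/* Validate an ethtool flow steering rule and select the engine to use */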
static int mvpp2_cls_rfs_parse_rule(struct mvpp2_rfs_rule *rule)
{
	struct flow_rule *flow = rule->flow;
	struct flow_action_entry *act;

	if (!flow_action_basic_hw_stats_check(&rule->flow->action, NULL))
		return -EOPNOTSUPP;

	act = &flow->action.entries[0];
	if (act->id != FLOW_ACTION_QUEUE && act->id != FLOW_ACTION_DROP)
		return -EOPNOTSUPP;

	/* When both an RSS context and a queue index are set, the index
	 * is considered as an offset to be added to the indirection table
	 * entries. We don't support this, so reject this rule.
	 */
	if (act->queue.ctx && act->queue.index)
		return -EOPNOTSUPP;

	/* For now, only use the C2 engine which has a HEK size limited to 64
	 * bits for TCAM matching.
	 */
	rule->engine = MVPP22_CLS_ENGINE_C2;

	if (mvpp2_cls_c2_build_match(rule))
		return -EINVAL;

	return 0;
}

int mvpp2_ethtool_cls_rule_get(struct mvpp2_port *port,
			       struct ethtool_rxnfc *rxnfc)
{
	struct mvpp2_ethtool_fs *efs;

	if (rxnfc->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
		return -EINVAL;

	efs = port->rfs_rules[rxnfc->fs.location];
	if (!efs)
		return -ENOENT;

	memcpy(rxnfc, &efs->rxnfc, sizeof(efs->rxnfc));

	return 0;
}

int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port,
			       struct ethtool_rxnfc *info)
{
	struct ethtool_rx_flow_spec_input input = {};
	struct ethtool_rx_flow_rule *ethtool_rule;
	struct mvpp2_ethtool_fs *efs, *old_efs;
	int ret = 0;

	if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
		return -EINVAL;

	efs = kzalloc(sizeof(*efs), GFP_KERNEL);
	if (!efs)
		return -ENOMEM;

	input.fs = &info->fs;

	/* We need to manually set the rss_ctx, since this info isn't present
	 * in info->fs
	 */
	if (info->fs.flow_type & FLOW_RSS)
		input.rss_ctx = info->rss_context;

	ethtool_rule = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(ethtool_rule)) {
		ret = PTR_ERR(ethtool_rule);
		goto clean_rule;
	}

	efs->rule.flow = ethtool_rule->rule;
	efs->rule.flow_type = mvpp2_cls_ethtool_flow_to_type(info->fs.flow_type);
	if (efs->rule.flow_type < 0) {
		ret = efs->rule.flow_type;
		goto clean_rule;
	}

	ret = mvpp2_cls_rfs_parse_rule(&efs->rule);
	if (ret)
		goto clean_eth_rule;

	efs->rule.loc = info->fs.location;

	/* Replace an already existing rule */
	if (port->rfs_rules[efs->rule.loc]) {
		old_efs = port->rfs_rules[efs->rule.loc];
		ret = mvpp2_port_cls_rfs_rule_remove(port, &old_efs->rule);
		if (ret)
			goto clean_eth_rule;
		kfree(old_efs);
		port->n_rfs_rules--;
	}

	ret = mvpp2_port_flt_rfs_rule_insert(port, &efs->rule);
	if (ret)
		goto clean_eth_rule;

	ethtool_rx_flow_rule_destroy(ethtool_rule);
	efs->rule.flow = NULL;

	memcpy(&efs->rxnfc, info, sizeof(*info));
	port->rfs_rules[efs->rule.loc] = efs;
	port->n_rfs_rules++;

	return ret;

clean_eth_rule:
	ethtool_rx_flow_rule_destroy(ethtool_rule);
clean_rule:
	kfree(efs);
	return ret;
}

int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port,
			       struct ethtool_rxnfc *info)
{
	struct mvpp2_ethtool_fs *efs;
	int ret;

	if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
		return -EINVAL;

	efs = port->rfs_rules[info->fs.location];
	if (!efs)
		return -EINVAL;

	/* Remove the rule from the engines. */
	ret = mvpp2_port_cls_rfs_rule_remove(port, &efs->rule);
	if (ret)
		return ret;

	port->n_rfs_rules--;
	port->rfs_rules[info->fs.location] = NULL;
	kfree(efs);

	return 0;
}

static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq)
{
	int nrxqs, cpu, cpus = num_possible_cpus();

	/* Number of RXQs per CPU */
	nrxqs = port->nrxqs / cpus;

	/* CPU that will handle this rx queue */
	cpu = rxq / nrxqs;

	if (!cpu_online(cpu))
		return port->first_rxq;

	/* Indirection to better distribute the packets on the CPUs when
	 * configuring the RSS queues.
	 */
	return port->first_rxq + ((rxq * nrxqs + rxq / cpus) % port->nrxqs);
}
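
/* Program the hardware RSS table of the given context with the rx queues
 * from the software indirection table
 */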
static void mvpp22_rss_fill_table(struct mvpp2_port *port,
				  struct mvpp2_rss_table *table,
				  u32 rss_ctx)
{
	struct mvpp2 *priv = port->priv;
	int i;

	for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
		u32 sel = MVPP22_RSS_INDEX_TABLE(rss_ctx) |
			  MVPP22_RSS_INDEX_TABLE_ENTRY(i);
		mvpp2_write(priv, MVPP22_RSS_INDEX, sel);

		mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY,
			    mvpp22_rxfh_indir(port, table->indir[i]));
	}
}
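
/* Allocate the first free hardware RSS table and configure its width */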
static int mvpp22_rss_context_create(struct mvpp2_port *port, u32 *rss_ctx)
{
	struct mvpp2 *priv = port->priv;
	u32 ctx;

	/* Find the first free RSS table */
	for (ctx = 0; ctx < MVPP22_N_RSS_TABLES; ctx++) {
		if (!priv->rss_tables[ctx])
			break;
	}

	if (ctx == MVPP22_N_RSS_TABLES)
		return -EINVAL;

	priv->rss_tables[ctx] = kzalloc(sizeof(*priv->rss_tables[ctx]),
					GFP_KERNEL);
	if (!priv->rss_tables[ctx])
		return -ENOMEM;

	*rss_ctx = ctx;

	/* Set the table width: replace the whole classifier Rx queue number
	 * with the ones configured in RSS table entries.
	 */
	mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(ctx));
	mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);

	mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(ctx));
	mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE, MVPP22_RSS_TABLE_POINTER(ctx));

	return 0;
}
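
/* Create a per-port RSS context, backed by a newly allocated hardware RSS
 * table. Per-port context 0 is the default one, created at init time.
 */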
int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 *port_ctx)
{
	u32 rss_ctx;
	int ret, i;

	ret = mvpp22_rss_context_create(port, &rss_ctx);
	if (ret)
		return ret;

	/* Find the first available context number in the port, starting from 1.
	 * Context 0 on each port is reserved for the default context.
	 */
	for (i = 1; i < MVPP22_N_RSS_TABLES; i++) {
		if (port->rss_ctx[i] < 0)
			break;
	}

	if (i == MVPP22_N_RSS_TABLES)
		return -EINVAL;

	port->rss_ctx[i] = rss_ctx;
	*port_ctx = i;

	return 0;
}
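
/* Get the software RSS table matching the given global RSS context */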
static struct mvpp2_rss_table *mvpp22_rss_table_get(struct mvpp2 *priv,
						    int rss_ctx)
{
	if (rss_ctx < 0 || rss_ctx >= MVPP22_N_RSS_TABLES)
		return NULL;

	return priv->rss_tables[rss_ctx];
}
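
/* Free a per-port RSS context along with its hardware table, removing any
 * classification rule that still uses it
 */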
int mvpp22_port_rss_ctx_delete(struct mvpp2_port *port, u32 port_ctx)
{
	struct mvpp2 *priv = port->priv;
	struct ethtool_rxnfc *rxnfc;
	int i, rss_ctx, ret;

	rss_ctx = mvpp22_rss_ctx(port, port_ctx);

	if (rss_ctx < 0 || rss_ctx >= MVPP22_N_RSS_TABLES)
		return -EINVAL;

	/* Invalidate any active classification rule that uses this context */
	for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
		if (!port->rfs_rules[i])
			continue;

		rxnfc = &port->rfs_rules[i]->rxnfc;
		if (!(rxnfc->fs.flow_type & FLOW_RSS) ||
		    rxnfc->rss_context != port_ctx)
			continue;

		ret = mvpp2_ethtool_cls_rule_del(port, rxnfc);
		if (ret) {
			netdev_warn(port->dev,
				    "couldn't remove classification rule %d associated to this context",
				    rxnfc->fs.location);
		}
	}

	kfree(priv->rss_tables[rss_ctx]);

	priv->rss_tables[rss_ctx] = NULL;
	port->rss_ctx[port_ctx] = -1;

	return 0;
}

int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 port_ctx,
				  const u32 *indir)
{
	int rss_ctx = mvpp22_rss_ctx(port, port_ctx);
	struct mvpp2_rss_table *rss_table = mvpp22_rss_table_get(port->priv,
								 rss_ctx);

	if (!rss_table)
		return -EINVAL;

	memcpy(rss_table->indir, indir,
	       MVPP22_RSS_TABLE_ENTRIES * sizeof(rss_table->indir[0]));

	mvpp22_rss_fill_table(port, rss_table, rss_ctx);

	return 0;
}

int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 port_ctx,
				  u32 *indir)
{
	int rss_ctx = mvpp22_rss_ctx(port, port_ctx);
	struct mvpp2_rss_table *rss_table = mvpp22_rss_table_get(port->priv,
								 rss_ctx);

	if (!rss_table)
		return -EINVAL;

	memcpy(indir, rss_table->indir,
	       MVPP22_RSS_TABLE_ENTRIES * sizeof(rss_table->indir[0]));

	return 0;
}

int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
{
	u16 hash_opts = 0;
	u32 flow_type;

	flow_type = mvpp2_cls_ethtool_flow_to_type(info->flow_type);

	switch (flow_type) {
	case MVPP22_FLOW_TCP4:
	case MVPP22_FLOW_UDP4:
	case MVPP22_FLOW_TCP6:
	case MVPP22_FLOW_UDP6:
		if (info->data & RXH_L4_B_0_1)
			hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
		if (info->data & RXH_L4_B_2_3)
			hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
		fallthrough;
	case MVPP22_FLOW_IP4:
	case MVPP22_FLOW_IP6:
		if (info->data & RXH_L2DA)
			hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
		if (info->data & RXH_VLAN)
			hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
		if (info->data & RXH_L3_PROTO)
			hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
		if (info->data & RXH_IP_SRC)
			hash_opts |= (MVPP22_CLS_HEK_OPT_IP4SA |
				     MVPP22_CLS_HEK_OPT_IP6SA);
		if (info->data & RXH_IP_DST)
			hash_opts |= (MVPP22_CLS_HEK_OPT_IP4DA |
				     MVPP22_CLS_HEK_OPT_IP6DA);
		break;
	default: return -EOPNOTSUPP;
	}

	return mvpp2_port_rss_hash_opts_set(port, flow_type, hash_opts);
}

int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
{
	unsigned long hash_opts;
	u32 flow_type;
	int i;

	flow_type = mvpp2_cls_ethtool_flow_to_type(info->flow_type);

	hash_opts = mvpp2_port_rss_hash_opts_get(port, flow_type);
	info->data = 0;

	for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
		switch (BIT(i)) {
		case MVPP22_CLS_HEK_OPT_MAC_DA:
			info->data |= RXH_L2DA;
			break;
		case MVPP22_CLS_HEK_OPT_VLAN:
			info->data |= RXH_VLAN;
			break;
		case MVPP22_CLS_HEK_OPT_L3_PROTO:
			info->data |= RXH_L3_PROTO;
			break;
		case MVPP22_CLS_HEK_OPT_IP4SA:
		case MVPP22_CLS_HEK_OPT_IP6SA:
			info->data |= RXH_IP_SRC;
			break;
		case MVPP22_CLS_HEK_OPT_IP4DA:
		case MVPP22_CLS_HEK_OPT_IP6DA:
			info->data |= RXH_IP_DST;
			break;
		case MVPP22_CLS_HEK_OPT_L4SIP:
			info->data |= RXH_L4_B_0_1;
			break;
		case MVPP22_CLS_HEK_OPT_L4DIP:
			info->data |= RXH_L4_B_2_3;
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

int mvpp22_port_rss_init(struct mvpp2_port *port)
{
	struct mvpp2_rss_table *table;
	u32 context = 0;
	int i, ret;

	for (i = 0; i < MVPP22_N_RSS_TABLES; i++)
		port->rss_ctx[i] = -1;

	ret = mvpp22_rss_context_create(port, &context);
	if (ret)
		return ret;

	table = mvpp22_rss_table_get(port->priv, context);
	if (!table)
		return -EINVAL;

	port->rss_ctx[0] = context;

	/* Configure the first table to evenly distribute the packets across
	 * real Rx Queues. The table entries map a hash to a port Rx Queue.
	 */
	for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++)
		table->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs);

	mvpp22_rss_fill_table(port, table, mvpp22_rss_ctx(port, 0));

	/* Configure default flows */
	mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_IP4, MVPP22_CLS_HEK_IP4_2T);
	mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_IP6, MVPP22_CLS_HEK_IP6_2T);
	mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_TCP4, MVPP22_CLS_HEK_IP4_5T);
	mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_TCP6, MVPP22_CLS_HEK_IP6_5T);
	mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP4, MVPP22_CLS_HEK_IP4_5T);
	mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP6, MVPP22_CLS_HEK_IP6_5T);

	return 0;
}
