// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include "ice_common.h"
#include "ice_flex_pipe.h"
#include "ice_flow.h"
#include "ice.h"

static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
	/* SWITCH */
	{
		ICE_SID_XLT0_SW,
		ICE_SID_XLT_KEY_BUILDER_SW,
		ICE_SID_XLT1_SW,
		ICE_SID_XLT2_SW,
		ICE_SID_PROFID_TCAM_SW,
		ICE_SID_PROFID_REDIR_SW,
		ICE_SID_FLD_VEC_SW,
		ICE_SID_CDID_KEY_BUILDER_SW,
		ICE_SID_CDID_REDIR_SW
	},

	/* ACL */
	{
		ICE_SID_XLT0_ACL,
		ICE_SID_XLT_KEY_BUILDER_ACL,
		ICE_SID_XLT1_ACL,
		ICE_SID_XLT2_ACL,
		ICE_SID_PROFID_TCAM_ACL,
		ICE_SID_PROFID_REDIR_ACL,
		ICE_SID_FLD_VEC_ACL,
		ICE_SID_CDID_KEY_BUILDER_ACL,
		ICE_SID_CDID_REDIR_ACL
	},

	/* FD */
	{
		ICE_SID_XLT0_FD,
		ICE_SID_XLT_KEY_BUILDER_FD,
		ICE_SID_XLT1_FD,
		ICE_SID_XLT2_FD,
		ICE_SID_PROFID_TCAM_FD,
		ICE_SID_PROFID_REDIR_FD,
		ICE_SID_FLD_VEC_FD,
		ICE_SID_CDID_KEY_BUILDER_FD,
		ICE_SID_CDID_REDIR_FD
	},

	/* RSS */
	{
		ICE_SID_XLT0_RSS,
		ICE_SID_XLT_KEY_BUILDER_RSS,
		ICE_SID_XLT1_RSS,
		ICE_SID_XLT2_RSS,
		ICE_SID_PROFID_TCAM_RSS,
		ICE_SID_PROFID_REDIR_RSS,
		ICE_SID_FLD_VEC_RSS,
		ICE_SID_CDID_KEY_BUILDER_RSS,
		ICE_SID_CDID_REDIR_RSS
	},

	/* PE */
	{
		ICE_SID_XLT0_PE,
		ICE_SID_XLT_KEY_BUILDER_PE,
		ICE_SID_XLT1_PE,
		ICE_SID_XLT2_PE,
		ICE_SID_PROFID_TCAM_PE,
		ICE_SID_PROFID_REDIR_PE,
		ICE_SID_FLD_VEC_PE,
		ICE_SID_CDID_KEY_BUILDER_PE,
		ICE_SID_CDID_REDIR_PE
	}
};

/**
 * ice_sect_id - returns section ID
 * @blk: block type
 * @sect: section type
 *
 * This helper function returns the proper section ID given a block type and a
 * section type.
 */
static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
{
	return ice_sect_lkup[blk][sect];
}

/**
 * ice_hw_ptype_ena - check if the PTYPE is enabled or not
 * @hw: pointer to the HW structure
 * @ptype: the hardware PTYPE
 */
bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype)
{
	return ptype < ICE_FLOW_PTYPE_MAX &&
	       test_bit(ptype, hw->hw_ptype);
}

/* Key creation */

#define ICE_DC_KEY	0x1	/* don't care */
#define ICE_DC_KEYINV	0x1
#define ICE_NM_KEY	0x0	/* never match */
#define ICE_NM_KEYINV	0x0
#define ICE_0_KEY	0x1	/* match 0 */
#define ICE_0_KEYINV	0x0
#define ICE_1_KEY	0x0	/* match 1 */
#define ICE_1_KEYINV	0x1

/**
 * ice_gen_key_word - generate 16-bits of a key/mask word
 * @val: the value
 * @valid: valid bits mask (change only the valid bits)
 * @dont_care: don't care mask
 * @nvr_mtch: never match mask
 * @key: pointer to where the resulting key portion will be stored
 * @key_inv: pointer to where the resulting key invert portion will be stored
 *
 * This function generates 16-bits from an 8-bit value, an 8-bit don't care
 * mask and an 8-bit never match mask. The 16 bits of output are divided into
 * 8 bits of key and 8 bits of key invert.
 *
 *	'0' = b01, always match a 0 bit
 *	'1' = b10, always match a 1 bit
 *	'?' = b11, don't care bit (always matches)
 *	'~' = b00, never match bit
 *
 * Input:
 *	val:		b0  1  0  1  0  1
 *	dont_care:	b0  0  1  1  0  0
 *	never_mtch:	b0  0  0  0  1  1
 *	------------------------------
 * Result:	key:	b01 10 11 11 00 00
 */
static int
ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
		 u8 *key_inv)
{
	u8 in_key = *key, in_key_inv = *key_inv;
	u8 i;

	/* 'dont_care' and 'nvr_mtch' masks cannot overlap */
	if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
		return -EIO;

	*key = 0;
	*key_inv = 0;

	/* encode the 8 bits into 8-bit key and 8-bit key invert */
	for (i = 0; i < 8; i++) {
		*key >>= 1;
		*key_inv >>= 1;

		if (!(valid & 0x1)) { /* change only valid bits */
			*key |= (in_key & 0x1) << 7;
			*key_inv |= (in_key_inv & 0x1) << 7;
		} else if (dont_care & 0x1) { /* don't care bit */
			*key |= ICE_DC_KEY << 7;
			*key_inv |= ICE_DC_KEYINV << 7;
		} else if (nvr_mtch & 0x1) { /* never match bit */
			*key |= ICE_NM_KEY << 7;
			*key_inv |= ICE_NM_KEYINV << 7;
		} else if (val & 0x01) { /* exact 1 match */
			*key |= ICE_1_KEY << 7;
			*key_inv |= ICE_1_KEYINV << 7;
		} else { /* exact 0 match */
			*key |= ICE_0_KEY << 7;
			*key_inv |= ICE_0_KEYINV << 7;
		}

		dont_care >>= 1;
		nvr_mtch >>= 1;
		valid >>= 1;
		val >>= 1;
		in_key >>= 1;
		in_key_inv >>= 1;
	}

	return 0;
}
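
/* Worked example for the encoding above: with valid = 0xFF and no don't-care
 * or never-match bits set, every input bit becomes an exact match, so the
 * result reduces to key = ~val and key_inv = val (e.g. val 0x05 yields
 * key 0xFA and key_inv 0x05). This follows directly from the
 * ICE_0_KEY/ICE_1_KEY definitions.
 */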

/**
 * ice_bits_max_set - determine if the number of bits set is within a maximum
 * @mask: pointer to the byte array which is the mask
 * @size: the number of bytes in the mask
 * @max: the max number of set bits
 *
 * This function determines if there are at most 'max' bits set in an array.
 * Returns true if the number of bits set is <= max, false otherwise.
 */
static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
{
	u16 count = 0;
	u16 i;

	/* check each byte */
	for (i = 0; i < size; i++) {
		/* if 0, go to next byte */
		if (!mask[i])
			continue;

		/* We know there is at least one set bit in this byte because of
		 * the above check; if we already have found 'max' number of
		 * bits set, then we can return failure now.
		 */
		if (count == max)
			return false;

		/* count the bits in this byte, checking threshold */
		count += hweight8(mask[i]);
		if (count > max)
			return false;
	}

	return true;
}

/**
 * ice_set_key - generate a variable sized key with multiples of 16-bits
 * @key: pointer to where the key will be stored
 * @size: the size of the complete key in bytes (must be even)
 * @val: array of 8-bit values that makes up the value portion of the key
 * @upd: array of 8-bit masks that determine what key portion to update
 * @dc: array of 8-bit masks that make up the don't care mask
 * @nm: array of 8-bit masks that make up the never match mask
 * @off: the offset of the first byte in the key to update
 * @len: the number of bytes in the key update
 *
 * This function generates a key from a value, a don't care mask and a never
 * match mask.
 * upd, dc, and nm are optional parameters, and can be NULL:
 *	upd == NULL --> upd mask is all 1's (update all bits)
 *	dc == NULL --> dc mask is all 0's (no don't care bits)
 *	nm == NULL --> nm mask is all 0's (no never match bits)
 */
static int
ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
	    u16 len)
{
	u16 half_size;
	u16 i;

	/* size must be a multiple of 2 bytes. */
	if (size % 2)
		return -EIO;

	half_size = size / 2;
	if (off + len > half_size)
		return -EIO;

	/* Make sure at most one bit is set in the never match mask. Having
	 * more than one never match mask bit set will cause HW to consume
	 * excessive power; this is a power management efficiency check.
	 */
#define ICE_NVR_MTCH_BITS_MAX	1
	if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
		return -EIO;

	for (i = 0; i < len; i++)
		if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
				     dc ? dc[i] : 0, nm ? nm[i] : 0,
				     key + off + i, key + half_size + off + i))
			return -EIO;

	return 0;
}
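
/* Layout note: the first half of 'key' receives the key proper and the
 * second half receives the key invert. A minimal, hypothetical caller sketch
 * (values are for illustration only):
 *
 *	u8 key[4] = {};
 *	u8 val[2] = { 0x12, 0x34 };
 *
 *	ice_set_key(key, sizeof(key), val, NULL, NULL, NULL, 0, sizeof(val));
 *
 * With no update/don't-care/never-match masks, this leaves key[0..1] = ~val
 * and key[2..3] = val, per the encoding in ice_gen_key_word().
 */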

/**
 * ice_acquire_change_lock
 * @hw: pointer to the HW structure
 * @access: access type (read or write)
 *
 * This function will request ownership of the change lock.
 */
int
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
	return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
			       ICE_CHANGE_LOCK_TIMEOUT);
}

/**
 * ice_release_change_lock
 * @hw: pointer to the HW structure
 *
 * This function will release the change lock using the proper Admin Command.
 */
void ice_release_change_lock(struct ice_hw *hw)
{
	ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
}

/**
 * ice_get_open_tunnel_port - retrieve an open tunnel port
 * @hw: pointer to the HW structure
 * @port: returns open port
 * @type: type of tunnel, can be TNL_LAST if it doesn't matter
 */
bool
ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port,
			 enum ice_tunnel_type type)
{
	bool res = false;
	u16 i;

	mutex_lock(&hw->tnl_lock);

	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
		if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port &&
		    (type == TNL_LAST || type == hw->tnl.tbl[i].type)) {
			*port = hw->tnl.tbl[i].port;
			res = true;
			break;
		}

	mutex_unlock(&hw->tnl_lock);

	return res;
}

/**
 * ice_upd_dvm_boost_entry
 * @hw: pointer to the HW structure
 * @entry: pointer to double VLAN boost entry info
 */
static int
ice_upd_dvm_boost_entry(struct ice_hw *hw, struct ice_dvm_entry *entry)
{
	struct ice_boost_tcam_section *sect_rx, *sect_tx;
	int status = -ENOSPC;
	struct ice_buf_build *bld;
	u8 val, dc, nm;

	bld = ice_pkg_buf_alloc(hw);
	if (!bld)
		return -ENOMEM;

	/* allocate 2 sections, one for Rx parser, one for Tx parser */
	if (ice_pkg_buf_reserve_section(bld, 2))
		goto ice_upd_dvm_boost_entry_err;

	sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
					    struct_size(sect_rx, tcam, 1));
	if (!sect_rx)
		goto ice_upd_dvm_boost_entry_err;
	sect_rx->count = cpu_to_le16(1);

	sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
					    struct_size(sect_tx, tcam, 1));
	if (!sect_tx)
		goto ice_upd_dvm_boost_entry_err;
	sect_tx->count = cpu_to_le16(1);

	/* copy original boost entry to update package buffer */
	memcpy(sect_rx->tcam, entry->boost_entry, sizeof(*sect_rx->tcam));

	/* re-write the don't care and never match bits accordingly */
	if (entry->enable) {
		/* all bits are don't care */
		val = 0x00;
		dc = 0xFF;
		nm = 0x00;
	} else {
		/* disable, one never match bit, the rest are don't care */
		val = 0x00;
		dc = 0xF7;
		nm = 0x08;
	}

	ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
		    &val, NULL, &dc, &nm, 0, sizeof(u8));

	/* exact copy of entry to Tx section entry */
	memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));

	status = ice_update_pkg_no_lock(hw, ice_pkg_buf(bld), 1);

ice_upd_dvm_boost_entry_err:
	ice_pkg_buf_free(hw, bld);

	return status;
}

/**
 * ice_set_dvm_boost_entries
 * @hw: pointer to the HW structure
 *
 * Enable double VLAN by updating the appropriate boost TCAM entries.
 */
int ice_set_dvm_boost_entries(struct ice_hw *hw)
{
	u16 i;

	for (i = 0; i < hw->dvm_upd.count; i++) {
		int status;

		status = ice_upd_dvm_boost_entry(hw, &hw->dvm_upd.tbl[i]);
		if (status)
			return status;
	}

	return 0;
}

/**
 * ice_tunnel_idx_to_entry - convert linear index to the sparse one
 * @hw: pointer to the HW structure
 * @type: type of tunnel
 * @idx: linear index
 *
 * Stack assumes we have 2 linear tables with indexes [0, count_valid),
 * but really the port table may be sparse, and types are mixed, so convert
 * the stack index into the device index.
 */
static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
				   u16 idx)
{
	u16 i;

	for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
		if (hw->tnl.tbl[i].valid &&
		    hw->tnl.tbl[i].type == type &&
		    idx-- == 0)
			return i;

	WARN_ON_ONCE(1);
	return 0;
}
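
/* For illustration: if the device table holds { VXLAN, GENEVE, VXLAN } in
 * entries 0-2, the stack's VXLAN index 1 maps to device entry 2, since the
 * GENEVE entry in between is skipped by the type check above.
 */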

/**
 * ice_create_tunnel
 * @hw: pointer to the HW structure
 * @index: device table entry
 * @type: type of tunnel
 * @port: port of tunnel to create
 *
 * Create a tunnel by updating the parse graph in the parser. We do that by
 * creating a package buffer with the tunnel info and issuing an update package
 * command.
 */
static int
ice_create_tunnel(struct ice_hw *hw, u16 index,
		  enum ice_tunnel_type type, u16 port)
{
	struct ice_boost_tcam_section *sect_rx, *sect_tx;
	struct ice_buf_build *bld;
	int status = -ENOSPC;

	mutex_lock(&hw->tnl_lock);

	bld = ice_pkg_buf_alloc(hw);
	if (!bld) {
		status = -ENOMEM;
		goto ice_create_tunnel_end;
	}

	/* allocate 2 sections, one for Rx parser, one for Tx parser */
	if (ice_pkg_buf_reserve_section(bld, 2))
		goto ice_create_tunnel_err;

	sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
					    struct_size(sect_rx, tcam, 1));
	if (!sect_rx)
		goto ice_create_tunnel_err;
	sect_rx->count = cpu_to_le16(1);

	sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
					    struct_size(sect_tx, tcam, 1));
	if (!sect_tx)
		goto ice_create_tunnel_err;
	sect_tx->count = cpu_to_le16(1);

	/* copy original boost entry to update package buffer */
	memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
	       sizeof(*sect_rx->tcam));

	/* over-write the never-match dest port key bits with the encoded port
	 * bits
	 */
	ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
		    (u8 *)&port, NULL, NULL, NULL,
		    (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
		    sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));

	/* exact copy of entry to Tx section entry */
	memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));

	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
	if (!status)
		hw->tnl.tbl[index].port = port;

ice_create_tunnel_err:
	ice_pkg_buf_free(hw, bld);

ice_create_tunnel_end:
	mutex_unlock(&hw->tnl_lock);

	return status;
}

/**
 * ice_destroy_tunnel
 * @hw: pointer to the HW structure
 * @index: device table entry
 * @type: type of tunnel
 * @port: port of tunnel to destroy
 *
 * Destroys a tunnel by creating an update package buffer targeting the
 * specific update requested and then performing an update package.
 */
static int
ice_destroy_tunnel(struct ice_hw *hw, u16 index, enum ice_tunnel_type type,
		   u16 port)
{
	struct ice_boost_tcam_section *sect_rx, *sect_tx;
	struct ice_buf_build *bld;
	int status = -ENOSPC;

	mutex_lock(&hw->tnl_lock);

	if (WARN_ON(!hw->tnl.tbl[index].valid ||
		    hw->tnl.tbl[index].type != type ||
		    hw->tnl.tbl[index].port != port)) {
		status = -EIO;
		goto ice_destroy_tunnel_end;
	}

	bld = ice_pkg_buf_alloc(hw);
	if (!bld) {
		status = -ENOMEM;
		goto ice_destroy_tunnel_end;
	}

	/* allocate 2 sections, one for Rx parser, one for Tx parser */
	if (ice_pkg_buf_reserve_section(bld, 2))
		goto ice_destroy_tunnel_err;

	sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
					    struct_size(sect_rx, tcam, 1));
	if (!sect_rx)
		goto ice_destroy_tunnel_err;
	sect_rx->count = cpu_to_le16(1);

	sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
					    struct_size(sect_tx, tcam, 1));
	if (!sect_tx)
		goto ice_destroy_tunnel_err;
	sect_tx->count = cpu_to_le16(1);

	/* copy original boost entry to update package buffer, one copy to Rx
	 * section, another copy to the Tx section
	 */
	memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
	       sizeof(*sect_rx->tcam));
	memcpy(sect_tx->tcam, hw->tnl.tbl[index].boost_entry,
	       sizeof(*sect_tx->tcam));

	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
	if (!status)
		hw->tnl.tbl[index].port = 0;

ice_destroy_tunnel_err:
	ice_pkg_buf_free(hw, bld);

ice_destroy_tunnel_end:
	mutex_unlock(&hw->tnl_lock);

	return status;
}

int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
			    unsigned int idx, struct udp_tunnel_info *ti)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	enum ice_tunnel_type tnl_type;
	int status;
	u16 index;

	tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
	index = ice_tunnel_idx_to_entry(&pf->hw, tnl_type, idx);

	status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
	if (status) {
		netdev_err(netdev, "Error adding UDP tunnel - %d\n",
			   status);
		return -EIO;
	}

	udp_tunnel_nic_set_port_priv(netdev, table, idx, index);
	return 0;
}

int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
			      unsigned int idx, struct udp_tunnel_info *ti)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	enum ice_tunnel_type tnl_type;
	int status;

	tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;

	status = ice_destroy_tunnel(&pf->hw, ti->hw_priv, tnl_type,
				    ntohs(ti->port));
	if (status) {
		netdev_err(netdev, "Error removing UDP tunnel - %d\n",
			   status);
		return -EIO;
	}

	return 0;
}

/**
 * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index
 * @hw: pointer to the hardware structure
 * @blk: hardware block
 * @prof: profile ID
 * @fv_idx: field vector word index
 * @prot: variable to receive the protocol ID
 * @off: variable to receive the protocol offset
 */
int
ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
		  u8 *prot, u16 *off)
{
	struct ice_fv_word *fv_ext;

	if (prof >= hw->blk[blk].es.count)
		return -EINVAL;

	if (fv_idx >= hw->blk[blk].es.fvw)
		return -EINVAL;

	fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);

	*prot = fv_ext[fv_idx].prot_id;
	*off = fv_ext[fv_idx].off;

	return 0;
}

/* PTG Management */

/**
 * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @ptype: the ptype to search for
 * @ptg: pointer to variable that receives the PTG
 *
 * This function will search the PTGs for a particular ptype, returning the
 * PTG ID that contains it through the PTG parameter, with the value of
 * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG.
 */
static int
ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
{
	if (ptype >= ICE_XLT1_CNT || !ptg)
		return -EINVAL;

	*ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
	return 0;
}

/**
 * ice_ptg_alloc_val - Allocates a new packet type group ID by value
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @ptg: the PTG to allocate
 *
 * This function allocates a given packet type group ID specified by the PTG
 * parameter.
 */
static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
{
	hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
}

/**
 * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @ptype: the ptype to remove
 * @ptg: the PTG to remove the ptype from
 *
 * This function will remove the ptype from the specific PTG, and move it to
 * the default PTG (ICE_DEFAULT_PTG).
 */
static int
ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
{
	struct ice_ptg_ptype **ch;
	struct ice_ptg_ptype *p;

	if (ptype > ICE_XLT1_CNT - 1)
		return -EINVAL;

	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
		return -ENOENT;

	/* Should not happen if .in_use is set, bad config */
	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
		return -EIO;

	/* find the ptype within this PTG, and bypass the link over it */
	p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
	ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
	while (p) {
		if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
			*ch = p->next_ptype;
			break;
		}

		ch = &p->next_ptype;
		p = p->next_ptype;
	}

	hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
	hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;

	return 0;
}

/**
 * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @ptype: the ptype to add or move
 * @ptg: the PTG to add or move the ptype to
 *
 * This function will either add or move a ptype to a particular PTG depending
 * on whether the ptype is already part of another group. Note that using a
 * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
 * default PTG.
 */
static int
ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
{
	u8 original_ptg;
	int status;

	if (ptype > ICE_XLT1_CNT - 1)
		return -EINVAL;

	if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
		return -ENOENT;

	status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
	if (status)
		return status;

	/* Is ptype already in the correct PTG? */
	if (original_ptg == ptg)
		return 0;

	/* Remove from original PTG and move back to the default PTG */
	if (original_ptg != ICE_DEFAULT_PTG)
		ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);

	/* Moving to default PTG? Then we're done with this request */
	if (ptg == ICE_DEFAULT_PTG)
		return 0;

	/* Add ptype to PTG at beginning of list */
	hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
		hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
	hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
		&hw->blk[blk].xlt1.ptypes[ptype];

	hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
	hw->blk[blk].xlt1.t[ptype] = ptg;

	return 0;
}
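
/* Bookkeeping note: xlt1.t[] is the software image of the hardware XLT1
 * table (ptype -> PTG), while ptypes[]/ptg_tbl[] track the same mapping as
 * per-group linked lists so membership can be walked; both views are kept in
 * sync by the add/remove helpers above.
 */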

/* Block / table size info */
struct ice_blk_size_details {
	u16 xlt1;		/* # XLT1 entries */
	u16 xlt2;		/* # XLT2 entries */
	u16 prof_tcam;		/* # profile ID TCAM entries */
	u16 prof_id;		/* # profile IDs */
	u8 prof_cdid_bits;	/* # CDID one-hot bits used in key */
	u16 prof_redir;		/* # profile redirection entries */
	u16 es;			/* # extraction sequence entries */
	u16 fvw;		/* # field vector words */
	u8 overwrite;		/* overwrite existing entries allowed */
	u8 reverse;		/* reverse FV order */
};

static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
	/**
	 * Table Definitions
	 * XLT1 - Number of entries in XLT1 table
	 * XLT2 - Number of entries in XLT2 table
	 * TCAM - Number of entries in the Profile ID TCAM table
	 * CDID - Control Domain ID of the hardware block
	 * PRED - Number of entries in the Profile Redirection Table
	 * FV   - Number of entries in the Field Vector
	 * FVW  - Width (in WORDs) of the Field Vector
	 * OVR  - Overwrite existing table entries
	 * REV  - Reverse FV
	 */
	/*          XLT1        , XLT2        ,TCAM, PID,CDID,PRED,  FV, FVW */
	/*          Overwrite   , Reverse FV */
	/* SW  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256,   0, 256, 256,  48,
		    false, false },
	/* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0, 128, 128,  32,
		    false, false },
	/* FD  */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0, 128, 128,  24,
		    false, true },
	/* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128,   0, 128, 128,  24,
		    true, true },
	/* PE  */ { ICE_XLT1_CNT, ICE_XLT2_CNT,  64,  32,   0,  32,  32,  24,
		    false, false },
};

enum ice_sid_all {
	ICE_SID_XLT1_OFF = 0,
	ICE_SID_XLT2_OFF,
	ICE_SID_PR_OFF,
	ICE_SID_PR_REDIR_OFF,
	ICE_SID_ES_OFF,
	ICE_SID_OFF_COUNT,
};

/* Characteristic handling */

/**
 * ice_match_prop_lst - determine if properties of two lists match
 * @list1: first properties list
 * @list2: second properties list
 *
 * Count, cookies and the order must match in order to be considered
 * equivalent.
 */
static bool
ice_match_prop_lst(struct list_head *list1, struct list_head *list2)
{
	struct ice_vsig_prof *tmp1;
	struct ice_vsig_prof *tmp2;
	u16 chk_count = 0;
	u16 count = 0;

	/* compare counts */
	list_for_each_entry(tmp1, list1, list)
		count++;
	list_for_each_entry(tmp2, list2, list)
		chk_count++;
	if (!count || count != chk_count)
		return false;

	tmp1 = list_first_entry(list1, struct ice_vsig_prof, list);
	tmp2 = list_first_entry(list2, struct ice_vsig_prof, list);

	/* profile cookies must compare, and in the exact same order to take
	 * into account priority
	 */
	while (count--) {
		if (tmp2->profile_cookie != tmp1->profile_cookie)
			return false;

		tmp1 = list_next_entry(tmp1, list);
		tmp2 = list_next_entry(tmp2, list);
	}

	return true;
}

/* VSIG Management */

/**
 * ice_vsig_find_vsi - find a VSIG that contains a specified VSI
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @vsi: VSI of interest
 * @vsig: pointer to receive the VSI group
 *
 * This function will lookup the VSI entry in the XLT2 list and return
 * the VSI group it's associated with.
 */
static int
ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
{
	if (!vsig || vsi >= ICE_MAX_VSI)
		return -EINVAL;

	/* As long as there's a default or valid VSIG associated with the input
	 * VSI, the function returns success. Any handling of VSIG will be done
	 * by the following add, update or remove functions.
	 */
	*vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;

	return 0;
}

/**
 * ice_vsig_alloc_val - allocate a new VSIG by value
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @vsig: the VSIG to allocate
 *
 * This function will allocate a given VSIG specified by the VSIG parameter.
 */
static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
{
	u16 idx = vsig & ICE_VSIG_IDX_M;

	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
		INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
		hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
	}

	return ICE_VSIG_VALUE(idx, hw->pf_id);
}
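
/* Note on the return value: ICE_VSIG_VALUE() combines the table index with
 * the owning PF ID into the 16-bit VSIG handle, which is why callers mask
 * with ICE_VSIG_IDX_M to recover the bare index (see the macro definition
 * for the exact bit layout).
 */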

/**
 * ice_vsig_alloc - Finds a free entry and allocates a new VSIG
 * @hw: pointer to the hardware structure
 * @blk: HW block
 *
 * This function will iterate through the VSIG list and mark the first
 * unused entry for the new VSIG entry as used and return that value.
 */
static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
{
	u16 i;

	for (i = 1; i < ICE_MAX_VSIGS; i++)
		if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
			return ice_vsig_alloc_val(hw, blk, i);

	return ICE_DEFAULT_VSIG;
}

/**
 * ice_find_dup_props_vsig - find VSI group with a specified set of properties
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @chs: characteristic list
 * @vsig: returns the VSIG with the matching profiles, if found
 *
 * Each VSIG is associated with a characteristic set; i.e. all VSIs under
 * a group have the same characteristic set. To check if there exists a VSIG
 * which has the same characteristics as the input characteristics, this
 * function will iterate through the XLT2 list and return the VSIG that has a
 * matching configuration. In order to make sure that priorities are accounted
 * for, the list must match exactly, including the order in which the
 * characteristics are listed.
 */
static int
ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
			struct list_head *chs, u16 *vsig)
{
	struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
	u16 i;

	for (i = 0; i < xlt2->count; i++)
		if (xlt2->vsig_tbl[i].in_use &&
		    ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
			*vsig = ICE_VSIG_VALUE(i, hw->pf_id);
			return 0;
		}

	return -ENOENT;
}

/**
 * ice_vsig_free - free VSI group
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @vsig: VSIG to remove
 *
 * The function will remove all VSIs associated with the input VSIG and move
 * them to the DEFAULT_VSIG and mark the VSIG available.
 */
static int ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
{
	struct ice_vsig_prof *dtmp, *del;
	struct ice_vsig_vsi *vsi_cur;
	u16 idx;

	idx = vsig & ICE_VSIG_IDX_M;
	if (idx >= ICE_MAX_VSIGS)
		return -EINVAL;

	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
		return -ENOENT;

	hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;

	vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
	/* If the VSIG has at least 1 VSI then iterate through the
	 * list and remove the VSIs before deleting the group.
	 */
	if (vsi_cur) {
		/* remove all vsis associated with this VSIG XLT2 entry */
		do {
			struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;

			vsi_cur->vsig = ICE_DEFAULT_VSIG;
			vsi_cur->changed = 1;
			vsi_cur->next_vsi = NULL;
			vsi_cur = tmp;
		} while (vsi_cur);

		/* NULL terminate head of VSI list */
		hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
	}

	/* free characteristic list */
	list_for_each_entry_safe(del, dtmp,
				 &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
				 list) {
		list_del(&del->list);
		devm_kfree(ice_hw_to_dev(hw), del);
	}

	/* if VSIG characteristic list was cleared for reset
	 * re-initialize the list head
	 */
	INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);

	return 0;
}

/**
 * ice_vsig_remove_vsi - remove VSI from VSIG
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @vsi: VSI to remove
 * @vsig: VSI group to remove from
 *
 * The function will remove the input VSI from its VSI group and move it
 * to the DEFAULT_VSIG.
 */
static int
ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
{
	struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
	u16 idx;

	idx = vsig & ICE_VSIG_IDX_M;

	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
		return -EINVAL;

	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
		return -ENOENT;

	/* entry already in default VSIG, don't have to remove */
	if (idx == ICE_DEFAULT_VSIG)
		return 0;

	vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
	if (!(*vsi_head))
		return -EIO;

	vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
	vsi_cur = (*vsi_head);

	/* iterate the VSI list, skip over the entry to be removed */
	while (vsi_cur) {
		if (vsi_tgt == vsi_cur) {
			(*vsi_head) = vsi_cur->next_vsi;
			break;
		}
		vsi_head = &vsi_cur->next_vsi;
		vsi_cur = vsi_cur->next_vsi;
	}

	/* verify if VSI was removed from group list */
	if (!vsi_cur)
		return -ENOENT;

	vsi_cur->vsig = ICE_DEFAULT_VSIG;
	vsi_cur->changed = 1;
	vsi_cur->next_vsi = NULL;

	return 0;
}

/**
 * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @vsi: VSI to move
 * @vsig: destination VSI group
 *
 * This function will move or add the input VSI to the target VSIG.
 * The function will find the original VSIG the VSI belongs to and
 * move the entry to the DEFAULT_VSIG, update the original VSIG and
 * then move entry to the new VSIG.
 */
static int
ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
{
	struct ice_vsig_vsi *tmp;
	u16 orig_vsig, idx;
	int status;

	idx = vsig & ICE_VSIG_IDX_M;

	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
		return -EINVAL;

	/* if VSIG not in use and VSIG is not default type, this VSIG
	 * doesn't exist.
	 */
	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
	    vsig != ICE_DEFAULT_VSIG)
		return -ENOENT;

	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
	if (status)
		return status;

	/* no update required if vsigs match */
	if (orig_vsig == vsig)
		return 0;

	if (orig_vsig != ICE_DEFAULT_VSIG) {
		/* remove entry from orig_vsig and add to default VSIG */
		status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
		if (status)
			return status;
	}

	if (idx == ICE_DEFAULT_VSIG)
		return 0;

	/* Create VSI entry and add VSIG and prop_mask values */
	hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
	hw->blk[blk].xlt2.vsis[vsi].changed = 1;

	/* Add new entry to the head of the VSIG list */
	tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
	hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
		&hw->blk[blk].xlt2.vsis[vsi];
	hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
	hw->blk[blk].xlt2.t[vsi] = vsig;

	return 0;
}

/**
 * ice_prof_has_mask_idx - determine if profile index masking is identical
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @prof: profile to check
 * @idx: profile index to check
 * @mask: mask to match
 */
static bool
ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx,
		      u16 mask)
{
	bool expect_no_mask = false;
	bool found = false;
	bool match = false;
	u16 i;

	/* If mask is 0x0000 or 0xffff, then there is no masking */
	if (mask == 0 || mask == 0xffff)
		expect_no_mask = true;

	/* Scan the enabled masks on this profile, for the specified idx */
	for (i = hw->blk[blk].masks.first; i < hw->blk[blk].masks.first +
	     hw->blk[blk].masks.count; i++)
		if (hw->blk[blk].es.mask_ena[prof] & BIT(i))
			if (hw->blk[blk].masks.masks[i].in_use &&
			    hw->blk[blk].masks.masks[i].idx == idx) {
				found = true;
				if (hw->blk[blk].masks.masks[i].mask == mask)
					match = true;
				break;
			}

	if (expect_no_mask) {
		if (found)
			return false;
	} else {
		if (!match)
			return false;
	}

	return true;
}
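
/* Semantics reminder: a per-word mask of 0x0000 or 0xffff means "no masking",
 * so the check above passes only if no mask entry is enabled for that FV
 * index; any other mask value must be matched by an in-use entry with the
 * same index and mask.
 */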
1194 | |
1195 | /** |
1196 | * ice_prof_has_mask - determine if profile masking is identical |
1197 | * @hw: pointer to the hardware structure |
1198 | * @blk: HW block |
1199 | * @prof: profile to check |
1200 | * @masks: masks to match |
1201 | */ |
1202 | static bool |
1203 | ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks) |
1204 | { |
1205 | u16 i; |
1206 | |
1207 | /* es->mask_ena[prof] will have the mask */ |
1208 | for (i = 0; i < hw->blk[blk].es.fvw; i++) |
1209 | if (!ice_prof_has_mask_idx(hw, blk, prof, idx: i, mask: masks[i])) |
1210 | return false; |
1211 | |
1212 | return true; |
1213 | } |
1214 | |
1215 | /** |
1216 | * ice_find_prof_id_with_mask - find profile ID for a given field vector |
1217 | * @hw: pointer to the hardware structure |
1218 | * @blk: HW block |
1219 | * @fv: field vector to search for |
1220 | * @masks: masks for FV |
1221 | * @prof_id: receives the profile ID |
1222 | */ |
1223 | static int |
1224 | ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk, |
1225 | struct ice_fv_word *fv, u16 *masks, u8 *prof_id) |
1226 | { |
1227 | struct ice_es *es = &hw->blk[blk].es; |
1228 | u8 i; |
1229 | |
1230 | /* For FD, we don't want to re-use a existed profile with the same |
1231 | * field vector and mask. This will cause rule interference. |
1232 | */ |
1233 | if (blk == ICE_BLK_FD) |
1234 | return -ENOENT; |
1235 | |
1236 | for (i = 0; i < (u8)es->count; i++) { |
1237 | u16 off = i * es->fvw; |
1238 | |
1239 | if (memcmp(p: &es->t[off], q: fv, size: es->fvw * sizeof(*fv))) |
1240 | continue; |
1241 | |
1242 | /* check if masks settings are the same for this profile */ |
1243 | if (masks && !ice_prof_has_mask(hw, blk, prof: i, masks)) |
1244 | continue; |
1245 | |
1246 | *prof_id = i; |
1247 | return 0; |
1248 | } |
1249 | |
1250 | return -ENOENT; |
1251 | } |
1252 | |
1253 | /** |
1254 | * ice_prof_id_rsrc_type - get profile ID resource type for a block type |
1255 | * @blk: the block type |
1256 | * @rsrc_type: pointer to variable to receive the resource type |
1257 | */ |
1258 | static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type) |
1259 | { |
1260 | switch (blk) { |
1261 | case ICE_BLK_FD: |
1262 | *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID; |
1263 | break; |
1264 | case ICE_BLK_RSS: |
1265 | *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID; |
1266 | break; |
1267 | default: |
1268 | return false; |
1269 | } |
1270 | return true; |
1271 | } |
1272 | |
1273 | /** |
1274 | * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type |
1275 | * @blk: the block type |
1276 | * @rsrc_type: pointer to variable to receive the resource type |
1277 | */ |
1278 | static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type) |
1279 | { |
1280 | switch (blk) { |
1281 | case ICE_BLK_FD: |
1282 | *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM; |
1283 | break; |
1284 | case ICE_BLK_RSS: |
1285 | *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM; |
1286 | break; |
1287 | default: |
1288 | return false; |
1289 | } |
1290 | return true; |
1291 | } |
1292 | |
1293 | /** |
1294 | * ice_alloc_tcam_ent - allocate hardware TCAM entry |
1295 | * @hw: pointer to the HW struct |
1296 | * @blk: the block to allocate the TCAM for |
1297 | * @btm: true to allocate from bottom of table, false to allocate from top |
1298 | * @tcam_idx: pointer to variable to receive the TCAM entry |
1299 | * |
1300 | * This function allocates a new entry in a Profile ID TCAM for a specific |
1301 | * block. |
1302 | */ |
1303 | static int |
1304 | ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm, |
1305 | u16 *tcam_idx) |
1306 | { |
1307 | u16 res_type; |
1308 | |
1309 | if (!ice_tcam_ent_rsrc_type(blk, rsrc_type: &res_type)) |
1310 | return -EINVAL; |
1311 | |
1312 | return ice_alloc_hw_res(hw, type: res_type, num: 1, btm, res: tcam_idx); |
1313 | } |
1314 | |
1315 | /** |
1316 | * ice_free_tcam_ent - free hardware TCAM entry |
1317 | * @hw: pointer to the HW struct |
1318 | * @blk: the block from which to free the TCAM entry |
1319 | * @tcam_idx: the TCAM entry to free |
1320 | * |
1321 | * This function frees an entry in a Profile ID TCAM for a specific block. |
1322 | */ |
1323 | static int |
1324 | ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx) |
1325 | { |
1326 | u16 res_type; |
1327 | |
1328 | if (!ice_tcam_ent_rsrc_type(blk, rsrc_type: &res_type)) |
1329 | return -EINVAL; |
1330 | |
1331 | return ice_free_hw_res(hw, type: res_type, num: 1, res: &tcam_idx); |
1332 | } |
1333 | |
1334 | /** |
1335 | * ice_alloc_prof_id - allocate profile ID |
1336 | * @hw: pointer to the HW struct |
1337 | * @blk: the block to allocate the profile ID for |
1338 | * @prof_id: pointer to variable to receive the profile ID |
1339 | * |
1340 | * This function allocates a new profile ID, which also corresponds to a Field |
1341 | * Vector (Extraction Sequence) entry. |
1342 | */ |
1343 | static int ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id) |
1344 | { |
1345 | u16 res_type; |
1346 | u16 get_prof; |
1347 | int status; |
1348 | |
1349 | if (!ice_prof_id_rsrc_type(blk, rsrc_type: &res_type)) |
1350 | return -EINVAL; |
1351 | |
1352 | status = ice_alloc_hw_res(hw, type: res_type, num: 1, btm: false, res: &get_prof); |
1353 | if (!status) |
1354 | *prof_id = (u8)get_prof; |
1355 | |
1356 | return status; |
1357 | } |
1358 | |
1359 | /** |
1360 | * ice_free_prof_id - free profile ID |
1361 | * @hw: pointer to the HW struct |
1362 | * @blk: the block from which to free the profile ID |
1363 | * @prof_id: the profile ID to free |
1364 | * |
1365 | * This function frees a profile ID, which also corresponds to a Field Vector. |
1366 | */ |
1367 | static int ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id) |
1368 | { |
1369 | u16 tmp_prof_id = (u16)prof_id; |
1370 | u16 res_type; |
1371 | |
1372 | if (!ice_prof_id_rsrc_type(blk, rsrc_type: &res_type)) |
1373 | return -EINVAL; |
1374 | |
1375 | return ice_free_hw_res(hw, type: res_type, num: 1, res: &tmp_prof_id); |
1376 | } |
1377 | |
1378 | /** |
1379 | * ice_prof_inc_ref - increment reference count for profile |
1380 | * @hw: pointer to the HW struct |
1381 | * @blk: the block from which to free the profile ID |
1382 | * @prof_id: the profile ID for which to increment the reference count |
1383 | */ |
1384 | static int ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) |
1385 | { |
1386 | if (prof_id > hw->blk[blk].es.count) |
1387 | return -EINVAL; |
1388 | |
1389 | hw->blk[blk].es.ref_count[prof_id]++; |
1390 | |
1391 | return 0; |
1392 | } |
1393 | |
1394 | /** |
1395 | * ice_write_prof_mask_reg - write profile mask register |
1396 | * @hw: pointer to the HW struct |
1397 | * @blk: hardware block |
1398 | * @mask_idx: mask index |
1399 | * @idx: index of the FV which will use the mask |
1400 | * @mask: the 16-bit mask |
1401 | */ |
1402 | static void |
1403 | ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx, |
1404 | u16 idx, u16 mask) |
1405 | { |
1406 | u32 offset; |
1407 | u32 val; |
1408 | |
1409 | switch (blk) { |
1410 | case ICE_BLK_RSS: |
1411 | offset = GLQF_HMASK(mask_idx); |
1412 | val = (idx << GLQF_HMASK_MSK_INDEX_S) & GLQF_HMASK_MSK_INDEX_M; |
1413 | val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M; |
1414 | break; |
1415 | case ICE_BLK_FD: |
1416 | offset = GLQF_FDMASK(mask_idx); |
1417 | val = (idx << GLQF_FDMASK_MSK_INDEX_S) & GLQF_FDMASK_MSK_INDEX_M; |
1418 | val |= (mask << GLQF_FDMASK_MASK_S) & GLQF_FDMASK_MASK_M; |
1419 | break; |
1420 | default: |
1421 | ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n" , |
1422 | blk); |
1423 | return; |
1424 | } |
1425 | |
1426 | wr32(hw, offset, val); |
1427 | ice_debug(hw, ICE_DBG_PKG, "write mask, blk %d (%d): %x = %x\n" , |
1428 | blk, idx, offset, val); |
1429 | } |
1430 | |
1431 | /** |
1432 | * ice_write_prof_mask_enable_res - write profile mask enable register |
1433 | * @hw: pointer to the HW struct |
1434 | * @blk: hardware block |
1435 | * @prof_id: profile ID |
1436 | * @enable_mask: enable mask |
1437 | */ |
1438 | static void |
1439 | ice_write_prof_mask_enable_res(struct ice_hw *hw, enum ice_block blk, |
1440 | u16 prof_id, u32 enable_mask) |
1441 | { |
1442 | u32 offset; |
1443 | |
1444 | switch (blk) { |
1445 | case ICE_BLK_RSS: |
1446 | offset = GLQF_HMASK_SEL(prof_id); |
1447 | break; |
1448 | case ICE_BLK_FD: |
1449 | offset = GLQF_FDMASK_SEL(prof_id); |
1450 | break; |
1451 | default: |
1452 | ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n" , |
1453 | blk); |
1454 | return; |
1455 | } |
1456 | |
1457 | wr32(hw, offset, enable_mask); |
1458 | ice_debug(hw, ICE_DBG_PKG, "write mask enable, blk %d (%d): %x = %x\n" , |
1459 | blk, prof_id, offset, enable_mask); |
1460 | } |
1461 | |
1462 | /** |
1463 | * ice_init_prof_masks - initial prof masks |
1464 | * @hw: pointer to the HW struct |
1465 | * @blk: hardware block |
1466 | */ |
1467 | static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk) |
1468 | { |
1469 | u16 per_pf; |
1470 | u16 i; |
1471 | |
1472 | mutex_init(&hw->blk[blk].masks.lock); |
1473 | |
1474 | per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs; |
1475 | |
1476 | hw->blk[blk].masks.count = per_pf; |
1477 | hw->blk[blk].masks.first = hw->pf_id * per_pf; |
1478 | |
1479 | memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks)); |
1480 | |
1481 | for (i = hw->blk[blk].masks.first; |
1482 | i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) |
1483 | ice_write_prof_mask_reg(hw, blk, mask_idx: i, idx: 0, mask: 0); |
1484 | } |
1485 | |
1486 | /** |
1487 | * ice_init_all_prof_masks - initialize all prof masks |
1488 | * @hw: pointer to the HW struct |
1489 | */ |
1490 | static void ice_init_all_prof_masks(struct ice_hw *hw) |
1491 | { |
1492 | ice_init_prof_masks(hw, blk: ICE_BLK_RSS); |
1493 | ice_init_prof_masks(hw, blk: ICE_BLK_FD); |
1494 | } |
1495 | |
1496 | /** |
1497 | * ice_alloc_prof_mask - allocate profile mask |
1498 | * @hw: pointer to the HW struct |
1499 | * @blk: hardware block |
1500 | * @idx: index of FV which will use the mask |
1501 | * @mask: the 16-bit mask |
1502 | * @mask_idx: variable to receive the mask index |
1503 | */ |
1504 | static int |
1505 | ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask, |
1506 | u16 *mask_idx) |
1507 | { |
1508 | bool found_unused = false, found_copy = false; |
1509 | u16 unused_idx = 0, copy_idx = 0; |
1510 | int status = -ENOSPC; |
1511 | u16 i; |
1512 | |
1513 | if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) |
1514 | return -EINVAL; |
1515 | |
1516 | mutex_lock(&hw->blk[blk].masks.lock); |
1517 | |
1518 | for (i = hw->blk[blk].masks.first; |
1519 | i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) |
1520 | if (hw->blk[blk].masks.masks[i].in_use) { |
1521 | /* if mask is in use and it exactly duplicates the |
1522 | * desired mask and index, then in can be reused |
1523 | */ |
1524 | if (hw->blk[blk].masks.masks[i].mask == mask && |
1525 | hw->blk[blk].masks.masks[i].idx == idx) { |
1526 | found_copy = true; |
1527 | copy_idx = i; |
1528 | break; |
1529 | } |
1530 | } else { |
1531 | /* save off unused index, but keep searching in case |
1532 | * there is an exact match later on |
1533 | */ |
1534 | if (!found_unused) { |
1535 | found_unused = true; |
1536 | unused_idx = i; |
1537 | } |
1538 | } |
1539 | |
1540 | if (found_copy) |
1541 | i = copy_idx; |
1542 | else if (found_unused) |
1543 | i = unused_idx; |
1544 | else |
1545 | goto err_ice_alloc_prof_mask; |
1546 | |
1547 | /* update mask for a new entry */ |
1548 | if (found_unused) { |
1549 | hw->blk[blk].masks.masks[i].in_use = true; |
1550 | hw->blk[blk].masks.masks[i].mask = mask; |
1551 | hw->blk[blk].masks.masks[i].idx = idx; |
1552 | hw->blk[blk].masks.masks[i].ref = 0; |
1553 | ice_write_prof_mask_reg(hw, blk, mask_idx: i, idx, mask); |
1554 | } |
1555 | |
1556 | hw->blk[blk].masks.masks[i].ref++; |
1557 | *mask_idx = i; |
1558 | status = 0; |
1559 | |
1560 | err_ice_alloc_prof_mask: |
1561 | mutex_unlock(lock: &hw->blk[blk].masks.lock); |
1562 | |
1563 | return status; |
1564 | } |
1565 | |
1566 | /** |
1567 | * ice_free_prof_mask - free profile mask |
1568 | * @hw: pointer to the HW struct |
1569 | * @blk: hardware block |
1570 | * @mask_idx: index of mask |
1571 | */ |
1572 | static int |
1573 | ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx) |
1574 | { |
1575 | if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) |
1576 | return -EINVAL; |
1577 | |
1578 | if (!(mask_idx >= hw->blk[blk].masks.first && |
1579 | mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count)) |
1580 | return -ENOENT; |
1581 | |
1582 | mutex_lock(&hw->blk[blk].masks.lock); |
1583 | |
1584 | if (!hw->blk[blk].masks.masks[mask_idx].in_use) |
1585 | goto exit_ice_free_prof_mask; |
1586 | |
1587 | if (hw->blk[blk].masks.masks[mask_idx].ref > 1) { |
1588 | hw->blk[blk].masks.masks[mask_idx].ref--; |
1589 | goto exit_ice_free_prof_mask; |
1590 | } |
1591 | |
1592 | /* remove mask */ |
1593 | hw->blk[blk].masks.masks[mask_idx].in_use = false; |
1594 | hw->blk[blk].masks.masks[mask_idx].mask = 0; |
1595 | hw->blk[blk].masks.masks[mask_idx].idx = 0; |
1596 | |
1597 | /* update mask as unused entry */ |
1598 | ice_debug(hw, ICE_DBG_PKG, "Free mask, blk %d, mask %d\n" , blk, |
1599 | mask_idx); |
1600 | ice_write_prof_mask_reg(hw, blk, mask_idx, idx: 0, mask: 0); |
1601 | |
1602 | exit_ice_free_prof_mask: |
1603 | mutex_unlock(lock: &hw->blk[blk].masks.lock); |
1604 | |
1605 | return 0; |
1606 | } |
1607 | |
1608 | /** |
1609 | * ice_free_prof_masks - free all profile masks for a profile |
1610 | * @hw: pointer to the HW struct |
1611 | * @blk: hardware block |
1612 | * @prof_id: profile ID |
1613 | */ |
1614 | static int |
1615 | ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id) |
1616 | { |
1617 | u32 mask_bm; |
1618 | u16 i; |
1619 | |
1620 | if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) |
1621 | return -EINVAL; |
1622 | |
1623 | mask_bm = hw->blk[blk].es.mask_ena[prof_id]; |
1624 | for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++) |
1625 | if (mask_bm & BIT(i)) |
1626 | ice_free_prof_mask(hw, blk, mask_idx: i); |
1627 | |
1628 | return 0; |
1629 | } |
1630 | |
1631 | /** |
1632 | * ice_shutdown_prof_masks - releases lock for masking |
1633 | * @hw: pointer to the HW struct |
1634 | * @blk: hardware block |
1635 | * |
1636 | * This should be called before unloading the driver |
1637 | */ |
1638 | static void ice_shutdown_prof_masks(struct ice_hw *hw, enum ice_block blk) |
1639 | { |
1640 | u16 i; |
1641 | |
1642 | mutex_lock(&hw->blk[blk].masks.lock); |
1643 | |
1644 | for (i = hw->blk[blk].masks.first; |
1645 | i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) { |
1646 | ice_write_prof_mask_reg(hw, blk, mask_idx: i, idx: 0, mask: 0); |
1647 | |
1648 | hw->blk[blk].masks.masks[i].in_use = false; |
1649 | hw->blk[blk].masks.masks[i].idx = 0; |
1650 | hw->blk[blk].masks.masks[i].mask = 0; |
1651 | } |
1652 | |
1653 | mutex_unlock(lock: &hw->blk[blk].masks.lock); |
1654 | mutex_destroy(lock: &hw->blk[blk].masks.lock); |
1655 | } |
1656 | |
1657 | /** |
1658 | * ice_shutdown_all_prof_masks - releases all locks for masking |
1659 | * @hw: pointer to the HW struct |
1660 | * |
1661 | * This should be called before unloading the driver |
1662 | */ |
1663 | static void ice_shutdown_all_prof_masks(struct ice_hw *hw) |
1664 | { |
1665 | ice_shutdown_prof_masks(hw, blk: ICE_BLK_RSS); |
1666 | ice_shutdown_prof_masks(hw, blk: ICE_BLK_FD); |
1667 | } |
1668 | |
1669 | /** |
1670 | * ice_update_prof_masking - set registers according to masking |
1671 | * @hw: pointer to the HW struct |
1672 | * @blk: hardware block |
1673 | * @prof_id: profile ID |
1674 | * @masks: masks |
1675 | */ |
1676 | static int |
1677 | ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id, |
1678 | u16 *masks) |
1679 | { |
1680 | bool err = false; |
1681 | u32 ena_mask = 0; |
1682 | u16 idx; |
1683 | u16 i; |
1684 | |
1685 | /* Only support FD and RSS masking, otherwise nothing to be done */ |
1686 | if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD) |
1687 | return 0; |
1688 | |
1689 | for (i = 0; i < hw->blk[blk].es.fvw; i++) |
1690 | if (masks[i] && masks[i] != 0xFFFF) { |
1691 | if (!ice_alloc_prof_mask(hw, blk, idx: i, mask: masks[i], mask_idx: &idx)) { |
1692 | ena_mask |= BIT(idx); |
1693 | } else { |
1694 | /* not enough bitmaps */ |
1695 | err = true; |
1696 | break; |
1697 | } |
1698 | } |
1699 | |
1700 | if (err) { |
1701 | /* free any bitmaps we have allocated */ |
1702 | for (i = 0; i < BITS_PER_BYTE * sizeof(ena_mask); i++) |
1703 | if (ena_mask & BIT(i)) |
1704 | ice_free_prof_mask(hw, blk, mask_idx: i); |
1705 | |
1706 | return -EIO; |
1707 | } |
1708 | |
1709 | /* enable the masks for this profile */ |
	ice_write_prof_mask_enable_res(hw, blk, prof_id, ena_mask);
1711 | |
1712 | /* store enabled masks with profile so that they can be freed later */ |
1713 | hw->blk[blk].es.mask_ena[prof_id] = ena_mask; |
1714 | |
1715 | return 0; |
1716 | } |
1717 | |
1718 | /** |
1719 | * ice_write_es - write an extraction sequence to hardware |
1720 | * @hw: pointer to the HW struct |
1721 | * @blk: the block in which to write the extraction sequence |
1722 | * @prof_id: the profile ID to write |
1723 | * @fv: pointer to the extraction sequence to write - NULL to clear extraction |
1724 | */ |
1725 | static void |
1726 | ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id, |
1727 | struct ice_fv_word *fv) |
1728 | { |
1729 | u16 off; |
1730 | |
1731 | off = prof_id * hw->blk[blk].es.fvw; |
1732 | if (!fv) { |
1733 | memset(&hw->blk[blk].es.t[off], 0, |
1734 | hw->blk[blk].es.fvw * sizeof(*fv)); |
1735 | hw->blk[blk].es.written[prof_id] = false; |
1736 | } else { |
1737 | memcpy(&hw->blk[blk].es.t[off], fv, |
1738 | hw->blk[blk].es.fvw * sizeof(*fv)); |
1739 | } |
1740 | } |
1741 | |
1742 | /** |
1743 | * ice_prof_dec_ref - decrement reference count for profile |
1744 | * @hw: pointer to the HW struct |
1745 | * @blk: the block from which to free the profile ID |
1746 | * @prof_id: the profile ID for which to decrement the reference count |
1747 | */ |
1748 | static int |
1749 | ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) |
1750 | { |
	if (prof_id >= hw->blk[blk].es.count)
1752 | return -EINVAL; |
1753 | |
1754 | if (hw->blk[blk].es.ref_count[prof_id] > 0) { |
1755 | if (!--hw->blk[blk].es.ref_count[prof_id]) { |
1756 | ice_write_es(hw, blk, prof_id, NULL); |
1757 | ice_free_prof_masks(hw, blk, prof_id); |
1758 | return ice_free_prof_id(hw, blk, prof_id); |
1759 | } |
1760 | } |
1761 | |
1762 | return 0; |
1763 | } |
1764 | |
1765 | /* Block / table section IDs */ |
1766 | static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = { |
1767 | /* SWITCH */ |
1768 | { ICE_SID_XLT1_SW, |
1769 | ICE_SID_XLT2_SW, |
1770 | ICE_SID_PROFID_TCAM_SW, |
1771 | ICE_SID_PROFID_REDIR_SW, |
1772 | ICE_SID_FLD_VEC_SW |
1773 | }, |
1774 | |
1775 | /* ACL */ |
1776 | { ICE_SID_XLT1_ACL, |
1777 | ICE_SID_XLT2_ACL, |
1778 | ICE_SID_PROFID_TCAM_ACL, |
1779 | ICE_SID_PROFID_REDIR_ACL, |
1780 | ICE_SID_FLD_VEC_ACL |
1781 | }, |
1782 | |
1783 | /* FD */ |
1784 | { ICE_SID_XLT1_FD, |
1785 | ICE_SID_XLT2_FD, |
1786 | ICE_SID_PROFID_TCAM_FD, |
1787 | ICE_SID_PROFID_REDIR_FD, |
1788 | ICE_SID_FLD_VEC_FD |
1789 | }, |
1790 | |
1791 | /* RSS */ |
1792 | { ICE_SID_XLT1_RSS, |
1793 | ICE_SID_XLT2_RSS, |
1794 | ICE_SID_PROFID_TCAM_RSS, |
1795 | ICE_SID_PROFID_REDIR_RSS, |
1796 | ICE_SID_FLD_VEC_RSS |
1797 | }, |
1798 | |
1799 | /* PE */ |
1800 | { ICE_SID_XLT1_PE, |
1801 | ICE_SID_XLT2_PE, |
1802 | ICE_SID_PROFID_TCAM_PE, |
1803 | ICE_SID_PROFID_REDIR_PE, |
1804 | ICE_SID_FLD_VEC_PE |
1805 | } |
1806 | }; |
1807 | |
1808 | /** |
1809 | * ice_init_sw_xlt1_db - init software XLT1 database from HW tables |
1810 | * @hw: pointer to the hardware structure |
1811 | * @blk: the HW block to initialize |
1812 | */ |
1813 | static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk) |
1814 | { |
1815 | u16 pt; |
1816 | |
1817 | for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) { |
1818 | u8 ptg; |
1819 | |
1820 | ptg = hw->blk[blk].xlt1.t[pt]; |
1821 | if (ptg != ICE_DEFAULT_PTG) { |
1822 | ice_ptg_alloc_val(hw, blk, ptg); |
			ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
1824 | } |
1825 | } |
1826 | } |
1827 | |
1828 | /** |
1829 | * ice_init_sw_xlt2_db - init software XLT2 database from HW tables |
1830 | * @hw: pointer to the hardware structure |
1831 | * @blk: the HW block to initialize |
1832 | */ |
1833 | static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk) |
1834 | { |
1835 | u16 vsi; |
1836 | |
1837 | for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) { |
1838 | u16 vsig; |
1839 | |
1840 | vsig = hw->blk[blk].xlt2.t[vsi]; |
1841 | if (vsig) { |
1842 | ice_vsig_alloc_val(hw, blk, vsig); |
1843 | ice_vsig_add_mv_vsi(hw, blk, vsi, vsig); |
1844 | /* no changes at this time, since this has been |
1845 | * initialized from the original package |
1846 | */ |
1847 | hw->blk[blk].xlt2.vsis[vsi].changed = 0; |
1848 | } |
1849 | } |
1850 | } |
1851 | |
1852 | /** |
1853 | * ice_init_sw_db - init software database from HW tables |
1854 | * @hw: pointer to the hardware structure |
1855 | */ |
1856 | static void ice_init_sw_db(struct ice_hw *hw) |
1857 | { |
1858 | u16 i; |
1859 | |
1860 | for (i = 0; i < ICE_BLK_COUNT; i++) { |
		ice_init_sw_xlt1_db(hw, (enum ice_block)i);
		ice_init_sw_xlt2_db(hw, (enum ice_block)i);
1863 | } |
1864 | } |
1865 | |
1866 | /** |
1867 | * ice_fill_tbl - Reads content of a single table type into database |
1868 | * @hw: pointer to the hardware structure |
1869 | * @block_id: Block ID of the table to copy |
1870 | * @sid: Section ID of the table to copy |
1871 | * |
1872 | * Will attempt to read the entire content of a given table of a single block |
 * into the driver database. We assume that the buffer will always be as
 * large as, or larger than, the data contained in the package. If
1875 | * this condition is not met, there is most likely an error in the package |
1876 | * contents. |
1877 | */ |
1878 | static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid) |
1879 | { |
1880 | u32 dst_len, sect_len, offset = 0; |
1881 | struct ice_prof_redir_section *pr; |
1882 | struct ice_prof_id_section *pid; |
1883 | struct ice_xlt1_section *xlt1; |
1884 | struct ice_xlt2_section *xlt2; |
1885 | struct ice_sw_fv_section *es; |
1886 | struct ice_pkg_enum state; |
1887 | u8 *src, *dst; |
1888 | void *sect; |
1889 | |
	/* if the HW segment pointer is null then the first iteration of
	 * ice_pkg_enum_section() will fail. In this case the HW tables will
	 * not be filled and we return without error.
	 */
1894 | if (!hw->seg) { |
1895 | ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n" ); |
1896 | return; |
1897 | } |
1898 | |
1899 | memset(&state, 0, sizeof(state)); |
1900 | |
	sect = ice_pkg_enum_section(hw->seg, &state, sid);
1902 | |
1903 | while (sect) { |
1904 | switch (sid) { |
1905 | case ICE_SID_XLT1_SW: |
1906 | case ICE_SID_XLT1_FD: |
1907 | case ICE_SID_XLT1_RSS: |
1908 | case ICE_SID_XLT1_ACL: |
1909 | case ICE_SID_XLT1_PE: |
1910 | xlt1 = sect; |
1911 | src = xlt1->value; |
1912 | sect_len = le16_to_cpu(xlt1->count) * |
1913 | sizeof(*hw->blk[block_id].xlt1.t); |
1914 | dst = hw->blk[block_id].xlt1.t; |
1915 | dst_len = hw->blk[block_id].xlt1.count * |
1916 | sizeof(*hw->blk[block_id].xlt1.t); |
1917 | break; |
1918 | case ICE_SID_XLT2_SW: |
1919 | case ICE_SID_XLT2_FD: |
1920 | case ICE_SID_XLT2_RSS: |
1921 | case ICE_SID_XLT2_ACL: |
1922 | case ICE_SID_XLT2_PE: |
1923 | xlt2 = sect; |
1924 | src = (__force u8 *)xlt2->value; |
1925 | sect_len = le16_to_cpu(xlt2->count) * |
1926 | sizeof(*hw->blk[block_id].xlt2.t); |
1927 | dst = (u8 *)hw->blk[block_id].xlt2.t; |
1928 | dst_len = hw->blk[block_id].xlt2.count * |
1929 | sizeof(*hw->blk[block_id].xlt2.t); |
1930 | break; |
1931 | case ICE_SID_PROFID_TCAM_SW: |
1932 | case ICE_SID_PROFID_TCAM_FD: |
1933 | case ICE_SID_PROFID_TCAM_RSS: |
1934 | case ICE_SID_PROFID_TCAM_ACL: |
1935 | case ICE_SID_PROFID_TCAM_PE: |
1936 | pid = sect; |
1937 | src = (u8 *)pid->entry; |
1938 | sect_len = le16_to_cpu(pid->count) * |
1939 | sizeof(*hw->blk[block_id].prof.t); |
1940 | dst = (u8 *)hw->blk[block_id].prof.t; |
1941 | dst_len = hw->blk[block_id].prof.count * |
1942 | sizeof(*hw->blk[block_id].prof.t); |
1943 | break; |
1944 | case ICE_SID_PROFID_REDIR_SW: |
1945 | case ICE_SID_PROFID_REDIR_FD: |
1946 | case ICE_SID_PROFID_REDIR_RSS: |
1947 | case ICE_SID_PROFID_REDIR_ACL: |
1948 | case ICE_SID_PROFID_REDIR_PE: |
1949 | pr = sect; |
1950 | src = pr->redir_value; |
1951 | sect_len = le16_to_cpu(pr->count) * |
1952 | sizeof(*hw->blk[block_id].prof_redir.t); |
1953 | dst = hw->blk[block_id].prof_redir.t; |
1954 | dst_len = hw->blk[block_id].prof_redir.count * |
1955 | sizeof(*hw->blk[block_id].prof_redir.t); |
1956 | break; |
1957 | case ICE_SID_FLD_VEC_SW: |
1958 | case ICE_SID_FLD_VEC_FD: |
1959 | case ICE_SID_FLD_VEC_RSS: |
1960 | case ICE_SID_FLD_VEC_ACL: |
1961 | case ICE_SID_FLD_VEC_PE: |
1962 | es = sect; |
1963 | src = (u8 *)es->fv; |
1964 | sect_len = (u32)(le16_to_cpu(es->count) * |
1965 | hw->blk[block_id].es.fvw) * |
1966 | sizeof(*hw->blk[block_id].es.t); |
1967 | dst = (u8 *)hw->blk[block_id].es.t; |
1968 | dst_len = (u32)(hw->blk[block_id].es.count * |
1969 | hw->blk[block_id].es.fvw) * |
1970 | sizeof(*hw->blk[block_id].es.t); |
1971 | break; |
1972 | default: |
1973 | return; |
1974 | } |
1975 | |
1976 | /* if the section offset exceeds destination length, terminate |
1977 | * table fill. |
1978 | */ |
1979 | if (offset > dst_len) |
1980 | return; |
1981 | |
		/* if the sum of section size and offset exceeds the
		 * destination size, then we are out of bounds of the HW table
		 * size for that PF; clamp the section length to fill the
		 * remaining table space of that PF.
		 */
1987 | if ((offset + sect_len) > dst_len) |
1988 | sect_len = dst_len - offset; |
1989 | |
1990 | memcpy(dst + offset, src, sect_len); |
1991 | offset += sect_len; |
		sect = ice_pkg_enum_section(NULL, &state, sid);
1993 | } |
1994 | } |
1995 | |
1996 | /** |
1997 | * ice_fill_blk_tbls - Read package context for tables |
1998 | * @hw: pointer to the hardware structure |
1999 | * |
2000 | * Reads the current package contents and populates the driver |
2001 | * database with the data iteratively for all advanced feature |
 * blocks. Assumes that the HW tables have been allocated.
2003 | */ |
2004 | void ice_fill_blk_tbls(struct ice_hw *hw) |
2005 | { |
2006 | u8 i; |
2007 | |
2008 | for (i = 0; i < ICE_BLK_COUNT; i++) { |
2009 | enum ice_block blk_id = (enum ice_block)i; |
2010 | |
		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
		ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
2016 | } |
2017 | |
2018 | ice_init_sw_db(hw); |
2019 | } |
2020 | |
2021 | /** |
2022 | * ice_free_prof_map - free profile map |
2023 | * @hw: pointer to the hardware structure |
2024 | * @blk_idx: HW block index |
2025 | */ |
2026 | static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx) |
2027 | { |
2028 | struct ice_es *es = &hw->blk[blk_idx].es; |
2029 | struct ice_prof_map *del, *tmp; |
2030 | |
2031 | mutex_lock(&es->prof_map_lock); |
2032 | list_for_each_entry_safe(del, tmp, &es->prof_map, list) { |
		list_del(&del->list);
		devm_kfree(ice_hw_to_dev(hw), del);
	}
	INIT_LIST_HEAD(&es->prof_map);
	mutex_unlock(&es->prof_map_lock);
2038 | } |
2039 | |
2040 | /** |
2041 | * ice_free_flow_profs - free flow profile entries |
2042 | * @hw: pointer to the hardware structure |
2043 | * @blk_idx: HW block index |
2044 | */ |
2045 | static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx) |
2046 | { |
2047 | struct ice_flow_prof *p, *tmp; |
2048 | |
2049 | mutex_lock(&hw->fl_profs_locks[blk_idx]); |
2050 | list_for_each_entry_safe(p, tmp, &hw->fl_profs[blk_idx], l_entry) { |
2051 | struct ice_flow_entry *e, *t; |
2052 | |
2053 | list_for_each_entry_safe(e, t, &p->entries, l_entry) |
			ice_flow_rem_entry(hw, (enum ice_block)blk_idx,
2055 | ICE_FLOW_ENTRY_HNDL(e)); |
2056 | |
		list_del(&p->l_entry);
2058 | |
		mutex_destroy(&p->entries_lock);
		devm_kfree(ice_hw_to_dev(hw), p);
	}
	mutex_unlock(&hw->fl_profs_locks[blk_idx]);
2063 | |
2064 | /* if driver is in reset and tables are being cleared |
2065 | * re-initialize the flow profile list heads |
2066 | */ |
	INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
2068 | } |
2069 | |
2070 | /** |
2071 | * ice_free_vsig_tbl - free complete VSIG table entries |
2072 | * @hw: pointer to the hardware structure |
2073 | * @blk: the HW block on which to free the VSIG table entries |
2074 | */ |
2075 | static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk) |
2076 | { |
2077 | u16 i; |
2078 | |
2079 | if (!hw->blk[blk].xlt2.vsig_tbl) |
2080 | return; |
2081 | |
2082 | for (i = 1; i < ICE_MAX_VSIGS; i++) |
2083 | if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) |
			ice_vsig_free(hw, blk, i);
2085 | } |
2086 | |
2087 | /** |
2088 | * ice_free_hw_tbls - free hardware table memory |
2089 | * @hw: pointer to the hardware structure |
2090 | */ |
2091 | void ice_free_hw_tbls(struct ice_hw *hw) |
2092 | { |
2093 | struct ice_rss_cfg *r, *rt; |
2094 | u8 i; |
2095 | |
2096 | for (i = 0; i < ICE_BLK_COUNT; i++) { |
2097 | if (hw->blk[i].is_list_init) { |
2098 | struct ice_es *es = &hw->blk[i].es; |
2099 | |
			ice_free_prof_map(hw, i);
			mutex_destroy(&es->prof_map_lock);

			ice_free_flow_profs(hw, i);
			mutex_destroy(&hw->fl_profs_locks[i]);
2105 | |
2106 | hw->blk[i].is_list_init = false; |
2107 | } |
		ice_free_vsig_tbl(hw, (enum ice_block)i);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptypes);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptg_tbl);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.t);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.t);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsig_tbl);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsis);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof.t);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.mask_ena);
2121 | } |
2122 | |
2123 | list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) { |
		list_del(&r->l_entry);
		devm_kfree(ice_hw_to_dev(hw), r);
	}
	mutex_destroy(&hw->rss_locks);
2128 | ice_shutdown_all_prof_masks(hw); |
2129 | memset(hw->blk, 0, sizeof(hw->blk)); |
2130 | } |
2131 | |
2132 | /** |
2133 | * ice_init_flow_profs - init flow profile locks and list heads |
2134 | * @hw: pointer to the hardware structure |
2135 | * @blk_idx: HW block index |
2136 | */ |
2137 | static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx) |
2138 | { |
2139 | mutex_init(&hw->fl_profs_locks[blk_idx]); |
	INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
2141 | } |
2142 | |
2143 | /** |
2144 | * ice_clear_hw_tbls - clear HW tables and flow profiles |
2145 | * @hw: pointer to the hardware structure |
2146 | */ |
2147 | void ice_clear_hw_tbls(struct ice_hw *hw) |
2148 | { |
2149 | u8 i; |
2150 | |
2151 | for (i = 0; i < ICE_BLK_COUNT; i++) { |
2152 | struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; |
2153 | struct ice_prof_tcam *prof = &hw->blk[i].prof; |
2154 | struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; |
2155 | struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; |
2156 | struct ice_es *es = &hw->blk[i].es; |
2157 | |
2158 | if (hw->blk[i].is_list_init) { |
			ice_free_prof_map(hw, i);
			ice_free_flow_profs(hw, i);
2161 | } |
2162 | |
		ice_free_vsig_tbl(hw, (enum ice_block)i);
2164 | |
2165 | memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes)); |
2166 | memset(xlt1->ptg_tbl, 0, |
2167 | ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl)); |
2168 | memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t)); |
2169 | |
2170 | memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis)); |
2171 | memset(xlt2->vsig_tbl, 0, |
2172 | xlt2->count * sizeof(*xlt2->vsig_tbl)); |
2173 | memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t)); |
2174 | |
2175 | memset(prof->t, 0, prof->count * sizeof(*prof->t)); |
2176 | memset(prof_redir->t, 0, |
2177 | prof_redir->count * sizeof(*prof_redir->t)); |
2178 | |
2179 | memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw); |
2180 | memset(es->ref_count, 0, es->count * sizeof(*es->ref_count)); |
2181 | memset(es->written, 0, es->count * sizeof(*es->written)); |
2182 | memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena)); |
2183 | } |
2184 | } |
2185 | |
2186 | /** |
2187 | * ice_init_hw_tbls - init hardware table memory |
2188 | * @hw: pointer to the hardware structure |
2189 | */ |
2190 | int ice_init_hw_tbls(struct ice_hw *hw) |
2191 | { |
2192 | u8 i; |
2193 | |
2194 | mutex_init(&hw->rss_locks); |
	INIT_LIST_HEAD(&hw->rss_list_head);
2196 | ice_init_all_prof_masks(hw); |
2197 | for (i = 0; i < ICE_BLK_COUNT; i++) { |
2198 | struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; |
2199 | struct ice_prof_tcam *prof = &hw->blk[i].prof; |
2200 | struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; |
2201 | struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; |
2202 | struct ice_es *es = &hw->blk[i].es; |
2203 | u16 j; |
2204 | |
2205 | if (hw->blk[i].is_list_init) |
2206 | continue; |
2207 | |
		ice_init_flow_profs(hw, i);
		mutex_init(&es->prof_map_lock);
		INIT_LIST_HEAD(&es->prof_map);
2211 | hw->blk[i].is_list_init = true; |
2212 | |
2213 | hw->blk[i].overwrite = blk_sizes[i].overwrite; |
2214 | es->reverse = blk_sizes[i].reverse; |
2215 | |
2216 | xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF]; |
2217 | xlt1->count = blk_sizes[i].xlt1; |
2218 | |
		xlt1->ptypes = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
					    sizeof(*xlt1->ptypes), GFP_KERNEL);
2221 | |
2222 | if (!xlt1->ptypes) |
2223 | goto err; |
2224 | |
		xlt1->ptg_tbl = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_PTGS,
					     sizeof(*xlt1->ptg_tbl),
					     GFP_KERNEL);
2228 | |
2229 | if (!xlt1->ptg_tbl) |
2230 | goto err; |
2231 | |
		xlt1->t = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
				       sizeof(*xlt1->t), GFP_KERNEL);
2234 | if (!xlt1->t) |
2235 | goto err; |
2236 | |
2237 | xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF]; |
2238 | xlt2->count = blk_sizes[i].xlt2; |
2239 | |
		xlt2->vsis = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
					  sizeof(*xlt2->vsis), GFP_KERNEL);
2242 | |
2243 | if (!xlt2->vsis) |
2244 | goto err; |
2245 | |
		xlt2->vsig_tbl = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
					      sizeof(*xlt2->vsig_tbl),
					      GFP_KERNEL);
2249 | if (!xlt2->vsig_tbl) |
2250 | goto err; |
2251 | |
2252 | for (j = 0; j < xlt2->count; j++) |
			INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
2254 | |
		xlt2->t = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
				       sizeof(*xlt2->t), GFP_KERNEL);
2257 | if (!xlt2->t) |
2258 | goto err; |
2259 | |
2260 | prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF]; |
2261 | prof->count = blk_sizes[i].prof_tcam; |
2262 | prof->max_prof_id = blk_sizes[i].prof_id; |
2263 | prof->cdid_bits = blk_sizes[i].prof_cdid_bits; |
		prof->t = devm_kcalloc(ice_hw_to_dev(hw), prof->count,
				       sizeof(*prof->t), GFP_KERNEL);
2266 | |
2267 | if (!prof->t) |
2268 | goto err; |
2269 | |
2270 | prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF]; |
2271 | prof_redir->count = blk_sizes[i].prof_redir; |
		prof_redir->t = devm_kcalloc(ice_hw_to_dev(hw),
					     prof_redir->count,
					     sizeof(*prof_redir->t),
					     GFP_KERNEL);
2276 | |
2277 | if (!prof_redir->t) |
2278 | goto err; |
2279 | |
2280 | es->sid = ice_blk_sids[i][ICE_SID_ES_OFF]; |
2281 | es->count = blk_sizes[i].es; |
2282 | es->fvw = blk_sizes[i].fvw; |
		es->t = devm_kcalloc(ice_hw_to_dev(hw),
				     (u32)(es->count * es->fvw),
				     sizeof(*es->t), GFP_KERNEL);
2286 | if (!es->t) |
2287 | goto err; |
2288 | |
		es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count,
					     sizeof(*es->ref_count),
					     GFP_KERNEL);
2292 | if (!es->ref_count) |
2293 | goto err; |
2294 | |
		es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
					   sizeof(*es->written), GFP_KERNEL);
2297 | if (!es->written) |
2298 | goto err; |
2299 | |
		es->mask_ena = devm_kcalloc(ice_hw_to_dev(hw), es->count,
					    sizeof(*es->mask_ena), GFP_KERNEL);
2302 | if (!es->mask_ena) |
2303 | goto err; |
2304 | } |
2305 | return 0; |
2306 | |
2307 | err: |
2308 | ice_free_hw_tbls(hw); |
2309 | return -ENOMEM; |
2310 | } |
2311 | |
2312 | /** |
2313 | * ice_prof_gen_key - generate profile ID key |
2314 | * @hw: pointer to the HW struct |
2315 | * @blk: the block in which to write profile ID to |
2316 | * @ptg: packet type group (PTG) portion of key |
2317 | * @vsig: VSIG portion of key |
2318 | * @cdid: CDID portion of key |
2319 | * @flags: flag portion of key |
2320 | * @vl_msk: valid mask |
2321 | * @dc_msk: don't care mask |
2322 | * @nm_msk: never match mask |
2323 | * @key: output of profile ID key |
2324 | */ |
2325 | static int |
2326 | ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig, |
2327 | u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ], |
2328 | u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ], |
2329 | u8 key[ICE_TCAM_KEY_SZ]) |
2330 | { |
2331 | struct ice_prof_id_key inkey; |
2332 | |
2333 | inkey.xlt1 = ptg; |
2334 | inkey.xlt2_cdid = cpu_to_le16(vsig); |
2335 | inkey.flags = cpu_to_le16(flags); |
2336 | |
2337 | switch (hw->blk[blk].prof.cdid_bits) { |
2338 | case 0: |
2339 | break; |
2340 | case 2: |
2341 | #define ICE_CD_2_M 0xC000U |
2342 | #define ICE_CD_2_S 14 |
2343 | inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_2_M); |
2344 | inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_2_S); |
2345 | break; |
2346 | case 4: |
2347 | #define ICE_CD_4_M 0xF000U |
2348 | #define ICE_CD_4_S 12 |
2349 | inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_4_M); |
2350 | inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_4_S); |
2351 | break; |
2352 | case 8: |
2353 | #define ICE_CD_8_M 0xFF00U |
#define ICE_CD_8_S 8
2355 | inkey.xlt2_cdid &= ~cpu_to_le16(ICE_CD_8_M); |
2356 | inkey.xlt2_cdid |= cpu_to_le16(BIT(cdid) << ICE_CD_8_S); |
2357 | break; |
2358 | default: |
2359 | ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n" ); |
2360 | break; |
2361 | } |
2362 | |
	return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
			   nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
2365 | } |
2366 | |
2367 | /** |
2368 | * ice_tcam_write_entry - write TCAM entry |
2369 | * @hw: pointer to the HW struct |
2370 | * @blk: the block in which to write profile ID to |
2371 | * @idx: the entry index to write to |
2372 | * @prof_id: profile ID |
2373 | * @ptg: packet type group (PTG) portion of key |
2374 | * @vsig: VSIG portion of key |
2375 | * @cdid: CDID portion of key |
2376 | * @flags: flag portion of key |
2377 | * @vl_msk: valid mask |
2378 | * @dc_msk: don't care mask |
2379 | * @nm_msk: never match mask |
2380 | */ |
2381 | static int |
2382 | ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx, |
2383 | u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags, |
2384 | u8 vl_msk[ICE_TCAM_KEY_VAL_SZ], |
2385 | u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], |
2386 | u8 nm_msk[ICE_TCAM_KEY_VAL_SZ]) |
2387 | { |
	int status;
2390 | |
2391 | status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk, |
				   dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
2393 | if (!status) { |
2394 | hw->blk[blk].prof.t[idx].addr = cpu_to_le16(idx); |
2395 | hw->blk[blk].prof.t[idx].prof_id = prof_id; |
2396 | } |
2397 | |
2398 | return status; |
2399 | } |
2400 | |
2401 | /** |
 * ice_vsig_get_ref - returns the number of VSIs belonging to a VSIG
2403 | * @hw: pointer to the hardware structure |
2404 | * @blk: HW block |
2405 | * @vsig: VSIG to query |
2406 | * @refs: pointer to variable to receive the reference count |
2407 | */ |
2408 | static int |
2409 | ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs) |
2410 | { |
2411 | u16 idx = vsig & ICE_VSIG_IDX_M; |
2412 | struct ice_vsig_vsi *ptr; |
2413 | |
2414 | *refs = 0; |
2415 | |
2416 | if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) |
2417 | return -ENOENT; |
2418 | |
2419 | ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; |
2420 | while (ptr) { |
2421 | (*refs)++; |
2422 | ptr = ptr->next_vsi; |
2423 | } |
2424 | |
2425 | return 0; |
2426 | } |
2427 | |
2428 | /** |
2429 | * ice_has_prof_vsig - check to see if VSIG has a specific profile |
2430 | * @hw: pointer to the hardware structure |
2431 | * @blk: HW block |
2432 | * @vsig: VSIG to check against |
2433 | * @hdl: profile handle |
2434 | */ |
2435 | static bool |
2436 | ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl) |
2437 | { |
2438 | u16 idx = vsig & ICE_VSIG_IDX_M; |
2439 | struct ice_vsig_prof *ent; |
2440 | |
2441 | list_for_each_entry(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, |
2442 | list) |
2443 | if (ent->profile_cookie == hdl) |
2444 | return true; |
2445 | |
2446 | ice_debug(hw, ICE_DBG_INIT, "Characteristic list for VSI group %d not found.\n" , |
2447 | vsig); |
2448 | return false; |
2449 | } |
2450 | |
2451 | /** |
2452 | * ice_prof_bld_es - build profile ID extraction sequence changes |
2453 | * @hw: pointer to the HW struct |
2454 | * @blk: hardware block |
2455 | * @bld: the update package buffer build to add to |
2456 | * @chgs: the list of changes to make in hardware |
2457 | */ |
2458 | static int |
2459 | ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk, |
2460 | struct ice_buf_build *bld, struct list_head *chgs) |
2461 | { |
2462 | u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word); |
2463 | struct ice_chs_chg *tmp; |
2464 | |
2465 | list_for_each_entry(tmp, chgs, list_entry) |
2466 | if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) { |
2467 | u16 off = tmp->prof_id * hw->blk[blk].es.fvw; |
2468 | struct ice_pkg_es *p; |
2469 | u32 id; |
2470 | |
			id = ice_sect_id(blk, ICE_VEC_TBL);
			p = ice_pkg_buf_alloc_section(bld, id,
2473 | struct_size(p, es, 1) + |
2474 | vec_size - |
2475 | sizeof(p->es[0])); |
2476 | |
2477 | if (!p) |
2478 | return -ENOSPC; |
2479 | |
2480 | p->count = cpu_to_le16(1); |
2481 | p->offset = cpu_to_le16(tmp->prof_id); |
2482 | |
2483 | memcpy(p->es, &hw->blk[blk].es.t[off], vec_size); |
2484 | } |
2485 | |
2486 | return 0; |
2487 | } |
2488 | |
2489 | /** |
2490 | * ice_prof_bld_tcam - build profile ID TCAM changes |
2491 | * @hw: pointer to the HW struct |
2492 | * @blk: hardware block |
2493 | * @bld: the update package buffer build to add to |
2494 | * @chgs: the list of changes to make in hardware |
2495 | */ |
2496 | static int |
2497 | ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk, |
2498 | struct ice_buf_build *bld, struct list_head *chgs) |
2499 | { |
2500 | struct ice_chs_chg *tmp; |
2501 | |
2502 | list_for_each_entry(tmp, chgs, list_entry) |
2503 | if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) { |
2504 | struct ice_prof_id_section *p; |
2505 | u32 id; |
2506 | |
			id = ice_sect_id(blk, ICE_PROF_TCAM);
			p = ice_pkg_buf_alloc_section(bld, id,
2509 | struct_size(p, entry, 1)); |
2510 | |
2511 | if (!p) |
2512 | return -ENOSPC; |
2513 | |
2514 | p->count = cpu_to_le16(1); |
2515 | p->entry[0].addr = cpu_to_le16(tmp->tcam_idx); |
2516 | p->entry[0].prof_id = tmp->prof_id; |
2517 | |
2518 | memcpy(p->entry[0].key, |
2519 | &hw->blk[blk].prof.t[tmp->tcam_idx].key, |
2520 | sizeof(hw->blk[blk].prof.t->key)); |
2521 | } |
2522 | |
2523 | return 0; |
2524 | } |
2525 | |
2526 | /** |
2527 | * ice_prof_bld_xlt1 - build XLT1 changes |
2528 | * @blk: hardware block |
2529 | * @bld: the update package buffer build to add to |
2530 | * @chgs: the list of changes to make in hardware |
2531 | */ |
2532 | static int |
2533 | ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld, |
2534 | struct list_head *chgs) |
2535 | { |
2536 | struct ice_chs_chg *tmp; |
2537 | |
2538 | list_for_each_entry(tmp, chgs, list_entry) |
2539 | if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) { |
2540 | struct ice_xlt1_section *p; |
2541 | u32 id; |
2542 | |
			id = ice_sect_id(blk, ICE_XLT1);
			p = ice_pkg_buf_alloc_section(bld, id,
2545 | struct_size(p, value, 1)); |
2546 | |
2547 | if (!p) |
2548 | return -ENOSPC; |
2549 | |
2550 | p->count = cpu_to_le16(1); |
2551 | p->offset = cpu_to_le16(tmp->ptype); |
2552 | p->value[0] = tmp->ptg; |
2553 | } |
2554 | |
2555 | return 0; |
2556 | } |
2557 | |
2558 | /** |
2559 | * ice_prof_bld_xlt2 - build XLT2 changes |
2560 | * @blk: hardware block |
2561 | * @bld: the update package buffer build to add to |
2562 | * @chgs: the list of changes to make in hardware |
2563 | */ |
2564 | static int |
2565 | ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld, |
2566 | struct list_head *chgs) |
2567 | { |
2568 | struct ice_chs_chg *tmp; |
2569 | |
2570 | list_for_each_entry(tmp, chgs, list_entry) { |
2571 | struct ice_xlt2_section *p; |
2572 | u32 id; |
2573 | |
2574 | switch (tmp->type) { |
2575 | case ICE_VSIG_ADD: |
2576 | case ICE_VSI_MOVE: |
2577 | case ICE_VSIG_REM: |
			id = ice_sect_id(blk, ICE_XLT2);
			p = ice_pkg_buf_alloc_section(bld, id,
2580 | struct_size(p, value, 1)); |
2581 | |
2582 | if (!p) |
2583 | return -ENOSPC; |
2584 | |
2585 | p->count = cpu_to_le16(1); |
2586 | p->offset = cpu_to_le16(tmp->vsi); |
2587 | p->value[0] = cpu_to_le16(tmp->vsig); |
2588 | break; |
2589 | default: |
2590 | break; |
2591 | } |
2592 | } |
2593 | |
2594 | return 0; |
2595 | } |
2596 | |
2597 | /** |
2598 | * ice_upd_prof_hw - update hardware using the change list |
2599 | * @hw: pointer to the HW struct |
2600 | * @blk: hardware block |
2601 | * @chgs: the list of changes to make in hardware |
2602 | */ |
2603 | static int |
2604 | ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk, |
2605 | struct list_head *chgs) |
2606 | { |
2607 | struct ice_buf_build *b; |
2608 | struct ice_chs_chg *tmp; |
2609 | u16 pkg_sects; |
2610 | u16 xlt1 = 0; |
2611 | u16 xlt2 = 0; |
2612 | u16 tcam = 0; |
2613 | u16 es = 0; |
2614 | int status; |
2615 | u16 sects; |
2616 | |
2617 | /* count number of sections we need */ |
2618 | list_for_each_entry(tmp, chgs, list_entry) { |
2619 | switch (tmp->type) { |
2620 | case ICE_PTG_ES_ADD: |
2621 | if (tmp->add_ptg) |
2622 | xlt1++; |
2623 | if (tmp->add_prof) |
2624 | es++; |
2625 | break; |
2626 | case ICE_TCAM_ADD: |
2627 | tcam++; |
2628 | break; |
2629 | case ICE_VSIG_ADD: |
2630 | case ICE_VSI_MOVE: |
2631 | case ICE_VSIG_REM: |
2632 | xlt2++; |
2633 | break; |
2634 | default: |
2635 | break; |
2636 | } |
2637 | } |
2638 | sects = xlt1 + xlt2 + tcam + es; |
2639 | |
2640 | if (!sects) |
2641 | return 0; |
2642 | |
2643 | /* Build update package buffer */ |
2644 | b = ice_pkg_buf_alloc(hw); |
2645 | if (!b) |
2646 | return -ENOMEM; |
2647 | |
	status = ice_pkg_buf_reserve_section(b, sects);
2649 | if (status) |
2650 | goto error_tmp; |
2651 | |
2652 | /* Preserve order of table update: ES, TCAM, PTG, VSIG */ |
2653 | if (es) { |
		status = ice_prof_bld_es(hw, blk, b, chgs);
2655 | if (status) |
2656 | goto error_tmp; |
2657 | } |
2658 | |
2659 | if (tcam) { |
		status = ice_prof_bld_tcam(hw, blk, b, chgs);
2661 | if (status) |
2662 | goto error_tmp; |
2663 | } |
2664 | |
2665 | if (xlt1) { |
		status = ice_prof_bld_xlt1(blk, b, chgs);
2667 | if (status) |
2668 | goto error_tmp; |
2669 | } |
2670 | |
2671 | if (xlt2) { |
		status = ice_prof_bld_xlt2(blk, b, chgs);
2673 | if (status) |
2674 | goto error_tmp; |
2675 | } |
2676 | |
2677 | /* After package buffer build check if the section count in buffer is |
2678 | * non-zero and matches the number of sections detected for package |
2679 | * update. |
2680 | */ |
	pkg_sects = ice_pkg_buf_get_active_sections(b);
2682 | if (!pkg_sects || pkg_sects != sects) { |
2683 | status = -EINVAL; |
2684 | goto error_tmp; |
2685 | } |
2686 | |
2687 | /* update package */ |
	status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
	if (status == -EIO)
		ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
2691 | |
2692 | error_tmp: |
	ice_pkg_buf_free(hw, b);
2694 | return status; |
2695 | } |
2696 | |
2697 | /** |
2698 | * ice_update_fd_mask - set Flow Director Field Vector mask for a profile |
2699 | * @hw: pointer to the HW struct |
2700 | * @prof_id: profile ID |
2701 | * @mask_sel: mask select |
2702 | * |
 * This function enables any of the masks selected by the mask select
 * parameter for the specified profile.
2705 | */ |
2706 | static void ice_update_fd_mask(struct ice_hw *hw, u16 prof_id, u32 mask_sel) |
2707 | { |
2708 | wr32(hw, GLQF_FDMASK_SEL(prof_id), mask_sel); |
2709 | |
2710 | ice_debug(hw, ICE_DBG_INIT, "fd mask(%d): %x = %x\n" , prof_id, |
2711 | GLQF_FDMASK_SEL(prof_id), mask_sel); |
2712 | } |
2713 | |
2714 | struct ice_fd_src_dst_pair { |
2715 | u8 prot_id; |
2716 | u8 count; |
2717 | u16 off; |
2718 | }; |
2719 | |
2720 | static const struct ice_fd_src_dst_pair ice_fd_pairs[] = { |
2721 | /* These are defined in pairs */ |
2722 | { ICE_PROT_IPV4_OF_OR_S, 2, 12 }, |
2723 | { ICE_PROT_IPV4_OF_OR_S, 2, 16 }, |
2724 | |
2725 | { ICE_PROT_IPV4_IL, 2, 12 }, |
2726 | { ICE_PROT_IPV4_IL, 2, 16 }, |
2727 | |
2728 | { ICE_PROT_IPV6_OF_OR_S, 8, 8 }, |
2729 | { ICE_PROT_IPV6_OF_OR_S, 8, 24 }, |
2730 | |
2731 | { ICE_PROT_IPV6_IL, 8, 8 }, |
2732 | { ICE_PROT_IPV6_IL, 8, 24 }, |
2733 | |
2734 | { ICE_PROT_TCP_IL, 1, 0 }, |
2735 | { ICE_PROT_TCP_IL, 1, 2 }, |
2736 | |
2737 | { ICE_PROT_UDP_OF, 1, 0 }, |
2738 | { ICE_PROT_UDP_OF, 1, 2 }, |
2739 | |
2740 | { ICE_PROT_UDP_IL_OR_S, 1, 0 }, |
2741 | { ICE_PROT_UDP_IL_OR_S, 1, 2 }, |
2742 | |
2743 | { ICE_PROT_SCTP_IL, 1, 0 }, |
2744 | { ICE_PROT_SCTP_IL, 1, 2 } |
2745 | }; |
2746 | |
2747 | #define ICE_FD_SRC_DST_PAIR_COUNT ARRAY_SIZE(ice_fd_pairs) |
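
/* Editor's note: the table above is consumed two rows at a time as
 * source/destination pairs; e.g. the first IPv4 pair is the source address
 * at offset 12 and the destination address at offset 16 of the IPv4 header,
 * each spanning two FV words.
 */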
2748 | |
2749 | /** |
 * ice_update_fd_swap - set registers appropriately for an FD FV extraction
2751 | * @hw: pointer to the HW struct |
2752 | * @prof_id: profile ID |
2753 | * @es: extraction sequence (length of array is determined by the block) |
2754 | */ |
2755 | static int |
2756 | ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es) |
2757 | { |
2758 | DECLARE_BITMAP(pair_list, ICE_FD_SRC_DST_PAIR_COUNT); |
2759 | u8 pair_start[ICE_FD_SRC_DST_PAIR_COUNT] = { 0 }; |
2760 | #define ICE_FD_FV_NOT_FOUND (-2) |
2761 | s8 first_free = ICE_FD_FV_NOT_FOUND; |
2762 | u8 used[ICE_MAX_FV_WORDS] = { 0 }; |
2763 | s8 orig_free, si; |
2764 | u32 mask_sel = 0; |
2765 | u8 i, j, k; |
2766 | |
	bitmap_zero(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
2768 | |
2769 | /* This code assumes that the Flow Director field vectors are assigned |
2770 | * from the end of the FV indexes working towards the zero index, that |
2771 | * only complete fields will be included and will be consecutive, and |
2772 | * that there are no gaps between valid indexes. |
2773 | */ |
2774 | |
2775 | /* Determine swap fields present */ |
2776 | for (i = 0; i < hw->blk[ICE_BLK_FD].es.fvw; i++) { |
2777 | /* Find the first free entry, assuming right to left population. |
2778 | * This is where we can start adding additional pairs if needed. |
2779 | */ |
2780 | if (first_free == ICE_FD_FV_NOT_FOUND && es[i].prot_id != |
2781 | ICE_PROT_INVALID) |
2782 | first_free = i - 1; |
2783 | |
2784 | for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++) |
2785 | if (es[i].prot_id == ice_fd_pairs[j].prot_id && |
2786 | es[i].off == ice_fd_pairs[j].off) { |
2787 | __set_bit(j, pair_list); |
2788 | pair_start[j] = i; |
2789 | } |
2790 | } |
2791 | |
2792 | orig_free = first_free; |
2793 | |
2794 | /* determine missing swap fields that need to be added */ |
2795 | for (i = 0; i < ICE_FD_SRC_DST_PAIR_COUNT; i += 2) { |
2796 | u8 bit1 = test_bit(i + 1, pair_list); |
2797 | u8 bit0 = test_bit(i, pair_list); |
2798 | |
2799 | if (bit0 ^ bit1) { |
2800 | u8 index; |
2801 | |
2802 | /* add the appropriate 'paired' entry */ |
2803 | if (!bit0) |
2804 | index = i; |
2805 | else |
2806 | index = i + 1; |
2807 | |
2808 | /* check for room */ |
2809 | if (first_free + 1 < (s8)ice_fd_pairs[index].count) |
2810 | return -ENOSPC; |
2811 | |
2812 | /* place in extraction sequence */ |
2813 | for (k = 0; k < ice_fd_pairs[index].count; k++) { |
2814 | es[first_free - k].prot_id = |
2815 | ice_fd_pairs[index].prot_id; |
2816 | es[first_free - k].off = |
2817 | ice_fd_pairs[index].off + (k * 2); |
2818 | |
2819 | if (k > first_free) |
2820 | return -EIO; |
2821 | |
2822 | /* keep track of non-relevant fields */ |
2823 | mask_sel |= BIT(first_free - k); |
2824 | } |
2825 | |
2826 | pair_start[index] = first_free; |
2827 | first_free -= ice_fd_pairs[index].count; |
2828 | } |
2829 | } |
2830 | |
2831 | /* fill in the swap array */ |
2832 | si = hw->blk[ICE_BLK_FD].es.fvw - 1; |
2833 | while (si >= 0) { |
2834 | u8 indexes_used = 1; |
2835 | |
2836 | /* assume flat at this index */ |
2837 | #define ICE_SWAP_VALID 0x80 |
2838 | used[si] = si | ICE_SWAP_VALID; |
2839 | |
2840 | if (orig_free == ICE_FD_FV_NOT_FOUND || si <= orig_free) { |
2841 | si -= indexes_used; |
2842 | continue; |
2843 | } |
2844 | |
2845 | /* check for a swap location */ |
2846 | for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++) |
2847 | if (es[si].prot_id == ice_fd_pairs[j].prot_id && |
2848 | es[si].off == ice_fd_pairs[j].off) { |
2849 | u8 idx; |
2850 | |
2851 | /* determine the appropriate matching field */ |
2852 | idx = j + ((j % 2) ? -1 : 1); |
2853 | |
2854 | indexes_used = ice_fd_pairs[idx].count; |
2855 | for (k = 0; k < indexes_used; k++) { |
2856 | used[si - k] = (pair_start[idx] - k) | |
2857 | ICE_SWAP_VALID; |
2858 | } |
2859 | |
2860 | break; |
2861 | } |
2862 | |
2863 | si -= indexes_used; |
2864 | } |
2865 | |
2866 | /* for each set of 4 swap and 4 inset indexes, write the appropriate |
2867 | * register |
2868 | */ |
2869 | for (j = 0; j < hw->blk[ICE_BLK_FD].es.fvw / 4; j++) { |
2870 | u32 raw_swap = 0; |
2871 | u32 raw_in = 0; |
2872 | |
2873 | for (k = 0; k < 4; k++) { |
2874 | u8 idx; |
2875 | |
2876 | idx = (j * 4) + k; |
2877 | if (used[idx] && !(mask_sel & BIT(idx))) { |
2878 | raw_swap |= used[idx] << (k * BITS_PER_BYTE); |
2879 | #define ICE_INSET_DFLT 0x9f |
2880 | raw_in |= ICE_INSET_DFLT << (k * BITS_PER_BYTE); |
2881 | } |
2882 | } |
2883 | |
2884 | /* write the appropriate swap register set */ |
2885 | wr32(hw, GLQF_FDSWAP(prof_id, j), raw_swap); |
2886 | |
2887 | ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): %x = %08x\n" , |
2888 | prof_id, j, GLQF_FDSWAP(prof_id, j), raw_swap); |
2889 | |
2890 | /* write the appropriate inset register set */ |
2891 | wr32(hw, GLQF_FDINSET(prof_id, j), raw_in); |
2892 | |
2893 | ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): %x = %08x\n" , |
2894 | prof_id, j, GLQF_FDINSET(prof_id, j), raw_in); |
2895 | } |
2896 | |
2897 | /* initially clear the mask select for this profile */ |
	ice_update_fd_mask(hw, prof_id, 0);
2899 | |
2900 | return 0; |
2901 | } |
2902 | |
/* The entries here need to match the order of enum ice_ptype_attrib */
2904 | static const struct ice_ptype_attrib_info ice_ptype_attributes[] = { |
2905 | { ICE_GTP_PDU_EH, ICE_GTP_PDU_FLAG_MASK }, |
2906 | { ICE_GTP_SESSION, ICE_GTP_FLAGS_MASK }, |
2907 | { ICE_GTP_DOWNLINK, ICE_GTP_FLAGS_MASK }, |
2908 | { ICE_GTP_UPLINK, ICE_GTP_FLAGS_MASK }, |
2909 | }; |
2910 | |
2911 | /** |
2912 | * ice_get_ptype_attrib_info - get PTYPE attribute information |
2913 | * @type: attribute type |
 * @info: pointer to variable to receive the attribute information
2915 | */ |
2916 | static void |
2917 | ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type, |
2918 | struct ice_ptype_attrib_info *info) |
2919 | { |
2920 | *info = ice_ptype_attributes[type]; |
2921 | } |
2922 | |
2923 | /** |
2924 | * ice_add_prof_attrib - add any PTG with attributes to profile |
2925 | * @prof: pointer to the profile to which PTG entries will be added |
2926 | * @ptg: PTG to be added |
2927 | * @ptype: PTYPE that needs to be looked up |
2928 | * @attr: array of attributes that will be considered |
2929 | * @attr_cnt: number of elements in the attribute array |
2930 | */ |
2931 | static int |
2932 | ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype, |
2933 | const struct ice_ptype_attributes *attr, u16 attr_cnt) |
2934 | { |
2935 | bool found = false; |
2936 | u16 i; |
2937 | |
2938 | for (i = 0; i < attr_cnt; i++) |
2939 | if (attr[i].ptype == ptype) { |
2940 | found = true; |
2941 | |
2942 | prof->ptg[prof->ptg_cnt] = ptg; |
			ice_get_ptype_attrib_info(attr[i].attrib,
						  &prof->attr[prof->ptg_cnt]);
2945 | |
2946 | if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE) |
2947 | return -ENOSPC; |
2948 | } |
2949 | |
2950 | if (!found) |
2951 | return -ENOENT; |
2952 | |
2953 | return 0; |
2954 | } |
2955 | |
2956 | /** |
2957 | * ice_add_prof - add profile |
2958 | * @hw: pointer to the HW struct |
2959 | * @blk: hardware block |
2960 | * @id: profile tracking ID |
2961 | * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits) |
2962 | * @attr: array of attributes |
2963 | * @attr_cnt: number of elements in attr array |
2964 | * @es: extraction sequence (length of array is determined by the block) |
2965 | * @masks: mask for extraction sequence |
2966 | * |
2967 | * This function registers a profile, which matches a set of PTYPES with a |
 * particular extraction sequence. While the hardware profile is allocated
 * here, it will not be written until the first call to ice_add_flow that
 * specifies the ID value used here.
2971 | */ |
2972 | int |
2973 | ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], |
2974 | const struct ice_ptype_attributes *attr, u16 attr_cnt, |
2975 | struct ice_fv_word *es, u16 *masks) |
2976 | { |
2977 | u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE); |
2978 | DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT); |
2979 | struct ice_prof_map *prof; |
2980 | u8 byte = 0; |
2981 | u8 prof_id; |
2982 | int status; |
2983 | |
	bitmap_zero(ptgs_used, ICE_XLT1_CNT);
2985 | |
2986 | mutex_lock(&hw->blk[blk].es.prof_map_lock); |
2987 | |
2988 | /* search for existing profile */ |
	status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id);
2990 | if (status) { |
2991 | /* allocate profile ID */ |
		status = ice_alloc_prof_id(hw, blk, &prof_id);
2993 | if (status) |
2994 | goto err_ice_add_prof; |
2995 | if (blk == ICE_BLK_FD) { |
2996 | /* For Flow Director block, the extraction sequence may |
2997 | * need to be altered in the case where there are paired |
2998 | * fields that have no match. This is necessary because |
			 * for Flow Director, src and dest fields need to be paired
3000 | * for filter programming and these values are swapped |
3001 | * during Tx. |
3002 | */ |
3003 | status = ice_update_fd_swap(hw, prof_id, es); |
3004 | if (status) |
3005 | goto err_ice_add_prof; |
3006 | } |
3007 | status = ice_update_prof_masking(hw, blk, prof_id, masks); |
3008 | if (status) |
3009 | goto err_ice_add_prof; |
3010 | |
3011 | /* and write new es */ |
		ice_write_es(hw, blk, prof_id, es);
3013 | } |
3014 | |
3015 | ice_prof_inc_ref(hw, blk, prof_id); |
3016 | |
3017 | /* add profile info */ |
	prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL);
3019 | if (!prof) { |
3020 | status = -ENOMEM; |
3021 | goto err_ice_add_prof; |
3022 | } |
3023 | |
3024 | prof->profile_cookie = id; |
3025 | prof->prof_id = prof_id; |
3026 | prof->ptg_cnt = 0; |
3027 | prof->context = 0; |
3028 | |
3029 | /* build list of ptgs */ |
3030 | while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) { |
3031 | u8 bit; |
3032 | |
3033 | if (!ptypes[byte]) { |
3034 | bytes--; |
3035 | byte++; |
3036 | continue; |
3037 | } |
3038 | |
3039 | /* Examine 8 bits per byte */ |
3040 | for_each_set_bit(bit, (unsigned long *)&ptypes[byte], |
3041 | BITS_PER_BYTE) { |
3042 | u16 ptype; |
3043 | u8 ptg; |
3044 | |
3045 | ptype = byte * BITS_PER_BYTE + bit; |
3046 | |
3047 | /* The package should place all ptypes in a non-zero |
3048 | * PTG, so the following call should never fail. |
3049 | */ |
			if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
3051 | continue; |
3052 | |
3053 | /* If PTG is already added, skip and continue */ |
3054 | if (test_bit(ptg, ptgs_used)) |
3055 | continue; |
3056 | |
3057 | __set_bit(ptg, ptgs_used); |
3058 | /* Check to see there are any attributes for |
3059 | * this PTYPE, and add them if found. |
3060 | */ |
3061 | status = ice_add_prof_attrib(prof, ptg, ptype, |
3062 | attr, attr_cnt); |
3063 | if (status == -ENOSPC) |
3064 | break; |
3065 | if (status) { |
				/* This is simply a PTYPE/PTG with no
3067 | * attribute |
3068 | */ |
3069 | prof->ptg[prof->ptg_cnt] = ptg; |
3070 | prof->attr[prof->ptg_cnt].flags = 0; |
3071 | prof->attr[prof->ptg_cnt].mask = 0; |
3072 | |
3073 | if (++prof->ptg_cnt >= |
3074 | ICE_MAX_PTG_PER_PROFILE) |
3075 | break; |
3076 | } |
3077 | } |
3078 | |
3079 | bytes--; |
3080 | byte++; |
3081 | } |
3082 | |
	list_add(&prof->list, &hw->blk[blk].es.prof_map);
3084 | status = 0; |
3085 | |
3086 | err_ice_add_prof: |
	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
3088 | return status; |
3089 | } |
3090 | |
3091 | /** |
3092 | * ice_search_prof_id - Search for a profile tracking ID |
3093 | * @hw: pointer to the HW struct |
3094 | * @blk: hardware block |
3095 | * @id: profile tracking ID |
3096 | * |
3097 | * This will search for a profile tracking ID which was previously added. |
3098 | * The profile map lock should be held before calling this function. |
3099 | */ |
3100 | static struct ice_prof_map * |
3101 | ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id) |
3102 | { |
3103 | struct ice_prof_map *entry = NULL; |
3104 | struct ice_prof_map *map; |
3105 | |
3106 | list_for_each_entry(map, &hw->blk[blk].es.prof_map, list) |
3107 | if (map->profile_cookie == id) { |
3108 | entry = map; |
3109 | break; |
3110 | } |
3111 | |
3112 | return entry; |
3113 | } |
3114 | |
3115 | /** |
3116 | * ice_vsig_prof_id_count - count profiles in a VSIG |
3117 | * @hw: pointer to the HW struct |
3118 | * @blk: hardware block |
 * @vsig: VSIG to query
3120 | */ |
3121 | static u16 |
3122 | ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig) |
3123 | { |
3124 | u16 idx = vsig & ICE_VSIG_IDX_M, count = 0; |
3125 | struct ice_vsig_prof *p; |
3126 | |
3127 | list_for_each_entry(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, |
3128 | list) |
3129 | count++; |
3130 | |
3131 | return count; |
3132 | } |
3133 | |
3134 | /** |
3135 | * ice_rel_tcam_idx - release a TCAM index |
3136 | * @hw: pointer to the HW struct |
3137 | * @blk: hardware block |
3138 | * @idx: the index to release |
3139 | */ |
3140 | static int ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx) |
3141 | { |
3142 | /* Masks to invoke a never match entry */ |
3143 | u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; |
3144 | u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF }; |
3145 | u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 }; |
3146 | int status; |
3147 | |
3148 | /* write the TCAM entry */ |
	status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
3150 | dc_msk, nm_msk); |
3151 | if (status) |
3152 | return status; |
3153 | |
3154 | /* release the TCAM entry */ |
	status = ice_free_tcam_ent(hw, blk, idx);
3156 | |
3157 | return status; |
3158 | } |
3159 | |
3160 | /** |
3161 | * ice_rem_prof_id - remove one profile from a VSIG |
3162 | * @hw: pointer to the HW struct |
3163 | * @blk: hardware block |
3164 | * @prof: pointer to profile structure to remove |
3165 | */ |
3166 | static int |
3167 | ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk, |
3168 | struct ice_vsig_prof *prof) |
3169 | { |
3170 | int status; |
3171 | u16 i; |
3172 | |
3173 | for (i = 0; i < prof->tcam_count; i++) |
3174 | if (prof->tcam[i].in_use) { |
3175 | prof->tcam[i].in_use = false; |
			status = ice_rel_tcam_idx(hw, blk,
						  prof->tcam[i].tcam_idx);
3178 | if (status) |
3179 | return -EIO; |
3180 | } |
3181 | |
3182 | return 0; |
3183 | } |
3184 | |
3185 | /** |
3186 | * ice_rem_vsig - remove VSIG |
3187 | * @hw: pointer to the HW struct |
3188 | * @blk: hardware block |
3189 | * @vsig: the VSIG to remove |
3190 | * @chg: the change list |
3191 | */ |
3192 | static int |
3193 | ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, |
3194 | struct list_head *chg) |
3195 | { |
3196 | u16 idx = vsig & ICE_VSIG_IDX_M; |
3197 | struct ice_vsig_vsi *vsi_cur; |
3198 | struct ice_vsig_prof *d, *t; |
3199 | |
3200 | /* remove TCAM entries */ |
3201 | list_for_each_entry_safe(d, t, |
3202 | &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, |
3203 | list) { |
3204 | int status; |
3205 | |
		status = ice_rem_prof_id(hw, blk, d);
3207 | if (status) |
3208 | return status; |
3209 | |
		list_del(&d->list);
		devm_kfree(ice_hw_to_dev(hw), d);
3212 | } |
3213 | |
	/* Move all VSIs associated with this VSIG to the default VSIG */
3215 | vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; |
3216 | /* If the VSIG has at least 1 VSI then iterate through the list |
3217 | * and remove the VSIs before deleting the group. |
3218 | */ |
3219 | if (vsi_cur) |
3220 | do { |
3221 | struct ice_vsig_vsi *tmp = vsi_cur->next_vsi; |
3222 | struct ice_chs_chg *p; |
3223 | |
			p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
					 GFP_KERNEL);
3226 | if (!p) |
3227 | return -ENOMEM; |
3228 | |
3229 | p->type = ICE_VSIG_REM; |
3230 | p->orig_vsig = vsig; |
3231 | p->vsig = ICE_DEFAULT_VSIG; |
3232 | p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis; |
3233 | |
			list_add(&p->list_entry, chg);
3235 | |
3236 | vsi_cur = tmp; |
3237 | } while (vsi_cur); |
3238 | |
3239 | return ice_vsig_free(hw, blk, vsig); |
3240 | } |
3241 | |
3242 | /** |
3243 | * ice_rem_prof_id_vsig - remove a specific profile from a VSIG |
3244 | * @hw: pointer to the HW struct |
3245 | * @blk: hardware block |
3246 | * @vsig: VSIG to remove the profile from |
3247 | * @hdl: profile handle indicating which profile to remove |
3248 | * @chg: list to receive a record of changes |
3249 | */ |
3250 | static int |
3251 | ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, |
3252 | struct list_head *chg) |
3253 | { |
3254 | u16 idx = vsig & ICE_VSIG_IDX_M; |
3255 | struct ice_vsig_prof *p, *t; |
3256 | |
3257 | list_for_each_entry_safe(p, t, |
3258 | &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, |
3259 | list) |
3260 | if (p->profile_cookie == hdl) { |
3261 | int status; |
3262 | |
3263 | if (ice_vsig_prof_id_count(hw, blk, vsig) == 1) |
3264 | /* this is the last profile, remove the VSIG */ |
3265 | return ice_rem_vsig(hw, blk, vsig, chg); |
3266 | |
			status = ice_rem_prof_id(hw, blk, p);
			if (!status) {
				list_del(&p->list);
				devm_kfree(ice_hw_to_dev(hw), p);
3271 | } |
3272 | return status; |
3273 | } |
3274 | |
3275 | return -ENOENT; |
3276 | } |
3277 | |
3278 | /** |
3279 | * ice_rem_flow_all - remove all flows with a particular profile |
3280 | * @hw: pointer to the HW struct |
3281 | * @blk: hardware block |
3282 | * @id: profile tracking ID |
3283 | */ |
3284 | static int ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id) |
3285 | { |
3286 | struct ice_chs_chg *del, *tmp; |
3287 | struct list_head chg; |
3288 | int status; |
3289 | u16 i; |
3290 | |
	INIT_LIST_HEAD(&chg);
3292 | |
3293 | for (i = 1; i < ICE_MAX_VSIGS; i++) |
3294 | if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) { |
			if (ice_has_prof_vsig(hw, blk, i, id)) {
				status = ice_rem_prof_id_vsig(hw, blk, i, id,
							      &chg);
3298 | if (status) |
3299 | goto err_ice_rem_flow_all; |
3300 | } |
3301 | } |
3302 | |
	status = ice_upd_prof_hw(hw, blk, &chg);
3304 | |
3305 | err_ice_rem_flow_all: |
3306 | list_for_each_entry_safe(del, tmp, &chg, list_entry) { |
		list_del(&del->list_entry);
		devm_kfree(ice_hw_to_dev(hw), del);
3309 | } |
3310 | |
3311 | return status; |
3312 | } |
3313 | |
3314 | /** |
3315 | * ice_rem_prof - remove profile |
3316 | * @hw: pointer to the HW struct |
3317 | * @blk: hardware block |
3318 | * @id: profile tracking ID |
3319 | * |
3320 | * This will remove the profile specified by the ID parameter, which was |
3321 | * previously created through ice_add_prof. If any existing entries |
3322 | * are associated with this profile, they will be removed as well. |
3323 | */ |
3324 | int ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id) |
3325 | { |
3326 | struct ice_prof_map *pmap; |
3327 | int status; |
3328 | |
3329 | mutex_lock(&hw->blk[blk].es.prof_map_lock); |
3330 | |
3331 | pmap = ice_search_prof_id(hw, blk, id); |
3332 | if (!pmap) { |
3333 | status = -ENOENT; |
3334 | goto err_ice_rem_prof; |
3335 | } |
3336 | |
3337 | /* remove all flows with this profile */ |
	status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
3339 | if (status) |
3340 | goto err_ice_rem_prof; |
3341 | |
3342 | /* dereference profile, and possibly remove */ |
	ice_prof_dec_ref(hw, blk, pmap->prof_id);
3344 | |
	list_del(&pmap->list);
	devm_kfree(ice_hw_to_dev(hw), pmap);
3347 | |
3348 | err_ice_rem_prof: |
	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
3350 | return status; |
3351 | } |
3352 | |
3353 | /** |
3354 | * ice_get_prof - get profile |
3355 | * @hw: pointer to the HW struct |
3356 | * @blk: hardware block |
3357 | * @hdl: profile handle |
3358 | * @chg: change list |
3359 | */ |
3360 | static int |
3361 | ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl, |
3362 | struct list_head *chg) |
3363 | { |
3364 | struct ice_prof_map *map; |
3365 | struct ice_chs_chg *p; |
3366 | int status = 0; |
3367 | u16 i; |
3368 | |
3369 | mutex_lock(&hw->blk[blk].es.prof_map_lock); |
3370 | /* Get the details on the profile specified by the handle ID */ |
	map = ice_search_prof_id(hw, blk, hdl);
3372 | if (!map) { |
3373 | status = -ENOENT; |
3374 | goto err_ice_get_prof; |
3375 | } |
3376 | |
3377 | for (i = 0; i < map->ptg_cnt; i++) |
3378 | if (!hw->blk[blk].es.written[map->prof_id]) { |
3379 | /* add ES to change list */ |
			p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
					 GFP_KERNEL);
3382 | if (!p) { |
3383 | status = -ENOMEM; |
3384 | goto err_ice_get_prof; |
3385 | } |
3386 | |
3387 | p->type = ICE_PTG_ES_ADD; |
3388 | p->ptype = 0; |
3389 | p->ptg = map->ptg[i]; |
3390 | p->add_ptg = 0; |
3391 | |
3392 | p->add_prof = 1; |
3393 | p->prof_id = map->prof_id; |
3394 | |
3395 | hw->blk[blk].es.written[map->prof_id] = true; |
3396 | |
			list_add(&p->list_entry, chg);
3398 | } |
3399 | |
3400 | err_ice_get_prof: |
	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
3402 | /* let caller clean up the change list */ |
3403 | return status; |
3404 | } |

/**
 * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: VSIG from which to copy the list
 * @lst: output list
 *
 * This routine makes a copy of the list of profiles in the specified VSIG.
 */
static int
ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
		   struct list_head *lst)
{
	struct ice_vsig_prof *ent1, *ent2;
	u16 idx = vsig & ICE_VSIG_IDX_M;

	list_for_each_entry(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
			    list) {
		struct ice_vsig_prof *p;

		/* copy to the input list */
		p = devm_kmemdup(ice_hw_to_dev(hw), ent1, sizeof(*p),
				 GFP_KERNEL);
		if (!p)
			goto err_ice_get_profs_vsig;

		list_add_tail(&p->list, lst);
	}

	return 0;

err_ice_get_profs_vsig:
	list_for_each_entry_safe(ent1, ent2, lst, list) {
		list_del(&ent1->list);
		devm_kfree(ice_hw_to_dev(hw), ent1);
	}

	return -ENOMEM;
}
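
/* Example (illustrative sketch): on success the caller owns the copied
 * entries and must free them the same way the error path above does:
 *
 *	struct ice_vsig_prof *ent, *tmp;
 *	LIST_HEAD(copy);
 *
 *	if (!ice_get_profs_vsig(hw, blk, vsig, &copy)) {
 *		... use the copy here ...
 *		list_for_each_entry_safe(ent, tmp, &copy, list) {
 *			list_del(&ent->list);
 *			devm_kfree(ice_hw_to_dev(hw), ent);
 *		}
 *	}
 */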

/**
 * ice_add_prof_to_lst - add profile entry to a list
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @lst: the list to be added to
 * @hdl: profile handle of entry to add
 */
static int
ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
		    struct list_head *lst, u64 hdl)
{
	struct ice_prof_map *map;
	struct ice_vsig_prof *p;
	int status = 0;
	u16 i;

	mutex_lock(&hw->blk[blk].es.prof_map_lock);
	map = ice_search_prof_id(hw, blk, hdl);
	if (!map) {
		status = -ENOENT;
		goto err_ice_add_prof_to_lst;
	}

	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
	if (!p) {
		status = -ENOMEM;
		goto err_ice_add_prof_to_lst;
	}

	p->profile_cookie = map->profile_cookie;
	p->prof_id = map->prof_id;
	p->tcam_count = map->ptg_cnt;

	for (i = 0; i < map->ptg_cnt; i++) {
		p->tcam[i].prof_id = map->prof_id;
		p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
		p->tcam[i].ptg = map->ptg[i];
	}

	list_add(&p->list, lst);

err_ice_add_prof_to_lst:
	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
	return status;
}

/**
 * ice_move_vsi - move VSI to another VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the VSI to move
 * @vsig: the VSIG to move the VSI to
 * @chg: the change list
 */
static int
ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
	     struct list_head *chg)
{
	struct ice_chs_chg *p;
	u16 orig_vsig;
	int status;

	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
	if (!status)
		status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);

	if (status) {
		devm_kfree(ice_hw_to_dev(hw), p);
		return status;
	}

	p->type = ICE_VSI_MOVE;
	p->vsi = vsi;
	p->orig_vsig = orig_vsig;
	p->vsig = vsig;

	list_add(&p->list_entry, chg);

	return 0;
}

/**
 * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
 * @hw: pointer to the HW struct
 * @idx: the index of the TCAM entry to remove
 * @chg: the list of change structures to search
 */
static void
ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct list_head *chg)
{
	struct ice_chs_chg *pos, *tmp;

	list_for_each_entry_safe(tmp, pos, chg, list_entry)
		if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
			list_del(&tmp->list_entry);
			devm_kfree(ice_hw_to_dev(hw), tmp);
		}
}

/**
 * ice_prof_tcam_ena_dis - add enable or disable TCAM change
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @enable: true to enable, false to disable
 * @vsig: the VSIG of the TCAM entry
 * @tcam: pointer to the TCAM info structure of the TCAM to disable
 * @chg: the change list
 *
 * This function appends an enable or disable TCAM entry to the change list.
 */
static int
ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
		      u16 vsig, struct ice_tcam_inf *tcam,
		      struct list_head *chg)
{
	struct ice_chs_chg *p;
	int status;
	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* if disabling, free the TCAM */
	if (!enable) {
		status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);

		/* if we have already created a change for this TCAM entry, then
		 * we need to remove that entry, in order to prevent writing to
		 * a TCAM entry we no longer will have ownership of.
		 */
		ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
		tcam->tcam_idx = 0;
		tcam->in_use = 0;
		return status;
	}

	/* for re-enabling, reallocate a TCAM */
	/* for entries with empty attribute masks, allocate entry from
	 * the bottom of the TCAM table; otherwise, allocate from the
	 * top of the table in order to give it higher priority
	 */
	status = ice_alloc_tcam_ent(hw, blk, tcam->attr.mask == 0,
				    &tcam->tcam_idx);
	if (status)
		return status;

	/* add TCAM to change list */
	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
				      tcam->ptg, vsig, 0, tcam->attr.flags,
				      vl_msk, dc_msk, nm_msk);
	if (status)
		goto err_ice_prof_tcam_ena_dis;

	tcam->in_use = 1;

	p->type = ICE_TCAM_ADD;
	p->add_tcam_idx = true;
	p->prof_id = tcam->prof_id;
	p->ptg = tcam->ptg;
	p->vsig = 0;
	p->tcam_idx = tcam->tcam_idx;

	/* log change */
	list_add(&p->list_entry, chg);

	return 0;

err_ice_prof_tcam_ena_dis:
	devm_kfree(ice_hw_to_dev(hw), p);
	return status;
}
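
/* Example (illustrative sketch): the priority-adjustment code below drives
 * this helper in pairs. When an older profile's PTG duplicates one already
 * claimed by a newer profile, the duplicate is disabled and the newest
 * occurrence (re-)enabled, e.g.:
 *
 *	err = ice_prof_tcam_ena_dis(hw, blk, false, vsig, &old->tcam[i], chg);
 *	if (!err)
 *		err = ice_prof_tcam_ena_dis(hw, blk, true, vsig,
 *					    &newest->tcam[i], chg);
 */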

/**
 * ice_adj_prof_priorities - adjust profile based on priorities
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: the VSIG for which to adjust profile priorities
 * @chg: the change list
 */
static int
ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
			struct list_head *chg)
{
	DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
	struct ice_vsig_prof *t;
	int status;
	u16 idx;

	bitmap_zero(ptgs_used, ICE_XLT1_CNT);
	idx = vsig & ICE_VSIG_IDX_M;

	/* Priority is based on the order in which the profiles are added. The
	 * newest added profile has highest priority and the oldest added
	 * profile has the lowest priority. Since the profile property list for
	 * a VSIG is sorted from newest to oldest, this code traverses the list
	 * in order and enables the first of each PTG that it finds (that is not
	 * already enabled); it also disables any duplicate PTGs that it finds
	 * in the older profiles (that are currently enabled).
	 */

	list_for_each_entry(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
			    list) {
		u16 i;

		for (i = 0; i < t->tcam_count; i++) {
			/* Scan the priorities from newest to oldest.
			 * Make sure that the newest profiles take priority.
			 */
			if (test_bit(t->tcam[i].ptg, ptgs_used) &&
			    t->tcam[i].in_use) {
				/* need to mark this PTG as never match, as it
				 * was already in use and therefore duplicate
				 * (and lower priority)
				 */
				status = ice_prof_tcam_ena_dis(hw, blk, false,
							       vsig,
							       &t->tcam[i],
							       chg);
				if (status)
					return status;
			} else if (!test_bit(t->tcam[i].ptg, ptgs_used) &&
				   !t->tcam[i].in_use) {
				/* need to enable this PTG, as it is not in use
				 * and not enabled (highest priority)
				 */
				status = ice_prof_tcam_ena_dis(hw, blk, true,
							       vsig,
							       &t->tcam[i],
							       chg);
				if (status)
					return status;
			}

			/* keep track of used ptgs */
			__set_bit(t->tcam[i].ptg, ptgs_used);
		}
	}

	return 0;
}
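
/* Worked example (illustrative): with a VSIG property list holding profiles
 * B (newest) and A (oldest) that both map to PTG 5, the walk above first
 * enables B's entry for PTG 5 (not yet in ptgs_used), then, reaching A,
 * finds PTG 5 already used and in_use, so A's duplicate is disabled; the
 * newest profile wins.
 */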

/**
 * ice_add_prof_id_vsig - add profile to VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: the VSIG to which this profile is to be added
 * @hdl: the profile handle indicating the profile to add
 * @rev: true to add entries to the end of the list
 * @chg: the change list
 */
static int
ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
		     bool rev, struct list_head *chg)
{
	/* Masks that ignore flags */
	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
	struct ice_prof_map *map;
	struct ice_vsig_prof *t;
	struct ice_chs_chg *p;
	u16 vsig_idx, i;
	int status = 0;

	/* Error, if this VSIG already has this profile */
	if (ice_has_prof_vsig(hw, blk, vsig, hdl))
		return -EEXIST;

	/* new VSIG profile structure */
	t = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	mutex_lock(&hw->blk[blk].es.prof_map_lock);
	/* Get the details on the profile specified by the handle ID */
	map = ice_search_prof_id(hw, blk, hdl);
	if (!map) {
		status = -ENOENT;
		goto err_ice_add_prof_id_vsig;
	}

	t->profile_cookie = map->profile_cookie;
	t->prof_id = map->prof_id;
	t->tcam_count = map->ptg_cnt;

	/* create TCAM entries */
	for (i = 0; i < map->ptg_cnt; i++) {
		u16 tcam_idx;

		/* add TCAM to change list */
		p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
		if (!p) {
			status = -ENOMEM;
			goto err_ice_add_prof_id_vsig;
		}

		/* allocate the TCAM entry index */
		/* for entries with empty attribute masks, allocate entry from
		 * the bottom of the TCAM table; otherwise, allocate from the
		 * top of the table in order to give it higher priority
		 */
		status = ice_alloc_tcam_ent(hw, blk, map->attr[i].mask == 0,
					    &tcam_idx);
		if (status) {
			devm_kfree(ice_hw_to_dev(hw), p);
			goto err_ice_add_prof_id_vsig;
		}

		t->tcam[i].ptg = map->ptg[i];
		t->tcam[i].prof_id = map->prof_id;
		t->tcam[i].tcam_idx = tcam_idx;
		t->tcam[i].attr = map->attr[i];
		t->tcam[i].in_use = true;

		p->type = ICE_TCAM_ADD;
		p->add_tcam_idx = true;
		p->prof_id = t->tcam[i].prof_id;
		p->ptg = t->tcam[i].ptg;
		p->vsig = vsig;
		p->tcam_idx = t->tcam[i].tcam_idx;

		/* write the TCAM entry */
		status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
					      t->tcam[i].prof_id,
					      t->tcam[i].ptg, vsig, 0, 0,
					      vl_msk, dc_msk, nm_msk);
		if (status) {
			devm_kfree(ice_hw_to_dev(hw), p);
			goto err_ice_add_prof_id_vsig;
		}

		/* log change */
		list_add(&p->list_entry, chg);
	}

	/* add profile to VSIG */
	vsig_idx = vsig & ICE_VSIG_IDX_M;
	if (rev)
		list_add_tail(&t->list,
			      &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
	else
		list_add(&t->list,
			 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);

	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
	return status;

err_ice_add_prof_id_vsig:
	mutex_unlock(&hw->blk[blk].es.prof_map_lock);
	/* let caller clean up the change list */
	devm_kfree(ice_hw_to_dev(hw), t);
	return status;
}

/**
 * ice_create_prof_id_vsig - add a new VSIG with a single profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the initial VSI that will be in VSIG
 * @hdl: the profile handle of the profile that will be added to the VSIG
 * @chg: the change list
 */
static int
ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
			struct list_head *chg)
{
	struct ice_chs_chg *p;
	u16 new_vsig;
	int status;

	p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	new_vsig = ice_vsig_alloc(hw, blk);
	if (!new_vsig) {
		status = -EIO;
		goto err_ice_create_prof_id_vsig;
	}

	status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
	if (status)
		goto err_ice_create_prof_id_vsig;

	status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
	if (status)
		goto err_ice_create_prof_id_vsig;

	p->type = ICE_VSIG_ADD;
	p->vsi = vsi;
	p->orig_vsig = ICE_DEFAULT_VSIG;
	p->vsig = new_vsig;

	list_add(&p->list_entry, chg);

	return 0;

err_ice_create_prof_id_vsig:
	/* let caller clean up the change list */
	devm_kfree(ice_hw_to_dev(hw), p);
	return status;
}
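
/* Example (illustrative sketch): a first-time binding of a VSI to a profile
 * typically flows through this helper and is then committed to hardware;
 * `hdl` is the caller's profile cookie, and the caller frees the entries
 * left on &chg in all cases:
 *
 *	LIST_HEAD(chg);
 *
 *	err = ice_create_prof_id_vsig(hw, ICE_BLK_FD, vsi, hdl, &chg);
 *	if (!err)
 *		err = ice_upd_prof_hw(hw, ICE_BLK_FD, &chg);
 */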

/**
 * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the initial VSI that will be in VSIG
 * @lst: the list of profiles that will be added to the VSIG
 * @new_vsig: return of new VSIG
 * @chg: the change list
 */
static int
ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
			 struct list_head *lst, u16 *new_vsig,
			 struct list_head *chg)
{
	struct ice_vsig_prof *t;
	int status;
	u16 vsig;

	vsig = ice_vsig_alloc(hw, blk);
	if (!vsig)
		return -EIO;

	status = ice_move_vsi(hw, blk, vsi, vsig, chg);
	if (status)
		return status;

	list_for_each_entry(t, lst, list) {
		/* Reverse the order here since we are copying the list */
		status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
					      true, chg);
		if (status)
			return status;
	}

	*new_vsig = vsig;

	return 0;
}

/**
 * ice_find_prof_vsig - find a VSIG with a specific profile handle
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @hdl: the profile handle of the profile to search for
 * @vsig: returns the VSIG with the matching profile
 */
static bool
ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
{
	struct ice_vsig_prof *t;
	struct list_head lst;
	int status;

	INIT_LIST_HEAD(&lst);

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return false;

	t->profile_cookie = hdl;
	list_add(&t->list, &lst);

	status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);

	list_del(&t->list);
	kfree(t);

	return !status;
}
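
/* Example (illustrative sketch): probing for a VSIG that carries exactly one
 * profile before deciding whether to reuse or create one; this mirrors the
 * fallback path in ice_add_prof_id_flow() below:
 *
 *	u16 vsig;
 *
 *	if (ice_find_prof_vsig(hw, blk, hdl, &vsig))
 *		err = ice_move_vsi(hw, blk, vsi, vsig, &chg);
 *	else
 *		err = ice_create_prof_id_vsig(hw, blk, vsi, hdl, &chg);
 */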

/**
 * ice_add_prof_id_flow - add profile flow
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the VSI to enable with the profile specified by ID
 * @hdl: profile handle
 *
 * Calling this function will update the hardware tables to enable the
 * profile indicated by the ID parameter for the VSI specified by the @vsi
 * parameter. Once successfully called, the flow will be enabled.
 */
int
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
	struct ice_vsig_prof *tmp1, *del1;
	struct ice_chs_chg *tmp, *del;
	struct list_head union_lst;
	struct list_head chg;
	int status;
	u16 vsig;

	INIT_LIST_HEAD(&union_lst);
	INIT_LIST_HEAD(&chg);

	/* Get profile */
	status = ice_get_prof(hw, blk, hdl, &chg);
	if (status)
		return status;

	/* determine if VSI is already part of a VSIG */
	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
	if (!status && vsig) {
		bool only_vsi;
		u16 or_vsig;
		u16 ref;

		/* found in VSIG */
		or_vsig = vsig;

		/* make sure that there is no overlap/conflict between the new
		 * characteristics and the existing ones; we don't support that
		 * scenario
		 */
		if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
			status = -EEXIST;
			goto err_ice_add_prof_id_flow;
		}

		/* last VSI in the VSIG? */
		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
		if (status)
			goto err_ice_add_prof_id_flow;
		only_vsi = (ref == 1);

		/* create a union of the current profiles and the one being
		 * added
		 */
		status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
		if (status)
			goto err_ice_add_prof_id_flow;

		status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
		if (status)
			goto err_ice_add_prof_id_flow;

		/* search for an existing VSIG with an exact characteristic
		 * match
		 */
		status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
		if (!status) {
			/* move VSI to the VSIG that matches */
			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
			if (status)
				goto err_ice_add_prof_id_flow;

			/* VSI has been moved out of or_vsig. If the or_vsig had
			 * only that VSI it is now empty and can be removed.
			 */
			if (only_vsi) {
				status = ice_rem_vsig(hw, blk, or_vsig, &chg);
				if (status)
					goto err_ice_add_prof_id_flow;
			}
		} else if (only_vsi) {
			/* If the original VSIG only contains one VSI, then it
			 * will be the requesting VSI. In this case the VSI is
			 * not sharing entries and we can simply add the new
			 * profile to the VSIG.
			 */
			status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
						      &chg);
			if (status)
				goto err_ice_add_prof_id_flow;

			/* Adjust priorities */
			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
			if (status)
				goto err_ice_add_prof_id_flow;
		} else {
			/* No match, so we need a new VSIG */
			status = ice_create_vsig_from_lst(hw, blk, vsi,
							  &union_lst, &vsig,
							  &chg);
			if (status)
				goto err_ice_add_prof_id_flow;

			/* Adjust priorities */
			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
			if (status)
				goto err_ice_add_prof_id_flow;
		}
	} else {
		/* need to find or add a VSIG */
		/* search for an existing VSIG with an exact characteristic
		 * match
		 */
		if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
			/* found an exact match */
			/* add or move VSI to the VSIG that matches */
			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
			if (status)
				goto err_ice_add_prof_id_flow;
		} else {
			/* we did not find an exact match */
			/* we need to add a VSIG */
			status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
							 &chg);
			if (status)
				goto err_ice_add_prof_id_flow;
		}
	}

	/* update hardware */
	if (!status)
		status = ice_upd_prof_hw(hw, blk, &chg);

err_ice_add_prof_id_flow:
	list_for_each_entry_safe(del, tmp, &chg, list_entry) {
		list_del(&del->list_entry);
		devm_kfree(ice_hw_to_dev(hw), del);
	}

	list_for_each_entry_safe(del1, tmp1, &union_lst, list) {
		list_del(&del1->list);
		devm_kfree(ice_hw_to_dev(hw), del1);
	}

	return status;
}
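
/* Example (illustrative sketch): enabling a previously added profile for a
 * VSI; `prof_cookie` is the hypothetical tracking ID that was used at
 * ice_add_prof() time, and `vsi_num` the hardware VSI number:
 *
 *	err = ice_add_prof_id_flow(hw, ICE_BLK_RSS, vsi_num, prof_cookie);
 *	if (err == -EEXIST)
 *		... the VSI's VSIG already carries this profile ...
 */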

/**
 * ice_rem_prof_from_list - remove a profile from list
 * @hw: pointer to the HW struct
 * @lst: list to remove the profile from
 * @hdl: the profile handle indicating the profile to remove
 */
static int
ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
{
	struct ice_vsig_prof *ent, *tmp;

	list_for_each_entry_safe(ent, tmp, lst, list)
		if (ent->profile_cookie == hdl) {
			list_del(&ent->list);
			devm_kfree(ice_hw_to_dev(hw), ent);
			return 0;
		}

	return -ENOENT;
}

/**
 * ice_rem_prof_id_flow - remove flow
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the VSI from which to remove the profile specified by ID
 * @hdl: profile tracking handle
 *
 * Calling this function will update the hardware tables to remove the
 * profile indicated by the ID parameter for the VSI specified by the @vsi
 * parameter. Once successfully called, the flow will be disabled.
 */
int
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
	struct ice_vsig_prof *tmp1, *del1;
	struct ice_chs_chg *tmp, *del;
	struct list_head chg, copy;
	int status;
	u16 vsig;

	INIT_LIST_HEAD(&copy);
	INIT_LIST_HEAD(&chg);

	/* determine if VSI is already part of a VSIG */
	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
	if (!status && vsig) {
		bool last_profile;
		bool only_vsi;
		u16 ref;

		/* found in VSIG */
		last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
		if (status)
			goto err_ice_rem_prof_id_flow;
		only_vsi = (ref == 1);

		if (only_vsi) {
			/* If the original VSIG only contains one reference,
			 * which will be the requesting VSI, then the VSI is not
			 * sharing entries and we can simply remove the specific
			 * characteristics from the VSIG.
			 */

			if (last_profile) {
				/* If there are no profiles left for this VSIG,
				 * then simply remove the VSIG.
				 */
				status = ice_rem_vsig(hw, blk, vsig, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			} else {
				status = ice_rem_prof_id_vsig(hw, blk, vsig,
							      hdl, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;

				/* Adjust priorities */
				status = ice_adj_prof_priorities(hw, blk, vsig,
								 &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			}

		} else {
			/* Make a copy of the VSIG's list of Profiles */
			status = ice_get_profs_vsig(hw, blk, vsig, &copy);
			if (status)
				goto err_ice_rem_prof_id_flow;

			/* Remove specified profile entry from the list */
			status = ice_rem_prof_from_list(hw, &copy, hdl);
			if (status)
				goto err_ice_rem_prof_id_flow;

			if (list_empty(&copy)) {
				status = ice_move_vsi(hw, blk, vsi,
						      ICE_DEFAULT_VSIG, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;

			} else if (!ice_find_dup_props_vsig(hw, blk, &copy,
							    &vsig)) {
				/* found a VSIG with a matching profile list;
				 * move the VSI to that VSIG
				 */
				status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			} else {
				/* since no existing VSIG supports this
				 * characteristic pattern, we need to create a
				 * new VSIG and TCAM entries
				 */
				status = ice_create_vsig_from_lst(hw, blk, vsi,
								  &copy, &vsig,
								  &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;

				/* Adjust priorities */
				status = ice_adj_prof_priorities(hw, blk, vsig,
								 &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			}
		}
	} else {
		status = -ENOENT;
	}

	/* update hardware tables */
	if (!status)
		status = ice_upd_prof_hw(hw, blk, &chg);

err_ice_rem_prof_id_flow:
	list_for_each_entry_safe(del, tmp, &chg, list_entry) {
		list_del(&del->list_entry);
		devm_kfree(ice_hw_to_dev(hw), del);
	}

	list_for_each_entry_safe(del1, tmp1, &copy, list) {
		list_del(&del1->list);
		devm_kfree(ice_hw_to_dev(hw), del1);
	}

	return status;
}
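
/* Example (illustrative sketch): tearing down the binding created by
 * ice_add_prof_id_flow(); the same hypothetical `prof_cookie` and `vsi_num`
 * identify the profile and VSI:
 *
 *	err = ice_rem_prof_id_flow(hw, ICE_BLK_RSS, vsi_num, prof_cookie);
 *	if (err == -ENOENT)
 *		... the VSI was not part of any VSIG ...
 */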