1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Copyright (c) 2018, Intel Corporation. */ |
3 | |
4 | #include <net/devlink.h> |
5 | #include "ice_sched.h" |
6 | |
7 | /** |
8 | * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB |
9 | * @pi: port information structure |
10 | * @info: Scheduler element information from firmware |
11 | * |
12 | * This function inserts the root node of the scheduling tree topology |
13 | * to the SW DB. |
14 | */ |
15 | static int |
16 | ice_sched_add_root_node(struct ice_port_info *pi, |
17 | struct ice_aqc_txsched_elem_data *info) |
18 | { |
19 | struct ice_sched_node *root; |
20 | struct ice_hw *hw; |
21 | |
22 | if (!pi) |
23 | return -EINVAL; |
24 | |
25 | hw = pi->hw; |
26 | |
27 | root = devm_kzalloc(dev: ice_hw_to_dev(hw), size: sizeof(*root), GFP_KERNEL); |
28 | if (!root) |
29 | return -ENOMEM; |
30 | |
31 | /* coverity[suspicious_sizeof] */ |
32 | root->children = devm_kcalloc(dev: ice_hw_to_dev(hw), n: hw->max_children[0], |
33 | size: sizeof(*root), GFP_KERNEL); |
34 | if (!root->children) { |
35 | devm_kfree(dev: ice_hw_to_dev(hw), p: root); |
36 | return -ENOMEM; |
37 | } |
38 | |
39 | memcpy(&root->info, info, sizeof(*info)); |
40 | pi->root = root; |
41 | return 0; |
42 | } |
43 | |
44 | /** |
45 | * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB |
46 | * @start_node: pointer to the starting ice_sched_node struct in a sub-tree |
47 | * @teid: node TEID to search |
48 | * |
49 | * This function searches for a node matching the TEID in the scheduling tree |
50 | * from the SW DB. The search is recursive and is restricted by the number of |
51 | * layers it has searched through; stopping at the max supported layer. |
52 | * |
53 | * This function needs to be called when holding the port_info->sched_lock |
54 | */ |
55 | struct ice_sched_node * |
56 | ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid) |
57 | { |
58 | u16 i; |
59 | |
60 | /* The TEID is same as that of the start_node */ |
61 | if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid) |
62 | return start_node; |
63 | |
64 | /* The node has no children or is at the max layer */ |
65 | if (!start_node->num_children || |
66 | start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM || |
67 | start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) |
68 | return NULL; |
69 | |
70 | /* Check if TEID matches to any of the children nodes */ |
71 | for (i = 0; i < start_node->num_children; i++) |
72 | if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid) |
73 | return start_node->children[i]; |
74 | |
75 | /* Search within each child's sub-tree */ |
76 | for (i = 0; i < start_node->num_children; i++) { |
77 | struct ice_sched_node *tmp; |
78 | |
79 | tmp = ice_sched_find_node_by_teid(start_node: start_node->children[i], |
80 | teid); |
81 | if (tmp) |
82 | return tmp; |
83 | } |
84 | |
85 | return NULL; |
86 | } |
87 | |
88 | /** |
89 | * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd |
90 | * @hw: pointer to the HW struct |
91 | * @cmd_opc: cmd opcode |
92 | * @elems_req: number of elements to request |
93 | * @buf: pointer to buffer |
94 | * @buf_size: buffer size in bytes |
95 | * @elems_resp: returns total number of elements response |
96 | * @cd: pointer to command details structure or NULL |
97 | * |
98 | * This function sends a scheduling elements cmd (cmd_opc) |
99 | */ |
100 | static int |
101 | ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc, |
102 | u16 elems_req, void *buf, u16 buf_size, |
103 | u16 *elems_resp, struct ice_sq_cd *cd) |
104 | { |
105 | struct ice_aqc_sched_elem_cmd *cmd; |
106 | struct ice_aq_desc desc; |
107 | int status; |
108 | |
109 | cmd = &desc.params.sched_elem_cmd; |
110 | ice_fill_dflt_direct_cmd_desc(desc: &desc, opcode: cmd_opc); |
111 | cmd->num_elem_req = cpu_to_le16(elems_req); |
112 | desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); |
113 | status = ice_aq_send_cmd(hw, desc: &desc, buf, buf_size, cd); |
114 | if (!status && elems_resp) |
115 | *elems_resp = le16_to_cpu(cmd->num_elem_resp); |
116 | |
117 | return status; |
118 | } |
119 | |
120 | /** |
121 | * ice_aq_query_sched_elems - query scheduler elements |
122 | * @hw: pointer to the HW struct |
123 | * @elems_req: number of elements to query |
124 | * @buf: pointer to buffer |
125 | * @buf_size: buffer size in bytes |
126 | * @elems_ret: returns total number of elements returned |
127 | * @cd: pointer to command details structure or NULL |
128 | * |
129 | * Query scheduling elements (0x0404) |
130 | */ |
131 | int |
132 | ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, |
133 | struct ice_aqc_txsched_elem_data *buf, u16 buf_size, |
134 | u16 *elems_ret, struct ice_sq_cd *cd) |
135 | { |
136 | return ice_aqc_send_sched_elem_cmd(hw, cmd_opc: ice_aqc_opc_get_sched_elems, |
137 | elems_req, buf: (void *)buf, buf_size, |
138 | elems_resp: elems_ret, cd); |
139 | } |
140 | |
141 | /** |
142 | * ice_sched_add_node - Insert the Tx scheduler node in SW DB |
143 | * @pi: port information structure |
144 | * @layer: Scheduler layer of the node |
145 | * @info: Scheduler element information from firmware |
146 | * @prealloc_node: preallocated ice_sched_node struct for SW DB |
147 | * |
148 | * This function inserts a scheduler node to the SW DB. |
149 | */ |
150 | int |
151 | ice_sched_add_node(struct ice_port_info *pi, u8 layer, |
152 | struct ice_aqc_txsched_elem_data *info, |
153 | struct ice_sched_node *prealloc_node) |
154 | { |
155 | struct ice_aqc_txsched_elem_data elem; |
156 | struct ice_sched_node *parent; |
157 | struct ice_sched_node *node; |
158 | struct ice_hw *hw; |
159 | int status; |
160 | |
161 | if (!pi) |
162 | return -EINVAL; |
163 | |
164 | hw = pi->hw; |
165 | |
166 | /* A valid parent node should be there */ |
167 | parent = ice_sched_find_node_by_teid(start_node: pi->root, |
168 | le32_to_cpu(info->parent_teid)); |
169 | if (!parent) { |
170 | ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n" , |
171 | le32_to_cpu(info->parent_teid)); |
172 | return -EINVAL; |
173 | } |
174 | |
175 | /* query the current node information from FW before adding it |
176 | * to the SW DB |
177 | */ |
178 | status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), buf: &elem); |
179 | if (status) |
180 | return status; |
181 | |
182 | if (prealloc_node) |
183 | node = prealloc_node; |
184 | else |
185 | node = devm_kzalloc(dev: ice_hw_to_dev(hw), size: sizeof(*node), GFP_KERNEL); |
186 | if (!node) |
187 | return -ENOMEM; |
188 | if (hw->max_children[layer]) { |
189 | /* coverity[suspicious_sizeof] */ |
190 | node->children = devm_kcalloc(dev: ice_hw_to_dev(hw), |
191 | n: hw->max_children[layer], |
192 | size: sizeof(*node), GFP_KERNEL); |
193 | if (!node->children) { |
194 | devm_kfree(dev: ice_hw_to_dev(hw), p: node); |
195 | return -ENOMEM; |
196 | } |
197 | } |
198 | |
199 | node->in_use = true; |
200 | node->parent = parent; |
201 | node->tx_sched_layer = layer; |
202 | parent->children[parent->num_children++] = node; |
203 | node->info = elem; |
204 | return 0; |
205 | } |
206 | |
207 | /** |
208 | * ice_aq_delete_sched_elems - delete scheduler elements |
209 | * @hw: pointer to the HW struct |
210 | * @grps_req: number of groups to delete |
211 | * @buf: pointer to buffer |
212 | * @buf_size: buffer size in bytes |
213 | * @grps_del: returns total number of elements deleted |
214 | * @cd: pointer to command details structure or NULL |
215 | * |
216 | * Delete scheduling elements (0x040F) |
217 | */ |
218 | static int |
219 | ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req, |
220 | struct ice_aqc_delete_elem *buf, u16 buf_size, |
221 | u16 *grps_del, struct ice_sq_cd *cd) |
222 | { |
223 | return ice_aqc_send_sched_elem_cmd(hw, cmd_opc: ice_aqc_opc_delete_sched_elems, |
224 | elems_req: grps_req, buf: (void *)buf, buf_size, |
225 | elems_resp: grps_del, cd); |
226 | } |
227 | |
228 | /** |
229 | * ice_sched_remove_elems - remove nodes from HW |
230 | * @hw: pointer to the HW struct |
231 | * @parent: pointer to the parent node |
232 | * @node_teid: node teid to be deleted |
233 | * |
234 | * This function remove nodes from HW |
235 | */ |
236 | static int |
237 | ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent, |
238 | u32 node_teid) |
239 | { |
240 | DEFINE_FLEX(struct ice_aqc_delete_elem, buf, teid, 1); |
241 | u16 buf_size = __struct_size(buf); |
242 | u16 num_groups_removed = 0; |
243 | int status; |
244 | |
245 | buf->hdr.parent_teid = parent->info.node_teid; |
246 | buf->hdr.num_elems = cpu_to_le16(1); |
247 | buf->teid[0] = cpu_to_le32(node_teid); |
248 | |
249 | status = ice_aq_delete_sched_elems(hw, grps_req: 1, buf, buf_size, |
250 | grps_del: &num_groups_removed, NULL); |
251 | if (status || num_groups_removed != 1) |
252 | ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n" , |
253 | hw->adminq.sq_last_status); |
254 | |
255 | return status; |
256 | } |
257 | |
258 | /** |
259 | * ice_sched_get_first_node - get the first node of the given layer |
260 | * @pi: port information structure |
261 | * @parent: pointer the base node of the subtree |
262 | * @layer: layer number |
263 | * |
264 | * This function retrieves the first node of the given layer from the subtree |
265 | */ |
266 | static struct ice_sched_node * |
267 | ice_sched_get_first_node(struct ice_port_info *pi, |
268 | struct ice_sched_node *parent, u8 layer) |
269 | { |
270 | return pi->sib_head[parent->tc_num][layer]; |
271 | } |
272 | |
273 | /** |
274 | * ice_sched_get_tc_node - get pointer to TC node |
275 | * @pi: port information structure |
276 | * @tc: TC number |
277 | * |
278 | * This function returns the TC node pointer |
279 | */ |
280 | struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc) |
281 | { |
282 | u8 i; |
283 | |
284 | if (!pi || !pi->root) |
285 | return NULL; |
286 | for (i = 0; i < pi->root->num_children; i++) |
287 | if (pi->root->children[i]->tc_num == tc) |
288 | return pi->root->children[i]; |
289 | return NULL; |
290 | } |
291 | |
292 | /** |
293 | * ice_free_sched_node - Free a Tx scheduler node from SW DB |
294 | * @pi: port information structure |
295 | * @node: pointer to the ice_sched_node struct |
296 | * |
297 | * This function frees up a node from SW DB as well as from HW |
298 | * |
299 | * This function needs to be called with the port_info->sched_lock held |
300 | */ |
301 | void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node) |
302 | { |
303 | struct ice_sched_node *parent; |
304 | struct ice_hw *hw = pi->hw; |
305 | u8 i, j; |
306 | |
307 | /* Free the children before freeing up the parent node |
308 | * The parent array is updated below and that shifts the nodes |
309 | * in the array. So always pick the first child if num children > 0 |
310 | */ |
311 | while (node->num_children) |
312 | ice_free_sched_node(pi, node: node->children[0]); |
313 | |
314 | /* Leaf, TC and root nodes can't be deleted by SW */ |
315 | if (node->tx_sched_layer >= hw->sw_entry_point_layer && |
316 | node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC && |
317 | node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT && |
318 | node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) { |
319 | u32 teid = le32_to_cpu(node->info.node_teid); |
320 | |
321 | ice_sched_remove_elems(hw, parent: node->parent, node_teid: teid); |
322 | } |
323 | parent = node->parent; |
324 | /* root has no parent */ |
325 | if (parent) { |
326 | struct ice_sched_node *p; |
327 | |
328 | /* update the parent */ |
329 | for (i = 0; i < parent->num_children; i++) |
330 | if (parent->children[i] == node) { |
331 | for (j = i + 1; j < parent->num_children; j++) |
332 | parent->children[j - 1] = |
333 | parent->children[j]; |
334 | parent->num_children--; |
335 | break; |
336 | } |
337 | |
338 | p = ice_sched_get_first_node(pi, parent: node, layer: node->tx_sched_layer); |
339 | while (p) { |
340 | if (p->sibling == node) { |
341 | p->sibling = node->sibling; |
342 | break; |
343 | } |
344 | p = p->sibling; |
345 | } |
346 | |
347 | /* update the sibling head if head is getting removed */ |
348 | if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node) |
349 | pi->sib_head[node->tc_num][node->tx_sched_layer] = |
350 | node->sibling; |
351 | } |
352 | |
353 | devm_kfree(dev: ice_hw_to_dev(hw), p: node->children); |
354 | kfree(objp: node->name); |
355 | xa_erase(&pi->sched_node_ids, index: node->id); |
356 | devm_kfree(dev: ice_hw_to_dev(hw), p: node); |
357 | } |
358 | |
359 | /** |
360 | * ice_aq_get_dflt_topo - gets default scheduler topology |
361 | * @hw: pointer to the HW struct |
362 | * @lport: logical port number |
363 | * @buf: pointer to buffer |
364 | * @buf_size: buffer size in bytes |
365 | * @num_branches: returns total number of queue to port branches |
366 | * @cd: pointer to command details structure or NULL |
367 | * |
368 | * Get default scheduler topology (0x400) |
369 | */ |
370 | static int |
371 | ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport, |
372 | struct ice_aqc_get_topo_elem *buf, u16 buf_size, |
373 | u8 *num_branches, struct ice_sq_cd *cd) |
374 | { |
375 | struct ice_aqc_get_topo *cmd; |
376 | struct ice_aq_desc desc; |
377 | int status; |
378 | |
379 | cmd = &desc.params.get_topo; |
380 | ice_fill_dflt_direct_cmd_desc(desc: &desc, opcode: ice_aqc_opc_get_dflt_topo); |
381 | cmd->port_num = lport; |
382 | status = ice_aq_send_cmd(hw, desc: &desc, buf, buf_size, cd); |
383 | if (!status && num_branches) |
384 | *num_branches = cmd->num_branches; |
385 | |
386 | return status; |
387 | } |
388 | |
389 | /** |
390 | * ice_aq_add_sched_elems - adds scheduling element |
391 | * @hw: pointer to the HW struct |
392 | * @grps_req: the number of groups that are requested to be added |
393 | * @buf: pointer to buffer |
394 | * @buf_size: buffer size in bytes |
395 | * @grps_added: returns total number of groups added |
396 | * @cd: pointer to command details structure or NULL |
397 | * |
398 | * Add scheduling elements (0x0401) |
399 | */ |
400 | static int |
401 | ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req, |
402 | struct ice_aqc_add_elem *buf, u16 buf_size, |
403 | u16 *grps_added, struct ice_sq_cd *cd) |
404 | { |
405 | return ice_aqc_send_sched_elem_cmd(hw, cmd_opc: ice_aqc_opc_add_sched_elems, |
406 | elems_req: grps_req, buf: (void *)buf, buf_size, |
407 | elems_resp: grps_added, cd); |
408 | } |
409 | |
410 | /** |
411 | * ice_aq_cfg_sched_elems - configures scheduler elements |
412 | * @hw: pointer to the HW struct |
413 | * @elems_req: number of elements to configure |
414 | * @buf: pointer to buffer |
415 | * @buf_size: buffer size in bytes |
416 | * @elems_cfgd: returns total number of elements configured |
417 | * @cd: pointer to command details structure or NULL |
418 | * |
419 | * Configure scheduling elements (0x0403) |
420 | */ |
421 | static int |
422 | ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req, |
423 | struct ice_aqc_txsched_elem_data *buf, u16 buf_size, |
424 | u16 *elems_cfgd, struct ice_sq_cd *cd) |
425 | { |
426 | return ice_aqc_send_sched_elem_cmd(hw, cmd_opc: ice_aqc_opc_cfg_sched_elems, |
427 | elems_req, buf: (void *)buf, buf_size, |
428 | elems_resp: elems_cfgd, cd); |
429 | } |
430 | |
431 | /** |
432 | * ice_aq_move_sched_elems - move scheduler element (just 1 group) |
433 | * @hw: pointer to the HW struct |
434 | * @buf: pointer to buffer |
435 | * @buf_size: buffer size in bytes |
436 | * @grps_movd: returns total number of groups moved |
437 | * |
438 | * Move scheduling elements (0x0408) |
439 | */ |
440 | int |
441 | ice_aq_move_sched_elems(struct ice_hw *hw, struct ice_aqc_move_elem *buf, |
442 | u16 buf_size, u16 *grps_movd) |
443 | { |
444 | return ice_aqc_send_sched_elem_cmd(hw, cmd_opc: ice_aqc_opc_move_sched_elems, |
445 | elems_req: 1, buf, buf_size, elems_resp: grps_movd, NULL); |
446 | } |
447 | |
448 | /** |
449 | * ice_aq_suspend_sched_elems - suspend scheduler elements |
450 | * @hw: pointer to the HW struct |
451 | * @elems_req: number of elements to suspend |
452 | * @buf: pointer to buffer |
453 | * @buf_size: buffer size in bytes |
454 | * @elems_ret: returns total number of elements suspended |
455 | * @cd: pointer to command details structure or NULL |
456 | * |
457 | * Suspend scheduling elements (0x0409) |
458 | */ |
459 | static int |
460 | ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf, |
461 | u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) |
462 | { |
463 | return ice_aqc_send_sched_elem_cmd(hw, cmd_opc: ice_aqc_opc_suspend_sched_elems, |
464 | elems_req, buf: (void *)buf, buf_size, |
465 | elems_resp: elems_ret, cd); |
466 | } |
467 | |
468 | /** |
469 | * ice_aq_resume_sched_elems - resume scheduler elements |
470 | * @hw: pointer to the HW struct |
471 | * @elems_req: number of elements to resume |
472 | * @buf: pointer to buffer |
473 | * @buf_size: buffer size in bytes |
474 | * @elems_ret: returns total number of elements resumed |
475 | * @cd: pointer to command details structure or NULL |
476 | * |
477 | * resume scheduling elements (0x040A) |
478 | */ |
479 | static int |
480 | ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf, |
481 | u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) |
482 | { |
483 | return ice_aqc_send_sched_elem_cmd(hw, cmd_opc: ice_aqc_opc_resume_sched_elems, |
484 | elems_req, buf: (void *)buf, buf_size, |
485 | elems_resp: elems_ret, cd); |
486 | } |
487 | |
488 | /** |
489 | * ice_aq_query_sched_res - query scheduler resource |
490 | * @hw: pointer to the HW struct |
491 | * @buf_size: buffer size in bytes |
492 | * @buf: pointer to buffer |
493 | * @cd: pointer to command details structure or NULL |
494 | * |
495 | * Query scheduler resource allocation (0x0412) |
496 | */ |
497 | static int |
498 | ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size, |
499 | struct ice_aqc_query_txsched_res_resp *buf, |
500 | struct ice_sq_cd *cd) |
501 | { |
502 | struct ice_aq_desc desc; |
503 | |
504 | ice_fill_dflt_direct_cmd_desc(desc: &desc, opcode: ice_aqc_opc_query_sched_res); |
505 | return ice_aq_send_cmd(hw, desc: &desc, buf, buf_size, cd); |
506 | } |
507 | |
508 | /** |
509 | * ice_sched_suspend_resume_elems - suspend or resume HW nodes |
510 | * @hw: pointer to the HW struct |
511 | * @num_nodes: number of nodes |
512 | * @node_teids: array of node teids to be suspended or resumed |
513 | * @suspend: true means suspend / false means resume |
514 | * |
515 | * This function suspends or resumes HW nodes |
516 | */ |
517 | int |
518 | ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids, |
519 | bool suspend) |
520 | { |
521 | u16 i, buf_size, num_elem_ret = 0; |
522 | __le32 *buf; |
523 | int status; |
524 | |
525 | buf_size = sizeof(*buf) * num_nodes; |
526 | buf = devm_kzalloc(dev: ice_hw_to_dev(hw), size: buf_size, GFP_KERNEL); |
527 | if (!buf) |
528 | return -ENOMEM; |
529 | |
530 | for (i = 0; i < num_nodes; i++) |
531 | buf[i] = cpu_to_le32(node_teids[i]); |
532 | |
533 | if (suspend) |
534 | status = ice_aq_suspend_sched_elems(hw, elems_req: num_nodes, buf, |
535 | buf_size, elems_ret: &num_elem_ret, |
536 | NULL); |
537 | else |
538 | status = ice_aq_resume_sched_elems(hw, elems_req: num_nodes, buf, |
539 | buf_size, elems_ret: &num_elem_ret, |
540 | NULL); |
541 | if (status || num_elem_ret != num_nodes) |
542 | ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n" ); |
543 | |
544 | devm_kfree(dev: ice_hw_to_dev(hw), p: buf); |
545 | return status; |
546 | } |
547 | |
548 | /** |
549 | * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC |
550 | * @hw: pointer to the HW struct |
551 | * @vsi_handle: VSI handle |
552 | * @tc: TC number |
553 | * @new_numqs: number of queues |
554 | */ |
555 | static int |
556 | ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) |
557 | { |
558 | struct ice_vsi_ctx *vsi_ctx; |
559 | struct ice_q_ctx *q_ctx; |
560 | u16 idx; |
561 | |
562 | vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); |
563 | if (!vsi_ctx) |
564 | return -EINVAL; |
565 | /* allocate LAN queue contexts */ |
566 | if (!vsi_ctx->lan_q_ctx[tc]) { |
567 | q_ctx = devm_kcalloc(dev: ice_hw_to_dev(hw), n: new_numqs, |
568 | size: sizeof(*q_ctx), GFP_KERNEL); |
569 | if (!q_ctx) |
570 | return -ENOMEM; |
571 | |
572 | for (idx = 0; idx < new_numqs; idx++) { |
573 | q_ctx[idx].q_handle = ICE_INVAL_Q_HANDLE; |
574 | q_ctx[idx].q_teid = ICE_INVAL_TEID; |
575 | } |
576 | |
577 | vsi_ctx->lan_q_ctx[tc] = q_ctx; |
578 | vsi_ctx->num_lan_q_entries[tc] = new_numqs; |
579 | return 0; |
580 | } |
581 | /* num queues are increased, update the queue contexts */ |
582 | if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) { |
583 | u16 prev_num = vsi_ctx->num_lan_q_entries[tc]; |
584 | |
585 | q_ctx = devm_kcalloc(dev: ice_hw_to_dev(hw), n: new_numqs, |
586 | size: sizeof(*q_ctx), GFP_KERNEL); |
587 | if (!q_ctx) |
588 | return -ENOMEM; |
589 | |
590 | memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc], |
591 | prev_num * sizeof(*q_ctx)); |
592 | devm_kfree(dev: ice_hw_to_dev(hw), p: vsi_ctx->lan_q_ctx[tc]); |
593 | |
594 | for (idx = prev_num; idx < new_numqs; idx++) { |
595 | q_ctx[idx].q_handle = ICE_INVAL_Q_HANDLE; |
596 | q_ctx[idx].q_teid = ICE_INVAL_TEID; |
597 | } |
598 | |
599 | vsi_ctx->lan_q_ctx[tc] = q_ctx; |
600 | vsi_ctx->num_lan_q_entries[tc] = new_numqs; |
601 | } |
602 | return 0; |
603 | } |
604 | |
605 | /** |
606 | * ice_alloc_rdma_q_ctx - allocate RDMA queue contexts for the given VSI and TC |
607 | * @hw: pointer to the HW struct |
608 | * @vsi_handle: VSI handle |
609 | * @tc: TC number |
610 | * @new_numqs: number of queues |
611 | */ |
612 | static int |
613 | ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) |
614 | { |
615 | struct ice_vsi_ctx *vsi_ctx; |
616 | struct ice_q_ctx *q_ctx; |
617 | |
618 | vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); |
619 | if (!vsi_ctx) |
620 | return -EINVAL; |
621 | /* allocate RDMA queue contexts */ |
622 | if (!vsi_ctx->rdma_q_ctx[tc]) { |
623 | vsi_ctx->rdma_q_ctx[tc] = devm_kcalloc(dev: ice_hw_to_dev(hw), |
624 | n: new_numqs, |
625 | size: sizeof(*q_ctx), |
626 | GFP_KERNEL); |
627 | if (!vsi_ctx->rdma_q_ctx[tc]) |
628 | return -ENOMEM; |
629 | vsi_ctx->num_rdma_q_entries[tc] = new_numqs; |
630 | return 0; |
631 | } |
632 | /* num queues are increased, update the queue contexts */ |
633 | if (new_numqs > vsi_ctx->num_rdma_q_entries[tc]) { |
634 | u16 prev_num = vsi_ctx->num_rdma_q_entries[tc]; |
635 | |
636 | q_ctx = devm_kcalloc(dev: ice_hw_to_dev(hw), n: new_numqs, |
637 | size: sizeof(*q_ctx), GFP_KERNEL); |
638 | if (!q_ctx) |
639 | return -ENOMEM; |
640 | memcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc], |
641 | prev_num * sizeof(*q_ctx)); |
642 | devm_kfree(dev: ice_hw_to_dev(hw), p: vsi_ctx->rdma_q_ctx[tc]); |
643 | vsi_ctx->rdma_q_ctx[tc] = q_ctx; |
644 | vsi_ctx->num_rdma_q_entries[tc] = new_numqs; |
645 | } |
646 | return 0; |
647 | } |
648 | |
649 | /** |
650 | * ice_aq_rl_profile - performs a rate limiting task |
651 | * @hw: pointer to the HW struct |
652 | * @opcode: opcode for add, query, or remove profile(s) |
653 | * @num_profiles: the number of profiles |
654 | * @buf: pointer to buffer |
655 | * @buf_size: buffer size in bytes |
656 | * @num_processed: number of processed add or remove profile(s) to return |
657 | * @cd: pointer to command details structure |
658 | * |
659 | * RL profile function to add, query, or remove profile(s) |
660 | */ |
661 | static int |
662 | ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode, |
663 | u16 num_profiles, struct ice_aqc_rl_profile_elem *buf, |
664 | u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd) |
665 | { |
666 | struct ice_aqc_rl_profile *cmd; |
667 | struct ice_aq_desc desc; |
668 | int status; |
669 | |
670 | cmd = &desc.params.rl_profile; |
671 | |
672 | ice_fill_dflt_direct_cmd_desc(desc: &desc, opcode); |
673 | desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); |
674 | cmd->num_profiles = cpu_to_le16(num_profiles); |
675 | status = ice_aq_send_cmd(hw, desc: &desc, buf, buf_size, cd); |
676 | if (!status && num_processed) |
677 | *num_processed = le16_to_cpu(cmd->num_processed); |
678 | return status; |
679 | } |
680 | |
681 | /** |
682 | * ice_aq_add_rl_profile - adds rate limiting profile(s) |
683 | * @hw: pointer to the HW struct |
684 | * @num_profiles: the number of profile(s) to be add |
685 | * @buf: pointer to buffer |
686 | * @buf_size: buffer size in bytes |
687 | * @num_profiles_added: total number of profiles added to return |
688 | * @cd: pointer to command details structure |
689 | * |
690 | * Add RL profile (0x0410) |
691 | */ |
692 | static int |
693 | ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles, |
694 | struct ice_aqc_rl_profile_elem *buf, u16 buf_size, |
695 | u16 *num_profiles_added, struct ice_sq_cd *cd) |
696 | { |
697 | return ice_aq_rl_profile(hw, opcode: ice_aqc_opc_add_rl_profiles, num_profiles, |
698 | buf, buf_size, num_processed: num_profiles_added, cd); |
699 | } |
700 | |
701 | /** |
702 | * ice_aq_remove_rl_profile - removes RL profile(s) |
703 | * @hw: pointer to the HW struct |
704 | * @num_profiles: the number of profile(s) to remove |
705 | * @buf: pointer to buffer |
706 | * @buf_size: buffer size in bytes |
707 | * @num_profiles_removed: total number of profiles removed to return |
708 | * @cd: pointer to command details structure or NULL |
709 | * |
710 | * Remove RL profile (0x0415) |
711 | */ |
712 | static int |
713 | ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles, |
714 | struct ice_aqc_rl_profile_elem *buf, u16 buf_size, |
715 | u16 *num_profiles_removed, struct ice_sq_cd *cd) |
716 | { |
717 | return ice_aq_rl_profile(hw, opcode: ice_aqc_opc_remove_rl_profiles, |
718 | num_profiles, buf, buf_size, |
719 | num_processed: num_profiles_removed, cd); |
720 | } |
721 | |
722 | /** |
723 | * ice_sched_del_rl_profile - remove RL profile |
724 | * @hw: pointer to the HW struct |
725 | * @rl_info: rate limit profile information |
726 | * |
727 | * If the profile ID is not referenced anymore, it removes profile ID with |
728 | * its associated parameters from HW DB,and locally. The caller needs to |
729 | * hold scheduler lock. |
730 | */ |
731 | static int |
732 | ice_sched_del_rl_profile(struct ice_hw *hw, |
733 | struct ice_aqc_rl_profile_info *rl_info) |
734 | { |
735 | struct ice_aqc_rl_profile_elem *buf; |
736 | u16 num_profiles_removed; |
737 | u16 num_profiles = 1; |
738 | int status; |
739 | |
740 | if (rl_info->prof_id_ref != 0) |
741 | return -EBUSY; |
742 | |
743 | /* Safe to remove profile ID */ |
744 | buf = &rl_info->profile; |
745 | status = ice_aq_remove_rl_profile(hw, num_profiles, buf, buf_size: sizeof(*buf), |
746 | num_profiles_removed: &num_profiles_removed, NULL); |
747 | if (status || num_profiles_removed != num_profiles) |
748 | return -EIO; |
749 | |
750 | /* Delete stale entry now */ |
751 | list_del(entry: &rl_info->list_entry); |
752 | devm_kfree(dev: ice_hw_to_dev(hw), p: rl_info); |
753 | return status; |
754 | } |
755 | |
756 | /** |
757 | * ice_sched_clear_rl_prof - clears RL prof entries |
758 | * @pi: port information structure |
759 | * |
760 | * This function removes all RL profile from HW as well as from SW DB. |
761 | */ |
762 | static void ice_sched_clear_rl_prof(struct ice_port_info *pi) |
763 | { |
764 | u16 ln; |
765 | |
766 | for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) { |
767 | struct ice_aqc_rl_profile_info *rl_prof_elem; |
768 | struct ice_aqc_rl_profile_info *rl_prof_tmp; |
769 | |
770 | list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp, |
771 | &pi->rl_prof_list[ln], list_entry) { |
772 | struct ice_hw *hw = pi->hw; |
773 | int status; |
774 | |
775 | rl_prof_elem->prof_id_ref = 0; |
776 | status = ice_sched_del_rl_profile(hw, rl_info: rl_prof_elem); |
777 | if (status) { |
778 | ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n" ); |
779 | /* On error, free mem required */ |
780 | list_del(entry: &rl_prof_elem->list_entry); |
781 | devm_kfree(dev: ice_hw_to_dev(hw), p: rl_prof_elem); |
782 | } |
783 | } |
784 | } |
785 | } |
786 | |
787 | /** |
788 | * ice_sched_clear_agg - clears the aggregator related information |
789 | * @hw: pointer to the hardware structure |
790 | * |
791 | * This function removes aggregator list and free up aggregator related memory |
792 | * previously allocated. |
793 | */ |
794 | void ice_sched_clear_agg(struct ice_hw *hw) |
795 | { |
796 | struct ice_sched_agg_info *agg_info; |
797 | struct ice_sched_agg_info *atmp; |
798 | |
799 | list_for_each_entry_safe(agg_info, atmp, &hw->agg_list, list_entry) { |
800 | struct ice_sched_agg_vsi_info *agg_vsi_info; |
801 | struct ice_sched_agg_vsi_info *vtmp; |
802 | |
803 | list_for_each_entry_safe(agg_vsi_info, vtmp, |
804 | &agg_info->agg_vsi_list, list_entry) { |
805 | list_del(entry: &agg_vsi_info->list_entry); |
806 | devm_kfree(dev: ice_hw_to_dev(hw), p: agg_vsi_info); |
807 | } |
808 | list_del(entry: &agg_info->list_entry); |
809 | devm_kfree(dev: ice_hw_to_dev(hw), p: agg_info); |
810 | } |
811 | } |
812 | |
813 | /** |
814 | * ice_sched_clear_tx_topo - clears the scheduler tree nodes |
815 | * @pi: port information structure |
816 | * |
817 | * This function removes all the nodes from HW as well as from SW DB. |
818 | */ |
819 | static void ice_sched_clear_tx_topo(struct ice_port_info *pi) |
820 | { |
821 | if (!pi) |
822 | return; |
823 | /* remove RL profiles related lists */ |
824 | ice_sched_clear_rl_prof(pi); |
825 | if (pi->root) { |
826 | ice_free_sched_node(pi, node: pi->root); |
827 | pi->root = NULL; |
828 | } |
829 | } |
830 | |
831 | /** |
832 | * ice_sched_clear_port - clear the scheduler elements from SW DB for a port |
833 | * @pi: port information structure |
834 | * |
835 | * Cleanup scheduling elements from SW DB |
836 | */ |
837 | void ice_sched_clear_port(struct ice_port_info *pi) |
838 | { |
839 | if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) |
840 | return; |
841 | |
842 | pi->port_state = ICE_SCHED_PORT_STATE_INIT; |
843 | mutex_lock(&pi->sched_lock); |
844 | ice_sched_clear_tx_topo(pi); |
845 | mutex_unlock(lock: &pi->sched_lock); |
846 | mutex_destroy(lock: &pi->sched_lock); |
847 | } |
848 | |
849 | /** |
850 | * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports |
851 | * @hw: pointer to the HW struct |
852 | * |
853 | * Cleanup scheduling elements from SW DB for all the ports |
854 | */ |
855 | void ice_sched_cleanup_all(struct ice_hw *hw) |
856 | { |
857 | if (!hw) |
858 | return; |
859 | |
860 | devm_kfree(dev: ice_hw_to_dev(hw), p: hw->layer_info); |
861 | hw->layer_info = NULL; |
862 | |
863 | ice_sched_clear_port(pi: hw->port_info); |
864 | |
865 | hw->num_tx_sched_layers = 0; |
866 | hw->num_tx_sched_phys_layers = 0; |
867 | hw->flattened_layers = 0; |
868 | hw->max_cgds = 0; |
869 | } |
870 | |
871 | /** |
872 | * ice_sched_add_elems - add nodes to HW and SW DB |
873 | * @pi: port information structure |
874 | * @tc_node: pointer to the branch node |
875 | * @parent: pointer to the parent node |
876 | * @layer: layer number to add nodes |
877 | * @num_nodes: number of nodes |
878 | * @num_nodes_added: pointer to num nodes added |
879 | * @first_node_teid: if new nodes are added then return the TEID of first node |
880 | * @prealloc_nodes: preallocated nodes struct for software DB |
881 | * |
882 | * This function add nodes to HW as well as to SW DB for a given layer |
883 | */ |
884 | int |
885 | ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, |
886 | struct ice_sched_node *parent, u8 layer, u16 num_nodes, |
887 | u16 *num_nodes_added, u32 *first_node_teid, |
888 | struct ice_sched_node **prealloc_nodes) |
889 | { |
890 | struct ice_sched_node *prev, *new_node; |
891 | struct ice_aqc_add_elem *buf; |
892 | u16 i, num_groups_added = 0; |
893 | struct ice_hw *hw = pi->hw; |
894 | size_t buf_size; |
895 | int status = 0; |
896 | u32 teid; |
897 | |
898 | buf_size = struct_size(buf, generic, num_nodes); |
899 | buf = devm_kzalloc(dev: ice_hw_to_dev(hw), size: buf_size, GFP_KERNEL); |
900 | if (!buf) |
901 | return -ENOMEM; |
902 | |
903 | buf->hdr.parent_teid = parent->info.node_teid; |
904 | buf->hdr.num_elems = cpu_to_le16(num_nodes); |
905 | for (i = 0; i < num_nodes; i++) { |
906 | buf->generic[i].parent_teid = parent->info.node_teid; |
907 | buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC; |
908 | buf->generic[i].data.valid_sections = |
909 | ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | |
910 | ICE_AQC_ELEM_VALID_EIR; |
911 | buf->generic[i].data.generic = 0; |
912 | buf->generic[i].data.cir_bw.bw_profile_idx = |
913 | cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); |
914 | buf->generic[i].data.cir_bw.bw_alloc = |
915 | cpu_to_le16(ICE_SCHED_DFLT_BW_WT); |
916 | buf->generic[i].data.eir_bw.bw_profile_idx = |
917 | cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); |
918 | buf->generic[i].data.eir_bw.bw_alloc = |
919 | cpu_to_le16(ICE_SCHED_DFLT_BW_WT); |
920 | } |
921 | |
922 | status = ice_aq_add_sched_elems(hw, grps_req: 1, buf, buf_size, |
923 | grps_added: &num_groups_added, NULL); |
924 | if (status || num_groups_added != 1) { |
925 | ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n" , |
926 | hw->adminq.sq_last_status); |
927 | devm_kfree(dev: ice_hw_to_dev(hw), p: buf); |
928 | return -EIO; |
929 | } |
930 | |
931 | *num_nodes_added = num_nodes; |
932 | /* add nodes to the SW DB */ |
933 | for (i = 0; i < num_nodes; i++) { |
934 | if (prealloc_nodes) |
935 | status = ice_sched_add_node(pi, layer, info: &buf->generic[i], prealloc_node: prealloc_nodes[i]); |
936 | else |
937 | status = ice_sched_add_node(pi, layer, info: &buf->generic[i], NULL); |
938 | |
939 | if (status) { |
940 | ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n" , |
941 | status); |
942 | break; |
943 | } |
944 | |
945 | teid = le32_to_cpu(buf->generic[i].node_teid); |
946 | new_node = ice_sched_find_node_by_teid(start_node: parent, teid); |
947 | if (!new_node) { |
948 | ice_debug(hw, ICE_DBG_SCHED, "Node is missing for teid =%d\n" , teid); |
949 | break; |
950 | } |
951 | |
952 | new_node->sibling = NULL; |
953 | new_node->tc_num = tc_node->tc_num; |
954 | new_node->tx_weight = ICE_SCHED_DFLT_BW_WT; |
955 | new_node->tx_share = ICE_SCHED_DFLT_BW; |
956 | new_node->tx_max = ICE_SCHED_DFLT_BW; |
957 | new_node->name = kzalloc(SCHED_NODE_NAME_MAX_LEN, GFP_KERNEL); |
958 | if (!new_node->name) |
959 | return -ENOMEM; |
960 | |
961 | status = xa_alloc(xa: &pi->sched_node_ids, id: &new_node->id, NULL, XA_LIMIT(0, UINT_MAX), |
962 | GFP_KERNEL); |
963 | if (status) { |
964 | ice_debug(hw, ICE_DBG_SCHED, "xa_alloc failed for sched node status =%d\n" , |
965 | status); |
966 | break; |
967 | } |
968 | |
969 | snprintf(buf: new_node->name, SCHED_NODE_NAME_MAX_LEN, fmt: "node_%u" , new_node->id); |
970 | |
971 | /* add it to previous node sibling pointer */ |
972 | /* Note: siblings are not linked across branches */ |
973 | prev = ice_sched_get_first_node(pi, parent: tc_node, layer); |
974 | if (prev && prev != new_node) { |
975 | while (prev->sibling) |
976 | prev = prev->sibling; |
977 | prev->sibling = new_node; |
978 | } |
979 | |
980 | /* initialize the sibling head */ |
981 | if (!pi->sib_head[tc_node->tc_num][layer]) |
982 | pi->sib_head[tc_node->tc_num][layer] = new_node; |
983 | |
984 | if (i == 0) |
985 | *first_node_teid = teid; |
986 | } |
987 | |
988 | devm_kfree(dev: ice_hw_to_dev(hw), p: buf); |
989 | return status; |
990 | } |
991 | |
992 | /** |
993 | * ice_sched_add_nodes_to_hw_layer - Add nodes to HW layer |
994 | * @pi: port information structure |
995 | * @tc_node: pointer to TC node |
996 | * @parent: pointer to parent node |
997 | * @layer: layer number to add nodes |
998 | * @num_nodes: number of nodes to be added |
999 | * @first_node_teid: pointer to the first node TEID |
1000 | * @num_nodes_added: pointer to number of nodes added |
1001 | * |
1002 | * Add nodes into specific HW layer. |
1003 | */ |
1004 | static int |
1005 | ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi, |
1006 | struct ice_sched_node *tc_node, |
1007 | struct ice_sched_node *parent, u8 layer, |
1008 | u16 num_nodes, u32 *first_node_teid, |
1009 | u16 *num_nodes_added) |
1010 | { |
1011 | u16 max_child_nodes; |
1012 | |
1013 | *num_nodes_added = 0; |
1014 | |
1015 | if (!num_nodes) |
1016 | return 0; |
1017 | |
1018 | if (!parent || layer < pi->hw->sw_entry_point_layer) |
1019 | return -EINVAL; |
1020 | |
1021 | /* max children per node per layer */ |
1022 | max_child_nodes = pi->hw->max_children[parent->tx_sched_layer]; |
1023 | |
1024 | /* current number of children + required nodes exceed max children */ |
1025 | if ((parent->num_children + num_nodes) > max_child_nodes) { |
1026 | /* Fail if the parent is a TC node */ |
1027 | if (parent == tc_node) |
1028 | return -EIO; |
1029 | return -ENOSPC; |
1030 | } |
1031 | |
1032 | return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes, |
1033 | num_nodes_added, first_node_teid, NULL); |
1034 | } |
1035 | |
1036 | /** |
1037 | * ice_sched_add_nodes_to_layer - Add nodes to a given layer |
1038 | * @pi: port information structure |
1039 | * @tc_node: pointer to TC node |
1040 | * @parent: pointer to parent node |
1041 | * @layer: layer number to add nodes |
1042 | * @num_nodes: number of nodes to be added |
1043 | * @first_node_teid: pointer to the first node TEID |
1044 | * @num_nodes_added: pointer to number of nodes added |
1045 | * |
1046 | * This function add nodes to a given layer. |
1047 | */ |
int
ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
			     struct ice_sched_node *tc_node,
			     struct ice_sched_node *parent, u8 layer,
			     u16 num_nodes, u32 *first_node_teid,
			     u16 *num_nodes_added)
{
	/* Caller-visible TEID out-pointer; may be redirected to scratch
	 * storage once the first node has already been reported (see the
	 * full-parent branch below).
	 */
	u32 *first_teid_ptr = first_node_teid;
	/* Size of the next batch request; shrinks when the current parent
	 * cannot accept the full remaining count.
	 */
	u16 new_num_nodes = num_nodes;
	int status = 0;

	*num_nodes_added = 0;
	/* Add nodes in batches until the full request is satisfied or an
	 * unrecoverable error occurs. -ENOSPC from the HW layer means the
	 * current parent ran out of child slots and is handled by moving
	 * to a sibling parent.
	 */
	while (*num_nodes_added < num_nodes) {
		u16 max_child_nodes, num_added = 0;
		u32 temp;

		status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent,
							 layer, num_nodes: new_num_nodes,
							 first_node_teid: first_teid_ptr,
							 num_nodes_added: &num_added);
		if (!status)
			*num_nodes_added += num_added;
		/* added more nodes than requested ? */
		if (*num_nodes_added > num_nodes) {
			ice_debug(pi->hw, ICE_DBG_SCHED, "added extra nodes %d %d\n" , num_nodes,
				  *num_nodes_added);
			status = -EIO;
			break;
		}
		/* break if all the nodes are added successfully */
		if (!status && (*num_nodes_added == num_nodes))
			break;
		/* break if the error is not max limit */
		if (status && status != -ENOSPC)
			break;
		/* Exceeded the max children */
		max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
		/* utilize all the spaces if the parent is not full */
		if (parent->num_children < max_child_nodes) {
			new_num_nodes = max_child_nodes - parent->num_children;
		} else {
			/* This parent is full, try the next sibling */
			parent = parent->sibling;
			/* NOTE(review): if the parent has no sibling this
			 * becomes NULL and the next HW-layer call returns
			 * -EINVAL, ending the loop -- confirm this is the
			 * intended out-of-parents behavior.
			 */
			/* Don't modify the first node TEID memory if the
			 * first node was added already in the above call.
			 * Instead send some temp memory for all other
			 * recursive calls.
			 */
			if (num_added)
				first_teid_ptr = &temp;

			new_num_nodes = num_nodes - *num_nodes_added;
		}
	}
	return status;
}
1104 | |
1105 | /** |
1106 | * ice_sched_get_qgrp_layer - get the current queue group layer number |
1107 | * @hw: pointer to the HW struct |
1108 | * |
1109 | * This function returns the current queue group layer number |
1110 | */ |
1111 | static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw) |
1112 | { |
1113 | /* It's always total layers - 1, the array is 0 relative so -2 */ |
1114 | return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET; |
1115 | } |
1116 | |
1117 | /** |
1118 | * ice_sched_get_vsi_layer - get the current VSI layer number |
1119 | * @hw: pointer to the HW struct |
1120 | * |
1121 | * This function returns the current VSI layer number |
1122 | */ |
1123 | u8 ice_sched_get_vsi_layer(struct ice_hw *hw) |
1124 | { |
1125 | /* Num Layers VSI layer |
1126 | * 9 6 |
1127 | * 7 4 |
1128 | * 5 or less sw_entry_point_layer |
1129 | */ |
1130 | /* calculate the VSI layer based on number of layers. */ |
1131 | if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) { |
1132 | u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET; |
1133 | |
1134 | if (layer > hw->sw_entry_point_layer) |
1135 | return layer; |
1136 | } |
1137 | return hw->sw_entry_point_layer; |
1138 | } |
1139 | |
1140 | /** |
1141 | * ice_sched_get_agg_layer - get the current aggregator layer number |
1142 | * @hw: pointer to the HW struct |
1143 | * |
1144 | * This function returns the current aggregator layer number |
1145 | */ |
1146 | u8 ice_sched_get_agg_layer(struct ice_hw *hw) |
1147 | { |
1148 | /* Num Layers aggregator layer |
1149 | * 9 4 |
1150 | * 7 or less sw_entry_point_layer |
1151 | */ |
1152 | /* calculate the aggregator layer based on number of layers. */ |
1153 | if (hw->num_tx_sched_layers > ICE_AGG_LAYER_OFFSET + 1) { |
1154 | u8 layer = hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET; |
1155 | |
1156 | if (layer > hw->sw_entry_point_layer) |
1157 | return layer; |
1158 | } |
1159 | return hw->sw_entry_point_layer; |
1160 | } |
1161 | |
1162 | /** |
1163 | * ice_rm_dflt_leaf_node - remove the default leaf node in the tree |
1164 | * @pi: port information structure |
1165 | * |
1166 | * This function removes the leaf node that was created by the FW |
1167 | * during initialization |
1168 | */ |
1169 | static void ice_rm_dflt_leaf_node(struct ice_port_info *pi) |
1170 | { |
1171 | struct ice_sched_node *node; |
1172 | |
1173 | node = pi->root; |
1174 | while (node) { |
1175 | if (!node->num_children) |
1176 | break; |
1177 | node = node->children[0]; |
1178 | } |
1179 | if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) { |
1180 | u32 teid = le32_to_cpu(node->info.node_teid); |
1181 | int status; |
1182 | |
1183 | /* remove the default leaf node */ |
1184 | status = ice_sched_remove_elems(hw: pi->hw, parent: node->parent, node_teid: teid); |
1185 | if (!status) |
1186 | ice_free_sched_node(pi, node); |
1187 | } |
1188 | } |
1189 | |
1190 | /** |
1191 | * ice_sched_rm_dflt_nodes - free the default nodes in the tree |
1192 | * @pi: port information structure |
1193 | * |
1194 | * This function frees all the nodes except root and TC that were created by |
1195 | * the FW during initialization |
1196 | */ |
1197 | static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi) |
1198 | { |
1199 | struct ice_sched_node *node; |
1200 | |
1201 | ice_rm_dflt_leaf_node(pi); |
1202 | |
1203 | /* remove the default nodes except TC and root nodes */ |
1204 | node = pi->root; |
1205 | while (node) { |
1206 | if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer && |
1207 | node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC && |
1208 | node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) { |
1209 | ice_free_sched_node(pi, node); |
1210 | break; |
1211 | } |
1212 | |
1213 | if (!node->num_children) |
1214 | break; |
1215 | node = node->children[0]; |
1216 | } |
1217 | } |
1218 | |
1219 | /** |
1220 | * ice_sched_init_port - Initialize scheduler by querying information from FW |
1221 | * @pi: port info structure for the tree to cleanup |
1222 | * |
1223 | * This function is the initial call to find the total number of Tx scheduler |
1224 | * resources, default topology created by firmware and storing the information |
1225 | * in SW DB. |
1226 | */ |
int ice_sched_init_port(struct ice_port_info *pi)
{
	struct ice_aqc_get_topo_elem *buf;
	struct ice_hw *hw;
	u8 num_branches;
	u16 num_elems;
	int status;
	u8 i, j;

	if (!pi)
		return -EINVAL;
	hw = pi->hw;

	/* Query the Default Topology from FW */
	buf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Query default scheduling tree topology */
	status = ice_aq_get_dflt_topo(hw, lport: pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
				      num_branches: &num_branches, NULL);
	if (status)
		goto err_init_port;

	/* num_branches should be between 1-8 */
	if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
		ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n" ,
			  num_branches);
		status = -EINVAL;
		goto err_init_port;
	}

	/* get the number of elements on the default/first branch */
	num_elems = le16_to_cpu(buf[0].hdr.num_elems);

	/* num_elems should always be between 1-9 */
	if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
		ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n" ,
			  num_elems);
		status = -EINVAL;
		goto err_init_port;
	}

	/* If the last node is a leaf node then the index of the queue group
	 * layer is two less than the number of elements.
	 */
	if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
	    ICE_AQC_ELEM_TYPE_LEAF)
		pi->last_node_teid =
			le32_to_cpu(buf[0].generic[num_elems - 2].node_teid);
	else
		pi->last_node_teid =
			le32_to_cpu(buf[0].generic[num_elems - 1].node_teid);

	/* Insert the Tx Sched root node */
	status = ice_sched_add_root_node(pi, info: &buf[0].generic[0]);
	if (status)
		goto err_init_port;

	/* Parse the default tree and cache the information */
	for (i = 0; i < num_branches; i++) {
		num_elems = le16_to_cpu(buf[i].hdr.num_elems);

		/* Skip root element as already inserted */
		for (j = 1; j < num_elems; j++) {
			/* update the sw entry point */
			/* NOTE(review): reads buf[0] while the node added
			 * below comes from buf[i] -- presumably the entry
			 * point layer index is identical across branches;
			 * confirm against the FW default-topology layout.
			 */
			if (buf[0].generic[j].data.elem_type ==
			    ICE_AQC_ELEM_TYPE_ENTRY_POINT)
				hw->sw_entry_point_layer = j;

			status = ice_sched_add_node(pi, layer: j, info: &buf[i].generic[j], NULL);
			if (status)
				goto err_init_port;
		}
	}

	/* Remove the default nodes. */
	if (pi->root)
		ice_sched_rm_dflt_nodes(pi);

	/* initialize the port for handling the scheduler tree */
	pi->port_state = ICE_SCHED_PORT_STATE_READY;
	mutex_init(&pi->sched_lock);
	for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
		INIT_LIST_HEAD(list: &pi->rl_prof_list[i]);

err_init_port:
	/* on failure, tear down any partially-built tree; on success
	 * (status == 0) this only falls through to free the query buffer
	 */
	if (status && pi->root) {
		ice_free_sched_node(pi, node: pi->root);
		pi->root = NULL;
	}

	kfree(objp: buf);
	return status;
}
1322 | |
1323 | /** |
1324 | * ice_sched_query_res_alloc - query the FW for num of logical sched layers |
1325 | * @hw: pointer to the HW struct |
1326 | * |
1327 | * query FW for allocated scheduler resources and store in HW struct |
1328 | */ |
1329 | int ice_sched_query_res_alloc(struct ice_hw *hw) |
1330 | { |
1331 | struct ice_aqc_query_txsched_res_resp *buf; |
1332 | __le16 max_sibl; |
1333 | int status = 0; |
1334 | u16 i; |
1335 | |
1336 | if (hw->layer_info) |
1337 | return status; |
1338 | |
1339 | buf = devm_kzalloc(dev: ice_hw_to_dev(hw), size: sizeof(*buf), GFP_KERNEL); |
1340 | if (!buf) |
1341 | return -ENOMEM; |
1342 | |
1343 | status = ice_aq_query_sched_res(hw, buf_size: sizeof(*buf), buf, NULL); |
1344 | if (status) |
1345 | goto sched_query_out; |
1346 | |
1347 | hw->num_tx_sched_layers = le16_to_cpu(buf->sched_props.logical_levels); |
1348 | hw->num_tx_sched_phys_layers = |
1349 | le16_to_cpu(buf->sched_props.phys_levels); |
1350 | hw->flattened_layers = buf->sched_props.flattening_bitmap; |
1351 | hw->max_cgds = buf->sched_props.max_pf_cgds; |
1352 | |
1353 | /* max sibling group size of current layer refers to the max children |
1354 | * of the below layer node. |
1355 | * layer 1 node max children will be layer 2 max sibling group size |
1356 | * layer 2 node max children will be layer 3 max sibling group size |
1357 | * and so on. This array will be populated from root (index 0) to |
1358 | * qgroup layer 7. Leaf node has no children. |
1359 | */ |
1360 | for (i = 0; i < hw->num_tx_sched_layers - 1; i++) { |
1361 | max_sibl = buf->layer_props[i + 1].max_sibl_grp_sz; |
1362 | hw->max_children[i] = le16_to_cpu(max_sibl); |
1363 | } |
1364 | |
1365 | hw->layer_info = devm_kmemdup(dev: ice_hw_to_dev(hw), src: buf->layer_props, |
1366 | len: (hw->num_tx_sched_layers * |
1367 | sizeof(*hw->layer_info)), |
1368 | GFP_KERNEL); |
1369 | if (!hw->layer_info) { |
1370 | status = -ENOMEM; |
1371 | goto sched_query_out; |
1372 | } |
1373 | |
1374 | sched_query_out: |
1375 | devm_kfree(dev: ice_hw_to_dev(hw), p: buf); |
1376 | return status; |
1377 | } |
1378 | |
1379 | /** |
1380 | * ice_sched_get_psm_clk_freq - determine the PSM clock frequency |
1381 | * @hw: pointer to the HW struct |
1382 | * |
1383 | * Determine the PSM clock frequency and store in HW struct |
1384 | */ |
1385 | void ice_sched_get_psm_clk_freq(struct ice_hw *hw) |
1386 | { |
1387 | u32 val, clk_src; |
1388 | |
1389 | val = rd32(hw, GLGEN_CLKSTAT_SRC); |
1390 | clk_src = (val & GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M) >> |
1391 | GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S; |
1392 | |
1393 | #define PSM_CLK_SRC_367_MHZ 0x0 |
1394 | #define PSM_CLK_SRC_416_MHZ 0x1 |
1395 | #define PSM_CLK_SRC_446_MHZ 0x2 |
1396 | #define PSM_CLK_SRC_390_MHZ 0x3 |
1397 | |
1398 | switch (clk_src) { |
1399 | case PSM_CLK_SRC_367_MHZ: |
1400 | hw->psm_clk_freq = ICE_PSM_CLK_367MHZ_IN_HZ; |
1401 | break; |
1402 | case PSM_CLK_SRC_416_MHZ: |
1403 | hw->psm_clk_freq = ICE_PSM_CLK_416MHZ_IN_HZ; |
1404 | break; |
1405 | case PSM_CLK_SRC_446_MHZ: |
1406 | hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ; |
1407 | break; |
1408 | case PSM_CLK_SRC_390_MHZ: |
1409 | hw->psm_clk_freq = ICE_PSM_CLK_390MHZ_IN_HZ; |
1410 | break; |
1411 | default: |
1412 | ice_debug(hw, ICE_DBG_SCHED, "PSM clk_src unexpected %u\n" , |
1413 | clk_src); |
1414 | /* fall back to a safe default */ |
1415 | hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ; |
1416 | } |
1417 | } |
1418 | |
1419 | /** |
1420 | * ice_sched_find_node_in_subtree - Find node in part of base node subtree |
1421 | * @hw: pointer to the HW struct |
1422 | * @base: pointer to the base node |
1423 | * @node: pointer to the node to search |
1424 | * |
1425 | * This function checks whether a given node is part of the base node |
1426 | * subtree or not |
1427 | */ |
1428 | static bool |
1429 | ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base, |
1430 | struct ice_sched_node *node) |
1431 | { |
1432 | u8 i; |
1433 | |
1434 | for (i = 0; i < base->num_children; i++) { |
1435 | struct ice_sched_node *child = base->children[i]; |
1436 | |
1437 | if (node == child) |
1438 | return true; |
1439 | |
1440 | if (child->tx_sched_layer > node->tx_sched_layer) |
1441 | return false; |
1442 | |
1443 | /* this recursion is intentional, and wouldn't |
1444 | * go more than 8 calls |
1445 | */ |
1446 | if (ice_sched_find_node_in_subtree(hw, base: child, node)) |
1447 | return true; |
1448 | } |
1449 | return false; |
1450 | } |
1451 | |
1452 | /** |
1453 | * ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node |
1454 | * @pi: port information structure |
1455 | * @vsi_node: software VSI handle |
1456 | * @qgrp_node: first queue group node identified for scanning |
1457 | * @owner: LAN or RDMA |
1458 | * |
1459 | * This function retrieves a free LAN or RDMA queue group node by scanning |
1460 | * qgrp_node and its siblings for the queue group with the fewest number |
1461 | * of queues currently assigned. |
1462 | */ |
1463 | static struct ice_sched_node * |
1464 | ice_sched_get_free_qgrp(struct ice_port_info *pi, |
1465 | struct ice_sched_node *vsi_node, |
1466 | struct ice_sched_node *qgrp_node, u8 owner) |
1467 | { |
1468 | struct ice_sched_node *min_qgrp; |
1469 | u8 min_children; |
1470 | |
1471 | if (!qgrp_node) |
1472 | return qgrp_node; |
1473 | min_children = qgrp_node->num_children; |
1474 | if (!min_children) |
1475 | return qgrp_node; |
1476 | min_qgrp = qgrp_node; |
1477 | /* scan all queue groups until find a node which has less than the |
1478 | * minimum number of children. This way all queue group nodes get |
1479 | * equal number of shares and active. The bandwidth will be equally |
1480 | * distributed across all queues. |
1481 | */ |
1482 | while (qgrp_node) { |
1483 | /* make sure the qgroup node is part of the VSI subtree */ |
1484 | if (ice_sched_find_node_in_subtree(hw: pi->hw, base: vsi_node, node: qgrp_node)) |
1485 | if (qgrp_node->num_children < min_children && |
1486 | qgrp_node->owner == owner) { |
1487 | /* replace the new min queue group node */ |
1488 | min_qgrp = qgrp_node; |
1489 | min_children = min_qgrp->num_children; |
1490 | /* break if it has no children, */ |
1491 | if (!min_children) |
1492 | break; |
1493 | } |
1494 | qgrp_node = qgrp_node->sibling; |
1495 | } |
1496 | return min_qgrp; |
1497 | } |
1498 | |
1499 | /** |
1500 | * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node |
1501 | * @pi: port information structure |
1502 | * @vsi_handle: software VSI handle |
1503 | * @tc: branch number |
1504 | * @owner: LAN or RDMA |
1505 | * |
1506 | * This function retrieves a free LAN or RDMA queue group node |
1507 | */ |
1508 | struct ice_sched_node * |
1509 | ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, |
1510 | u8 owner) |
1511 | { |
1512 | struct ice_sched_node *vsi_node, *qgrp_node; |
1513 | struct ice_vsi_ctx *vsi_ctx; |
1514 | u16 max_children; |
1515 | u8 qgrp_layer; |
1516 | |
1517 | qgrp_layer = ice_sched_get_qgrp_layer(hw: pi->hw); |
1518 | max_children = pi->hw->max_children[qgrp_layer]; |
1519 | |
1520 | vsi_ctx = ice_get_vsi_ctx(hw: pi->hw, vsi_handle); |
1521 | if (!vsi_ctx) |
1522 | return NULL; |
1523 | vsi_node = vsi_ctx->sched.vsi_node[tc]; |
1524 | /* validate invalid VSI ID */ |
1525 | if (!vsi_node) |
1526 | return NULL; |
1527 | |
1528 | /* get the first queue group node from VSI sub-tree */ |
1529 | qgrp_node = ice_sched_get_first_node(pi, parent: vsi_node, layer: qgrp_layer); |
1530 | while (qgrp_node) { |
1531 | /* make sure the qgroup node is part of the VSI subtree */ |
1532 | if (ice_sched_find_node_in_subtree(hw: pi->hw, base: vsi_node, node: qgrp_node)) |
1533 | if (qgrp_node->num_children < max_children && |
1534 | qgrp_node->owner == owner) |
1535 | break; |
1536 | qgrp_node = qgrp_node->sibling; |
1537 | } |
1538 | |
1539 | /* Select the best queue group */ |
1540 | return ice_sched_get_free_qgrp(pi, vsi_node, qgrp_node, owner); |
1541 | } |
1542 | |
1543 | /** |
1544 | * ice_sched_get_vsi_node - Get a VSI node based on VSI ID |
1545 | * @pi: pointer to the port information structure |
1546 | * @tc_node: pointer to the TC node |
1547 | * @vsi_handle: software VSI handle |
1548 | * |
1549 | * This function retrieves a VSI node for a given VSI ID from a given |
1550 | * TC branch |
1551 | */ |
1552 | static struct ice_sched_node * |
1553 | ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node, |
1554 | u16 vsi_handle) |
1555 | { |
1556 | struct ice_sched_node *node; |
1557 | u8 vsi_layer; |
1558 | |
1559 | vsi_layer = ice_sched_get_vsi_layer(hw: pi->hw); |
1560 | node = ice_sched_get_first_node(pi, parent: tc_node, layer: vsi_layer); |
1561 | |
1562 | /* Check whether it already exists */ |
1563 | while (node) { |
1564 | if (node->vsi_handle == vsi_handle) |
1565 | return node; |
1566 | node = node->sibling; |
1567 | } |
1568 | |
1569 | return node; |
1570 | } |
1571 | |
1572 | /** |
1573 | * ice_sched_get_agg_node - Get an aggregator node based on aggregator ID |
1574 | * @pi: pointer to the port information structure |
1575 | * @tc_node: pointer to the TC node |
1576 | * @agg_id: aggregator ID |
1577 | * |
1578 | * This function retrieves an aggregator node for a given aggregator ID from |
1579 | * a given TC branch |
1580 | */ |
1581 | struct ice_sched_node * |
1582 | ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node, |
1583 | u32 agg_id) |
1584 | { |
1585 | struct ice_sched_node *node; |
1586 | struct ice_hw *hw = pi->hw; |
1587 | u8 agg_layer; |
1588 | |
1589 | if (!hw) |
1590 | return NULL; |
1591 | agg_layer = ice_sched_get_agg_layer(hw); |
1592 | node = ice_sched_get_first_node(pi, parent: tc_node, layer: agg_layer); |
1593 | |
1594 | /* Check whether it already exists */ |
1595 | while (node) { |
1596 | if (node->agg_id == agg_id) |
1597 | return node; |
1598 | node = node->sibling; |
1599 | } |
1600 | |
1601 | return node; |
1602 | } |
1603 | |
1604 | /** |
1605 | * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes |
1606 | * @hw: pointer to the HW struct |
1607 | * @num_qs: number of queues |
1608 | * @num_nodes: num nodes array |
1609 | * |
1610 | * This function calculates the number of VSI child nodes based on the |
1611 | * number of queues. |
1612 | */ |
1613 | static void |
1614 | ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes) |
1615 | { |
1616 | u16 num = num_qs; |
1617 | u8 i, qgl, vsil; |
1618 | |
1619 | qgl = ice_sched_get_qgrp_layer(hw); |
1620 | vsil = ice_sched_get_vsi_layer(hw); |
1621 | |
1622 | /* calculate num nodes from queue group to VSI layer */ |
1623 | for (i = qgl; i > vsil; i--) { |
1624 | /* round to the next integer if there is a remainder */ |
1625 | num = DIV_ROUND_UP(num, hw->max_children[i]); |
1626 | |
1627 | /* need at least one node */ |
1628 | num_nodes[i] = num ? num : 1; |
1629 | } |
1630 | } |
1631 | |
1632 | /** |
1633 | * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree |
1634 | * @pi: port information structure |
1635 | * @vsi_handle: software VSI handle |
1636 | * @tc_node: pointer to the TC node |
1637 | * @num_nodes: pointer to the num nodes that needs to be added per layer |
1638 | * @owner: node owner (LAN or RDMA) |
1639 | * |
1640 | * This function adds the VSI child nodes to tree. It gets called for |
1641 | * LAN and RDMA separately. |
1642 | */ |
1643 | static int |
1644 | ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, |
1645 | struct ice_sched_node *tc_node, u16 *num_nodes, |
1646 | u8 owner) |
1647 | { |
1648 | struct ice_sched_node *parent, *node; |
1649 | struct ice_hw *hw = pi->hw; |
1650 | u32 first_node_teid; |
1651 | u16 num_added = 0; |
1652 | u8 i, qgl, vsil; |
1653 | |
1654 | qgl = ice_sched_get_qgrp_layer(hw); |
1655 | vsil = ice_sched_get_vsi_layer(hw); |
1656 | parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); |
1657 | for (i = vsil + 1; i <= qgl; i++) { |
1658 | int status; |
1659 | |
1660 | if (!parent) |
1661 | return -EIO; |
1662 | |
1663 | status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, layer: i, |
1664 | num_nodes: num_nodes[i], |
1665 | first_node_teid: &first_node_teid, |
1666 | num_nodes_added: &num_added); |
1667 | if (status || num_nodes[i] != num_added) |
1668 | return -EIO; |
1669 | |
1670 | /* The newly added node can be a new parent for the next |
1671 | * layer nodes |
1672 | */ |
1673 | if (num_added) { |
1674 | parent = ice_sched_find_node_by_teid(start_node: tc_node, |
1675 | teid: first_node_teid); |
1676 | node = parent; |
1677 | while (node) { |
1678 | node->owner = owner; |
1679 | node = node->sibling; |
1680 | } |
1681 | } else { |
1682 | parent = parent->children[0]; |
1683 | } |
1684 | } |
1685 | |
1686 | return 0; |
1687 | } |
1688 | |
1689 | /** |
1690 | * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes |
1691 | * @pi: pointer to the port info structure |
1692 | * @tc_node: pointer to TC node |
1693 | * @num_nodes: pointer to num nodes array |
1694 | * |
1695 | * This function calculates the number of supported nodes needed to add this |
1696 | * VSI into Tx tree including the VSI, parent and intermediate nodes in below |
1697 | * layers |
1698 | */ |
1699 | static void |
1700 | ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi, |
1701 | struct ice_sched_node *tc_node, u16 *num_nodes) |
1702 | { |
1703 | struct ice_sched_node *node; |
1704 | u8 vsil; |
1705 | int i; |
1706 | |
1707 | vsil = ice_sched_get_vsi_layer(hw: pi->hw); |
1708 | for (i = vsil; i >= pi->hw->sw_entry_point_layer; i--) |
1709 | /* Add intermediate nodes if TC has no children and |
1710 | * need at least one node for VSI |
1711 | */ |
1712 | if (!tc_node->num_children || i == vsil) { |
1713 | num_nodes[i]++; |
1714 | } else { |
1715 | /* If intermediate nodes are reached max children |
1716 | * then add a new one. |
1717 | */ |
1718 | node = ice_sched_get_first_node(pi, parent: tc_node, layer: (u8)i); |
1719 | /* scan all the siblings */ |
1720 | while (node) { |
1721 | if (node->num_children < pi->hw->max_children[i]) |
1722 | break; |
1723 | node = node->sibling; |
1724 | } |
1725 | |
1726 | /* tree has one intermediate node to add this new VSI. |
1727 | * So no need to calculate supported nodes for below |
1728 | * layers. |
1729 | */ |
1730 | if (node) |
1731 | break; |
1732 | /* all the nodes are full, allocate a new one */ |
1733 | num_nodes[i]++; |
1734 | } |
1735 | } |
1736 | |
1737 | /** |
1738 | * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree |
1739 | * @pi: port information structure |
1740 | * @vsi_handle: software VSI handle |
1741 | * @tc_node: pointer to TC node |
1742 | * @num_nodes: pointer to num nodes array |
1743 | * |
1744 | * This function adds the VSI supported nodes into Tx tree including the |
1745 | * VSI, its parent and intermediate nodes in below layers |
1746 | */ |
1747 | static int |
1748 | ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle, |
1749 | struct ice_sched_node *tc_node, u16 *num_nodes) |
1750 | { |
1751 | struct ice_sched_node *parent = tc_node; |
1752 | u32 first_node_teid; |
1753 | u16 num_added = 0; |
1754 | u8 i, vsil; |
1755 | |
1756 | if (!pi) |
1757 | return -EINVAL; |
1758 | |
1759 | vsil = ice_sched_get_vsi_layer(hw: pi->hw); |
1760 | for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) { |
1761 | int status; |
1762 | |
1763 | status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, |
1764 | layer: i, num_nodes: num_nodes[i], |
1765 | first_node_teid: &first_node_teid, |
1766 | num_nodes_added: &num_added); |
1767 | if (status || num_nodes[i] != num_added) |
1768 | return -EIO; |
1769 | |
1770 | /* The newly added node can be a new parent for the next |
1771 | * layer nodes |
1772 | */ |
1773 | if (num_added) |
1774 | parent = ice_sched_find_node_by_teid(start_node: tc_node, |
1775 | teid: first_node_teid); |
1776 | else |
1777 | parent = parent->children[0]; |
1778 | |
1779 | if (!parent) |
1780 | return -EIO; |
1781 | |
1782 | if (i == vsil) |
1783 | parent->vsi_handle = vsi_handle; |
1784 | } |
1785 | |
1786 | return 0; |
1787 | } |
1788 | |
1789 | /** |
1790 | * ice_sched_add_vsi_to_topo - add a new VSI into tree |
1791 | * @pi: port information structure |
1792 | * @vsi_handle: software VSI handle |
1793 | * @tc: TC number |
1794 | * |
1795 | * This function adds a new VSI into scheduler tree |
1796 | */ |
1797 | static int |
1798 | ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc) |
1799 | { |
1800 | u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; |
1801 | struct ice_sched_node *tc_node; |
1802 | |
1803 | tc_node = ice_sched_get_tc_node(pi, tc); |
1804 | if (!tc_node) |
1805 | return -EINVAL; |
1806 | |
1807 | /* calculate number of supported nodes needed for this VSI */ |
1808 | ice_sched_calc_vsi_support_nodes(pi, tc_node, num_nodes); |
1809 | |
1810 | /* add VSI supported nodes to TC subtree */ |
1811 | return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node, |
1812 | num_nodes); |
1813 | } |
1814 | |
1815 | /** |
1816 | * ice_sched_update_vsi_child_nodes - update VSI child nodes |
1817 | * @pi: port information structure |
1818 | * @vsi_handle: software VSI handle |
1819 | * @tc: TC number |
1820 | * @new_numqs: new number of max queues |
1821 | * @owner: owner of this subtree |
1822 | * |
1823 | * This function updates the VSI child nodes based on the number of queues |
1824 | */ |
1825 | static int |
1826 | ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, |
1827 | u8 tc, u16 new_numqs, u8 owner) |
1828 | { |
1829 | u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; |
1830 | struct ice_sched_node *vsi_node; |
1831 | struct ice_sched_node *tc_node; |
1832 | struct ice_vsi_ctx *vsi_ctx; |
1833 | struct ice_hw *hw = pi->hw; |
1834 | u16 prev_numqs; |
1835 | int status = 0; |
1836 | |
1837 | tc_node = ice_sched_get_tc_node(pi, tc); |
1838 | if (!tc_node) |
1839 | return -EIO; |
1840 | |
1841 | vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); |
1842 | if (!vsi_node) |
1843 | return -EIO; |
1844 | |
1845 | vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); |
1846 | if (!vsi_ctx) |
1847 | return -EINVAL; |
1848 | |
1849 | if (owner == ICE_SCHED_NODE_OWNER_LAN) |
1850 | prev_numqs = vsi_ctx->sched.max_lanq[tc]; |
1851 | else |
1852 | prev_numqs = vsi_ctx->sched.max_rdmaq[tc]; |
1853 | /* num queues are not changed or less than the previous number */ |
1854 | if (new_numqs <= prev_numqs) |
1855 | return status; |
1856 | if (owner == ICE_SCHED_NODE_OWNER_LAN) { |
1857 | status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs); |
1858 | if (status) |
1859 | return status; |
1860 | } else { |
1861 | status = ice_alloc_rdma_q_ctx(hw, vsi_handle, tc, new_numqs); |
1862 | if (status) |
1863 | return status; |
1864 | } |
1865 | |
1866 | if (new_numqs) |
1867 | ice_sched_calc_vsi_child_nodes(hw, num_qs: new_numqs, num_nodes: new_num_nodes); |
1868 | /* Keep the max number of queue configuration all the time. Update the |
1869 | * tree only if number of queues > previous number of queues. This may |
1870 | * leave some extra nodes in the tree if number of queues < previous |
1871 | * number but that wouldn't harm anything. Removing those extra nodes |
1872 | * may complicate the code if those nodes are part of SRL or |
1873 | * individually rate limited. |
1874 | */ |
1875 | status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node, |
1876 | num_nodes: new_num_nodes, owner); |
1877 | if (status) |
1878 | return status; |
1879 | if (owner == ICE_SCHED_NODE_OWNER_LAN) |
1880 | vsi_ctx->sched.max_lanq[tc] = new_numqs; |
1881 | else |
1882 | vsi_ctx->sched.max_rdmaq[tc] = new_numqs; |
1883 | |
1884 | return 0; |
1885 | } |
1886 | |
1887 | /** |
1888 | * ice_sched_cfg_vsi - configure the new/existing VSI |
1889 | * @pi: port information structure |
1890 | * @vsi_handle: software VSI handle |
1891 | * @tc: TC number |
1892 | * @maxqs: max number of queues |
1893 | * @owner: LAN or RDMA |
1894 | * @enable: TC enabled or disabled |
1895 | * |
1896 | * This function adds/updates VSI nodes based on the number of queues. If TC is |
1897 | * enabled and VSI is in suspended state then resume the VSI back. If TC is |
1898 | * disabled then suspend the VSI if it is not already. |
1899 | */ |
1900 | int |
1901 | ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, |
1902 | u8 owner, bool enable) |
1903 | { |
1904 | struct ice_sched_node *vsi_node, *tc_node; |
1905 | struct ice_vsi_ctx *vsi_ctx; |
1906 | struct ice_hw *hw = pi->hw; |
1907 | int status = 0; |
1908 | |
1909 | ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n" , vsi_handle); |
1910 | tc_node = ice_sched_get_tc_node(pi, tc); |
1911 | if (!tc_node) |
1912 | return -EINVAL; |
1913 | vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); |
1914 | if (!vsi_ctx) |
1915 | return -EINVAL; |
1916 | vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); |
1917 | |
1918 | /* suspend the VSI if TC is not enabled */ |
1919 | if (!enable) { |
1920 | if (vsi_node && vsi_node->in_use) { |
1921 | u32 teid = le32_to_cpu(vsi_node->info.node_teid); |
1922 | |
1923 | status = ice_sched_suspend_resume_elems(hw, num_nodes: 1, node_teids: &teid, |
1924 | suspend: true); |
1925 | if (!status) |
1926 | vsi_node->in_use = false; |
1927 | } |
1928 | return status; |
1929 | } |
1930 | |
1931 | /* TC is enabled, if it is a new VSI then add it to the tree */ |
1932 | if (!vsi_node) { |
1933 | status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc); |
1934 | if (status) |
1935 | return status; |
1936 | |
1937 | vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); |
1938 | if (!vsi_node) |
1939 | return -EIO; |
1940 | |
1941 | vsi_ctx->sched.vsi_node[tc] = vsi_node; |
1942 | vsi_node->in_use = true; |
1943 | /* invalidate the max queues whenever VSI gets added first time |
1944 | * into the scheduler tree (boot or after reset). We need to |
1945 | * recreate the child nodes all the time in these cases. |
1946 | */ |
1947 | vsi_ctx->sched.max_lanq[tc] = 0; |
1948 | vsi_ctx->sched.max_rdmaq[tc] = 0; |
1949 | } |
1950 | |
1951 | /* update the VSI child nodes */ |
1952 | status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, new_numqs: maxqs, |
1953 | owner); |
1954 | if (status) |
1955 | return status; |
1956 | |
1957 | /* TC is enabled, resume the VSI if it is in the suspend state */ |
1958 | if (!vsi_node->in_use) { |
1959 | u32 teid = le32_to_cpu(vsi_node->info.node_teid); |
1960 | |
1961 | status = ice_sched_suspend_resume_elems(hw, num_nodes: 1, node_teids: &teid, suspend: false); |
1962 | if (!status) |
1963 | vsi_node->in_use = true; |
1964 | } |
1965 | |
1966 | return status; |
1967 | } |
1968 | |
1969 | /** |
1970 | * ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry |
1971 | * @pi: port information structure |
1972 | * @vsi_handle: software VSI handle |
1973 | * |
1974 | * This function removes single aggregator VSI info entry from |
1975 | * aggregator list. |
1976 | */ |
1977 | static void ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle) |
1978 | { |
1979 | struct ice_sched_agg_info *agg_info; |
1980 | struct ice_sched_agg_info *atmp; |
1981 | |
1982 | list_for_each_entry_safe(agg_info, atmp, &pi->hw->agg_list, |
1983 | list_entry) { |
1984 | struct ice_sched_agg_vsi_info *agg_vsi_info; |
1985 | struct ice_sched_agg_vsi_info *vtmp; |
1986 | |
1987 | list_for_each_entry_safe(agg_vsi_info, vtmp, |
1988 | &agg_info->agg_vsi_list, list_entry) |
1989 | if (agg_vsi_info->vsi_handle == vsi_handle) { |
1990 | list_del(entry: &agg_vsi_info->list_entry); |
1991 | devm_kfree(dev: ice_hw_to_dev(hw: pi->hw), |
1992 | p: agg_vsi_info); |
1993 | return; |
1994 | } |
1995 | } |
1996 | } |
1997 | |
1998 | /** |
1999 | * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree |
2000 | * @node: pointer to the sub-tree node |
2001 | * |
2002 | * This function checks for a leaf node presence in a given sub-tree node. |
2003 | */ |
2004 | static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node) |
2005 | { |
2006 | u8 i; |
2007 | |
2008 | for (i = 0; i < node->num_children; i++) |
2009 | if (ice_sched_is_leaf_node_present(node: node->children[i])) |
2010 | return true; |
2011 | /* check for a leaf node */ |
2012 | return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF); |
2013 | } |
2014 | |
2015 | /** |
2016 | * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes |
2017 | * @pi: port information structure |
2018 | * @vsi_handle: software VSI handle |
2019 | * @owner: LAN or RDMA |
2020 | * |
2021 | * This function removes the VSI and its LAN or RDMA children nodes from the |
2022 | * scheduler tree. |
2023 | */ |
2024 | static int |
2025 | ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner) |
2026 | { |
2027 | struct ice_vsi_ctx *vsi_ctx; |
2028 | int status = -EINVAL; |
2029 | u8 i; |
2030 | |
2031 | ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n" , vsi_handle); |
2032 | if (!ice_is_vsi_valid(hw: pi->hw, vsi_handle)) |
2033 | return status; |
2034 | mutex_lock(&pi->sched_lock); |
2035 | vsi_ctx = ice_get_vsi_ctx(hw: pi->hw, vsi_handle); |
2036 | if (!vsi_ctx) |
2037 | goto exit_sched_rm_vsi_cfg; |
2038 | |
2039 | ice_for_each_traffic_class(i) { |
2040 | struct ice_sched_node *vsi_node, *tc_node; |
2041 | u8 j = 0; |
2042 | |
2043 | tc_node = ice_sched_get_tc_node(pi, tc: i); |
2044 | if (!tc_node) |
2045 | continue; |
2046 | |
2047 | vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); |
2048 | if (!vsi_node) |
2049 | continue; |
2050 | |
2051 | if (ice_sched_is_leaf_node_present(node: vsi_node)) { |
2052 | ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n" , i); |
2053 | status = -EBUSY; |
2054 | goto exit_sched_rm_vsi_cfg; |
2055 | } |
2056 | while (j < vsi_node->num_children) { |
2057 | if (vsi_node->children[j]->owner == owner) { |
2058 | ice_free_sched_node(pi, node: vsi_node->children[j]); |
2059 | |
2060 | /* reset the counter again since the num |
2061 | * children will be updated after node removal |
2062 | */ |
2063 | j = 0; |
2064 | } else { |
2065 | j++; |
2066 | } |
2067 | } |
2068 | /* remove the VSI if it has no children */ |
2069 | if (!vsi_node->num_children) { |
2070 | ice_free_sched_node(pi, node: vsi_node); |
2071 | vsi_ctx->sched.vsi_node[i] = NULL; |
2072 | |
2073 | /* clean up aggregator related VSI info if any */ |
2074 | ice_sched_rm_agg_vsi_info(pi, vsi_handle); |
2075 | } |
2076 | if (owner == ICE_SCHED_NODE_OWNER_LAN) |
2077 | vsi_ctx->sched.max_lanq[i] = 0; |
2078 | else |
2079 | vsi_ctx->sched.max_rdmaq[i] = 0; |
2080 | } |
2081 | status = 0; |
2082 | |
2083 | exit_sched_rm_vsi_cfg: |
2084 | mutex_unlock(lock: &pi->sched_lock); |
2085 | return status; |
2086 | } |
2087 | |
2088 | /** |
2089 | * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes |
2090 | * @pi: port information structure |
2091 | * @vsi_handle: software VSI handle |
2092 | * |
2093 | * This function clears the VSI and its LAN children nodes from scheduler tree |
2094 | * for all TCs. |
2095 | */ |
2096 | int ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle) |
2097 | { |
2098 | return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN); |
2099 | } |
2100 | |
2101 | /** |
2102 | * ice_rm_vsi_rdma_cfg - remove VSI and its RDMA children nodes |
2103 | * @pi: port information structure |
2104 | * @vsi_handle: software VSI handle |
2105 | * |
2106 | * This function clears the VSI and its RDMA children nodes from scheduler tree |
2107 | * for all TCs. |
2108 | */ |
2109 | int ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle) |
2110 | { |
2111 | return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA); |
2112 | } |
2113 | |
2114 | /** |
2115 | * ice_get_agg_info - get the aggregator ID |
2116 | * @hw: pointer to the hardware structure |
2117 | * @agg_id: aggregator ID |
2118 | * |
2119 | * This function validates aggregator ID. The function returns info if |
2120 | * aggregator ID is present in list otherwise it returns null. |
2121 | */ |
2122 | static struct ice_sched_agg_info * |
2123 | ice_get_agg_info(struct ice_hw *hw, u32 agg_id) |
2124 | { |
2125 | struct ice_sched_agg_info *agg_info; |
2126 | |
2127 | list_for_each_entry(agg_info, &hw->agg_list, list_entry) |
2128 | if (agg_info->agg_id == agg_id) |
2129 | return agg_info; |
2130 | |
2131 | return NULL; |
2132 | } |
2133 | |
2134 | /** |
2135 | * ice_sched_get_free_vsi_parent - Find a free parent node in aggregator subtree |
2136 | * @hw: pointer to the HW struct |
2137 | * @node: pointer to a child node |
2138 | * @num_nodes: num nodes count array |
2139 | * |
2140 | * This function walks through the aggregator subtree to find a free parent |
2141 | * node |
2142 | */ |
2143 | struct ice_sched_node * |
2144 | ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node, |
2145 | u16 *num_nodes) |
2146 | { |
2147 | u8 l = node->tx_sched_layer; |
2148 | u8 vsil, i; |
2149 | |
2150 | vsil = ice_sched_get_vsi_layer(hw); |
2151 | |
2152 | /* Is it VSI parent layer ? */ |
2153 | if (l == vsil - 1) |
2154 | return (node->num_children < hw->max_children[l]) ? node : NULL; |
2155 | |
2156 | /* We have intermediate nodes. Let's walk through the subtree. If the |
2157 | * intermediate node has space to add a new node then clear the count |
2158 | */ |
2159 | if (node->num_children < hw->max_children[l]) |
2160 | num_nodes[l] = 0; |
2161 | /* The below recursive call is intentional and wouldn't go more than |
2162 | * 2 or 3 iterations. |
2163 | */ |
2164 | |
2165 | for (i = 0; i < node->num_children; i++) { |
2166 | struct ice_sched_node *parent; |
2167 | |
2168 | parent = ice_sched_get_free_vsi_parent(hw, node: node->children[i], |
2169 | num_nodes); |
2170 | if (parent) |
2171 | return parent; |
2172 | } |
2173 | |
2174 | return NULL; |
2175 | } |
2176 | |
2177 | /** |
2178 | * ice_sched_update_parent - update the new parent in SW DB |
2179 | * @new_parent: pointer to a new parent node |
2180 | * @node: pointer to a child node |
2181 | * |
2182 | * This function removes the child from the old parent and adds it to a new |
2183 | * parent |
2184 | */ |
2185 | void |
2186 | ice_sched_update_parent(struct ice_sched_node *new_parent, |
2187 | struct ice_sched_node *node) |
2188 | { |
2189 | struct ice_sched_node *old_parent; |
2190 | u8 i, j; |
2191 | |
2192 | old_parent = node->parent; |
2193 | |
2194 | /* update the old parent children */ |
2195 | for (i = 0; i < old_parent->num_children; i++) |
2196 | if (old_parent->children[i] == node) { |
2197 | for (j = i + 1; j < old_parent->num_children; j++) |
2198 | old_parent->children[j - 1] = |
2199 | old_parent->children[j]; |
2200 | old_parent->num_children--; |
2201 | break; |
2202 | } |
2203 | |
2204 | /* now move the node to a new parent */ |
2205 | new_parent->children[new_parent->num_children++] = node; |
2206 | node->parent = new_parent; |
2207 | node->info.parent_teid = new_parent->info.node_teid; |
2208 | } |
2209 | |
2210 | /** |
2211 | * ice_sched_move_nodes - move child nodes to a given parent |
2212 | * @pi: port information structure |
2213 | * @parent: pointer to parent node |
2214 | * @num_items: number of child nodes to be moved |
2215 | * @list: pointer to child node teids |
2216 | * |
2217 | * This function move the child nodes to a given parent. |
2218 | */ |
2219 | int |
2220 | ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent, |
2221 | u16 num_items, u32 *list) |
2222 | { |
2223 | DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1); |
2224 | u16 buf_len = __struct_size(buf); |
2225 | struct ice_sched_node *node; |
2226 | u16 i, grps_movd = 0; |
2227 | struct ice_hw *hw; |
2228 | int status = 0; |
2229 | |
2230 | hw = pi->hw; |
2231 | |
2232 | if (!parent || !num_items) |
2233 | return -EINVAL; |
2234 | |
2235 | /* Does parent have enough space */ |
2236 | if (parent->num_children + num_items > |
2237 | hw->max_children[parent->tx_sched_layer]) |
2238 | return -ENOSPC; |
2239 | |
2240 | for (i = 0; i < num_items; i++) { |
2241 | node = ice_sched_find_node_by_teid(start_node: pi->root, teid: list[i]); |
2242 | if (!node) { |
2243 | status = -EINVAL; |
2244 | break; |
2245 | } |
2246 | |
2247 | buf->hdr.src_parent_teid = node->info.parent_teid; |
2248 | buf->hdr.dest_parent_teid = parent->info.node_teid; |
2249 | buf->teid[0] = node->info.node_teid; |
2250 | buf->hdr.num_elems = cpu_to_le16(1); |
2251 | status = ice_aq_move_sched_elems(hw, buf, buf_size: buf_len, grps_movd: &grps_movd); |
2252 | if (status && grps_movd != 1) { |
2253 | status = -EIO; |
2254 | break; |
2255 | } |
2256 | |
2257 | /* update the SW DB */ |
2258 | ice_sched_update_parent(new_parent: parent, node); |
2259 | } |
2260 | |
2261 | return status; |
2262 | } |
2263 | |
2264 | /** |
2265 | * ice_sched_move_vsi_to_agg - move VSI to aggregator node |
2266 | * @pi: port information structure |
2267 | * @vsi_handle: software VSI handle |
2268 | * @agg_id: aggregator ID |
2269 | * @tc: TC number |
2270 | * |
2271 | * This function moves a VSI to an aggregator node or its subtree. |
2272 | * Intermediate nodes may be created if required. |
2273 | */ |
2274 | static int |
2275 | ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id, |
2276 | u8 tc) |
2277 | { |
2278 | struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent; |
2279 | u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; |
2280 | u32 first_node_teid, vsi_teid; |
2281 | u16 num_nodes_added; |
2282 | u8 aggl, vsil, i; |
2283 | int status; |
2284 | |
2285 | tc_node = ice_sched_get_tc_node(pi, tc); |
2286 | if (!tc_node) |
2287 | return -EIO; |
2288 | |
2289 | agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); |
2290 | if (!agg_node) |
2291 | return -ENOENT; |
2292 | |
2293 | vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); |
2294 | if (!vsi_node) |
2295 | return -ENOENT; |
2296 | |
2297 | /* Is this VSI already part of given aggregator? */ |
2298 | if (ice_sched_find_node_in_subtree(hw: pi->hw, base: agg_node, node: vsi_node)) |
2299 | return 0; |
2300 | |
2301 | aggl = ice_sched_get_agg_layer(hw: pi->hw); |
2302 | vsil = ice_sched_get_vsi_layer(hw: pi->hw); |
2303 | |
2304 | /* set intermediate node count to 1 between aggregator and VSI layers */ |
2305 | for (i = aggl + 1; i < vsil; i++) |
2306 | num_nodes[i] = 1; |
2307 | |
2308 | /* Check if the aggregator subtree has any free node to add the VSI */ |
2309 | for (i = 0; i < agg_node->num_children; i++) { |
2310 | parent = ice_sched_get_free_vsi_parent(hw: pi->hw, |
2311 | node: agg_node->children[i], |
2312 | num_nodes); |
2313 | if (parent) |
2314 | goto move_nodes; |
2315 | } |
2316 | |
2317 | /* add new nodes */ |
2318 | parent = agg_node; |
2319 | for (i = aggl + 1; i < vsil; i++) { |
2320 | status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, layer: i, |
2321 | num_nodes: num_nodes[i], |
2322 | first_node_teid: &first_node_teid, |
2323 | num_nodes_added: &num_nodes_added); |
2324 | if (status || num_nodes[i] != num_nodes_added) |
2325 | return -EIO; |
2326 | |
2327 | /* The newly added node can be a new parent for the next |
2328 | * layer nodes |
2329 | */ |
2330 | if (num_nodes_added) |
2331 | parent = ice_sched_find_node_by_teid(start_node: tc_node, |
2332 | teid: first_node_teid); |
2333 | else |
2334 | parent = parent->children[0]; |
2335 | |
2336 | if (!parent) |
2337 | return -EIO; |
2338 | } |
2339 | |
2340 | move_nodes: |
2341 | vsi_teid = le32_to_cpu(vsi_node->info.node_teid); |
2342 | return ice_sched_move_nodes(pi, parent, num_items: 1, list: &vsi_teid); |
2343 | } |
2344 | |
2345 | /** |
2346 | * ice_move_all_vsi_to_dflt_agg - move all VSI(s) to default aggregator |
2347 | * @pi: port information structure |
2348 | * @agg_info: aggregator info |
2349 | * @tc: traffic class number |
2350 | * @rm_vsi_info: true or false |
2351 | * |
2352 | * This function move all the VSI(s) to the default aggregator and delete |
2353 | * aggregator VSI info based on passed in boolean parameter rm_vsi_info. The |
2354 | * caller holds the scheduler lock. |
2355 | */ |
2356 | static int |
2357 | ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi, |
2358 | struct ice_sched_agg_info *agg_info, u8 tc, |
2359 | bool rm_vsi_info) |
2360 | { |
2361 | struct ice_sched_agg_vsi_info *agg_vsi_info; |
2362 | struct ice_sched_agg_vsi_info *tmp; |
2363 | int status = 0; |
2364 | |
2365 | list_for_each_entry_safe(agg_vsi_info, tmp, &agg_info->agg_vsi_list, |
2366 | list_entry) { |
2367 | u16 vsi_handle = agg_vsi_info->vsi_handle; |
2368 | |
2369 | /* Move VSI to default aggregator */ |
2370 | if (!ice_is_tc_ena(bitmap: agg_vsi_info->tc_bitmap[0], tc)) |
2371 | continue; |
2372 | |
2373 | status = ice_sched_move_vsi_to_agg(pi, vsi_handle, |
2374 | ICE_DFLT_AGG_ID, tc); |
2375 | if (status) |
2376 | break; |
2377 | |
2378 | clear_bit(nr: tc, addr: agg_vsi_info->tc_bitmap); |
2379 | if (rm_vsi_info && !agg_vsi_info->tc_bitmap[0]) { |
2380 | list_del(entry: &agg_vsi_info->list_entry); |
2381 | devm_kfree(dev: ice_hw_to_dev(hw: pi->hw), p: agg_vsi_info); |
2382 | } |
2383 | } |
2384 | |
2385 | return status; |
2386 | } |
2387 | |
2388 | /** |
2389 | * ice_sched_is_agg_inuse - check whether the aggregator is in use or not |
2390 | * @pi: port information structure |
2391 | * @node: node pointer |
2392 | * |
2393 | * This function checks whether the aggregator is attached with any VSI or not. |
2394 | */ |
2395 | static bool |
2396 | ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node) |
2397 | { |
2398 | u8 vsil, i; |
2399 | |
2400 | vsil = ice_sched_get_vsi_layer(hw: pi->hw); |
2401 | if (node->tx_sched_layer < vsil - 1) { |
2402 | for (i = 0; i < node->num_children; i++) |
2403 | if (ice_sched_is_agg_inuse(pi, node: node->children[i])) |
2404 | return true; |
2405 | return false; |
2406 | } else { |
2407 | return node->num_children ? true : false; |
2408 | } |
2409 | } |
2410 | |
2411 | /** |
2412 | * ice_sched_rm_agg_cfg - remove the aggregator node |
2413 | * @pi: port information structure |
2414 | * @agg_id: aggregator ID |
2415 | * @tc: TC number |
2416 | * |
2417 | * This function removes the aggregator node and intermediate nodes if any |
2418 | * from the given TC |
2419 | */ |
2420 | static int |
2421 | ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) |
2422 | { |
2423 | struct ice_sched_node *tc_node, *agg_node; |
2424 | struct ice_hw *hw = pi->hw; |
2425 | |
2426 | tc_node = ice_sched_get_tc_node(pi, tc); |
2427 | if (!tc_node) |
2428 | return -EIO; |
2429 | |
2430 | agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); |
2431 | if (!agg_node) |
2432 | return -ENOENT; |
2433 | |
2434 | /* Can't remove the aggregator node if it has children */ |
2435 | if (ice_sched_is_agg_inuse(pi, node: agg_node)) |
2436 | return -EBUSY; |
2437 | |
2438 | /* need to remove the whole subtree if aggregator node is the |
2439 | * only child. |
2440 | */ |
2441 | while (agg_node->tx_sched_layer > hw->sw_entry_point_layer) { |
2442 | struct ice_sched_node *parent = agg_node->parent; |
2443 | |
2444 | if (!parent) |
2445 | return -EIO; |
2446 | |
2447 | if (parent->num_children > 1) |
2448 | break; |
2449 | |
2450 | agg_node = parent; |
2451 | } |
2452 | |
2453 | ice_free_sched_node(pi, node: agg_node); |
2454 | return 0; |
2455 | } |
2456 | |
2457 | /** |
2458 | * ice_rm_agg_cfg_tc - remove aggregator configuration for TC |
2459 | * @pi: port information structure |
2460 | * @agg_info: aggregator ID |
2461 | * @tc: TC number |
2462 | * @rm_vsi_info: bool value true or false |
2463 | * |
2464 | * This function removes aggregator reference to VSI of given TC. It removes |
2465 | * the aggregator configuration completely for requested TC. The caller needs |
2466 | * to hold the scheduler lock. |
2467 | */ |
2468 | static int |
2469 | ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info, |
2470 | u8 tc, bool rm_vsi_info) |
2471 | { |
2472 | int status = 0; |
2473 | |
2474 | /* If nothing to remove - return success */ |
2475 | if (!ice_is_tc_ena(bitmap: agg_info->tc_bitmap[0], tc)) |
2476 | goto exit_rm_agg_cfg_tc; |
2477 | |
2478 | status = ice_move_all_vsi_to_dflt_agg(pi, agg_info, tc, rm_vsi_info); |
2479 | if (status) |
2480 | goto exit_rm_agg_cfg_tc; |
2481 | |
2482 | /* Delete aggregator node(s) */ |
2483 | status = ice_sched_rm_agg_cfg(pi, agg_id: agg_info->agg_id, tc); |
2484 | if (status) |
2485 | goto exit_rm_agg_cfg_tc; |
2486 | |
2487 | clear_bit(nr: tc, addr: agg_info->tc_bitmap); |
2488 | exit_rm_agg_cfg_tc: |
2489 | return status; |
2490 | } |
2491 | |
2492 | /** |
2493 | * ice_save_agg_tc_bitmap - save aggregator TC bitmap |
2494 | * @pi: port information structure |
2495 | * @agg_id: aggregator ID |
2496 | * @tc_bitmap: 8 bits TC bitmap |
2497 | * |
2498 | * Save aggregator TC bitmap. This function needs to be called with scheduler |
2499 | * lock held. |
2500 | */ |
2501 | static int |
2502 | ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id, |
2503 | unsigned long *tc_bitmap) |
2504 | { |
2505 | struct ice_sched_agg_info *agg_info; |
2506 | |
2507 | agg_info = ice_get_agg_info(hw: pi->hw, agg_id); |
2508 | if (!agg_info) |
2509 | return -EINVAL; |
2510 | bitmap_copy(dst: agg_info->replay_tc_bitmap, src: tc_bitmap, |
2511 | ICE_MAX_TRAFFIC_CLASS); |
2512 | return 0; |
2513 | } |
2514 | |
2515 | /** |
2516 | * ice_sched_add_agg_cfg - create an aggregator node |
2517 | * @pi: port information structure |
2518 | * @agg_id: aggregator ID |
2519 | * @tc: TC number |
2520 | * |
2521 | * This function creates an aggregator node and intermediate nodes if required |
2522 | * for the given TC |
2523 | */ |
2524 | static int |
2525 | ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) |
2526 | { |
2527 | struct ice_sched_node *parent, *agg_node, *tc_node; |
2528 | u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; |
2529 | struct ice_hw *hw = pi->hw; |
2530 | u32 first_node_teid; |
2531 | u16 num_nodes_added; |
2532 | int status = 0; |
2533 | u8 i, aggl; |
2534 | |
2535 | tc_node = ice_sched_get_tc_node(pi, tc); |
2536 | if (!tc_node) |
2537 | return -EIO; |
2538 | |
2539 | agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); |
2540 | /* Does Agg node already exist ? */ |
2541 | if (agg_node) |
2542 | return status; |
2543 | |
2544 | aggl = ice_sched_get_agg_layer(hw); |
2545 | |
2546 | /* need one node in Agg layer */ |
2547 | num_nodes[aggl] = 1; |
2548 | |
2549 | /* Check whether the intermediate nodes have space to add the |
2550 | * new aggregator. If they are full, then SW needs to allocate a new |
2551 | * intermediate node on those layers |
2552 | */ |
2553 | for (i = hw->sw_entry_point_layer; i < aggl; i++) { |
2554 | parent = ice_sched_get_first_node(pi, parent: tc_node, layer: i); |
2555 | |
2556 | /* scan all the siblings */ |
2557 | while (parent) { |
2558 | if (parent->num_children < hw->max_children[i]) |
2559 | break; |
2560 | parent = parent->sibling; |
2561 | } |
2562 | |
2563 | /* all the nodes are full, reserve one for this layer */ |
2564 | if (!parent) |
2565 | num_nodes[i]++; |
2566 | } |
2567 | |
2568 | /* add the aggregator node */ |
2569 | parent = tc_node; |
2570 | for (i = hw->sw_entry_point_layer; i <= aggl; i++) { |
2571 | if (!parent) |
2572 | return -EIO; |
2573 | |
2574 | status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, layer: i, |
2575 | num_nodes: num_nodes[i], |
2576 | first_node_teid: &first_node_teid, |
2577 | num_nodes_added: &num_nodes_added); |
2578 | if (status || num_nodes[i] != num_nodes_added) |
2579 | return -EIO; |
2580 | |
2581 | /* The newly added node can be a new parent for the next |
2582 | * layer nodes |
2583 | */ |
2584 | if (num_nodes_added) { |
2585 | parent = ice_sched_find_node_by_teid(start_node: tc_node, |
2586 | teid: first_node_teid); |
2587 | /* register aggregator ID with the aggregator node */ |
2588 | if (parent && i == aggl) |
2589 | parent->agg_id = agg_id; |
2590 | } else { |
2591 | parent = parent->children[0]; |
2592 | } |
2593 | } |
2594 | |
2595 | return 0; |
2596 | } |
2597 | |
2598 | /** |
2599 | * ice_sched_cfg_agg - configure aggregator node |
2600 | * @pi: port information structure |
2601 | * @agg_id: aggregator ID |
2602 | * @agg_type: aggregator type queue, VSI, or aggregator group |
2603 | * @tc_bitmap: bits TC bitmap |
2604 | * |
2605 | * It registers a unique aggregator node into scheduler services. It |
2606 | * allows a user to register with a unique ID to track it's resources. |
2607 | * The aggregator type determines if this is a queue group, VSI group |
2608 | * or aggregator group. It then creates the aggregator node(s) for requested |
2609 | * TC(s) or removes an existing aggregator node including its configuration |
2610 | * if indicated via tc_bitmap. Call ice_rm_agg_cfg to release aggregator |
2611 | * resources and remove aggregator ID. |
2612 | * This function needs to be called with scheduler lock held. |
2613 | */ |
2614 | static int |
2615 | ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id, |
2616 | enum ice_agg_type agg_type, unsigned long *tc_bitmap) |
2617 | { |
2618 | struct ice_sched_agg_info *agg_info; |
2619 | struct ice_hw *hw = pi->hw; |
2620 | int status = 0; |
2621 | u8 tc; |
2622 | |
2623 | agg_info = ice_get_agg_info(hw, agg_id); |
2624 | if (!agg_info) { |
2625 | /* Create new entry for new aggregator ID */ |
2626 | agg_info = devm_kzalloc(dev: ice_hw_to_dev(hw), size: sizeof(*agg_info), |
2627 | GFP_KERNEL); |
2628 | if (!agg_info) |
2629 | return -ENOMEM; |
2630 | |
2631 | agg_info->agg_id = agg_id; |
2632 | agg_info->agg_type = agg_type; |
2633 | agg_info->tc_bitmap[0] = 0; |
2634 | |
2635 | /* Initialize the aggregator VSI list head */ |
2636 | INIT_LIST_HEAD(list: &agg_info->agg_vsi_list); |
2637 | |
2638 | /* Add new entry in aggregator list */ |
2639 | list_add(new: &agg_info->list_entry, head: &hw->agg_list); |
2640 | } |
2641 | /* Create aggregator node(s) for requested TC(s) */ |
2642 | ice_for_each_traffic_class(tc) { |
2643 | if (!ice_is_tc_ena(bitmap: *tc_bitmap, tc)) { |
2644 | /* Delete aggregator cfg TC if it exists previously */ |
2645 | status = ice_rm_agg_cfg_tc(pi, agg_info, tc, rm_vsi_info: false); |
2646 | if (status) |
2647 | break; |
2648 | continue; |
2649 | } |
2650 | |
2651 | /* Check if aggregator node for TC already exists */ |
2652 | if (ice_is_tc_ena(bitmap: agg_info->tc_bitmap[0], tc)) |
2653 | continue; |
2654 | |
2655 | /* Create new aggregator node for TC */ |
2656 | status = ice_sched_add_agg_cfg(pi, agg_id, tc); |
2657 | if (status) |
2658 | break; |
2659 | |
2660 | /* Save aggregator node's TC information */ |
2661 | set_bit(nr: tc, addr: agg_info->tc_bitmap); |
2662 | } |
2663 | |
2664 | return status; |
2665 | } |
2666 | |
2667 | /** |
2668 | * ice_cfg_agg - config aggregator node |
2669 | * @pi: port information structure |
2670 | * @agg_id: aggregator ID |
2671 | * @agg_type: aggregator type queue, VSI, or aggregator group |
2672 | * @tc_bitmap: bits TC bitmap |
2673 | * |
2674 | * This function configures aggregator node(s). |
2675 | */ |
2676 | int |
2677 | ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type, |
2678 | u8 tc_bitmap) |
2679 | { |
2680 | unsigned long bitmap = tc_bitmap; |
2681 | int status; |
2682 | |
2683 | mutex_lock(&pi->sched_lock); |
2684 | status = ice_sched_cfg_agg(pi, agg_id, agg_type, tc_bitmap: &bitmap); |
2685 | if (!status) |
2686 | status = ice_save_agg_tc_bitmap(pi, agg_id, tc_bitmap: &bitmap); |
2687 | mutex_unlock(lock: &pi->sched_lock); |
2688 | return status; |
2689 | } |
2690 | |
2691 | /** |
2692 | * ice_get_agg_vsi_info - get the aggregator ID |
2693 | * @agg_info: aggregator info |
2694 | * @vsi_handle: software VSI handle |
2695 | * |
2696 | * The function returns aggregator VSI info based on VSI handle. This function |
2697 | * needs to be called with scheduler lock held. |
2698 | */ |
2699 | static struct ice_sched_agg_vsi_info * |
2700 | ice_get_agg_vsi_info(struct ice_sched_agg_info *agg_info, u16 vsi_handle) |
2701 | { |
2702 | struct ice_sched_agg_vsi_info *agg_vsi_info; |
2703 | |
2704 | list_for_each_entry(agg_vsi_info, &agg_info->agg_vsi_list, list_entry) |
2705 | if (agg_vsi_info->vsi_handle == vsi_handle) |
2706 | return agg_vsi_info; |
2707 | |
2708 | return NULL; |
2709 | } |
2710 | |
2711 | /** |
2712 | * ice_get_vsi_agg_info - get the aggregator info of VSI |
2713 | * @hw: pointer to the hardware structure |
2714 | * @vsi_handle: Sw VSI handle |
2715 | * |
2716 | * The function returns aggregator info of VSI represented via vsi_handle. The |
2717 | * VSI has in this case a different aggregator than the default one. This |
2718 | * function needs to be called with scheduler lock held. |
2719 | */ |
2720 | static struct ice_sched_agg_info * |
2721 | ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle) |
2722 | { |
2723 | struct ice_sched_agg_info *agg_info; |
2724 | |
2725 | list_for_each_entry(agg_info, &hw->agg_list, list_entry) { |
2726 | struct ice_sched_agg_vsi_info *agg_vsi_info; |
2727 | |
2728 | agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); |
2729 | if (agg_vsi_info) |
2730 | return agg_info; |
2731 | } |
2732 | return NULL; |
2733 | } |
2734 | |
2735 | /** |
2736 | * ice_save_agg_vsi_tc_bitmap - save aggregator VSI TC bitmap |
2737 | * @pi: port information structure |
2738 | * @agg_id: aggregator ID |
2739 | * @vsi_handle: software VSI handle |
2740 | * @tc_bitmap: TC bitmap of enabled TC(s) |
2741 | * |
2742 | * Save VSI to aggregator TC bitmap. This function needs to call with scheduler |
2743 | * lock held. |
2744 | */ |
2745 | static int |
2746 | ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, |
2747 | unsigned long *tc_bitmap) |
2748 | { |
2749 | struct ice_sched_agg_vsi_info *agg_vsi_info; |
2750 | struct ice_sched_agg_info *agg_info; |
2751 | |
2752 | agg_info = ice_get_agg_info(hw: pi->hw, agg_id); |
2753 | if (!agg_info) |
2754 | return -EINVAL; |
2755 | /* check if entry already exist */ |
2756 | agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); |
2757 | if (!agg_vsi_info) |
2758 | return -EINVAL; |
2759 | bitmap_copy(dst: agg_vsi_info->replay_tc_bitmap, src: tc_bitmap, |
2760 | ICE_MAX_TRAFFIC_CLASS); |
2761 | return 0; |
2762 | } |
2763 | |
2764 | /** |
2765 | * ice_sched_assoc_vsi_to_agg - associate/move VSI to new/default aggregator |
2766 | * @pi: port information structure |
2767 | * @agg_id: aggregator ID |
2768 | * @vsi_handle: software VSI handle |
2769 | * @tc_bitmap: TC bitmap of enabled TC(s) |
2770 | * |
2771 | * This function moves VSI to a new or default aggregator node. If VSI is |
2772 | * already associated to the aggregator node then no operation is performed on |
2773 | * the tree. This function needs to be called with scheduler lock held. |
2774 | */ |
2775 | static int |
2776 | ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, |
2777 | u16 vsi_handle, unsigned long *tc_bitmap) |
2778 | { |
2779 | struct ice_sched_agg_vsi_info *agg_vsi_info, *iter, *old_agg_vsi_info = NULL; |
2780 | struct ice_sched_agg_info *agg_info, *old_agg_info; |
2781 | struct ice_hw *hw = pi->hw; |
2782 | int status = 0; |
2783 | u8 tc; |
2784 | |
2785 | if (!ice_is_vsi_valid(hw: pi->hw, vsi_handle)) |
2786 | return -EINVAL; |
2787 | agg_info = ice_get_agg_info(hw, agg_id); |
2788 | if (!agg_info) |
2789 | return -EINVAL; |
2790 | /* If the VSI is already part of another aggregator then update |
2791 | * its VSI info list |
2792 | */ |
2793 | old_agg_info = ice_get_vsi_agg_info(hw, vsi_handle); |
2794 | if (old_agg_info && old_agg_info != agg_info) { |
2795 | struct ice_sched_agg_vsi_info *vtmp; |
2796 | |
2797 | list_for_each_entry_safe(iter, vtmp, |
2798 | &old_agg_info->agg_vsi_list, |
2799 | list_entry) |
2800 | if (iter->vsi_handle == vsi_handle) { |
2801 | old_agg_vsi_info = iter; |
2802 | break; |
2803 | } |
2804 | } |
2805 | |
2806 | /* check if entry already exist */ |
2807 | agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); |
2808 | if (!agg_vsi_info) { |
2809 | /* Create new entry for VSI under aggregator list */ |
2810 | agg_vsi_info = devm_kzalloc(dev: ice_hw_to_dev(hw), |
2811 | size: sizeof(*agg_vsi_info), GFP_KERNEL); |
2812 | if (!agg_vsi_info) |
2813 | return -EINVAL; |
2814 | |
2815 | /* add VSI ID into the aggregator list */ |
2816 | agg_vsi_info->vsi_handle = vsi_handle; |
2817 | list_add(new: &agg_vsi_info->list_entry, head: &agg_info->agg_vsi_list); |
2818 | } |
2819 | /* Move VSI node to new aggregator node for requested TC(s) */ |
2820 | ice_for_each_traffic_class(tc) { |
2821 | if (!ice_is_tc_ena(bitmap: *tc_bitmap, tc)) |
2822 | continue; |
2823 | |
2824 | /* Move VSI to new aggregator */ |
2825 | status = ice_sched_move_vsi_to_agg(pi, vsi_handle, agg_id, tc); |
2826 | if (status) |
2827 | break; |
2828 | |
2829 | set_bit(nr: tc, addr: agg_vsi_info->tc_bitmap); |
2830 | if (old_agg_vsi_info) |
2831 | clear_bit(nr: tc, addr: old_agg_vsi_info->tc_bitmap); |
2832 | } |
2833 | if (old_agg_vsi_info && !old_agg_vsi_info->tc_bitmap[0]) { |
2834 | list_del(entry: &old_agg_vsi_info->list_entry); |
2835 | devm_kfree(dev: ice_hw_to_dev(hw: pi->hw), p: old_agg_vsi_info); |
2836 | } |
2837 | return status; |
2838 | } |
2839 | |
2840 | /** |
2841 | * ice_sched_rm_unused_rl_prof - remove unused RL profile |
2842 | * @pi: port information structure |
2843 | * |
2844 | * This function removes unused rate limit profiles from the HW and |
2845 | * SW DB. The caller needs to hold scheduler lock. |
2846 | */ |
2847 | static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi) |
2848 | { |
2849 | u16 ln; |
2850 | |
2851 | for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) { |
2852 | struct ice_aqc_rl_profile_info *rl_prof_elem; |
2853 | struct ice_aqc_rl_profile_info *rl_prof_tmp; |
2854 | |
2855 | list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp, |
2856 | &pi->rl_prof_list[ln], list_entry) { |
2857 | if (!ice_sched_del_rl_profile(hw: pi->hw, rl_info: rl_prof_elem)) |
2858 | ice_debug(pi->hw, ICE_DBG_SCHED, "Removed rl profile\n" ); |
2859 | } |
2860 | } |
2861 | } |
2862 | |
2863 | /** |
2864 | * ice_sched_update_elem - update element |
2865 | * @hw: pointer to the HW struct |
2866 | * @node: pointer to node |
2867 | * @info: node info to update |
2868 | * |
2869 | * Update the HW DB, and local SW DB of node. Update the scheduling |
2870 | * parameters of node from argument info data buffer (Info->data buf) and |
2871 | * returns success or error on config sched element failure. The caller |
2872 | * needs to hold scheduler lock. |
2873 | */ |
2874 | static int |
2875 | ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node, |
2876 | struct ice_aqc_txsched_elem_data *info) |
2877 | { |
2878 | struct ice_aqc_txsched_elem_data buf; |
2879 | u16 elem_cfgd = 0; |
2880 | u16 num_elems = 1; |
2881 | int status; |
2882 | |
2883 | buf = *info; |
2884 | /* Parent TEID is reserved field in this aq call */ |
2885 | buf.parent_teid = 0; |
2886 | /* Element type is reserved field in this aq call */ |
2887 | buf.data.elem_type = 0; |
2888 | /* Flags is reserved field in this aq call */ |
2889 | buf.data.flags = 0; |
2890 | |
2891 | /* Update HW DB */ |
2892 | /* Configure element node */ |
2893 | status = ice_aq_cfg_sched_elems(hw, elems_req: num_elems, buf: &buf, buf_size: sizeof(buf), |
2894 | elems_cfgd: &elem_cfgd, NULL); |
2895 | if (status || elem_cfgd != num_elems) { |
2896 | ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n" ); |
2897 | return -EIO; |
2898 | } |
2899 | |
2900 | /* Config success case */ |
2901 | /* Now update local SW DB */ |
2902 | /* Only copy the data portion of info buffer */ |
2903 | node->info.data = info->data; |
2904 | return status; |
2905 | } |
2906 | |
2907 | /** |
2908 | * ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params |
2909 | * @hw: pointer to the HW struct |
2910 | * @node: sched node to configure |
2911 | * @rl_type: rate limit type CIR, EIR, or shared |
2912 | * @bw_alloc: BW weight/allocation |
2913 | * |
2914 | * This function configures node element's BW allocation. |
2915 | */ |
2916 | static int |
2917 | ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node, |
2918 | enum ice_rl_type rl_type, u16 bw_alloc) |
2919 | { |
2920 | struct ice_aqc_txsched_elem_data buf; |
2921 | struct ice_aqc_txsched_elem *data; |
2922 | |
2923 | buf = node->info; |
2924 | data = &buf.data; |
2925 | if (rl_type == ICE_MIN_BW) { |
2926 | data->valid_sections |= ICE_AQC_ELEM_VALID_CIR; |
2927 | data->cir_bw.bw_alloc = cpu_to_le16(bw_alloc); |
2928 | } else if (rl_type == ICE_MAX_BW) { |
2929 | data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; |
2930 | data->eir_bw.bw_alloc = cpu_to_le16(bw_alloc); |
2931 | } else { |
2932 | return -EINVAL; |
2933 | } |
2934 | |
2935 | /* Configure element */ |
2936 | return ice_sched_update_elem(hw, node, info: &buf); |
2937 | } |
2938 | |
2939 | /** |
2940 | * ice_move_vsi_to_agg - moves VSI to new or default aggregator |
2941 | * @pi: port information structure |
2942 | * @agg_id: aggregator ID |
2943 | * @vsi_handle: software VSI handle |
2944 | * @tc_bitmap: TC bitmap of enabled TC(s) |
2945 | * |
2946 | * Move or associate VSI to a new or default aggregator node. |
2947 | */ |
2948 | int |
2949 | ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, |
2950 | u8 tc_bitmap) |
2951 | { |
2952 | unsigned long bitmap = tc_bitmap; |
2953 | int status; |
2954 | |
2955 | mutex_lock(&pi->sched_lock); |
2956 | status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle, |
2957 | tc_bitmap: (unsigned long *)&bitmap); |
2958 | if (!status) |
2959 | status = ice_save_agg_vsi_tc_bitmap(pi, agg_id, vsi_handle, |
2960 | tc_bitmap: (unsigned long *)&bitmap); |
2961 | mutex_unlock(lock: &pi->sched_lock); |
2962 | return status; |
2963 | } |
2964 | |
2965 | /** |
2966 | * ice_set_clear_cir_bw - set or clear CIR BW |
2967 | * @bw_t_info: bandwidth type information structure |
2968 | * @bw: bandwidth in Kbps - Kilo bits per sec |
2969 | * |
2970 | * Save or clear CIR bandwidth (BW) in the passed param bw_t_info. |
2971 | */ |
2972 | static void ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw) |
2973 | { |
2974 | if (bw == ICE_SCHED_DFLT_BW) { |
2975 | clear_bit(nr: ICE_BW_TYPE_CIR, addr: bw_t_info->bw_t_bitmap); |
2976 | bw_t_info->cir_bw.bw = 0; |
2977 | } else { |
2978 | /* Save type of BW information */ |
2979 | set_bit(nr: ICE_BW_TYPE_CIR, addr: bw_t_info->bw_t_bitmap); |
2980 | bw_t_info->cir_bw.bw = bw; |
2981 | } |
2982 | } |
2983 | |
2984 | /** |
2985 | * ice_set_clear_eir_bw - set or clear EIR BW |
2986 | * @bw_t_info: bandwidth type information structure |
2987 | * @bw: bandwidth in Kbps - Kilo bits per sec |
2988 | * |
2989 | * Save or clear EIR bandwidth (BW) in the passed param bw_t_info. |
2990 | */ |
2991 | static void ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw) |
2992 | { |
2993 | if (bw == ICE_SCHED_DFLT_BW) { |
2994 | clear_bit(nr: ICE_BW_TYPE_EIR, addr: bw_t_info->bw_t_bitmap); |
2995 | bw_t_info->eir_bw.bw = 0; |
2996 | } else { |
2997 | /* EIR BW and Shared BW profiles are mutually exclusive and |
2998 | * hence only one of them may be set for any given element. |
2999 | * First clear earlier saved shared BW information. |
3000 | */ |
3001 | clear_bit(nr: ICE_BW_TYPE_SHARED, addr: bw_t_info->bw_t_bitmap); |
3002 | bw_t_info->shared_bw = 0; |
3003 | /* save EIR BW information */ |
3004 | set_bit(nr: ICE_BW_TYPE_EIR, addr: bw_t_info->bw_t_bitmap); |
3005 | bw_t_info->eir_bw.bw = bw; |
3006 | } |
3007 | } |
3008 | |
3009 | /** |
3010 | * ice_set_clear_shared_bw - set or clear shared BW |
3011 | * @bw_t_info: bandwidth type information structure |
3012 | * @bw: bandwidth in Kbps - Kilo bits per sec |
3013 | * |
3014 | * Save or clear shared bandwidth (BW) in the passed param bw_t_info. |
3015 | */ |
3016 | static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw) |
3017 | { |
3018 | if (bw == ICE_SCHED_DFLT_BW) { |
3019 | clear_bit(nr: ICE_BW_TYPE_SHARED, addr: bw_t_info->bw_t_bitmap); |
3020 | bw_t_info->shared_bw = 0; |
3021 | } else { |
3022 | /* EIR BW and Shared BW profiles are mutually exclusive and |
3023 | * hence only one of them may be set for any given element. |
3024 | * First clear earlier saved EIR BW information. |
3025 | */ |
3026 | clear_bit(nr: ICE_BW_TYPE_EIR, addr: bw_t_info->bw_t_bitmap); |
3027 | bw_t_info->eir_bw.bw = 0; |
3028 | /* save shared BW information */ |
3029 | set_bit(nr: ICE_BW_TYPE_SHARED, addr: bw_t_info->bw_t_bitmap); |
3030 | bw_t_info->shared_bw = bw; |
3031 | } |
3032 | } |
3033 | |
3034 | /** |
3035 | * ice_sched_save_vsi_bw - save VSI node's BW information |
3036 | * @pi: port information structure |
3037 | * @vsi_handle: sw VSI handle |
3038 | * @tc: traffic class |
3039 | * @rl_type: rate limit type min, max, or shared |
3040 | * @bw: bandwidth in Kbps - Kilo bits per sec |
3041 | * |
3042 | * Save BW information of VSI type node for post replay use. |
3043 | */ |
3044 | static int |
3045 | ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc, |
3046 | enum ice_rl_type rl_type, u32 bw) |
3047 | { |
3048 | struct ice_vsi_ctx *vsi_ctx; |
3049 | |
3050 | if (!ice_is_vsi_valid(hw: pi->hw, vsi_handle)) |
3051 | return -EINVAL; |
3052 | vsi_ctx = ice_get_vsi_ctx(hw: pi->hw, vsi_handle); |
3053 | if (!vsi_ctx) |
3054 | return -EINVAL; |
3055 | switch (rl_type) { |
3056 | case ICE_MIN_BW: |
3057 | ice_set_clear_cir_bw(bw_t_info: &vsi_ctx->sched.bw_t_info[tc], bw); |
3058 | break; |
3059 | case ICE_MAX_BW: |
3060 | ice_set_clear_eir_bw(bw_t_info: &vsi_ctx->sched.bw_t_info[tc], bw); |
3061 | break; |
3062 | case ICE_SHARED_BW: |
3063 | ice_set_clear_shared_bw(bw_t_info: &vsi_ctx->sched.bw_t_info[tc], bw); |
3064 | break; |
3065 | default: |
3066 | return -EINVAL; |
3067 | } |
3068 | return 0; |
3069 | } |
3070 | |
3071 | /** |
3072 | * ice_sched_calc_wakeup - calculate RL profile wakeup parameter |
3073 | * @hw: pointer to the HW struct |
3074 | * @bw: bandwidth in Kbps |
3075 | * |
3076 | * This function calculates the wakeup parameter of RL profile. |
3077 | */ |
3078 | static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw) |
3079 | { |
3080 | s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f; |
3081 | s32 wakeup_f_int; |
3082 | u16 wakeup = 0; |
3083 | |
3084 | /* Get the wakeup integer value */ |
3085 | bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE); |
3086 | wakeup_int = div64_long(hw->psm_clk_freq, bytes_per_sec); |
3087 | if (wakeup_int > 63) { |
3088 | wakeup = (u16)((1 << 15) | wakeup_int); |
3089 | } else { |
3090 | /* Calculate fraction value up to 4 decimals |
3091 | * Convert Integer value to a constant multiplier |
3092 | */ |
3093 | wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int; |
3094 | wakeup_a = div64_long((s64)ICE_RL_PROF_MULTIPLIER * |
3095 | hw->psm_clk_freq, bytes_per_sec); |
3096 | |
3097 | /* Get Fraction value */ |
3098 | wakeup_f = wakeup_a - wakeup_b; |
3099 | |
3100 | /* Round up the Fractional value via Ceil(Fractional value) */ |
3101 | if (wakeup_f > div64_long(ICE_RL_PROF_MULTIPLIER, 2)) |
3102 | wakeup_f += 1; |
3103 | |
3104 | wakeup_f_int = (s32)div64_long(wakeup_f * ICE_RL_PROF_FRACTION, |
3105 | ICE_RL_PROF_MULTIPLIER); |
3106 | wakeup |= (u16)(wakeup_int << 9); |
3107 | wakeup |= (u16)(0x1ff & wakeup_f_int); |
3108 | } |
3109 | |
3110 | return wakeup; |
3111 | } |
3112 | |
3113 | /** |
3114 | * ice_sched_bw_to_rl_profile - convert BW to profile parameters |
3115 | * @hw: pointer to the HW struct |
3116 | * @bw: bandwidth in Kbps |
3117 | * @profile: profile parameters to return |
3118 | * |
3119 | * This function converts the BW to profile structure format. |
3120 | */ |
3121 | static int |
3122 | ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw, |
3123 | struct ice_aqc_rl_profile_elem *profile) |
3124 | { |
3125 | s64 bytes_per_sec, ts_rate, mv_tmp; |
3126 | int status = -EINVAL; |
3127 | bool found = false; |
3128 | s32 encode = 0; |
3129 | s64 mv = 0; |
3130 | s32 i; |
3131 | |
3132 | /* Bw settings range is from 0.5Mb/sec to 100Gb/sec */ |
3133 | if (bw < ICE_SCHED_MIN_BW || bw > ICE_SCHED_MAX_BW) |
3134 | return status; |
3135 | |
3136 | /* Bytes per second from Kbps */ |
3137 | bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE); |
3138 | |
3139 | /* encode is 6 bits but really useful are 5 bits */ |
3140 | for (i = 0; i < 64; i++) { |
3141 | u64 pow_result = BIT_ULL(i); |
3142 | |
3143 | ts_rate = div64_long((s64)hw->psm_clk_freq, |
3144 | pow_result * ICE_RL_PROF_TS_MULTIPLIER); |
3145 | if (ts_rate <= 0) |
3146 | continue; |
3147 | |
3148 | /* Multiplier value */ |
3149 | mv_tmp = div64_long(bytes_per_sec * ICE_RL_PROF_MULTIPLIER, |
3150 | ts_rate); |
3151 | |
3152 | /* Round to the nearest ICE_RL_PROF_MULTIPLIER */ |
3153 | mv = round_up_64bit(a: mv_tmp, ICE_RL_PROF_MULTIPLIER); |
3154 | |
3155 | /* First multiplier value greater than the given |
3156 | * accuracy bytes |
3157 | */ |
3158 | if (mv > ICE_RL_PROF_ACCURACY_BYTES) { |
3159 | encode = i; |
3160 | found = true; |
3161 | break; |
3162 | } |
3163 | } |
3164 | if (found) { |
3165 | u16 wm; |
3166 | |
3167 | wm = ice_sched_calc_wakeup(hw, bw); |
3168 | profile->rl_multiply = cpu_to_le16(mv); |
3169 | profile->wake_up_calc = cpu_to_le16(wm); |
3170 | profile->rl_encode = cpu_to_le16(encode); |
3171 | status = 0; |
3172 | } else { |
3173 | status = -ENOENT; |
3174 | } |
3175 | |
3176 | return status; |
3177 | } |
3178 | |
3179 | /** |
3180 | * ice_sched_add_rl_profile - add RL profile |
3181 | * @pi: port information structure |
3182 | * @rl_type: type of rate limit BW - min, max, or shared |
3183 | * @bw: bandwidth in Kbps - Kilo bits per sec |
3184 | * @layer_num: specifies in which layer to create profile |
3185 | * |
3186 | * This function first checks the existing list for corresponding BW |
3187 | * parameter. If it exists, it returns the associated profile otherwise |
3188 | * it creates a new rate limit profile for requested BW, and adds it to |
3189 | * the HW DB and local list. It returns the new profile or null on error. |
3190 | * The caller needs to hold the scheduler lock. |
3191 | */ |
3192 | static struct ice_aqc_rl_profile_info * |
3193 | ice_sched_add_rl_profile(struct ice_port_info *pi, |
3194 | enum ice_rl_type rl_type, u32 bw, u8 layer_num) |
3195 | { |
3196 | struct ice_aqc_rl_profile_info *rl_prof_elem; |
3197 | u16 profiles_added = 0, num_profiles = 1; |
3198 | struct ice_aqc_rl_profile_elem *buf; |
3199 | struct ice_hw *hw; |
3200 | u8 profile_type; |
3201 | int status; |
3202 | |
3203 | if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM) |
3204 | return NULL; |
3205 | switch (rl_type) { |
3206 | case ICE_MIN_BW: |
3207 | profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR; |
3208 | break; |
3209 | case ICE_MAX_BW: |
3210 | profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR; |
3211 | break; |
3212 | case ICE_SHARED_BW: |
3213 | profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL; |
3214 | break; |
3215 | default: |
3216 | return NULL; |
3217 | } |
3218 | |
3219 | if (!pi) |
3220 | return NULL; |
3221 | hw = pi->hw; |
3222 | list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num], |
3223 | list_entry) |
3224 | if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) == |
3225 | profile_type && rl_prof_elem->bw == bw) |
3226 | /* Return existing profile ID info */ |
3227 | return rl_prof_elem; |
3228 | |
3229 | /* Create new profile ID */ |
3230 | rl_prof_elem = devm_kzalloc(dev: ice_hw_to_dev(hw), size: sizeof(*rl_prof_elem), |
3231 | GFP_KERNEL); |
3232 | |
3233 | if (!rl_prof_elem) |
3234 | return NULL; |
3235 | |
3236 | status = ice_sched_bw_to_rl_profile(hw, bw, profile: &rl_prof_elem->profile); |
3237 | if (status) |
3238 | goto exit_add_rl_prof; |
3239 | |
3240 | rl_prof_elem->bw = bw; |
3241 | /* layer_num is zero relative, and fw expects level from 1 to 9 */ |
3242 | rl_prof_elem->profile.level = layer_num + 1; |
3243 | rl_prof_elem->profile.flags = profile_type; |
3244 | rl_prof_elem->profile.max_burst_size = cpu_to_le16(hw->max_burst_size); |
3245 | |
3246 | /* Create new entry in HW DB */ |
3247 | buf = &rl_prof_elem->profile; |
3248 | status = ice_aq_add_rl_profile(hw, num_profiles, buf, buf_size: sizeof(*buf), |
3249 | num_profiles_added: &profiles_added, NULL); |
3250 | if (status || profiles_added != num_profiles) |
3251 | goto exit_add_rl_prof; |
3252 | |
3253 | /* Good entry - add in the list */ |
3254 | rl_prof_elem->prof_id_ref = 0; |
3255 | list_add(new: &rl_prof_elem->list_entry, head: &pi->rl_prof_list[layer_num]); |
3256 | return rl_prof_elem; |
3257 | |
3258 | exit_add_rl_prof: |
3259 | devm_kfree(dev: ice_hw_to_dev(hw), p: rl_prof_elem); |
3260 | return NULL; |
3261 | } |
3262 | |
3263 | /** |
3264 | * ice_sched_cfg_node_bw_lmt - configure node sched params |
3265 | * @hw: pointer to the HW struct |
3266 | * @node: sched node to configure |
3267 | * @rl_type: rate limit type CIR, EIR, or shared |
3268 | * @rl_prof_id: rate limit profile ID |
3269 | * |
3270 | * This function configures node element's BW limit. |
3271 | */ |
3272 | static int |
3273 | ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node, |
3274 | enum ice_rl_type rl_type, u16 rl_prof_id) |
3275 | { |
3276 | struct ice_aqc_txsched_elem_data buf; |
3277 | struct ice_aqc_txsched_elem *data; |
3278 | |
3279 | buf = node->info; |
3280 | data = &buf.data; |
3281 | switch (rl_type) { |
3282 | case ICE_MIN_BW: |
3283 | data->valid_sections |= ICE_AQC_ELEM_VALID_CIR; |
3284 | data->cir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id); |
3285 | break; |
3286 | case ICE_MAX_BW: |
3287 | /* EIR BW and Shared BW profiles are mutually exclusive and |
3288 | * hence only one of them may be set for any given element |
3289 | */ |
3290 | if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED) |
3291 | return -EIO; |
3292 | data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; |
3293 | data->eir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id); |
3294 | break; |
3295 | case ICE_SHARED_BW: |
3296 | /* Check for removing shared BW */ |
3297 | if (rl_prof_id == ICE_SCHED_NO_SHARED_RL_PROF_ID) { |
3298 | /* remove shared profile */ |
3299 | data->valid_sections &= ~ICE_AQC_ELEM_VALID_SHARED; |
3300 | data->srl_id = 0; /* clear SRL field */ |
3301 | |
3302 | /* enable back EIR to default profile */ |
3303 | data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; |
3304 | data->eir_bw.bw_profile_idx = |
3305 | cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); |
3306 | break; |
3307 | } |
3308 | /* EIR BW and Shared BW profiles are mutually exclusive and |
3309 | * hence only one of them may be set for any given element |
3310 | */ |
3311 | if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) && |
3312 | (le16_to_cpu(data->eir_bw.bw_profile_idx) != |
3313 | ICE_SCHED_DFLT_RL_PROF_ID)) |
3314 | return -EIO; |
3315 | /* EIR BW is set to default, disable it */ |
3316 | data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR; |
3317 | /* Okay to enable shared BW now */ |
3318 | data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED; |
3319 | data->srl_id = cpu_to_le16(rl_prof_id); |
3320 | break; |
3321 | default: |
3322 | /* Unknown rate limit type */ |
3323 | return -EINVAL; |
3324 | } |
3325 | |
3326 | /* Configure element */ |
3327 | return ice_sched_update_elem(hw, node, info: &buf); |
3328 | } |
3329 | |
3330 | /** |
3331 | * ice_sched_get_node_rl_prof_id - get node's rate limit profile ID |
3332 | * @node: sched node |
3333 | * @rl_type: rate limit type |
3334 | * |
3335 | * If existing profile matches, it returns the corresponding rate |
3336 | * limit profile ID, otherwise it returns an invalid ID as error. |
3337 | */ |
3338 | static u16 |
3339 | ice_sched_get_node_rl_prof_id(struct ice_sched_node *node, |
3340 | enum ice_rl_type rl_type) |
3341 | { |
3342 | u16 rl_prof_id = ICE_SCHED_INVAL_PROF_ID; |
3343 | struct ice_aqc_txsched_elem *data; |
3344 | |
3345 | data = &node->info.data; |
3346 | switch (rl_type) { |
3347 | case ICE_MIN_BW: |
3348 | if (data->valid_sections & ICE_AQC_ELEM_VALID_CIR) |
3349 | rl_prof_id = le16_to_cpu(data->cir_bw.bw_profile_idx); |
3350 | break; |
3351 | case ICE_MAX_BW: |
3352 | if (data->valid_sections & ICE_AQC_ELEM_VALID_EIR) |
3353 | rl_prof_id = le16_to_cpu(data->eir_bw.bw_profile_idx); |
3354 | break; |
3355 | case ICE_SHARED_BW: |
3356 | if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED) |
3357 | rl_prof_id = le16_to_cpu(data->srl_id); |
3358 | break; |
3359 | default: |
3360 | break; |
3361 | } |
3362 | |
3363 | return rl_prof_id; |
3364 | } |
3365 | |
3366 | /** |
3367 | * ice_sched_get_rl_prof_layer - selects rate limit profile creation layer |
3368 | * @pi: port information structure |
3369 | * @rl_type: type of rate limit BW - min, max, or shared |
3370 | * @layer_index: layer index |
3371 | * |
3372 | * This function returns requested profile creation layer. |
3373 | */ |
3374 | static u8 |
3375 | ice_sched_get_rl_prof_layer(struct ice_port_info *pi, enum ice_rl_type rl_type, |
3376 | u8 layer_index) |
3377 | { |
3378 | struct ice_hw *hw = pi->hw; |
3379 | |
3380 | if (layer_index >= hw->num_tx_sched_layers) |
3381 | return ICE_SCHED_INVAL_LAYER_NUM; |
3382 | switch (rl_type) { |
3383 | case ICE_MIN_BW: |
3384 | if (hw->layer_info[layer_index].max_cir_rl_profiles) |
3385 | return layer_index; |
3386 | break; |
3387 | case ICE_MAX_BW: |
3388 | if (hw->layer_info[layer_index].max_eir_rl_profiles) |
3389 | return layer_index; |
3390 | break; |
3391 | case ICE_SHARED_BW: |
3392 | /* if current layer doesn't support SRL profile creation |
3393 | * then try a layer up or down. |
3394 | */ |
3395 | if (hw->layer_info[layer_index].max_srl_profiles) |
3396 | return layer_index; |
3397 | else if (layer_index < hw->num_tx_sched_layers - 1 && |
3398 | hw->layer_info[layer_index + 1].max_srl_profiles) |
3399 | return layer_index + 1; |
3400 | else if (layer_index > 0 && |
3401 | hw->layer_info[layer_index - 1].max_srl_profiles) |
3402 | return layer_index - 1; |
3403 | break; |
3404 | default: |
3405 | break; |
3406 | } |
3407 | return ICE_SCHED_INVAL_LAYER_NUM; |
3408 | } |
3409 | |
3410 | /** |
3411 | * ice_sched_get_srl_node - get shared rate limit node |
3412 | * @node: tree node |
3413 | * @srl_layer: shared rate limit layer |
3414 | * |
3415 | * This function returns SRL node to be used for shared rate limit purpose. |
3416 | * The caller needs to hold scheduler lock. |
3417 | */ |
3418 | static struct ice_sched_node * |
3419 | ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer) |
3420 | { |
3421 | if (srl_layer > node->tx_sched_layer) |
3422 | return node->children[0]; |
3423 | else if (srl_layer < node->tx_sched_layer) |
3424 | /* Node can't be created without a parent. It will always |
3425 | * have a valid parent except root node. |
3426 | */ |
3427 | return node->parent; |
3428 | else |
3429 | return node; |
3430 | } |
3431 | |
3432 | /** |
3433 | * ice_sched_rm_rl_profile - remove RL profile ID |
3434 | * @pi: port information structure |
3435 | * @layer_num: layer number where profiles are saved |
3436 | * @profile_type: profile type like EIR, CIR, or SRL |
3437 | * @profile_id: profile ID to remove |
3438 | * |
3439 | * This function removes rate limit profile from layer 'layer_num' of type |
3440 | * 'profile_type' and profile ID as 'profile_id'. The caller needs to hold |
3441 | * scheduler lock. |
3442 | */ |
3443 | static int |
3444 | ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type, |
3445 | u16 profile_id) |
3446 | { |
3447 | struct ice_aqc_rl_profile_info *rl_prof_elem; |
3448 | int status = 0; |
3449 | |
3450 | if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM) |
3451 | return -EINVAL; |
3452 | /* Check the existing list for RL profile */ |
3453 | list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num], |
3454 | list_entry) |
3455 | if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) == |
3456 | profile_type && |
3457 | le16_to_cpu(rl_prof_elem->profile.profile_id) == |
3458 | profile_id) { |
3459 | if (rl_prof_elem->prof_id_ref) |
3460 | rl_prof_elem->prof_id_ref--; |
3461 | |
3462 | /* Remove old profile ID from database */ |
3463 | status = ice_sched_del_rl_profile(hw: pi->hw, rl_info: rl_prof_elem); |
3464 | if (status && status != -EBUSY) |
3465 | ice_debug(pi->hw, ICE_DBG_SCHED, "Remove rl profile failed\n" ); |
3466 | break; |
3467 | } |
3468 | if (status == -EBUSY) |
3469 | status = 0; |
3470 | return status; |
3471 | } |
3472 | |
3473 | /** |
3474 | * ice_sched_set_node_bw_dflt - set node's bandwidth limit to default |
3475 | * @pi: port information structure |
3476 | * @node: pointer to node structure |
3477 | * @rl_type: rate limit type min, max, or shared |
3478 | * @layer_num: layer number where RL profiles are saved |
3479 | * |
3480 | * This function configures node element's BW rate limit profile ID of |
3481 | * type CIR, EIR, or SRL to default. This function needs to be called |
3482 | * with the scheduler lock held. |
3483 | */ |
3484 | static int |
3485 | ice_sched_set_node_bw_dflt(struct ice_port_info *pi, |
3486 | struct ice_sched_node *node, |
3487 | enum ice_rl_type rl_type, u8 layer_num) |
3488 | { |
3489 | struct ice_hw *hw; |
3490 | u8 profile_type; |
3491 | u16 rl_prof_id; |
3492 | u16 old_id; |
3493 | int status; |
3494 | |
3495 | hw = pi->hw; |
3496 | switch (rl_type) { |
3497 | case ICE_MIN_BW: |
3498 | profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR; |
3499 | rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID; |
3500 | break; |
3501 | case ICE_MAX_BW: |
3502 | profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR; |
3503 | rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID; |
3504 | break; |
3505 | case ICE_SHARED_BW: |
3506 | profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL; |
3507 | /* No SRL is configured for default case */ |
3508 | rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID; |
3509 | break; |
3510 | default: |
3511 | return -EINVAL; |
3512 | } |
3513 | /* Save existing RL prof ID for later clean up */ |
3514 | old_id = ice_sched_get_node_rl_prof_id(node, rl_type); |
3515 | /* Configure BW scheduling parameters */ |
3516 | status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id); |
3517 | if (status) |
3518 | return status; |
3519 | |
3520 | /* Remove stale RL profile ID */ |
3521 | if (old_id == ICE_SCHED_DFLT_RL_PROF_ID || |
3522 | old_id == ICE_SCHED_INVAL_PROF_ID) |
3523 | return 0; |
3524 | |
3525 | return ice_sched_rm_rl_profile(pi, layer_num, profile_type, profile_id: old_id); |
3526 | } |
3527 | |
3528 | /** |
3529 | * ice_sched_set_eir_srl_excl - set EIR/SRL exclusiveness |
3530 | * @pi: port information structure |
3531 | * @node: pointer to node structure |
3532 | * @layer_num: layer number where rate limit profiles are saved |
3533 | * @rl_type: rate limit type min, max, or shared |
3534 | * @bw: bandwidth value |
3535 | * |
3536 | * This function prepares node element's bandwidth to SRL or EIR exclusively. |
3537 | * EIR BW and Shared BW profiles are mutually exclusive and hence only one of |
3538 | * them may be set for any given element. This function needs to be called |
3539 | * with the scheduler lock held. |
3540 | */ |
3541 | static int |
3542 | ice_sched_set_eir_srl_excl(struct ice_port_info *pi, |
3543 | struct ice_sched_node *node, |
3544 | u8 layer_num, enum ice_rl_type rl_type, u32 bw) |
3545 | { |
3546 | if (rl_type == ICE_SHARED_BW) { |
3547 | /* SRL node passed in this case, it may be different node */ |
3548 | if (bw == ICE_SCHED_DFLT_BW) |
3549 | /* SRL being removed, ice_sched_cfg_node_bw_lmt() |
3550 | * enables EIR to default. EIR is not set in this |
3551 | * case, so no additional action is required. |
3552 | */ |
3553 | return 0; |
3554 | |
3555 | /* SRL being configured, set EIR to default here. |
3556 | * ice_sched_cfg_node_bw_lmt() disables EIR when it |
3557 | * configures SRL |
3558 | */ |
3559 | return ice_sched_set_node_bw_dflt(pi, node, rl_type: ICE_MAX_BW, |
3560 | layer_num); |
3561 | } else if (rl_type == ICE_MAX_BW && |
3562 | node->info.data.valid_sections & ICE_AQC_ELEM_VALID_SHARED) { |
3563 | /* Remove Shared profile. Set default shared BW call |
3564 | * removes shared profile for a node. |
3565 | */ |
3566 | return ice_sched_set_node_bw_dflt(pi, node, |
3567 | rl_type: ICE_SHARED_BW, |
3568 | layer_num); |
3569 | } |
3570 | return 0; |
3571 | } |
3572 | |
3573 | /** |
3574 | * ice_sched_set_node_bw - set node's bandwidth |
3575 | * @pi: port information structure |
3576 | * @node: tree node |
3577 | * @rl_type: rate limit type min, max, or shared |
3578 | * @bw: bandwidth in Kbps - Kilo bits per sec |
3579 | * @layer_num: layer number |
3580 | * |
3581 | * This function adds new profile corresponding to requested BW, configures |
3582 | * node's RL profile ID of type CIR, EIR, or SRL, and removes old profile |
3583 | * ID from local database. The caller needs to hold scheduler lock. |
3584 | */ |
3585 | int |
3586 | ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node, |
3587 | enum ice_rl_type rl_type, u32 bw, u8 layer_num) |
3588 | { |
3589 | struct ice_aqc_rl_profile_info *rl_prof_info; |
3590 | struct ice_hw *hw = pi->hw; |
3591 | u16 old_id, rl_prof_id; |
3592 | int status = -EINVAL; |
3593 | |
3594 | rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num); |
3595 | if (!rl_prof_info) |
3596 | return status; |
3597 | |
3598 | rl_prof_id = le16_to_cpu(rl_prof_info->profile.profile_id); |
3599 | |
3600 | /* Save existing RL prof ID for later clean up */ |
3601 | old_id = ice_sched_get_node_rl_prof_id(node, rl_type); |
3602 | /* Configure BW scheduling parameters */ |
3603 | status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id); |
3604 | if (status) |
3605 | return status; |
3606 | |
3607 | /* New changes has been applied */ |
3608 | /* Increment the profile ID reference count */ |
3609 | rl_prof_info->prof_id_ref++; |
3610 | |
3611 | /* Check for old ID removal */ |
3612 | if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) || |
3613 | old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id) |
3614 | return 0; |
3615 | |
3616 | return ice_sched_rm_rl_profile(pi, layer_num, |
3617 | profile_type: rl_prof_info->profile.flags & |
3618 | ICE_AQC_RL_PROFILE_TYPE_M, profile_id: old_id); |
3619 | } |
3620 | |
3621 | /** |
3622 | * ice_sched_set_node_priority - set node's priority |
3623 | * @pi: port information structure |
3624 | * @node: tree node |
3625 | * @priority: number 0-7 representing priority among siblings |
3626 | * |
3627 | * This function sets priority of a node among it's siblings. |
3628 | */ |
3629 | int |
3630 | ice_sched_set_node_priority(struct ice_port_info *pi, struct ice_sched_node *node, |
3631 | u16 priority) |
3632 | { |
3633 | struct ice_aqc_txsched_elem_data buf; |
3634 | struct ice_aqc_txsched_elem *data; |
3635 | |
3636 | buf = node->info; |
3637 | data = &buf.data; |
3638 | |
3639 | data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC; |
3640 | data->generic |= FIELD_PREP(ICE_AQC_ELEM_GENERIC_PRIO_M, priority); |
3641 | |
3642 | return ice_sched_update_elem(hw: pi->hw, node, info: &buf); |
3643 | } |
3644 | |
3645 | /** |
3646 | * ice_sched_set_node_weight - set node's weight |
3647 | * @pi: port information structure |
3648 | * @node: tree node |
3649 | * @weight: number 1-200 representing weight for WFQ |
3650 | * |
3651 | * This function sets weight of the node for WFQ algorithm. |
3652 | */ |
3653 | int |
3654 | ice_sched_set_node_weight(struct ice_port_info *pi, struct ice_sched_node *node, u16 weight) |
3655 | { |
3656 | struct ice_aqc_txsched_elem_data buf; |
3657 | struct ice_aqc_txsched_elem *data; |
3658 | |
3659 | buf = node->info; |
3660 | data = &buf.data; |
3661 | |
3662 | data->valid_sections = ICE_AQC_ELEM_VALID_CIR | ICE_AQC_ELEM_VALID_EIR | |
3663 | ICE_AQC_ELEM_VALID_GENERIC; |
3664 | data->cir_bw.bw_alloc = cpu_to_le16(weight); |
3665 | data->eir_bw.bw_alloc = cpu_to_le16(weight); |
3666 | |
3667 | data->generic |= FIELD_PREP(ICE_AQC_ELEM_GENERIC_SP_M, 0x0); |
3668 | |
3669 | return ice_sched_update_elem(hw: pi->hw, node, info: &buf); |
3670 | } |
3671 | |
3672 | /** |
3673 | * ice_sched_set_node_bw_lmt - set node's BW limit |
3674 | * @pi: port information structure |
3675 | * @node: tree node |
3676 | * @rl_type: rate limit type min, max, or shared |
3677 | * @bw: bandwidth in Kbps - Kilo bits per sec |
3678 | * |
3679 | * It updates node's BW limit parameters like BW RL profile ID of type CIR, |
3680 | * EIR, or SRL. The caller needs to hold scheduler lock. |
3681 | */ |
3682 | int |
3683 | ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node, |
3684 | enum ice_rl_type rl_type, u32 bw) |
3685 | { |
3686 | struct ice_sched_node *cfg_node = node; |
3687 | int status; |
3688 | |
3689 | struct ice_hw *hw; |
3690 | u8 layer_num; |
3691 | |
3692 | if (!pi) |
3693 | return -EINVAL; |
3694 | hw = pi->hw; |
3695 | /* Remove unused RL profile IDs from HW and SW DB */ |
3696 | ice_sched_rm_unused_rl_prof(pi); |
3697 | layer_num = ice_sched_get_rl_prof_layer(pi, rl_type, |
3698 | layer_index: node->tx_sched_layer); |
3699 | if (layer_num >= hw->num_tx_sched_layers) |
3700 | return -EINVAL; |
3701 | |
3702 | if (rl_type == ICE_SHARED_BW) { |
3703 | /* SRL node may be different */ |
3704 | cfg_node = ice_sched_get_srl_node(node, srl_layer: layer_num); |
3705 | if (!cfg_node) |
3706 | return -EIO; |
3707 | } |
3708 | /* EIR BW and Shared BW profiles are mutually exclusive and |
3709 | * hence only one of them may be set for any given element |
3710 | */ |
3711 | status = ice_sched_set_eir_srl_excl(pi, node: cfg_node, layer_num, rl_type, |
3712 | bw); |
3713 | if (status) |
3714 | return status; |
3715 | if (bw == ICE_SCHED_DFLT_BW) |
3716 | return ice_sched_set_node_bw_dflt(pi, node: cfg_node, rl_type, |
3717 | layer_num); |
3718 | return ice_sched_set_node_bw(pi, node: cfg_node, rl_type, bw, layer_num); |
3719 | } |
3720 | |
3721 | /** |
3722 | * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default |
3723 | * @pi: port information structure |
3724 | * @node: pointer to node structure |
3725 | * @rl_type: rate limit type min, max, or shared |
3726 | * |
3727 | * This function configures node element's BW rate limit profile ID of |
3728 | * type CIR, EIR, or SRL to default. This function needs to be called |
3729 | * with the scheduler lock held. |
3730 | */ |
3731 | static int |
3732 | ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi, |
3733 | struct ice_sched_node *node, |
3734 | enum ice_rl_type rl_type) |
3735 | { |
3736 | return ice_sched_set_node_bw_lmt(pi, node, rl_type, |
3737 | ICE_SCHED_DFLT_BW); |
3738 | } |
3739 | |
3740 | /** |
3741 | * ice_sched_validate_srl_node - Check node for SRL applicability |
3742 | * @node: sched node to configure |
3743 | * @sel_layer: selected SRL layer |
3744 | * |
3745 | * This function checks if the SRL can be applied to a selected layer node on |
3746 | * behalf of the requested node (first argument). This function needs to be |
3747 | * called with scheduler lock held. |
3748 | */ |
3749 | static int |
3750 | ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer) |
3751 | { |
3752 | /* SRL profiles are not available on all layers. Check if the |
3753 | * SRL profile can be applied to a node above or below the |
3754 | * requested node. SRL configuration is possible only if the |
3755 | * selected layer's node has single child. |
3756 | */ |
3757 | if (sel_layer == node->tx_sched_layer || |
3758 | ((sel_layer == node->tx_sched_layer + 1) && |
3759 | node->num_children == 1) || |
3760 | ((sel_layer == node->tx_sched_layer - 1) && |
3761 | (node->parent && node->parent->num_children == 1))) |
3762 | return 0; |
3763 | |
3764 | return -EIO; |
3765 | } |
3766 | |
3767 | /** |
3768 | * ice_sched_save_q_bw - save queue node's BW information |
3769 | * @q_ctx: queue context structure |
3770 | * @rl_type: rate limit type min, max, or shared |
3771 | * @bw: bandwidth in Kbps - Kilo bits per sec |
3772 | * |
3773 | * Save BW information of queue type node for post replay use. |
3774 | */ |
3775 | static int |
3776 | ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw) |
3777 | { |
3778 | switch (rl_type) { |
3779 | case ICE_MIN_BW: |
3780 | ice_set_clear_cir_bw(bw_t_info: &q_ctx->bw_t_info, bw); |
3781 | break; |
3782 | case ICE_MAX_BW: |
3783 | ice_set_clear_eir_bw(bw_t_info: &q_ctx->bw_t_info, bw); |
3784 | break; |
3785 | case ICE_SHARED_BW: |
3786 | ice_set_clear_shared_bw(bw_t_info: &q_ctx->bw_t_info, bw); |
3787 | break; |
3788 | default: |
3789 | return -EINVAL; |
3790 | } |
3791 | return 0; |
3792 | } |
3793 | |
3794 | /** |
3795 | * ice_sched_set_q_bw_lmt - sets queue BW limit |
3796 | * @pi: port information structure |
3797 | * @vsi_handle: sw VSI handle |
3798 | * @tc: traffic class |
3799 | * @q_handle: software queue handle |
3800 | * @rl_type: min, max, or shared |
3801 | * @bw: bandwidth in Kbps |
3802 | * |
3803 | * This function sets BW limit of queue scheduling node. |
3804 | */ |
3805 | static int |
3806 | ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, |
3807 | u16 q_handle, enum ice_rl_type rl_type, u32 bw) |
3808 | { |
3809 | struct ice_sched_node *node; |
3810 | struct ice_q_ctx *q_ctx; |
3811 | int status = -EINVAL; |
3812 | |
3813 | if (!ice_is_vsi_valid(hw: pi->hw, vsi_handle)) |
3814 | return -EINVAL; |
3815 | mutex_lock(&pi->sched_lock); |
3816 | q_ctx = ice_get_lan_q_ctx(hw: pi->hw, vsi_handle, tc, q_handle); |
3817 | if (!q_ctx) |
3818 | goto exit_q_bw_lmt; |
3819 | node = ice_sched_find_node_by_teid(start_node: pi->root, teid: q_ctx->q_teid); |
3820 | if (!node) { |
3821 | ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong q_teid\n" ); |
3822 | goto exit_q_bw_lmt; |
3823 | } |
3824 | |
3825 | /* Return error if it is not a leaf node */ |
3826 | if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) |
3827 | goto exit_q_bw_lmt; |
3828 | |
3829 | /* SRL bandwidth layer selection */ |
3830 | if (rl_type == ICE_SHARED_BW) { |
3831 | u8 sel_layer; /* selected layer */ |
3832 | |
3833 | sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type, |
3834 | layer_index: node->tx_sched_layer); |
3835 | if (sel_layer >= pi->hw->num_tx_sched_layers) { |
3836 | status = -EINVAL; |
3837 | goto exit_q_bw_lmt; |
3838 | } |
3839 | status = ice_sched_validate_srl_node(node, sel_layer); |
3840 | if (status) |
3841 | goto exit_q_bw_lmt; |
3842 | } |
3843 | |
3844 | if (bw == ICE_SCHED_DFLT_BW) |
3845 | status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type); |
3846 | else |
3847 | status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw); |
3848 | |
3849 | if (!status) |
3850 | status = ice_sched_save_q_bw(q_ctx, rl_type, bw); |
3851 | |
3852 | exit_q_bw_lmt: |
3853 | mutex_unlock(lock: &pi->sched_lock); |
3854 | return status; |
3855 | } |
3856 | |
3857 | /** |
3858 | * ice_cfg_q_bw_lmt - configure queue BW limit |
3859 | * @pi: port information structure |
3860 | * @vsi_handle: sw VSI handle |
3861 | * @tc: traffic class |
3862 | * @q_handle: software queue handle |
3863 | * @rl_type: min, max, or shared |
3864 | * @bw: bandwidth in Kbps |
3865 | * |
3866 | * This function configures BW limit of queue scheduling node. |
3867 | */ |
3868 | int |
3869 | ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, |
3870 | u16 q_handle, enum ice_rl_type rl_type, u32 bw) |
3871 | { |
3872 | return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type, |
3873 | bw); |
3874 | } |
3875 | |
3876 | /** |
3877 | * ice_cfg_q_bw_dflt_lmt - configure queue BW default limit |
3878 | * @pi: port information structure |
3879 | * @vsi_handle: sw VSI handle |
3880 | * @tc: traffic class |
3881 | * @q_handle: software queue handle |
3882 | * @rl_type: min, max, or shared |
3883 | * |
3884 | * This function configures BW default limit of queue scheduling node. |
3885 | */ |
3886 | int |
3887 | ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, |
3888 | u16 q_handle, enum ice_rl_type rl_type) |
3889 | { |
3890 | return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type, |
3891 | ICE_SCHED_DFLT_BW); |
3892 | } |
3893 | |
3894 | /** |
3895 | * ice_sched_get_node_by_id_type - get node from ID type |
3896 | * @pi: port information structure |
3897 | * @id: identifier |
3898 | * @agg_type: type of aggregator |
3899 | * @tc: traffic class |
3900 | * |
3901 | * This function returns node identified by ID of type aggregator, and |
3902 | * based on traffic class (TC). This function needs to be called with |
3903 | * the scheduler lock held. |
3904 | */ |
3905 | static struct ice_sched_node * |
3906 | ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id, |
3907 | enum ice_agg_type agg_type, u8 tc) |
3908 | { |
3909 | struct ice_sched_node *node = NULL; |
3910 | |
3911 | switch (agg_type) { |
3912 | case ICE_AGG_TYPE_VSI: { |
3913 | struct ice_vsi_ctx *vsi_ctx; |
3914 | u16 vsi_handle = (u16)id; |
3915 | |
3916 | if (!ice_is_vsi_valid(hw: pi->hw, vsi_handle)) |
3917 | break; |
3918 | /* Get sched_vsi_info */ |
3919 | vsi_ctx = ice_get_vsi_ctx(hw: pi->hw, vsi_handle); |
3920 | if (!vsi_ctx) |
3921 | break; |
3922 | node = vsi_ctx->sched.vsi_node[tc]; |
3923 | break; |
3924 | } |
3925 | |
3926 | case ICE_AGG_TYPE_AGG: { |
3927 | struct ice_sched_node *tc_node; |
3928 | |
3929 | tc_node = ice_sched_get_tc_node(pi, tc); |
3930 | if (tc_node) |
3931 | node = ice_sched_get_agg_node(pi, tc_node, agg_id: id); |
3932 | break; |
3933 | } |
3934 | |
3935 | default: |
3936 | break; |
3937 | } |
3938 | |
3939 | return node; |
3940 | } |
3941 | |
3942 | /** |
3943 | * ice_sched_set_node_bw_lmt_per_tc - set node BW limit per TC |
3944 | * @pi: port information structure |
3945 | * @id: ID (software VSI handle or AGG ID) |
3946 | * @agg_type: aggregator type (VSI or AGG type node) |
3947 | * @tc: traffic class |
3948 | * @rl_type: min or max |
3949 | * @bw: bandwidth in Kbps |
3950 | * |
3951 | * This function sets BW limit of VSI or Aggregator scheduling node |
3952 | * based on TC information from passed in argument BW. |
3953 | */ |
3954 | static int |
3955 | ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id, |
3956 | enum ice_agg_type agg_type, u8 tc, |
3957 | enum ice_rl_type rl_type, u32 bw) |
3958 | { |
3959 | struct ice_sched_node *node; |
3960 | int status = -EINVAL; |
3961 | |
3962 | if (!pi) |
3963 | return status; |
3964 | |
3965 | if (rl_type == ICE_UNKNOWN_BW) |
3966 | return status; |
3967 | |
3968 | mutex_lock(&pi->sched_lock); |
3969 | node = ice_sched_get_node_by_id_type(pi, id, agg_type, tc); |
3970 | if (!node) { |
3971 | ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong id, agg type, or tc\n" ); |
3972 | goto exit_set_node_bw_lmt_per_tc; |
3973 | } |
3974 | if (bw == ICE_SCHED_DFLT_BW) |
3975 | status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type); |
3976 | else |
3977 | status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw); |
3978 | |
3979 | exit_set_node_bw_lmt_per_tc: |
3980 | mutex_unlock(lock: &pi->sched_lock); |
3981 | return status; |
3982 | } |
3983 | |
3984 | /** |
3985 | * ice_cfg_vsi_bw_lmt_per_tc - configure VSI BW limit per TC |
3986 | * @pi: port information structure |
3987 | * @vsi_handle: software VSI handle |
3988 | * @tc: traffic class |
3989 | * @rl_type: min or max |
3990 | * @bw: bandwidth in Kbps |
3991 | * |
3992 | * This function configures BW limit of VSI scheduling node based on TC |
3993 | * information. |
3994 | */ |
3995 | int |
3996 | ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, |
3997 | enum ice_rl_type rl_type, u32 bw) |
3998 | { |
3999 | int status; |
4000 | |
4001 | status = ice_sched_set_node_bw_lmt_per_tc(pi, id: vsi_handle, |
4002 | agg_type: ICE_AGG_TYPE_VSI, |
4003 | tc, rl_type, bw); |
4004 | if (!status) { |
4005 | mutex_lock(&pi->sched_lock); |
4006 | status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw); |
4007 | mutex_unlock(lock: &pi->sched_lock); |
4008 | } |
4009 | return status; |
4010 | } |
4011 | |
4012 | /** |
4013 | * ice_cfg_vsi_bw_dflt_lmt_per_tc - configure default VSI BW limit per TC |
4014 | * @pi: port information structure |
4015 | * @vsi_handle: software VSI handle |
4016 | * @tc: traffic class |
4017 | * @rl_type: min or max |
4018 | * |
4019 | * This function configures default BW limit of VSI scheduling node based on TC |
4020 | * information. |
4021 | */ |
4022 | int |
4023 | ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, |
4024 | enum ice_rl_type rl_type) |
4025 | { |
4026 | int status; |
4027 | |
4028 | status = ice_sched_set_node_bw_lmt_per_tc(pi, id: vsi_handle, |
4029 | agg_type: ICE_AGG_TYPE_VSI, |
4030 | tc, rl_type, |
4031 | ICE_SCHED_DFLT_BW); |
4032 | if (!status) { |
4033 | mutex_lock(&pi->sched_lock); |
4034 | status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, |
4035 | ICE_SCHED_DFLT_BW); |
4036 | mutex_unlock(lock: &pi->sched_lock); |
4037 | } |
4038 | return status; |
4039 | } |
4040 | |
4041 | /** |
4042 | * ice_cfg_rl_burst_size - Set burst size value |
4043 | * @hw: pointer to the HW struct |
4044 | * @bytes: burst size in bytes |
4045 | * |
4046 | * This function configures/set the burst size to requested new value. The new |
4047 | * burst size value is used for future rate limit calls. It doesn't change the |
4048 | * existing or previously created RL profiles. |
4049 | */ |
4050 | int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes) |
4051 | { |
4052 | u16 burst_size_to_prog; |
4053 | |
4054 | if (bytes < ICE_MIN_BURST_SIZE_ALLOWED || |
4055 | bytes > ICE_MAX_BURST_SIZE_ALLOWED) |
4056 | return -EINVAL; |
4057 | if (ice_round_to_num(N: bytes, R: 64) <= |
4058 | ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) { |
4059 | /* 64 byte granularity case */ |
4060 | /* Disable MSB granularity bit */ |
4061 | burst_size_to_prog = ICE_64_BYTE_GRANULARITY; |
4062 | /* round number to nearest 64 byte granularity */ |
4063 | bytes = ice_round_to_num(N: bytes, R: 64); |
4064 | /* The value is in 64 byte chunks */ |
4065 | burst_size_to_prog |= (u16)(bytes / 64); |
4066 | } else { |
4067 | /* k bytes granularity case */ |
4068 | /* Enable MSB granularity bit */ |
4069 | burst_size_to_prog = ICE_KBYTE_GRANULARITY; |
4070 | /* round number to nearest 1024 granularity */ |
4071 | bytes = ice_round_to_num(N: bytes, R: 1024); |
4072 | /* check rounding doesn't go beyond allowed */ |
4073 | if (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY) |
4074 | bytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY; |
4075 | /* The value is in k bytes */ |
4076 | burst_size_to_prog |= (u16)(bytes / 1024); |
4077 | } |
4078 | hw->max_burst_size = burst_size_to_prog; |
4079 | return 0; |
4080 | } |
4081 | |
4082 | /** |
4083 | * ice_sched_replay_node_prio - re-configure node priority |
4084 | * @hw: pointer to the HW struct |
4085 | * @node: sched node to configure |
4086 | * @priority: priority value |
4087 | * |
4088 | * This function configures node element's priority value. It |
4089 | * needs to be called with scheduler lock held. |
4090 | */ |
4091 | static int |
4092 | ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node, |
4093 | u8 priority) |
4094 | { |
4095 | struct ice_aqc_txsched_elem_data buf; |
4096 | struct ice_aqc_txsched_elem *data; |
4097 | int status; |
4098 | |
4099 | buf = node->info; |
4100 | data = &buf.data; |
4101 | data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC; |
4102 | data->generic = priority; |
4103 | |
4104 | /* Configure element */ |
4105 | status = ice_sched_update_elem(hw, node, info: &buf); |
4106 | return status; |
4107 | } |
4108 | |
4109 | /** |
4110 | * ice_sched_replay_node_bw - replay node(s) BW |
4111 | * @hw: pointer to the HW struct |
4112 | * @node: sched node to configure |
4113 | * @bw_t_info: BW type information |
4114 | * |
4115 | * This function restores node's BW from bw_t_info. The caller needs |
4116 | * to hold the scheduler lock. |
4117 | */ |
4118 | static int |
4119 | ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node, |
4120 | struct ice_bw_type_info *bw_t_info) |
4121 | { |
4122 | struct ice_port_info *pi = hw->port_info; |
4123 | int status = -EINVAL; |
4124 | u16 bw_alloc; |
4125 | |
4126 | if (!node) |
4127 | return status; |
4128 | if (bitmap_empty(src: bw_t_info->bw_t_bitmap, nbits: ICE_BW_TYPE_CNT)) |
4129 | return 0; |
4130 | if (test_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap)) { |
4131 | status = ice_sched_replay_node_prio(hw, node, |
4132 | priority: bw_t_info->generic); |
4133 | if (status) |
4134 | return status; |
4135 | } |
4136 | if (test_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap)) { |
4137 | status = ice_sched_set_node_bw_lmt(pi, node, rl_type: ICE_MIN_BW, |
4138 | bw: bw_t_info->cir_bw.bw); |
4139 | if (status) |
4140 | return status; |
4141 | } |
4142 | if (test_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap)) { |
4143 | bw_alloc = bw_t_info->cir_bw.bw_alloc; |
4144 | status = ice_sched_cfg_node_bw_alloc(hw, node, rl_type: ICE_MIN_BW, |
4145 | bw_alloc); |
4146 | if (status) |
4147 | return status; |
4148 | } |
4149 | if (test_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap)) { |
4150 | status = ice_sched_set_node_bw_lmt(pi, node, rl_type: ICE_MAX_BW, |
4151 | bw: bw_t_info->eir_bw.bw); |
4152 | if (status) |
4153 | return status; |
4154 | } |
4155 | if (test_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap)) { |
4156 | bw_alloc = bw_t_info->eir_bw.bw_alloc; |
4157 | status = ice_sched_cfg_node_bw_alloc(hw, node, rl_type: ICE_MAX_BW, |
4158 | bw_alloc); |
4159 | if (status) |
4160 | return status; |
4161 | } |
4162 | if (test_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap)) |
4163 | status = ice_sched_set_node_bw_lmt(pi, node, rl_type: ICE_SHARED_BW, |
4164 | bw: bw_t_info->shared_bw); |
4165 | return status; |
4166 | } |
4167 | |
4168 | /** |
4169 | * ice_sched_get_ena_tc_bitmap - get enabled TC bitmap |
4170 | * @pi: port info struct |
4171 | * @tc_bitmap: 8 bits TC bitmap to check |
4172 | * @ena_tc_bitmap: 8 bits enabled TC bitmap to return |
4173 | * |
4174 | * This function returns enabled TC bitmap in variable ena_tc_bitmap. Some TCs |
4175 | * may be missing, it returns enabled TCs. This function needs to be called with |
4176 | * scheduler lock held. |
4177 | */ |
4178 | static void |
4179 | ice_sched_get_ena_tc_bitmap(struct ice_port_info *pi, |
4180 | unsigned long *tc_bitmap, |
4181 | unsigned long *ena_tc_bitmap) |
4182 | { |
4183 | u8 tc; |
4184 | |
4185 | /* Some TC(s) may be missing after reset, adjust for replay */ |
4186 | ice_for_each_traffic_class(tc) |
4187 | if (ice_is_tc_ena(bitmap: *tc_bitmap, tc) && |
4188 | (ice_sched_get_tc_node(pi, tc))) |
4189 | set_bit(nr: tc, addr: ena_tc_bitmap); |
4190 | } |
4191 | |
4192 | /** |
4193 | * ice_sched_replay_agg - recreate aggregator node(s) |
4194 | * @hw: pointer to the HW struct |
4195 | * |
4196 | * This function recreate aggregator type nodes which are not replayed earlier. |
4197 | * It also replay aggregator BW information. These aggregator nodes are not |
4198 | * associated with VSI type node yet. |
4199 | */ |
4200 | void ice_sched_replay_agg(struct ice_hw *hw) |
4201 | { |
4202 | struct ice_port_info *pi = hw->port_info; |
4203 | struct ice_sched_agg_info *agg_info; |
4204 | |
4205 | mutex_lock(&pi->sched_lock); |
4206 | list_for_each_entry(agg_info, &hw->agg_list, list_entry) |
4207 | /* replay aggregator (re-create aggregator node) */ |
4208 | if (!bitmap_equal(src1: agg_info->tc_bitmap, src2: agg_info->replay_tc_bitmap, |
4209 | ICE_MAX_TRAFFIC_CLASS)) { |
4210 | DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); |
4211 | int status; |
4212 | |
4213 | bitmap_zero(dst: replay_bitmap, ICE_MAX_TRAFFIC_CLASS); |
4214 | ice_sched_get_ena_tc_bitmap(pi, |
4215 | tc_bitmap: agg_info->replay_tc_bitmap, |
4216 | ena_tc_bitmap: replay_bitmap); |
4217 | status = ice_sched_cfg_agg(pi: hw->port_info, |
4218 | agg_id: agg_info->agg_id, |
4219 | agg_type: ICE_AGG_TYPE_AGG, |
4220 | tc_bitmap: replay_bitmap); |
4221 | if (status) { |
4222 | dev_info(ice_hw_to_dev(hw), |
4223 | "Replay agg id[%d] failed\n" , |
4224 | agg_info->agg_id); |
4225 | /* Move on to next one */ |
4226 | continue; |
4227 | } |
4228 | } |
4229 | mutex_unlock(lock: &pi->sched_lock); |
4230 | } |
4231 | |
4232 | /** |
4233 | * ice_sched_replay_agg_vsi_preinit - Agg/VSI replay pre initialization |
4234 | * @hw: pointer to the HW struct |
4235 | * |
4236 | * This function initialize aggregator(s) TC bitmap to zero. A required |
4237 | * preinit step for replaying aggregators. |
4238 | */ |
4239 | void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw) |
4240 | { |
4241 | struct ice_port_info *pi = hw->port_info; |
4242 | struct ice_sched_agg_info *agg_info; |
4243 | |
4244 | mutex_lock(&pi->sched_lock); |
4245 | list_for_each_entry(agg_info, &hw->agg_list, list_entry) { |
4246 | struct ice_sched_agg_vsi_info *agg_vsi_info; |
4247 | |
4248 | agg_info->tc_bitmap[0] = 0; |
4249 | list_for_each_entry(agg_vsi_info, &agg_info->agg_vsi_list, |
4250 | list_entry) |
4251 | agg_vsi_info->tc_bitmap[0] = 0; |
4252 | } |
4253 | mutex_unlock(lock: &pi->sched_lock); |
4254 | } |
4255 | |
4256 | /** |
4257 | * ice_sched_replay_vsi_agg - replay aggregator & VSI to aggregator node(s) |
4258 | * @hw: pointer to the HW struct |
4259 | * @vsi_handle: software VSI handle |
4260 | * |
4261 | * This function replays aggregator node, VSI to aggregator type nodes, and |
4262 | * their node bandwidth information. This function needs to be called with |
4263 | * scheduler lock held. |
4264 | */ |
4265 | static int ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) |
4266 | { |
4267 | DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); |
4268 | struct ice_sched_agg_vsi_info *agg_vsi_info; |
4269 | struct ice_port_info *pi = hw->port_info; |
4270 | struct ice_sched_agg_info *agg_info; |
4271 | int status; |
4272 | |
4273 | bitmap_zero(dst: replay_bitmap, ICE_MAX_TRAFFIC_CLASS); |
4274 | if (!ice_is_vsi_valid(hw, vsi_handle)) |
4275 | return -EINVAL; |
4276 | agg_info = ice_get_vsi_agg_info(hw, vsi_handle); |
4277 | if (!agg_info) |
4278 | return 0; /* Not present in list - default Agg case */ |
4279 | agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); |
4280 | if (!agg_vsi_info) |
4281 | return 0; /* Not present in list - default Agg case */ |
4282 | ice_sched_get_ena_tc_bitmap(pi, tc_bitmap: agg_info->replay_tc_bitmap, |
4283 | ena_tc_bitmap: replay_bitmap); |
4284 | /* Replay aggregator node associated to vsi_handle */ |
4285 | status = ice_sched_cfg_agg(pi: hw->port_info, agg_id: agg_info->agg_id, |
4286 | agg_type: ICE_AGG_TYPE_AGG, tc_bitmap: replay_bitmap); |
4287 | if (status) |
4288 | return status; |
4289 | |
4290 | bitmap_zero(dst: replay_bitmap, ICE_MAX_TRAFFIC_CLASS); |
4291 | ice_sched_get_ena_tc_bitmap(pi, tc_bitmap: agg_vsi_info->replay_tc_bitmap, |
4292 | ena_tc_bitmap: replay_bitmap); |
4293 | /* Move this VSI (vsi_handle) to above aggregator */ |
4294 | return ice_sched_assoc_vsi_to_agg(pi, agg_id: agg_info->agg_id, vsi_handle, |
4295 | tc_bitmap: replay_bitmap); |
4296 | } |
4297 | |
4298 | /** |
4299 | * ice_replay_vsi_agg - replay VSI to aggregator node |
4300 | * @hw: pointer to the HW struct |
4301 | * @vsi_handle: software VSI handle |
4302 | * |
4303 | * This function replays association of VSI to aggregator type nodes, and |
4304 | * node bandwidth information. |
4305 | */ |
4306 | int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) |
4307 | { |
4308 | struct ice_port_info *pi = hw->port_info; |
4309 | int status; |
4310 | |
4311 | mutex_lock(&pi->sched_lock); |
4312 | status = ice_sched_replay_vsi_agg(hw, vsi_handle); |
4313 | mutex_unlock(lock: &pi->sched_lock); |
4314 | return status; |
4315 | } |
4316 | |
4317 | /** |
4318 | * ice_sched_replay_q_bw - replay queue type node BW |
4319 | * @pi: port information structure |
4320 | * @q_ctx: queue context structure |
4321 | * |
4322 | * This function replays queue type node bandwidth. This function needs to be |
4323 | * called with scheduler lock held. |
4324 | */ |
4325 | int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx) |
4326 | { |
4327 | struct ice_sched_node *q_node; |
4328 | |
4329 | /* Following also checks the presence of node in tree */ |
4330 | q_node = ice_sched_find_node_by_teid(start_node: pi->root, teid: q_ctx->q_teid); |
4331 | if (!q_node) |
4332 | return -EINVAL; |
4333 | return ice_sched_replay_node_bw(hw: pi->hw, node: q_node, bw_t_info: &q_ctx->bw_t_info); |
4334 | } |
4335 | |