// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
#include <net/sch_generic.h>

#include <net/pkt_cls.h>
#include "en.h"
#include "params.h"
#include "../qos.h"
#include "en/htb.h"

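/* Argument bundle passed to mlx5e_open_qos_sq() when it is invoked through
 * the HTB leaf-enumeration callback, which takes a single cookie pointer.
 */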
struct qos_sq_callback_params {
	struct mlx5e_priv *priv;
	struct mlx5e_channels *chs;
};

int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes)
{
	if (nbytes < BYTES_IN_MBIT) {
		qos_warn(mdev, "Input rate (%llu Bytes/sec) below minimum supported (%u Bytes/sec)\n",
			 nbytes, BYTES_IN_MBIT);
		return -EINVAL;
	}
	return 0;
}

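/* Convert a rate from bytes/sec to the Mbit/sec granularity the device uses,
 * rounding down. Assuming BYTES_IN_MBIT (from qos.h) is one megabit expressed
 * in bytes (125000), a 125000000 bytes/sec rate becomes 1000 Mbit/sec.
 */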
static u32 mlx5e_qos_bytes2mbits(struct mlx5_core_dev *mdev, u64 nbytes)
{
	return div_u64(nbytes, BYTES_IN_MBIT);
}

int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev)
{
	return min(MLX5E_QOS_MAX_LEAF_NODES, mlx5_qos_max_leaf_nodes(mdev));
}

/* TX datapath API */

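/* Map a QoS queue id to its netdev txq index. Regular txqs occupy the first
 * (num_channels + is_ptp) * num_tc indices, and QoS txqs follow. For example
 * (hypothetical numbers), with 4 channels, 2 DCB TCs and no PTP channel,
 * QoS qid 0 lands on txq index 8.
 */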
u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid)
{
	/* These channel params are safe to access from the datapath, because:
	 * 1. This function is called only after checking selq->htb_maj_id != 0,
	 *    and the number of queues can't change while HTB offload is active.
	 * 2. When selq->htb_maj_id becomes 0, synchronize_rcu waits for
	 *    mlx5e_select_queue to finish while holding priv->state_lock,
	 *    preventing other code from changing the number of queues.
	 */
	bool is_ptp = MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS);

	return (chs->params.num_channels + is_ptp) * mlx5e_get_dcb_num_tc(&chs->params) + qid;
}

/* SQ lifecycle */

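/* QoS SQs are striped across channels: qid % num_channels selects the
 * channel, qid / num_channels selects the slot in that channel's qos_sqs
 * array.
 */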
static struct mlx5e_txqsq *mlx5e_get_qos_sq(struct mlx5e_priv *priv, int qid)
{
	struct mlx5e_params *params = &priv->channels.params;
	struct mlx5e_txqsq __rcu **qos_sqs;
	struct mlx5e_channel *c;
	int ix;

	ix = qid % params->num_channels;
	qid /= params->num_channels;
	c = priv->channels.c[ix];

	qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
	return mlx5e_state_dereference(priv, qos_sqs[qid]);
}

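/* Open the SQ (and its CQ) backing one HTB leaf node. Per-queue stats are
 * allocated lazily; once an entry is published, htb_max_qos_sqs is bumped
 * with a release so the stats reader in en_stats.c (which pairs with an
 * smp_load_acquire) can iterate safely.
 */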
int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
		      u16 node_qid, u32 hw_id)
{
	struct mlx5e_create_cq_param ccp = {};
	struct mlx5e_txqsq __rcu **qos_sqs;
	struct mlx5e_sq_param param_sq;
	struct mlx5e_cq_param param_cq;
	int txq_ix, ix, qid, err = 0;
	struct mlx5e_params *params;
	struct mlx5e_channel *c;
	struct mlx5e_txqsq *sq;
	u32 tisn;

	params = &chs->params;

	txq_ix = mlx5e_qid_from_qos(chs, node_qid);

	WARN_ON(node_qid >= mlx5e_htb_cur_leaf_nodes(priv->htb));
	if (!priv->htb_qos_sq_stats) {
		struct mlx5e_sq_stats **stats_list;

		stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
				      sizeof(*stats_list), GFP_KERNEL);
		if (!stats_list)
			return -ENOMEM;

		WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
	}

	if (!priv->htb_qos_sq_stats[node_qid]) {
		struct mlx5e_sq_stats *stats;

		stats = kzalloc(sizeof(*stats), GFP_KERNEL);
		if (!stats)
			return -ENOMEM;

		WRITE_ONCE(priv->htb_qos_sq_stats[node_qid], stats);
		/* Order htb_max_qos_sqs increment after writing the array pointer.
		 * Pairs with smp_load_acquire in en_stats.c.
		 */
		smp_store_release(&priv->htb_max_qos_sqs, priv->htb_max_qos_sqs + 1);
	}

	ix = node_qid % params->num_channels;
	qid = node_qid / params->num_channels;
	c = chs->c[ix];

	qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
	sq = kzalloc(sizeof(*sq), GFP_KERNEL);
	if (!sq)
		return -ENOMEM;

	mlx5e_build_create_cq_param(&ccp, c);

	memset(&param_sq, 0, sizeof(param_sq));
	memset(&param_cq, 0, sizeof(param_cq));
	mlx5e_build_sq_param(c->mdev, params, &param_sq);
	mlx5e_build_tx_cq_param(c->mdev, params, &param_cq);
	err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq);
	if (err)
		goto err_free_sq;

	tisn = mlx5e_profile_get_tisn(c->mdev, c->priv, c->priv->profile,
				      c->lag_port, 0);
	err = mlx5e_open_txqsq(c, tisn, txq_ix, params, &param_sq, sq, 0, hw_id,
			       priv->htb_qos_sq_stats[node_qid]);
	if (err)
		goto err_close_cq;

	rcu_assign_pointer(qos_sqs[qid], sq);

	return 0;

err_close_cq:
	mlx5e_close_cq(&sq->cq);
err_free_sq:
	kfree(sq);
	return err;
}

static int mlx5e_open_qos_sq_cb_wrapper(void *data, u16 node_qid, u32 hw_id)
{
	struct qos_sq_callback_params *cb_params = data;

	return mlx5e_open_qos_sq(cb_params->priv, cb_params->chs, node_qid, hw_id);
}

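/* Attach an open QoS SQ to its txq slot and start it. Used as an HTB
 * leaf-enumeration callback, with priv passed as the cookie.
 */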
int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id)
{
	struct mlx5e_priv *priv = data;
	struct mlx5e_txqsq *sq;
	u16 qid;

	sq = mlx5e_get_qos_sq(priv, node_qid);

	qid = mlx5e_qid_from_qos(&priv->channels, node_qid);

	/* If it's a new queue, it will be marked as started at this point.
	 * Stop it before updating txq2sq.
	 */
	mlx5e_tx_disable_queue(netdev_get_tx_queue(priv->netdev, qid));

	priv->txq2sq[qid] = sq;

	/* Make the change to txq2sq visible before the queue is started.
	 * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
	 * which pairs with this barrier.
	 */
	smp_wmb();

	qos_dbg(sq->mdev, "Activate QoS SQ qid %u\n", node_qid);
	mlx5e_activate_txqsq(sq);

	return 0;
}

void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
{
	struct mlx5e_txqsq *sq;

	sq = mlx5e_get_qos_sq(priv, qid);
	if (!sq) /* Handle the case when the SQ failed to open. */
		return;

	qos_dbg(sq->mdev, "Deactivate QoS SQ qid %u\n", qid);
	mlx5e_deactivate_txqsq(sq);

	priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, qid)] = NULL;

	/* Make the change to txq2sq visible before the queue is started again.
	 * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
	 * which pairs with this barrier.
	 */
	smp_wmb();
}

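/* Fully tear down one QoS SQ: unpublish it from the per-channel array, wait
 * for in-flight NAPI polls to finish, then release the SQ and its CQ.
 */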
void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid)
{
	struct mlx5e_txqsq __rcu **qos_sqs;
	struct mlx5e_params *params;
	struct mlx5e_channel *c;
	struct mlx5e_txqsq *sq;
	int ix;

	params = &priv->channels.params;

	ix = qid % params->num_channels;
	qid /= params->num_channels;
	c = priv->channels.c[ix];
	qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
	sq = rcu_replace_pointer(qos_sqs[qid], NULL, lockdep_is_held(&priv->state_lock));
	if (!sq) /* Handle the case when the SQ failed to open. */
		return;

	synchronize_rcu(); /* Sync with NAPI. */

	mlx5e_close_txqsq(sq);
	mlx5e_close_cq(&sq->cq);
	kfree(sq);
}

void mlx5e_qos_close_queues(struct mlx5e_channel *c)
{
	struct mlx5e_txqsq __rcu **qos_sqs;
	int i;

	qos_sqs = rcu_replace_pointer(c->qos_sqs, NULL, lockdep_is_held(&c->priv->state_lock));
	if (!qos_sqs)
		return;
	synchronize_rcu(); /* Sync with NAPI. */

	for (i = 0; i < c->qos_sqs_size; i++) {
		struct mlx5e_txqsq *sq;

		sq = mlx5e_state_dereference(c->priv, qos_sqs[i]);
		if (!sq) /* Handle the case when the SQ failed to open. */
			continue;

		mlx5e_close_txqsq(sq);
		mlx5e_close_cq(&sq->cq);
		kfree(sq);
	}

	kvfree(qos_sqs);
}

void mlx5e_qos_close_all_queues(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_qos_close_queues(chs->c[i]);
}

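/* Allocate each channel's array of QoS SQ pointers. Sizing is pessimistic:
 * every channel gets room for DIV_ROUND_UP(max_leaf_nodes, num_channels)
 * SQs, e.g. (hypothetical numbers) 64 possible leaves over 6 channels gives
 * 11 slots per channel.
 */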
int mlx5e_qos_alloc_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
	u16 qos_sqs_size;
	int i;

	qos_sqs_size = DIV_ROUND_UP(mlx5e_qos_max_leaf_nodes(priv->mdev), chs->num);

	for (i = 0; i < chs->num; i++) {
		struct mlx5e_txqsq **sqs;

		sqs = kvcalloc(qos_sqs_size, sizeof(struct mlx5e_txqsq *), GFP_KERNEL);
		if (!sqs)
			goto err_free;

		WRITE_ONCE(chs->c[i]->qos_sqs_size, qos_sqs_size);
		smp_wmb(); /* Pairs with mlx5e_napi_poll. */
		rcu_assign_pointer(chs->c[i]->qos_sqs, sqs);
	}

	return 0;

err_free:
	while (--i >= 0) {
		struct mlx5e_txqsq **sqs;

		sqs = rcu_replace_pointer(chs->c[i]->qos_sqs, NULL,
					  lockdep_is_held(&priv->state_lock));

		synchronize_rcu(); /* Sync with NAPI. */
		kvfree(sqs);
	}
	return -ENOMEM;
}

int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
	struct qos_sq_callback_params callback_params;
	int err;

	err = mlx5e_qos_alloc_queues(priv, chs);
	if (err)
		return err;

	callback_params.priv = priv;
	callback_params.chs = chs;

	err = mlx5e_htb_enumerate_leaves(priv->htb, mlx5e_open_qos_sq_cb_wrapper, &callback_params);
	if (err) {
		mlx5e_qos_close_all_queues(chs);
		return err;
	}

	return 0;
}

void mlx5e_qos_activate_queues(struct mlx5e_priv *priv)
{
	mlx5e_htb_enumerate_leaves(priv->htb, mlx5e_activate_qos_sq, priv);
}

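/* Stop this channel's QoS SQs and clear their txq2sq entries. The qid
 * formula num_channels * i + c->ix inverts the striping used by
 * mlx5e_get_qos_sq().
 */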
void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c)
{
	struct mlx5e_params *params = &c->priv->channels.params;
	struct mlx5e_txqsq __rcu **qos_sqs;
	int i;

	qos_sqs = mlx5e_state_dereference(c->priv, c->qos_sqs);
	if (!qos_sqs)
		return;

	for (i = 0; i < c->qos_sqs_size; i++) {
		u16 qid = params->num_channels * i + c->ix;
		struct mlx5e_txqsq *sq;

		sq = mlx5e_state_dereference(c->priv, qos_sqs[i]);
		if (!sq) /* Handle the case when the SQ failed to open. */
			continue;

		qos_dbg(c->mdev, "Deactivate QoS SQ qid %u\n", qid);
		mlx5e_deactivate_txqsq(sq);

		/* The queue is disabled, no synchronization with datapath is needed. */
		c->priv->txq2sq[mlx5e_qid_from_qos(&c->priv->channels, qid)] = NULL;
	}
}

void mlx5e_qos_deactivate_all_queues(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_qos_deactivate_queues(chs->c[i]);
}

void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq)
{
	qos_dbg(priv->mdev, "Reactivate QoS SQ qid %u\n", qid);
	netdev_tx_reset_queue(txq);
	netif_tx_start_queue(txq);
}

void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
{
	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid);
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;

	if (!qdisc)
		return;

	spin_lock_bh(qdisc_lock(qdisc));
	qdisc_reset(qdisc);
	spin_unlock_bh(qdisc_lock(qdisc));
}

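/* Entry point for HTB offload commands coming from the qdisc layer;
 * dispatches each TC_HTB_* command to the corresponding mlx5e_htb_* helper.
 */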
int mlx5e_htb_setup_tc(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb_qopt)
{
	struct mlx5e_htb *htb = priv->htb;
	int res;

	if (!htb && htb_qopt->command != TC_HTB_CREATE)
		return -EINVAL;

	if (htb_qopt->prio || htb_qopt->quantum) {
		NL_SET_ERR_MSG_MOD(htb_qopt->extack,
				   "prio and quantum parameters are not supported by device with HTB offload enabled.");
		return -EOPNOTSUPP;
	}

	switch (htb_qopt->command) {
	case TC_HTB_CREATE:
		if (!mlx5_qos_is_supported(priv->mdev)) {
			NL_SET_ERR_MSG_MOD(htb_qopt->extack,
					   "Missing QoS capabilities. Try disabling SRIOV or use a supported device.");
			return -EOPNOTSUPP;
		}
		priv->htb = mlx5e_htb_alloc();
		htb = priv->htb;
		if (!htb)
			return -ENOMEM;
		res = mlx5e_htb_init(htb, htb_qopt, priv->netdev, priv->mdev, &priv->selq, priv);
		if (res) {
			mlx5e_htb_free(htb);
			priv->htb = NULL;
		}
		return res;
	case TC_HTB_DESTROY:
		mlx5e_htb_cleanup(htb);
		mlx5e_htb_free(htb);
		priv->htb = NULL;
		return 0;
	case TC_HTB_LEAF_ALLOC_QUEUE:
		res = mlx5e_htb_leaf_alloc_queue(htb, htb_qopt->classid, htb_qopt->parent_classid,
						 htb_qopt->rate, htb_qopt->ceil, htb_qopt->extack);
		if (res < 0)
			return res;
		htb_qopt->qid = res;
		return 0;
	case TC_HTB_LEAF_TO_INNER:
		return mlx5e_htb_leaf_to_inner(htb, htb_qopt->parent_classid, htb_qopt->classid,
					       htb_qopt->rate, htb_qopt->ceil, htb_qopt->extack);
	case TC_HTB_LEAF_DEL:
		return mlx5e_htb_leaf_del(htb, &htb_qopt->classid, htb_qopt->extack);
	case TC_HTB_LEAF_DEL_LAST:
	case TC_HTB_LEAF_DEL_LAST_FORCE:
		return mlx5e_htb_leaf_del_last(htb, htb_qopt->classid,
					       htb_qopt->command == TC_HTB_LEAF_DEL_LAST_FORCE,
					       htb_qopt->extack);
	case TC_HTB_NODE_MODIFY:
		return mlx5e_htb_node_modify(htb, htb_qopt->classid, htb_qopt->rate, htb_qopt->ceil,
					     htb_qopt->extack);
	case TC_HTB_LEAF_QUERY_QUEUE:
		res = mlx5e_htb_get_txq_by_classid(htb, htb_qopt->classid);
		if (res < 0)
			return res;
		htb_qopt->qid = res;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

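/* Rate limiting for mqprio channel mode: one root scheduling node in the
 * device with a rate-limited leaf node per traffic class.
 */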
struct mlx5e_mqprio_rl {
	struct mlx5_core_dev *mdev;
	u32 root_id;
	u32 *leaves_id;
	u8 num_tc;
};

struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_alloc(void)
{
	return kvzalloc(sizeof(struct mlx5e_mqprio_rl), GFP_KERNEL);
}

void mlx5e_mqprio_rl_free(struct mlx5e_mqprio_rl *rl)
{
	kvfree(rl);
}

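/* Create the scheduling hierarchy in the device: a root node, then one leaf
 * per TC whose max average bandwidth is max_rate[tc] converted to Mbit/sec.
 * On failure, already-created leaves and the root are destroyed in reverse
 * order.
 */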
461 | |
462 | int mlx5e_mqprio_rl_init(struct mlx5e_mqprio_rl *rl, struct mlx5_core_dev *mdev, u8 num_tc, |
463 | u64 max_rate[]) |
464 | { |
465 | int err; |
466 | int tc; |
467 | |
468 | if (!mlx5_qos_is_supported(mdev)) { |
469 | qos_warn(mdev, "Missing QoS capabilities. Try disabling SRIOV or use a supported device." ); |
470 | return -EOPNOTSUPP; |
471 | } |
472 | if (num_tc > mlx5e_qos_max_leaf_nodes(mdev)) |
473 | return -EINVAL; |
474 | |
475 | rl->mdev = mdev; |
476 | rl->num_tc = num_tc; |
477 | rl->leaves_id = kvcalloc(n: num_tc, size: sizeof(*rl->leaves_id), GFP_KERNEL); |
478 | if (!rl->leaves_id) |
479 | return -ENOMEM; |
480 | |
481 | err = mlx5_qos_create_root_node(mdev, id: &rl->root_id); |
482 | if (err) |
483 | goto err_free_leaves; |
484 | |
485 | qos_dbg(mdev, "Root created, id %#x\n" , rl->root_id); |
486 | |
487 | for (tc = 0; tc < num_tc; tc++) { |
488 | u32 max_average_bw; |
489 | |
490 | max_average_bw = mlx5e_qos_bytes2mbits(mdev, nbytes: max_rate[tc]); |
491 | err = mlx5_qos_create_leaf_node(mdev, parent_id: rl->root_id, bw_share: 0, max_avg_bw: max_average_bw, |
492 | id: &rl->leaves_id[tc]); |
493 | if (err) |
494 | goto err_destroy_leaves; |
495 | |
496 | qos_dbg(mdev, "Leaf[%d] created, id %#x, max average bw %u Mbits/sec\n" , |
497 | tc, rl->leaves_id[tc], max_average_bw); |
498 | } |
499 | return 0; |
500 | |
501 | err_destroy_leaves: |
502 | while (--tc >= 0) |
503 | mlx5_qos_destroy_node(mdev, id: rl->leaves_id[tc]); |
504 | mlx5_qos_destroy_node(mdev, id: rl->root_id); |
505 | err_free_leaves: |
506 | kvfree(addr: rl->leaves_id); |
507 | return err; |
508 | } |
509 | |
510 | void mlx5e_mqprio_rl_cleanup(struct mlx5e_mqprio_rl *rl) |
511 | { |
512 | int tc; |
513 | |
514 | for (tc = 0; tc < rl->num_tc; tc++) |
515 | mlx5_qos_destroy_node(mdev: rl->mdev, id: rl->leaves_id[tc]); |
516 | mlx5_qos_destroy_node(mdev: rl->mdev, id: rl->root_id); |
517 | kvfree(addr: rl->leaves_id); |
518 | } |
519 | |
520 | int mlx5e_mqprio_rl_get_node_hw_id(struct mlx5e_mqprio_rl *rl, int tc, u32 *hw_id) |
521 | { |
522 | if (tc >= rl->num_tc) |
523 | return -EINVAL; |
524 | |
525 | *hw_id = rl->leaves_id[tc]; |
526 | return 0; |
527 | } |
528 | |