/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/device.h>
#include <linux/netdevice.h>
#include "en.h"
#include "en/port.h"
#include "en/port_buffer.h"

#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */

#define MLX5E_100MB (100000)
#define MLX5E_1GB   (1000000)

#define MLX5E_CEE_STATE_UP    1
#define MLX5E_CEE_STATE_DOWN  0

/* Max supported cable length is 1000 meters */
#define MLX5E_MAX_CABLE_LENGTH 1000

enum {
	MLX5E_VENDOR_TC_GROUP_NUM = 7,
	MLX5E_LOWEST_PRIO_GROUP   = 0,
};

enum {
	MLX5_DCB_CHG_RESET,
	MLX5_DCB_NO_CHG,
	MLX5_DCB_CHG_NO_RESET,
};

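/* DSCP trust is configurable only when the device exposes the QCAM register
 * and, through it, the QPTS (trust state) and QPDPM (DSCP to priority
 * mapping) registers.
 */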
#define MLX5_DSCP_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, qcam_reg) && \
				   MLX5_CAP_QCAM_REG(mdev, qpts) && \
				   MLX5_CAP_QCAM_REG(mdev, qpdpm))

static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state);
static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio);

/* If the dcbx mode is non-host, set the dcbx mode to host.
 */
static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv,
				     enum mlx5_dcbx_oper_mode mode)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 param[MLX5_ST_SZ_DW(dcbx_param)];
	int err;

	err = mlx5_query_port_dcbx_param(mdev, param);
	if (err)
		return err;

	MLX5_SET(dcbx_param, param, version_admin, mode);
	if (mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
		MLX5_SET(dcbx_param, param, willing_admin, 1);

	return mlx5_set_port_dcbx_param(mdev, param);
}

static int mlx5e_dcbnl_switch_to_host_mode(struct mlx5e_priv *priv)
{
	struct mlx5e_dcbx *dcbx = &priv->dcbx;
	int err;

	if (!MLX5_CAP_GEN(priv->mdev, dcbx))
		return 0;

	if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
		return 0;

	err = mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_HOST);
	if (err)
		return err;

	dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;
	return 0;
}

static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
				   struct ieee_ets *ets)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 tc_group[IEEE_8021QAZ_MAX_TCS];
	bool is_tc_group_6_exist = false;
	bool is_zero_bw_ets_tc = false;
	int err = 0;
	int i;

	if (!MLX5_CAP_GEN(priv->mdev, ets))
		return -EOPNOTSUPP;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
		if (err)
			return err;
	}

	ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
	for (i = 0; i < ets->ets_cap; i++) {
		err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
		if (err)
			return err;

		err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]);
		if (err)
			return err;

		if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC &&
		    tc_group[i] == (MLX5E_LOWEST_PRIO_GROUP + 1))
			is_zero_bw_ets_tc = true;

		if (tc_group[i] == (MLX5E_VENDOR_TC_GROUP_NUM - 1))
			is_tc_group_6_exist = true;
	}

	/* Report 0% ETS TCs if they exist */
	if (is_zero_bw_ets_tc) {
		for (i = 0; i < ets->ets_cap; i++)
			if (tc_group[i] == MLX5E_LOWEST_PRIO_GROUP)
				ets->tc_tx_bw[i] = 0;
	}

	/* Update tc_tsa based on the FW setting */
	for (i = 0; i < ets->ets_cap; i++) {
		if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC)
			priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
		else if (tc_group[i] == MLX5E_VENDOR_TC_GROUP_NUM &&
			 !is_tc_group_6_exist)
			priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
	}
	memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa));

	return err;
}

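/* Map each TC to a HW TC group: vendor TCs go to group 7, ETS TCs go to
 * group 0 (or group 1 when zero-BW ETS TCs exist), and strict TCs take the
 * next group numbers above the ETS group(s).
 */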
static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
{
	bool any_tc_mapped_to_ets = false;
	bool ets_zero_bw = false;
	int strict_group;
	int i;

	for (i = 0; i <= max_tc; i++) {
		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
			any_tc_mapped_to_ets = true;
			if (!ets->tc_tx_bw[i])
				ets_zero_bw = true;
		}
	}

	/* strict group has higher priority than ets group */
	strict_group = MLX5E_LOWEST_PRIO_GROUP;
	if (any_tc_mapped_to_ets)
		strict_group++;
	if (ets_zero_bw)
		strict_group++;

	for (i = 0; i <= max_tc; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_VENDOR:
			tc_group[i] = MLX5E_VENDOR_TC_GROUP_NUM;
			break;
		case IEEE_8021QAZ_TSA_STRICT:
			tc_group[i] = strict_group++;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			tc_group[i] = MLX5E_LOWEST_PRIO_GROUP;
			if (ets->tc_tx_bw[i] && ets_zero_bw)
				tc_group[i] = MLX5E_LOWEST_PRIO_GROUP + 1;
			break;
		}
	}
}

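/* Compute the per-TC BW allocation that is programmed to FW: strict and
 * vendor TCs get 100% of their group, non-zero ETS TCs keep their configured
 * share, and zero-BW ETS TCs split 100% equally inside their own group (the
 * last one absorbs the remainder so the group still sums to 100%).
 */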
static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
				 u8 *tc_group, int max_tc)
{
	int bw_for_ets_zero_bw_tc = 0;
	int last_ets_zero_bw_tc = -1;
	int num_ets_zero_bw = 0;
	int i;

	for (i = 0; i <= max_tc; i++) {
		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS &&
		    !ets->tc_tx_bw[i]) {
			num_ets_zero_bw++;
			last_ets_zero_bw_tc = i;
		}
	}

	if (num_ets_zero_bw)
		bw_for_ets_zero_bw_tc = MLX5E_MAX_BW_ALLOC / num_ets_zero_bw;

	for (i = 0; i <= max_tc; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_VENDOR:
			tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
			break;
		case IEEE_8021QAZ_TSA_STRICT:
			tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			tc_tx_bw[i] = ets->tc_tx_bw[i] ?
				      ets->tc_tx_bw[i] :
				      bw_for_ets_zero_bw_tc;
			break;
		}
	}

	/* Make sure the total BW for the zero-BW ETS group is 100% */
	if (last_ets_zero_bw_tc != -1)
		tc_tx_bw[last_ets_zero_bw_tc] +=
			MLX5E_MAX_BW_ALLOC % num_ets_zero_bw;
}

/* If any ETS TC is configured with 0% BW:
 * - Assign ETS group #1 to all ETS TCs with non-zero BW; their sum must be
 *   100%.
 * - Assign group #0 to all ETS TCs with 0% BW and split the group's 100% BW
 *   equally between them.
 * - Report both group #0 and group #1 as ETS type.
 *   All TCs in group #0 are reported with 0% BW.
 */
static int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS];
	u8 tc_group[IEEE_8021QAZ_MAX_TCS];
	int max_tc = mlx5_max_tc(mdev);
	int err, i;

	mlx5e_build_tc_group(ets, tc_group, max_tc);
	mlx5e_build_tc_tx_bw(ets, tc_tx_bw, tc_group, max_tc);

	err = mlx5_set_port_prio_tc(mdev, ets->prio_tc);
	if (err)
		return err;

	err = mlx5_set_port_tc_group(mdev, tc_group);
	if (err)
		return err;

	err = mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw);
	if (err)
		return err;

	memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		netdev_dbg(priv->netdev, "%s: prio_%d <=> tc_%d\n",
			   __func__, i, ets->prio_tc[i]);
		netdev_dbg(priv->netdev, "%s: tc_%d <=> tx_bw_%d%%, group_%d\n",
			   __func__, i, tc_tx_bw[i], tc_group[i]);
	}

	return err;
}

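/* Validate an ETS configuration coming from dcbnl: every priority must map
 * to a valid TC, and if any TC is ETS the ETS BW shares must sum to 100%.
 * A zero BW sum always fails, but the error is not logged when the caller
 * tolerates it (the CEE path).
 */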
static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
				    struct ieee_ets *ets,
				    bool zero_sum_allowed)
{
	bool have_ets_tc = false;
	int bw_sum = 0;
	int i;

	/* Validate Priority */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) {
			netdev_err(netdev,
				   "Failed to validate ETS: priority value greater than max(%d)\n",
				   MLX5E_MAX_PRIORITY);
			return -EINVAL;
		}
	}

	/* Validate Bandwidth Sum */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
			have_ets_tc = true;
			bw_sum += ets->tc_tx_bw[i];
		}
	}

	if (have_ets_tc && bw_sum != 100) {
		if (bw_sum || (!bw_sum && !zero_sum_allowed))
			netdev_err(netdev,
				   "Failed to validate ETS: BW sum is illegal\n");
		return -EINVAL;
	}
	return 0;
}

static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
				   struct ieee_ets *ets)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (!MLX5_CAP_GEN(priv->mdev, ets))
		return -EOPNOTSUPP;

	err = mlx5e_dbcnl_validate_ets(netdev, ets, false);
	if (err)
		return err;

	err = mlx5e_dcbnl_ieee_setets_core(priv, ets);
	if (err)
		return err;

	return 0;
}

static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
				   struct ieee_pfc *pfc)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	int i;

	pfc->pfc_cap = mlx5_max_tc(mdev) + 1;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		pfc->requests[i]    = PPORT_PER_PRIO_GET(pstats, i, tx_pause);
		pfc->indications[i] = PPORT_PER_PRIO_GET(pstats, i, rx_pause);
	}

	if (MLX5_BUFFER_SUPPORTED(mdev))
		pfc->delay = priv->dcbx.cable_len;

	return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL);
}

static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
				   struct ieee_pfc *pfc)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 old_cable_len = priv->dcbx.cable_len;
	struct ieee_pfc pfc_new;
	u32 changed = 0;
	u8 curr_pfc_en;
	int ret = 0;

	/* pfc_en */
	mlx5_query_port_pfc(mdev, &curr_pfc_en, NULL);
	if (pfc->pfc_en != curr_pfc_en) {
		ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en);
		if (ret)
			return ret;
		mlx5_toggle_port_link(mdev);
		changed |= MLX5E_PORT_BUFFER_PFC;
	}

	if (pfc->delay &&
	    pfc->delay < MLX5E_MAX_CABLE_LENGTH &&
	    pfc->delay != priv->dcbx.cable_len) {
		priv->dcbx.cable_len = pfc->delay;
		changed |= MLX5E_PORT_BUFFER_CABLE_LEN;
	}

	if (MLX5_BUFFER_SUPPORTED(mdev)) {
		pfc_new.pfc_en = (changed & MLX5E_PORT_BUFFER_PFC) ? pfc->pfc_en : curr_pfc_en;
		if (priv->dcbx.manual_buffer)
			ret = mlx5e_port_manual_buffer_config(priv, changed,
							      dev->mtu, &pfc_new,
							      NULL, NULL);

		if (ret && (changed & MLX5E_PORT_BUFFER_CABLE_LEN))
			priv->dcbx.cable_len = old_cable_len;
	}

	if (!ret) {
		netdev_dbg(dev,
			   "%s: PFC per priority bit mask: 0x%x\n",
			   __func__, pfc->pfc_en);
	}
	return ret;
}

static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return priv->dcbx.cap;
}

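/* dcbnl setdcbx returns 0 on success and nonzero on failure. LLD_MANAGED
 * mode is not supported. Mode 0 hands DCBX control back to FW (auto mode)
 * when the device supports it; any other requested mode must keep the host
 * in control (DCB_CAP_DCBX_HOST).
 */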
static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_dcbx *dcbx = &priv->dcbx;

	if (mode & DCB_CAP_DCBX_LLD_MANAGED)
		return 1;

	if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) {
		if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_AUTO)
			return 0;

		/* set dcbx to fw controlled */
		if (!mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_AUTO)) {
			dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
			dcbx->cap &= ~DCB_CAP_DCBX_HOST;
			return 0;
		}

		return 1;
	}

	if (!(mode & DCB_CAP_DCBX_HOST))
		return 1;

	if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
		return 1;

	dcbx->cap = mode;

	return 0;
}

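/* Adding the first DSCP app entry switches the port to DSCP trust state;
 * deleting the last one (in mlx5e_dcbnl_ieee_delapp below) switches it back
 * to PCP trust.
 */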
static int mlx5e_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct dcb_app temp;
	bool is_new;
	int err;

	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
	    !MLX5_DSCP_SUPPORTED(priv->mdev))
		return -EOPNOTSUPP;

	if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
	    (app->protocol >= MLX5E_MAX_DSCP))
		return -EINVAL;

	/* Save the old entry info */
	temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
	temp.protocol = app->protocol;
	temp.priority = priv->dcbx_dp.dscp2prio[app->protocol];

	/* Check if we need to switch to dscp trust state */
	if (!priv->dcbx.dscp_app_cnt) {
		err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_DSCP);
		if (err)
			return err;
	}

	/* Skip the fw command if the new and old mappings are the same */
	if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol]) {
		err = mlx5e_set_dscp2prio(priv, app->protocol, app->priority);
		if (err)
			goto fw_err;
	}

	/* Delete the old entry if it exists */
	is_new = false;
	err = dcb_ieee_delapp(dev, &temp);
	if (err)
		is_new = true;

	/* Add the new entry and update the counter */
	err = dcb_ieee_setapp(dev, app);
	if (err)
		return err;

	if (is_new)
		priv->dcbx.dscp_app_cnt++;

	return err;

fw_err:
	mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
	return err;
}

static int mlx5e_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int err;

	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
	    !MLX5_DSCP_SUPPORTED(priv->mdev))
		return -EOPNOTSUPP;

	if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
	    (app->protocol >= MLX5E_MAX_DSCP))
		return -EINVAL;

	/* Skip if there is no dscp app entry */
	if (!priv->dcbx.dscp_app_cnt)
		return -ENOENT;

	/* Check if the entry matches the fw setting */
	if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol])
		return -ENOENT;

	/* Delete the app entry */
	err = dcb_ieee_delapp(dev, app);
	if (err)
		return err;

	/* Reset the priority mapping back to zero */
	err = mlx5e_set_dscp2prio(priv, app->protocol, 0);
	if (err)
		goto fw_err;

	priv->dcbx.dscp_app_cnt--;

	/* Check if we need to switch back to pcp trust state */
	if (!priv->dcbx.dscp_app_cnt)
		err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);

	return err;

fw_err:
	mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
	return err;
}

static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev,
				       struct ieee_maxrate *maxrate)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
	u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
	int err;
	int i;

	err = mlx5_query_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
	if (err)
		return err;

	memset(maxrate->tc_maxrate, 0, sizeof(maxrate->tc_maxrate));

	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
		switch (max_bw_unit[i]) {
		case MLX5_100_MBPS_UNIT:
			maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_100MB;
			break;
		case MLX5_GBPS_UNIT:
			maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_1GB;
			break;
		case MLX5_BW_NO_LIMIT:
			break;
		default:
			WARN(true, "non-supported BW unit");
			break;
		}
	}

	return 0;
}

static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
				       struct ieee_maxrate *maxrate)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
	u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
	__u64 upper_limit_mbps = roundup(255 * MLX5E_100MB, MLX5E_1GB);
	int i;

	memset(max_bw_value, 0, sizeof(max_bw_value));
	memset(max_bw_unit, 0, sizeof(max_bw_unit));

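	/* max_bw_value is one byte per TC: rates below upper_limit_mbps are
	 * programmed in 100 Mbps units for finer granularity, larger rates
	 * fall back to 1 Gbps units. As used here, tc_maxrate is interpreted
	 * in 1 Kbps units (MLX5E_100MB == 100000).
	 */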
	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
		if (!maxrate->tc_maxrate[i]) {
			max_bw_unit[i] = MLX5_BW_NO_LIMIT;
			continue;
		}
		if (maxrate->tc_maxrate[i] < upper_limit_mbps) {
			max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
						  MLX5E_100MB);
			max_bw_value[i] = max_bw_value[i] ? max_bw_value[i] : 1;
			max_bw_unit[i] = MLX5_100_MBPS_UNIT;
		} else {
			max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
						  MLX5E_1GB);
			max_bw_unit[i] = MLX5_GBPS_UNIT;
		}
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		netdev_dbg(netdev, "%s: tc_%d <=> max_bw %d Gbps\n",
			   __func__, i, max_bw_value[i]);
	}

	return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
}

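/* CEE "setall": apply the locally cached CEE configuration (priority groups,
 * PG bandwidth and PFC) to the device via the IEEE ETS/PFC helpers above,
 * and report whether a HW change requiring reset took place.
 */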
static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct ieee_ets ets;
	struct ieee_pfc pfc;
	int err = -EOPNOTSUPP;
	int i;

	if (!MLX5_CAP_GEN(mdev, ets))
		goto out;

	memset(&ets, 0, sizeof(ets));
	memset(&pfc, 0, sizeof(pfc));

	ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
	for (i = 0; i < CEE_DCBX_MAX_PGS; i++) {
		ets.tc_tx_bw[i] = cee_cfg->pg_bw_pct[i];
		ets.tc_rx_bw[i] = cee_cfg->pg_bw_pct[i];
		ets.tc_tsa[i]   = IEEE_8021QAZ_TSA_ETS;
		ets.prio_tc[i]  = cee_cfg->prio_to_pg_map[i];
		netdev_dbg(netdev,
			   "%s: Priority group %d: tx_bw %d, rx_bw %d, prio_tc %d\n",
			   __func__, i, ets.tc_tx_bw[i], ets.tc_rx_bw[i],
			   ets.prio_tc[i]);
	}

	err = mlx5e_dbcnl_validate_ets(netdev, &ets, true);
	if (err)
		goto out;

	err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
	if (err) {
		netdev_err(netdev,
			   "%s, Failed to set ETS: %d\n", __func__, err);
		goto out;
	}

	/* Set PFC */
	pfc.pfc_cap = mlx5_max_tc(mdev) + 1;
	if (!cee_cfg->pfc_enable)
		pfc.pfc_en = 0;
	else
		for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
			pfc.pfc_en |= cee_cfg->pfc_setting[i] << i;

	err = mlx5e_dcbnl_ieee_setpfc(netdev, &pfc);
	if (err) {
		netdev_err(netdev,
			   "%s, Failed to set PFC: %d\n", __func__, err);
		goto out;
	}
out:
	return err ? MLX5_DCB_NO_CHG : MLX5_DCB_CHG_RESET;
}

static u8 mlx5e_dcbnl_getstate(struct net_device *netdev)
{
	return MLX5E_CEE_STATE_UP;
}

static void mlx5e_dcbnl_getpermhwaddr(struct net_device *netdev,
				      u8 *perm_addr)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (!perm_addr)
		return;

	memset(perm_addr, 0xff, MAX_ADDR_LEN);

	mlx5_query_mac_address(priv->mdev, perm_addr);
}

static void mlx5e_dcbnl_setpgtccfgtx(struct net_device *netdev,
				     int priority, u8 prio_type,
				     u8 pgid, u8 bw_pct, u8 up_map)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;

	if (priority >= CEE_DCBX_MAX_PRIO) {
		netdev_err(netdev,
			   "%s, priority is out of range\n", __func__);
		return;
	}

	if (pgid >= CEE_DCBX_MAX_PGS) {
		netdev_err(netdev,
			   "%s, priority group is out of range\n", __func__);
		return;
	}

	cee_cfg->prio_to_pg_map[priority] = pgid;
}

static void mlx5e_dcbnl_setpgbwgcfgtx(struct net_device *netdev,
				      int pgid, u8 bw_pct)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;

	if (pgid >= CEE_DCBX_MAX_PGS) {
		netdev_err(netdev,
			   "%s, priority group is out of range\n", __func__);
		return;
	}

	cee_cfg->pg_bw_pct[pgid] = bw_pct;
}

static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
				     int priority, u8 *prio_type,
				     u8 *pgid, u8 *bw_pct, u8 *up_map)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(priv->mdev, ets)) {
		netdev_err(netdev, "%s, ets is not supported\n", __func__);
		return;
	}

	if (priority >= CEE_DCBX_MAX_PRIO) {
		netdev_err(netdev,
			   "%s, priority is out of range\n", __func__);
		return;
	}

	*prio_type = 0;
	*bw_pct = 0;
	*up_map = 0;

	if (mlx5_query_port_prio_tc(mdev, priority, pgid))
		*pgid = 0;
}

static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
				      int pgid, u8 *bw_pct)
{
	struct ieee_ets ets;

	if (pgid >= CEE_DCBX_MAX_PGS) {
		netdev_err(netdev,
			   "%s, priority group is out of range\n", __func__);
		return;
	}

	mlx5e_dcbnl_ieee_getets(netdev, &ets);
	*bw_pct = ets.tc_tx_bw[pgid];
}

static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev,
				  int priority, u8 setting)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;

	if (priority >= CEE_DCBX_MAX_PRIO) {
		netdev_err(netdev,
			   "%s, priority is out of range\n", __func__);
		return;
	}

	if (setting > 1)
		return;

	cee_cfg->pfc_setting[priority] = setting;
}

static int
mlx5e_dcbnl_get_priority_pfc(struct net_device *netdev,
			     int priority, u8 *setting)
{
	struct ieee_pfc pfc;
	int err;

	err = mlx5e_dcbnl_ieee_getpfc(netdev, &pfc);

	if (err)
		*setting = 0;
	else
		*setting = (pfc.pfc_en >> priority) & 0x01;

	return err;
}

static void mlx5e_dcbnl_getpfccfg(struct net_device *netdev,
				  int priority, u8 *setting)
{
	if (priority >= CEE_DCBX_MAX_PRIO) {
		netdev_err(netdev,
			   "%s, priority is out of range\n", __func__);
		return;
	}

	if (!setting)
		return;

	mlx5e_dcbnl_get_priority_pfc(netdev, priority, setting);
}

static u8 mlx5e_dcbnl_getcap(struct net_device *netdev,
			     int capid, u8 *cap)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 rval = 0;

	switch (capid) {
	case DCB_CAP_ATTR_PG:
		*cap = true;
		break;
	case DCB_CAP_ATTR_PFC:
		*cap = true;
		break;
	case DCB_CAP_ATTR_UP2TC:
		*cap = false;
		break;
	case DCB_CAP_ATTR_PG_TCS:
		*cap = 1 << mlx5_max_tc(mdev);
		break;
	case DCB_CAP_ATTR_PFC_TCS:
		*cap = 1 << mlx5_max_tc(mdev);
		break;
	case DCB_CAP_ATTR_GSP:
		*cap = false;
		break;
	case DCB_CAP_ATTR_BCN:
		*cap = false;
		break;
	case DCB_CAP_ATTR_DCBX:
		*cap = priv->dcbx.cap |
		       DCB_CAP_DCBX_VER_CEE |
		       DCB_CAP_DCBX_VER_IEEE;
		break;
	default:
		*cap = 0;
		rval = 1;
		break;
	}

	return rval;
}

static int mlx5e_dcbnl_getnumtcs(struct net_device *netdev,
				 int tcs_id, u8 *num)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	switch (tcs_id) {
	case DCB_NUMTCS_ATTR_PG:
	case DCB_NUMTCS_ATTR_PFC:
		*num = mlx5_max_tc(mdev) + 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static u8 mlx5e_dcbnl_getpfcstate(struct net_device *netdev)
{
	struct ieee_pfc pfc;

	if (mlx5e_dcbnl_ieee_getpfc(netdev, &pfc))
		return MLX5E_CEE_STATE_DOWN;

	return pfc.pfc_en ? MLX5E_CEE_STATE_UP : MLX5E_CEE_STATE_DOWN;
}

static void mlx5e_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;

	if ((state != MLX5E_CEE_STATE_UP) && (state != MLX5E_CEE_STATE_DOWN))
		return;

	cee_cfg->pfc_enable = state;
}

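/* dcbnl buffer interface: getbuffer reports the priority-to-buffer mapping
 * and the configured size of each network buffer (the total excludes the
 * internal buffers), setbuffer programs a manual buffer configuration for
 * whatever actually changed.
 */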
static int mlx5e_dcbnl_getbuffer(struct net_device *dev,
				 struct dcbnl_buffer *dcb_buffer)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_port_buffer port_buffer;
	u8 buffer[MLX5E_MAX_PRIORITY];
	int i, err;

	if (!MLX5_BUFFER_SUPPORTED(mdev))
		return -EOPNOTSUPP;

	err = mlx5e_port_query_priority2buffer(mdev, buffer);
	if (err)
		return err;

	for (i = 0; i < MLX5E_MAX_PRIORITY; i++)
		dcb_buffer->prio2buffer[i] = buffer[i];

	err = mlx5e_port_query_buffer(priv, &port_buffer);
	if (err)
		return err;

	for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++)
		dcb_buffer->buffer_size[i] = port_buffer.buffer[i].size;
	dcb_buffer->total_size = port_buffer.port_buffer_size -
				 port_buffer.internal_buffers_size;

	return 0;
}

static int mlx5e_dcbnl_setbuffer(struct net_device *dev,
				 struct dcbnl_buffer *dcb_buffer)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_port_buffer port_buffer;
	u8 old_prio2buffer[MLX5E_MAX_PRIORITY];
	u32 *buffer_size = NULL;
	u8 *prio2buffer = NULL;
	u32 changed = 0;
	int i, err;

	if (!MLX5_BUFFER_SUPPORTED(mdev))
		return -EOPNOTSUPP;

	for (i = 0; i < DCBX_MAX_BUFFERS; i++)
		mlx5_core_dbg(mdev, "buffer[%d]=%d\n", i, dcb_buffer->buffer_size[i]);

	for (i = 0; i < MLX5E_MAX_PRIORITY; i++)
		mlx5_core_dbg(mdev, "priority %d buffer%d\n", i, dcb_buffer->prio2buffer[i]);

	err = mlx5e_port_query_priority2buffer(mdev, old_prio2buffer);
	if (err)
		return err;

	for (i = 0; i < MLX5E_MAX_PRIORITY; i++) {
		if (dcb_buffer->prio2buffer[i] != old_prio2buffer[i]) {
			changed |= MLX5E_PORT_BUFFER_PRIO2BUFFER;
			prio2buffer = dcb_buffer->prio2buffer;
			break;
		}
	}

	err = mlx5e_port_query_buffer(priv, &port_buffer);
	if (err)
		return err;

	for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
		if (port_buffer.buffer[i].size != dcb_buffer->buffer_size[i]) {
			changed |= MLX5E_PORT_BUFFER_SIZE;
			buffer_size = dcb_buffer->buffer_size;
			break;
		}
	}

	if (!changed)
		return 0;

	priv->dcbx.manual_buffer = true;
	err = mlx5e_port_manual_buffer_config(priv, changed, dev->mtu, NULL,
					      buffer_size, prio2buffer);
	return err;
}

static const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
	.ieee_getets     = mlx5e_dcbnl_ieee_getets,
	.ieee_setets     = mlx5e_dcbnl_ieee_setets,
	.ieee_getmaxrate = mlx5e_dcbnl_ieee_getmaxrate,
	.ieee_setmaxrate = mlx5e_dcbnl_ieee_setmaxrate,
	.ieee_getpfc     = mlx5e_dcbnl_ieee_getpfc,
	.ieee_setpfc     = mlx5e_dcbnl_ieee_setpfc,
	.ieee_setapp     = mlx5e_dcbnl_ieee_setapp,
	.ieee_delapp     = mlx5e_dcbnl_ieee_delapp,
	.getdcbx         = mlx5e_dcbnl_getdcbx,
	.setdcbx         = mlx5e_dcbnl_setdcbx,
	.dcbnl_getbuffer = mlx5e_dcbnl_getbuffer,
	.dcbnl_setbuffer = mlx5e_dcbnl_setbuffer,

/* CEE interfaces */
	.setall         = mlx5e_dcbnl_setall,
	.getstate       = mlx5e_dcbnl_getstate,
	.getpermhwaddr  = mlx5e_dcbnl_getpermhwaddr,

	.setpgtccfgtx   = mlx5e_dcbnl_setpgtccfgtx,
	.setpgbwgcfgtx  = mlx5e_dcbnl_setpgbwgcfgtx,
	.getpgtccfgtx   = mlx5e_dcbnl_getpgtccfgtx,
	.getpgbwgcfgtx  = mlx5e_dcbnl_getpgbwgcfgtx,

	.setpfccfg      = mlx5e_dcbnl_setpfccfg,
	.getpfccfg      = mlx5e_dcbnl_getpfccfg,
	.getcap         = mlx5e_dcbnl_getcap,
	.getnumtcs      = mlx5e_dcbnl_getnumtcs,
	.getpfcstate    = mlx5e_dcbnl_getpfcstate,
	.setpfcstate    = mlx5e_dcbnl_setpfcstate,
};

void mlx5e_dcbnl_build_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
		netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
}

static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
					enum mlx5_dcbx_oper_mode *mode)
{
	u32 out[MLX5_ST_SZ_DW(dcbx_param)];

	*mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;

	if (!mlx5_query_port_dcbx_param(priv->mdev, out))
		*mode = MLX5_GET(dcbx_param, out, version_oper);

	/* From the driver's point of view, we only care whether the mode
	 * is host (HOST) or non-host (AUTO).
	 */
	if (*mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
		*mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
}

static void mlx5e_ets_init(struct mlx5e_priv *priv)
{
	struct ieee_ets ets;
	int err;
	int i;

	if (!MLX5_CAP_GEN(priv->mdev, ets))
		return;

	memset(&ets, 0, sizeof(ets));
	ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
	for (i = 0; i < ets.ets_cap; i++) {
		ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
		ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
		ets.prio_tc[i] = i;
	}

	if (ets.ets_cap > 1) {
		/* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
		ets.prio_tc[0] = 1;
		ets.prio_tc[1] = 0;
	}

	err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
	if (err)
		netdev_err(priv->netdev,
			   "%s, Failed to init ETS: %d\n", __func__, err);
}

enum {
	INIT,
	DELETE,
};

static void mlx5e_dcbnl_dscp_app(struct mlx5e_priv *priv, int action)
{
	struct dcb_app temp;
	int i;

	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
		return;

	if (!MLX5_DSCP_SUPPORTED(priv->mdev))
		return;

	/* No SEL_DSCP entries in a non-DSCP trust state */
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_DSCP)
		return;

	temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
	for (i = 0; i < MLX5E_MAX_DSCP; i++) {
		temp.protocol = i;
		temp.priority = priv->dcbx_dp.dscp2prio[i];
		if (action == INIT)
			dcb_ieee_setapp(priv->netdev, &temp);
		else
			dcb_ieee_delapp(priv->netdev, &temp);
	}

	priv->dcbx.dscp_app_cnt = (action == INIT) ? MLX5E_MAX_DSCP : 0;
}

void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv)
{
	mlx5e_dcbnl_dscp_app(priv, INIT);
}

void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv)
{
	mlx5e_dcbnl_dscp_app(priv, DELETE);
}

static void mlx5e_params_calc_trust_tx_min_inline_mode(struct mlx5_core_dev *mdev,
							struct mlx5e_params *params,
							u8 trust_state)
{
	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
	if (trust_state == MLX5_QPTS_TRUST_DSCP &&
	    params->tx_min_inline_mode == MLX5_INLINE_MODE_L2)
		params->tx_min_inline_mode = MLX5_INLINE_MODE_IP;
}

static int mlx5e_update_trust_state_hw(struct mlx5e_priv *priv, void *context)
{
	u8 *trust_state = context;
	int err;

	err = mlx5_set_trust_state(priv->mdev, *trust_state);
	if (err)
		return err;
	WRITE_ONCE(priv->dcbx_dp.trust_state, *trust_state);

	return 0;
}

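/* Changing the trust state may require a different TX min inline mode
 * (DSCP trust needs at least the IP header inlined), so switch channel
 * parameters and only reset the channels when the inline mode actually
 * changes.
 */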
static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
{
	struct mlx5e_params new_params;
	bool reset = true;
	int err;

	mutex_lock(&priv->state_lock);

	new_params = priv->channels.params;
	mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_params,
						   trust_state);

	/* Skip if tx_min_inline is the same */
	if (new_params.tx_min_inline_mode == priv->channels.params.tx_min_inline_mode)
		reset = false;

	err = mlx5e_safe_switch_params(priv, &new_params,
				       mlx5e_update_trust_state_hw,
				       &trust_state, reset);

	mutex_unlock(&priv->state_lock);

	return err;
}

static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio)
{
	int err;

	err = mlx5_set_dscp2prio(priv->mdev, dscp, prio);
	if (err)
		return err;

	priv->dcbx_dp.dscp2prio[dscp] = prio;
	return err;
}

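/* Sync the driver's trust state and dscp2prio cache with FW at init time.
 * If FW reports PCP trust while stale DSCP app entries are still registered,
 * flush them before continuing.
 */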
static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 trust_state;
	int err;

	if (!MLX5_DSCP_SUPPORTED(mdev)) {
		WRITE_ONCE(priv->dcbx_dp.trust_state, MLX5_QPTS_TRUST_PCP);
		return 0;
	}

	err = mlx5_query_trust_state(priv->mdev, &trust_state);
	if (err)
		return err;
	WRITE_ONCE(priv->dcbx_dp.trust_state, trust_state);

	if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_PCP && priv->dcbx.dscp_app_cnt) {
		/*
		 * Align the driver state with the register state.
		 * Temporary state change is required to enable the app list reset.
		 */
		priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_DSCP;
		mlx5e_dcbnl_delete_app(priv);
		priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP;
	}

	mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &priv->channels.params,
						   priv->dcbx_dp.trust_state);

	err = mlx5_query_dscp2prio(priv->mdev, priv->dcbx_dp.dscp2prio);
	if (err)
		return err;

	return 0;
}

#define MLX5E_BUFFER_CELL_SHIFT 7

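/* The port buffer cell size is read from the SBCAM register; if SBCAM is
 * not supported or the access fails, fall back to the default cell size of
 * 128 bytes (1 << MLX5E_BUFFER_CELL_SHIFT).
 */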
static u16 mlx5e_query_port_buffers_cell_size(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 out[MLX5_ST_SZ_DW(sbcam_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(sbcam_reg)] = {};

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return (1 << MLX5E_BUFFER_CELL_SHIFT);

	if (mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
				 MLX5_REG_SBCAM, 0, 0))
		return (1 << MLX5E_BUFFER_CELL_SHIFT);

	return MLX5_GET(sbcam_reg, out, cap_cell_size);
}

void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
{
	struct mlx5e_dcbx *dcbx = &priv->dcbx;

	mlx5e_trust_initialize(priv);

	if (!MLX5_CAP_GEN(priv->mdev, qos))
		return;

	if (MLX5_CAP_GEN(priv->mdev, dcbx))
		mlx5e_dcbnl_query_dcbx_mode(priv, &dcbx->mode);

	priv->dcbx.cap = DCB_CAP_DCBX_VER_CEE |
			 DCB_CAP_DCBX_VER_IEEE;
	if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
		priv->dcbx.cap |= DCB_CAP_DCBX_HOST;

	priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv);
	priv->dcbx.manual_buffer = false;
	priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN;

	mlx5e_ets_init(priv);
}
