/*
 * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "lib/events.h"
#include "en.h"
#include "en_accel/ktls.h"
#include "en_accel/en_accel.h"
#include "en/ptp.h"
#include "en/port.h"

#ifdef CONFIG_PAGE_POOL_STATS
#include <net/page_pool/helpers.h>
#endif

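/* A profile exposes its stats groups via ->stats_grps and
 * ->stats_grps_num(); a profile that registers no groups may leave the
 * callback NULL, which is treated as zero groups.
 */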
static unsigned int stats_grps_num(struct mlx5e_priv *priv)
{
	return !priv->profile->stats_grps_num ? 0 :
		priv->profile->stats_grps_num(priv);
}

unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	unsigned int total = 0;
	int i;

	for (i = 0; i < num_stats_grps; i++)
		total += stats_grps[i]->get_num_stats(priv);

	return total;
}

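/* Note that the update paths below walk the groups in reverse
 * registration order, while the string/value fill paths walk them
 * forward; the forward fill order is what determines the layout of the
 * ethtool output.
 */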
void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = num_stats_grps - 1; i >= 0; i--)
		if (stats_grps[i]->update_stats &&
		    stats_grps[i]->update_stats_mask & MLX5E_NDO_UPDATE_STATS)
			stats_grps[i]->update_stats(priv);
}

void mlx5e_stats_update(struct mlx5e_priv *priv)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = num_stats_grps - 1; i >= 0; i--)
		if (stats_grps[i]->update_stats)
			stats_grps[i]->update_stats(priv);
}

void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i;

	for (i = 0; i < num_stats_grps; i++)
		idx = stats_grps[i]->fill_stats(priv, data, idx);
}

void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
{
	mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
	const unsigned int num_stats_grps = stats_grps_num(priv);
	int i, idx = 0;

	for (i = 0; i < num_stats_grps; i++)
		idx = stats_grps[i]->fill_strings(priv, data, idx);
}

/* Concrete NIC Stats */

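/* Each counter_desc entry pairs an ethtool string with the byte offset of
 * the backing field. MLX5E_DECLARE_STAT(type, fld) (see en_stats.h)
 * expands to roughly '#fld, offsetof(type, fld)', so for example the
 * first entry below reports mlx5e_sw_stats::rx_packets under the name
 * "rx_packets".
 */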
static const struct counter_desc sw_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_pkts) },

#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
#endif

	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_skbs) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_match_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
#ifdef CONFIG_MLX5_EN_ARFS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_add) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_request_in) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_request_out) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_expired) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
#endif
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
#ifdef CONFIG_PAGE_POOL_STATS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_fast) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow_high_order) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_empty) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_refill) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_waive) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cached) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cache_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_released_ref) },
#endif
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_retry) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) },
#endif
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_force_irq) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_cqes) },
};

#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw)
{
	return NUM_SW_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
{
	int i;

	for (i = 0; i < NUM_SW_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
{
	int i;

	for (i = 0; i < NUM_SW_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
	return idx;
}

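/* The helpers below fold the per-queue counters (RQ, SQ, XDP SQ, XSK and
 * channel stats) into the single aggregate struct mlx5e_sw_stats that is
 * reported to ethtool.
 */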
static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s,
						    struct mlx5e_xdpsq_stats *xdpsq_red_stats)
{
	s->tx_xdp_xmit += xdpsq_red_stats->xmit;
	s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
	s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
	s->tx_xdp_nops += xdpsq_red_stats->nops;
	s->tx_xdp_full += xdpsq_red_stats->full;
	s->tx_xdp_err += xdpsq_red_stats->err;
	s->tx_xdp_cqes += xdpsq_red_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xdpsq(struct mlx5e_sw_stats *s,
						  struct mlx5e_xdpsq_stats *xdpsq_stats)
{
	s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
	s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
	s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
	s->rx_xdp_tx_nops += xdpsq_stats->nops;
	s->rx_xdp_tx_full += xdpsq_stats->full;
	s->rx_xdp_tx_err += xdpsq_stats->err;
	s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xsksq(struct mlx5e_sw_stats *s,
						  struct mlx5e_xdpsq_stats *xsksq_stats)
{
	s->tx_xsk_xmit += xsksq_stats->xmit;
	s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
	s->tx_xsk_inlnw += xsksq_stats->inlnw;
	s->tx_xsk_full += xsksq_stats->full;
	s->tx_xsk_err += xsksq_stats->err;
	s->tx_xsk_cqes += xsksq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats *s,
						  struct mlx5e_rq_stats *xskrq_stats)
{
	s->rx_xsk_packets += xskrq_stats->packets;
	s->rx_xsk_bytes += xskrq_stats->bytes;
	s->rx_xsk_csum_complete += xskrq_stats->csum_complete;
	s->rx_xsk_csum_unnecessary += xskrq_stats->csum_unnecessary;
	s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
	s->rx_xsk_csum_none += xskrq_stats->csum_none;
	s->rx_xsk_ecn_mark += xskrq_stats->ecn_mark;
	s->rx_xsk_removed_vlan_packets += xskrq_stats->removed_vlan_packets;
	s->rx_xsk_xdp_drop += xskrq_stats->xdp_drop;
	s->rx_xsk_xdp_redirect += xskrq_stats->xdp_redirect;
	s->rx_xsk_wqe_err += xskrq_stats->wqe_err;
	s->rx_xsk_mpwqe_filler_cqes += xskrq_stats->mpwqe_filler_cqes;
	s->rx_xsk_mpwqe_filler_strides += xskrq_stats->mpwqe_filler_strides;
	s->rx_xsk_oversize_pkts_sw_drop += xskrq_stats->oversize_pkts_sw_drop;
	s->rx_xsk_buff_alloc_err += xskrq_stats->buff_alloc_err;
	s->rx_xsk_cqe_compress_blks += xskrq_stats->cqe_compress_blks;
	s->rx_xsk_cqe_compress_pkts += xskrq_stats->cqe_compress_pkts;
	s->rx_xsk_congst_umr += xskrq_stats->congst_umr;
}

static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
						     struct mlx5e_rq_stats *rq_stats)
{
	s->rx_packets += rq_stats->packets;
	s->rx_bytes += rq_stats->bytes;
	s->rx_lro_packets += rq_stats->lro_packets;
	s->rx_lro_bytes += rq_stats->lro_bytes;
	s->rx_gro_packets += rq_stats->gro_packets;
	s->rx_gro_bytes += rq_stats->gro_bytes;
	s->rx_gro_skbs += rq_stats->gro_skbs;
	s->rx_gro_match_packets += rq_stats->gro_match_packets;
	s->rx_gro_large_hds += rq_stats->gro_large_hds;
	s->rx_ecn_mark += rq_stats->ecn_mark;
	s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
	s->rx_csum_none += rq_stats->csum_none;
	s->rx_csum_complete += rq_stats->csum_complete;
	s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
	s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
	s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
	s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
	s->rx_xdp_drop += rq_stats->xdp_drop;
	s->rx_xdp_redirect += rq_stats->xdp_redirect;
	s->rx_wqe_err += rq_stats->wqe_err;
	s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
	s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
	s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
	s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
	s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
	s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
	s->rx_congst_umr += rq_stats->congst_umr;
#ifdef CONFIG_MLX5_EN_ARFS
	s->rx_arfs_add += rq_stats->arfs_add;
	s->rx_arfs_request_in += rq_stats->arfs_request_in;
	s->rx_arfs_request_out += rq_stats->arfs_request_out;
	s->rx_arfs_expired += rq_stats->arfs_expired;
	s->rx_arfs_err += rq_stats->arfs_err;
#endif
	s->rx_recover += rq_stats->recover;
#ifdef CONFIG_PAGE_POOL_STATS
	s->rx_pp_alloc_fast += rq_stats->pp_alloc_fast;
	s->rx_pp_alloc_slow += rq_stats->pp_alloc_slow;
	s->rx_pp_alloc_empty += rq_stats->pp_alloc_empty;
	s->rx_pp_alloc_refill += rq_stats->pp_alloc_refill;
	s->rx_pp_alloc_waive += rq_stats->pp_alloc_waive;
	s->rx_pp_alloc_slow_high_order += rq_stats->pp_alloc_slow_high_order;
	s->rx_pp_recycle_cached += rq_stats->pp_recycle_cached;
	s->rx_pp_recycle_cache_full += rq_stats->pp_recycle_cache_full;
	s->rx_pp_recycle_ring += rq_stats->pp_recycle_ring;
	s->rx_pp_recycle_ring_full += rq_stats->pp_recycle_ring_full;
	s->rx_pp_recycle_released_ref += rq_stats->pp_recycle_released_ref;
#endif
#ifdef CONFIG_MLX5_EN_TLS
	s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
	s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
	s->rx_tls_resync_req_pkt += rq_stats->tls_resync_req_pkt;
	s->rx_tls_resync_req_start += rq_stats->tls_resync_req_start;
	s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end;
	s->rx_tls_resync_req_skip += rq_stats->tls_resync_req_skip;
	s->rx_tls_resync_res_ok += rq_stats->tls_resync_res_ok;
	s->rx_tls_resync_res_retry += rq_stats->tls_resync_res_retry;
	s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip;
	s->rx_tls_err += rq_stats->tls_err;
#endif
}

static void mlx5e_stats_grp_sw_update_stats_ch_stats(struct mlx5e_sw_stats *s,
						     struct mlx5e_ch_stats *ch_stats)
{
	s->ch_events += ch_stats->events;
	s->ch_poll += ch_stats->poll;
	s->ch_arm += ch_stats->arm;
	s->ch_aff_change += ch_stats->aff_change;
	s->ch_force_irq += ch_stats->force_irq;
	s->ch_eq_rearm += ch_stats->eq_rearm;
}

static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
					       struct mlx5e_sq_stats *sq_stats)
{
	s->tx_packets += sq_stats->packets;
	s->tx_bytes += sq_stats->bytes;
	s->tx_tso_packets += sq_stats->tso_packets;
	s->tx_tso_bytes += sq_stats->tso_bytes;
	s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
	s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
	s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
	s->tx_nop += sq_stats->nop;
	s->tx_mpwqe_blks += sq_stats->mpwqe_blks;
	s->tx_mpwqe_pkts += sq_stats->mpwqe_pkts;
	s->tx_queue_stopped += sq_stats->stopped;
	s->tx_queue_wake += sq_stats->wake;
	s->tx_queue_dropped += sq_stats->dropped;
	s->tx_cqe_err += sq_stats->cqe_err;
	s->tx_recover += sq_stats->recover;
	s->tx_xmit_more += sq_stats->xmit_more;
	s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
	s->tx_csum_none += sq_stats->csum_none;
	s->tx_csum_partial += sq_stats->csum_partial;
#ifdef CONFIG_MLX5_EN_TLS
	s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
	s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes;
	s->tx_tls_ooo += sq_stats->tls_ooo;
	s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
	s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
	s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
	s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
	s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
	s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
#endif
	s->tx_cqes += sq_stats->cqes;
}

static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv,
						struct mlx5e_sw_stats *s)
{
	int i;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return;

	mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->ptp_stats.ch);

	if (priv->tx_ptp_opened) {
		for (i = 0; i < priv->max_opened_tc; i++) {
			mlx5e_stats_grp_sw_update_stats_sq(s, &priv->ptp_stats.sq[i]);

			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
			barrier();
		}
	}
	if (priv->rx_ptp_opened) {
		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &priv->ptp_stats.rq);

		/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
		barrier();
	}
}

static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
						struct mlx5e_sw_stats *s)
{
	struct mlx5e_sq_stats **stats;
	u16 max_qos_sqs;
	int i;

	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
	stats = READ_ONCE(priv->htb_qos_sq_stats);

	for (i = 0; i < max_qos_sqs; i++) {
		mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i]));

		/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
		barrier();
	}
}

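/* Unlike the accumulating helpers above, the page pool counters are
 * absolute snapshots fetched from the pool itself, hence the plain
 * assignments below rather than '+=' accumulation.
 */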
#ifdef CONFIG_PAGE_POOL_STATS
static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
{
	struct mlx5e_rq_stats *rq_stats = c->rq.stats;
	struct page_pool *pool = c->rq.page_pool;
	struct page_pool_stats stats = { 0 };

	if (!page_pool_get_stats(pool, &stats))
		return;

	rq_stats->pp_alloc_fast = stats.alloc_stats.fast;
	rq_stats->pp_alloc_slow = stats.alloc_stats.slow;
	rq_stats->pp_alloc_slow_high_order = stats.alloc_stats.slow_high_order;
	rq_stats->pp_alloc_empty = stats.alloc_stats.empty;
	rq_stats->pp_alloc_waive = stats.alloc_stats.waive;
	rq_stats->pp_alloc_refill = stats.alloc_stats.refill;

	rq_stats->pp_recycle_cached = stats.recycle_stats.cached;
	rq_stats->pp_recycle_cache_full = stats.recycle_stats.cache_full;
	rq_stats->pp_recycle_ring = stats.recycle_stats.ring;
	rq_stats->pp_recycle_ring_full = stats.recycle_stats.ring_full;
	rq_stats->pp_recycle_released_ref = stats.recycle_stats.released_refcnt;
}
#else
static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
{
}
#endif

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	int i;

	memset(s, 0, sizeof(*s));

	for (i = 0; i < priv->channels.num; i++) /* for active channels only */
		mlx5e_stats_update_stats_rq_page_pool(priv->channels.c[i]);

	for (i = 0; i < priv->stats_nch; i++) {
		struct mlx5e_channel_stats *channel_stats =
			priv->channel_stats[i];

		int j;

		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
		mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq);
		mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch);
		/* xdp redirect */
		mlx5e_stats_grp_sw_update_stats_xdp_red(s, &channel_stats->xdpsq);
		/* AF_XDP zero-copy */
		mlx5e_stats_grp_sw_update_stats_xskrq(s, &channel_stats->xskrq);
		mlx5e_stats_grp_sw_update_stats_xsksq(s, &channel_stats->xsksq);

		for (j = 0; j < priv->max_opened_tc; j++) {
			mlx5e_stats_grp_sw_update_stats_sq(s, &channel_stats->sq[j]);

			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
			barrier();
		}
	}
	mlx5e_stats_grp_sw_update_stats_ptp(priv, s);
	mlx5e_stats_grp_sw_update_stats_qos(priv, s);
}

static const struct counter_desc q_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};

static const struct counter_desc drop_rq_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
};

#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc)
#define NUM_DROP_RQ_COUNTERS ARRAY_SIZE(drop_rq_stats_desc)

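/* On socket-direct (multi-PF) netdevs there is one q counter per core
 * device; report the group if any of them was allocated.
 */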
static bool q_counter_any(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *pos;
	int i;

	mlx5_sd_for_each_dev(i, priv->mdev, pos)
		if (priv->q_counter[i++])
			return true;

	return false;
}

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
{
	int num_stats = 0;

	if (q_counter_any(priv))
		num_stats += NUM_Q_COUNTERS;

	if (priv->drop_rq_q_counter)
		num_stats += NUM_DROP_RQ_COUNTERS;

	return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
{
	int i;

	for (i = 0; i < NUM_Q_COUNTERS && q_counter_any(priv); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       q_stats_desc[i].format);

	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       drop_rq_stats_desc[i].format);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
{
	int i;

	for (i = 0; i < NUM_Q_COUNTERS && q_counter_any(priv); i++)
		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
						   q_stats_desc, i);
	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
						   drop_rq_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
{
	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
	struct mlx5_core_dev *pos;
	u32 rx_out_of_buffer = 0;
	int ret, i;

	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);

	mlx5_sd_for_each_dev(i, priv->mdev, pos) {
		if (priv->q_counter[i]) {
			MLX5_SET(query_q_counter_in, in, counter_set_id,
				 priv->q_counter[i]);
			ret = mlx5_cmd_exec_inout(pos, query_q_counter, in, out);
			if (!ret)
				rx_out_of_buffer += MLX5_GET(query_q_counter_out,
							     out, out_of_buffer);
		}
	}
	qcnt->rx_out_of_buffer = rx_out_of_buffer;

	if (priv->drop_rq_q_counter) {
		MLX5_SET(query_q_counter_in, in, counter_set_id,
			 priv->drop_rq_q_counter);
		ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
		if (!ret)
			qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out,
							    out, out_of_buffer);
	}
}

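/* VNIC_ENV_OFF() resolves to the byte offset of a counter inside the
 * query_vnic_env_out mailbox; each counter below is exposed only when
 * the matching capability bit (checked in the NUM_* macros) is set.
 */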
#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
static const struct counter_desc vnic_env_stats_steer_desc[] = {
	{ "rx_steer_missed_packets",
	  VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
};

static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
	{ "dev_internal_queue_oob",
	  VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
};

static const struct counter_desc vnic_env_stats_drop_desc[] = {
	{ "rx_oversize_pkts_buffer",
	  VNIC_ENV_OFF(vport_env.eth_wqe_too_small) },
};

#define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
	 ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
#define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
	 ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)
#define NUM_VNIC_ENV_DROP_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, eth_wqe_too_small) ? \
	 ARRAY_SIZE(vnic_env_stats_drop_desc) : 0)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
{
	return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
	       NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev) +
	       NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
{
	int i;

	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       vnic_env_stats_steer_desc[i].format);

	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       vnic_env_stats_dev_oob_desc[i].format);

	for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       vnic_env_stats_drop_desc[i].format);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
{
	int i;

	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
						  vnic_env_stats_steer_desc, i);

	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
		data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
						  vnic_env_stats_dev_oob_desc, i);

	for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
		data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
						  vnic_env_stats_drop_desc, i);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
{
	u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!mlx5e_stats_grp_vnic_env_num_stats(priv))
		return;

	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
	mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
}

#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
static const struct counter_desc vport_stats_desc[] = {
	{ "rx_vport_unicast_packets",
	  VPORT_COUNTER_OFF(received_eth_unicast.packets) },
	{ "rx_vport_unicast_bytes",
	  VPORT_COUNTER_OFF(received_eth_unicast.octets) },
	{ "tx_vport_unicast_packets",
	  VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
	{ "tx_vport_unicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
	{ "rx_vport_multicast_packets",
	  VPORT_COUNTER_OFF(received_eth_multicast.packets) },
	{ "rx_vport_multicast_bytes",
	  VPORT_COUNTER_OFF(received_eth_multicast.octets) },
	{ "tx_vport_multicast_packets",
	  VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
	{ "tx_vport_multicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
	{ "rx_vport_broadcast_packets",
	  VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
	{ "rx_vport_broadcast_bytes",
	  VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
	{ "tx_vport_broadcast_packets",
	  VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
	{ "tx_vport_broadcast_bytes",
	  VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
	{ "rx_vport_rdma_unicast_packets",
	  VPORT_COUNTER_OFF(received_ib_unicast.packets) },
	{ "rx_vport_rdma_unicast_bytes",
	  VPORT_COUNTER_OFF(received_ib_unicast.octets) },
	{ "tx_vport_rdma_unicast_packets",
	  VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
	{ "tx_vport_rdma_unicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
	{ "rx_vport_rdma_multicast_packets",
	  VPORT_COUNTER_OFF(received_ib_multicast.packets) },
	{ "rx_vport_rdma_multicast_bytes",
	  VPORT_COUNTER_OFF(received_ib_multicast.octets) },
	{ "tx_vport_rdma_multicast_packets",
	  VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
	{ "tx_vport_rdma_multicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};

static const struct counter_desc vport_loopback_stats_desc[] = {
	{ "vport_loopback_packets",
	  VPORT_COUNTER_OFF(local_loopback.packets) },
	{ "vport_loopback_bytes",
	  VPORT_COUNTER_OFF(local_loopback.octets) },
};

#define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc)
#define NUM_VPORT_LOOPBACK_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, vport_counter_local_loopback) ? \
	 ARRAY_SIZE(vport_loopback_stats_desc) : 0)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
{
	return NUM_VPORT_COUNTERS +
	       NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);

	for (i = 0; i < NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       vport_loopback_stats_desc[i].format);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
						  vport_stats_desc, i);

	for (i = 0; i < NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev); i++)
		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
						  vport_loopback_stats_desc, i);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
{
	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
	struct mlx5_core_dev *mdev = priv->mdev;

	MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	mlx5_cmd_exec_inout(mdev, query_vport_counter, in, out);
}

#define PPORT_802_3_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_802_3_stats_desc[] = {
	{ "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
	{ "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
	{ "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
	{ "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
	{ "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
	{ "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
	{ "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
	{ "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
	{ "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
	{ "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
	{ "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
	{ "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
	{ "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
	{ "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
	{ "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
	{ "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
	{ "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
	{ "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};

#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3)
{
	return NUM_PPORT_802_3_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
						  pport_802_3_stats_desc, i);
	return idx;
}

#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
	(MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)

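/* When the device exposes the PCAM register, trust its ppcnt bit;
 * devices without PCAM are assumed to support the basic PPCNT groups.
 */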
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->IEEE_802_3_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define MLX5E_READ_CTR64_BE_F(ptr, set, c) \
	be64_to_cpu(*(__be64 *)((char *)ptr + \
		MLX5_BYTE_OFF(ppcnt_reg, \
			      counter_set.set.c##_high)))

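/* PPCNT 64-bit counters are laid out as two consecutive 32-bit fields,
 * <name>_high followed by <name>_low, so reading eight bytes starting at
 * the _high field yields the full big-endian 64-bit value.
 */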
static int mlx5e_stats_get_ieee(struct mlx5_core_dev *mdev,
				u32 *ppcnt_ieee_802_3)
{
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return -EOPNOTSUPP;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	return mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3,
				    sz, MLX5_REG_PPCNT, 0, 0);
}

void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
			   struct ethtool_pause_stats *pause_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	pause_stats->tx_pause_frames =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_pause_mac_ctrl_frames_transmitted);
	pause_stats->rx_pause_frames =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_pause_mac_ctrl_frames_received);
}

void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv,
			     struct ethtool_eth_phy_stats *phy_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	phy_stats->SymbolErrorDuringCarrier =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_symbol_error_during_carrier);
}

void mlx5e_stats_eth_mac_get(struct mlx5e_priv *priv,
			     struct ethtool_eth_mac_stats *mac_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

#define RD(name) \
	MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3, \
			      eth_802_3_cntrs_grp_data_layout, \
			      name)

	mac_stats->FramesTransmittedOK = RD(a_frames_transmitted_ok);
	mac_stats->FramesReceivedOK = RD(a_frames_received_ok);
	mac_stats->FrameCheckSequenceErrors = RD(a_frame_check_sequence_errors);
	mac_stats->OctetsTransmittedOK = RD(a_octets_transmitted_ok);
	mac_stats->OctetsReceivedOK = RD(a_octets_received_ok);
	mac_stats->MulticastFramesXmittedOK = RD(a_multicast_frames_xmitted_ok);
	mac_stats->BroadcastFramesXmittedOK = RD(a_broadcast_frames_xmitted_ok);
	mac_stats->MulticastFramesReceivedOK = RD(a_multicast_frames_received_ok);
	mac_stats->BroadcastFramesReceivedOK = RD(a_broadcast_frames_received_ok);
	mac_stats->InRangeLengthErrors = RD(a_in_range_length_errors);
	mac_stats->OutOfRangeLengthField = RD(a_out_of_range_length_field);
	mac_stats->FrameTooLongErrors = RD(a_frame_too_long_errors);
#undef RD
}

void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv,
			      struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
		return;

	ctrl_stats->MACControlFramesTransmitted =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_mac_control_frames_transmitted);
	ctrl_stats->MACControlFramesReceived =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_mac_control_frames_received);
	ctrl_stats->UnsupportedOpcodesReceived =
		MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
				      eth_802_3_cntrs_grp_data_layout,
				      a_unsupported_opcodes_received);
}

#define PPORT_2863_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2863_stats_desc[] = {
	{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
	{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
	{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};

#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863)
{
	return NUM_PPORT_2863_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
						  pport_2863_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2863_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_2819_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2819_stats_desc[] = {
	{ "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
	{ "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
	{ "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
	{ "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
	{ "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
	{ "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
	{ "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
	{ "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
	{ "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
	{ "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
	{ "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
	{ "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
	{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};

#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819)
{
	return NUM_PPORT_2819_COUNTERS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
						  pport_2819_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2819_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

static const struct ethtool_rmon_hist_range mlx5e_rmon_ranges[] = {
	{ 0, 64 },
	{ 65, 127 },
	{ 128, 255 },
	{ 256, 511 },
	{ 512, 1023 },
	{ 1024, 1518 },
	{ 1519, 2047 },
	{ 2048, 4095 },
	{ 4096, 8191 },
	{ 8192, 10239 },
	{}
};
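/* The empty entry terminates the table; slot N of rmon->hist[], filled in
 * by mlx5e_stats_rmon_get() below, corresponds to mlx5e_rmon_ranges[N].
 */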

void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
			  struct ethtool_rmon_stats *rmon,
			  const struct ethtool_rmon_hist_range **ranges)
{
	u32 ppcnt_RFC_2819_counters[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	if (mlx5_core_access_reg(mdev, in, sz, ppcnt_RFC_2819_counters,
				 sz, MLX5_REG_PPCNT, 0, 0))
		return;

#define RD(name) \
	MLX5E_READ_CTR64_BE_F(ppcnt_RFC_2819_counters, \
			      eth_2819_cntrs_grp_data_layout, \
			      name)

	rmon->undersize_pkts = RD(ether_stats_undersize_pkts);
	rmon->fragments = RD(ether_stats_fragments);
	rmon->jabbers = RD(ether_stats_jabbers);

	rmon->hist[0] = RD(ether_stats_pkts64octets);
	rmon->hist[1] = RD(ether_stats_pkts65to127octets);
	rmon->hist[2] = RD(ether_stats_pkts128to255octets);
	rmon->hist[3] = RD(ether_stats_pkts256to511octets);
	rmon->hist[4] = RD(ether_stats_pkts512to1023octets);
	rmon->hist[5] = RD(ether_stats_pkts1024to1518octets);
	rmon->hist[6] = RD(ether_stats_pkts1519to2047octets);
	rmon->hist[7] = RD(ether_stats_pkts2048to4095octets);
	rmon->hist[8] = RD(ether_stats_pkts4096to8191octets);
	rmon->hist[9] = RD(ether_stats_pkts8192to10239octets);
#undef RD

	*ranges = mlx5e_rmon_ranges;
}

#define PPORT_PHY_STATISTICAL_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.phys_layer_statistical_cntrs.c##_high)
static const struct counter_desc pport_phy_statistical_stats_desc[] = {
	{ "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
	{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};

static const struct counter_desc
pport_phy_statistical_err_lanes_stats_desc[] = {
	{ "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
	{ "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
	{ "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
	{ "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
};

#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int num_stats;

	/* "1" for link_down_events special counter */
	num_stats = 1;

	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
		     NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;

	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
		     NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;

	return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i;

	strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return idx;

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       pport_phy_statistical_stats_desc[i].format);

	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pport_phy_statistical_err_lanes_stats_desc[i].format);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i;

	/* link_down_events_phy has special handling since it is not stored in __be64 format */
	data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
			       counter_set.phys_layer_cntrs.link_down_events);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return idx;

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
		data[idx++] =
			MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
					    pport_phy_statistical_stats_desc, i);

	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
						    pport_phy_statistical_err_lanes_stats_desc,
						    i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->phy_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return;

	out = pstats->phy_statistical_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

void mlx5e_get_link_ext_stats(struct net_device *dev,
			      struct ethtool_link_ext_stats *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(priv->mdev, in, sz, out,
			     MLX5_ST_SZ_BYTES(ppcnt_reg), MLX5_REG_PPCNT, 0, 0);

	stats->link_down_events = MLX5_GET(ppcnt_reg, out,
					   counter_set.phys_layer_cntrs.link_down_events);
}

static int fec_num_lanes(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(pmlp_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {};
	int err;

	MLX5_SET(pmlp_reg, in, local_port, 1);
	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
				   MLX5_REG_PMLP, 0, 0);
	if (err)
		return 0;

	return MLX5_GET(pmlp_reg, out, width);
}

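/* The active FEC mode is reported as a one-hot bitmask; find_first_bit()
 * below maps it to the corresponding MLX5E_FEC_* bit index. A failed
 * query is treated as no FEC.
 */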
static int fec_active_mode(struct mlx5_core_dev *mdev)
{
	unsigned long fec_active_long;
	u32 fec_active;

	if (mlx5e_get_fec_mode(mdev, &fec_active, NULL))
		return MLX5E_FEC_NOFEC;

	fec_active_long = fec_active;
	return find_first_bit(&fec_active_long, sizeof(unsigned long) * BITS_PER_BYTE);
}

#define MLX5E_STATS_SET_FEC_BLOCK(idx) ({ \
	fec_stats->corrected_blocks.lanes[(idx)] = \
		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
				      fc_fec_corrected_blocks_lane##idx); \
	fec_stats->uncorrectable_blocks.lanes[(idx)] = \
		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
				      fc_fec_uncorrectable_blocks_lane##idx); \
})

static void fec_set_fc_stats(struct ethtool_fec_stats *fec_stats,
			     u32 *ppcnt, u8 lanes)
{
	if (lanes > 3) { /* 4 lanes */
		MLX5E_STATS_SET_FEC_BLOCK(3);
		MLX5E_STATS_SET_FEC_BLOCK(2);
	}
	if (lanes > 1) /* 2 lanes */
		MLX5E_STATS_SET_FEC_BLOCK(1);
	if (lanes > 0) /* 1 lane */
		MLX5E_STATS_SET_FEC_BLOCK(0);
}

static void fec_set_rs_stats(struct ethtool_fec_stats *fec_stats, u32 *ppcnt)
{
	fec_stats->corrected_blocks.total =
		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
				      rs_fec_corrected_blocks);
	fec_stats->uncorrectable_blocks.total =
		MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
				      rs_fec_uncorrectable_blocks);
}

static void fec_set_block_stats(struct mlx5e_priv *priv,
				struct ethtool_fec_stats *fec_stats)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int mode = fec_active_mode(mdev);

	if (mode == MLX5E_FEC_NOFEC)
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	if (mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0))
		return;

	switch (mode) {
	case MLX5E_FEC_RS_528_514:
	case MLX5E_FEC_RS_544_514:
	case MLX5E_FEC_LLRS_272_257_1:
		fec_set_rs_stats(fec_stats, out);
		return;
	case MLX5E_FEC_FIRECODE:
		fec_set_fc_stats(fec_stats, out, fec_num_lanes(mdev));
	}
}

static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
					 struct ethtool_fec_stats *fec_stats)
{
	u32 ppcnt_phy_statistical[MLX5_ST_SZ_DW(ppcnt_reg)];
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
	if (mlx5_core_access_reg(mdev, in, sz, ppcnt_phy_statistical,
				 sz, MLX5_REG_PPCNT, 0, 0))
		return;

	fec_stats->corrected_bits.total =
		MLX5E_READ_CTR64_BE_F(ppcnt_phy_statistical,
				      phys_layer_statistical_cntrs,
				      phy_corrected_bits);
}

void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
			 struct ethtool_fec_stats *fec_stats)
{
	if (!MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group))
		return;

	fec_set_corrected_bits_total(priv, fec_stats);
	fec_set_block_stats(priv, fec_stats);
}

#define PPORT_ETH_EXT_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_eth_ext_stats_desc[] = {
	{ "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
};

#define NUM_PPORT_ETH_EXT_COUNTERS ARRAY_SIZE(pport_eth_ext_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext)
{
	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		return NUM_PPORT_ETH_EXT_COUNTERS;

	return 0;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
{
	int i;

	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pport_eth_ext_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
{
	int i;

	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
						    pport_eth_ext_stats_desc, i);
	return idx;
}
1455
1456static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
1457{
1458 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1459 struct mlx5_core_dev *mdev = priv->mdev;
1460 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1461 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1462 void *out;
1463
1464 if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
1465 return;
1466
1467 MLX5_SET(ppcnt_reg, in, local_port, 1);
1468 out = pstats->eth_ext_counters;
1469 MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
1470 mlx5_core_access_reg(dev: mdev, data_in: in, size_in: sz, data_out: out, size_out: sz, reg_num: MLX5_REG_PPCNT, arg: 0, write: 0);
1471}
1472
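/* PCIe counters are queried through the MPCNT register. Each sub-group
 * below is gated by its own MCAM feature bit, so the num_stats, string
 * and value callbacks must apply the same predicates in the same order.
 */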
#define PCIE_PERF_OFF(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
static const struct counter_desc pcie_perf_stats_desc[] = {
	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
};

#define PCIE_PERF_OFF64(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pcie_perf_stats_desc64[] = {
	{ "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
};

static const struct counter_desc pcie_perf_stall_stats_desc[] = {
	{ "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
	{ "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
	{ "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
	{ "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
};

#define NUM_PCIE_PERF_COUNTERS ARRAY_SIZE(pcie_perf_stats_desc)
#define NUM_PCIE_PERF_COUNTERS64 ARRAY_SIZE(pcie_perf_stats_desc64)
#define NUM_PCIE_PERF_STALL_COUNTERS ARRAY_SIZE(pcie_perf_stall_stats_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie)
{
	int num_stats = 0;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		num_stats += NUM_PCIE_PERF_COUNTERS;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		num_stats += NUM_PCIE_PERF_COUNTERS64;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		num_stats += NUM_PCIE_PERF_STALL_COUNTERS;

	return num_stats;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stats_desc[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stats_desc64[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stall_stats_desc[i].format);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc64, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stall_stats_desc, i);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
{
	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
	void *out;

	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
		return;

	out = pcie_stats->pcie_perf_counters;
	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
}

#define PPORT_PER_TC_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_prio_grp_data_layout.c##_high)

static const struct counter_desc pport_per_tc_prio_stats_desc[] = {
	{ "rx_prio%d_buf_discard", PPORT_PER_TC_PRIO_OFF(no_buffer_discard_uc) },
};

#define NUM_PPORT_PER_TC_PRIO_COUNTERS ARRAY_SIZE(pport_per_tc_prio_stats_desc)

#define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high)

static const struct counter_desc pport_per_tc_congest_prio_stats_desc[] = {
	{ "rx_prio%d_cong_discard", PPORT_PER_TC_CONGEST_PRIO_OFF(wred_discard) },
	{ "rx_prio%d_marked", PPORT_PER_TC_CONGEST_PRIO_OFF(ecn_marked_tc) },
};

#define NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS \
	ARRAY_SIZE(pport_per_tc_congest_prio_stats_desc)

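/* The per-TC and per-TC-congestion groups are queried once per priority,
 * with the prio_tc field of the PPCNT request selecting the traffic
 * class.
 */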
static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return 0;

	return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i, prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return idx;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_tc_prio_stats_desc[i].format, prio);
		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_tc_congest_prio_stats_desc[i].format, prio);
	}

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
{
	struct mlx5e_pport_stats *pport = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	int i, prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return idx;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&pport->per_tc_prio_counters[prio],
						    pport_per_tc_prio_stats_desc, i);
		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&pport->per_tc_congest_prio_counters[prio],
						    pport_per_tc_congest_prio_stats_desc, i);
	}

	return idx;
}

static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;
	int prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return;

	MLX5_SET(ppcnt_reg, in, pnat, 2);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_tc_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}
}

static int mlx5e_grp_per_tc_congest_prio_get_num_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return 0;

	return NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS * NUM_PPORT_PRIO;
}

static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;
	int prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return;

	MLX5_SET(ppcnt_reg, in, pnat, 2);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_tc_congest_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}
}

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest)
{
	return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
	       mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest)
{
	mlx5e_grp_per_tc_prio_update_stats(priv);
	mlx5e_grp_per_tc_congest_prio_update_stats(priv);
}

#define PPORT_PER_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_prio_grp_data_layout.c##_high)
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
	{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
	{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
	{ "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) },
	{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
	{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};

#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS ARRAY_SIZE(pport_per_prio_traffic_stats_desc)

static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
{
	return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
}

static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
						   u8 *data,
						   int idx)
{
	int i, prio;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_traffic_stats_desc[i].format, prio);
	}

	return idx;
}

static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
						 u64 *data,
						 int idx)
{
	int i, prio;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
						    pport_per_prio_traffic_stats_desc, i);
	}

	return idx;
}

static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
	/* %s is "global" or "prio{i}" */
	{ "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
	{ "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
	{ "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
	{ "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};

static const struct counter_desc pport_pfc_stall_stats_desc[] = {
	{ "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
	{ "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
};

#define NUM_PPORT_PER_PRIO_PFC_COUNTERS ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
#define NUM_PPORT_PFC_STALL_COUNTERS(priv) (ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
					    MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
					    MLX5_CAP_DEBUG((priv)->mdev, stall_detect))

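/* Returns a bitmap of the priorities on which PFC is enabled (Rx or Tx),
 * or 0 on error and on non-Ethernet ports.
 */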
static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 pfc_en_tx;
	u8 pfc_en_rx;
	int err;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return 0;

	err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);

	return err ? 0 : pfc_en_tx | pfc_en_rx;
}

static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 rx_pause;
	u32 tx_pause;
	int err;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return false;

	err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);

	return err ? false : rx_pause | tx_pause;
}

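/* One set of per-prio PFC counters is exposed for each PFC-enabled
 * priority (hweight8 of the bitmap), plus one "global" set when plain
 * pause is enabled, plus the pause-storm stall counters when supported.
 */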
static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
{
	return (mlx5e_query_global_pause_combined(priv) +
		hweight8(mlx5e_query_pfc_combined(priv))) *
		NUM_PPORT_PER_PRIO_PFC_COUNTERS +
		NUM_PPORT_PFC_STALL_COUNTERS(priv);
}

static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
					       u8 *data,
					       int idx)
{
	unsigned long pfc_combined;
	int i, prio;

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			char pfc_string[ETH_GSTRING_LEN];

			snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_pfc_stats_desc[i].format, pfc_string);
		}
	}

	if (mlx5e_query_global_pause_combined(priv)) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_pfc_stats_desc[i].format, "global");
		}
	}

	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       pport_pfc_stall_stats_desc[i].format);

	return idx;
}

static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
					     u64 *data,
					     int idx)
{
	unsigned long pfc_combined;
	int i, prio;

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
						    pport_per_prio_pfc_stats_desc, i);
		}
	}

	if (mlx5e_query_global_pause_combined(priv)) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
						    pport_per_prio_pfc_stats_desc, i);
		}
	}

	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
						  pport_pfc_stall_stats_desc, i);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
{
	return mlx5e_grp_per_prio_traffic_get_num_stats() +
	       mlx5e_grp_per_prio_pfc_get_num_stats(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
{
	idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
	idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
{
	idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
	idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int prio;
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz,
				     MLX5_REG_PPCNT, 0, 0);
	}
}

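/* Port module event counters are plain u64 arrays indexed by event type,
 * hence the sizeof(u64) * <event> byte offsets below.
 */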
static const struct counter_desc mlx5e_pme_status_desc[] = {
	{ "module_unplug", sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
};

static const struct counter_desc mlx5e_pme_error_desc[] = {
	{ "module_bus_stuck", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
	{ "module_high_temp", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
	{ "module_bad_shorted", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
};

#define NUM_PME_STATUS_STATS ARRAY_SIZE(mlx5e_pme_status_desc)
#define NUM_PME_ERR_STATS ARRAY_SIZE(mlx5e_pme_error_desc)

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme)
{
	return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
{
	int i;

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
{
	struct mlx5_pme_stats pme_stats;
	int i;

	mlx5_get_pme_stats(priv->mdev, &pme_stats);

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
						   mlx5e_pme_status_desc, i);

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
						   mlx5e_pme_error_desc, i);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
{
	return mlx5e_ktls_get_count(priv);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
{
	return idx + mlx5e_ktls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
{
	return idx + mlx5e_ktls_get_stats(priv, data + idx);
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }

static const struct counter_desc rq_stats_desc[] = {
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_skbs) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_match_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
#ifdef CONFIG_MLX5_EN_ARFS
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_add) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_request_in) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_request_out) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_expired) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
#endif
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
#ifdef CONFIG_PAGE_POOL_STATS
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_fast) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow_high_order) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_empty) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_refill) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_waive) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cached) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cache_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_released_ref) },
#endif
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_retry) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) },
#endif
};

static const struct counter_desc sq_stats_desc[] = {
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

static const struct counter_desc rq_xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

static const struct counter_desc xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

static const struct counter_desc xskrq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
};

static const struct counter_desc xsksq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

static const struct counter_desc ch_stats_desc[] = {
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, force_irq) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};

static const struct counter_desc ptp_sq_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

static const struct counter_desc ptp_ch_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, events) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, poll) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, arm) },
	{ MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};

static const struct counter_desc ptp_cq_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, cqe) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
	{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, late_cqe) },
};

static const struct counter_desc ptp_rq_stats_desc[] = {
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_packets) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_bytes) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, recover) },
};

static const struct counter_desc qos_sq_stats_desc[] = {
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
#define NUM_XDPSQ_STATS ARRAY_SIZE(xdpsq_stats_desc)
#define NUM_RQ_XDPSQ_STATS ARRAY_SIZE(rq_xdpsq_stats_desc)
#define NUM_XSKRQ_STATS ARRAY_SIZE(xskrq_stats_desc)
#define NUM_XSKSQ_STATS ARRAY_SIZE(xsksq_stats_desc)
#define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc)
#define NUM_PTP_SQ_STATS ARRAY_SIZE(ptp_sq_stats_desc)
#define NUM_PTP_CH_STATS ARRAY_SIZE(ptp_ch_stats_desc)
#define NUM_PTP_CQ_STATS ARRAY_SIZE(ptp_cq_stats_desc)
#define NUM_PTP_RQ_STATS ARRAY_SIZE(ptp_rq_stats_desc)
#define NUM_QOS_SQ_STATS ARRAY_SIZE(qos_sq_stats_desc)

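/* HTB QoS SQ stats. Reading htb_max_qos_sqs with acquire semantics
 * guarantees that the htb_qos_sq_stats pointer array holds at least that
 * many initialized entries.
 */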
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
{
	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb_max_qos_sqs);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
{
	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	u16 max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
	int i, qid;

	for (qid = 0; qid < max_qos_sqs; qid++)
		for (i = 0; i < NUM_QOS_SQ_STATS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				qos_sq_stats_desc[i].format, qid);

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
{
	struct mlx5e_sq_stats **stats;
	u16 max_qos_sqs;
	int i, qid;

	/* Pairs with smp_store_release in mlx5e_open_qos_sq. */
	max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
	stats = READ_ONCE(priv->htb_qos_sq_stats);

	for (qid = 0; qid < max_qos_sqs; qid++) {
		struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);

		for (i = 0; i < NUM_QOS_SQ_STATS; i++)
			data[idx++] = MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i);
	}

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; }

static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)
{
	int num = NUM_PTP_CH_STATS;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return 0;

	if (priv->tx_ptp_opened)
		num += (NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc;
	if (priv->rx_ptp_opened)
		num += NUM_PTP_RQ_STATS;

	return num;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
{
	int i, tc;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return idx;

	for (i = 0; i < NUM_PTP_CH_STATS; i++)
		sprintf(data + (idx++) * ETH_GSTRING_LEN,
			"%s", ptp_ch_stats_desc[i].format);

	if (priv->tx_ptp_opened) {
		for (tc = 0; tc < priv->max_opened_tc; tc++)
			for (i = 0; i < NUM_PTP_SQ_STATS; i++)
				sprintf(data + (idx++) * ETH_GSTRING_LEN,
					ptp_sq_stats_desc[i].format, tc);

		for (tc = 0; tc < priv->max_opened_tc; tc++)
			for (i = 0; i < NUM_PTP_CQ_STATS; i++)
				sprintf(data + (idx++) * ETH_GSTRING_LEN,
					ptp_cq_stats_desc[i].format, tc);
	}
	if (priv->rx_ptp_opened) {
		for (i = 0; i < NUM_PTP_RQ_STATS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				ptp_rq_stats_desc[i].format, MLX5E_PTP_CHANNEL_IX);
	}
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
{
	int i, tc;

	if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
		return idx;

	for (i = 0; i < NUM_PTP_CH_STATS; i++)
		data[idx++] =
			MLX5E_READ_CTR64_CPU(&priv->ptp_stats.ch,
					     ptp_ch_stats_desc, i);

	if (priv->tx_ptp_opened) {
		for (tc = 0; tc < priv->max_opened_tc; tc++)
			for (i = 0; i < NUM_PTP_SQ_STATS; i++)
				data[idx++] =
					MLX5E_READ_CTR64_CPU(&priv->ptp_stats.sq[tc],
							     ptp_sq_stats_desc, i);

		for (tc = 0; tc < priv->max_opened_tc; tc++)
			for (i = 0; i < NUM_PTP_CQ_STATS; i++)
				data[idx++] =
					MLX5E_READ_CTR64_CPU(&priv->ptp_stats.cq[tc],
							     ptp_cq_stats_desc, i);
	}
	if (priv->rx_ptp_opened) {
		for (i = 0; i < NUM_PTP_RQ_STATS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->ptp_stats.rq,
						     ptp_rq_stats_desc, i);
	}
	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }

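/* Channel counters are gated on priv->xsk.ever_used (0 or 1) rather than
 * on the current XSK state, so the ethtool string set stays stable once
 * XSK has been enabled at least once.
 */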
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
{
	int max_nch = priv->stats_nch;

	return (NUM_RQ_STATS * max_nch) +
	       (NUM_CH_STATS * max_nch) +
	       (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
	       (NUM_RQ_XDPSQ_STATS * max_nch) +
	       (NUM_XDPSQ_STATS * max_nch) +
	       (NUM_XSKRQ_STATS * max_nch * priv->xsk.ever_used) +
	       (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
{
	bool is_xsk = priv->xsk.ever_used;
	int max_nch = priv->stats_nch;
	int i, j, tc;

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_CH_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				ch_stats_desc[j].format, i);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_RQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				rq_stats_desc[j].format, i);
		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xskrq_stats_desc[j].format, i);
		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				rq_xdpsq_stats_desc[j].format, i);
	}

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < max_nch; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				sprintf(data + (idx++) * ETH_GSTRING_LEN,
					sq_stats_desc[j].format,
					i + tc * max_nch);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xsksq_stats_desc[j].format, i);
		for (j = 0; j < NUM_XDPSQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xdpsq_stats_desc[j].format, i);
	}

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
{
	bool is_xsk = priv->xsk.ever_used;
	int max_nch = priv->stats_nch;
	int i, j, tc;

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_CH_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->ch,
						     ch_stats_desc, j);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_RQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq,
						     rq_stats_desc, j);
		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xskrq,
						     xskrq_stats_desc, j);
		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq_xdpsq,
						     rq_xdpsq_stats_desc, j);
	}

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < max_nch; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				data[idx++] =
					MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->sq[tc],
							     sq_stats_desc, j);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xsksq,
						     xsksq_stats_desc, j);
		for (j = 0; j < NUM_XDPSQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xdpsq,
						     xdpsq_stats_desc, j);
	}

	return idx;
}

static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; }

MLX5E_DEFINE_STATS_GRP(sw, 0);
MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(vnic_env, 0);
MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS);
MLX5E_DEFINE_STATS_GRP(2863, 0);
MLX5E_DEFINE_STATS_GRP(2819, 0);
MLX5E_DEFINE_STATS_GRP(phy, 0);
MLX5E_DEFINE_STATS_GRP(pcie, 0);
MLX5E_DEFINE_STATS_GRP(per_prio, 0);
MLX5E_DEFINE_STATS_GRP(pme, 0);
MLX5E_DEFINE_STATS_GRP(channels, 0);
MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
static MLX5E_DEFINE_STATS_GRP(tls, 0);
MLX5E_DEFINE_STATS_GRP(ptp, 0);
static MLX5E_DEFINE_STATS_GRP(qos, 0);

/* The stats groups are listed in the opposite order to that of the
 * update_stats() calls.
 */
mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
	&MLX5E_STATS_GRP(sw),
	&MLX5E_STATS_GRP(qcnt),
	&MLX5E_STATS_GRP(vnic_env),
	&MLX5E_STATS_GRP(vport),
	&MLX5E_STATS_GRP(802_3),
	&MLX5E_STATS_GRP(2863),
	&MLX5E_STATS_GRP(2819),
	&MLX5E_STATS_GRP(phy),
	&MLX5E_STATS_GRP(eth_ext),
	&MLX5E_STATS_GRP(pcie),
	&MLX5E_STATS_GRP(per_prio),
	&MLX5E_STATS_GRP(pme),
#ifdef CONFIG_MLX5_EN_IPSEC
	&MLX5E_STATS_GRP(ipsec_hw),
	&MLX5E_STATS_GRP(ipsec_sw),
#endif
	&MLX5E_STATS_GRP(tls),
	&MLX5E_STATS_GRP(channels),
	&MLX5E_STATS_GRP(per_port_buff_congest),
	&MLX5E_STATS_GRP(ptp),
	&MLX5E_STATS_GRP(qos),
#ifdef CONFIG_MLX5_MACSEC
	&MLX5E_STATS_GRP(macsec_hw),
#endif
};

unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
{
	return ARRAY_SIZE(mlx5e_nic_stats_grps);
}
