/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Statistics
 *
 */

#include <linux/types.h>
#include <linux/pci.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_pma.h>

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"
#include "bnxt_re.h"
#include "hw_counters.h"
52
53static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
54 [BNXT_RE_RX_PKTS].name = "rx_pkts",
55 [BNXT_RE_RX_BYTES].name = "rx_bytes",
56 [BNXT_RE_TX_PKTS].name = "tx_pkts",
57 [BNXT_RE_TX_BYTES].name = "tx_bytes",
58 [BNXT_RE_RECOVERABLE_ERRORS].name = "recoverable_errors",
59 [BNXT_RE_TX_ERRORS].name = "tx_roce_errors",
60 [BNXT_RE_TX_DISCARDS].name = "tx_roce_discards",
61 [BNXT_RE_RX_ERRORS].name = "rx_roce_errors",
62 [BNXT_RE_RX_DISCARDS].name = "rx_roce_discards",
63 [BNXT_RE_TO_RETRANSMITS].name = "local_ack_timeout_err",
64 [BNXT_RE_SEQ_ERR_NAKS_RCVD].name = "packet_seq_err",
65 [BNXT_RE_MAX_RETRY_EXCEEDED].name = "max_retry_exceeded",
66 [BNXT_RE_RNR_NAKS_RCVD].name = "rnr_nak_retry_err",
67 [BNXT_RE_MISSING_RESP].name = "implied_nak_seq_err",
68 [BNXT_RE_UNRECOVERABLE_ERR].name = "unrecoverable_err",
69 [BNXT_RE_BAD_RESP_ERR].name = "bad_resp_err",
70 [BNXT_RE_LOCAL_QP_OP_ERR].name = "local_qp_op_err",
71 [BNXT_RE_LOCAL_PROTECTION_ERR].name = "local_protection_err",
72 [BNXT_RE_MEM_MGMT_OP_ERR].name = "mem_mgmt_op_err",
73 [BNXT_RE_REMOTE_INVALID_REQ_ERR].name = "req_remote_invalid_request",
74 [BNXT_RE_REMOTE_ACCESS_ERR].name = "req_remote_access_errors",
75 [BNXT_RE_REMOTE_OP_ERR].name = "remote_op_err",
76 [BNXT_RE_DUP_REQ].name = "duplicate_request",
77 [BNXT_RE_RES_EXCEED_MAX].name = "res_exceed_max",
78 [BNXT_RE_RES_LENGTH_MISMATCH].name = "resp_local_length_error",
79 [BNXT_RE_RES_EXCEEDS_WQE].name = "res_exceeds_wqe",
80 [BNXT_RE_RES_OPCODE_ERR].name = "res_opcode_err",
81 [BNXT_RE_RES_RX_INVALID_RKEY].name = "res_rx_invalid_rkey",
82 [BNXT_RE_RES_RX_DOMAIN_ERR].name = "res_rx_domain_err",
83 [BNXT_RE_RES_RX_NO_PERM].name = "res_rx_no_perm",
84 [BNXT_RE_RES_RX_RANGE_ERR].name = "res_rx_range_err",
85 [BNXT_RE_RES_TX_INVALID_RKEY].name = "res_tx_invalid_rkey",
86 [BNXT_RE_RES_TX_DOMAIN_ERR].name = "res_tx_domain_err",
87 [BNXT_RE_RES_TX_NO_PERM].name = "res_tx_no_perm",
88 [BNXT_RE_RES_TX_RANGE_ERR].name = "res_tx_range_err",
89 [BNXT_RE_RES_IRRQ_OFLOW].name = "res_irrq_oflow",
90 [BNXT_RE_RES_UNSUP_OPCODE].name = "res_unsup_opcode",
91 [BNXT_RE_RES_UNALIGNED_ATOMIC].name = "res_unaligned_atomic",
92 [BNXT_RE_RES_REM_INV_ERR].name = "res_rem_inv_err",
93 [BNXT_RE_RES_MEM_ERROR].name = "res_mem_err",
94 [BNXT_RE_RES_SRQ_ERR].name = "res_srq_err",
95 [BNXT_RE_RES_CMP_ERR].name = "res_cmp_err",
96 [BNXT_RE_RES_INVALID_DUP_RKEY].name = "res_invalid_dup_rkey",
97 [BNXT_RE_RES_WQE_FORMAT_ERR].name = "res_wqe_format_err",
98 [BNXT_RE_RES_CQ_LOAD_ERR].name = "res_cq_load_err",
99 [BNXT_RE_RES_SRQ_LOAD_ERR].name = "res_srq_load_err",
100 [BNXT_RE_RES_TX_PCI_ERR].name = "res_tx_pci_err",
101 [BNXT_RE_RES_RX_PCI_ERR].name = "res_rx_pci_err",
102 [BNXT_RE_OUT_OF_SEQ_ERR].name = "out_of_sequence",
103 [BNXT_RE_TX_ATOMIC_REQ].name = "tx_atomic_req",
104 [BNXT_RE_TX_READ_REQ].name = "tx_read_req",
105 [BNXT_RE_TX_READ_RES].name = "tx_read_resp",
106 [BNXT_RE_TX_WRITE_REQ].name = "tx_write_req",
107 [BNXT_RE_TX_SEND_REQ].name = "tx_send_req",
108 [BNXT_RE_TX_ROCE_PKTS].name = "tx_roce_only_pkts",
109 [BNXT_RE_TX_ROCE_BYTES].name = "tx_roce_only_bytes",
110 [BNXT_RE_RX_ATOMIC_REQ].name = "rx_atomic_requests",
111 [BNXT_RE_RX_READ_REQ].name = "rx_read_requests",
112 [BNXT_RE_RX_READ_RESP].name = "rx_read_resp",
113 [BNXT_RE_RX_WRITE_REQ].name = "rx_write_requests",
114 [BNXT_RE_RX_SEND_REQ].name = "rx_send_req",
115 [BNXT_RE_RX_ROCE_PKTS].name = "rx_roce_only_pkts",
116 [BNXT_RE_RX_ROCE_BYTES].name = "rx_roce_only_bytes",
117 [BNXT_RE_RX_ROCE_GOOD_PKTS].name = "rx_roce_good_pkts",
118 [BNXT_RE_RX_ROCE_GOOD_BYTES].name = "rx_roce_good_bytes",
119 [BNXT_RE_OOB].name = "out_of_buffer",
120 [BNXT_RE_TX_CNP].name = "np_cnp_pkts",
121 [BNXT_RE_RX_CNP].name = "rp_cnp_handled",
122 [BNXT_RE_RX_ECN].name = "np_ecn_marked_roce_packets",
123 [BNXT_RE_REQ_CQE_ERROR].name = "req_cqe_error",
124 [BNXT_RE_RESP_CQE_ERROR].name = "resp_cqe_error",
125 [BNXT_RE_RESP_REMOTE_ACCESS_ERRS].name = "resp_remote_access_errors",
126};
127
128static void bnxt_re_copy_ext_stats(struct bnxt_re_dev *rdev,
129 struct rdma_hw_stats *stats,
130 struct bnxt_qplib_ext_stat *s)
131{
132 stats->value[BNXT_RE_TX_ATOMIC_REQ] = s->tx_atomic_req;
133 stats->value[BNXT_RE_TX_READ_REQ] = s->tx_read_req;
134 stats->value[BNXT_RE_TX_READ_RES] = s->tx_read_res;
135 stats->value[BNXT_RE_TX_WRITE_REQ] = s->tx_write_req;
136 stats->value[BNXT_RE_TX_SEND_REQ] = s->tx_send_req;
137 stats->value[BNXT_RE_TX_ROCE_PKTS] = s->tx_roce_pkts;
138 stats->value[BNXT_RE_TX_ROCE_BYTES] = s->tx_roce_bytes;
139 stats->value[BNXT_RE_RX_ATOMIC_REQ] = s->rx_atomic_req;
140 stats->value[BNXT_RE_RX_READ_REQ] = s->rx_read_req;
141 stats->value[BNXT_RE_RX_READ_RESP] = s->rx_read_res;
142 stats->value[BNXT_RE_RX_WRITE_REQ] = s->rx_write_req;
143 stats->value[BNXT_RE_RX_SEND_REQ] = s->rx_send_req;
144 stats->value[BNXT_RE_RX_ROCE_PKTS] = s->rx_roce_pkts;
145 stats->value[BNXT_RE_RX_ROCE_BYTES] = s->rx_roce_bytes;
146 stats->value[BNXT_RE_RX_ROCE_GOOD_PKTS] = s->rx_roce_good_pkts;
147 stats->value[BNXT_RE_RX_ROCE_GOOD_BYTES] = s->rx_roce_good_bytes;
148 stats->value[BNXT_RE_OOB] = s->rx_out_of_buffer;
149 stats->value[BNXT_RE_TX_CNP] = s->tx_cnp;
150 stats->value[BNXT_RE_RX_CNP] = s->rx_cnp;
151 stats->value[BNXT_RE_RX_ECN] = s->rx_ecn_marked;
152 stats->value[BNXT_RE_OUT_OF_SEQ_ERR] = s->rx_out_of_sequence;
153}
154
155static int bnxt_re_get_ext_stat(struct bnxt_re_dev *rdev,
156 struct rdma_hw_stats *stats)
157{
158 struct bnxt_qplib_ext_stat *estat = &rdev->stats.rstat.ext_stat;
159 u32 fid;
160 int rc;
161
162 fid = PCI_FUNC(rdev->en_dev->pdev->devfn);
163 rc = bnxt_qplib_qext_stat(rcfw: &rdev->rcfw, fid, estat);
164 if (rc)
165 goto done;
166 bnxt_re_copy_ext_stats(rdev, stats, s: estat);
167
168done:
169 return rc;
170}
171
172static void bnxt_re_copy_err_stats(struct bnxt_re_dev *rdev,
173 struct rdma_hw_stats *stats,
174 struct bnxt_qplib_roce_stats *err_s)
175{
176 stats->value[BNXT_RE_TO_RETRANSMITS] =
177 err_s->to_retransmits;
178 stats->value[BNXT_RE_SEQ_ERR_NAKS_RCVD] =
179 err_s->seq_err_naks_rcvd;
180 stats->value[BNXT_RE_MAX_RETRY_EXCEEDED] =
181 err_s->max_retry_exceeded;
182 stats->value[BNXT_RE_RNR_NAKS_RCVD] =
183 err_s->rnr_naks_rcvd;
184 stats->value[BNXT_RE_MISSING_RESP] =
185 err_s->missing_resp;
186 stats->value[BNXT_RE_UNRECOVERABLE_ERR] =
187 err_s->unrecoverable_err;
188 stats->value[BNXT_RE_BAD_RESP_ERR] =
189 err_s->bad_resp_err;
190 stats->value[BNXT_RE_LOCAL_QP_OP_ERR] =
191 err_s->local_qp_op_err;
192 stats->value[BNXT_RE_LOCAL_PROTECTION_ERR] =
193 err_s->local_protection_err;
194 stats->value[BNXT_RE_MEM_MGMT_OP_ERR] =
195 err_s->mem_mgmt_op_err;
196 stats->value[BNXT_RE_REMOTE_INVALID_REQ_ERR] =
197 err_s->remote_invalid_req_err;
198 stats->value[BNXT_RE_REMOTE_ACCESS_ERR] =
199 err_s->remote_access_err;
200 stats->value[BNXT_RE_REMOTE_OP_ERR] =
201 err_s->remote_op_err;
202 stats->value[BNXT_RE_DUP_REQ] =
203 err_s->dup_req;
204 stats->value[BNXT_RE_RES_EXCEED_MAX] =
205 err_s->res_exceed_max;
206 stats->value[BNXT_RE_RES_LENGTH_MISMATCH] =
207 err_s->res_length_mismatch;
208 stats->value[BNXT_RE_RES_EXCEEDS_WQE] =
209 err_s->res_exceeds_wqe;
210 stats->value[BNXT_RE_RES_OPCODE_ERR] =
211 err_s->res_opcode_err;
212 stats->value[BNXT_RE_RES_RX_INVALID_RKEY] =
213 err_s->res_rx_invalid_rkey;
214 stats->value[BNXT_RE_RES_RX_DOMAIN_ERR] =
215 err_s->res_rx_domain_err;
216 stats->value[BNXT_RE_RES_RX_NO_PERM] =
217 err_s->res_rx_no_perm;
218 stats->value[BNXT_RE_RES_RX_RANGE_ERR] =
219 err_s->res_rx_range_err;
220 stats->value[BNXT_RE_RES_TX_INVALID_RKEY] =
221 err_s->res_tx_invalid_rkey;
222 stats->value[BNXT_RE_RES_TX_DOMAIN_ERR] =
223 err_s->res_tx_domain_err;
224 stats->value[BNXT_RE_RES_TX_NO_PERM] =
225 err_s->res_tx_no_perm;
226 stats->value[BNXT_RE_RES_TX_RANGE_ERR] =
227 err_s->res_tx_range_err;
228 stats->value[BNXT_RE_RES_IRRQ_OFLOW] =
229 err_s->res_irrq_oflow;
230 stats->value[BNXT_RE_RES_UNSUP_OPCODE] =
231 err_s->res_unsup_opcode;
232 stats->value[BNXT_RE_RES_UNALIGNED_ATOMIC] =
233 err_s->res_unaligned_atomic;
234 stats->value[BNXT_RE_RES_REM_INV_ERR] =
235 err_s->res_rem_inv_err;
236 stats->value[BNXT_RE_RES_MEM_ERROR] =
237 err_s->res_mem_error;
238 stats->value[BNXT_RE_RES_SRQ_ERR] =
239 err_s->res_srq_err;
240 stats->value[BNXT_RE_RES_CMP_ERR] =
241 err_s->res_cmp_err;
242 stats->value[BNXT_RE_RES_INVALID_DUP_RKEY] =
243 err_s->res_invalid_dup_rkey;
244 stats->value[BNXT_RE_RES_WQE_FORMAT_ERR] =
245 err_s->res_wqe_format_err;
246 stats->value[BNXT_RE_RES_CQ_LOAD_ERR] =
247 err_s->res_cq_load_err;
248 stats->value[BNXT_RE_RES_SRQ_LOAD_ERR] =
249 err_s->res_srq_load_err;
250 stats->value[BNXT_RE_RES_TX_PCI_ERR] =
251 err_s->res_tx_pci_err;
252 stats->value[BNXT_RE_RES_RX_PCI_ERR] =
253 err_s->res_rx_pci_err;
254 stats->value[BNXT_RE_OUT_OF_SEQ_ERR] =
255 err_s->res_oos_drop_count;
256 stats->value[BNXT_RE_REQ_CQE_ERROR] =
257 err_s->bad_resp_err +
258 err_s->local_qp_op_err +
259 err_s->local_protection_err +
260 err_s->mem_mgmt_op_err +
261 err_s->remote_invalid_req_err +
262 err_s->remote_access_err +
263 err_s->remote_op_err;
264 stats->value[BNXT_RE_RESP_CQE_ERROR] =
265 err_s->res_cmp_err +
266 err_s->res_cq_load_err;
267 stats->value[BNXT_RE_RESP_REMOTE_ACCESS_ERRS] =
268 err_s->res_rx_no_perm +
269 err_s->res_tx_no_perm;
270}
271
272int bnxt_re_assign_pma_port_ext_counters(struct bnxt_re_dev *rdev, struct ib_mad *out_mad)
273{
274 struct ib_pma_portcounters_ext *pma_cnt_ext;
275 struct bnxt_qplib_ext_stat *estat = &rdev->stats.rstat.ext_stat;
276 struct ctx_hw_stats *hw_stats = NULL;
277 int rc;
278
279 hw_stats = rdev->qplib_ctx.stats.dma;
280
281 pma_cnt_ext = (struct ib_pma_portcounters_ext *)(out_mad->data + 40);
282 if (_is_ext_stats_supported(dev_cap_flags: rdev->dev_attr->dev_cap_flags)) {
283 u32 fid = PCI_FUNC(rdev->en_dev->pdev->devfn);
284
285 rc = bnxt_qplib_qext_stat(rcfw: &rdev->rcfw, fid, estat);
286 if (rc)
287 return rc;
288 }
289
290 pma_cnt_ext = (struct ib_pma_portcounters_ext *)(out_mad->data + 40);
291 if ((bnxt_qplib_is_chip_gen_p5(cctx: rdev->chip_ctx) && rdev->is_virtfn) ||
292 !bnxt_qplib_is_chip_gen_p5(cctx: rdev->chip_ctx)) {
293 pma_cnt_ext->port_xmit_data =
294 cpu_to_be64(le64_to_cpu(hw_stats->tx_ucast_bytes) / 4);
295 pma_cnt_ext->port_rcv_data =
296 cpu_to_be64(le64_to_cpu(hw_stats->rx_ucast_bytes) / 4);
297 pma_cnt_ext->port_xmit_packets =
298 cpu_to_be64(le64_to_cpu(hw_stats->tx_ucast_pkts));
299 pma_cnt_ext->port_rcv_packets =
300 cpu_to_be64(le64_to_cpu(hw_stats->rx_ucast_pkts));
301 pma_cnt_ext->port_unicast_rcv_packets =
302 cpu_to_be64(le64_to_cpu(hw_stats->rx_ucast_pkts));
303 pma_cnt_ext->port_unicast_xmit_packets =
304 cpu_to_be64(le64_to_cpu(hw_stats->tx_ucast_pkts));
305
306 } else {
307 pma_cnt_ext->port_rcv_packets = cpu_to_be64(estat->rx_roce_good_pkts);
308 pma_cnt_ext->port_rcv_data = cpu_to_be64(estat->rx_roce_good_bytes / 4);
309 pma_cnt_ext->port_xmit_packets = cpu_to_be64(estat->tx_roce_pkts);
310 pma_cnt_ext->port_xmit_data = cpu_to_be64(estat->tx_roce_bytes / 4);
311 pma_cnt_ext->port_unicast_rcv_packets = cpu_to_be64(estat->rx_roce_good_pkts);
312 pma_cnt_ext->port_unicast_xmit_packets = cpu_to_be64(estat->tx_roce_pkts);
313 }
314 return 0;
315}
316
317int bnxt_re_assign_pma_port_counters(struct bnxt_re_dev *rdev, struct ib_mad *out_mad)
318{
319 struct bnxt_qplib_ext_stat *estat = &rdev->stats.rstat.ext_stat;
320 struct ib_pma_portcounters *pma_cnt;
321 struct ctx_hw_stats *hw_stats = NULL;
322 int rc;
323
324 hw_stats = rdev->qplib_ctx.stats.dma;
325
326 pma_cnt = (struct ib_pma_portcounters *)(out_mad->data + 40);
327 if (_is_ext_stats_supported(dev_cap_flags: rdev->dev_attr->dev_cap_flags)) {
328 u32 fid = PCI_FUNC(rdev->en_dev->pdev->devfn);
329
330 rc = bnxt_qplib_qext_stat(rcfw: &rdev->rcfw, fid, estat);
331 if (rc)
332 return rc;
333 }
334 if ((bnxt_qplib_is_chip_gen_p5(cctx: rdev->chip_ctx) && rdev->is_virtfn) ||
335 !bnxt_qplib_is_chip_gen_p5(cctx: rdev->chip_ctx)) {
336 pma_cnt->port_rcv_packets =
337 cpu_to_be32((u32)(le64_to_cpu(hw_stats->rx_ucast_pkts)) & 0xFFFFFFFF);
338 pma_cnt->port_rcv_data =
339 cpu_to_be32((u32)((le64_to_cpu(hw_stats->rx_ucast_bytes) &
340 0xFFFFFFFF) / 4));
341 pma_cnt->port_xmit_packets =
342 cpu_to_be32((u32)(le64_to_cpu(hw_stats->tx_ucast_pkts)) & 0xFFFFFFFF);
343 pma_cnt->port_xmit_data =
344 cpu_to_be32((u32)((le64_to_cpu(hw_stats->tx_ucast_bytes)
345 & 0xFFFFFFFF) / 4));
346 } else {
347 pma_cnt->port_rcv_packets = cpu_to_be32(estat->rx_roce_good_pkts);
348 pma_cnt->port_rcv_data = cpu_to_be32((estat->rx_roce_good_bytes / 4));
349 pma_cnt->port_xmit_packets = cpu_to_be32(estat->tx_roce_pkts);
350 pma_cnt->port_xmit_data = cpu_to_be32((estat->tx_roce_bytes / 4));
351 }
352 pma_cnt->port_rcv_constraint_errors = (u8)(le64_to_cpu(hw_stats->rx_discard_pkts) & 0xFF);
353 pma_cnt->port_rcv_errors = cpu_to_be16((u16)(le64_to_cpu(hw_stats->rx_error_pkts)
354 & 0xFFFF));
355 pma_cnt->port_xmit_constraint_errors = (u8)(le64_to_cpu(hw_stats->tx_error_pkts) & 0xFF);
356 pma_cnt->port_xmit_discards = cpu_to_be16((u16)(le64_to_cpu(hw_stats->tx_discard_pkts)
357 & 0xFFFF));
358
359 return 0;
360}
361
362int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
363 struct rdma_hw_stats *stats,
364 u32 port, int index)
365{
366 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
367 struct bnxt_qplib_roce_stats *err_s = NULL;
368 struct ctx_hw_stats *hw_stats = NULL;
369 int rc = 0;
370
371 hw_stats = rdev->qplib_ctx.stats.dma;
372 if (!port || !stats)
373 return -EINVAL;
374
375 if (hw_stats) {
376 stats->value[BNXT_RE_RECOVERABLE_ERRORS] =
377 le64_to_cpu(hw_stats->tx_bcast_pkts);
378 stats->value[BNXT_RE_TX_DISCARDS] =
379 le64_to_cpu(hw_stats->tx_discard_pkts);
380 stats->value[BNXT_RE_TX_ERRORS] =
381 le64_to_cpu(hw_stats->tx_error_pkts);
382 stats->value[BNXT_RE_RX_ERRORS] =
383 le64_to_cpu(hw_stats->rx_error_pkts);
384 stats->value[BNXT_RE_RX_DISCARDS] =
385 le64_to_cpu(hw_stats->rx_discard_pkts);
386 stats->value[BNXT_RE_RX_PKTS] =
387 le64_to_cpu(hw_stats->rx_ucast_pkts);
388 stats->value[BNXT_RE_RX_BYTES] =
389 le64_to_cpu(hw_stats->rx_ucast_bytes);
390 stats->value[BNXT_RE_TX_PKTS] =
391 le64_to_cpu(hw_stats->tx_ucast_pkts);
392 stats->value[BNXT_RE_TX_BYTES] =
393 le64_to_cpu(hw_stats->tx_ucast_bytes);
394 }
395 err_s = &rdev->stats.rstat.errs;
396 if (test_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags)) {
397 rc = bnxt_qplib_get_roce_stats(rcfw: &rdev->rcfw, stats: err_s);
398 if (rc) {
399 clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS,
400 addr: &rdev->flags);
401 goto done;
402 }
403 bnxt_re_copy_err_stats(rdev, stats, err_s);
404 if (bnxt_ext_stats_supported(ctx: rdev->chip_ctx, flags: rdev->dev_attr->dev_cap_flags,
405 virtfn: rdev->is_virtfn)) {
406 rc = bnxt_re_get_ext_stat(rdev, stats);
407 if (rc) {
408 clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS,
409 addr: &rdev->flags);
410 goto done;
411 }
412 }
413 }
414
415done:
416 return bnxt_qplib_is_chip_gen_p5_p7(cctx: rdev->chip_ctx) ?
417 BNXT_RE_NUM_EXT_COUNTERS : BNXT_RE_NUM_STD_COUNTERS;
418}
419
420struct rdma_hw_stats *bnxt_re_ib_alloc_hw_port_stats(struct ib_device *ibdev,
421 u32 port_num)
422{
423 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
424 int num_counters = 0;
425
426 if (bnxt_qplib_is_chip_gen_p5_p7(cctx: rdev->chip_ctx))
427 num_counters = BNXT_RE_NUM_EXT_COUNTERS;
428 else
429 num_counters = BNXT_RE_NUM_STD_COUNTERS;
430
431 return rdma_alloc_hw_stats_struct(descs: bnxt_re_stat_descs, num_counters,
432 RDMA_HW_STATS_DEFAULT_LIFESPAN);
433}

/* source: linux/drivers/infiniband/hw/bnxt_re/hw_counters.c */