/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#include "lib/eq.h"

enum {
	QP_PID,
	QP_STATE,
	QP_XPORT,
	QP_MTU,
	QP_N_RECV,
	QP_RECV_SZ,
	QP_N_SEND,
	QP_LOG_PG_SZ,
	QP_RQPN,
};

static char *qp_fields[] = {
	[QP_PID]	= "pid",
	[QP_STATE]	= "state",
	[QP_XPORT]	= "transport",
	[QP_MTU]	= "mtu",
	[QP_N_RECV]	= "num_recv",
	[QP_RECV_SZ]	= "rcv_wqe_sz",
	[QP_N_SEND]	= "num_send",
	[QP_LOG_PG_SZ]	= "log2_page_sz",
	[QP_RQPN]	= "remote_qpn",
};

enum {
	EQ_NUM_EQES,
	EQ_INTR,
	EQ_LOG_PG_SZ,
};

static char *eq_fields[] = {
	[EQ_NUM_EQES]	= "num_eqes",
	[EQ_INTR]	= "intr",
	[EQ_LOG_PG_SZ]	= "log_page_size",
};

enum {
	CQ_PID,
	CQ_NUM_CQES,
	CQ_LOG_PG_SZ,
};

static char *cq_fields[] = {
	[CQ_PID]	= "pid",
	[CQ_NUM_CQES]	= "num_cqes",
	[CQ_LOG_PG_SZ]	= "log_page_size",
};

struct dentry *mlx5_debugfs_root;
EXPORT_SYMBOL(mlx5_debugfs_root);

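/*
 * Module-level debugfs root: create and remove the top-level "mlx5"
 * directory under which per-device debug directories are placed.
 */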
void mlx5_register_debugfs(void)
{
	mlx5_debugfs_root = debugfs_create_dir("mlx5", NULL);
}

void mlx5_unregister_debugfs(void)
{
	debugfs_remove(mlx5_debugfs_root);
}

struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev)
{
	return dev->priv.dbg.dbg_root;
}
EXPORT_SYMBOL(mlx5_debugfs_get_dev_root);

void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
{
	dev->priv.dbg.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg.dbg_root);
}
EXPORT_SYMBOL(mlx5_qp_debugfs_init);

void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	debugfs_remove_recursive(dev->priv.dbg.qp_debugfs);
}
EXPORT_SYMBOL(mlx5_qp_debugfs_cleanup);

void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
{
	dev->priv.dbg.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg.dbg_root);
}

void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	debugfs_remove_recursive(dev->priv.dbg.eq_debugfs);
}

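/*
 * "average" file under each command's directory: reports sum/n of the
 * accumulated statistics for that opcode; writing anything to the file
 * resets both counters to zero.
 */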
static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
			    loff_t *pos)
{
	struct mlx5_cmd_stats *stats;
	u64 field = 0;
	int ret;
	char tbuf[22];

	stats = filp->private_data;
	spin_lock_irq(&stats->lock);
	if (stats->n)
		field = div64_u64(stats->sum, stats->n);
	spin_unlock_irq(&stats->lock);
	ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static ssize_t average_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *pos)
{
	struct mlx5_cmd_stats *stats;

	stats = filp->private_data;
	spin_lock_irq(&stats->lock);
	stats->sum = 0;
	stats->n = 0;
	spin_unlock_irq(&stats->lock);

	*pos += count;

	return count;
}

static const struct file_operations stats_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.read	= average_read,
	.write	= average_write,
};

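/*
 * "slots_inuse" file: number of command slots currently in use, i.e.
 * max_reg_cmds minus the free slots still set in the bitmask.
 */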
static ssize_t slots_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cmd *cmd;
	char tbuf[6];
	int weight;
	int field;
	int ret;

	cmd = filp->private_data;
	weight = bitmap_weight(&cmd->vars.bitmask, cmd->vars.max_reg_cmds);
	field = cmd->vars.max_reg_cmds - weight;
	ret = snprintf(tbuf, sizeof(tbuf), "%d\n", field);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static const struct file_operations slots_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.read	= slots_read,
};

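/* Allocate a stats entry for @opcode and store it in the stats xarray. */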
static struct mlx5_cmd_stats *
mlx5_cmdif_alloc_stats(struct xarray *stats_xa, int opcode)
{
	struct mlx5_cmd_stats *stats = kzalloc(sizeof(*stats), GFP_KERNEL);
	int err;

	if (!stats)
		return NULL;

	err = xa_insert(stats_xa, opcode, stats, GFP_KERNEL);
	if (err) {
		kfree(stats);
		return NULL;
	}
	spin_lock_init(&stats->lock);
	return stats;
}

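/*
 * Create the "commands" debugfs directory with one sub-directory per
 * known command opcode, each exposing the statistics files above.
 */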
void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_stats *stats;
	struct dentry **cmd;
	const char *namep;
	int i;

	cmd = &dev->priv.dbg.cmdif_debugfs;
	*cmd = debugfs_create_dir("commands", dev->priv.dbg.dbg_root);

	debugfs_create_file("slots_inuse", 0400, *cmd, &dev->cmd, &slots_fops);

	xa_init(&dev->cmd.stats);

	for (i = 0; i < MLX5_CMD_OP_MAX; i++) {
		namep = mlx5_command_str(i);
		if (strcmp(namep, "unknown command opcode")) {
			stats = mlx5_cmdif_alloc_stats(&dev->cmd.stats, i);
			if (!stats)
				continue;
			stats->root = debugfs_create_dir(namep, *cmd);

			debugfs_create_file("average", 0400, stats->root, stats,
					    &stats_fops);
			debugfs_create_u64("n", 0400, stats->root, &stats->n);
			debugfs_create_u64("failed", 0400, stats->root, &stats->failed);
			debugfs_create_u64("failed_mbox_status", 0400, stats->root,
					   &stats->failed_mbox_status);
			debugfs_create_u32("last_failed_errno", 0400, stats->root,
					   &stats->last_failed_errno);
			debugfs_create_u8("last_failed_mbox_status", 0400, stats->root,
					  &stats->last_failed_mbox_status);
			debugfs_create_x32("last_failed_syndrome", 0400, stats->root,
					   &stats->last_failed_syndrome);
		}
	}
}

void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_stats *stats;
	unsigned long i;

	debugfs_remove_recursive(dev->priv.dbg.cmdif_debugfs);
	xa_for_each(&dev->cmd.stats, i, stats)
		kfree(stats);
	xa_destroy(&dev->cmd.stats);
}

void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
{
	dev->priv.dbg.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg.dbg_root);
}

void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	debugfs_remove_recursive(dev->priv.dbg.cq_debugfs);
}

void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev)
{
	struct dentry *pages;

	dev->priv.dbg.pages_debugfs = debugfs_create_dir("pages", dev->priv.dbg.dbg_root);
	pages = dev->priv.dbg.pages_debugfs;

	debugfs_create_u32("fw_pages_total", 0400, pages, &dev->priv.fw_pages);
	debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.page_counters[MLX5_VF]);
	debugfs_create_u32("fw_pages_ec_vfs", 0400, pages, &dev->priv.page_counters[MLX5_EC_VF]);
	debugfs_create_u32("fw_pages_sfs", 0400, pages, &dev->priv.page_counters[MLX5_SF]);
	debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.page_counters[MLX5_HOST_PF]);
	debugfs_create_u32("fw_pages_alloc_failed", 0400, pages, &dev->priv.fw_pages_alloc_failed);
	debugfs_create_u32("fw_pages_give_dropped", 0400, pages, &dev->priv.give_pages_dropped);
	debugfs_create_u32("fw_pages_reclaim_discard", 0400, pages,
			   &dev->priv.reclaim_pages_discard);
}

void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	debugfs_remove_recursive(dev->priv.dbg.pages_debugfs);
}

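/*
 * Query the QP context from firmware and extract the field selected by
 * @index. String-valued fields return the string pointer cast to u64
 * and set *is_str.
 */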
static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
			 int index, int *is_str)
{
	int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
	u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {};
	u64 param = 0;
	u32 *out;
	int state;
	u32 *qpc;
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return 0;

	MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
	MLX5_SET(query_qp_in, in, qpn, qp->qpn);
	err = mlx5_cmd_exec_inout(dev, query_qp, in, out);
	if (err)
		goto out;

	*is_str = 0;

	qpc = MLX5_ADDR_OF(query_qp_out, out, qpc);
	switch (index) {
	case QP_PID:
		param = qp->pid;
		break;
	case QP_STATE:
		state = MLX5_GET(qpc, qpc, state);
		param = (unsigned long)mlx5_qp_state_str(state);
		*is_str = 1;
		break;
	case QP_XPORT:
		param = (unsigned long)mlx5_qp_type_str(MLX5_GET(qpc, qpc, st));
		*is_str = 1;
		break;
	case QP_MTU:
		switch (MLX5_GET(qpc, qpc, mtu)) {
		case IB_MTU_256:
			param = 256;
			break;
		case IB_MTU_512:
			param = 512;
			break;
		case IB_MTU_1024:
			param = 1024;
			break;
		case IB_MTU_2048:
			param = 2048;
			break;
		case IB_MTU_4096:
			param = 4096;
			break;
		default:
			param = 0;
		}
		break;
	case QP_N_RECV:
		param = 1 << MLX5_GET(qpc, qpc, log_rq_size);
		break;
	case QP_RECV_SZ:
		param = 1 << (MLX5_GET(qpc, qpc, log_rq_stride) + 4);
		break;
	case QP_N_SEND:
		if (!MLX5_GET(qpc, qpc, no_sq))
			param = 1 << MLX5_GET(qpc, qpc, log_sq_size);
		break;
	case QP_LOG_PG_SZ:
		param = MLX5_GET(qpc, qpc, log_page_size) + 12;
		break;
	case QP_RQPN:
		param = MLX5_GET(qpc, qpc, remote_qpn);
		break;
	}
out:
	kfree(out);
	return param;
}

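/* Query the EQ context from firmware and extract the field selected by @index. */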
static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
			 int index)
{
	int outlen = MLX5_ST_SZ_BYTES(query_eq_out);
	u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {};
	u64 param = 0;
	void *ctx;
	u32 *out;
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return param;

	MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
	MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
	err = mlx5_cmd_exec_inout(dev, query_eq, in, out);
	if (err) {
		mlx5_core_warn(dev, "failed to query eq\n");
		goto out;
	}
	ctx = MLX5_ADDR_OF(query_eq_out, out, eq_context_entry);

	switch (index) {
	case EQ_NUM_EQES:
		param = 1 << MLX5_GET(eqc, ctx, log_eq_size);
		break;
	case EQ_INTR:
		param = MLX5_GET(eqc, ctx, intr);
		break;
	case EQ_LOG_PG_SZ:
		param = MLX5_GET(eqc, ctx, log_page_size) + 12;
		break;
	}

out:
	kfree(out);
	return param;
}

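/* Query the CQ context and extract the field selected by @index. */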
static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			 int index)
{
	int outlen = MLX5_ST_SZ_BYTES(query_cq_out);
	u64 param = 0;
	void *ctx;
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return param;

	err = mlx5_core_query_cq(dev, cq, out);
	if (err) {
		mlx5_core_warn(dev, "failed to query cq\n");
		goto out;
	}
	ctx = MLX5_ADDR_OF(query_cq_out, out, cq_context);

	switch (index) {
	case CQ_PID:
		param = cq->pid;
		break;
	case CQ_NUM_CQES:
		param = 1 << MLX5_GET(cqc, ctx, log_cq_size);
		break;
	case CQ_LOG_PG_SZ:
		param = MLX5_GET(cqc, ctx, log_page_size);
		break;
	}

out:
	kvfree(out);
	return param;
}

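/*
 * Read handler shared by all resource field files. The file's
 * private_data points at one entry of the fields[] flexible array
 * inside struct mlx5_rsc_debug; subtracting the entry's own index and
 * the struct header recovers the containing mlx5_rsc_debug.
 */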
static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
			loff_t *pos)
{
	struct mlx5_field_desc *desc;
	struct mlx5_rsc_debug *d;
	char tbuf[18];
	int is_str = 0;
	u64 field;
	int ret;

	desc = filp->private_data;
	d = (void *)(desc - desc->i) - sizeof(*d);
	switch (d->type) {
	case MLX5_DBG_RSC_QP:
		field = qp_read_field(d->dev, d->object, desc->i, &is_str);
		break;

	case MLX5_DBG_RSC_EQ:
		field = eq_read_field(d->dev, d->object, desc->i);
		break;

	case MLX5_DBG_RSC_CQ:
		field = cq_read_field(d->dev, d->object, desc->i);
		break;

	default:
		mlx5_core_warn(d->dev, "invalid resource type %d\n", d->type);
		return -EINVAL;
	}

	if (is_str)
		ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)(unsigned long)field);
	else
		ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);

	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.read	= dbg_read,
};

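/*
 * Create a debugfs directory named after the resource number (0x<rsn>)
 * and populate it with one file per field, all backed by dbg_read().
 */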
static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type,
			struct dentry *root, struct mlx5_rsc_debug **dbg,
			int rsn, char **field, int nfile, void *data)
{
	struct mlx5_rsc_debug *d;
	char resn[32];
	int i;

	d = kzalloc(struct_size(d, fields, nfile), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->dev = dev;
	d->object = data;
	d->type = type;
	sprintf(resn, "0x%x", rsn);
	d->root = debugfs_create_dir(resn, root);

	for (i = 0; i < nfile; i++) {
		d->fields[i].i = i;
		debugfs_create_file(field[i], 0400, d->root, &d->fields[i],
				    &fops);
	}
	*dbg = d;

	return 0;
}

static void rem_res_tree(struct mlx5_rsc_debug *d)
{
	debugfs_remove_recursive(d->root);
	kfree(d);
}

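/* Expose a QP's context fields under the device's "QPs" debugfs directory. */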
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
{
	int err;

	if (!mlx5_debugfs_root)
		return 0;

	err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.dbg.qp_debugfs,
			   &qp->dbg, qp->qpn, qp_fields,
			   ARRAY_SIZE(qp_fields), qp);
	if (err)
		qp->dbg = NULL;

	return err;
}
EXPORT_SYMBOL(mlx5_debug_qp_add);

void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
{
	if (!mlx5_debugfs_root || !qp->dbg)
		return;

	rem_res_tree(qp->dbg);
	qp->dbg = NULL;
}
EXPORT_SYMBOL(mlx5_debug_qp_remove);

int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	if (!mlx5_debugfs_root)
		return 0;

	err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.dbg.eq_debugfs,
			   &eq->dbg, eq->eqn, eq_fields,
			   ARRAY_SIZE(eq_fields), eq);
	if (err)
		eq->dbg = NULL;

	return err;
}

void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	if (!mlx5_debugfs_root)
		return;

	if (eq->dbg)
		rem_res_tree(eq->dbg);
}

int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
	int err;

	if (!mlx5_debugfs_root)
		return 0;

	err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.dbg.cq_debugfs,
			   &cq->dbg, cq->cqn, cq_fields,
			   ARRAY_SIZE(cq_fields), cq);
	if (err)
		cq->dbg = NULL;

	return err;
}

void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
	if (!mlx5_debugfs_root)
		return;

	if (cq->dbg) {
		rem_res_tree(cq->dbg);
		cq->dbg = NULL;
	}
}
