1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2013 - 2018 Intel Corporation. */
3
4#ifdef CONFIG_DEBUG_FS
5
6#include <linux/fs.h>
7#include <linux/debugfs.h>
8#include <linux/if_bridge.h>
9#include "i40e.h"
10#include "i40e_virtchnl_pf.h"
11
12static struct dentry *i40e_dbg_root;
13
14enum ring_type {
15 RING_TYPE_RX,
16 RING_TYPE_TX,
17 RING_TYPE_XDP
18};
19
20/**
21 * i40e_dbg_find_vsi - searches for the vsi with the given seid
22 * @pf: the PF structure to search for the vsi
23 * @seid: seid of the vsi it is searching for
24 **/
25static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
26{
27 int i;
28
29 if (seid < 0)
30 dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
31 else
32 for (i = 0; i < pf->num_alloc_vsi; i++)
33 if (pf->vsi[i] && (pf->vsi[i]->seid == seid))
34 return pf->vsi[i];
35
36 return NULL;
37}
38
39/**
40 * i40e_dbg_find_veb - searches for the veb with the given seid
41 * @pf: the PF structure to search for the veb
42 * @seid: seid of the veb it is searching for
43 **/
44static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid)
45{
46 int i;
47
48 for (i = 0; i < I40E_MAX_VEB; i++)
49 if (pf->veb[i] && pf->veb[i]->seid == seid)
50 return pf->veb[i];
51 return NULL;
52}
53
54/**************************************************************
55 * command
56 * The command entry in debugfs is for giving the driver commands
57 * to be executed - these may be for changing the internal switch
58 * setup, adding or removing filters, or other things. Many of
59 * these will be useful for some forms of unit testing.
60 **************************************************************/
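/* Example usage from userspace (illustrative only; the PCI address and the
 * debugfs mount point depend on the system):
 *
 *   echo "dump switch"  > /sys/kernel/debug/i40e/0000:3b:00.0/command
 *   echo "dump vsi 400" > /sys/kernel/debug/i40e/0000:3b:00.0/command
 *
 * An unrecognized command makes the write handler print the list of
 * supported commands to the kernel log via dev_info().
 */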
61static char i40e_dbg_command_buf[256] = "";
62
63/**
64 * i40e_dbg_command_read - read for command datum
65 * @filp: the opened file
66 * @buffer: where to write the data for the user to read
67 * @count: the size of the user's buffer
68 * @ppos: file position offset
69 **/
70static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
71 size_t count, loff_t *ppos)
72{
73 struct i40e_pf *pf = filp->private_data;
74 int bytes_not_copied;
75 int buf_size = 256;
76 char *buf;
77 int len;
78
79 /* don't allow partial reads */
80 if (*ppos != 0)
81 return 0;
82 if (count < buf_size)
83 return -ENOSPC;
84
	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOSPC;

	len = snprintf(buf, buf_size, "%s: %s\n",
		       pf->vsi[pf->lan_vsi]->netdev->name,
		       i40e_dbg_command_buf);

	bytes_not_copied = copy_to_user(buffer, buf, len);
	kfree(buf);
95
96 if (bytes_not_copied)
97 return -EFAULT;
98
99 *ppos = len;
100 return len;
101}
102
103static char *i40e_filter_state_string[] = {
104 "INVALID",
105 "NEW",
106 "ACTIVE",
107 "FAILED",
108 "REMOVE",
109};
110
111/**
112 * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum
113 * @pf: the i40e_pf created in command write
114 * @seid: the seid the user put in
115 **/
116static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
117{
118 struct rtnl_link_stats64 *nstat;
119 struct i40e_mac_filter *f;
120 struct i40e_vsi *vsi;
121 int i, bkt;
122
123 vsi = i40e_dbg_find_vsi(pf, seid);
124 if (!vsi) {
125 dev_info(&pf->pdev->dev,
126 "dump %d: seid not found\n", seid);
127 return;
128 }
129 dev_info(&pf->pdev->dev, "vsi seid %d\n", seid);
130 if (vsi->netdev) {
131 struct net_device *nd = vsi->netdev;
132
133 dev_info(&pf->pdev->dev, " netdev: name = %s, state = %lu, flags = 0x%08x\n",
134 nd->name, nd->state, nd->flags);
135 dev_info(&pf->pdev->dev, " features = 0x%08lx\n",
136 (unsigned long int)nd->features);
137 dev_info(&pf->pdev->dev, " hw_features = 0x%08lx\n",
138 (unsigned long int)nd->hw_features);
139 dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n",
140 (unsigned long int)nd->vlan_features);
141 }
142 dev_info(&pf->pdev->dev,
143 " flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
144 vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags);
145 for (i = 0; i < BITS_TO_LONGS(__I40E_VSI_STATE_SIZE__); i++)
146 dev_info(&pf->pdev->dev,
147 " state[%d] = %08lx\n",
148 i, vsi->state[i]);
149 if (vsi == pf->vsi[pf->lan_vsi])
150 dev_info(&pf->pdev->dev, " MAC address: %pM SAN MAC: %pM Port MAC: %pM\n",
151 pf->hw.mac.addr,
152 pf->hw.mac.san_addr,
153 pf->hw.mac.port_addr);
154 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
155 dev_info(&pf->pdev->dev,
156 " mac_filter_hash: %pM vid=%d, state %s\n",
157 f->macaddr, f->vlan,
158 i40e_filter_state_string[f->state]);
159 }
160 dev_info(&pf->pdev->dev, " active_filters %u, promisc_threshold %u, overflow promisc %s\n",
161 vsi->active_filters, vsi->promisc_threshold,
162 (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) ?
163 "ON" : "OFF"));
164 nstat = i40e_get_vsi_stats_struct(vsi);
165 dev_info(&pf->pdev->dev,
166 " net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
167 (unsigned long int)nstat->rx_packets,
168 (unsigned long int)nstat->rx_bytes,
169 (unsigned long int)nstat->rx_errors,
170 (unsigned long int)nstat->rx_dropped);
171 dev_info(&pf->pdev->dev,
172 " net_stats: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
173 (unsigned long int)nstat->tx_packets,
174 (unsigned long int)nstat->tx_bytes,
175 (unsigned long int)nstat->tx_errors,
176 (unsigned long int)nstat->tx_dropped);
177 dev_info(&pf->pdev->dev,
178 " net_stats: multicast = %lu, collisions = %lu\n",
179 (unsigned long int)nstat->multicast,
180 (unsigned long int)nstat->collisions);
181 dev_info(&pf->pdev->dev,
182 " net_stats: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
183 (unsigned long int)nstat->rx_length_errors,
184 (unsigned long int)nstat->rx_over_errors,
185 (unsigned long int)nstat->rx_crc_errors);
186 dev_info(&pf->pdev->dev,
187 " net_stats: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
188 (unsigned long int)nstat->rx_frame_errors,
189 (unsigned long int)nstat->rx_fifo_errors,
190 (unsigned long int)nstat->rx_missed_errors);
191 dev_info(&pf->pdev->dev,
192 " net_stats: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
193 (unsigned long int)nstat->tx_aborted_errors,
194 (unsigned long int)nstat->tx_carrier_errors,
195 (unsigned long int)nstat->tx_fifo_errors);
196 dev_info(&pf->pdev->dev,
197 " net_stats: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
198 (unsigned long int)nstat->tx_heartbeat_errors,
199 (unsigned long int)nstat->tx_window_errors);
200 dev_info(&pf->pdev->dev,
201 " net_stats: rx_compressed = %lu, tx_compressed = %lu\n",
202 (unsigned long int)nstat->rx_compressed,
203 (unsigned long int)nstat->tx_compressed);
204 dev_info(&pf->pdev->dev,
205 " net_stats_offsets: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
206 (unsigned long int)vsi->net_stats_offsets.rx_packets,
207 (unsigned long int)vsi->net_stats_offsets.rx_bytes,
208 (unsigned long int)vsi->net_stats_offsets.rx_errors,
209 (unsigned long int)vsi->net_stats_offsets.rx_dropped);
210 dev_info(&pf->pdev->dev,
211 " net_stats_offsets: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
212 (unsigned long int)vsi->net_stats_offsets.tx_packets,
213 (unsigned long int)vsi->net_stats_offsets.tx_bytes,
214 (unsigned long int)vsi->net_stats_offsets.tx_errors,
215 (unsigned long int)vsi->net_stats_offsets.tx_dropped);
216 dev_info(&pf->pdev->dev,
217 " net_stats_offsets: multicast = %lu, collisions = %lu\n",
218 (unsigned long int)vsi->net_stats_offsets.multicast,
219 (unsigned long int)vsi->net_stats_offsets.collisions);
220 dev_info(&pf->pdev->dev,
221 " net_stats_offsets: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
222 (unsigned long int)vsi->net_stats_offsets.rx_length_errors,
223 (unsigned long int)vsi->net_stats_offsets.rx_over_errors,
224 (unsigned long int)vsi->net_stats_offsets.rx_crc_errors);
225 dev_info(&pf->pdev->dev,
226 " net_stats_offsets: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
227 (unsigned long int)vsi->net_stats_offsets.rx_frame_errors,
228 (unsigned long int)vsi->net_stats_offsets.rx_fifo_errors,
229 (unsigned long int)vsi->net_stats_offsets.rx_missed_errors);
230 dev_info(&pf->pdev->dev,
231 " net_stats_offsets: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
232 (unsigned long int)vsi->net_stats_offsets.tx_aborted_errors,
233 (unsigned long int)vsi->net_stats_offsets.tx_carrier_errors,
234 (unsigned long int)vsi->net_stats_offsets.tx_fifo_errors);
235 dev_info(&pf->pdev->dev,
236 " net_stats_offsets: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
237 (unsigned long int)vsi->net_stats_offsets.tx_heartbeat_errors,
238 (unsigned long int)vsi->net_stats_offsets.tx_window_errors);
239 dev_info(&pf->pdev->dev,
240 " net_stats_offsets: rx_compressed = %lu, tx_compressed = %lu\n",
241 (unsigned long int)vsi->net_stats_offsets.rx_compressed,
242 (unsigned long int)vsi->net_stats_offsets.tx_compressed);
243 dev_info(&pf->pdev->dev,
244 " tx_restart = %llu, tx_busy = %llu, rx_buf_failed = %llu, rx_page_failed = %llu\n",
245 vsi->tx_restart, vsi->tx_busy,
246 vsi->rx_buf_failed, vsi->rx_page_failed);
247 rcu_read_lock();
248 for (i = 0; i < vsi->num_queue_pairs; i++) {
249 struct i40e_ring *rx_ring = READ_ONCE(vsi->rx_rings[i]);
250
251 if (!rx_ring)
252 continue;
253
254 dev_info(&pf->pdev->dev,
255 " rx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
256 i, *rx_ring->state,
257 rx_ring->queue_index,
258 rx_ring->reg_idx);
259 dev_info(&pf->pdev->dev,
260 " rx_rings[%i]: rx_buf_len = %d\n",
261 i, rx_ring->rx_buf_len);
262 dev_info(&pf->pdev->dev,
263 " rx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
264 i,
265 rx_ring->next_to_use,
266 rx_ring->next_to_clean,
267 rx_ring->ring_active);
268 dev_info(&pf->pdev->dev,
269 " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
270 i, rx_ring->stats.packets,
271 rx_ring->stats.bytes,
272 rx_ring->rx_stats.non_eop_descs);
273 dev_info(&pf->pdev->dev,
274 " rx_rings[%i]: rx_stats: alloc_page_failed = %lld, alloc_buff_failed = %lld\n",
275 i,
276 rx_ring->rx_stats.alloc_page_failed,
277 rx_ring->rx_stats.alloc_buff_failed);
278 dev_info(&pf->pdev->dev,
279 " rx_rings[%i]: rx_stats: realloc_count = 0, page_reuse_count = %lld\n",
280 i,
281 rx_ring->rx_stats.page_reuse_count);
282 dev_info(&pf->pdev->dev,
283 " rx_rings[%i]: size = %i\n",
284 i, rx_ring->size);
285 dev_info(&pf->pdev->dev,
286 " rx_rings[%i]: itr_setting = %d (%s)\n",
287 i, rx_ring->itr_setting,
288 ITR_IS_DYNAMIC(rx_ring->itr_setting) ? "dynamic" : "fixed");
289 }
290 for (i = 0; i < vsi->num_queue_pairs; i++) {
291 struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]);
292
293 if (!tx_ring)
294 continue;
295
296 dev_info(&pf->pdev->dev,
297 " tx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
298 i, *tx_ring->state,
299 tx_ring->queue_index,
300 tx_ring->reg_idx);
301 dev_info(&pf->pdev->dev,
302 " tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
303 i,
304 tx_ring->next_to_use,
305 tx_ring->next_to_clean,
306 tx_ring->ring_active);
307 dev_info(&pf->pdev->dev,
308 " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
309 i, tx_ring->stats.packets,
310 tx_ring->stats.bytes,
311 tx_ring->tx_stats.restart_queue);
312 dev_info(&pf->pdev->dev,
313 " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld, tx_stopped = %lld\n",
314 i,
315 tx_ring->tx_stats.tx_busy,
316 tx_ring->tx_stats.tx_done_old,
317 tx_ring->tx_stats.tx_stopped);
318 dev_info(&pf->pdev->dev,
319 " tx_rings[%i]: size = %i\n",
320 i, tx_ring->size);
321 dev_info(&pf->pdev->dev,
322 " tx_rings[%i]: DCB tc = %d\n",
323 i, tx_ring->dcb_tc);
324 dev_info(&pf->pdev->dev,
325 " tx_rings[%i]: itr_setting = %d (%s)\n",
326 i, tx_ring->itr_setting,
327 ITR_IS_DYNAMIC(tx_ring->itr_setting) ? "dynamic" : "fixed");
328 }
329 if (i40e_enabled_xdp_vsi(vsi)) {
330 for (i = 0; i < vsi->num_queue_pairs; i++) {
331 struct i40e_ring *xdp_ring = READ_ONCE(vsi->xdp_rings[i]);
332
333 if (!xdp_ring)
334 continue;
335
336 dev_info(&pf->pdev->dev,
337 " xdp_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
338 i, *xdp_ring->state,
339 xdp_ring->queue_index,
340 xdp_ring->reg_idx);
341 dev_info(&pf->pdev->dev,
342 " xdp_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
343 i,
344 xdp_ring->next_to_use,
345 xdp_ring->next_to_clean,
346 xdp_ring->ring_active);
347 dev_info(&pf->pdev->dev,
348 " xdp_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
349 i, xdp_ring->stats.packets,
350 xdp_ring->stats.bytes,
351 xdp_ring->tx_stats.restart_queue);
352 dev_info(&pf->pdev->dev,
353 " xdp_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
354 i,
355 xdp_ring->tx_stats.tx_busy,
356 xdp_ring->tx_stats.tx_done_old);
357 dev_info(&pf->pdev->dev,
358 " xdp_rings[%i]: size = %i\n",
359 i, xdp_ring->size);
360 dev_info(&pf->pdev->dev,
361 " xdp_rings[%i]: DCB tc = %d\n",
362 i, xdp_ring->dcb_tc);
363 dev_info(&pf->pdev->dev,
364 " xdp_rings[%i]: itr_setting = %d (%s)\n",
365 i, xdp_ring->itr_setting,
366 ITR_IS_DYNAMIC(xdp_ring->itr_setting) ?
367 "dynamic" : "fixed");
368 }
369 }
370 rcu_read_unlock();
371 dev_info(&pf->pdev->dev,
372 " work_limit = %d\n",
373 vsi->work_limit);
374 dev_info(&pf->pdev->dev,
375 " max_frame = %d, rx_buf_len = %d dtype = %d\n",
376 vsi->max_frame, vsi->rx_buf_len, 0);
377 dev_info(&pf->pdev->dev,
378 " num_q_vectors = %i, base_vector = %i\n",
379 vsi->num_q_vectors, vsi->base_vector);
380 dev_info(&pf->pdev->dev,
381 " seid = %d, id = %d, uplink_seid = %d\n",
382 vsi->seid, vsi->id, vsi->uplink_seid);
383 dev_info(&pf->pdev->dev,
384 " base_queue = %d, num_queue_pairs = %d, num_tx_desc = %d, num_rx_desc = %d\n",
385 vsi->base_queue, vsi->num_queue_pairs, vsi->num_tx_desc,
386 vsi->num_rx_desc);
387 dev_info(&pf->pdev->dev, " type = %i\n", vsi->type);
388 if (vsi->type == I40E_VSI_SRIOV)
389 dev_info(&pf->pdev->dev, " VF ID = %i\n", vsi->vf_id);
390 dev_info(&pf->pdev->dev,
391 " info: valid_sections = 0x%04x, switch_id = 0x%04x\n",
392 vsi->info.valid_sections, vsi->info.switch_id);
393 dev_info(&pf->pdev->dev,
394 " info: sw_reserved[] = 0x%02x 0x%02x\n",
395 vsi->info.sw_reserved[0], vsi->info.sw_reserved[1]);
396 dev_info(&pf->pdev->dev,
397 " info: sec_flags = 0x%02x, sec_reserved = 0x%02x\n",
398 vsi->info.sec_flags, vsi->info.sec_reserved);
399 dev_info(&pf->pdev->dev,
400 " info: pvid = 0x%04x, fcoe_pvid = 0x%04x, port_vlan_flags = 0x%02x\n",
401 vsi->info.pvid, vsi->info.fcoe_pvid,
402 vsi->info.port_vlan_flags);
403 dev_info(&pf->pdev->dev,
404 " info: pvlan_reserved[] = 0x%02x 0x%02x 0x%02x\n",
405 vsi->info.pvlan_reserved[0], vsi->info.pvlan_reserved[1],
406 vsi->info.pvlan_reserved[2]);
407 dev_info(&pf->pdev->dev,
408 " info: ingress_table = 0x%08x, egress_table = 0x%08x\n",
409 vsi->info.ingress_table, vsi->info.egress_table);
410 dev_info(&pf->pdev->dev,
411 " info: cas_pv_stag = 0x%04x, cas_pv_flags= 0x%02x, cas_pv_reserved = 0x%02x\n",
412 vsi->info.cas_pv_tag, vsi->info.cas_pv_flags,
413 vsi->info.cas_pv_reserved);
414 dev_info(&pf->pdev->dev,
415 " info: queue_mapping[0..7 ] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
416 vsi->info.queue_mapping[0], vsi->info.queue_mapping[1],
417 vsi->info.queue_mapping[2], vsi->info.queue_mapping[3],
418 vsi->info.queue_mapping[4], vsi->info.queue_mapping[5],
419 vsi->info.queue_mapping[6], vsi->info.queue_mapping[7]);
420 dev_info(&pf->pdev->dev,
421 " info: queue_mapping[8..15] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
422 vsi->info.queue_mapping[8], vsi->info.queue_mapping[9],
423 vsi->info.queue_mapping[10], vsi->info.queue_mapping[11],
424 vsi->info.queue_mapping[12], vsi->info.queue_mapping[13],
425 vsi->info.queue_mapping[14], vsi->info.queue_mapping[15]);
426 dev_info(&pf->pdev->dev,
427 " info: tc_mapping[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
428 vsi->info.tc_mapping[0], vsi->info.tc_mapping[1],
429 vsi->info.tc_mapping[2], vsi->info.tc_mapping[3],
430 vsi->info.tc_mapping[4], vsi->info.tc_mapping[5],
431 vsi->info.tc_mapping[6], vsi->info.tc_mapping[7]);
432 dev_info(&pf->pdev->dev,
433 " info: queueing_opt_flags = 0x%02x queueing_opt_reserved[0..2] = 0x%02x 0x%02x 0x%02x\n",
434 vsi->info.queueing_opt_flags,
435 vsi->info.queueing_opt_reserved[0],
436 vsi->info.queueing_opt_reserved[1],
437 vsi->info.queueing_opt_reserved[2]);
438 dev_info(&pf->pdev->dev,
439 " info: up_enable_bits = 0x%02x\n",
440 vsi->info.up_enable_bits);
441 dev_info(&pf->pdev->dev,
442 " info: sched_reserved = 0x%02x, outer_up_table = 0x%04x\n",
443 vsi->info.sched_reserved, vsi->info.outer_up_table);
444 dev_info(&pf->pdev->dev,
445 " info: cmd_reserved[] = 0x%02x 0x%02x 0x%02x 0x0%02x 0x%02x 0x%02x 0x%02x 0x0%02x\n",
446 vsi->info.cmd_reserved[0], vsi->info.cmd_reserved[1],
447 vsi->info.cmd_reserved[2], vsi->info.cmd_reserved[3],
448 vsi->info.cmd_reserved[4], vsi->info.cmd_reserved[5],
449 vsi->info.cmd_reserved[6], vsi->info.cmd_reserved[7]);
450 dev_info(&pf->pdev->dev,
451 " info: qs_handle[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
452 vsi->info.qs_handle[0], vsi->info.qs_handle[1],
453 vsi->info.qs_handle[2], vsi->info.qs_handle[3],
454 vsi->info.qs_handle[4], vsi->info.qs_handle[5],
455 vsi->info.qs_handle[6], vsi->info.qs_handle[7]);
456 dev_info(&pf->pdev->dev,
457 " info: stat_counter_idx = 0x%04x, sched_id = 0x%04x\n",
458 vsi->info.stat_counter_idx, vsi->info.sched_id);
459 dev_info(&pf->pdev->dev,
460 " info: resp_reserved[] = 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
461 vsi->info.resp_reserved[0], vsi->info.resp_reserved[1],
462 vsi->info.resp_reserved[2], vsi->info.resp_reserved[3],
463 vsi->info.resp_reserved[4], vsi->info.resp_reserved[5],
464 vsi->info.resp_reserved[6], vsi->info.resp_reserved[7],
465 vsi->info.resp_reserved[8], vsi->info.resp_reserved[9],
466 vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]);
467 dev_info(&pf->pdev->dev, " idx = %d\n", vsi->idx);
468 dev_info(&pf->pdev->dev,
469 " tc_config: numtc = %d, enabled_tc = 0x%x\n",
470 vsi->tc_config.numtc, vsi->tc_config.enabled_tc);
471 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
472 dev_info(&pf->pdev->dev,
473 " tc_config: tc = %d, qoffset = %d, qcount = %d, netdev_tc = %d\n",
474 i, vsi->tc_config.tc_info[i].qoffset,
475 vsi->tc_config.tc_info[i].qcount,
476 vsi->tc_config.tc_info[i].netdev_tc);
477 }
478 dev_info(&pf->pdev->dev,
479 " bw: bw_limit = %d, bw_max_quanta = %d\n",
480 vsi->bw_limit, vsi->bw_max_quanta);
481 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
482 dev_info(&pf->pdev->dev,
483 " bw[%d]: ets_share_credits = %d, ets_limit_credits = %d, max_quanta = %d\n",
484 i, vsi->bw_ets_share_credits[i],
485 vsi->bw_ets_limit_credits[i],
486 vsi->bw_ets_max_quanta[i]);
487 }
488}
489
490/**
491 * i40e_dbg_dump_aq_desc - handles dump aq_desc write into command datum
492 * @pf: the i40e_pf created in command write
493 **/
494static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
495{
496 struct i40e_adminq_ring *ring;
497 struct i40e_hw *hw = &pf->hw;
498 char hdr[32];
499 int i;
500
	snprintf(hdr, sizeof(hdr), "%s %s: ",
		 dev_driver_string(&pf->pdev->dev),
		 dev_name(&pf->pdev->dev));
504
505 /* first the send (command) ring, then the receive (event) ring */
506 dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n");
507 ring = &(hw->aq.asq);
508 for (i = 0; i < ring->count; i++) {
509 struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
510
511 dev_info(&pf->pdev->dev,
512 " at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
513 i, d->flags, d->opcode, d->datalen, d->retval,
514 d->cookie_high, d->cookie_low);
		print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
			       16, 1, d->params.raw, 16, 0);
517 }
518
519 dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n");
520 ring = &(hw->aq.arq);
521 for (i = 0; i < ring->count; i++) {
522 struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
523
524 dev_info(&pf->pdev->dev,
525 " ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
526 i, d->flags, d->opcode, d->datalen, d->retval,
527 d->cookie_high, d->cookie_low);
		print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE,
			       16, 1, d->params.raw, 16, 0);
530 }
531}
532
533/**
534 * i40e_dbg_dump_desc - handles dump desc write into command datum
535 * @cnt: number of arguments that the user supplied
536 * @vsi_seid: vsi id entered by user
537 * @ring_id: ring id entered by user
538 * @desc_n: descriptor number entered by user
539 * @pf: the i40e_pf created in command write
540 * @type: enum describing whether ring is RX, TX or XDP
541 **/
542static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
543 struct i40e_pf *pf, enum ring_type type)
544{
545 bool is_rx_ring = type == RING_TYPE_RX;
546 struct i40e_tx_desc *txd;
547 union i40e_rx_desc *rxd;
548 struct i40e_ring *ring;
549 struct i40e_vsi *vsi;
550 int i;
551
	vsi = i40e_dbg_find_vsi(pf, vsi_seid);
553 if (!vsi) {
554 dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid);
555 return;
556 }
557 if (vsi->type != I40E_VSI_MAIN &&
558 vsi->type != I40E_VSI_FDIR &&
559 vsi->type != I40E_VSI_VMDQ2) {
560 dev_info(&pf->pdev->dev,
561 "vsi %d type %d descriptor rings not available\n",
562 vsi_seid, vsi->type);
563 return;
564 }
565 if (type == RING_TYPE_XDP && !i40e_enabled_xdp_vsi(vsi)) {
566 dev_info(&pf->pdev->dev, "XDP not enabled on VSI %d\n", vsi_seid);
567 return;
568 }
569 if (ring_id >= vsi->num_queue_pairs || ring_id < 0) {
570 dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id);
571 return;
572 }
573 if (!vsi->tx_rings || !vsi->tx_rings[0]->desc) {
574 dev_info(&pf->pdev->dev,
575 "descriptor rings have not been allocated for vsi %d\n",
576 vsi_seid);
577 return;
578 }
579
580 switch (type) {
581 case RING_TYPE_RX:
		ring = kmemdup(vsi->rx_rings[ring_id], sizeof(*ring), GFP_KERNEL);
		break;
	case RING_TYPE_TX:
		ring = kmemdup(vsi->tx_rings[ring_id], sizeof(*ring), GFP_KERNEL);
		break;
	case RING_TYPE_XDP:
		ring = kmemdup(vsi->xdp_rings[ring_id], sizeof(*ring), GFP_KERNEL);
589 break;
590 default:
591 ring = NULL;
592 break;
593 }
594 if (!ring)
595 return;
596
597 if (cnt == 2) {
598 switch (type) {
599 case RING_TYPE_RX:
600 dev_info(&pf->pdev->dev, "VSI = %02i Rx ring = %02i\n", vsi_seid, ring_id);
601 break;
602 case RING_TYPE_TX:
603 dev_info(&pf->pdev->dev, "VSI = %02i Tx ring = %02i\n", vsi_seid, ring_id);
604 break;
605 case RING_TYPE_XDP:
606 dev_info(&pf->pdev->dev, "VSI = %02i XDP ring = %02i\n", vsi_seid, ring_id);
607 break;
608 }
609 for (i = 0; i < ring->count; i++) {
610 if (!is_rx_ring) {
611 txd = I40E_TX_DESC(ring, i);
612 dev_info(&pf->pdev->dev,
613 " d[%03x] = 0x%016llx 0x%016llx\n",
614 i, txd->buffer_addr,
615 txd->cmd_type_offset_bsz);
616 } else {
617 rxd = I40E_RX_DESC(ring, i);
618 dev_info(&pf->pdev->dev,
619 " d[%03x] = 0x%016llx 0x%016llx\n",
620 i, rxd->read.pkt_addr,
621 rxd->read.hdr_addr);
622 }
623 }
624 } else if (cnt == 3) {
625 if (desc_n >= ring->count || desc_n < 0) {
626 dev_info(&pf->pdev->dev,
627 "descriptor %d not found\n", desc_n);
628 goto out;
629 }
630 if (!is_rx_ring) {
631 txd = I40E_TX_DESC(ring, desc_n);
632 dev_info(&pf->pdev->dev,
633 "vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
634 vsi_seid, ring_id, desc_n,
635 txd->buffer_addr, txd->cmd_type_offset_bsz);
636 } else {
637 rxd = I40E_RX_DESC(ring, desc_n);
638 dev_info(&pf->pdev->dev,
639 "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
640 vsi_seid, ring_id, desc_n,
641 rxd->read.pkt_addr, rxd->read.hdr_addr);
642 }
643 } else {
644 dev_info(&pf->pdev->dev, "dump desc rx/tx/xdp <vsi_seid> <ring_id> [<desc_n>]\n");
645 }
646
647out:
	kfree(ring);
649}
650
651/**
652 * i40e_dbg_dump_vsi_no_seid - handles dump vsi write into command datum
653 * @pf: the i40e_pf created in command write
654 **/
655static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
656{
657 int i;
658
659 for (i = 0; i < pf->num_alloc_vsi; i++)
660 if (pf->vsi[i])
661 dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n",
662 i, pf->vsi[i]->seid);
663}
664
665/**
666 * i40e_dbg_dump_eth_stats - handles dump stats write into command datum
667 * @pf: the i40e_pf created in command write
668 * @estats: the eth stats structure to be dumped
669 **/
670static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf,
671 struct i40e_eth_stats *estats)
672{
673 dev_info(&pf->pdev->dev, " ethstats:\n");
674 dev_info(&pf->pdev->dev,
675 " rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n",
676 estats->rx_bytes, estats->rx_unicast, estats->rx_multicast);
677 dev_info(&pf->pdev->dev,
678 " rx_broadcast = \t%lld \trx_discards = \t\t%lld\n",
679 estats->rx_broadcast, estats->rx_discards);
680 dev_info(&pf->pdev->dev,
681 " rx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n",
682 estats->rx_unknown_protocol, estats->tx_bytes);
683 dev_info(&pf->pdev->dev,
684 " tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n",
685 estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast);
686 dev_info(&pf->pdev->dev,
687 " tx_discards = \t%lld \ttx_errors = \t\t%lld\n",
688 estats->tx_discards, estats->tx_errors);
689}
690
691/**
692 * i40e_dbg_dump_veb_seid - handles dump stats of a single given veb
693 * @pf: the i40e_pf created in command write
694 * @seid: the seid the user put in
695 **/
696static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
697{
698 struct i40e_veb *veb;
699
700 veb = i40e_dbg_find_veb(pf, seid);
701 if (!veb) {
702 dev_info(&pf->pdev->dev, "can't find veb %d\n", seid);
703 return;
704 }
705 dev_info(&pf->pdev->dev,
706 "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d mode=%s\n",
707 veb->idx, veb->veb_idx, veb->stats_idx, veb->seid,
708 veb->uplink_seid,
709 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
	i40e_dbg_dump_eth_stats(pf, &veb->stats);
711}
712
713/**
714 * i40e_dbg_dump_veb_all - dumps all known veb's stats
715 * @pf: the i40e_pf created in command write
716 **/
717static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
718{
719 struct i40e_veb *veb;
720 int i;
721
722 for (i = 0; i < I40E_MAX_VEB; i++) {
723 veb = pf->veb[i];
724 if (veb)
			i40e_dbg_dump_veb_seid(pf, veb->seid);
726 }
727}
728
729/**
730 * i40e_dbg_dump_vf - dump VF info
731 * @pf: the i40e_pf created in command write
732 * @vf_id: the vf_id from the user
733 **/
734static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id)
735{
736 struct i40e_vf *vf;
737 struct i40e_vsi *vsi;
738
739 if (!pf->num_alloc_vfs) {
740 dev_info(&pf->pdev->dev, "no VFs allocated\n");
741 } else if ((vf_id >= 0) && (vf_id < pf->num_alloc_vfs)) {
742 vf = &pf->vf[vf_id];
743 vsi = pf->vsi[vf->lan_vsi_idx];
744 dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n",
745 vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs);
746 dev_info(&pf->pdev->dev, " num MDD=%lld\n",
747 vf->num_mdd_events);
748 } else {
749 dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id);
750 }
751}
752
753/**
754 * i40e_dbg_dump_vf_all - dump VF info for all VFs
755 * @pf: the i40e_pf created in command write
756 **/
757static void i40e_dbg_dump_vf_all(struct i40e_pf *pf)
758{
759 int i;
760
761 if (!pf->num_alloc_vfs)
762 dev_info(&pf->pdev->dev, "no VFs enabled!\n");
763 else
764 for (i = 0; i < pf->num_alloc_vfs; i++)
			i40e_dbg_dump_vf(pf, i);
766}
767
768/**
769 * i40e_dbg_command_write - write into command datum
770 * @filp: the opened file
771 * @buffer: where to find the user's data
772 * @count: the length of the user's data
773 * @ppos: file position offset
774 **/
775static ssize_t i40e_dbg_command_write(struct file *filp,
776 const char __user *buffer,
777 size_t count, loff_t *ppos)
778{
779 struct i40e_pf *pf = filp->private_data;
780 char *cmd_buf, *cmd_buf_tmp;
781 int bytes_not_copied;
782 struct i40e_vsi *vsi;
783 int vsi_seid;
784 int veb_seid;
785 int vf_id;
786 int cnt;
787
788 /* don't allow partial writes */
789 if (*ppos != 0)
790 return 0;
791
	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
	if (!cmd_buf)
		return count;
	bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
	if (bytes_not_copied) {
		kfree(cmd_buf);
798 return -EFAULT;
799 }
800 cmd_buf[count] = '\0';
801
802 cmd_buf_tmp = strchr(cmd_buf, '\n');
803 if (cmd_buf_tmp) {
804 *cmd_buf_tmp = '\0';
805 count = cmd_buf_tmp - cmd_buf + 1;
806 }
807
808 if (strncmp(cmd_buf, "add vsi", 7) == 0) {
809 vsi_seid = -1;
810 cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
811 if (cnt == 0) {
812 /* default to PF VSI */
813 vsi_seid = pf->vsi[pf->lan_vsi]->seid;
814 } else if (vsi_seid < 0) {
815 dev_info(&pf->pdev->dev, "add VSI %d: bad vsi seid\n",
816 vsi_seid);
817 goto command_write_done;
818 }
819
		/* By default we are in VEPA mode; if this is the first VF/VMDq
		 * VSI to be added, switch to VEB mode.
		 */
		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
		}

		vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
829 if (vsi)
830 dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n",
831 vsi->seid, vsi->uplink_seid);
832 else
833 dev_info(&pf->pdev->dev, "'%s' failed\n", cmd_buf);
834
835 } else if (strncmp(cmd_buf, "del vsi", 7) == 0) {
836 cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
837 if (cnt != 1) {
838 dev_info(&pf->pdev->dev,
839 "del vsi: bad command string, cnt=%d\n",
840 cnt);
841 goto command_write_done;
842 }
		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
844 if (!vsi) {
845 dev_info(&pf->pdev->dev, "del VSI %d: seid not found\n",
846 vsi_seid);
847 goto command_write_done;
848 }
849
850 dev_info(&pf->pdev->dev, "deleting VSI %d\n", vsi_seid);
851 i40e_vsi_release(vsi);
852
853 } else if (strncmp(cmd_buf, "add relay", 9) == 0) {
854 struct i40e_veb *veb;
855 int uplink_seid, i;
856
857 cnt = sscanf(&cmd_buf[9], "%i %i", &uplink_seid, &vsi_seid);
858 if (cnt != 2) {
859 dev_info(&pf->pdev->dev,
860 "add relay: bad command string, cnt=%d\n",
861 cnt);
862 goto command_write_done;
863 } else if (uplink_seid < 0) {
864 dev_info(&pf->pdev->dev,
865 "add relay %d: bad uplink seid\n",
866 uplink_seid);
867 goto command_write_done;
868 }
869
		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
871 if (!vsi) {
872 dev_info(&pf->pdev->dev,
873 "add relay: VSI %d not found\n", vsi_seid);
874 goto command_write_done;
875 }
876
877 for (i = 0; i < I40E_MAX_VEB; i++)
878 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid)
879 break;
880 if (i >= I40E_MAX_VEB && uplink_seid != 0 &&
881 uplink_seid != pf->mac_seid) {
882 dev_info(&pf->pdev->dev,
883 "add relay: relay uplink %d not found\n",
884 uplink_seid);
885 goto command_write_done;
886 }
887
		veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid,
				     vsi->tc_config.enabled_tc);
890 if (veb)
891 dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid);
892 else
893 dev_info(&pf->pdev->dev, "add relay failed\n");
894
895 } else if (strncmp(cmd_buf, "del relay", 9) == 0) {
896 int i;
897 cnt = sscanf(&cmd_buf[9], "%i", &veb_seid);
898 if (cnt != 1) {
899 dev_info(&pf->pdev->dev,
900 "del relay: bad command string, cnt=%d\n",
901 cnt);
902 goto command_write_done;
903 } else if (veb_seid < 0) {
904 dev_info(&pf->pdev->dev,
905 "del relay %d: bad relay seid\n", veb_seid);
906 goto command_write_done;
907 }
908
909 /* find the veb */
910 for (i = 0; i < I40E_MAX_VEB; i++)
911 if (pf->veb[i] && pf->veb[i]->seid == veb_seid)
912 break;
913 if (i >= I40E_MAX_VEB) {
914 dev_info(&pf->pdev->dev,
915 "del relay: relay %d not found\n", veb_seid);
916 goto command_write_done;
917 }
918
919 dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
		i40e_veb_release(pf->veb[i]);
921 } else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
922 unsigned int v;
923 int ret;
924 u16 vid;
925
926 cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
927 if (cnt != 2) {
928 dev_info(&pf->pdev->dev,
929 "add pvid: bad command string, cnt=%d\n", cnt);
930 goto command_write_done;
931 }
932
		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
934 if (!vsi) {
935 dev_info(&pf->pdev->dev, "add pvid: VSI %d not found\n",
936 vsi_seid);
937 goto command_write_done;
938 }
939
940 vid = v;
941 ret = i40e_vsi_add_pvid(vsi, vid);
942 if (!ret)
943 dev_info(&pf->pdev->dev,
944 "add pvid: %d added to VSI %d\n",
945 vid, vsi_seid);
946 else
947 dev_info(&pf->pdev->dev,
948 "add pvid: %d to VSI %d failed, ret=%d\n",
949 vid, vsi_seid, ret);
950
951 } else if (strncmp(cmd_buf, "del pvid", 8) == 0) {
952
953 cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
954 if (cnt != 1) {
955 dev_info(&pf->pdev->dev,
956 "del pvid: bad command string, cnt=%d\n",
957 cnt);
958 goto command_write_done;
959 }
960
		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
962 if (!vsi) {
963 dev_info(&pf->pdev->dev,
964 "del pvid: VSI %d not found\n", vsi_seid);
965 goto command_write_done;
966 }
967
968 i40e_vsi_remove_pvid(vsi);
969 dev_info(&pf->pdev->dev,
970 "del pvid: removed from VSI %d\n", vsi_seid);
971
972 } else if (strncmp(cmd_buf, "dump", 4) == 0) {
973 if (strncmp(&cmd_buf[5], "switch", 6) == 0) {
			i40e_fetch_switch_configuration(pf, true);
975 } else if (strncmp(&cmd_buf[5], "vsi", 3) == 0) {
976 cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
977 if (cnt > 0)
				i40e_dbg_dump_vsi_seid(pf, vsi_seid);
979 else
980 i40e_dbg_dump_vsi_no_seid(pf);
981 } else if (strncmp(&cmd_buf[5], "veb", 3) == 0) {
982 cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
983 if (cnt > 0)
				i40e_dbg_dump_veb_seid(pf, vsi_seid);
985 else
986 i40e_dbg_dump_veb_all(pf);
987 } else if (strncmp(&cmd_buf[5], "vf", 2) == 0) {
988 cnt = sscanf(&cmd_buf[7], "%i", &vf_id);
989 if (cnt > 0)
990 i40e_dbg_dump_vf(pf, vf_id);
991 else
992 i40e_dbg_dump_vf_all(pf);
		} else if (strncmp(&cmd_buf[5], "desc", 4) == 0) {
			int ring_id, desc_n;

			if (strncmp(&cmd_buf[10], "rx", 2) == 0) {
				cnt = sscanf(&cmd_buf[12], "%i %i %i",
					     &vsi_seid, &ring_id, &desc_n);
				i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
						   desc_n, pf, RING_TYPE_RX);
			} else if (strncmp(&cmd_buf[10], "tx", 2) == 0) {
				cnt = sscanf(&cmd_buf[12], "%i %i %i",
					     &vsi_seid, &ring_id, &desc_n);
				i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
						   desc_n, pf, RING_TYPE_TX);
			} else if (strncmp(&cmd_buf[10], "xdp", 3) == 0) {
				cnt = sscanf(&cmd_buf[13], "%i %i %i",
					     &vsi_seid, &ring_id, &desc_n);
				i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
						   desc_n, pf, RING_TYPE_XDP);
			} else if (strncmp(&cmd_buf[10], "aq", 2) == 0) {
				i40e_dbg_dump_aq_desc(pf);
1014 } else {
1015 dev_info(&pf->pdev->dev,
1016 "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
1017 dev_info(&pf->pdev->dev,
1018 "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
1019 dev_info(&pf->pdev->dev,
1020 "dump desc xdp <vsi_seid> <ring_id> [<desc_n>]\n");
1021 dev_info(&pf->pdev->dev, "dump desc aq\n");
1022 }
1023 } else if (strncmp(&cmd_buf[5], "reset stats", 11) == 0) {
1024 dev_info(&pf->pdev->dev,
1025 "core reset count: %d\n", pf->corer_count);
1026 dev_info(&pf->pdev->dev,
1027 "global reset count: %d\n", pf->globr_count);
1028 dev_info(&pf->pdev->dev,
1029 "emp reset count: %d\n", pf->empr_count);
1030 dev_info(&pf->pdev->dev,
1031 "pf reset count: %d\n", pf->pfr_count);
1032 dev_info(&pf->pdev->dev,
1033 "pf tx sluggish count: %d\n",
1034 pf->tx_sluggish_count);
1035 } else if (strncmp(&cmd_buf[5], "port", 4) == 0) {
1036 struct i40e_aqc_query_port_ets_config_resp *bw_data;
1037 struct i40e_dcbx_config *cfg =
1038 &pf->hw.local_dcbx_config;
1039 struct i40e_dcbx_config *r_cfg =
1040 &pf->hw.remote_dcbx_config;
1041 int i, ret;
1042 u16 switch_id;
1043
			bw_data = kzalloc(sizeof(
				struct i40e_aqc_query_port_ets_config_resp),
				GFP_KERNEL);
1047 if (!bw_data) {
1048 ret = -ENOMEM;
1049 goto command_write_done;
1050 }
1051
1052 vsi = pf->vsi[pf->lan_vsi];
1053 switch_id =
1054 le16_to_cpu(vsi->info.switch_id) &
1055 I40E_AQ_VSI_SW_ID_MASK;
1056
			ret = i40e_aq_query_port_ets_config(&pf->hw,
							    switch_id,
							    bw_data, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "Query Port ETS Config AQ command failed =0x%x\n",
					 pf->hw.aq.asq_last_status);
				kfree(bw_data);
1065 bw_data = NULL;
1066 goto command_write_done;
1067 }
1068 dev_info(&pf->pdev->dev,
1069 "port bw: tc_valid=0x%x tc_strict_prio=0x%x, tc_bw_max=0x%04x,0x%04x\n",
1070 bw_data->tc_valid_bits,
1071 bw_data->tc_strict_priority_bits,
1072 le16_to_cpu(bw_data->tc_bw_max[0]),
1073 le16_to_cpu(bw_data->tc_bw_max[1]));
1074 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1075 dev_info(&pf->pdev->dev, "port bw: tc_bw_share=%d tc_bw_limit=%d\n",
1076 bw_data->tc_bw_share_credits[i],
1077 le16_to_cpu(bw_data->tc_bw_limits[i]));
1078 }
1079
			kfree(bw_data);
1081 bw_data = NULL;
1082
1083 dev_info(&pf->pdev->dev,
1084 "port dcbx_mode=%d\n", cfg->dcbx_mode);
1085 dev_info(&pf->pdev->dev,
1086 "port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
1087 cfg->etscfg.willing, cfg->etscfg.cbs,
1088 cfg->etscfg.maxtcs);
1089 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1090 dev_info(&pf->pdev->dev, "port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1091 i, cfg->etscfg.prioritytable[i],
1092 cfg->etscfg.tcbwtable[i],
1093 cfg->etscfg.tsatable[i]);
1094 }
1095 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1096 dev_info(&pf->pdev->dev, "port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1097 i, cfg->etsrec.prioritytable[i],
1098 cfg->etsrec.tcbwtable[i],
1099 cfg->etsrec.tsatable[i]);
1100 }
1101 dev_info(&pf->pdev->dev,
1102 "port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
1103 cfg->pfc.willing, cfg->pfc.mbc,
1104 cfg->pfc.pfccap, cfg->pfc.pfcenable);
1105 dev_info(&pf->pdev->dev,
1106 "port app_table: num_apps=%d\n", cfg->numapps);
1107 for (i = 0; i < cfg->numapps; i++) {
1108 dev_info(&pf->pdev->dev, "port app_table: %d prio=%d selector=%d protocol=0x%x\n",
1109 i, cfg->app[i].priority,
1110 cfg->app[i].selector,
1111 cfg->app[i].protocolid);
1112 }
1113 /* Peer TLV DCBX data */
1114 dev_info(&pf->pdev->dev,
1115 "remote port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
1116 r_cfg->etscfg.willing,
1117 r_cfg->etscfg.cbs, r_cfg->etscfg.maxtcs);
1118 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1119 dev_info(&pf->pdev->dev, "remote port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1120 i, r_cfg->etscfg.prioritytable[i],
1121 r_cfg->etscfg.tcbwtable[i],
1122 r_cfg->etscfg.tsatable[i]);
1123 }
1124 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1125 dev_info(&pf->pdev->dev, "remote port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
1126 i, r_cfg->etsrec.prioritytable[i],
1127 r_cfg->etsrec.tcbwtable[i],
1128 r_cfg->etsrec.tsatable[i]);
1129 }
1130 dev_info(&pf->pdev->dev,
1131 "remote port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
1132 r_cfg->pfc.willing,
1133 r_cfg->pfc.mbc,
1134 r_cfg->pfc.pfccap,
1135 r_cfg->pfc.pfcenable);
1136 dev_info(&pf->pdev->dev,
1137 "remote port app_table: num_apps=%d\n",
1138 r_cfg->numapps);
1139 for (i = 0; i < r_cfg->numapps; i++) {
1140 dev_info(&pf->pdev->dev, "remote port app_table: %d prio=%d selector=%d protocol=0x%x\n",
1141 i, r_cfg->app[i].priority,
1142 r_cfg->app[i].selector,
1143 r_cfg->app[i].protocolid);
1144 }
1145 } else if (strncmp(&cmd_buf[5], "debug fwdata", 12) == 0) {
1146 int cluster_id, table_id;
1147 int index, ret;
1148 u16 buff_len = 4096;
1149 u32 next_index;
1150 u8 next_table;
1151 u8 *buff;
1152 u16 rlen;
1153
1154 cnt = sscanf(&cmd_buf[18], "%i %i %i",
1155 &cluster_id, &table_id, &index);
1156 if (cnt != 3) {
1157 dev_info(&pf->pdev->dev,
1158 "dump debug fwdata <cluster_id> <table_id> <index>\n");
1159 goto command_write_done;
1160 }
1161
1162 dev_info(&pf->pdev->dev,
1163 "AQ debug dump fwdata params %x %x %x %x\n",
1164 cluster_id, table_id, index, buff_len);
			buff = kzalloc(buff_len, GFP_KERNEL);
			if (!buff)
				goto command_write_done;

			ret = i40e_aq_debug_dump(&pf->hw, cluster_id, table_id,
						 index, buff_len, buff, &rlen,
						 &next_table, &next_index,
						 NULL);
1173 if (ret) {
1174 dev_info(&pf->pdev->dev,
1175 "debug dump fwdata AQ Failed %d 0x%x\n",
1176 ret, pf->hw.aq.asq_last_status);
				kfree(buff);
				buff = NULL;
1179 goto command_write_done;
1180 }
1181 dev_info(&pf->pdev->dev,
1182 "AQ debug dump fwdata rlen=0x%x next_table=0x%x next_index=0x%x\n",
1183 rlen, next_table, next_index);
			print_hex_dump(KERN_INFO, "AQ buffer WB: ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       buff, rlen, true);
			kfree(buff);
1188 buff = NULL;
1189 } else {
1190 dev_info(&pf->pdev->dev,
1191 "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>], dump desc xdp <vsi_seid> <ring_id> [<desc_n>],\n");
1192 dev_info(&pf->pdev->dev, "dump switch\n");
1193 dev_info(&pf->pdev->dev, "dump vsi [seid]\n");
1194 dev_info(&pf->pdev->dev, "dump reset stats\n");
1195 dev_info(&pf->pdev->dev, "dump port\n");
1196 dev_info(&pf->pdev->dev, "dump vf [vf_id]\n");
1197 dev_info(&pf->pdev->dev,
1198 "dump debug fwdata <cluster_id> <table_id> <index>\n");
1199 }
1200 } else if (strncmp(cmd_buf, "pfr", 3) == 0) {
1201 dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
1202 i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
1203
1204 } else if (strncmp(cmd_buf, "corer", 5) == 0) {
1205 dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
1206 i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED));
1207
1208 } else if (strncmp(cmd_buf, "globr", 5) == 0) {
1209 dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
1210 i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED));
1211
1212 } else if (strncmp(cmd_buf, "read", 4) == 0) {
1213 u32 address;
1214 u32 value;
1215
1216 cnt = sscanf(&cmd_buf[4], "%i", &address);
1217 if (cnt != 1) {
1218 dev_info(&pf->pdev->dev, "read <reg>\n");
1219 goto command_write_done;
1220 }
1221
1222 /* check the range on address */
1223 if (address > (pf->ioremap_len - sizeof(u32))) {
1224 dev_info(&pf->pdev->dev, "read reg address 0x%08x too large, max=0x%08lx\n",
1225 address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
1226 goto command_write_done;
1227 }
1228
1229 value = rd32(&pf->hw, address);
1230 dev_info(&pf->pdev->dev, "read: 0x%08x = 0x%08x\n",
1231 address, value);
1232
1233 } else if (strncmp(cmd_buf, "write", 5) == 0) {
1234 u32 address, value;
1235
1236 cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value);
1237 if (cnt != 2) {
1238 dev_info(&pf->pdev->dev, "write <reg> <value>\n");
1239 goto command_write_done;
1240 }
1241
1242 /* check the range on address */
1243 if (address > (pf->ioremap_len - sizeof(u32))) {
1244 dev_info(&pf->pdev->dev, "write reg address 0x%08x too large, max=0x%08lx\n",
1245 address, (unsigned long int)(pf->ioremap_len - sizeof(u32)));
1246 goto command_write_done;
1247 }
1248 wr32(&pf->hw, address, value);
1249 value = rd32(&pf->hw, address);
1250 dev_info(&pf->pdev->dev, "write: 0x%08x = 0x%08x\n",
1251 address, value);
1252 } else if (strncmp(cmd_buf, "clear_stats", 11) == 0) {
1253 if (strncmp(&cmd_buf[12], "vsi", 3) == 0) {
1254 cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
1255 if (cnt == 0) {
1256 int i;
1257
1258 for (i = 0; i < pf->num_alloc_vsi; i++)
					i40e_vsi_reset_stats(pf->vsi[i]);
				dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
			} else if (cnt == 1) {
				vsi = i40e_dbg_find_vsi(pf, vsi_seid);
1263 if (!vsi) {
1264 dev_info(&pf->pdev->dev,
1265 "clear_stats vsi: bad vsi %d\n",
1266 vsi_seid);
1267 goto command_write_done;
1268 }
1269 i40e_vsi_reset_stats(vsi);
1270 dev_info(&pf->pdev->dev,
1271 "vsi clear stats called for vsi %d\n",
1272 vsi_seid);
1273 } else {
1274 dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n");
1275 }
1276 } else if (strncmp(&cmd_buf[12], "port", 4) == 0) {
1277 if (pf->hw.partition_id == 1) {
1278 i40e_pf_reset_stats(pf);
1279 dev_info(&pf->pdev->dev, "port stats cleared\n");
1280 } else {
1281 dev_info(&pf->pdev->dev, "clear port stats not allowed on this port partition\n");
1282 }
1283 } else {
1284 dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats port\n");
1285 }
1286 } else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
1287 struct i40e_aq_desc *desc;
1288 int ret;
1289
		desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
1291 if (!desc)
1292 goto command_write_done;
1293 cnt = sscanf(&cmd_buf[11],
1294 "%hi %hi %hi %hi %i %i %i %i %i %i",
1295 &desc->flags,
1296 &desc->opcode, &desc->datalen, &desc->retval,
1297 &desc->cookie_high, &desc->cookie_low,
1298 &desc->params.internal.param0,
1299 &desc->params.internal.param1,
1300 &desc->params.internal.param2,
1301 &desc->params.internal.param3);
1302 if (cnt != 10) {
1303 dev_info(&pf->pdev->dev,
1304 "send aq_cmd: bad command string, cnt=%d\n",
1305 cnt);
			kfree(desc);
			desc = NULL;
1308 goto command_write_done;
1309 }
		ret = i40e_asq_send_command(&pf->hw, desc, NULL, 0, NULL);
1311 if (!ret) {
1312 dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
1313 } else if (ret == -EIO) {
1314 dev_info(&pf->pdev->dev,
1315 "AQ command send failed Opcode %x AQ Error: %d\n",
1316 desc->opcode, pf->hw.aq.asq_last_status);
1317 } else {
1318 dev_info(&pf->pdev->dev,
1319 "AQ command send failed Opcode %x Status: %d\n",
1320 desc->opcode, ret);
1321 }
1322 dev_info(&pf->pdev->dev,
1323 "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
1324 desc->flags, desc->opcode, desc->datalen, desc->retval,
1325 desc->cookie_high, desc->cookie_low,
1326 desc->params.internal.param0,
1327 desc->params.internal.param1,
1328 desc->params.internal.param2,
1329 desc->params.internal.param3);
		kfree(desc);
		desc = NULL;
1332 } else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) {
1333 struct i40e_aq_desc *desc;
1334 u16 buffer_len;
1335 u8 *buff;
1336 int ret;
1337
		desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
1339 if (!desc)
1340 goto command_write_done;
1341 cnt = sscanf(&cmd_buf[20],
1342 "%hi %hi %hi %hi %i %i %i %i %i %i %hi",
1343 &desc->flags,
1344 &desc->opcode, &desc->datalen, &desc->retval,
1345 &desc->cookie_high, &desc->cookie_low,
1346 &desc->params.internal.param0,
1347 &desc->params.internal.param1,
1348 &desc->params.internal.param2,
1349 &desc->params.internal.param3,
1350 &buffer_len);
1351 if (cnt != 11) {
1352 dev_info(&pf->pdev->dev,
1353 "send indirect aq_cmd: bad command string, cnt=%d\n",
1354 cnt);
			kfree(desc);
			desc = NULL;
			goto command_write_done;
		}
		/* Just stub a buffer big enough in case the user messed up */
		if (buffer_len == 0)
			buffer_len = 1280;

		buff = kzalloc(buffer_len, GFP_KERNEL);
		if (!buff) {
			kfree(desc);
1366 desc = NULL;
1367 goto command_write_done;
1368 }
1369 desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
		ret = i40e_asq_send_command(&pf->hw, desc, buff,
					    buffer_len, NULL);
1372 if (!ret) {
1373 dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
1374 } else if (ret == -EIO) {
1375 dev_info(&pf->pdev->dev,
1376 "AQ command send failed Opcode %x AQ Error: %d\n",
1377 desc->opcode, pf->hw.aq.asq_last_status);
1378 } else {
1379 dev_info(&pf->pdev->dev,
1380 "AQ command send failed Opcode %x Status: %d\n",
1381 desc->opcode, ret);
1382 }
1383 dev_info(&pf->pdev->dev,
1384 "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
1385 desc->flags, desc->opcode, desc->datalen, desc->retval,
1386 desc->cookie_high, desc->cookie_low,
1387 desc->params.internal.param0,
1388 desc->params.internal.param1,
1389 desc->params.internal.param2,
1390 desc->params.internal.param3);
		print_hex_dump(KERN_INFO, "AQ buffer WB: ",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       buff, buffer_len, true);
		kfree(buff);
		buff = NULL;
		kfree(desc);
		desc = NULL;
1398 } else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) {
1399 dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
1400 i40e_get_current_fd_count(pf));
1401 } else if (strncmp(cmd_buf, "lldp", 4) == 0) {
1402 if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
1403 int ret;
1404
			ret = i40e_aq_stop_lldp(&pf->hw, false, false, NULL);
1406 if (ret) {
1407 dev_info(&pf->pdev->dev,
1408 "Stop LLDP AQ command failed =0x%x\n",
1409 pf->hw.aq.asq_last_status);
1410 goto command_write_done;
1411 }
			ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
						pf->hw.mac.addr,
						ETH_P_LLDP, 0,
						pf->vsi[pf->lan_vsi]->seid,
						0, true, NULL, NULL);
1417 if (ret) {
1418 dev_info(&pf->pdev->dev,
1419 "%s: Add Control Packet Filter AQ command failed =0x%x\n",
1420 __func__, pf->hw.aq.asq_last_status);
1421 goto command_write_done;
1422 }
1423#ifdef CONFIG_I40E_DCB
1424 pf->dcbx_cap = DCB_CAP_DCBX_HOST |
1425 DCB_CAP_DCBX_VER_IEEE;
1426#endif /* CONFIG_I40E_DCB */
1427 } else if (strncmp(&cmd_buf[5], "start", 5) == 0) {
1428 int ret;
1429
			ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
						pf->hw.mac.addr,
						ETH_P_LLDP, 0,
						pf->vsi[pf->lan_vsi]->seid,
						0, false, NULL, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "%s: Remove Control Packet Filter AQ command failed =0x%x\n",
					 __func__, pf->hw.aq.asq_last_status);
				/* Continue and start FW LLDP anyway */
			}

			ret = i40e_aq_start_lldp(&pf->hw, false, NULL);
1443 if (ret) {
1444 dev_info(&pf->pdev->dev,
1445 "Start LLDP AQ command failed =0x%x\n",
1446 pf->hw.aq.asq_last_status);
1447 goto command_write_done;
1448 }
1449#ifdef CONFIG_I40E_DCB
1450 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
1451 DCB_CAP_DCBX_VER_IEEE;
1452#endif /* CONFIG_I40E_DCB */
1453 } else if (strncmp(&cmd_buf[5],
1454 "get local", 9) == 0) {
1455 u16 llen, rlen;
1456 int ret;
1457 u8 *buff;
1458
1459 buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
1460 if (!buff)
1461 goto command_write_done;
1462
			ret = i40e_aq_get_lldp_mib(&pf->hw, 0,
						   I40E_AQ_LLDP_MIB_LOCAL,
						   buff, I40E_LLDPDU_SIZE,
						   &llen, &rlen, NULL);
1467 if (ret) {
1468 dev_info(&pf->pdev->dev,
1469 "Get LLDP MIB (local) AQ command failed =0x%x\n",
1470 pf->hw.aq.asq_last_status);
				kfree(buff);
				buff = NULL;
1473 goto command_write_done;
1474 }
1475 dev_info(&pf->pdev->dev, "LLDP MIB (local)\n");
			print_hex_dump(KERN_INFO, "LLDP MIB (local): ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       buff, I40E_LLDPDU_SIZE, true);
			kfree(buff);
1480 buff = NULL;
1481 } else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
1482 u16 llen, rlen;
1483 int ret;
1484 u8 *buff;
1485
1486 buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
1487 if (!buff)
1488 goto command_write_done;
1489
			ret = i40e_aq_get_lldp_mib(&pf->hw,
					I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
					I40E_AQ_LLDP_MIB_REMOTE,
					buff, I40E_LLDPDU_SIZE,
					&llen, &rlen, NULL);
1495 if (ret) {
1496 dev_info(&pf->pdev->dev,
1497 "Get LLDP MIB (remote) AQ command failed =0x%x\n",
1498 pf->hw.aq.asq_last_status);
				kfree(buff);
				buff = NULL;
1501 goto command_write_done;
1502 }
1503 dev_info(&pf->pdev->dev, "LLDP MIB (remote)\n");
			print_hex_dump(KERN_INFO, "LLDP MIB (remote): ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       buff, I40E_LLDPDU_SIZE, true);
			kfree(buff);
1508 buff = NULL;
1509 } else if (strncmp(&cmd_buf[5], "event on", 8) == 0) {
1510 int ret;
1511
			ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
								true, NULL);
1514 if (ret) {
1515 dev_info(&pf->pdev->dev,
1516 "Config LLDP MIB Change Event (on) AQ command failed =0x%x\n",
1517 pf->hw.aq.asq_last_status);
1518 goto command_write_done;
1519 }
1520 } else if (strncmp(&cmd_buf[5], "event off", 9) == 0) {
1521 int ret;
1522
			ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
								false, NULL);
1525 if (ret) {
1526 dev_info(&pf->pdev->dev,
1527 "Config LLDP MIB Change Event (off) AQ command failed =0x%x\n",
1528 pf->hw.aq.asq_last_status);
1529 goto command_write_done;
1530 }
1531 }
1532 } else if (strncmp(cmd_buf, "nvm read", 8) == 0) {
1533 u16 buffer_len, bytes;
1534 u16 module;
1535 u32 offset;
1536 u16 *buff;
1537 int ret;
1538
1539 cnt = sscanf(&cmd_buf[8], "%hx %x %hx",
1540 &module, &offset, &buffer_len);
1541 if (cnt == 0) {
1542 module = 0;
1543 offset = 0;
1544 buffer_len = 0;
1545 } else if (cnt == 1) {
1546 offset = 0;
1547 buffer_len = 0;
1548 } else if (cnt == 2) {
1549 buffer_len = 0;
1550 } else if (cnt > 3) {
1551 dev_info(&pf->pdev->dev,
1552 "nvm read: bad command string, cnt=%d\n", cnt);
1553 goto command_write_done;
1554 }
1555
1556 /* set the max length */
1557 buffer_len = min_t(u16, buffer_len, I40E_MAX_AQ_BUF_SIZE/2);
1558
1559 bytes = 2 * buffer_len;
1560
1561 /* read at least 1k bytes, no more than 4kB */
1562 bytes = clamp(bytes, (u16)1024, (u16)I40E_MAX_AQ_BUF_SIZE);
		buff = kzalloc(bytes, GFP_KERNEL);
1564 if (!buff)
1565 goto command_write_done;
1566
		ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
1568 if (ret) {
1569 dev_info(&pf->pdev->dev,
1570 "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
1571 ret, pf->hw.aq.asq_last_status);
			kfree(buff);
1573 goto command_write_done;
1574 }
1575
		ret = i40e_aq_read_nvm(&pf->hw, module, (2 * offset),
				       bytes, (u8 *)buff, true, NULL);
		i40e_release_nvm(&pf->hw);
1579 if (ret) {
1580 dev_info(&pf->pdev->dev,
1581 "Read NVM AQ failed err=%d status=0x%x\n",
1582 ret, pf->hw.aq.asq_last_status);
1583 } else {
1584 dev_info(&pf->pdev->dev,
1585 "Read NVM module=0x%x offset=0x%x words=%d\n",
1586 module, offset, buffer_len);
1587 if (bytes)
				print_hex_dump(KERN_INFO, "NVM Dump: ",
					       DUMP_PREFIX_OFFSET, 16, 2,
					       buff, bytes, true);
1591 }
		kfree(buff);
1593 buff = NULL;
1594 } else {
1595 dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf);
1596 dev_info(&pf->pdev->dev, "available commands\n");
1597 dev_info(&pf->pdev->dev, " add vsi [relay_seid]\n");
1598 dev_info(&pf->pdev->dev, " del vsi [vsi_seid]\n");
1599 dev_info(&pf->pdev->dev, " add relay <uplink_seid> <vsi_seid>\n");
1600 dev_info(&pf->pdev->dev, " del relay <relay_seid>\n");
1601 dev_info(&pf->pdev->dev, " add pvid <vsi_seid> <vid>\n");
1602 dev_info(&pf->pdev->dev, " del pvid <vsi_seid>\n");
1603 dev_info(&pf->pdev->dev, " dump switch\n");
1604 dev_info(&pf->pdev->dev, " dump vsi [seid]\n");
1605 dev_info(&pf->pdev->dev, " dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
1606 dev_info(&pf->pdev->dev, " dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
1607 dev_info(&pf->pdev->dev, " dump desc xdp <vsi_seid> <ring_id> [<desc_n>]\n");
1608 dev_info(&pf->pdev->dev, " dump desc aq\n");
1609 dev_info(&pf->pdev->dev, " dump reset stats\n");
1610 dev_info(&pf->pdev->dev, " dump debug fwdata <cluster_id> <table_id> <index>\n");
1611 dev_info(&pf->pdev->dev, " read <reg>\n");
1612 dev_info(&pf->pdev->dev, " write <reg> <value>\n");
1613 dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n");
1614 dev_info(&pf->pdev->dev, " clear_stats port\n");
1615 dev_info(&pf->pdev->dev, " pfr\n");
1616 dev_info(&pf->pdev->dev, " corer\n");
1617 dev_info(&pf->pdev->dev, " globr\n");
1618 dev_info(&pf->pdev->dev, " send aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3>\n");
1619 dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
1620 dev_info(&pf->pdev->dev, " fd current cnt");
		dev_info(&pf->pdev->dev, " lldp start\n");
		dev_info(&pf->pdev->dev, " lldp stop\n");
		dev_info(&pf->pdev->dev, " lldp get local\n");
		dev_info(&pf->pdev->dev, " lldp get remote\n");
		dev_info(&pf->pdev->dev, " lldp event on\n");
		dev_info(&pf->pdev->dev, " lldp event off\n");
		dev_info(&pf->pdev->dev, " nvm read [module] [word_offset] [word_count]\n");
	}

command_write_done:
	kfree(cmd_buf);
	cmd_buf = NULL;
	return count;
}

static const struct file_operations i40e_dbg_command_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = i40e_dbg_command_read,
	.write = i40e_dbg_command_write,
};

/**************************************************************
 * netdev_ops
 * The netdev_ops entry in debugfs is for giving the driver commands
 * to be executed from the netdev operations.
 **************************************************************/
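/*
 * Example, assuming debugfs is mounted at /sys/kernel/debug and <bdf> is the
 * PF's PCI address (the per-PF directory name chosen in i40e_dbg_pf_init()):
 *   echo "napi <vsi_seid>" > /sys/kernel/debug/i40e/<bdf>/netdev_ops
 * Results are reported through dev_info() in the kernel log.
 */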
static char i40e_dbg_netdev_ops_buf[256] = "";

/**
 * i40e_dbg_netdev_ops_read - read for netdev_ops datum
 * @filp: the opened file
 * @buffer: where to write the data for the user to read
 * @count: the size of the user's buffer
 * @ppos: file position offset
 **/
static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
					size_t count, loff_t *ppos)
{
	struct i40e_pf *pf = filp->private_data;
	int bytes_not_copied;
	int buf_size = 256;
	char *buf;
	int len;

	/* don't allow partial reads */
	if (*ppos != 0)
		return 0;
	if (count < buf_size)
		return -ENOSPC;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOSPC;

	len = snprintf(buf, buf_size, "%s: %s\n",
		       pf->vsi[pf->lan_vsi]->netdev->name,
		       i40e_dbg_netdev_ops_buf);

	bytes_not_copied = copy_to_user(buffer, buf, len);
	kfree(buf);

	if (bytes_not_copied)
		return -EFAULT;

	*ppos = len;
	return len;
}

/**
 * i40e_dbg_netdev_ops_write - write into netdev_ops datum
 * @filp: the opened file
 * @buffer: where to find the user's data
 * @count: the length of the user's data
 * @ppos: file position offset
 **/
static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
					 const char __user *buffer,
					 size_t count, loff_t *ppos)
{
	struct i40e_pf *pf = filp->private_data;
	int bytes_not_copied;
	struct i40e_vsi *vsi;
	char *buf_tmp;
	int vsi_seid;
	int i, cnt;

	/* don't allow partial writes */
	if (*ppos != 0)
		return 0;
	if (count >= sizeof(i40e_dbg_netdev_ops_buf))
		return -ENOSPC;

	memset(i40e_dbg_netdev_ops_buf, 0, sizeof(i40e_dbg_netdev_ops_buf));
	bytes_not_copied = copy_from_user(i40e_dbg_netdev_ops_buf,
					  buffer, count);
	if (bytes_not_copied)
		return -EFAULT;
	i40e_dbg_netdev_ops_buf[count] = '\0';

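	/* terminate the stored command at the first newline (a write from
	 * echo(1) normally ends with one) so only the command text is kept
	 */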
	buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n');
	if (buf_tmp) {
		*buf_tmp = '\0';
		count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
	}

	if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
		int mtu;

		cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
			     &vsi_seid, &mtu);
		if (cnt != 2) {
			dev_info(&pf->pdev->dev, "change_mtu <vsi_seid> <mtu>\n");
			goto netdev_ops_write_done;
		}
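		/* Look up the VSI and call its ndo_change_mtu(); the ndo
		 * callbacks expect RTNL to be held, so rtnl_trylock() is used
		 * and the command simply fails if the lock is contended.
		 */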
		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
		if (!vsi) {
			dev_info(&pf->pdev->dev,
				 "change_mtu: VSI %d not found\n", vsi_seid);
		} else if (!vsi->netdev) {
			dev_info(&pf->pdev->dev, "change_mtu: no netdev for VSI %d\n",
				 vsi_seid);
		} else if (rtnl_trylock()) {
			vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev,
								mtu);
			rtnl_unlock();
			dev_info(&pf->pdev->dev, "change_mtu called\n");
		} else {
			dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
		}

	} else if (strncmp(i40e_dbg_netdev_ops_buf, "set_rx_mode", 11) == 0) {
		cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
		if (cnt != 1) {
			dev_info(&pf->pdev->dev, "set_rx_mode <vsi_seid>\n");
			goto netdev_ops_write_done;
		}
		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
		if (!vsi) {
			dev_info(&pf->pdev->dev,
				 "set_rx_mode: VSI %d not found\n", vsi_seid);
		} else if (!vsi->netdev) {
			dev_info(&pf->pdev->dev, "set_rx_mode: no netdev for VSI %d\n",
				 vsi_seid);
		} else if (rtnl_trylock()) {
			vsi->netdev->netdev_ops->ndo_set_rx_mode(vsi->netdev);
			rtnl_unlock();
			dev_info(&pf->pdev->dev, "set_rx_mode called\n");
		} else {
			dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
		}

	} else if (strncmp(i40e_dbg_netdev_ops_buf, "napi", 4) == 0) {
		cnt = sscanf(&i40e_dbg_netdev_ops_buf[4], "%i", &vsi_seid);
		if (cnt != 1) {
			dev_info(&pf->pdev->dev, "napi <vsi_seid>\n");
			goto netdev_ops_write_done;
		}
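		/* Schedule NAPI on each of the VSI's queue vectors;
		 * napi_schedule() raises the NET_RX softirq so the poll
		 * routines run without waiting for a device interrupt.
		 */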
		vsi = i40e_dbg_find_vsi(pf, vsi_seid);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "napi: VSI %d not found\n",
				 vsi_seid);
		} else if (!vsi->netdev) {
			dev_info(&pf->pdev->dev, "napi: no netdev for VSI %d\n",
				 vsi_seid);
		} else {
			for (i = 0; i < vsi->num_q_vectors; i++)
				napi_schedule(&vsi->q_vectors[i]->napi);
			dev_info(&pf->pdev->dev, "napi called\n");
		}
	} else {
		dev_info(&pf->pdev->dev, "unknown command '%s'\n",
			 i40e_dbg_netdev_ops_buf);
		dev_info(&pf->pdev->dev, "available commands\n");
		dev_info(&pf->pdev->dev, " change_mtu <vsi_seid> <mtu>\n");
		dev_info(&pf->pdev->dev, " set_rx_mode <vsi_seid>\n");
		dev_info(&pf->pdev->dev, " napi <vsi_seid>\n");
	}
netdev_ops_write_done:
	return count;
}

static const struct file_operations i40e_dbg_netdev_ops_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = i40e_dbg_netdev_ops_read,
	.write = i40e_dbg_netdev_ops_write,
};

/**
 * i40e_dbg_pf_init - setup the debugfs directory for the PF
 * @pf: the PF that is starting up
 **/
void i40e_dbg_pf_init(struct i40e_pf *pf)
{
	const char *name = pci_name(pf->pdev);

	pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);

	debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
			    &i40e_dbg_command_fops);

	debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
			    &i40e_dbg_netdev_ops_fops);
}
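
/* With debugfs mounted at its usual /sys/kernel/debug location this yields:
 *   /sys/kernel/debug/i40e/<pci-bdf>/command
 *   /sys/kernel/debug/i40e/<pci-bdf>/netdev_ops
 * where the parent directory is created from i40e_driver_name in
 * i40e_dbg_init() and <pci-bdf> is the PCI address returned by pci_name().
 */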

/**
 * i40e_dbg_pf_exit - clear out the PF's debugfs entries
 * @pf: the PF that is stopping
 **/
void i40e_dbg_pf_exit(struct i40e_pf *pf)
{
	debugfs_remove_recursive(pf->i40e_dbg_pf);
	pf->i40e_dbg_pf = NULL;
}

/**
 * i40e_dbg_init - start up debugfs for the driver
 **/
void i40e_dbg_init(void)
{
	i40e_dbg_root = debugfs_create_dir(i40e_driver_name, NULL);
	if (IS_ERR(i40e_dbg_root))
		pr_info("init of debugfs failed\n");
}

/**
 * i40e_dbg_exit - clean out the driver's debugfs entries
 **/
void i40e_dbg_exit(void)
{
	debugfs_remove_recursive(i40e_dbg_root);
	i40e_dbg_root = NULL;
}

#endif /* CONFIG_DEBUG_FS */
