1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (C) 2017 Chelsio Communications. All rights reserved. |
4 | */ |
5 | |
6 | #include <linux/sort.h> |
7 | #include <linux/string.h> |
8 | |
9 | #include "t4_regs.h" |
10 | #include "cxgb4.h" |
11 | #include "cxgb4_cudbg.h" |
12 | #include "cudbg_if.h" |
13 | #include "cudbg_lib_common.h" |
14 | #include "cudbg_entity.h" |
15 | #include "cudbg_lib.h" |
16 | #include "cudbg_zlib.h" |
17 | #include "cxgb4_tc_mqprio.h" |
18 | |
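/* Indirect register access tables. Each row supplies IREG_NUM_ELEM values:
 * {indirect address register, indirect data register, starting local offset,
 *  number of registers in the range}.
 */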
19 | static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = { |
20 | {0x7e40, 0x7e44, 0x020, 28}, /* t6_tp_pio_regs_20_to_3b */ |
21 | {0x7e40, 0x7e44, 0x040, 10}, /* t6_tp_pio_regs_40_to_49 */ |
22 | {0x7e40, 0x7e44, 0x050, 10}, /* t6_tp_pio_regs_50_to_59 */ |
23 | {0x7e40, 0x7e44, 0x060, 14}, /* t6_tp_pio_regs_60_to_6d */ |
24 | {0x7e40, 0x7e44, 0x06F, 1}, /* t6_tp_pio_regs_6f */ |
25 | {0x7e40, 0x7e44, 0x070, 6}, /* t6_tp_pio_regs_70_to_75 */ |
26 | {0x7e40, 0x7e44, 0x130, 18}, /* t6_tp_pio_regs_130_to_141 */ |
27 | {0x7e40, 0x7e44, 0x145, 19}, /* t6_tp_pio_regs_145_to_157 */ |
28 | {0x7e40, 0x7e44, 0x160, 1}, /* t6_tp_pio_regs_160 */ |
29 | {0x7e40, 0x7e44, 0x230, 25}, /* t6_tp_pio_regs_230_to_248 */ |
	{0x7e40, 0x7e44, 0x24a, 3}, /* t6_tp_pio_regs_24a_to_24c */
31 | {0x7e40, 0x7e44, 0x8C0, 1} /* t6_tp_pio_regs_8c0 */ |
32 | }; |
33 | |
34 | static const u32 t5_tp_pio_array[][IREG_NUM_ELEM] = { |
35 | {0x7e40, 0x7e44, 0x020, 28}, /* t5_tp_pio_regs_20_to_3b */ |
36 | {0x7e40, 0x7e44, 0x040, 19}, /* t5_tp_pio_regs_40_to_52 */ |
37 | {0x7e40, 0x7e44, 0x054, 2}, /* t5_tp_pio_regs_54_to_55 */ |
38 | {0x7e40, 0x7e44, 0x060, 13}, /* t5_tp_pio_regs_60_to_6c */ |
39 | {0x7e40, 0x7e44, 0x06F, 1}, /* t5_tp_pio_regs_6f */ |
40 | {0x7e40, 0x7e44, 0x120, 4}, /* t5_tp_pio_regs_120_to_123 */ |
41 | {0x7e40, 0x7e44, 0x12b, 2}, /* t5_tp_pio_regs_12b_to_12c */ |
42 | {0x7e40, 0x7e44, 0x12f, 21}, /* t5_tp_pio_regs_12f_to_143 */ |
43 | {0x7e40, 0x7e44, 0x145, 19}, /* t5_tp_pio_regs_145_to_157 */ |
44 | {0x7e40, 0x7e44, 0x230, 25}, /* t5_tp_pio_regs_230_to_248 */ |
45 | {0x7e40, 0x7e44, 0x8C0, 1} /* t5_tp_pio_regs_8c0 */ |
46 | }; |
47 | |
48 | static const u32 t6_tp_tm_pio_array[][IREG_NUM_ELEM] = { |
49 | {0x7e18, 0x7e1c, 0x0, 12} |
50 | }; |
51 | |
52 | static const u32 t5_tp_tm_pio_array[][IREG_NUM_ELEM] = { |
53 | {0x7e18, 0x7e1c, 0x0, 12} |
54 | }; |
55 | |
56 | static const u32 t6_tp_mib_index_array[6][IREG_NUM_ELEM] = { |
57 | {0x7e50, 0x7e54, 0x0, 13}, |
58 | {0x7e50, 0x7e54, 0x10, 6}, |
59 | {0x7e50, 0x7e54, 0x18, 21}, |
60 | {0x7e50, 0x7e54, 0x30, 32}, |
61 | {0x7e50, 0x7e54, 0x50, 22}, |
62 | {0x7e50, 0x7e54, 0x68, 12} |
63 | }; |
64 | |
65 | static const u32 t5_tp_mib_index_array[9][IREG_NUM_ELEM] = { |
66 | {0x7e50, 0x7e54, 0x0, 13}, |
67 | {0x7e50, 0x7e54, 0x10, 6}, |
68 | {0x7e50, 0x7e54, 0x18, 8}, |
69 | {0x7e50, 0x7e54, 0x20, 13}, |
70 | {0x7e50, 0x7e54, 0x30, 16}, |
71 | {0x7e50, 0x7e54, 0x40, 16}, |
72 | {0x7e50, 0x7e54, 0x50, 16}, |
73 | {0x7e50, 0x7e54, 0x60, 6}, |
74 | {0x7e50, 0x7e54, 0x68, 4} |
75 | }; |
76 | |
77 | static const u32 t5_sge_dbg_index_array[2][IREG_NUM_ELEM] = { |
78 | {0x10cc, 0x10d0, 0x0, 16}, |
79 | {0x10cc, 0x10d4, 0x0, 16}, |
80 | }; |
81 | |
82 | static const u32 t6_sge_qbase_index_array[] = { |
	/* 1 addr reg SGE_QBASE_INDEX and 4 data regs SGE_QBASE_MAP[0-3] */
84 | 0x1250, 0x1240, 0x1244, 0x1248, 0x124c, |
85 | }; |
86 | |
87 | static const u32 t5_pcie_pdbg_array[][IREG_NUM_ELEM] = { |
88 | {0x5a04, 0x5a0c, 0x00, 0x20}, /* t5_pcie_pdbg_regs_00_to_20 */ |
89 | {0x5a04, 0x5a0c, 0x21, 0x20}, /* t5_pcie_pdbg_regs_21_to_40 */ |
90 | {0x5a04, 0x5a0c, 0x41, 0x10}, /* t5_pcie_pdbg_regs_41_to_50 */ |
91 | }; |
92 | |
93 | static const u32 t5_pcie_cdbg_array[][IREG_NUM_ELEM] = { |
94 | {0x5a10, 0x5a18, 0x00, 0x20}, /* t5_pcie_cdbg_regs_00_to_20 */ |
95 | {0x5a10, 0x5a18, 0x21, 0x18}, /* t5_pcie_cdbg_regs_21_to_37 */ |
96 | }; |
97 | |
98 | static const u32 t5_pm_rx_array[][IREG_NUM_ELEM] = { |
99 | {0x8FD0, 0x8FD4, 0x10000, 0x20}, /* t5_pm_rx_regs_10000_to_10020 */ |
100 | {0x8FD0, 0x8FD4, 0x10021, 0x0D}, /* t5_pm_rx_regs_10021_to_1002c */ |
101 | }; |
102 | |
103 | static const u32 t5_pm_tx_array[][IREG_NUM_ELEM] = { |
104 | {0x8FF0, 0x8FF4, 0x10000, 0x20}, /* t5_pm_tx_regs_10000_to_10020 */ |
105 | {0x8FF0, 0x8FF4, 0x10021, 0x1D}, /* t5_pm_tx_regs_10021_to_1003c */ |
106 | }; |
107 | |
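/* Inclusive {start, end} offset pairs of the PCIe config space ranges
 * captured for the CUDBG_PCIE_CONFIG entity.
 */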
108 | static const u32 t5_pcie_config_array[][2] = { |
109 | {0x0, 0x34}, |
110 | {0x3c, 0x40}, |
111 | {0x50, 0x64}, |
112 | {0x70, 0x80}, |
113 | {0x94, 0xa0}, |
114 | {0xb0, 0xb8}, |
115 | {0xd0, 0xd4}, |
116 | {0x100, 0x128}, |
117 | {0x140, 0x148}, |
118 | {0x150, 0x164}, |
119 | {0x170, 0x178}, |
120 | {0x180, 0x194}, |
121 | {0x1a0, 0x1b8}, |
122 | {0x1c0, 0x208}, |
123 | }; |
124 | |
125 | static const u32 t6_ma_ireg_array[][IREG_NUM_ELEM] = { |
126 | {0x78f8, 0x78fc, 0xa000, 23}, /* t6_ma_regs_a000_to_a016 */ |
127 | {0x78f8, 0x78fc, 0xa400, 30}, /* t6_ma_regs_a400_to_a41e */ |
128 | {0x78f8, 0x78fc, 0xa800, 20} /* t6_ma_regs_a800_to_a813 */ |
129 | }; |
130 | |
131 | static const u32 t6_ma_ireg_array2[][IREG_NUM_ELEM] = { |
132 | {0x78f8, 0x78fc, 0xe400, 17}, /* t6_ma_regs_e400_to_e600 */ |
133 | {0x78f8, 0x78fc, 0xe640, 13} /* t6_ma_regs_e640_to_e7c0 */ |
134 | }; |
135 | |
136 | static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = { |
137 | {0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */ |
138 | {0x7b50, 0x7b54, 0x2080, 0x1d, 0}, /* up_cim_2080_to_20fc */ |
139 | {0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */ |
140 | {0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */ |
141 | {0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */ |
142 | {0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */ |
143 | {0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */ |
144 | {0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */ |
145 | {0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */ |
146 | {0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */ |
147 | {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */ |
148 | {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */ |
149 | {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */ |
150 | {0x7b50, 0x7b54, 0x4900, 0x4, 0x4}, /* up_cim_4900_to_4c60 */ |
151 | {0x7b50, 0x7b54, 0x4904, 0x4, 0x4}, /* up_cim_4904_to_4c64 */ |
152 | {0x7b50, 0x7b54, 0x4908, 0x4, 0x4}, /* up_cim_4908_to_4c68 */ |
153 | {0x7b50, 0x7b54, 0x4910, 0x4, 0x4}, /* up_cim_4910_to_4c70 */ |
154 | {0x7b50, 0x7b54, 0x4914, 0x4, 0x4}, /* up_cim_4914_to_4c74 */ |
155 | {0x7b50, 0x7b54, 0x4920, 0x10, 0x10}, /* up_cim_4920_to_4a10 */ |
156 | {0x7b50, 0x7b54, 0x4924, 0x10, 0x10}, /* up_cim_4924_to_4a14 */ |
157 | {0x7b50, 0x7b54, 0x4928, 0x10, 0x10}, /* up_cim_4928_to_4a18 */ |
158 | {0x7b50, 0x7b54, 0x492c, 0x10, 0x10}, /* up_cim_492c_to_4a1c */ |
159 | }; |
160 | |
161 | static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = { |
162 | {0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */ |
163 | {0x7b50, 0x7b54, 0x2080, 0x19, 0}, /* up_cim_2080_to_20ec */ |
164 | {0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */ |
165 | {0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */ |
166 | {0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */ |
167 | {0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */ |
168 | {0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */ |
169 | {0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */ |
170 | {0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */ |
171 | {0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */ |
172 | {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */ |
173 | {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */ |
174 | {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */ |
175 | }; |
176 | |
177 | static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = { |
178 | {0x51320, 0x51324, 0xa000, 32} /* t6_hma_regs_a000_to_a01f */ |
179 | }; |
180 | |
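/* Return the number of bytes needed to collect @entity. A length of 0
 * means the entity is either unknown or not present on this adapter.
 */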
181 | u32 cudbg_get_entity_length(struct adapter *adap, u32 entity) |
182 | { |
183 | struct cudbg_tcam tcam_region = { 0 }; |
184 | u32 value, n = 0, len = 0; |
185 | |
186 | switch (entity) { |
187 | case CUDBG_REG_DUMP: |
188 | switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { |
189 | case CHELSIO_T4: |
190 | len = T4_REGMAP_SIZE; |
191 | break; |
192 | case CHELSIO_T5: |
193 | case CHELSIO_T6: |
194 | len = T5_REGMAP_SIZE; |
195 | break; |
196 | default: |
197 | break; |
198 | } |
199 | break; |
200 | case CUDBG_DEV_LOG: |
201 | len = adap->params.devlog.size; |
202 | break; |
203 | case CUDBG_CIM_LA: |
		if (is_t6(adap->params.chip)) {
205 | len = adap->params.cim_la_size / 10 + 1; |
206 | len *= 10 * sizeof(u32); |
207 | } else { |
208 | len = adap->params.cim_la_size / 8; |
209 | len *= 8 * sizeof(u32); |
210 | } |
211 | len += sizeof(u32); /* for reading CIM LA configuration */ |
212 | break; |
213 | case CUDBG_CIM_MA_LA: |
214 | len = 2 * CIM_MALA_SIZE * 5 * sizeof(u32); |
215 | break; |
216 | case CUDBG_CIM_QCFG: |
217 | len = sizeof(struct cudbg_cim_qcfg); |
218 | break; |
219 | case CUDBG_CIM_IBQ_TP0: |
220 | case CUDBG_CIM_IBQ_TP1: |
221 | case CUDBG_CIM_IBQ_ULP: |
222 | case CUDBG_CIM_IBQ_SGE0: |
223 | case CUDBG_CIM_IBQ_SGE1: |
224 | case CUDBG_CIM_IBQ_NCSI: |
225 | len = CIM_IBQ_SIZE * 4 * sizeof(u32); |
226 | break; |
	case CUDBG_CIM_OBQ_ULP0:
		len = cudbg_cim_obq_size(adap, 0);
		break;
	case CUDBG_CIM_OBQ_ULP1:
		len = cudbg_cim_obq_size(adap, 1);
		break;
	case CUDBG_CIM_OBQ_ULP2:
		len = cudbg_cim_obq_size(adap, 2);
		break;
	case CUDBG_CIM_OBQ_ULP3:
		len = cudbg_cim_obq_size(adap, 3);
		break;
	case CUDBG_CIM_OBQ_SGE:
		len = cudbg_cim_obq_size(adap, 4);
		break;
	case CUDBG_CIM_OBQ_NCSI:
		len = cudbg_cim_obq_size(adap, 5);
		break;
	case CUDBG_CIM_OBQ_RXQ0:
		len = cudbg_cim_obq_size(adap, 6);
		break;
	case CUDBG_CIM_OBQ_RXQ1:
		len = cudbg_cim_obq_size(adap, 7);
		break;
251 | case CUDBG_EDC0: |
252 | value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); |
253 | if (value & EDRAM0_ENABLE_F) { |
254 | value = t4_read_reg(adap, MA_EDRAM0_BAR_A); |
255 | len = EDRAM0_SIZE_G(value); |
256 | } |
		len = cudbg_mbytes_to_bytes(len);
258 | break; |
259 | case CUDBG_EDC1: |
260 | value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); |
261 | if (value & EDRAM1_ENABLE_F) { |
262 | value = t4_read_reg(adap, MA_EDRAM1_BAR_A); |
263 | len = EDRAM1_SIZE_G(value); |
264 | } |
		len = cudbg_mbytes_to_bytes(len);
266 | break; |
267 | case CUDBG_MC0: |
268 | value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); |
269 | if (value & EXT_MEM0_ENABLE_F) { |
270 | value = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A); |
271 | len = EXT_MEM0_SIZE_G(value); |
272 | } |
		len = cudbg_mbytes_to_bytes(len);
274 | break; |
275 | case CUDBG_MC1: |
276 | value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); |
277 | if (value & EXT_MEM1_ENABLE_F) { |
278 | value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); |
279 | len = EXT_MEM1_SIZE_G(value); |
280 | } |
		len = cudbg_mbytes_to_bytes(len);
282 | break; |
283 | case CUDBG_RSS: |
		len = t4_chip_rss_size(adap) * sizeof(u16);
285 | break; |
286 | case CUDBG_RSS_VF_CONF: |
287 | len = adap->params.arch.vfcount * |
288 | sizeof(struct cudbg_rss_vf_conf); |
289 | break; |
290 | case CUDBG_PATH_MTU: |
291 | len = NMTUS * sizeof(u16); |
292 | break; |
293 | case CUDBG_PM_STATS: |
294 | len = sizeof(struct cudbg_pm_stats); |
295 | break; |
296 | case CUDBG_HW_SCHED: |
297 | len = sizeof(struct cudbg_hw_sched); |
298 | break; |
299 | case CUDBG_TP_INDIRECT: |
300 | switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { |
301 | case CHELSIO_T5: |
302 | n = sizeof(t5_tp_pio_array) + |
303 | sizeof(t5_tp_tm_pio_array) + |
304 | sizeof(t5_tp_mib_index_array); |
305 | break; |
306 | case CHELSIO_T6: |
307 | n = sizeof(t6_tp_pio_array) + |
308 | sizeof(t6_tp_tm_pio_array) + |
309 | sizeof(t6_tp_mib_index_array); |
310 | break; |
311 | default: |
312 | break; |
313 | } |
314 | n = n / (IREG_NUM_ELEM * sizeof(u32)); |
315 | len = sizeof(struct ireg_buf) * n; |
316 | break; |
317 | case CUDBG_SGE_INDIRECT: |
318 | len = sizeof(struct ireg_buf) * 2 + |
319 | sizeof(struct sge_qbase_reg_field); |
320 | break; |
321 | case CUDBG_ULPRX_LA: |
322 | len = sizeof(struct cudbg_ulprx_la); |
323 | break; |
324 | case CUDBG_TP_LA: |
325 | len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64); |
326 | break; |
327 | case CUDBG_MEMINFO: |
328 | len = sizeof(struct cudbg_ver_hdr) + |
329 | sizeof(struct cudbg_meminfo); |
330 | break; |
331 | case CUDBG_CIM_PIF_LA: |
332 | len = sizeof(struct cudbg_cim_pif_la); |
333 | len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32); |
334 | break; |
335 | case CUDBG_CLK: |
336 | len = sizeof(struct cudbg_clk_info); |
337 | break; |
338 | case CUDBG_PCIE_INDIRECT: |
339 | n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32)); |
340 | len = sizeof(struct ireg_buf) * n * 2; |
341 | break; |
342 | case CUDBG_PM_INDIRECT: |
343 | n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32)); |
344 | len = sizeof(struct ireg_buf) * n * 2; |
345 | break; |
346 | case CUDBG_TID_INFO: |
347 | len = sizeof(struct cudbg_tid_info_region_rev1); |
348 | break; |
349 | case CUDBG_PCIE_CONFIG: |
350 | len = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS; |
351 | break; |
352 | case CUDBG_DUMP_CONTEXT: |
		len = cudbg_dump_context_size(adap);
354 | break; |
355 | case CUDBG_MPS_TCAM: |
356 | len = sizeof(struct cudbg_mps_tcam) * |
357 | adap->params.arch.mps_tcam_size; |
358 | break; |
359 | case CUDBG_VPD_DATA: |
360 | len = sizeof(struct cudbg_vpd_data); |
361 | break; |
362 | case CUDBG_LE_TCAM: |
		cudbg_fill_le_tcam_info(adap, &tcam_region);
364 | len = sizeof(struct cudbg_tcam) + |
365 | sizeof(struct cudbg_tid_data) * tcam_region.max_tid; |
366 | break; |
367 | case CUDBG_CCTRL: |
368 | len = sizeof(u16) * NMTUS * NCCTRL_WIN; |
369 | break; |
370 | case CUDBG_MA_INDIRECT: |
371 | if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { |
372 | n = sizeof(t6_ma_ireg_array) / |
373 | (IREG_NUM_ELEM * sizeof(u32)); |
374 | len = sizeof(struct ireg_buf) * n * 2; |
375 | } |
376 | break; |
377 | case CUDBG_ULPTX_LA: |
378 | len = sizeof(struct cudbg_ver_hdr) + |
379 | sizeof(struct cudbg_ulptx_la); |
380 | break; |
381 | case CUDBG_UP_CIM_INDIRECT: |
382 | n = 0; |
		if (is_t5(adap->params.chip))
			n = sizeof(t5_up_cim_reg_array) /
			    ((IREG_NUM_ELEM + 1) * sizeof(u32));
		else if (is_t6(adap->params.chip))
			n = sizeof(t6_up_cim_reg_array) /
			    ((IREG_NUM_ELEM + 1) * sizeof(u32));
389 | len = sizeof(struct ireg_buf) * n; |
390 | break; |
391 | case CUDBG_PBT_TABLE: |
392 | len = sizeof(struct cudbg_pbt_tables); |
393 | break; |
394 | case CUDBG_MBOX_LOG: |
395 | len = sizeof(struct cudbg_mbox_log) * adap->mbox_log->size; |
396 | break; |
397 | case CUDBG_HMA_INDIRECT: |
398 | if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { |
399 | n = sizeof(t6_hma_ireg_array) / |
400 | (IREG_NUM_ELEM * sizeof(u32)); |
401 | len = sizeof(struct ireg_buf) * n; |
402 | } |
403 | break; |
404 | case CUDBG_HMA: |
405 | value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); |
406 | if (value & HMA_MUX_F) { |
407 | /* In T6, there's no MC1. So, HMA shares MC1 |
408 | * address space. |
409 | */ |
410 | value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); |
411 | len = EXT_MEM1_SIZE_G(value); |
412 | } |
		len = cudbg_mbytes_to_bytes(len);
414 | break; |
415 | case CUDBG_QDESC: |
		cudbg_fill_qdesc_num_and_size(adap, NULL, &len);
417 | break; |
418 | default: |
419 | break; |
420 | } |
421 | |
422 | return len; |
423 | } |
424 | |
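/* Compress @pin_buff into @dbg_buff in CUDBG_CHUNK_SIZE pieces and update
 * @pin_buff->size with the total compressed length.
 */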
425 | static int cudbg_do_compression(struct cudbg_init *pdbg_init, |
426 | struct cudbg_buffer *pin_buff, |
427 | struct cudbg_buffer *dbg_buff) |
428 | { |
429 | struct cudbg_buffer temp_in_buff = { 0 }; |
430 | int bytes_left, bytes_read, bytes; |
431 | u32 offset = dbg_buff->offset; |
432 | int rc; |
433 | |
434 | temp_in_buff.offset = pin_buff->offset; |
435 | temp_in_buff.data = pin_buff->data; |
436 | temp_in_buff.size = pin_buff->size; |
437 | |
438 | bytes_left = pin_buff->size; |
439 | bytes_read = 0; |
440 | while (bytes_left > 0) { |
441 | /* Do compression in smaller chunks */ |
442 | bytes = min_t(unsigned long, bytes_left, |
443 | (unsigned long)CUDBG_CHUNK_SIZE); |
444 | temp_in_buff.data = (char *)pin_buff->data + bytes_read; |
445 | temp_in_buff.size = bytes; |
		rc = cudbg_compress_buff(pdbg_init, &temp_in_buff, dbg_buff);
447 | if (rc) |
448 | return rc; |
449 | bytes_left -= bytes; |
450 | bytes_read += bytes; |
451 | } |
452 | |
453 | pin_buff->size = dbg_buff->offset - offset; |
454 | return 0; |
455 | } |
456 | |
457 | static int cudbg_write_and_release_buff(struct cudbg_init *pdbg_init, |
458 | struct cudbg_buffer *pin_buff, |
459 | struct cudbg_buffer *dbg_buff) |
460 | { |
461 | int rc = 0; |
462 | |
463 | if (pdbg_init->compress_type == CUDBG_COMPRESSION_NONE) { |
		cudbg_update_buff(pin_buff, dbg_buff);
465 | } else { |
466 | rc = cudbg_do_compression(pdbg_init, pin_buff, dbg_buff); |
467 | if (rc) |
468 | goto out; |
469 | } |
470 | |
471 | out: |
472 | cudbg_put_buff(pdbg_init, pin_buff); |
473 | return rc; |
474 | } |
475 | |
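/* Collection via firmware is only possible if the firmware is alive
 * (CXGB4_FW_OK) and backdoor access has not been requested.
 */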
476 | static int is_fw_attached(struct cudbg_init *pdbg_init) |
477 | { |
478 | struct adapter *padap = pdbg_init->adap; |
479 | |
480 | if (!(padap->flags & CXGB4_FW_OK) || padap->use_bd) |
481 | return 0; |
482 | |
483 | return 1; |
484 | } |
485 | |
/* Add padding bytes to debug_buffer, if necessary, to make it 4-byte
 * aligned, and record the final (padded) entity size.
 */
489 | void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff, |
490 | struct cudbg_entity_hdr *entity_hdr) |
491 | { |
492 | u8 zero_buf[4] = {0}; |
493 | u8 padding, remain; |
494 | |
495 | remain = (dbg_buff->offset - entity_hdr->start_offset) % 4; |
496 | padding = 4 - remain; |
497 | if (remain) { |
498 | memcpy(((u8 *)dbg_buff->data) + dbg_buff->offset, &zero_buf, |
499 | padding); |
500 | dbg_buff->offset += padding; |
501 | entity_hdr->num_pad = padding; |
502 | } |
503 | entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset; |
504 | } |
505 | |
506 | struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i) |
507 | { |
508 | struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf; |
509 | |
510 | return (struct cudbg_entity_hdr *) |
511 | ((char *)outbuf + cudbg_hdr->hdr_len + |
512 | (sizeof(struct cudbg_entity_hdr) * (i - 1))); |
513 | } |
514 | |
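/* Read @len bytes of VPD starting at physical EEPROM address @addr. */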
515 | static int cudbg_read_vpd_reg(struct adapter *padap, u32 addr, u32 len, |
516 | void *dest) |
517 | { |
518 | int vaddr, rc; |
519 | |
	vaddr = t4_eeprom_ptov(addr, padap->pf, EEPROMPFSIZE);
521 | if (vaddr < 0) |
522 | return vaddr; |
523 | |
	rc = pci_read_vpd(padap->pdev, vaddr, len, dest);
525 | if (rc < 0) |
526 | return rc; |
527 | |
528 | return 0; |
529 | } |
530 | |
531 | static int cudbg_mem_desc_cmp(const void *a, const void *b) |
532 | { |
533 | return ((const struct cudbg_mem_desc *)a)->base - |
534 | ((const struct cudbg_mem_desc *)b)->base; |
535 | } |
536 | |
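/* Build a snapshot of the adapter's memory layout: the available
 * EDC/MC/HMA memories, the hardware memory regions carved out of them
 * (sorted by base address), and assorted page and free-list counters.
 */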
537 | int cudbg_fill_meminfo(struct adapter *padap, |
538 | struct cudbg_meminfo *meminfo_buff) |
539 | { |
540 | struct cudbg_mem_desc *md; |
541 | u32 lo, hi, used, alloc; |
542 | int n, i; |
543 | |
544 | memset(meminfo_buff->avail, 0, |
545 | ARRAY_SIZE(meminfo_buff->avail) * |
546 | sizeof(struct cudbg_mem_desc)); |
547 | memset(meminfo_buff->mem, 0, |
548 | (ARRAY_SIZE(cudbg_region) + 3) * sizeof(struct cudbg_mem_desc)); |
549 | md = meminfo_buff->mem; |
550 | |
551 | for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) { |
552 | meminfo_buff->mem[i].limit = 0; |
553 | meminfo_buff->mem[i].idx = i; |
554 | } |
555 | |
556 | /* Find and sort the populated memory ranges */ |
557 | i = 0; |
	lo = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
559 | if (lo & EDRAM0_ENABLE_F) { |
		hi = t4_read_reg(padap, MA_EDRAM0_BAR_A);
561 | meminfo_buff->avail[i].base = |
562 | cudbg_mbytes_to_bytes(EDRAM0_BASE_G(hi)); |
563 | meminfo_buff->avail[i].limit = |
564 | meminfo_buff->avail[i].base + |
565 | cudbg_mbytes_to_bytes(EDRAM0_SIZE_G(hi)); |
566 | meminfo_buff->avail[i].idx = 0; |
567 | i++; |
568 | } |
569 | |
570 | if (lo & EDRAM1_ENABLE_F) { |
		hi = t4_read_reg(padap, MA_EDRAM1_BAR_A);
572 | meminfo_buff->avail[i].base = |
573 | cudbg_mbytes_to_bytes(EDRAM1_BASE_G(hi)); |
574 | meminfo_buff->avail[i].limit = |
575 | meminfo_buff->avail[i].base + |
576 | cudbg_mbytes_to_bytes(EDRAM1_SIZE_G(hi)); |
577 | meminfo_buff->avail[i].idx = 1; |
578 | i++; |
579 | } |
580 | |
	if (is_t5(padap->params.chip)) {
582 | if (lo & EXT_MEM0_ENABLE_F) { |
			hi = t4_read_reg(padap, MA_EXT_MEMORY0_BAR_A);
584 | meminfo_buff->avail[i].base = |
585 | cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi)); |
586 | meminfo_buff->avail[i].limit = |
587 | meminfo_buff->avail[i].base + |
588 | cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi)); |
589 | meminfo_buff->avail[i].idx = 3; |
590 | i++; |
591 | } |
592 | |
593 | if (lo & EXT_MEM1_ENABLE_F) { |
			hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
595 | meminfo_buff->avail[i].base = |
596 | cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi)); |
597 | meminfo_buff->avail[i].limit = |
598 | meminfo_buff->avail[i].base + |
599 | cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi)); |
600 | meminfo_buff->avail[i].idx = 4; |
601 | i++; |
602 | } |
603 | } else { |
604 | if (lo & EXT_MEM_ENABLE_F) { |
			hi = t4_read_reg(padap, MA_EXT_MEMORY_BAR_A);
606 | meminfo_buff->avail[i].base = |
607 | cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi)); |
608 | meminfo_buff->avail[i].limit = |
609 | meminfo_buff->avail[i].base + |
610 | cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi)); |
611 | meminfo_buff->avail[i].idx = 2; |
612 | i++; |
613 | } |
614 | |
615 | if (lo & HMA_MUX_F) { |
			hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
617 | meminfo_buff->avail[i].base = |
618 | cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi)); |
619 | meminfo_buff->avail[i].limit = |
620 | meminfo_buff->avail[i].base + |
621 | cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi)); |
622 | meminfo_buff->avail[i].idx = 5; |
623 | i++; |
624 | } |
625 | } |
626 | |
627 | if (!i) /* no memory available */ |
628 | return CUDBG_STATUS_ENTITY_NOT_FOUND; |
629 | |
630 | meminfo_buff->avail_c = i; |
	sort(meminfo_buff->avail, i, sizeof(struct cudbg_mem_desc),
	     cudbg_mem_desc_cmp, NULL);
	(md++)->base = t4_read_reg(padap, SGE_DBQ_CTXT_BADDR_A);
	(md++)->base = t4_read_reg(padap, SGE_IMSG_CTXT_BADDR_A);
	(md++)->base = t4_read_reg(padap, SGE_FLM_CACHE_BADDR_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_TCB_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_TIMER_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_RX_FLST_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_TX_FLST_BASE_A);
	(md++)->base = t4_read_reg(padap, TP_CMM_MM_PS_FLST_BASE_A);
642 | |
643 | /* the next few have explicit upper bounds */ |
	md->base = t4_read_reg(padap, TP_PMM_TX_BASE_A);
	md->limit = md->base - 1 +
		    t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A) *
		    PMTXMAXPAGE_G(t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A));
	md++;

	md->base = t4_read_reg(padap, TP_PMM_RX_BASE_A);
	md->limit = md->base - 1 +
		    t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) *
		    PMRXMAXPAGE_G(t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A));
	md++;

	if (t4_read_reg(padap, LE_DB_CONFIG_A) & HASHEN_F) {
		if (CHELSIO_CHIP_VERSION(padap->params.chip) <= CHELSIO_T5) {
			hi = t4_read_reg(padap, LE_DB_TID_HASHBASE_A) / 4;
			md->base = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
		} else {
			hi = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
			md->base = t4_read_reg(padap,
					       LE_DB_HASH_TBL_BASE_ADDR_A);
664 | } |
665 | md->limit = 0; |
666 | } else { |
667 | md->base = 0; |
668 | md->idx = ARRAY_SIZE(cudbg_region); /* hide it */ |
669 | } |
670 | md++; |
671 | |
672 | #define ulp_region(reg) do { \ |
673 | md->base = t4_read_reg(padap, ULP_ ## reg ## _LLIMIT_A);\ |
674 | (md++)->limit = t4_read_reg(padap, ULP_ ## reg ## _ULIMIT_A);\ |
675 | } while (0) |
676 | |
677 | ulp_region(RX_ISCSI); |
678 | ulp_region(RX_TDDP); |
679 | ulp_region(TX_TPT); |
680 | ulp_region(RX_STAG); |
681 | ulp_region(RX_RQ); |
682 | ulp_region(RX_RQUDP); |
683 | ulp_region(RX_PBL); |
684 | ulp_region(TX_PBL); |
685 | #undef ulp_region |
686 | md->base = 0; |
687 | md->idx = ARRAY_SIZE(cudbg_region); |
	if (!is_t4(padap->params.chip)) {
		u32 fifo_size = t4_read_reg(padap, SGE_DBVFIFO_SIZE_A);
		u32 sge_ctrl = t4_read_reg(padap, SGE_CONTROL2_A);
		u32 size = 0;

		if (is_t5(padap->params.chip)) {
694 | if (sge_ctrl & VFIFO_ENABLE_F) |
695 | size = DBVFIFO_SIZE_G(fifo_size); |
696 | } else { |
697 | size = T6_DBVFIFO_SIZE_G(fifo_size); |
698 | } |
699 | |
700 | if (size) { |
701 | md->base = BASEADDR_G(t4_read_reg(padap, |
702 | SGE_DBVFIFO_BADDR_A)); |
703 | md->limit = md->base + (size << 2) - 1; |
704 | } |
705 | } |
706 | |
707 | md++; |
708 | |
	md->base = t4_read_reg(padap, ULP_RX_CTX_BASE_A);
	md->limit = 0;
	md++;
	md->base = t4_read_reg(padap, ULP_TX_ERR_TABLE_BASE_A);
713 | md->limit = 0; |
714 | md++; |
715 | |
716 | md->base = padap->vres.ocq.start; |
717 | if (padap->vres.ocq.size) |
718 | md->limit = md->base + padap->vres.ocq.size - 1; |
719 | else |
720 | md->idx = ARRAY_SIZE(cudbg_region); /* hide it */ |
721 | md++; |
722 | |
723 | /* add any address-space holes, there can be up to 3 */ |
724 | for (n = 0; n < i - 1; n++) |
725 | if (meminfo_buff->avail[n].limit < |
726 | meminfo_buff->avail[n + 1].base) |
727 | (md++)->base = meminfo_buff->avail[n].limit; |
728 | |
729 | if (meminfo_buff->avail[n].limit) |
730 | (md++)->base = meminfo_buff->avail[n].limit; |
731 | |
732 | n = md - meminfo_buff->mem; |
733 | meminfo_buff->mem_c = n; |
734 | |
	sort(meminfo_buff->mem, n, sizeof(struct cudbg_mem_desc),
	     cudbg_mem_desc_cmp, NULL);

	lo = t4_read_reg(padap, CIM_SDRAM_BASE_ADDR_A);
	hi = t4_read_reg(padap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
	meminfo_buff->up_ram_lo = lo;
	meminfo_buff->up_ram_hi = hi;

	lo = t4_read_reg(padap, CIM_EXTMEM2_BASE_ADDR_A);
	hi = t4_read_reg(padap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
745 | meminfo_buff->up_extmem2_lo = lo; |
746 | meminfo_buff->up_extmem2_hi = hi; |
747 | |
	lo = t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A);
749 | for (i = 0, meminfo_buff->free_rx_cnt = 0; i < 2; i++) |
750 | meminfo_buff->free_rx_cnt += |
751 | FREERXPAGECOUNT_G(t4_read_reg(padap, |
752 | TP_FLM_FREE_RX_CNT_A)); |
753 | |
754 | meminfo_buff->rx_pages_data[0] = PMRXMAXPAGE_G(lo); |
755 | meminfo_buff->rx_pages_data[1] = |
		t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) >> 10;
757 | meminfo_buff->rx_pages_data[2] = (lo & PMRXNUMCHN_F) ? 2 : 1; |
758 | |
	lo = t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A);
	hi = t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A);
761 | for (i = 0, meminfo_buff->free_tx_cnt = 0; i < 4; i++) |
762 | meminfo_buff->free_tx_cnt += |
763 | FREETXPAGECOUNT_G(t4_read_reg(padap, |
764 | TP_FLM_FREE_TX_CNT_A)); |
765 | |
766 | meminfo_buff->tx_pages_data[0] = PMTXMAXPAGE_G(lo); |
767 | meminfo_buff->tx_pages_data[1] = |
768 | hi >= (1 << 20) ? (hi >> 20) : (hi >> 10); |
769 | meminfo_buff->tx_pages_data[2] = |
770 | hi >= (1 << 20) ? 'M' : 'K'; |
771 | meminfo_buff->tx_pages_data[3] = 1 << PMTXNUMCHN_G(lo); |
772 | |
	meminfo_buff->p_structs = t4_read_reg(padap, TP_CMM_MM_MAX_PSTRUCT_A);
774 | meminfo_buff->p_structs_free_cnt = |
775 | FREEPSTRUCTCOUNT_G(t4_read_reg(padap, TP_FLM_FREE_PS_CNT_A)); |
776 | |
	for (i = 0; i < 4; i++) {
		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
			lo = t4_read_reg(padap,
					 MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
		else
			lo = t4_read_reg(padap, MPS_RX_PG_RSV0_A + i * 4);
		if (is_t5(padap->params.chip)) {
			used = T5_USED_G(lo);
			alloc = T5_ALLOC_G(lo);
		} else {
			used = USED_G(lo);
			alloc = ALLOC_G(lo);
		}
790 | meminfo_buff->port_used[i] = used; |
791 | meminfo_buff->port_alloc[i] = alloc; |
792 | } |
793 | |
	for (i = 0; i < padap->params.arch.nchan; i++) {
		if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
			lo = t4_read_reg(padap,
					 MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
		else
			lo = t4_read_reg(padap, MPS_RX_PG_RSV4_A + i * 4);
		if (is_t5(padap->params.chip)) {
			used = T5_USED_G(lo);
			alloc = T5_ALLOC_G(lo);
		} else {
			used = USED_G(lo);
			alloc = ALLOC_G(lo);
		}
807 | meminfo_buff->loopback_used[i] = used; |
808 | meminfo_buff->loopback_alloc[i] = alloc; |
809 | } |
810 | |
811 | return 0; |
812 | } |
813 | |
814 | int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init, |
815 | struct cudbg_buffer *dbg_buff, |
816 | struct cudbg_error *cudbg_err) |
817 | { |
818 | struct adapter *padap = pdbg_init->adap; |
819 | struct cudbg_buffer temp_buff = { 0 }; |
820 | u32 buf_size = 0; |
821 | int rc = 0; |
822 | |
	if (is_t4(padap->params.chip))
		buf_size = T4_REGMAP_SIZE;
	else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
		buf_size = T5_REGMAP_SIZE;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, buf_size, &temp_buff);
	if (rc)
		return rc;
	t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
833 | } |
834 | |
835 | int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init, |
836 | struct cudbg_buffer *dbg_buff, |
837 | struct cudbg_error *cudbg_err) |
838 | { |
839 | struct adapter *padap = pdbg_init->adap; |
840 | struct cudbg_buffer temp_buff = { 0 }; |
841 | struct devlog_params *dparams; |
842 | int rc = 0; |
843 | |
	rc = t4_init_devlog_params(padap);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		return rc;
	}

	dparams = &padap->params.devlog;
	rc = cudbg_get_buff(pdbg_init, dbg_buff, dparams->size, &temp_buff);
	if (rc)
		return rc;

	/* Collect FW devlog */
	if (dparams->start != 0) {
		spin_lock(&padap->win0_lock);
		rc = t4_memory_rw(padap, padap->params.drv_memwin,
				  dparams->memtype, dparams->start,
				  dparams->size,
				  (__be32 *)(char *)temp_buff.data,
				  1);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
			return rc;
		}
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
871 | } |
872 | |
873 | int cudbg_collect_cim_la(struct cudbg_init *pdbg_init, |
874 | struct cudbg_buffer *dbg_buff, |
875 | struct cudbg_error *cudbg_err) |
876 | { |
877 | struct adapter *padap = pdbg_init->adap; |
878 | struct cudbg_buffer temp_buff = { 0 }; |
879 | int size, rc; |
880 | u32 cfg = 0; |
881 | |
	if (is_t6(padap->params.chip)) {
		size = padap->params.cim_la_size / 10 + 1;
		size *= 10 * sizeof(u32);
	} else {
		size = padap->params.cim_la_size / 8;
		size *= 8 * sizeof(u32);
	}

	size += sizeof(cfg);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}

	memcpy((char *)temp_buff.data, &cfg, sizeof(cfg));
	rc = t4_cim_read_la(padap,
			    (u32 *)((char *)temp_buff.data + sizeof(cfg)),
			    NULL);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
912 | } |
913 | |
914 | int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init, |
915 | struct cudbg_buffer *dbg_buff, |
916 | struct cudbg_error *cudbg_err) |
917 | { |
918 | struct adapter *padap = pdbg_init->adap; |
919 | struct cudbg_buffer temp_buff = { 0 }; |
920 | int size, rc; |
921 | |
	size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
	if (rc)
		return rc;

	t4_cim_read_ma_la(padap,
			  (u32 *)temp_buff.data,
			  (u32 *)((char *)temp_buff.data +
				  5 * CIM_MALA_SIZE));
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
932 | } |
933 | |
934 | int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init, |
935 | struct cudbg_buffer *dbg_buff, |
936 | struct cudbg_error *cudbg_err) |
937 | { |
938 | struct adapter *padap = pdbg_init->adap; |
939 | struct cudbg_buffer temp_buff = { 0 }; |
940 | struct cudbg_cim_qcfg *cim_qcfg_data; |
941 | int rc; |
942 | |
	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_cim_qcfg),
			    &temp_buff);
	if (rc)
		return rc;

	cim_qcfg_data = (struct cudbg_cim_qcfg *)temp_buff.data;
	cim_qcfg_data->chip = padap->params.chip;
	rc = t4_cim_read(padap, UP_IBQ_0_RDADDR_A,
			 ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}

	rc = t4_cim_read(padap, UP_OBQ_0_REALADDR_A,
			 ARRAY_SIZE(cim_qcfg_data->obq_wr),
			 cim_qcfg_data->obq_wr);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}

	t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size,
			 cim_qcfg_data->thres);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
970 | } |
971 | |
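/* Dump a single CIM inbound queue (IBQ) into the debug buffer. */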
972 | static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init, |
973 | struct cudbg_buffer *dbg_buff, |
974 | struct cudbg_error *cudbg_err, int qid) |
975 | { |
976 | struct adapter *padap = pdbg_init->adap; |
977 | struct cudbg_buffer temp_buff = { 0 }; |
978 | int no_of_read_words, rc = 0; |
979 | u32 qsize; |
980 | |
	/* collect CIM IBQ */
	qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
	if (rc)
		return rc;

	/* t4_read_cim_ibq will return no. of read words or error */
	no_of_read_words = t4_read_cim_ibq(padap, qid,
					   (u32 *)temp_buff.data, qsize);
	/* A count of zero or less indicates an error */
	if (no_of_read_words <= 0) {
		if (!no_of_read_words)
			rc = CUDBG_SYSTEM_ERROR;
		else
			rc = no_of_read_words;
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1001 | } |
1002 | |
1003 | int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init, |
1004 | struct cudbg_buffer *dbg_buff, |
1005 | struct cudbg_error *cudbg_err) |
1006 | { |
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 0);
1008 | } |
1009 | |
1010 | int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init, |
1011 | struct cudbg_buffer *dbg_buff, |
1012 | struct cudbg_error *cudbg_err) |
1013 | { |
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 1);
1015 | } |
1016 | |
1017 | int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init, |
1018 | struct cudbg_buffer *dbg_buff, |
1019 | struct cudbg_error *cudbg_err) |
1020 | { |
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 2);
1022 | } |
1023 | |
1024 | int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init, |
1025 | struct cudbg_buffer *dbg_buff, |
1026 | struct cudbg_error *cudbg_err) |
1027 | { |
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 3);
1029 | } |
1030 | |
1031 | int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init, |
1032 | struct cudbg_buffer *dbg_buff, |
1033 | struct cudbg_error *cudbg_err) |
1034 | { |
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 4);
1036 | } |
1037 | |
1038 | int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init, |
1039 | struct cudbg_buffer *dbg_buff, |
1040 | struct cudbg_error *cudbg_err) |
1041 | { |
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 5);
1043 | } |
1044 | |
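/* Return the size of CIM OBQ @qid in bytes. CIMQSIZE is in units of
 * 64 32-bit words.
 */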
1045 | u32 cudbg_cim_obq_size(struct adapter *padap, int qid) |
1046 | { |
1047 | u32 value; |
1048 | |
	t4_write_reg(padap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
		     QUENUMSELECT_V(qid));
	value = t4_read_reg(padap, CIM_QUEUE_CONFIG_CTRL_A);
1052 | value = CIMQSIZE_G(value) * 64; /* size in number of words */ |
1053 | return value * sizeof(u32); |
1054 | } |
1055 | |
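/* Dump a single CIM outbound queue (OBQ) into the debug buffer. */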
1056 | static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init, |
1057 | struct cudbg_buffer *dbg_buff, |
1058 | struct cudbg_error *cudbg_err, int qid) |
1059 | { |
1060 | struct adapter *padap = pdbg_init->adap; |
1061 | struct cudbg_buffer temp_buff = { 0 }; |
1062 | int no_of_read_words, rc = 0; |
1063 | u32 qsize; |
1064 | |
	/* collect CIM OBQ */
	qsize =  cudbg_cim_obq_size(padap, qid);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
	if (rc)
		return rc;

	/* t4_read_cim_obq will return no. of read words or error */
	no_of_read_words = t4_read_cim_obq(padap, qid,
					   (u32 *)temp_buff.data, qsize);
	/* A count of zero or less indicates an error */
	if (no_of_read_words <= 0) {
		if (!no_of_read_words)
			rc = CUDBG_SYSTEM_ERROR;
		else
			rc = no_of_read_words;
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1085 | } |
1086 | |
1087 | int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init, |
1088 | struct cudbg_buffer *dbg_buff, |
1089 | struct cudbg_error *cudbg_err) |
1090 | { |
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 0);
1092 | } |
1093 | |
1094 | int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init, |
1095 | struct cudbg_buffer *dbg_buff, |
1096 | struct cudbg_error *cudbg_err) |
1097 | { |
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 1);
1099 | } |
1100 | |
1101 | int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init, |
1102 | struct cudbg_buffer *dbg_buff, |
1103 | struct cudbg_error *cudbg_err) |
1104 | { |
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 2);
1106 | } |
1107 | |
1108 | int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init, |
1109 | struct cudbg_buffer *dbg_buff, |
1110 | struct cudbg_error *cudbg_err) |
1111 | { |
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 3);
1113 | } |
1114 | |
1115 | int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init, |
1116 | struct cudbg_buffer *dbg_buff, |
1117 | struct cudbg_error *cudbg_err) |
1118 | { |
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 4);
1120 | } |
1121 | |
1122 | int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init, |
1123 | struct cudbg_buffer *dbg_buff, |
1124 | struct cudbg_error *cudbg_err) |
1125 | { |
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 5);
1127 | } |
1128 | |
1129 | int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init, |
1130 | struct cudbg_buffer *dbg_buff, |
1131 | struct cudbg_error *cudbg_err) |
1132 | { |
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 6);
1134 | } |
1135 | |
1136 | int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init, |
1137 | struct cudbg_buffer *dbg_buff, |
1138 | struct cudbg_error *cudbg_err) |
1139 | { |
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
1141 | } |
1142 | |
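/* Map a MEM_* memory type to its index in @mem_info->avail[], matching on
 * the idx flag assigned when the list of available memories was built.
 */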
1143 | static int cudbg_meminfo_get_mem_index(struct adapter *padap, |
1144 | struct cudbg_meminfo *mem_info, |
1145 | u8 mem_type, u8 *idx) |
1146 | { |
1147 | u8 i, flag; |
1148 | |
1149 | switch (mem_type) { |
1150 | case MEM_EDC0: |
1151 | flag = EDC0_FLAG; |
1152 | break; |
1153 | case MEM_EDC1: |
1154 | flag = EDC1_FLAG; |
1155 | break; |
1156 | case MEM_MC0: |
1157 | /* Some T5 cards have both MC0 and MC1. */ |
		flag = is_t5(padap->params.chip) ? MC0_FLAG : MC_FLAG;
1159 | break; |
1160 | case MEM_MC1: |
1161 | flag = MC1_FLAG; |
1162 | break; |
1163 | case MEM_HMA: |
1164 | flag = HMA_FLAG; |
1165 | break; |
1166 | default: |
1167 | return CUDBG_STATUS_ENTITY_NOT_FOUND; |
1168 | } |
1169 | |
1170 | for (i = 0; i < mem_info->avail_c; i++) { |
1171 | if (mem_info->avail[i].idx == flag) { |
1172 | *idx = i; |
1173 | return 0; |
1174 | } |
1175 | } |
1176 | |
1177 | return CUDBG_STATUS_ENTITY_NOT_FOUND; |
1178 | } |
1179 | |
1180 | /* Fetch the @region_name's start and end from @meminfo. */ |
1181 | static int cudbg_get_mem_region(struct adapter *padap, |
1182 | struct cudbg_meminfo *meminfo, |
1183 | u8 mem_type, const char *region_name, |
1184 | struct cudbg_mem_desc *mem_desc) |
1185 | { |
1186 | u8 mc, found = 0; |
1187 | u32 idx = 0; |
1188 | int rc, i; |
1189 | |
	rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc);
1191 | if (rc) |
1192 | return rc; |
1193 | |
	i = match_string(cudbg_region, ARRAY_SIZE(cudbg_region), region_name);
1195 | if (i < 0) |
1196 | return -EINVAL; |
1197 | |
1198 | idx = i; |
1199 | for (i = 0; i < meminfo->mem_c; i++) { |
1200 | if (meminfo->mem[i].idx >= ARRAY_SIZE(cudbg_region)) |
1201 | continue; /* Skip holes */ |
1202 | |
1203 | if (!(meminfo->mem[i].limit)) |
1204 | meminfo->mem[i].limit = |
1205 | i < meminfo->mem_c - 1 ? |
1206 | meminfo->mem[i + 1].base - 1 : ~0; |
1207 | |
1208 | if (meminfo->mem[i].idx == idx) { |
1209 | /* Check if the region exists in @mem_type memory */ |
1210 | if (meminfo->mem[i].base < meminfo->avail[mc].base && |
1211 | meminfo->mem[i].limit < meminfo->avail[mc].base) |
1212 | return -EINVAL; |
1213 | |
1214 | if (meminfo->mem[i].base > meminfo->avail[mc].limit) |
1215 | return -EINVAL; |
1216 | |
1217 | memcpy(mem_desc, &meminfo->mem[i], |
1218 | sizeof(struct cudbg_mem_desc)); |
1219 | found = 1; |
1220 | break; |
1221 | } |
1222 | } |
1223 | if (!found) |
1224 | return -EINVAL; |
1225 | |
1226 | return 0; |
1227 | } |
1228 | |
1229 | /* Fetch and update the start and end of the requested memory region w.r.t 0 |
1230 | * in the corresponding EDC/MC/HMA. |
1231 | */ |
1232 | static int cudbg_get_mem_relative(struct adapter *padap, |
1233 | struct cudbg_meminfo *meminfo, |
1234 | u8 mem_type, u32 *out_base, u32 *out_end) |
1235 | { |
1236 | u8 mc_idx; |
1237 | int rc; |
1238 | |
	rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc_idx);
1240 | if (rc) |
1241 | return rc; |
1242 | |
1243 | if (*out_base < meminfo->avail[mc_idx].base) |
1244 | *out_base = 0; |
1245 | else |
1246 | *out_base -= meminfo->avail[mc_idx].base; |
1247 | |
1248 | if (*out_end > meminfo->avail[mc_idx].limit) |
1249 | *out_end = meminfo->avail[mc_idx].limit; |
1250 | else |
1251 | *out_end -= meminfo->avail[mc_idx].base; |
1252 | |
1253 | return 0; |
1254 | } |
1255 | |
1256 | /* Get TX and RX Payload region */ |
1257 | static int cudbg_get_payload_range(struct adapter *padap, u8 mem_type, |
1258 | const char *region_name, |
1259 | struct cudbg_region_info *payload) |
1260 | { |
1261 | struct cudbg_mem_desc mem_desc = { 0 }; |
1262 | struct cudbg_meminfo meminfo; |
1263 | int rc; |
1264 | |
	rc = cudbg_fill_meminfo(padap, &meminfo);
1266 | if (rc) |
1267 | return rc; |
1268 | |
	rc = cudbg_get_mem_region(padap, &meminfo, mem_type, region_name,
				  &mem_desc);
1271 | if (rc) { |
1272 | payload->exist = false; |
1273 | return 0; |
1274 | } |
1275 | |
1276 | payload->exist = true; |
1277 | payload->start = mem_desc.base; |
1278 | payload->end = mem_desc.limit; |
1279 | |
	return cudbg_get_mem_relative(padap, &meminfo, mem_type,
				      &payload->start, &payload->end);
1282 | } |
1283 | |
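/* Read @len bytes of adapter memory of type @mtype at offset @addr into
 * @hbuf through a PCI-E memory window, mostly as 64-bit reads, with any
 * sub-8-byte residual handled separately.
 */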
1284 | static int cudbg_memory_read(struct cudbg_init *pdbg_init, int win, |
1285 | int mtype, u32 addr, u32 len, void *hbuf) |
1286 | { |
1287 | u32 win_pf, memoffset, mem_aperture, mem_base; |
1288 | struct adapter *adap = pdbg_init->adap; |
1289 | u32 pos, offset, resid; |
1290 | u32 *res_buf; |
1291 | u64 *buf; |
1292 | int ret; |
1293 | |
1294 | /* Argument sanity checks ... |
1295 | */ |
1296 | if (addr & 0x3 || (uintptr_t)hbuf & 0x3) |
1297 | return -EINVAL; |
1298 | |
1299 | buf = (u64 *)hbuf; |
1300 | |
1301 | /* Try to do 64-bit reads. Residual will be handled later. */ |
1302 | resid = len & 0x7; |
1303 | len -= resid; |
1304 | |
	ret = t4_memory_rw_init(adap, win, mtype, &memoffset, &mem_base,
				&mem_aperture);
1307 | if (ret) |
1308 | return ret; |
1309 | |
1310 | addr = addr + memoffset; |
	win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);
1312 | |
1313 | pos = addr & ~(mem_aperture - 1); |
1314 | offset = addr - pos; |
1315 | |
1316 | /* Set up initial PCI-E Memory Window to cover the start of our |
1317 | * transfer. |
1318 | */ |
	t4_memory_update_win(adap, win, pos | win_pf);
1320 | |
1321 | /* Transfer data from the adapter */ |
1322 | while (len > 0) { |
1323 | *buf++ = le64_to_cpu((__force __le64) |
1324 | t4_read_reg64(adap, mem_base + offset)); |
1325 | offset += sizeof(u64); |
1326 | len -= sizeof(u64); |
1327 | |
1328 | /* If we've reached the end of our current window aperture, |
1329 | * move the PCI-E Memory Window on to the next. |
1330 | */ |
1331 | if (offset == mem_aperture) { |
1332 | pos += mem_aperture; |
1333 | offset = 0; |
			t4_memory_update_win(adap, win, pos | win_pf);
1335 | } |
1336 | } |
1337 | |
1338 | res_buf = (u32 *)buf; |
1339 | /* Read residual in 32-bit multiples */ |
1340 | while (resid > sizeof(u32)) { |
1341 | *res_buf++ = le32_to_cpu((__force __le32) |
1342 | t4_read_reg(adap, mem_base + offset)); |
1343 | offset += sizeof(u32); |
1344 | resid -= sizeof(u32); |
1345 | |
1346 | /* If we've reached the end of our current window aperture, |
1347 | * move the PCI-E Memory Window on to the next. |
1348 | */ |
1349 | if (offset == mem_aperture) { |
1350 | pos += mem_aperture; |
1351 | offset = 0; |
			t4_memory_update_win(adap, win, pos | win_pf);
1353 | } |
1354 | } |
1355 | |
1356 | /* Transfer residual < 32-bits */ |
	if (resid)
		t4_memory_rw_residual(adap, resid, mem_base + offset,
				      (u8 *)res_buf, T4_MEMORY_READ);
1360 | |
1361 | return 0; |
1362 | } |
1363 | |
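/* Yield the cpu once every this many chunks read in cudbg_read_fw_mem()
 * to avoid CPU stall traces on huge memory dumps.
 */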
1364 | #define CUDBG_YIELD_ITERATION 256 |
1365 | |
1366 | static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init, |
1367 | struct cudbg_buffer *dbg_buff, u8 mem_type, |
1368 | unsigned long tot_len, |
1369 | struct cudbg_error *cudbg_err) |
1370 | { |
	static const char * const region_name[] = { "Tx payload:",
						    "Rx payload:" };
1373 | unsigned long bytes, bytes_left, bytes_read = 0; |
1374 | struct adapter *padap = pdbg_init->adap; |
1375 | struct cudbg_buffer temp_buff = { 0 }; |
1376 | struct cudbg_region_info payload[2]; |
1377 | u32 yield_count = 0; |
1378 | int rc = 0; |
1379 | u8 i; |
1380 | |
1381 | /* Get TX/RX Payload region range if they exist */ |
1382 | memset(payload, 0, sizeof(payload)); |
1383 | for (i = 0; i < ARRAY_SIZE(region_name); i++) { |
		rc = cudbg_get_payload_range(padap, mem_type, region_name[i],
					     &payload[i]);
1386 | if (rc) |
1387 | return rc; |
1388 | |
1389 | if (payload[i].exist) { |
1390 | /* Align start and end to avoid wrap around */ |
1391 | payload[i].start = roundup(payload[i].start, |
1392 | CUDBG_CHUNK_SIZE); |
1393 | payload[i].end = rounddown(payload[i].end, |
1394 | CUDBG_CHUNK_SIZE); |
1395 | } |
1396 | } |
1397 | |
1398 | bytes_left = tot_len; |
1399 | while (bytes_left > 0) { |
		/* As the MC size is huge and is read through PIO access,
		 * this loop can hold the cpu for a long time. The OS may
		 * think the process has hung and generate CPU stall traces.
		 * So yield the cpu regularly.
		 */
		yield_count++;
		if (!(yield_count % CUDBG_YIELD_ITERATION))
			schedule();

		bytes = min_t(unsigned long, bytes_left,
			      (unsigned long)CUDBG_CHUNK_SIZE);
		rc = cudbg_get_buff(pdbg_init, dbg_buff, bytes, &temp_buff);
1412 | if (rc) |
1413 | return rc; |
1414 | |
1415 | for (i = 0; i < ARRAY_SIZE(payload); i++) |
1416 | if (payload[i].exist && |
1417 | bytes_read >= payload[i].start && |
1418 | bytes_read + bytes <= payload[i].end) |
1419 | /* TX and RX Payload regions can't overlap */ |
1420 | goto skip_read; |
1421 | |
		spin_lock(&padap->win0_lock);
		rc = cudbg_memory_read(pdbg_init, MEMWIN_NIC, mem_type,
				       bytes_read, bytes, temp_buff.data);
		spin_unlock(&padap->win0_lock);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
1429 | return rc; |
1430 | } |
1431 | |
1432 | skip_read: |
1433 | bytes_left -= bytes; |
1434 | bytes_read += bytes; |
		rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
						  dbg_buff);
		if (rc) {
			cudbg_put_buff(pdbg_init, &temp_buff);
1439 | return rc; |
1440 | } |
1441 | } |
1442 | return rc; |
1443 | } |
1444 | |
1445 | static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init, |
1446 | struct cudbg_error *cudbg_err) |
1447 | { |
1448 | struct adapter *padap = pdbg_init->adap; |
1449 | int rc; |
1450 | |
1451 | if (is_fw_attached(pdbg_init)) { |
1452 | /* Flush uP dcache before reading edcX/mcX */ |
		rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
1454 | if (rc) |
1455 | cudbg_err->sys_warn = rc; |
1456 | } |
1457 | } |
1458 | |
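/* Compute the size of the @mem_type memory region as the span of its
 * entry in the available-memory list.
 */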
1459 | static int cudbg_mem_region_size(struct cudbg_init *pdbg_init, |
1460 | struct cudbg_error *cudbg_err, |
1461 | u8 mem_type, unsigned long *region_size) |
1462 | { |
1463 | struct adapter *padap = pdbg_init->adap; |
1464 | struct cudbg_meminfo mem_info; |
1465 | u8 mc_idx; |
1466 | int rc; |
1467 | |
1468 | memset(&mem_info, 0, sizeof(struct cudbg_meminfo)); |
	rc = cudbg_fill_meminfo(padap, &mem_info);
1470 | if (rc) { |
1471 | cudbg_err->sys_err = rc; |
1472 | return rc; |
1473 | } |
1474 | |
1475 | cudbg_t4_fwcache(pdbg_init, cudbg_err); |
	rc = cudbg_meminfo_get_mem_index(padap, &mem_info, mem_type, &mc_idx);
1477 | if (rc) { |
1478 | cudbg_err->sys_err = rc; |
1479 | return rc; |
1480 | } |
1481 | |
1482 | if (region_size) |
1483 | *region_size = mem_info.avail[mc_idx].limit - |
1484 | mem_info.avail[mc_idx].base; |
1485 | |
1486 | return 0; |
1487 | } |
1488 | |
1489 | static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init, |
1490 | struct cudbg_buffer *dbg_buff, |
1491 | struct cudbg_error *cudbg_err, |
1492 | u8 mem_type) |
1493 | { |
1494 | unsigned long size = 0; |
1495 | int rc; |
1496 | |
	rc = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type, &size);
1498 | if (rc) |
1499 | return rc; |
1500 | |
	return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
				 cudbg_err);
1503 | } |
1504 | |
1505 | int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init, |
1506 | struct cudbg_buffer *dbg_buff, |
1507 | struct cudbg_error *cudbg_err) |
1508 | { |
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_EDC0);
1511 | } |
1512 | |
1513 | int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init, |
1514 | struct cudbg_buffer *dbg_buff, |
1515 | struct cudbg_error *cudbg_err) |
1516 | { |
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_EDC1);
1519 | } |
1520 | |
1521 | int cudbg_collect_mc0_meminfo(struct cudbg_init *pdbg_init, |
1522 | struct cudbg_buffer *dbg_buff, |
1523 | struct cudbg_error *cudbg_err) |
1524 | { |
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_MC0);
1527 | } |
1528 | |
1529 | int cudbg_collect_mc1_meminfo(struct cudbg_init *pdbg_init, |
1530 | struct cudbg_buffer *dbg_buff, |
1531 | struct cudbg_error *cudbg_err) |
1532 | { |
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_MC1);
1535 | } |
1536 | |
1537 | int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init, |
1538 | struct cudbg_buffer *dbg_buff, |
1539 | struct cudbg_error *cudbg_err) |
1540 | { |
	return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
					MEM_HMA);
1543 | } |
1544 | |
int cudbg_collect_rss(struct cudbg_init *pdbg_init,
1546 | struct cudbg_buffer *dbg_buff, |
1547 | struct cudbg_error *cudbg_err) |
1548 | { |
1549 | struct adapter *padap = pdbg_init->adap; |
1550 | struct cudbg_buffer temp_buff = { 0 }; |
1551 | int rc, nentries; |
1552 | |
	nentries = t4_chip_rss_size(padap);
	rc = cudbg_get_buff(pdbg_init, dbg_buff, nentries * sizeof(u16),
			    &temp_buff);
	if (rc)
		return rc;

	rc = t4_read_rss(padap, (u16 *)temp_buff.data);
	if (rc) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1566 | } |
1567 | |
int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
1569 | struct cudbg_buffer *dbg_buff, |
1570 | struct cudbg_error *cudbg_err) |
1571 | { |
1572 | struct adapter *padap = pdbg_init->adap; |
1573 | struct cudbg_buffer temp_buff = { 0 }; |
1574 | struct cudbg_rss_vf_conf *vfconf; |
1575 | int vf, rc, vf_count; |
1576 | |
	vf_count = padap->params.arch.vfcount;
	rc = cudbg_get_buff(pdbg_init, dbg_buff,
			    vf_count * sizeof(struct cudbg_rss_vf_conf),
			    &temp_buff);
	if (rc)
		return rc;

	vfconf = (struct cudbg_rss_vf_conf *)temp_buff.data;
	for (vf = 0; vf < vf_count; vf++)
		t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
				      &vfconf[vf].rss_vf_vfh, true);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1589 | } |
1590 | |
1591 | int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init, |
1592 | struct cudbg_buffer *dbg_buff, |
1593 | struct cudbg_error *cudbg_err) |
1594 | { |
1595 | struct adapter *padap = pdbg_init->adap; |
1596 | struct cudbg_buffer temp_buff = { 0 }; |
1597 | int rc; |
1598 | |
	rc = cudbg_get_buff(pdbg_init, dbg_buff, NMTUS * sizeof(u16),
			    &temp_buff);
1601 | if (rc) |
1602 | return rc; |
1603 | |
	t4_read_mtu_tbl(padap, (u16 *)temp_buff.data, NULL);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1606 | } |
1607 | |
1608 | int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init, |
1609 | struct cudbg_buffer *dbg_buff, |
1610 | struct cudbg_error *cudbg_err) |
1611 | { |
1612 | struct adapter *padap = pdbg_init->adap; |
1613 | struct cudbg_buffer temp_buff = { 0 }; |
1614 | struct cudbg_pm_stats *pm_stats_buff; |
1615 | int rc; |
1616 | |
	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_pm_stats),
			    &temp_buff);
1619 | if (rc) |
1620 | return rc; |
1621 | |
1622 | pm_stats_buff = (struct cudbg_pm_stats *)temp_buff.data; |
	t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
	t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1626 | } |
1627 | |
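/* Dump the Tx hardware scheduler state: queue request map, timer mode,
 * pace table, and per-scheduler rate and IPG values. Requires a valid
 * core clock.
 */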
1628 | int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init, |
1629 | struct cudbg_buffer *dbg_buff, |
1630 | struct cudbg_error *cudbg_err) |
1631 | { |
1632 | struct adapter *padap = pdbg_init->adap; |
1633 | struct cudbg_buffer temp_buff = { 0 }; |
1634 | struct cudbg_hw_sched *hw_sched_buff; |
1635 | int i, rc = 0; |
1636 | |
1637 | if (!padap->params.vpd.cclk) |
1638 | return CUDBG_STATUS_CCLK_NOT_DEFINED; |
1639 | |
	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_hw_sched),
			    &temp_buff);
	if (rc)
		return rc;
1645 | |
1646 | hw_sched_buff = (struct cudbg_hw_sched *)temp_buff.data; |
	hw_sched_buff->map = t4_read_reg(padap, TP_TX_MOD_QUEUE_REQ_MAP_A);
	hw_sched_buff->mode = TIMERMODE_G(t4_read_reg(padap, TP_MOD_CONFIG_A));
	t4_read_pace_tbl(padap, hw_sched_buff->pace_tab);
	for (i = 0; i < NTX_SCHED; ++i)
		t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
				&hw_sched_buff->ipg[i], true);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1654 | } |
1655 | |
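/* Dump the TP indirect registers: the TP_PIO, TP_TM_PIO, and
 * TP_MIB_INDEX register ranges, which differ between T5 and T6.
 */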
1656 | int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init, |
1657 | struct cudbg_buffer *dbg_buff, |
1658 | struct cudbg_error *cudbg_err) |
1659 | { |
1660 | struct adapter *padap = pdbg_init->adap; |
1661 | struct cudbg_buffer temp_buff = { 0 }; |
1662 | struct ireg_buf *ch_tp_pio; |
1663 | int i, rc, n = 0; |
1664 | u32 size; |
1665 | |
	if (is_t5(padap->params.chip))
1667 | n = sizeof(t5_tp_pio_array) + |
1668 | sizeof(t5_tp_tm_pio_array) + |
1669 | sizeof(t5_tp_mib_index_array); |
1670 | else |
1671 | n = sizeof(t6_tp_pio_array) + |
1672 | sizeof(t6_tp_tm_pio_array) + |
1673 | sizeof(t6_tp_mib_index_array); |
1674 | |
1675 | n = n / (IREG_NUM_ELEM * sizeof(u32)); |
1676 | size = sizeof(struct ireg_buf) * n; |
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1678 | if (rc) |
1679 | return rc; |
1680 | |
1681 | ch_tp_pio = (struct ireg_buf *)temp_buff.data; |
1682 | |
1683 | /* TP_PIO */ |
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
1688 | |
1689 | for (i = 0; i < n; i++) { |
1690 | struct ireg_field *tp_pio = &ch_tp_pio->tp_pio; |
1691 | u32 *buff = ch_tp_pio->outbuf; |
1692 | |
		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
		}
		t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
1706 | ch_tp_pio++; |
1707 | } |
1708 | |
1709 | /* TP_TM_PIO */ |
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
1714 | |
1715 | for (i = 0; i < n; i++) { |
1716 | struct ireg_field *tp_pio = &ch_tp_pio->tp_pio; |
1717 | u32 *buff = ch_tp_pio->outbuf; |
1718 | |
		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
			tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
			tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
			tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
		}
		t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
				  tp_pio->ireg_local_offset, true);
1732 | ch_tp_pio++; |
1733 | } |
1734 | |
1735 | /* TP_MIB_INDEX */ |
	if (is_t5(padap->params.chip))
		n = sizeof(t5_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_tp_mib_index_array) /
		    (IREG_NUM_ELEM * sizeof(u32));
1742 | |
	for (i = 0; i < n; i++) {
		struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
		u32 *buff = ch_tp_pio->outbuf;

		if (is_t5(padap->params.chip)) {
			tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t5_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t5_tp_mib_index_array[i][3];
		} else if (is_t6(padap->params.chip)) {
			tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
			tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
			tp_pio->ireg_local_offset =
				t6_tp_mib_index_array[i][2];
			tp_pio->ireg_offset_range =
				t6_tp_mib_index_array[i][3];
		}
		t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
			       tp_pio->ireg_local_offset, true);
		ch_tp_pio++;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1767 | } |
1768 | |
1769 | static void cudbg_read_sge_qbase_indirect_reg(struct adapter *padap, |
1770 | struct sge_qbase_reg_field *qbase, |
1771 | u32 func, bool is_pf) |
1772 | { |
1773 | u32 *buff, i; |
1774 | |
1775 | if (is_pf) { |
1776 | buff = qbase->pf_data_value[func]; |
1777 | } else { |
1778 | buff = qbase->vf_data_value[func]; |
1779 | /* In SGE_QBASE_INDEX, |
1780 | * Entries 0->7 are PF0->7, Entries 8->263 are VFID0->256. |
1781 | */ |
1782 | func += 8; |
1783 | } |
1784 | |
	t4_write_reg(padap, qbase->reg_addr, func);
	for (i = 0; i < SGE_QBASE_DATA_REG_NUM; i++, buff++)
		*buff = t4_read_reg(padap, qbase->reg_data[i]);
1788 | } |
1789 | |
1790 | int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init, |
1791 | struct cudbg_buffer *dbg_buff, |
1792 | struct cudbg_error *cudbg_err) |
1793 | { |
1794 | struct adapter *padap = pdbg_init->adap; |
1795 | struct cudbg_buffer temp_buff = { 0 }; |
1796 | struct sge_qbase_reg_field *sge_qbase; |
1797 | struct ireg_buf *ch_sge_dbg; |
1798 | u8 padap_running = 0; |
1799 | int i, rc; |
1800 | u32 size; |
1801 | |
1802 | /* Accessing SGE_QBASE_MAP[0-3] and SGE_QBASE_INDEX regs can |
1803 | * lead to SGE missing doorbells under heavy traffic. So, only |
1804 | * collect them when adapter is idle. |
1805 | */ |
1806 | for_each_port(padap, i) { |
		padap_running = netif_running(padap->port[i]);
1808 | if (padap_running) |
1809 | break; |
1810 | } |
1811 | |
1812 | size = sizeof(*ch_sge_dbg) * 2; |
1813 | if (!padap_running) |
1814 | size += sizeof(*sge_qbase); |
1815 | |
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1817 | if (rc) |
1818 | return rc; |
1819 | |
1820 | ch_sge_dbg = (struct ireg_buf *)temp_buff.data; |
1821 | for (i = 0; i < 2; i++) { |
1822 | struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio; |
1823 | u32 *buff = ch_sge_dbg->outbuf; |
1824 | |
1825 | sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0]; |
1826 | sge_pio->ireg_data = t5_sge_dbg_index_array[i][1]; |
1827 | sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2]; |
1828 | sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3]; |
		t4_read_indirect(padap,
				 sge_pio->ireg_addr,
				 sge_pio->ireg_data,
				 buff,
				 sge_pio->ireg_offset_range,
				 sge_pio->ireg_local_offset);
1835 | ch_sge_dbg++; |
1836 | } |
1837 | |
1838 | if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5 && |
1839 | !padap_running) { |
1840 | sge_qbase = (struct sge_qbase_reg_field *)ch_sge_dbg; |
1841 | /* 1 addr reg SGE_QBASE_INDEX and 4 data reg |
1842 | * SGE_QBASE_MAP[0-3] |
1843 | */ |
1844 | sge_qbase->reg_addr = t6_sge_qbase_index_array[0]; |
1845 | for (i = 0; i < SGE_QBASE_DATA_REG_NUM; i++) |
1846 | sge_qbase->reg_data[i] = |
1847 | t6_sge_qbase_index_array[i + 1]; |
1848 | |
		for (i = 0; i <= PCIE_FW_MASTER_M; i++)
			cudbg_read_sge_qbase_indirect_reg(padap, sge_qbase,
							  i, true);

		for (i = 0; i < padap->params.arch.vfcount; i++)
			cudbg_read_sge_qbase_indirect_reg(padap, sge_qbase,
							  i, false);
1856 | |
1857 | sge_qbase->vfcount = padap->params.arch.vfcount; |
1858 | } |
1859 | |
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1861 | } |
1862 | |
1863 | int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init, |
1864 | struct cudbg_buffer *dbg_buff, |
1865 | struct cudbg_error *cudbg_err) |
1866 | { |
1867 | struct adapter *padap = pdbg_init->adap; |
1868 | struct cudbg_buffer temp_buff = { 0 }; |
1869 | struct cudbg_ulprx_la *ulprx_la_buff; |
1870 | int rc; |
1871 | |
	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulprx_la),
			    &temp_buff);
1874 | if (rc) |
1875 | return rc; |
1876 | |
1877 | ulprx_la_buff = (struct cudbg_ulprx_la *)temp_buff.data; |
	t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
	ulprx_la_buff->size = ULPRX_LA_SIZE;
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1881 | } |
1882 | |
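/* Dump the TP logic analyzer: the capture mode and TPLA_SIZE data words. */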
1883 | int cudbg_collect_tp_la(struct cudbg_init *pdbg_init, |
1884 | struct cudbg_buffer *dbg_buff, |
1885 | struct cudbg_error *cudbg_err) |
1886 | { |
1887 | struct adapter *padap = pdbg_init->adap; |
1888 | struct cudbg_buffer temp_buff = { 0 }; |
1889 | struct cudbg_tp_la *tp_la_buff; |
1890 | int size, rc; |
1891 | |
1892 | size = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64); |
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1894 | if (rc) |
1895 | return rc; |
1896 | |
1897 | tp_la_buff = (struct cudbg_tp_la *)temp_buff.data; |
1898 | tp_la_buff->mode = DBGLAMODE_G(t4_read_reg(padap, TP_DBG_LA_CONFIG_A)); |
	t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1901 | } |
1902 | |
1903 | int cudbg_collect_meminfo(struct cudbg_init *pdbg_init, |
1904 | struct cudbg_buffer *dbg_buff, |
1905 | struct cudbg_error *cudbg_err) |
1906 | { |
1907 | struct adapter *padap = pdbg_init->adap; |
1908 | struct cudbg_buffer temp_buff = { 0 }; |
1909 | struct cudbg_meminfo *meminfo_buff; |
1910 | struct cudbg_ver_hdr *ver_hdr; |
1911 | int rc; |
1912 | |
	rc = cudbg_get_buff(pdbg_init, dbg_buff,
			    sizeof(struct cudbg_ver_hdr) +
			    sizeof(struct cudbg_meminfo),
			    &temp_buff);
1917 | if (rc) |
1918 | return rc; |
1919 | |
1920 | ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data; |
1921 | ver_hdr->signature = CUDBG_ENTITY_SIGNATURE; |
1922 | ver_hdr->revision = CUDBG_MEMINFO_REV; |
1923 | ver_hdr->size = sizeof(struct cudbg_meminfo); |
1924 | |
1925 | meminfo_buff = (struct cudbg_meminfo *)(temp_buff.data + |
1926 | sizeof(*ver_hdr)); |
1927 | rc = cudbg_fill_meminfo(padap, meminfo_buff); |
1928 | if (rc) { |
1929 | cudbg_err->sys_err = rc; |
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}

	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1935 | } |
1936 | |
1937 | int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init, |
1938 | struct cudbg_buffer *dbg_buff, |
1939 | struct cudbg_error *cudbg_err) |
1940 | { |
1941 | struct cudbg_cim_pif_la *cim_pif_la_buff; |
1942 | struct adapter *padap = pdbg_init->adap; |
1943 | struct cudbg_buffer temp_buff = { 0 }; |
1944 | int size, rc; |
1945 | |
1946 | size = sizeof(struct cudbg_cim_pif_la) + |
1947 | 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32); |
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1949 | if (rc) |
1950 | return rc; |
1951 | |
1952 | cim_pif_la_buff = (struct cudbg_cim_pif_la *)temp_buff.data; |
1953 | cim_pif_la_buff->size = CIM_PIFLA_SIZE; |
	t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
			   (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
			   NULL, NULL);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1958 | } |
1959 | |
1960 | int cudbg_collect_clk_info(struct cudbg_init *pdbg_init, |
1961 | struct cudbg_buffer *dbg_buff, |
1962 | struct cudbg_error *cudbg_err) |
1963 | { |
1964 | struct adapter *padap = pdbg_init->adap; |
1965 | struct cudbg_buffer temp_buff = { 0 }; |
1966 | struct cudbg_clk_info *clk_info_buff; |
1967 | u64 tp_tick_us; |
1968 | int rc; |
1969 | |
1970 | if (!padap->params.vpd.cclk) |
1971 | return CUDBG_STATUS_CCLK_NOT_DEFINED; |
1972 | |
	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_clk_info),
			    &temp_buff);
1975 | if (rc) |
1976 | return rc; |
1977 | |
1978 | clk_info_buff = (struct cudbg_clk_info *)temp_buff.data; |
1979 | clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk; /* psec */ |
	clk_info_buff->res = t4_read_reg(padap, TP_TIMER_RESOLUTION_A);
1981 | clk_info_buff->tre = TIMERRESOLUTION_G(clk_info_buff->res); |
1982 | clk_info_buff->dack_re = DELAYEDACKRESOLUTION_G(clk_info_buff->res); |
1983 | tp_tick_us = (clk_info_buff->cclk_ps << clk_info_buff->tre) / 1000000; |
1984 | |
	clk_info_buff->dack_timer =
		(clk_info_buff->cclk_ps << clk_info_buff->dack_re) / 1000000 *
		t4_read_reg(padap, TP_DACK_TIMER_A);
	clk_info_buff->retransmit_min =
		tp_tick_us * t4_read_reg(padap, TP_RXT_MIN_A);
	clk_info_buff->retransmit_max =
		tp_tick_us * t4_read_reg(padap, TP_RXT_MAX_A);
	clk_info_buff->persist_timer_min =
		tp_tick_us * t4_read_reg(padap, TP_PERS_MIN_A);
	clk_info_buff->persist_timer_max =
		tp_tick_us * t4_read_reg(padap, TP_PERS_MAX_A);
	clk_info_buff->keepalive_idle_timer =
		tp_tick_us * t4_read_reg(padap, TP_KEEP_IDLE_A);
	clk_info_buff->keepalive_interval =
		tp_tick_us * t4_read_reg(padap, TP_KEEP_INTVL_A);
	clk_info_buff->initial_srtt =
		tp_tick_us * INITSRTT_G(t4_read_reg(padap, TP_INIT_SRTT_A));
	clk_info_buff->finwait2_timer =
		tp_tick_us * t4_read_reg(padap, TP_FINWAIT2_TIMER_A);

	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2006 | } |
2007 | |
2008 | int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init, |
2009 | struct cudbg_buffer *dbg_buff, |
2010 | struct cudbg_error *cudbg_err) |
2011 | { |
2012 | struct adapter *padap = pdbg_init->adap; |
2013 | struct cudbg_buffer temp_buff = { 0 }; |
2014 | struct ireg_buf *ch_pcie; |
2015 | int i, rc, n; |
2016 | u32 size; |
2017 | |
2018 | n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32)); |
2019 | size = sizeof(struct ireg_buf) * n * 2; |
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2021 | if (rc) |
2022 | return rc; |
2023 | |
2024 | ch_pcie = (struct ireg_buf *)temp_buff.data; |
2025 | /* PCIE_PDBG */ |
2026 | for (i = 0; i < n; i++) { |
2027 | struct ireg_field *pcie_pio = &ch_pcie->tp_pio; |
2028 | u32 *buff = ch_pcie->outbuf; |
2029 | |
2030 | pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0]; |
2031 | pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1]; |
2032 | pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2]; |
2033 | pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3]; |
		t4_read_indirect(padap,
				 pcie_pio->ireg_addr,
				 pcie_pio->ireg_data,
				 buff,
				 pcie_pio->ireg_offset_range,
				 pcie_pio->ireg_local_offset);
2040 | ch_pcie++; |
2041 | } |
2042 | |
2043 | /* PCIE_CDBG */ |
2044 | n = sizeof(t5_pcie_cdbg_array) / (IREG_NUM_ELEM * sizeof(u32)); |
2045 | for (i = 0; i < n; i++) { |
2046 | struct ireg_field *pcie_pio = &ch_pcie->tp_pio; |
2047 | u32 *buff = ch_pcie->outbuf; |
2048 | |
2049 | pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0]; |
2050 | pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1]; |
2051 | pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2]; |
2052 | pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3]; |
		t4_read_indirect(padap,
				 pcie_pio->ireg_addr,
				 pcie_pio->ireg_data,
				 buff,
				 pcie_pio->ireg_offset_range,
				 pcie_pio->ireg_local_offset);
		ch_pcie++;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2062 | } |
2063 | |
2064 | int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init, |
2065 | struct cudbg_buffer *dbg_buff, |
2066 | struct cudbg_error *cudbg_err) |
2067 | { |
2068 | struct adapter *padap = pdbg_init->adap; |
2069 | struct cudbg_buffer temp_buff = { 0 }; |
2070 | struct ireg_buf *ch_pm; |
2071 | int i, rc, n; |
2072 | u32 size; |
2073 | |
2074 | n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32)); |
2075 | size = sizeof(struct ireg_buf) * n * 2; |
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2077 | if (rc) |
2078 | return rc; |
2079 | |
2080 | ch_pm = (struct ireg_buf *)temp_buff.data; |
2081 | /* PM_RX */ |
2082 | for (i = 0; i < n; i++) { |
2083 | struct ireg_field *pm_pio = &ch_pm->tp_pio; |
2084 | u32 *buff = ch_pm->outbuf; |
2085 | |
2086 | pm_pio->ireg_addr = t5_pm_rx_array[i][0]; |
2087 | pm_pio->ireg_data = t5_pm_rx_array[i][1]; |
2088 | pm_pio->ireg_local_offset = t5_pm_rx_array[i][2]; |
2089 | pm_pio->ireg_offset_range = t5_pm_rx_array[i][3]; |
		t4_read_indirect(padap,
				 pm_pio->ireg_addr,
				 pm_pio->ireg_data,
				 buff,
				 pm_pio->ireg_offset_range,
				 pm_pio->ireg_local_offset);
2096 | ch_pm++; |
2097 | } |
2098 | |
2099 | /* PM_TX */ |
2100 | n = sizeof(t5_pm_tx_array) / (IREG_NUM_ELEM * sizeof(u32)); |
2101 | for (i = 0; i < n; i++) { |
2102 | struct ireg_field *pm_pio = &ch_pm->tp_pio; |
2103 | u32 *buff = ch_pm->outbuf; |
2104 | |
2105 | pm_pio->ireg_addr = t5_pm_tx_array[i][0]; |
2106 | pm_pio->ireg_data = t5_pm_tx_array[i][1]; |
2107 | pm_pio->ireg_local_offset = t5_pm_tx_array[i][2]; |
2108 | pm_pio->ireg_offset_range = t5_pm_tx_array[i][3]; |
		t4_read_indirect(padap,
				 pm_pio->ireg_addr,
				 pm_pio->ireg_data,
				 buff,
				 pm_pio->ireg_offset_range,
				 pm_pio->ireg_local_offset);
		ch_pm++;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2118 | } |
2119 | |
2120 | int cudbg_collect_tid(struct cudbg_init *pdbg_init, |
2121 | struct cudbg_buffer *dbg_buff, |
2122 | struct cudbg_error *cudbg_err) |
2123 | { |
2124 | struct adapter *padap = pdbg_init->adap; |
2125 | struct cudbg_tid_info_region_rev1 *tid1; |
2126 | struct cudbg_buffer temp_buff = { 0 }; |
2127 | struct cudbg_tid_info_region *tid; |
2128 | u32 para[2], val[2]; |
2129 | int rc; |
2130 | |
	rc = cudbg_get_buff(pdbg_init, dbg_buff,
			    sizeof(struct cudbg_tid_info_region_rev1),
			    &temp_buff);
2134 | if (rc) |
2135 | return rc; |
2136 | |
2137 | tid1 = (struct cudbg_tid_info_region_rev1 *)temp_buff.data; |
2138 | tid = &tid1->tid; |
2139 | tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE; |
2140 | tid1->ver_hdr.revision = CUDBG_TID_INFO_REV; |
2141 | tid1->ver_hdr.size = sizeof(struct cudbg_tid_info_region_rev1) - |
2142 | sizeof(struct cudbg_ver_hdr); |
2143 | |
2144 | /* If firmware is not attached/alive, use backdoor register |
2145 | * access to collect dump. |
2146 | */ |
2147 | if (!is_fw_attached(pdbg_init)) |
2148 | goto fill_tid; |
2149 | |
2150 | #define FW_PARAM_PFVF_A(param) \ |
2151 | (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \ |
2152 | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \ |
2153 | FW_PARAMS_PARAM_Y_V(0) | \ |
2154 | FW_PARAMS_PARAM_Z_V(0)) |
2155 | |
2156 | para[0] = FW_PARAM_PFVF_A(ETHOFLD_START); |
2157 | para[1] = FW_PARAM_PFVF_A(ETHOFLD_END); |
	rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, para, val);
	if (rc < 0) {
		cudbg_err->sys_err = rc;
		cudbg_put_buff(pdbg_init, &temp_buff);
2162 | return rc; |
2163 | } |
2164 | tid->uotid_base = val[0]; |
2165 | tid->nuotids = val[1] - val[0] + 1; |
2166 | |
	if (is_t5(padap->params.chip)) {
		tid->sb = t4_read_reg(padap, LE_DB_SERVER_INDEX_A) / 4;
	} else if (is_t6(padap->params.chip)) {
		tid1->tid_start =
			t4_read_reg(padap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
		tid->sb = t4_read_reg(padap, LE_DB_SRVR_START_INDEX_A);
2173 | |
2174 | para[0] = FW_PARAM_PFVF_A(HPFILTER_START); |
2175 | para[1] = FW_PARAM_PFVF_A(HPFILTER_END); |
		rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2,
				     para, val);
		if (rc < 0) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
2181 | return rc; |
2182 | } |
2183 | tid->hpftid_base = val[0]; |
2184 | tid->nhpftids = val[1] - val[0] + 1; |
2185 | } |
2186 | |
2187 | #undef FW_PARAM_PFVF_A |
2188 | |
2189 | fill_tid: |
2190 | tid->ntids = padap->tids.ntids; |
2191 | tid->nstids = padap->tids.nstids; |
2192 | tid->stid_base = padap->tids.stid_base; |
2193 | tid->hash_base = padap->tids.hash_base; |
2194 | |
2195 | tid->natids = padap->tids.natids; |
2196 | tid->nftids = padap->tids.nftids; |
2197 | tid->ftid_base = padap->tids.ftid_base; |
2198 | tid->aftid_base = padap->tids.aftid_base; |
2199 | tid->aftid_end = padap->tids.aftid_end; |
2200 | |
2201 | tid->sftid_base = padap->tids.sftid_base; |
2202 | tid->nsftids = padap->tids.nsftids; |
2203 | |
2204 | tid->flags = padap->flags; |
	tid->le_db_conf = t4_read_reg(padap, LE_DB_CONFIG_A);
	tid->ip_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV4_A);
	tid->ipv6_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV6_A);

	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2210 | } |
2211 | |
2212 | int cudbg_collect_pcie_config(struct cudbg_init *pdbg_init, |
2213 | struct cudbg_buffer *dbg_buff, |
2214 | struct cudbg_error *cudbg_err) |
2215 | { |
2216 | struct adapter *padap = pdbg_init->adap; |
2217 | struct cudbg_buffer temp_buff = { 0 }; |
2218 | u32 size, *value, j; |
2219 | int i, rc, n; |
2220 | |
2221 | size = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS; |
2222 | n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32)); |
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2224 | if (rc) |
2225 | return rc; |
2226 | |
2227 | value = (u32 *)temp_buff.data; |
2228 | for (i = 0; i < n; i++) { |
2229 | for (j = t5_pcie_config_array[i][0]; |
2230 | j <= t5_pcie_config_array[i][1]; j += 4) { |
			t4_hw_pci_read_cfg4(padap, j, value);
			value++;
		}
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2236 | } |
2237 | |
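/* Check the "valid" bit of an SGE context image; the bit position
 * within the image depends on the context type.
 */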
2238 | static int cudbg_sge_ctxt_check_valid(u32 *buf, int type) |
2239 | { |
2240 | int index, bit, bit_pos = 0; |
2241 | |
2242 | switch (type) { |
2243 | case CTXT_EGRESS: |
2244 | bit_pos = 176; |
2245 | break; |
2246 | case CTXT_INGRESS: |
2247 | bit_pos = 141; |
2248 | break; |
2249 | case CTXT_FLM: |
2250 | bit_pos = 89; |
2251 | break; |
2252 | } |
2253 | index = bit_pos / 32; |
2254 | bit = bit_pos % 32; |
2255 | return buf[index] & (1U << bit); |
2256 | } |
2257 | |
2258 | static int cudbg_get_ctxt_region_info(struct adapter *padap, |
2259 | struct cudbg_region_info *ctx_info, |
2260 | u8 *mem_type) |
2261 | { |
2262 | struct cudbg_mem_desc mem_desc; |
2263 | struct cudbg_meminfo meminfo; |
2264 | u32 i, j, value, found; |
2265 | u8 flq; |
2266 | int rc; |
2267 | |
	rc = cudbg_fill_meminfo(padap, &meminfo);
2269 | if (rc) |
2270 | return rc; |
2271 | |
2272 | /* Get EGRESS and INGRESS context region size */ |
2273 | for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) { |
2274 | found = 0; |
2275 | memset(&mem_desc, 0, sizeof(struct cudbg_mem_desc)); |
2276 | for (j = 0; j < ARRAY_SIZE(meminfo.avail); j++) { |
			rc = cudbg_get_mem_region(padap, &meminfo, j,
						  cudbg_region[i],
						  &mem_desc);
			if (!rc) {
				found = 1;
				rc = cudbg_get_mem_relative(padap, &meminfo, j,
							    &mem_desc.base,
							    &mem_desc.limit);
2285 | if (rc) { |
2286 | ctx_info[i].exist = false; |
2287 | break; |
2288 | } |
2289 | ctx_info[i].exist = true; |
2290 | ctx_info[i].start = mem_desc.base; |
2291 | ctx_info[i].end = mem_desc.limit; |
2292 | mem_type[i] = j; |
2293 | break; |
2294 | } |
2295 | } |
2296 | if (!found) |
2297 | ctx_info[i].exist = false; |
2298 | } |
2299 | |
2300 | /* Get FLM and CNM max qid. */ |
	value = t4_read_reg(padap, SGE_FLM_CFG_A);
2302 | |
2303 | /* Get number of data freelist queues */ |
2304 | flq = HDRSTARTFLQ_G(value); |
2305 | ctx_info[CTXT_FLM].exist = true; |
2306 | ctx_info[CTXT_FLM].end = (CUDBG_MAX_FL_QIDS >> flq) * SGE_CTXT_SIZE; |
2307 | |
	/* The number of CONM contexts is the same as the number of
	 * freelist queues.
	 */
2311 | ctx_info[CTXT_CNM].exist = true; |
2312 | ctx_info[CTXT_CNM].end = ctx_info[CTXT_FLM].end; |
2313 | |
2314 | return 0; |
2315 | } |
2316 | |
2317 | int cudbg_dump_context_size(struct adapter *padap) |
2318 | { |
2319 | struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} }; |
2320 | u8 mem_type[CTXT_INGRESS + 1] = { 0 }; |
2321 | u32 i, size = 0; |
2322 | int rc; |
2323 | |
2324 | /* Get max valid qid for each type of queue */ |
	rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type);
2326 | if (rc) |
2327 | return rc; |
2328 | |
2329 | for (i = 0; i < CTXT_CNM; i++) { |
2330 | if (!region_info[i].exist) { |
2331 | if (i == CTXT_EGRESS || i == CTXT_INGRESS) |
2332 | size += CUDBG_LOWMEM_MAX_CTXT_QIDS * |
2333 | SGE_CTXT_SIZE; |
2334 | continue; |
2335 | } |
2336 | |
2337 | size += (region_info[i].end - region_info[i].start + 1) / |
2338 | SGE_CTXT_SIZE; |
2339 | } |
2340 | return size * sizeof(struct cudbg_ch_cntxt); |
2341 | } |
2342 | |
2343 | static void cudbg_read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid, |
2344 | enum ctxt_type ctype, u32 *data) |
2345 | { |
2346 | struct adapter *padap = pdbg_init->adap; |
2347 | int rc = -1; |
2348 | |
2349 | /* Under heavy traffic, the SGE Queue contexts registers will be |
2350 | * frequently accessed by firmware. |
2351 | * |
2352 | * To avoid conflicts with firmware, always ask firmware to fetch |
	 * the SGE Queue contexts via mailbox. On failure, fall back to
2354 | * accessing hardware registers directly. |
2355 | */ |
2356 | if (is_fw_attached(pdbg_init)) |
		rc = t4_sge_ctxt_rd(padap, padap->mbox, cid, ctype, data);
	if (rc)
		t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
2360 | } |
2361 | |
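/* Read contexts [0, max_qid) of the given type and append only the
 * valid ones to the output buffer. Each valid FLM context also pulls
 * in its paired CNM context.
 */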
2362 | static void cudbg_get_sge_ctxt_fw(struct cudbg_init *pdbg_init, u32 max_qid, |
2363 | u8 ctxt_type, |
2364 | struct cudbg_ch_cntxt **out_buff) |
2365 | { |
2366 | struct cudbg_ch_cntxt *buff = *out_buff; |
2367 | int rc; |
2368 | u32 j; |
2369 | |
2370 | for (j = 0; j < max_qid; j++) { |
		cudbg_read_sge_ctxt(pdbg_init, j, ctxt_type, buff->data);
		rc = cudbg_sge_ctxt_check_valid(buff->data, ctxt_type);
2373 | if (!rc) |
2374 | continue; |
2375 | |
2376 | buff->cntxt_type = ctxt_type; |
2377 | buff->cntxt_id = j; |
2378 | buff++; |
2379 | if (ctxt_type == CTXT_FLM) { |
			cudbg_read_sge_ctxt(pdbg_init, j, CTXT_CNM, buff->data);
2381 | buff->cntxt_type = CTXT_CNM; |
2382 | buff->cntxt_id = j; |
2383 | buff++; |
2384 | } |
2385 | } |
2386 | |
2387 | *out_buff = buff; |
2388 | } |
2389 | |
2390 | int cudbg_collect_dump_context(struct cudbg_init *pdbg_init, |
2391 | struct cudbg_buffer *dbg_buff, |
2392 | struct cudbg_error *cudbg_err) |
2393 | { |
2394 | struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} }; |
2395 | struct adapter *padap = pdbg_init->adap; |
2396 | u32 j, size, max_ctx_size, max_ctx_qid; |
2397 | u8 mem_type[CTXT_INGRESS + 1] = { 0 }; |
2398 | struct cudbg_buffer temp_buff = { 0 }; |
2399 | struct cudbg_ch_cntxt *buff; |
2400 | u8 *ctx_buf; |
2401 | u8 i, k; |
2402 | int rc; |
2403 | |
2404 | /* Get max valid qid for each type of queue */ |
	rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type);
2406 | if (rc) |
2407 | return rc; |
2408 | |
2409 | rc = cudbg_dump_context_size(padap); |
2410 | if (rc <= 0) |
2411 | return CUDBG_STATUS_ENTITY_NOT_FOUND; |
2412 | |
2413 | size = rc; |
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2415 | if (rc) |
2416 | return rc; |
2417 | |
2418 | /* Get buffer with enough space to read the biggest context |
2419 | * region in memory. |
2420 | */ |
2421 | max_ctx_size = max(region_info[CTXT_EGRESS].end - |
2422 | region_info[CTXT_EGRESS].start + 1, |
2423 | region_info[CTXT_INGRESS].end - |
2424 | region_info[CTXT_INGRESS].start + 1); |
2425 | |
	ctx_buf = kvzalloc(max_ctx_size, GFP_KERNEL);
	if (!ctx_buf) {
		cudbg_put_buff(pdbg_init, &temp_buff);
2429 | return -ENOMEM; |
2430 | } |
2431 | |
2432 | buff = (struct cudbg_ch_cntxt *)temp_buff.data; |
2433 | |
	/* Collect EGRESS and INGRESS context data.
	 * In case of failures, fall back to collecting via FW or
	 * backdoor access.
	 */
2438 | for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) { |
2439 | if (!region_info[i].exist) { |
2440 | max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS; |
			cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i,
					      &buff);
2443 | continue; |
2444 | } |
2445 | |
2446 | max_ctx_size = region_info[i].end - region_info[i].start + 1; |
2447 | max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE; |
2448 | |
2449 | /* If firmware is not attached/alive, use backdoor register |
2450 | * access to collect dump. |
2451 | */ |
2452 | if (is_fw_attached(pdbg_init)) { |
			t4_sge_ctxt_flush(padap, padap->mbox, i);

			rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type[i],
					  region_info[i].start, max_ctx_size,
					  (__be32 *)ctx_buf, 1);
2458 | } |
2459 | |
2460 | if (rc || !is_fw_attached(pdbg_init)) { |
2461 | max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS; |
			cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i,
					      &buff);
2464 | continue; |
2465 | } |
2466 | |
2467 | for (j = 0; j < max_ctx_qid; j++) { |
2468 | __be64 *dst_off; |
2469 | u64 *src_off; |
2470 | |
2471 | src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE); |
2472 | dst_off = (__be64 *)buff->data; |
2473 | |
2474 | /* The data is stored in 64-bit cpu order. Convert it |
2475 | * to big endian before parsing. |
2476 | */ |
2477 | for (k = 0; k < SGE_CTXT_SIZE / sizeof(u64); k++) |
2478 | dst_off[k] = cpu_to_be64(src_off[k]); |
2479 | |
			rc = cudbg_sge_ctxt_check_valid(buff->data, i);
2481 | if (!rc) |
2482 | continue; |
2483 | |
2484 | buff->cntxt_type = i; |
2485 | buff->cntxt_id = j; |
2486 | buff++; |
2487 | } |
2488 | } |
2489 | |
	kvfree(ctx_buf);
2491 | |
2492 | /* Collect FREELIST and CONGESTION MANAGER contexts */ |
2493 | max_ctx_size = region_info[CTXT_FLM].end - |
2494 | region_info[CTXT_FLM].start + 1; |
2495 | max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE; |
2496 | /* Since FLM and CONM are 1-to-1 mapped, the below function |
2497 | * will fetch both FLM and CONM contexts. |
2498 | */ |
	cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, CTXT_FLM, &buff);

	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2502 | } |
2503 | |
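/* Convert an MPS TCAM (x, y) pair into an address/mask pair: the mask
 * is x | y and the MAC address comes from the low 48 bits of y.
 */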
2504 | static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask) |
2505 | { |
2506 | *mask = x | y; |
2507 | y = (__force u64)cpu_to_be64(y); |
2508 | memcpy(addr, (char *)&y + 2, ETH_ALEN); |
2509 | } |
2510 | |
2511 | static void cudbg_mps_rpl_backdoor(struct adapter *padap, |
2512 | struct fw_ldst_mps_rplc *mps_rplc) |
2513 | { |
	if (is_t5(padap->params.chip)) {
2515 | mps_rplc->rplc255_224 = htonl(t4_read_reg(padap, |
2516 | MPS_VF_RPLCT_MAP3_A)); |
2517 | mps_rplc->rplc223_192 = htonl(t4_read_reg(padap, |
2518 | MPS_VF_RPLCT_MAP2_A)); |
2519 | mps_rplc->rplc191_160 = htonl(t4_read_reg(padap, |
2520 | MPS_VF_RPLCT_MAP1_A)); |
2521 | mps_rplc->rplc159_128 = htonl(t4_read_reg(padap, |
2522 | MPS_VF_RPLCT_MAP0_A)); |
2523 | } else { |
2524 | mps_rplc->rplc255_224 = htonl(t4_read_reg(padap, |
2525 | MPS_VF_RPLCT_MAP7_A)); |
2526 | mps_rplc->rplc223_192 = htonl(t4_read_reg(padap, |
2527 | MPS_VF_RPLCT_MAP6_A)); |
2528 | mps_rplc->rplc191_160 = htonl(t4_read_reg(padap, |
2529 | MPS_VF_RPLCT_MAP5_A)); |
2530 | mps_rplc->rplc159_128 = htonl(t4_read_reg(padap, |
2531 | MPS_VF_RPLCT_MAP4_A)); |
2532 | } |
2533 | mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP3_A)); |
2534 | mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP2_A)); |
2535 | mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP1_A)); |
2536 | mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP0_A)); |
2537 | } |
2538 | |
2539 | static int cudbg_collect_tcam_index(struct cudbg_init *pdbg_init, |
2540 | struct cudbg_mps_tcam *tcam, u32 idx) |
2541 | { |
2542 | struct adapter *padap = pdbg_init->adap; |
2543 | u64 tcamy, tcamx, val; |
2544 | u32 ctl, data2; |
2545 | int rc = 0; |
2546 | |
2547 | if (CHELSIO_CHIP_VERSION(padap->params.chip) >= CHELSIO_T6) { |
2548 | /* CtlReqID - 1: use Host Driver Requester ID |
2549 | * CtlCmdType - 0: Read, 1: Write |
2550 | * CtlTcamSel - 0: TCAM0, 1: TCAM1 |
2551 | * CtlXYBitSel- 0: Y bit, 1: X bit |
2552 | */ |
2553 | |
2554 | /* Read tcamy */ |
2555 | ctl = CTLREQID_V(1) | CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0); |
2556 | if (idx < 256) |
2557 | ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0); |
2558 | else |
2559 | ctl |= CTLTCAMINDEX_V(idx - 256) | CTLTCAMSEL_V(1); |
2560 | |
		t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
		val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
		tcamy = DMACH_G(val) << 32;
		tcamy |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
		data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
2566 | tcam->lookup_type = DATALKPTYPE_G(data2); |
2567 | |
2568 | /* 0 - Outer header, 1 - Inner header |
2569 | * [71:48] bit locations are overloaded for |
2570 | * outer vs. inner lookup types. |
2571 | */ |
2572 | if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) { |
2573 | /* Inner header VNI */ |
2574 | tcam->vniy = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2); |
2575 | tcam->vniy = (tcam->vniy << 16) | VIDL_G(val); |
2576 | tcam->dip_hit = data2 & DATADIPHIT_F; |
2577 | } else { |
2578 | tcam->vlan_vld = data2 & DATAVIDH2_F; |
2579 | tcam->ivlan = VIDL_G(val); |
2580 | } |
2581 | |
2582 | tcam->port_num = DATAPORTNUM_G(data2); |
2583 | |
2584 | /* Read tcamx. Change the control param */ |
2585 | ctl |= CTLXYBITSEL_V(1); |
		t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
		val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
		tcamx = DMACH_G(val) << 32;
		tcamx |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
		data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
2591 | if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) { |
2592 | /* Inner header VNI mask */ |
2593 | tcam->vnix = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2); |
2594 | tcam->vnix = (tcam->vnix << 16) | VIDL_G(val); |
2595 | } |
2596 | } else { |
		tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(idx));
		tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(idx));
2599 | } |
2600 | |
2601 | /* If no entry, return */ |
2602 | if (tcamx & tcamy) |
2603 | return rc; |
2604 | |
	tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(idx));
	tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(idx));

	if (is_t5(padap->params.chip))
		tcam->repli = (tcam->cls_lo & REPLICATE_F);
	else if (is_t6(padap->params.chip))
		tcam->repli = (tcam->cls_lo & T6_REPLICATE_F);
2612 | |
2613 | if (tcam->repli) { |
2614 | struct fw_ldst_cmd ldst_cmd; |
2615 | struct fw_ldst_mps_rplc mps_rplc; |
2616 | |
2617 | memset(&ldst_cmd, 0, sizeof(ldst_cmd)); |
2618 | ldst_cmd.op_to_addrspace = |
2619 | htonl(FW_CMD_OP_V(FW_LDST_CMD) | |
2620 | FW_CMD_REQUEST_F | FW_CMD_READ_F | |
2621 | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS)); |
2622 | ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd)); |
2623 | ldst_cmd.u.mps.rplc.fid_idx = |
2624 | htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) | |
2625 | FW_LDST_CMD_IDX_V(idx)); |
2626 | |
2627 | /* If firmware is not attached/alive, use backdoor register |
2628 | * access to collect dump. |
2629 | */ |
2630 | if (is_fw_attached(pdbg_init)) |
			rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd,
					sizeof(ldst_cmd), &ldst_cmd);

		if (rc || !is_fw_attached(pdbg_init)) {
			cudbg_mps_rpl_backdoor(padap, &mps_rplc);
2636 | /* Ignore error since we collected directly from |
2637 | * reading registers. |
2638 | */ |
2639 | rc = 0; |
2640 | } else { |
2641 | mps_rplc = ldst_cmd.u.mps.rplc; |
2642 | } |
2643 | |
2644 | tcam->rplc[0] = ntohl(mps_rplc.rplc31_0); |
2645 | tcam->rplc[1] = ntohl(mps_rplc.rplc63_32); |
2646 | tcam->rplc[2] = ntohl(mps_rplc.rplc95_64); |
2647 | tcam->rplc[3] = ntohl(mps_rplc.rplc127_96); |
2648 | if (padap->params.arch.mps_rplc_size > CUDBG_MAX_RPLC_SIZE) { |
2649 | tcam->rplc[4] = ntohl(mps_rplc.rplc159_128); |
2650 | tcam->rplc[5] = ntohl(mps_rplc.rplc191_160); |
2651 | tcam->rplc[6] = ntohl(mps_rplc.rplc223_192); |
2652 | tcam->rplc[7] = ntohl(mps_rplc.rplc255_224); |
2653 | } |
2654 | } |
	cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask);
2656 | tcam->idx = idx; |
2657 | tcam->rplc_size = padap->params.arch.mps_rplc_size; |
2658 | return rc; |
2659 | } |
2660 | |
2661 | int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init, |
2662 | struct cudbg_buffer *dbg_buff, |
2663 | struct cudbg_error *cudbg_err) |
2664 | { |
2665 | struct adapter *padap = pdbg_init->adap; |
2666 | struct cudbg_buffer temp_buff = { 0 }; |
2667 | u32 size = 0, i, n, total_size = 0; |
2668 | struct cudbg_mps_tcam *tcam; |
2669 | int rc; |
2670 | |
2671 | n = padap->params.arch.mps_tcam_size; |
2672 | size = sizeof(struct cudbg_mps_tcam) * n; |
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2674 | if (rc) |
2675 | return rc; |
2676 | |
2677 | tcam = (struct cudbg_mps_tcam *)temp_buff.data; |
2678 | for (i = 0; i < n; i++) { |
		rc = cudbg_collect_tcam_index(pdbg_init, tcam, i);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
2683 | return rc; |
2684 | } |
2685 | total_size += sizeof(struct cudbg_mps_tcam); |
2686 | tcam++; |
2687 | } |
2688 | |
2689 | if (!total_size) { |
2690 | rc = CUDBG_SYSTEM_ERROR; |
2691 | cudbg_err->sys_err = rc; |
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2696 | } |
2697 | |
2698 | int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init, |
2699 | struct cudbg_buffer *dbg_buff, |
2700 | struct cudbg_error *cudbg_err) |
2701 | { |
2702 | struct adapter *padap = pdbg_init->adap; |
2703 | struct cudbg_buffer temp_buff = { 0 }; |
2704 | char vpd_str[CUDBG_VPD_VER_LEN + 1]; |
2705 | struct cudbg_vpd_data *vpd_data; |
2706 | struct vpd_params vpd = { 0 }; |
2707 | u32 vpd_vers, fw_vers; |
2708 | int rc; |
2709 | |
	rc = t4_get_raw_vpd_params(padap, &vpd);
2711 | if (rc) |
2712 | return rc; |
2713 | |
	rc = t4_get_fw_version(padap, &fw_vers);
2715 | if (rc) |
2716 | return rc; |
2717 | |
	rc = cudbg_read_vpd_reg(padap, CUDBG_VPD_VER_ADDR, CUDBG_VPD_VER_LEN,
				vpd_str);
2720 | if (rc) |
2721 | return rc; |
2722 | |
2723 | vpd_str[CUDBG_VPD_VER_LEN] = '\0'; |
	rc = kstrtouint(vpd_str, 0, &vpd_vers);
2725 | if (rc) |
2726 | return rc; |
2727 | |
	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_vpd_data),
			    &temp_buff);
2730 | if (rc) |
2731 | return rc; |
2732 | |
2733 | vpd_data = (struct cudbg_vpd_data *)temp_buff.data; |
2734 | memcpy(vpd_data->sn, vpd.sn, SERNUM_LEN + 1); |
2735 | memcpy(vpd_data->bn, vpd.pn, PN_LEN + 1); |
2736 | memcpy(vpd_data->na, vpd.na, MACADDR_LEN + 1); |
2737 | memcpy(vpd_data->mn, vpd.id, ID_LEN + 1); |
	vpd_data->scfg_vers = t4_read_reg(padap, PCIE_STATIC_SPARE2_A);
2739 | vpd_data->vpd_vers = vpd_vers; |
2740 | vpd_data->fw_major = FW_HDR_FW_VER_MAJOR_G(fw_vers); |
2741 | vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(fw_vers); |
2742 | vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(fw_vers); |
2743 | vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(fw_vers); |
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2745 | } |
2746 | |
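/* Read one tid entry from the LE via the DBGI interface: issue the
 * command, poll for completion, and copy out the response data.
 */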
2747 | static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid, |
2748 | struct cudbg_tid_data *tid_data) |
2749 | { |
2750 | struct adapter *padap = pdbg_init->adap; |
2751 | int i, cmd_retry = 8; |
2752 | u32 val; |
2753 | |
2754 | /* Fill REQ_DATA regs with 0's */ |
2755 | for (i = 0; i < NUM_LE_DB_DBGI_REQ_DATA_INSTANCES; i++) |
		t4_write_reg(padap, LE_DB_DBGI_REQ_DATA_A + (i << 2), 0);
2757 | |
	/* Write DBGI command */
	val = DBGICMD_V(4) | DBGITID_V(tid);
	t4_write_reg(padap, LE_DB_DBGI_REQ_TCAM_CMD_A, val);
2761 | tid_data->dbig_cmd = val; |
2762 | |
2763 | val = DBGICMDSTRT_F | DBGICMDMODE_V(1); /* LE mode */ |
	t4_write_reg(padap, LE_DB_DBGI_CONFIG_A, val);
2765 | tid_data->dbig_conf = val; |
2766 | |
2767 | /* Poll the DBGICMDBUSY bit */ |
2768 | val = 1; |
2769 | while (val) { |
		val = t4_read_reg(padap, LE_DB_DBGI_CONFIG_A);
2771 | val = val & DBGICMDBUSY_F; |
2772 | cmd_retry--; |
2773 | if (!cmd_retry) |
2774 | return CUDBG_SYSTEM_ERROR; |
2775 | } |
2776 | |
2777 | /* Check RESP status */ |
	val = t4_read_reg(padap, LE_DB_DBGI_RSP_STATUS_A);
2779 | tid_data->dbig_rsp_stat = val; |
2780 | if (!(val & 1)) |
2781 | return CUDBG_SYSTEM_ERROR; |
2782 | |
2783 | /* Read RESP data */ |
	for (i = 0; i < NUM_LE_DB_DBGI_RSP_DATA_INSTANCES; i++)
		tid_data->data[i] = t4_read_reg(padap,
						LE_DB_DBGI_RSP_DATA_A +
						(i << 2));
2788 | tid_data->tid = tid; |
2789 | return 0; |
2790 | } |
2791 | |
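/* Classify a tid by comparing it against the LE TCAM region boundaries. */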
2792 | static int cudbg_get_le_type(u32 tid, struct cudbg_tcam tcam_region) |
2793 | { |
2794 | int type = LE_ET_UNKNOWN; |
2795 | |
2796 | if (tid < tcam_region.server_start) |
2797 | type = LE_ET_TCAM_CON; |
2798 | else if (tid < tcam_region.filter_start) |
2799 | type = LE_ET_TCAM_SERVER; |
2800 | else if (tid < tcam_region.clip_start) |
2801 | type = LE_ET_TCAM_FILTER; |
2802 | else if (tid < tcam_region.routing_start) |
2803 | type = LE_ET_TCAM_CLIP; |
2804 | else if (tid < tcam_region.tid_hash_base) |
2805 | type = LE_ET_TCAM_ROUTING; |
2806 | else if (tid < tcam_region.max_tid) |
2807 | type = LE_ET_HASH_CON; |
2808 | else |
2809 | type = LE_ET_INVALID_TID; |
2810 | |
2811 | return type; |
2812 | } |
2813 | |
2814 | static int cudbg_is_ipv6_entry(struct cudbg_tid_data *tid_data, |
2815 | struct cudbg_tcam tcam_region) |
2816 | { |
2817 | int ipv6 = 0; |
2818 | int le_type; |
2819 | |
	le_type = cudbg_get_le_type(tid_data->tid, tcam_region);
2821 | if (tid_data->tid & 1) |
2822 | return 0; |
2823 | |
2824 | if (le_type == LE_ET_HASH_CON) { |
2825 | ipv6 = tid_data->data[16] & 0x8000; |
2826 | } else if (le_type == LE_ET_TCAM_CON) { |
2827 | ipv6 = tid_data->data[16] & 0x8000; |
2828 | if (ipv6) |
2829 | ipv6 = tid_data->data[9] == 0x00C00000; |
2830 | } else { |
2831 | ipv6 = 0; |
2832 | } |
2833 | return ipv6; |
2834 | } |
2835 | |
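/* Fill in the LE TCAM region boundaries and the maximum tid count,
 * accounting for hash support and the separate T6 CLIP TCAM.
 */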
2836 | void cudbg_fill_le_tcam_info(struct adapter *padap, |
2837 | struct cudbg_tcam *tcam_region) |
2838 | { |
2839 | u32 value; |
2840 | |
2841 | /* Get the LE regions */ |
	value = t4_read_reg(padap, LE_DB_TID_HASHBASE_A); /* hash base index */
2843 | tcam_region->tid_hash_base = value; |
2844 | |
2845 | /* Get routing table index */ |
	value = t4_read_reg(padap, LE_DB_ROUTING_TABLE_INDEX_A);
2847 | tcam_region->routing_start = value; |
2848 | |
	/* Get clip table index. For T6 there is a separate CLIP TCAM */
	if (is_t6(padap->params.chip))
		value = t4_read_reg(padap, LE_DB_CLCAM_TID_BASE_A);
	else
		value = t4_read_reg(padap, LE_DB_CLIP_TABLE_INDEX_A);
2854 | tcam_region->clip_start = value; |
2855 | |
2856 | /* Get filter table index */ |
	value = t4_read_reg(padap, LE_DB_FILTER_TABLE_INDEX_A);
2858 | tcam_region->filter_start = value; |
2859 | |
2860 | /* Get server table index */ |
	value = t4_read_reg(padap, LE_DB_SERVER_INDEX_A);
2862 | tcam_region->server_start = value; |
2863 | |
2864 | /* Check whether hash is enabled and calculate the max tids */ |
	value = t4_read_reg(padap, LE_DB_CONFIG_A);
	if ((value >> HASHEN_S) & 1) {
		value = t4_read_reg(padap, LE_DB_HASH_CONFIG_A);
2868 | if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) { |
2869 | tcam_region->max_tid = (value & 0xFFFFF) + |
2870 | tcam_region->tid_hash_base; |
2871 | } else { |
2872 | value = HASHTIDSIZE_G(value); |
2873 | value = 1 << value; |
2874 | tcam_region->max_tid = value + |
2875 | tcam_region->tid_hash_base; |
2876 | } |
2877 | } else { /* hash not enabled */ |
		if (is_t6(padap->params.chip))
2879 | tcam_region->max_tid = (value & ASLIPCOMPEN_F) ? |
2880 | CUDBG_MAX_TID_COMP_EN : |
2881 | CUDBG_MAX_TID_COMP_DIS; |
2882 | else |
2883 | tcam_region->max_tid = CUDBG_MAX_TCAM_TID; |
2884 | } |
2885 | |
	if (is_t6(padap->params.chip))
2887 | tcam_region->max_tid += CUDBG_T6_CLIP; |
2888 | } |
2889 | |
2890 | int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init, |
2891 | struct cudbg_buffer *dbg_buff, |
2892 | struct cudbg_error *cudbg_err) |
2893 | { |
2894 | struct adapter *padap = pdbg_init->adap; |
2895 | struct cudbg_buffer temp_buff = { 0 }; |
2896 | struct cudbg_tcam tcam_region = { 0 }; |
2897 | struct cudbg_tid_data *tid_data; |
2898 | u32 bytes = 0; |
2899 | int rc, size; |
2900 | u32 i; |
2901 | |
	cudbg_fill_le_tcam_info(padap, &tcam_region);
2903 | |
2904 | size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid; |
2905 | size += sizeof(struct cudbg_tcam); |
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2907 | if (rc) |
2908 | return rc; |
2909 | |
2910 | memcpy(temp_buff.data, &tcam_region, sizeof(struct cudbg_tcam)); |
2911 | bytes = sizeof(struct cudbg_tcam); |
2912 | tid_data = (struct cudbg_tid_data *)(temp_buff.data + bytes); |
	/* Read all tids */
	for (i = 0; i < tcam_region.max_tid; ) {
		rc = cudbg_read_tid(pdbg_init, i, tid_data);
2916 | if (rc) { |
2917 | cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA; |
2918 | /* Update tcam header and exit */ |
2919 | tcam_region.max_tid = i; |
2920 | memcpy(temp_buff.data, &tcam_region, |
2921 | sizeof(struct cudbg_tcam)); |
2922 | goto out; |
2923 | } |
2924 | |
2925 | if (cudbg_is_ipv6_entry(tid_data, tcam_region)) { |
2926 | /* T6 CLIP TCAM: ipv6 takes 4 entries */ |
			if (is_t6(padap->params.chip) &&
2928 | i >= tcam_region.clip_start && |
2929 | i < tcam_region.clip_start + CUDBG_T6_CLIP) |
2930 | i += 4; |
2931 | else /* Main TCAM: ipv6 takes two tids */ |
2932 | i += 2; |
2933 | } else { |
2934 | i++; |
2935 | } |
2936 | |
2937 | tid_data++; |
2938 | bytes += sizeof(struct cudbg_tid_data); |
2939 | } |
2940 | |
2941 | out: |
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2943 | } |
2944 | |
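/* Dump the congestion control table (NMTUS x NCCTRL_WIN u16 entries). */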
2945 | int cudbg_collect_cctrl(struct cudbg_init *pdbg_init, |
2946 | struct cudbg_buffer *dbg_buff, |
2947 | struct cudbg_error *cudbg_err) |
2948 | { |
2949 | struct adapter *padap = pdbg_init->adap; |
2950 | struct cudbg_buffer temp_buff = { 0 }; |
2951 | u32 size; |
2952 | int rc; |
2953 | |
2954 | size = sizeof(u16) * NMTUS * NCCTRL_WIN; |
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2956 | if (rc) |
2957 | return rc; |
2958 | |
	t4_read_cong_tbl(padap, (void *)temp_buff.data);
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2961 | } |
2962 | |
2963 | int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init, |
2964 | struct cudbg_buffer *dbg_buff, |
2965 | struct cudbg_error *cudbg_err) |
2966 | { |
2967 | struct adapter *padap = pdbg_init->adap; |
2968 | struct cudbg_buffer temp_buff = { 0 }; |
2969 | struct ireg_buf *ma_indr; |
2970 | int i, rc, n; |
2971 | u32 size, j; |
2972 | |
2973 | if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6) |
2974 | return CUDBG_STATUS_ENTITY_NOT_FOUND; |
2975 | |
2976 | n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32)); |
2977 | size = sizeof(struct ireg_buf) * n * 2; |
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2979 | if (rc) |
2980 | return rc; |
2981 | |
2982 | ma_indr = (struct ireg_buf *)temp_buff.data; |
2983 | for (i = 0; i < n; i++) { |
2984 | struct ireg_field *ma_fli = &ma_indr->tp_pio; |
2985 | u32 *buff = ma_indr->outbuf; |
2986 | |
2987 | ma_fli->ireg_addr = t6_ma_ireg_array[i][0]; |
2988 | ma_fli->ireg_data = t6_ma_ireg_array[i][1]; |
2989 | ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2]; |
2990 | ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3]; |
		t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
				 buff, ma_fli->ireg_offset_range,
				 ma_fli->ireg_local_offset);
2994 | ma_indr++; |
2995 | } |
2996 | |
2997 | n = sizeof(t6_ma_ireg_array2) / (IREG_NUM_ELEM * sizeof(u32)); |
2998 | for (i = 0; i < n; i++) { |
2999 | struct ireg_field *ma_fli = &ma_indr->tp_pio; |
3000 | u32 *buff = ma_indr->outbuf; |
3001 | |
3002 | ma_fli->ireg_addr = t6_ma_ireg_array2[i][0]; |
3003 | ma_fli->ireg_data = t6_ma_ireg_array2[i][1]; |
3004 | ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2]; |
3005 | for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) { |
			t4_read_indirect(padap, ma_fli->ireg_addr,
					 ma_fli->ireg_data, buff, 1,
					 ma_fli->ireg_local_offset);
3009 | buff++; |
3010 | ma_fli->ireg_local_offset += 0x20; |
3011 | } |
3012 | ma_indr++; |
3013 | } |
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
3015 | } |
3016 | |
3017 | int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init, |
3018 | struct cudbg_buffer *dbg_buff, |
3019 | struct cudbg_error *cudbg_err) |
3020 | { |
3021 | struct adapter *padap = pdbg_init->adap; |
3022 | struct cudbg_buffer temp_buff = { 0 }; |
3023 | struct cudbg_ulptx_la *ulptx_la_buff; |
3024 | struct cudbg_ver_hdr *ver_hdr; |
3025 | u32 i, j; |
3026 | int rc; |
3027 | |
	rc = cudbg_get_buff(pdbg_init, dbg_buff,
			    sizeof(struct cudbg_ver_hdr) +
			    sizeof(struct cudbg_ulptx_la),
			    &temp_buff);
3032 | if (rc) |
3033 | return rc; |
3034 | |
3035 | ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data; |
3036 | ver_hdr->signature = CUDBG_ENTITY_SIGNATURE; |
3037 | ver_hdr->revision = CUDBG_ULPTX_LA_REV; |
3038 | ver_hdr->size = sizeof(struct cudbg_ulptx_la); |
3039 | |
3040 | ulptx_la_buff = (struct cudbg_ulptx_la *)(temp_buff.data + |
3041 | sizeof(*ver_hdr)); |
3042 | for (i = 0; i < CUDBG_NUM_ULPTX; i++) { |
		ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
						      ULP_TX_LA_RDPTR_0_A +
						      0x10 * i);
		ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
						      ULP_TX_LA_WRPTR_0_A +
						      0x10 * i);
		ulptx_la_buff->rddata[i] = t4_read_reg(padap,
						       ULP_TX_LA_RDDATA_0_A +
						       0x10 * i);
		for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++)
			ulptx_la_buff->rd_data[i][j] =
				t4_read_reg(padap,
					    ULP_TX_LA_RDDATA_0_A + 0x10 * i);
3056 | } |
3057 | |
3058 | for (i = 0; i < CUDBG_NUM_ULPTX_ASIC_READ; i++) { |
		t4_write_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A, 0x1);
		ulptx_la_buff->rdptr_asic[i] =
				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A);
		ulptx_la_buff->rddata_asic[i][0] =
				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_0_A);
		ulptx_la_buff->rddata_asic[i][1] =
				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_1_A);
		ulptx_la_buff->rddata_asic[i][2] =
				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_2_A);
		ulptx_la_buff->rddata_asic[i][3] =
				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_3_A);
		ulptx_la_buff->rddata_asic[i][4] =
				t4_read_reg(padap, ULP_TX_ASIC_DEBUG_4_A);
		ulptx_la_buff->rddata_asic[i][5] =
				t4_read_reg(padap, PM_RX_BASE_ADDR);
3074 | } |
3075 | |
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
3077 | } |
3078 | |
3079 | int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init, |
3080 | struct cudbg_buffer *dbg_buff, |
3081 | struct cudbg_error *cudbg_err) |
3082 | { |
3083 | struct adapter *padap = pdbg_init->adap; |
3084 | struct cudbg_buffer temp_buff = { 0 }; |
3085 | u32 local_offset, local_range; |
3086 | struct ireg_buf *up_cim; |
3087 | u32 size, j, iter; |
3088 | u32 instance = 0; |
3089 | int i, rc, n; |
3090 | |
	if (is_t5(padap->params.chip))
		n = sizeof(t5_up_cim_reg_array) /
		    ((IREG_NUM_ELEM + 1) * sizeof(u32));
	else if (is_t6(padap->params.chip))
		n = sizeof(t6_up_cim_reg_array) /
		    ((IREG_NUM_ELEM + 1) * sizeof(u32));
3097 | else |
3098 | return CUDBG_STATUS_NOT_IMPLEMENTED; |
3099 | |
3100 | size = sizeof(struct ireg_buf) * n; |
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
3102 | if (rc) |
3103 | return rc; |
3104 | |
3105 | up_cim = (struct ireg_buf *)temp_buff.data; |
3106 | for (i = 0; i < n; i++) { |
3107 | struct ireg_field *up_cim_reg = &up_cim->tp_pio; |
3108 | u32 *buff = up_cim->outbuf; |
3109 | |
		if (is_t5(padap->params.chip)) {
3111 | up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0]; |
3112 | up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1]; |
3113 | up_cim_reg->ireg_local_offset = |
3114 | t5_up_cim_reg_array[i][2]; |
3115 | up_cim_reg->ireg_offset_range = |
3116 | t5_up_cim_reg_array[i][3]; |
3117 | instance = t5_up_cim_reg_array[i][4]; |
		} else if (is_t6(padap->params.chip)) {
3119 | up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0]; |
3120 | up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1]; |
3121 | up_cim_reg->ireg_local_offset = |
3122 | t6_up_cim_reg_array[i][2]; |
3123 | up_cim_reg->ireg_offset_range = |
3124 | t6_up_cim_reg_array[i][3]; |
3125 | instance = t6_up_cim_reg_array[i][4]; |
3126 | } |
3127 | |
3128 | switch (instance) { |
3129 | case NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES: |
3130 | iter = up_cim_reg->ireg_offset_range; |
3131 | local_offset = 0x120; |
3132 | local_range = 1; |
3133 | break; |
3134 | case NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES: |
3135 | iter = up_cim_reg->ireg_offset_range; |
3136 | local_offset = 0x10; |
3137 | local_range = 1; |
3138 | break; |
3139 | default: |
3140 | iter = 1; |
3141 | local_offset = 0; |
3142 | local_range = up_cim_reg->ireg_offset_range; |
3143 | break; |
3144 | } |
3145 | |
3146 | for (j = 0; j < iter; j++, buff++) { |
			rc = t4_cim_read(padap,
					 up_cim_reg->ireg_local_offset +
					 (j * local_offset), local_range, buff);
			if (rc) {
				cudbg_put_buff(pdbg_init, &temp_buff);
3152 | return rc; |
3153 | } |
3154 | } |
3155 | up_cim++; |
3156 | } |
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
3158 | } |
3159 | |
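/* Dump the CIM PBT tables: dynamic entries, static entries (which
 * start at bit 6 of the PBT address), LRF entries, and the PBT data
 * words, each read one 32-bit word at a time through t4_cim_read().
 */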
3160 | int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init, |
3161 | struct cudbg_buffer *dbg_buff, |
3162 | struct cudbg_error *cudbg_err) |
3163 | { |
3164 | struct adapter *padap = pdbg_init->adap; |
3165 | struct cudbg_buffer temp_buff = { 0 }; |
3166 | struct cudbg_pbt_tables *pbt; |
3167 | int i, rc; |
3168 | u32 addr; |
3169 | |
	rc = cudbg_get_buff(pdbg_init, dbg_buff,
			    sizeof(struct cudbg_pbt_tables),
			    &temp_buff);
3173 | if (rc) |
3174 | return rc; |
3175 | |
3176 | pbt = (struct cudbg_pbt_tables *)temp_buff.data; |
3177 | /* PBT dynamic entries */ |
3178 | addr = CUDBG_CHAC_PBT_ADDR; |
3179 | for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) { |
		rc = t4_cim_read(padap, addr + (i * 4), 1,
				 &pbt->pbt_dynamic[i]);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
3185 | return rc; |
3186 | } |
3187 | } |
3188 | |
3189 | /* PBT static entries */ |
3190 | /* static entries start when bit 6 is set */ |
3191 | addr = CUDBG_CHAC_PBT_ADDR + (1 << 6); |
3192 | for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) { |
		rc = t4_cim_read(padap, addr + (i * 4), 1,
				 &pbt->pbt_static[i]);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
3198 | return rc; |
3199 | } |
3200 | } |
3201 | |
3202 | /* LRF entries */ |
3203 | addr = CUDBG_CHAC_PBT_LRF; |
3204 | for (i = 0; i < CUDBG_LRF_ENTRIES; i++) { |
		rc = t4_cim_read(padap, addr + (i * 4), 1,
				 &pbt->lrf_table[i]);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
3210 | return rc; |
3211 | } |
3212 | } |
3213 | |
3214 | /* PBT data entries */ |
3215 | addr = CUDBG_CHAC_PBT_DATA; |
3216 | for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) { |
		rc = t4_cim_read(padap, addr + (i * 4), 1,
				 &pbt->pbt_data[i]);
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(pdbg_init, &temp_buff);
3222 | return rc; |
3223 | } |
3224 | } |
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
3226 | } |
3227 | |
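/* Dump the firmware mailbox command log. The log is a circular buffer
 * of log->size entries starting at log->cursor; slots with a zero
 * timestamp are unused and skipped, and each 64-bit command flit is
 * split into 32-bit hi/lo halves in the dump.
 */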
3228 | int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init, |
3229 | struct cudbg_buffer *dbg_buff, |
3230 | struct cudbg_error *cudbg_err) |
3231 | { |
3232 | struct adapter *padap = pdbg_init->adap; |
3233 | struct cudbg_mbox_log *mboxlog = NULL; |
3234 | struct cudbg_buffer temp_buff = { 0 }; |
3235 | struct mbox_cmd_log *log = NULL; |
3236 | struct mbox_cmd *entry; |
3237 | unsigned int entry_idx; |
3238 | u16 mbox_cmds; |
3239 | int i, k, rc; |
3240 | u64 flit; |
3241 | u32 size; |
3242 | |
3243 | log = padap->mbox_log; |
3244 | mbox_cmds = padap->mbox_log->size; |
3245 | size = sizeof(struct cudbg_mbox_log) * mbox_cmds; |
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
3247 | if (rc) |
3248 | return rc; |
3249 | |
3250 | mboxlog = (struct cudbg_mbox_log *)temp_buff.data; |
3251 | for (k = 0; k < mbox_cmds; k++) { |
3252 | entry_idx = log->cursor + k; |
3253 | if (entry_idx >= log->size) |
3254 | entry_idx -= log->size; |
3255 | |
3256 | entry = mbox_cmd_log_entry(log, entry_idx); |
3257 | /* skip over unused entries */ |
3258 | if (entry->timestamp == 0) |
3259 | continue; |
3260 | |
3261 | memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd)); |
3262 | for (i = 0; i < MBOX_LEN / 8; i++) { |
3263 | flit = entry->cmd[i]; |
3264 | mboxlog->hi[i] = (u32)(flit >> 32); |
3265 | mboxlog->lo[i] = (u32)flit; |
3266 | } |
3267 | mboxlog++; |
3268 | } |
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
3270 | } |
3271 | |
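/* Dump the HMA indirect registers. HMA exists only on T6 and later,
 * so older chips report CUDBG_STATUS_ENTITY_NOT_FOUND.
 */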
3272 | int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init, |
3273 | struct cudbg_buffer *dbg_buff, |
3274 | struct cudbg_error *cudbg_err) |
3275 | { |
3276 | struct adapter *padap = pdbg_init->adap; |
3277 | struct cudbg_buffer temp_buff = { 0 }; |
3278 | struct ireg_buf *hma_indr; |
3279 | int i, rc, n; |
3280 | u32 size; |
3281 | |
3282 | if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6) |
3283 | return CUDBG_STATUS_ENTITY_NOT_FOUND; |
3284 | |
3285 | n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32)); |
3286 | size = sizeof(struct ireg_buf) * n; |
	rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
3288 | if (rc) |
3289 | return rc; |
3290 | |
3291 | hma_indr = (struct ireg_buf *)temp_buff.data; |
3292 | for (i = 0; i < n; i++) { |
3293 | struct ireg_field *hma_fli = &hma_indr->tp_pio; |
3294 | u32 *buff = hma_indr->outbuf; |
3295 | |
3296 | hma_fli->ireg_addr = t6_hma_ireg_array[i][0]; |
3297 | hma_fli->ireg_data = t6_hma_ireg_array[i][1]; |
3298 | hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2]; |
3299 | hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3]; |
		t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
				 buff, hma_fli->ireg_offset_range,
				 hma_fli->ireg_local_offset);
3303 | hma_indr++; |
3304 | } |
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
3306 | } |
3307 | |
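/* Compute the worst-case number of queue descriptor entries (@num) and
 * the total buffer size (@size) needed to dump them: NIC TXQ/RXQ/FLQ,
 * control, firmware event, interrupt, PTP, ULD, and ETHOFLD queues,
 * plus the version and info headers. Either pointer may be NULL when
 * the caller needs only one of the two values.
 */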
3308 | void cudbg_fill_qdesc_num_and_size(const struct adapter *padap, |
3309 | u32 *num, u32 *size) |
3310 | { |
3311 | u32 tot_entries = 0, tot_size = 0; |
3312 | |
3313 | /* NIC TXQ, RXQ, FLQ, and CTRLQ */ |
3314 | tot_entries += MAX_ETH_QSETS * 3; |
3315 | tot_entries += MAX_CTRL_QUEUES; |
3316 | |
3317 | tot_size += MAX_ETH_QSETS * MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE; |
3318 | tot_size += MAX_ETH_QSETS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE; |
3319 | tot_size += MAX_ETH_QSETS * MAX_RX_BUFFERS * MAX_FL_DESC_SIZE; |
3320 | tot_size += MAX_CTRL_QUEUES * MAX_CTRL_TXQ_ENTRIES * |
3321 | MAX_CTRL_TXQ_DESC_SIZE; |
3322 | |
3323 | /* FW_EVTQ and INTRQ */ |
3324 | tot_entries += INGQ_EXTRAS; |
3325 | tot_size += INGQ_EXTRAS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE; |
3326 | |
3327 | /* PTP_TXQ */ |
3328 | tot_entries += 1; |
3329 | tot_size += MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE; |
3330 | |
3331 | /* ULD TXQ, RXQ, and FLQ */ |
3332 | tot_entries += CXGB4_TX_MAX * MAX_OFLD_QSETS; |
3333 | tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS * 2; |
3334 | |
3335 | tot_size += CXGB4_TX_MAX * MAX_OFLD_QSETS * MAX_TXQ_ENTRIES * |
3336 | MAX_TXQ_DESC_SIZE; |
3337 | tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RSPQ_ENTRIES * |
3338 | MAX_RXQ_DESC_SIZE; |
3339 | tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RX_BUFFERS * |
3340 | MAX_FL_DESC_SIZE; |
3341 | |
3342 | /* ULD CIQ */ |
3343 | tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS; |
3344 | tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * SGE_MAX_IQ_SIZE * |
3345 | MAX_RXQ_DESC_SIZE; |
3346 | |
3347 | /* ETHOFLD TXQ, RXQ, and FLQ */ |
3348 | tot_entries += MAX_OFLD_QSETS * 3; |
3349 | tot_size += MAX_OFLD_QSETS * MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE; |
3350 | |
3351 | tot_size += sizeof(struct cudbg_ver_hdr) + |
3352 | sizeof(struct cudbg_qdesc_info) + |
3353 | sizeof(struct cudbg_qdesc_entry) * tot_entries; |
3354 | |
3355 | if (num) |
3356 | *num = tot_entries; |
3357 | |
3358 | if (size) |
3359 | *size = tot_size; |
3360 | } |
3361 | |
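/* Dump the descriptor rings of all SGE queues. The snapshot is staged
 * in a kvzalloc()'d bounce buffer capped at CUDBG_DUMP_BUFF_SIZE and
 * then copied out in CUDBG_CHUNK_SIZE pieces; uld_mutex and the mqprio
 * mutex are held while walking the ULD and ETHOFLD queues.
 */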
3362 | int cudbg_collect_qdesc(struct cudbg_init *pdbg_init, |
3363 | struct cudbg_buffer *dbg_buff, |
3364 | struct cudbg_error *cudbg_err) |
3365 | { |
3366 | u32 num_queues = 0, tot_entries = 0, size = 0; |
3367 | struct adapter *padap = pdbg_init->adap; |
3368 | struct cudbg_buffer temp_buff = { 0 }; |
3369 | struct cudbg_qdesc_entry *qdesc_entry; |
3370 | struct cudbg_qdesc_info *qdesc_info; |
3371 | struct cudbg_ver_hdr *ver_hdr; |
3372 | struct sge *s = &padap->sge; |
3373 | u32 i, j, cur_off, tot_len; |
3374 | u8 *data; |
3375 | int rc; |
3376 | |
	cudbg_fill_qdesc_num_and_size(padap, &tot_entries, &size);
3378 | size = min_t(u32, size, CUDBG_DUMP_BUFF_SIZE); |
3379 | tot_len = size; |
3380 | data = kvzalloc(size, GFP_KERNEL); |
3381 | if (!data) |
3382 | return -ENOMEM; |
3383 | |
3384 | ver_hdr = (struct cudbg_ver_hdr *)data; |
3385 | ver_hdr->signature = CUDBG_ENTITY_SIGNATURE; |
3386 | ver_hdr->revision = CUDBG_QDESC_REV; |
3387 | ver_hdr->size = sizeof(struct cudbg_qdesc_info); |
3388 | size -= sizeof(*ver_hdr); |
3389 | |
3390 | qdesc_info = (struct cudbg_qdesc_info *)(data + |
3391 | sizeof(*ver_hdr)); |
3392 | size -= sizeof(*qdesc_info); |
3393 | qdesc_entry = (struct cudbg_qdesc_entry *)qdesc_info->data; |
3394 | |
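/* Snapshot a single queue's descriptors: bail out to @label once the
 * remaining bounce buffer space is exhausted; otherwise, if the queue
 * has descriptors allocated, copy them, account for the space used,
 * and advance to the next entry slot.
 */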
3395 | #define QDESC_GET(q, desc, type, label) do { \ |
3396 | if (size <= 0) { \ |
3397 | goto label; \ |
3398 | } \ |
3399 | if (desc) { \ |
3400 | cudbg_fill_qdesc_##q(q, type, qdesc_entry); \ |
3401 | size -= sizeof(*qdesc_entry) + qdesc_entry->data_size; \ |
3402 | num_queues++; \ |
3403 | qdesc_entry = cudbg_next_qdesc(qdesc_entry); \ |
3404 | } \ |
3405 | } while (0) |
3406 | |
3407 | #define QDESC_GET_TXQ(q, type, label) do { \ |
3408 | struct sge_txq *txq = (struct sge_txq *)q; \ |
3409 | QDESC_GET(txq, txq->desc, type, label); \ |
3410 | } while (0) |
3411 | |
3412 | #define QDESC_GET_RXQ(q, type, label) do { \ |
3413 | struct sge_rspq *rxq = (struct sge_rspq *)q; \ |
3414 | QDESC_GET(rxq, rxq->desc, type, label); \ |
3415 | } while (0) |
3416 | |
3417 | #define QDESC_GET_FLQ(q, type, label) do { \ |
3418 | struct sge_fl *flq = (struct sge_fl *)q; \ |
3419 | QDESC_GET(flq, flq->desc, type, label); \ |
3420 | } while (0) |
3421 | |
3422 | /* NIC TXQ */ |
3423 | for (i = 0; i < s->ethqsets; i++) |
3424 | QDESC_GET_TXQ(&s->ethtxq[i].q, CUDBG_QTYPE_NIC_TXQ, out); |
3425 | |
3426 | /* NIC RXQ */ |
3427 | for (i = 0; i < s->ethqsets; i++) |
3428 | QDESC_GET_RXQ(&s->ethrxq[i].rspq, CUDBG_QTYPE_NIC_RXQ, out); |
3429 | |
3430 | /* NIC FLQ */ |
3431 | for (i = 0; i < s->ethqsets; i++) |
3432 | QDESC_GET_FLQ(&s->ethrxq[i].fl, CUDBG_QTYPE_NIC_FLQ, out); |
3433 | |
3434 | /* NIC CTRLQ */ |
3435 | for (i = 0; i < padap->params.nports; i++) |
3436 | QDESC_GET_TXQ(&s->ctrlq[i].q, CUDBG_QTYPE_CTRLQ, out); |
3437 | |
3438 | /* FW_EVTQ */ |
3439 | QDESC_GET_RXQ(&s->fw_evtq, CUDBG_QTYPE_FWEVTQ, out); |
3440 | |
3441 | /* INTRQ */ |
3442 | QDESC_GET_RXQ(&s->intrq, CUDBG_QTYPE_INTRQ, out); |
3443 | |
3444 | /* PTP_TXQ */ |
3445 | QDESC_GET_TXQ(&s->ptptxq.q, CUDBG_QTYPE_PTP_TXQ, out); |
3446 | |
3447 | /* ULD Queues */ |
3448 | mutex_lock(&uld_mutex); |
3449 | |
3450 | if (s->uld_txq_info) { |
3451 | struct sge_uld_txq_info *utxq; |
3452 | |
3453 | /* ULD TXQ */ |
3454 | for (j = 0; j < CXGB4_TX_MAX; j++) { |
3455 | if (!s->uld_txq_info[j]) |
3456 | continue; |
3457 | |
3458 | utxq = s->uld_txq_info[j]; |
3459 | for (i = 0; i < utxq->ntxq; i++) |
3460 | QDESC_GET_TXQ(&utxq->uldtxq[i].q, |
3461 | cudbg_uld_txq_to_qtype(j), |
3462 | out_unlock_uld); |
3463 | } |
3464 | } |
3465 | |
3466 | if (s->uld_rxq_info) { |
3467 | struct sge_uld_rxq_info *urxq; |
3468 | u32 base; |
3469 | |
3470 | /* ULD RXQ */ |
3471 | for (j = 0; j < CXGB4_ULD_MAX; j++) { |
3472 | if (!s->uld_rxq_info[j]) |
3473 | continue; |
3474 | |
3475 | urxq = s->uld_rxq_info[j]; |
3476 | for (i = 0; i < urxq->nrxq; i++) |
3477 | QDESC_GET_RXQ(&urxq->uldrxq[i].rspq, |
3478 | cudbg_uld_rxq_to_qtype(j), |
3479 | out_unlock_uld); |
3480 | } |
3481 | |
3482 | /* ULD FLQ */ |
3483 | for (j = 0; j < CXGB4_ULD_MAX; j++) { |
3484 | if (!s->uld_rxq_info[j]) |
3485 | continue; |
3486 | |
3487 | urxq = s->uld_rxq_info[j]; |
3488 | for (i = 0; i < urxq->nrxq; i++) |
3489 | QDESC_GET_FLQ(&urxq->uldrxq[i].fl, |
3490 | cudbg_uld_flq_to_qtype(j), |
3491 | out_unlock_uld); |
3492 | } |
3493 | |
3494 | /* ULD CIQ */ |
3495 | for (j = 0; j < CXGB4_ULD_MAX; j++) { |
3496 | if (!s->uld_rxq_info[j]) |
3497 | continue; |
3498 | |
3499 | urxq = s->uld_rxq_info[j]; |
3500 | base = urxq->nrxq; |
3501 | for (i = 0; i < urxq->nciq; i++) |
3502 | QDESC_GET_RXQ(&urxq->uldrxq[base + i].rspq, |
3503 | cudbg_uld_ciq_to_qtype(j), |
3504 | out_unlock_uld); |
3505 | } |
3506 | } |
	mutex_unlock(&uld_mutex);
3508 | |
3509 | if (!padap->tc_mqprio) |
3510 | goto out; |
3511 | |
3512 | mutex_lock(&padap->tc_mqprio->mqprio_mutex); |
3513 | /* ETHOFLD TXQ */ |
3514 | if (s->eohw_txq) |
3515 | for (i = 0; i < s->eoqsets; i++) |
3516 | QDESC_GET_TXQ(&s->eohw_txq[i].q, |
3517 | CUDBG_QTYPE_ETHOFLD_TXQ, out_unlock_mqprio); |
3518 | |
3519 | /* ETHOFLD RXQ and FLQ */ |
3520 | if (s->eohw_rxq) { |
3521 | for (i = 0; i < s->eoqsets; i++) |
3522 | QDESC_GET_RXQ(&s->eohw_rxq[i].rspq, |
3523 | CUDBG_QTYPE_ETHOFLD_RXQ, out_unlock_mqprio); |
3524 | |
3525 | for (i = 0; i < s->eoqsets; i++) |
3526 | QDESC_GET_FLQ(&s->eohw_rxq[i].fl, |
3527 | CUDBG_QTYPE_ETHOFLD_FLQ, out_unlock_mqprio); |
3528 | } |
3529 | |
3530 | out_unlock_mqprio: |
	mutex_unlock(&padap->tc_mqprio->mqprio_mutex);
3532 | |
3533 | out: |
3534 | qdesc_info->qdesc_entry_size = sizeof(*qdesc_entry); |
3535 | qdesc_info->num_queues = num_queues; |
3536 | cur_off = 0; |
3537 | while (tot_len) { |
3538 | u32 chunk_size = min_t(u32, tot_len, CUDBG_CHUNK_SIZE); |
3539 | |
		rc = cudbg_get_buff(pdbg_init, dbg_buff, chunk_size,
				    &temp_buff);
3542 | if (rc) { |
3543 | cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA; |
3544 | goto out_free; |
3545 | } |
3546 | |
3547 | memcpy(temp_buff.data, data + cur_off, chunk_size); |
3548 | tot_len -= chunk_size; |
3549 | cur_off += chunk_size; |
		rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
						  dbg_buff);
		if (rc) {
			cudbg_put_buff(pdbg_init, &temp_buff);
3554 | cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA; |
3555 | goto out_free; |
3556 | } |
3557 | } |
3558 | |
3559 | out_free: |
	kvfree(data);
3561 | |
3562 | #undef QDESC_GET_FLQ |
3563 | #undef QDESC_GET_RXQ |
3564 | #undef QDESC_GET_TXQ |
3565 | #undef QDESC_GET |
3566 | |
3567 | return rc; |
3568 | |
3569 | out_unlock_uld: |
	mutex_unlock(&uld_mutex);
3571 | goto out; |
3572 | } |
3573 | |
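/* Dump the adapter's serial flash contents, starting at
 * FLASH_EXP_ROM_START, one SF_PAGE_SIZE chunk per cudbg buffer.
 */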
3574 | int cudbg_collect_flash(struct cudbg_init *pdbg_init, |
3575 | struct cudbg_buffer *dbg_buff, |
3576 | struct cudbg_error *cudbg_err) |
3577 | { |
3578 | struct adapter *padap = pdbg_init->adap; |
3579 | u32 count = padap->params.sf_size, n; |
3580 | struct cudbg_buffer temp_buff = {0}; |
3581 | u32 addr, i; |
3582 | int rc; |
3583 | |
3584 | addr = FLASH_EXP_ROM_START; |
3585 | |
3586 | for (i = 0; i < count; i += SF_PAGE_SIZE) { |
3587 | n = min_t(u32, count - i, SF_PAGE_SIZE); |
3588 | |
		rc = cudbg_get_buff(pdbg_init, dbg_buff, n, &temp_buff);
3590 | if (rc) { |
3591 | cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA; |
3592 | goto out; |
3593 | } |
		rc = t4_read_flash(padap, addr, n, (u32 *)temp_buff.data, 0);
3595 | if (rc) |
3596 | goto out; |
3597 | |
3598 | addr += (n * 4); |
		rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
						  dbg_buff);
3601 | if (rc) { |
3602 | cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA; |
3603 | goto out; |
3604 | } |
3605 | } |
3606 | |
3607 | out: |
3608 | return rc; |
3609 | } |
3610 | |