1 | /* |
2 | * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. |
3 | * |
4 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the |
8 | * OpenIB.org BSD license below: |
9 | * |
10 | * Redistribution and use in source and binary forms, with or |
11 | * without modification, are permitted provided that the following |
12 | * conditions are met: |
13 | * |
14 | * - Redistributions of source code must retain the above |
15 | * copyright notice, this list of conditions and the following |
16 | * disclaimer. |
17 | * |
18 | * - Redistributions in binary form must reproduce the above |
19 | * copyright notice, this list of conditions and the following |
20 | * disclaimer in the documentation and/or other materials |
21 | * provided with the distribution. |
22 | * |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
30 | * SOFTWARE. |
31 | */ |
32 | |
33 | #include <linux/mlx5/driver.h> |
34 | #include <linux/mlx5/eswitch.h> |
35 | #include "mlx5_core.h" |
36 | #include "../../mlxfw/mlxfw.h" |
37 | #include "lib/tout.h" |
38 | |
/* MCQS register: component identifier values, i.e. what kind of component
 * a given component index refers to. Used below to locate the boot image.
 */
enum {
	MCQS_IDENTIFIER_BOOT_IMG = 0x1,
	MCQS_IDENTIFIER_OEM_NVCONFIG = 0x4,
	MCQS_IDENTIFIER_MLNX_NVCONFIG = 0x5,
	MCQS_IDENTIFIER_CS_TOKEN = 0x6,
	MCQS_IDENTIFIER_DBG_TOKEN = 0x7,
	MCQS_IDENTIFIER_GEARBOX = 0xA,
};
47 | |
/* MCQS register: component_update_state values. Only IDLE and
 * ACTIVE_PENDING_RESET are meaningful to mlx5_fw_image_pending() below.
 */
enum {
	MCQS_UPDATE_STATE_IDLE,
	MCQS_UPDATE_STATE_IN_PROGRESS,
	MCQS_UPDATE_STATE_APPLIED,
	MCQS_UPDATE_STATE_ACTIVE,
	MCQS_UPDATE_STATE_ACTIVE_PENDING_RESET,
	MCQS_UPDATE_STATE_FAILED,
	MCQS_UPDATE_STATE_CANCELED,
	MCQS_UPDATE_STATE_BUSY,
};
58 | |
/* MCQI register: info_type selector for which component info to query. */
enum {
	MCQI_INFO_TYPE_CAPABILITIES = 0x0,
	MCQI_INFO_TYPE_VERSION = 0x1,
	MCQI_INFO_TYPE_ACTIVATION_METHOD = 0x5,
};
64 | |
/* Values for the MCQI read_pending_component field: select between the
 * currently running FW version and the stored (pending) one.
 */
enum {
	MCQI_FW_RUNNING_VERSION = 0,
	MCQI_FW_STORED_VERSION = 1,
};
69 | |
70 | int mlx5_query_board_id(struct mlx5_core_dev *dev) |
71 | { |
72 | u32 *out; |
73 | int outlen = MLX5_ST_SZ_BYTES(query_adapter_out); |
74 | u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {}; |
75 | int err; |
76 | |
77 | out = kzalloc(size: outlen, GFP_KERNEL); |
78 | if (!out) |
79 | return -ENOMEM; |
80 | |
81 | MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER); |
82 | err = mlx5_cmd_exec_inout(dev, query_adapter, in, out); |
83 | if (err) |
84 | goto out; |
85 | |
86 | memcpy(dev->board_id, |
87 | MLX5_ADDR_OF(query_adapter_out, out, |
88 | query_adapter_struct.vsd_contd_psid), |
89 | MLX5_FLD_SZ_BYTES(query_adapter_out, |
90 | query_adapter_struct.vsd_contd_psid)); |
91 | |
92 | out: |
93 | kfree(objp: out); |
94 | return err; |
95 | } |
96 | |
97 | int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id) |
98 | { |
99 | u32 *out; |
100 | int outlen = MLX5_ST_SZ_BYTES(query_adapter_out); |
101 | u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {}; |
102 | int err; |
103 | |
104 | out = kzalloc(size: outlen, GFP_KERNEL); |
105 | if (!out) |
106 | return -ENOMEM; |
107 | |
108 | MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER); |
109 | err = mlx5_cmd_exec_inout(mdev, query_adapter, in, out); |
110 | if (err) |
111 | goto out; |
112 | |
113 | *vendor_id = MLX5_GET(query_adapter_out, out, |
114 | query_adapter_struct.ieee_vendor_id); |
115 | out: |
116 | kfree(objp: out); |
117 | return err; |
118 | } |
119 | EXPORT_SYMBOL(mlx5_core_query_vendor_id); |
120 | |
121 | static int mlx5_get_pcam_reg(struct mlx5_core_dev *dev) |
122 | { |
123 | return mlx5_query_pcam_reg(dev, pcam: dev->caps.pcam, |
124 | feature_group: MLX5_PCAM_FEATURE_ENHANCED_FEATURES, |
125 | access_reg_group: MLX5_PCAM_REGS_5000_TO_507F); |
126 | } |
127 | |
128 | static int mlx5_get_mcam_access_reg_group(struct mlx5_core_dev *dev, |
129 | enum mlx5_mcam_reg_groups group) |
130 | { |
131 | return mlx5_query_mcam_reg(dev, mcap: dev->caps.mcam[group], |
132 | feature_group: MLX5_MCAM_FEATURE_ENHANCED_FEATURES, access_reg_group: group); |
133 | } |
134 | |
135 | static int mlx5_get_qcam_reg(struct mlx5_core_dev *dev) |
136 | { |
137 | return mlx5_query_qcam_reg(mdev: dev, qcam: dev->caps.qcam, |
138 | feature_group: MLX5_QCAM_FEATURE_ENHANCED_FEATURES, |
139 | access_reg_group: MLX5_QCAM_REGS_FIRST_128); |
140 | } |
141 | |
142 | int mlx5_query_hca_caps(struct mlx5_core_dev *dev) |
143 | { |
144 | int err; |
145 | |
146 | err = mlx5_core_get_caps_mode(dev, cap_type: MLX5_CAP_GENERAL, cap_mode: HCA_CAP_OPMOD_GET_CUR); |
147 | if (err) |
148 | return err; |
149 | |
150 | if (MLX5_CAP_GEN(dev, port_selection_cap)) { |
151 | err = mlx5_core_get_caps_mode(dev, cap_type: MLX5_CAP_PORT_SELECTION, cap_mode: HCA_CAP_OPMOD_GET_CUR); |
152 | if (err) |
153 | return err; |
154 | } |
155 | |
156 | if (MLX5_CAP_GEN(dev, hca_cap_2)) { |
157 | err = mlx5_core_get_caps_mode(dev, cap_type: MLX5_CAP_GENERAL_2, cap_mode: HCA_CAP_OPMOD_GET_CUR); |
158 | if (err) |
159 | return err; |
160 | } |
161 | |
162 | if (MLX5_CAP_GEN(dev, eth_net_offloads)) { |
163 | err = mlx5_core_get_caps_mode(dev, cap_type: MLX5_CAP_ETHERNET_OFFLOADS, |
164 | cap_mode: HCA_CAP_OPMOD_GET_CUR); |
165 | if (err) |
166 | return err; |
167 | } |
168 | |
169 | if (MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) { |
170 | err = mlx5_core_get_caps_mode(dev, cap_type: MLX5_CAP_IPOIB_ENHANCED_OFFLOADS, |
171 | cap_mode: HCA_CAP_OPMOD_GET_CUR); |
172 | if (err) |
173 | return err; |
174 | } |
175 | |
176 | if (MLX5_CAP_GEN(dev, pg)) { |
177 | err = mlx5_core_get_caps_mode(dev, cap_type: MLX5_CAP_ODP, cap_mode: HCA_CAP_OPMOD_GET_CUR); |
178 | if (err) |
179 | return err; |
180 | } |
181 | |
182 | if (MLX5_CAP_GEN(dev, atomic)) { |
183 | err = mlx5_core_get_caps_mode(dev, cap_type: MLX5_CAP_ATOMIC, cap_mode: HCA_CAP_OPMOD_GET_CUR); |
184 | if (err) |
185 | return err; |
186 | } |
187 | |
188 | if (MLX5_CAP_GEN(dev, roce)) { |
189 | err = mlx5_core_get_caps_mode(dev, cap_type: MLX5_CAP_ROCE, cap_mode: HCA_CAP_OPMOD_GET_CUR); |
190 | if (err) |
191 | return err; |
192 | } |
193 | |
194 | if (MLX5_CAP_GEN(dev, nic_flow_table) || |
195 | MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) { |
196 | err = mlx5_core_get_caps_mode(dev, cap_type: MLX5_CAP_FLOW_TABLE, cap_mode: HCA_CAP_OPMOD_GET_CUR); |
197 | if (err) |
198 | return err; |
199 | } |
200 | |
201 | if (MLX5_ESWITCH_MANAGER(dev)) { |
202 | err = mlx5_core_get_caps_mode(dev, cap_type: MLX5_CAP_ESWITCH_FLOW_TABLE, |
203 | cap_mode: HCA_CAP_OPMOD_GET_CUR); |
204 | if (err) |
205 | return err; |
206 | |
207 | err = mlx5_core_get_caps_mode(dev, cap_type: MLX5_CAP_ESWITCH, cap_mode: HCA_CAP_OPMOD_GET_CUR); |
208 | if (err) |
209 | return err; |
210 | } |
211 | |
212 | if (MLX5_CAP_GEN(dev, qos)) { |
213 | err = mlx5_core_get_caps_mode(dev, cap_type: MLX5_CAP_QOS, cap_mode: HCA_CAP_OPMOD_GET_CUR); |
214 | if (err) |
215 | return err; |
216 | } |
217 | |
218 | if (MLX5_CAP_GEN(dev, debug)) |
219 | mlx5_core_get_caps_mode(dev, cap_type: MLX5_CAP_DEBUG, cap_mode: HCA_CAP_OPMOD_GET_CUR); |
220 | |
221 | if (MLX5_CAP_GEN(dev, pcam_reg)) |
222 | mlx5_get_pcam_reg(dev); |
223 | |
224 | if (MLX5_CAP_GEN(dev, mcam_reg)) { |
225 | mlx5_get_mcam_access_reg_group(dev, group: MLX5_MCAM_REGS_FIRST_128); |
226 | mlx5_get_mcam_access_reg_group(dev, group: MLX5_MCAM_REGS_0x9100_0x917F); |
227 | } |
228 | |
229 | if (MLX5_CAP_GEN(dev, qcam_reg)) |
230 | mlx5_get_qcam_reg(dev); |
231 | |
232 | if (MLX5_CAP_GEN(dev, device_memory)) { |
233 | err = mlx5_core_get_caps_mode(dev, cap_type: MLX5_CAP_DEV_MEM, cap_mode: HCA_CAP_OPMOD_GET_CUR); |
234 | if (err) |
235 | return err; |
236 | } |
237 | |
238 | if (MLX5_CAP_GEN(dev, event_cap)) { |
239 | err = mlx5_core_get_caps_mode(dev, cap_type: MLX5_CAP_DEV_EVENT, cap_mode: HCA_CAP_OPMOD_GET_CUR); |
240 | if (err) |
241 | return err; |
242 | } |
243 | |
244 | if (MLX5_CAP_GEN(dev, tls_tx) || MLX5_CAP_GEN(dev, tls_rx)) { |
245 | err = mlx5_core_get_caps_mode(dev, cap_type: MLX5_CAP_TLS, cap_mode: HCA_CAP_OPMOD_GET_CUR); |
246 | if (err) |
247 | return err; |
248 | } |
249 | |
250 | if (MLX5_CAP_GEN_64(dev, general_obj_types) & |
251 | MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) { |
252 | err = mlx5_core_get_caps_mode(dev, cap_type: MLX5_CAP_VDPA_EMULATION, cap_mode: HCA_CAP_OPMOD_GET_CUR); |
253 | if (err) |
254 | return err; |
255 | } |
256 | |
257 | if (MLX5_CAP_GEN(dev, ipsec_offload)) { |
258 | err = mlx5_core_get_caps_mode(dev, cap_type: MLX5_CAP_IPSEC, cap_mode: HCA_CAP_OPMOD_GET_CUR); |
259 | if (err) |
260 | return err; |
261 | } |
262 | |
263 | if (MLX5_CAP_GEN(dev, crypto)) { |
264 | err = mlx5_core_get_caps_mode(dev, cap_type: MLX5_CAP_CRYPTO, cap_mode: HCA_CAP_OPMOD_GET_CUR); |
265 | if (err) |
266 | return err; |
267 | } |
268 | |
269 | if (MLX5_CAP_GEN_64(dev, general_obj_types) & |
270 | MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD) { |
271 | err = mlx5_core_get_caps_mode(dev, cap_type: MLX5_CAP_MACSEC, cap_mode: HCA_CAP_OPMOD_GET_CUR); |
272 | if (err) |
273 | return err; |
274 | } |
275 | |
276 | if (MLX5_CAP_GEN(dev, adv_virtualization)) { |
277 | err = mlx5_core_get_caps_mode(dev, cap_type: MLX5_CAP_ADV_VIRTUALIZATION, |
278 | cap_mode: HCA_CAP_OPMOD_GET_CUR); |
279 | if (err) |
280 | return err; |
281 | } |
282 | |
283 | return 0; |
284 | } |
285 | |
286 | int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id) |
287 | { |
288 | u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {}; |
289 | int i; |
290 | |
291 | MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA); |
292 | |
293 | if (MLX5_CAP_GEN(dev, sw_owner_id)) { |
294 | for (i = 0; i < 4; i++) |
295 | MLX5_ARRAY_SET(init_hca_in, in, sw_owner_id, i, |
296 | sw_owner_id[i]); |
297 | } |
298 | |
299 | if (MLX5_CAP_GEN_2_MAX(dev, sw_vhca_id_valid) && |
300 | dev->priv.sw_vhca_id > 0) |
301 | MLX5_SET(init_hca_in, in, sw_vhca_id, dev->priv.sw_vhca_id); |
302 | |
303 | return mlx5_cmd_exec_in(dev, init_hca, in); |
304 | } |
305 | |
306 | int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev) |
307 | { |
308 | u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {}; |
309 | |
310 | MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA); |
311 | return mlx5_cmd_exec_in(dev, teardown_hca, in); |
312 | } |
313 | |
314 | int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev) |
315 | { |
316 | u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0}; |
317 | u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0}; |
318 | int force_state; |
319 | int ret; |
320 | |
321 | if (!MLX5_CAP_GEN(dev, force_teardown)) { |
322 | mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n" ); |
323 | return -EOPNOTSUPP; |
324 | } |
325 | |
326 | MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA); |
327 | MLX5_SET(teardown_hca_in, in, profile, MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE); |
328 | |
329 | ret = mlx5_cmd_exec_polling(dev, in, in_size: sizeof(in), out, out_size: sizeof(out)); |
330 | if (ret) |
331 | return ret; |
332 | |
333 | force_state = MLX5_GET(teardown_hca_out, out, state); |
334 | if (force_state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) { |
335 | mlx5_core_warn(dev, "teardown with force mode failed, doing normal teardown\n" ); |
336 | return -EIO; |
337 | } |
338 | |
339 | return 0; |
340 | } |
341 | |
/* mlx5_cmd_fast_teardown_hca - prepare a fast teardown, then wait for the
 * NIC interface to reach the DISABLED state.
 *
 * Returns -EOPNOTSUPP when the FW lacks fast_teardown, -EIO when the FW
 * rejects the fast profile or the NIC fails to disable within the
 * TEARDOWN timeout, 0 on success.
 */
int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
{
	unsigned long end, delay_ms = mlx5_tout_ms(dev, TEARDOWN);
	u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {};
	u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {};
	int state;
	int ret;

	if (!MLX5_CAP_GEN(dev, fast_teardown)) {
		mlx5_core_dbg(dev, "fast teardown is not supported in the firmware\n" );
		return -EOPNOTSUPP;
	}

	MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
	MLX5_SET(teardown_hca_in, in, profile,
		 MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN);

	ret = mlx5_cmd_exec_inout(dev, teardown_hca, in, out);
	if (ret)
		return ret;

	state = MLX5_GET(teardown_hca_out, out, state);
	if (state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
		mlx5_core_warn(dev, "teardown with fast mode failed\n" );
		return -EIO;
	}

	/* Request the disable; FW acts on it asynchronously. */
	mlx5_set_nic_state(dev, state: MLX5_NIC_IFC_DISABLED);

	/* Loop until device state turns to disable */
	end = jiffies + msecs_to_jiffies(m: delay_ms);
	do {
		if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
			break;

		cond_resched();
	} while (!time_after(jiffies, end));

	/* Re-check after the loop: it may have exited on timeout. */
	if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
		dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n" ,
			mlx5_get_nic_state(dev), delay_ms);
		return -EIO;
	}

	return 0;
}
388 | |
/* MCC register instruction codes driving the FW-update FSM.
 * NOTE(review): the enum tag carries an "mlxsw_" prefix although the
 * constants are MLX5_* — presumably inherited from the mlxsw driver; the
 * tag is local to this file, so renaming would be safe but is not done here.
 */
enum mlxsw_reg_mcc_instruction {
	MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE = 0x01,
	MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE = 0x02,
	MLX5_REG_MCC_INSTRUCTION_UPDATE_COMPONENT = 0x03,
	MLX5_REG_MCC_INSTRUCTION_VERIFY_COMPONENT = 0x04,
	MLX5_REG_MCC_INSTRUCTION_ACTIVATE = 0x06,
	MLX5_REG_MCC_INSTRUCTION_CANCEL = 0x08,
};
397 | |
398 | static int mlx5_reg_mcc_set(struct mlx5_core_dev *dev, |
399 | enum mlxsw_reg_mcc_instruction instr, |
400 | u16 component_index, u32 update_handle, |
401 | u32 component_size) |
402 | { |
403 | u32 out[MLX5_ST_SZ_DW(mcc_reg)]; |
404 | u32 in[MLX5_ST_SZ_DW(mcc_reg)]; |
405 | |
406 | memset(in, 0, sizeof(in)); |
407 | |
408 | MLX5_SET(mcc_reg, in, instruction, instr); |
409 | MLX5_SET(mcc_reg, in, component_index, component_index); |
410 | MLX5_SET(mcc_reg, in, update_handle, update_handle); |
411 | MLX5_SET(mcc_reg, in, component_size, component_size); |
412 | |
413 | return mlx5_core_access_reg(dev, data_in: in, size_in: sizeof(in), data_out: out, |
414 | size_out: sizeof(out), reg_num: MLX5_REG_MCC, arg: 0, write: 1); |
415 | } |
416 | |
417 | static int mlx5_reg_mcc_query(struct mlx5_core_dev *dev, |
418 | u32 *update_handle, u8 *error_code, |
419 | u8 *control_state) |
420 | { |
421 | u32 out[MLX5_ST_SZ_DW(mcc_reg)]; |
422 | u32 in[MLX5_ST_SZ_DW(mcc_reg)]; |
423 | int err; |
424 | |
425 | memset(in, 0, sizeof(in)); |
426 | memset(out, 0, sizeof(out)); |
427 | MLX5_SET(mcc_reg, in, update_handle, *update_handle); |
428 | |
429 | err = mlx5_core_access_reg(dev, data_in: in, size_in: sizeof(in), data_out: out, |
430 | size_out: sizeof(out), reg_num: MLX5_REG_MCC, arg: 0, write: 0); |
431 | if (err) |
432 | goto out; |
433 | |
434 | *update_handle = MLX5_GET(mcc_reg, out, update_handle); |
435 | *error_code = MLX5_GET(mcc_reg, out, error_code); |
436 | *control_state = MLX5_GET(mcc_reg, out, control_state); |
437 | |
438 | out: |
439 | return err; |
440 | } |
441 | |
/* mlx5_reg_mcda_set - download one data block to the FW through MCDA.
 *
 * Copies @size bytes of @data (byte-swapped to big-endian per dword) into
 * the MCDA payload and writes it at @offset under @update_handle.
 *
 * NOTE(review): only size >> 2 dwords are copied, so any trailing 1-3
 * bytes of @data are silently dropped — assumes callers always pass
 * dword-multiple sizes (mlxfw block sizes come from MCQI caps); confirm.
 * NOTE(review): the u32 read through `*(u32 *)&data[j]` assumes @data is
 * 4-byte aligned — verify against mlxfw's buffer allocation.
 */
static int mlx5_reg_mcda_set(struct mlx5_core_dev *dev,
			     u32 update_handle,
			     u32 offset, u16 size,
			     u8 *data)
{
	int err, in_size = MLX5_ST_SZ_BYTES(mcda_reg) + size;
	u32 out[MLX5_ST_SZ_DW(mcda_reg)];
	int i, j, dw_size = size >> 2;
	__be32 data_element;
	u32 *in;

	in = kzalloc(size: in_size, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(mcda_reg, in, update_handle, update_handle);
	MLX5_SET(mcda_reg, in, offset, offset);
	MLX5_SET(mcda_reg, in, size, size);

	/* Swap each dword to the device's big-endian layout. */
	for (i = 0; i < dw_size; i++) {
		j = i * 4;
		data_element = htonl(*(u32 *)&data[j]);
		memcpy(MLX5_ADDR_OF(mcda_reg, in, data) + j, &data_element, 4);
	}

	err = mlx5_core_access_reg(dev, data_in: in, size_in: in_size, data_out: out,
				   size_out: sizeof(out), reg_num: MLX5_REG_MCDA, arg: 0, write: 1);
	kfree(objp: in);
	return err;
}
472 | |
/* mlx5_reg_mcqi_query - read @data_size bytes of component info of
 * @info_type for @component_index into @mcqi_data.
 *
 * @read_pending selects the pending (stored) component instead of the
 * running one. Caller must size @mcqi_data for @data_size bytes and keep
 * @data_size within MLX5_UN_SZ_BYTES(mcqi_reg_data), the payload capacity
 * of the stack `out` buffer.
 */
static int mlx5_reg_mcqi_query(struct mlx5_core_dev *dev,
			       u16 component_index, bool read_pending,
			       u8 info_type, u16 data_size, void *mcqi_data)
{
	u32 out[MLX5_ST_SZ_DW(mcqi_reg) + MLX5_UN_SZ_DW(mcqi_reg_data)] = {};
	u32 in[MLX5_ST_SZ_DW(mcqi_reg)] = {};
	void *data;
	int err;

	MLX5_SET(mcqi_reg, in, component_index, component_index);
	MLX5_SET(mcqi_reg, in, read_pending_component, read_pending);
	MLX5_SET(mcqi_reg, in, info_type, info_type);
	MLX5_SET(mcqi_reg, in, data_size, data_size);

	/* Out size is the fixed header plus only the requested payload. */
	err = mlx5_core_access_reg(dev, data_in: in, size_in: sizeof(in), data_out: out,
				   MLX5_ST_SZ_BYTES(mcqi_reg) + data_size,
				   reg_num: MLX5_REG_MCQI, arg: 0, write: 0);
	if (err)
		return err;

	data = MLX5_ADDR_OF(mcqi_reg, out, data);
	memcpy(mcqi_data, data, data_size);

	return 0;
}
498 | |
499 | static int mlx5_reg_mcqi_caps_query(struct mlx5_core_dev *dev, u16 component_index, |
500 | u32 *max_component_size, u8 *log_mcda_word_size, |
501 | u16 *mcda_max_write_size) |
502 | { |
503 | u32 mcqi_reg[MLX5_ST_SZ_DW(mcqi_cap)] = {}; |
504 | int err; |
505 | |
506 | err = mlx5_reg_mcqi_query(dev, component_index, read_pending: 0, |
507 | info_type: MCQI_INFO_TYPE_CAPABILITIES, |
508 | MLX5_ST_SZ_BYTES(mcqi_cap), mcqi_data: mcqi_reg); |
509 | if (err) |
510 | return err; |
511 | |
512 | *max_component_size = MLX5_GET(mcqi_cap, mcqi_reg, max_component_size); |
513 | *log_mcda_word_size = MLX5_GET(mcqi_cap, mcqi_reg, log_mcda_word_size); |
514 | *mcda_max_write_size = MLX5_GET(mcqi_cap, mcqi_reg, mcda_max_write_size); |
515 | |
516 | return 0; |
517 | } |
518 | |
/* Glue binding the common mlxfw flashing framework to an mlx5 device;
 * the ops below recover the mlx5 device via container_of on mlxfw_dev.
 */
struct mlx5_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlx5_core_dev *mlx5_core_dev;
};
523 | |
524 | static int mlx5_component_query(struct mlxfw_dev *mlxfw_dev, |
525 | u16 component_index, u32 *p_max_size, |
526 | u8 *p_align_bits, u16 *p_max_write_size) |
527 | { |
528 | struct mlx5_mlxfw_dev *mlx5_mlxfw_dev = |
529 | container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev); |
530 | struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev; |
531 | |
532 | if (!MLX5_CAP_GEN(dev, mcam_reg) || !MLX5_CAP_MCAM_REG(dev, mcqi)) { |
533 | mlx5_core_warn(dev, "caps query isn't supported by running FW\n" ); |
534 | return -EOPNOTSUPP; |
535 | } |
536 | |
537 | return mlx5_reg_mcqi_caps_query(dev, component_index, max_component_size: p_max_size, |
538 | log_mcda_word_size: p_align_bits, mcda_max_write_size: p_max_write_size); |
539 | } |
540 | |
541 | static int mlx5_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle) |
542 | { |
543 | struct mlx5_mlxfw_dev *mlx5_mlxfw_dev = |
544 | container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev); |
545 | struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev; |
546 | u8 control_state, error_code; |
547 | int err; |
548 | |
549 | *fwhandle = 0; |
550 | err = mlx5_reg_mcc_query(dev, update_handle: fwhandle, error_code: &error_code, control_state: &control_state); |
551 | if (err) |
552 | return err; |
553 | |
554 | if (control_state != MLXFW_FSM_STATE_IDLE) |
555 | return -EBUSY; |
556 | |
557 | return mlx5_reg_mcc_set(dev, instr: MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE, |
558 | component_index: 0, update_handle: *fwhandle, component_size: 0); |
559 | } |
560 | |
561 | static int mlx5_fsm_component_update(struct mlxfw_dev *mlxfw_dev, u32 fwhandle, |
562 | u16 component_index, u32 component_size) |
563 | { |
564 | struct mlx5_mlxfw_dev *mlx5_mlxfw_dev = |
565 | container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev); |
566 | struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev; |
567 | |
568 | return mlx5_reg_mcc_set(dev, instr: MLX5_REG_MCC_INSTRUCTION_UPDATE_COMPONENT, |
569 | component_index, update_handle: fwhandle, component_size); |
570 | } |
571 | |
572 | static int mlx5_fsm_block_download(struct mlxfw_dev *mlxfw_dev, u32 fwhandle, |
573 | u8 *data, u16 size, u32 offset) |
574 | { |
575 | struct mlx5_mlxfw_dev *mlx5_mlxfw_dev = |
576 | container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev); |
577 | struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev; |
578 | |
579 | return mlx5_reg_mcda_set(dev, update_handle: fwhandle, offset, size, data); |
580 | } |
581 | |
582 | static int mlx5_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, u32 fwhandle, |
583 | u16 component_index) |
584 | { |
585 | struct mlx5_mlxfw_dev *mlx5_mlxfw_dev = |
586 | container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev); |
587 | struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev; |
588 | |
589 | return mlx5_reg_mcc_set(dev, instr: MLX5_REG_MCC_INSTRUCTION_VERIFY_COMPONENT, |
590 | component_index, update_handle: fwhandle, component_size: 0); |
591 | } |
592 | |
593 | static int mlx5_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) |
594 | { |
595 | struct mlx5_mlxfw_dev *mlx5_mlxfw_dev = |
596 | container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev); |
597 | struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev; |
598 | |
599 | return mlx5_reg_mcc_set(dev, instr: MLX5_REG_MCC_INSTRUCTION_ACTIVATE, component_index: 0, |
600 | update_handle: fwhandle, component_size: 0); |
601 | } |
602 | |
603 | static int mlx5_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle, |
604 | enum mlxfw_fsm_state *fsm_state, |
605 | enum mlxfw_fsm_state_err *fsm_state_err) |
606 | { |
607 | struct mlx5_mlxfw_dev *mlx5_mlxfw_dev = |
608 | container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev); |
609 | struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev; |
610 | u8 control_state, error_code; |
611 | int err; |
612 | |
613 | err = mlx5_reg_mcc_query(dev, update_handle: &fwhandle, error_code: &error_code, control_state: &control_state); |
614 | if (err) |
615 | return err; |
616 | |
617 | *fsm_state = control_state; |
618 | *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code, |
619 | MLXFW_FSM_STATE_ERR_MAX); |
620 | return 0; |
621 | } |
622 | |
623 | static void mlx5_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) |
624 | { |
625 | struct mlx5_mlxfw_dev *mlx5_mlxfw_dev = |
626 | container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev); |
627 | struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev; |
628 | |
629 | mlx5_reg_mcc_set(dev, instr: MLX5_REG_MCC_INSTRUCTION_CANCEL, component_index: 0, update_handle: fwhandle, component_size: 0); |
630 | } |
631 | |
632 | static void mlx5_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle) |
633 | { |
634 | struct mlx5_mlxfw_dev *mlx5_mlxfw_dev = |
635 | container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev); |
636 | struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev; |
637 | |
638 | mlx5_reg_mcc_set(dev, instr: MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, component_index: 0, |
639 | update_handle: fwhandle, component_size: 0); |
640 | } |
641 | |
/* mlxfw op: reactivate the FW image via the MIRC register.
 *
 * Triggers reactivation with a MIRC write, then polls MIRC's status_code
 * every 20ms until it leaves BUSY or the FSM_REACTIVATE timeout elapses.
 * *status always holds the last status code read; on timeout the function
 * still returns 0 with *status == BUSY, and the caller is expected to
 * interpret the status code.
 */
static int mlx5_fsm_reactivate(struct mlxfw_dev *mlxfw_dev, u8 *status)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
	u32 out[MLX5_ST_SZ_DW(mirc_reg)];
	u32 in[MLX5_ST_SZ_DW(mirc_reg)];
	unsigned long exp_time;
	int err;

	exp_time = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FSM_REACTIVATE));

	if (!MLX5_CAP_MCAM_REG2(dev, mirc))
		return -EOPNOTSUPP;

	memset(in, 0, sizeof(in));

	/* Write (all-zero payload) starts the reactivation. */
	err = mlx5_core_access_reg(dev, data_in: in, size_in: sizeof(in), data_out: out,
				   size_out: sizeof(out), reg_num: MLX5_REG_MIRC, arg: 0, write: 1);
	if (err)
		return err;

	do {
		memset(out, 0, sizeof(out));
		err = mlx5_core_access_reg(dev, data_in: in, size_in: sizeof(in), data_out: out,
					   size_out: sizeof(out), reg_num: MLX5_REG_MIRC, arg: 0, write: 0);
		if (err)
			return err;

		*status = MLX5_GET(mirc_reg, out, status_code);
		if (*status != MLXFW_FSM_REACTIVATE_STATUS_BUSY)
			return 0;

		msleep(msecs: 20);
	} while (time_before(jiffies, exp_time));

	return 0;
}
680 | |
/* Callback table handed to the common mlxfw flashing framework. */
static const struct mlxfw_dev_ops mlx5_mlxfw_dev_ops = {
	.component_query = mlx5_component_query,
	.fsm_lock = mlx5_fsm_lock,
	.fsm_component_update = mlx5_fsm_component_update,
	.fsm_block_download = mlx5_fsm_block_download,
	.fsm_component_verify = mlx5_fsm_component_verify,
	.fsm_activate = mlx5_fsm_activate,
	.fsm_reactivate = mlx5_fsm_reactivate,
	.fsm_query_state = mlx5_fsm_query_state,
	.fsm_cancel = mlx5_fsm_cancel,
	.fsm_release = mlx5_fsm_release
};
693 | |
694 | int mlx5_firmware_flash(struct mlx5_core_dev *dev, |
695 | const struct firmware *firmware, |
696 | struct netlink_ext_ack *extack) |
697 | { |
698 | struct mlx5_mlxfw_dev mlx5_mlxfw_dev = { |
699 | .mlxfw_dev = { |
700 | .ops = &mlx5_mlxfw_dev_ops, |
701 | .psid = dev->board_id, |
702 | .psid_size = strlen(dev->board_id), |
703 | .devlink = priv_to_devlink(priv: dev), |
704 | }, |
705 | .mlx5_core_dev = dev |
706 | }; |
707 | |
708 | if (!MLX5_CAP_GEN(dev, mcam_reg) || |
709 | !MLX5_CAP_MCAM_REG(dev, mcqi) || |
710 | !MLX5_CAP_MCAM_REG(dev, mcc) || |
711 | !MLX5_CAP_MCAM_REG(dev, mcda)) { |
712 | pr_info("%s flashing isn't supported by the running FW\n" , __func__); |
713 | return -EOPNOTSUPP; |
714 | } |
715 | |
716 | return mlxfw_firmware_flash(mlxfw_dev: &mlx5_mlxfw_dev.mlxfw_dev, |
717 | firmware, extack); |
718 | } |
719 | |
720 | static int mlx5_reg_mcqi_version_query(struct mlx5_core_dev *dev, |
721 | u16 component_index, bool read_pending, |
722 | u32 *mcqi_version_out) |
723 | { |
724 | return mlx5_reg_mcqi_query(dev, component_index, read_pending, |
725 | info_type: MCQI_INFO_TYPE_VERSION, |
726 | MLX5_ST_SZ_BYTES(mcqi_version), |
727 | mcqi_data: mcqi_version_out); |
728 | } |
729 | |
730 | static int mlx5_reg_mcqs_query(struct mlx5_core_dev *dev, u32 *out, |
731 | u16 component_index) |
732 | { |
733 | u8 out_sz = MLX5_ST_SZ_BYTES(mcqs_reg); |
734 | u32 in[MLX5_ST_SZ_DW(mcqs_reg)] = {}; |
735 | int err; |
736 | |
737 | memset(out, 0, out_sz); |
738 | |
739 | MLX5_SET(mcqs_reg, in, component_index, component_index); |
740 | |
741 | err = mlx5_core_access_reg(dev, data_in: in, size_in: sizeof(in), data_out: out, |
742 | size_out: out_sz, reg_num: MLX5_REG_MCQS, arg: 0, write: 0); |
743 | return err; |
744 | } |
745 | |
/* scans component index sequentially, to find the boot img index */
static int mlx5_get_boot_img_component_index(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(mcqs_reg)] = {};
	u16 identifier, component_idx = 0;
	bool quit;
	int err;

	/* Walk MCQS entries until the boot image is found or the FW flags
	 * the last index. The `++component_idx` in the condition also
	 * bounds the loop: if the u16 index wraps back to 0 the condition
	 * becomes false, so a FW that never sets last_index_flag cannot
	 * spin forever.
	 */
	do {
		err = mlx5_reg_mcqs_query(dev, out, component_index: component_idx);
		if (err)
			return err;

		identifier = MLX5_GET(mcqs_reg, out, identifier);
		quit = !!MLX5_GET(mcqs_reg, out, last_index_flag);
		quit |= identifier == MCQS_IDENTIFIER_BOOT_IMG;
	} while (!quit && ++component_idx);

	if (identifier != MCQS_IDENTIFIER_BOOT_IMG) {
		mlx5_core_warn(dev, "mcqs: can't find boot_img component ix, last scanned idx %d\n" ,
			       component_idx);
		return -EOPNOTSUPP;
	}

	/* Non-negative index on success; callers check for < 0. */
	return component_idx;
}
772 | |
773 | static int |
774 | mlx5_fw_image_pending(struct mlx5_core_dev *dev, |
775 | int component_index, |
776 | bool *pending_version_exists) |
777 | { |
778 | u32 out[MLX5_ST_SZ_DW(mcqs_reg)]; |
779 | u8 component_update_state; |
780 | int err; |
781 | |
782 | err = mlx5_reg_mcqs_query(dev, out, component_index); |
783 | if (err) |
784 | return err; |
785 | |
786 | component_update_state = MLX5_GET(mcqs_reg, out, component_update_state); |
787 | |
788 | if (component_update_state == MCQS_UPDATE_STATE_IDLE) { |
789 | *pending_version_exists = false; |
790 | } else if (component_update_state == MCQS_UPDATE_STATE_ACTIVE_PENDING_RESET) { |
791 | *pending_version_exists = true; |
792 | } else { |
793 | mlx5_core_warn(dev, |
794 | "mcqs: can't read pending fw version while fw state is %d\n" , |
795 | component_update_state); |
796 | return -ENODATA; |
797 | } |
798 | return 0; |
799 | } |
800 | |
/* mlx5_fw_version_query - report the running and pending FW versions.
 *
 * Locates the boot image component, reads its running version, and — only
 * when a stored image is pending a reset — reads the pending version too;
 * otherwise *pending_ver is set to 0.
 *
 * Requires the MCQI and MCQS access registers; returns -EOPNOTSUPP when
 * the FW lacks them, or a negative errno from any failing step.
 */
int mlx5_fw_version_query(struct mlx5_core_dev *dev,
			  u32 *running_ver, u32 *pending_ver)
{
	u32 reg_mcqi_version[MLX5_ST_SZ_DW(mcqi_version)] = {};
	bool pending_version_exists;
	int component_index;
	int err;

	if (!MLX5_CAP_GEN(dev, mcam_reg) || !MLX5_CAP_MCAM_REG(dev, mcqi) ||
	    !MLX5_CAP_MCAM_REG(dev, mcqs)) {
		mlx5_core_warn(dev, "fw query isn't supported by the FW\n" );
		return -EOPNOTSUPP;
	}

	component_index = mlx5_get_boot_img_component_index(dev);
	if (component_index < 0)
		return component_index;

	err = mlx5_reg_mcqi_version_query(dev, component_index,
					  read_pending: MCQI_FW_RUNNING_VERSION,
					  mcqi_version_out: reg_mcqi_version);
	if (err)
		return err;

	*running_ver = MLX5_GET(mcqi_version, reg_mcqi_version, version);

	err = mlx5_fw_image_pending(dev, component_index, pending_version_exists: &pending_version_exists);
	if (err)
		return err;

	if (!pending_version_exists) {
		*pending_ver = 0;
		return 0;
	}

	/* Re-use the same buffer for the stored (pending) image version. */
	err = mlx5_reg_mcqi_version_query(dev, component_index,
					  read_pending: MCQI_FW_STORED_VERSION,
					  mcqi_version_out: reg_mcqi_version);
	if (err)
		return err;

	*pending_ver = MLX5_GET(mcqi_version, reg_mcqi_version, version);

	return 0;
}
846 | |