1 | /* |
2 | * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved. |
3 | * |
4 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the |
8 | * OpenIB.org BSD license below: |
9 | * |
10 | * Redistribution and use in source and binary forms, with or |
11 | * without modification, are permitted provided that the following |
12 | * conditions are met: |
13 | * |
14 | * - Redistributions of source code must retain the above |
15 | * copyright notice, this list of conditions and the following |
16 | * disclaimer. |
17 | * |
18 | * - Redistributions in binary form must reproduce the above |
19 | * copyright notice, this list of conditions and the following |
20 | * disclaimer in the documentation and/or other materials |
21 | * provided with the distribution. |
22 | * |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
30 | * SOFTWARE. |
31 | */ |
32 | |
33 | #include <linux/highmem.h> |
34 | #include <linux/errno.h> |
35 | #include <linux/pci.h> |
36 | #include <linux/dma-mapping.h> |
37 | #include <linux/slab.h> |
38 | #include <linux/delay.h> |
39 | #include <linux/random.h> |
40 | #include <linux/mlx5/driver.h> |
41 | #include <linux/mlx5/eq.h> |
42 | #include <linux/debugfs.h> |
43 | |
44 | #include "mlx5_core.h" |
45 | #include "lib/eq.h" |
46 | #include "lib/tout.h" |
47 | #define CREATE_TRACE_POINTS |
48 | #include "diag/cmd_tracepoint.h" |
49 | |
50 | struct mlx5_ifc_mbox_out_bits { |
51 | u8 status[0x8]; |
52 | u8 reserved_at_8[0x18]; |
53 | |
54 | u8 syndrome[0x20]; |
55 | |
56 | u8 reserved_at_40[0x40]; |
57 | }; |
58 | |
59 | struct mlx5_ifc_mbox_in_bits { |
60 | u8 opcode[0x10]; |
61 | u8 uid[0x10]; |
62 | |
63 | u8 reserved_at_20[0x10]; |
64 | u8 op_mod[0x10]; |
65 | |
66 | u8 reserved_at_40[0x40]; |
67 | }; |
68 | |
69 | enum { |
70 | CMD_IF_REV = 5, |
71 | }; |
72 | |
73 | enum { |
74 | CMD_MODE_POLLING, |
75 | CMD_MODE_EVENTS |
76 | }; |
77 | |
78 | enum { |
79 | MLX5_CMD_DELIVERY_STAT_OK = 0x0, |
80 | MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1, |
81 | MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2, |
82 | MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3, |
83 | MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4, |
84 | MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5, |
85 | MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6, |
86 | MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7, |
87 | MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8, |
88 | MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9, |
89 | MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10, |
90 | }; |
91 | |
92 | static u16 in_to_opcode(void *in) |
93 | { |
94 | return MLX5_GET(mbox_in, in, opcode); |
95 | } |
96 | |
97 | /* Returns true for opcodes that might be triggered very frequently and throttle |
98 | * the command interface. Limit their command slots usage. |
99 | */ |
100 | static bool mlx5_cmd_is_throttle_opcode(u16 op) |
101 | { |
102 | switch (op) { |
103 | case MLX5_CMD_OP_CREATE_GENERAL_OBJECT: |
104 | case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT: |
105 | case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT: |
106 | case MLX5_CMD_OP_QUERY_GENERAL_OBJECT: |
107 | case MLX5_CMD_OP_SYNC_CRYPTO: |
108 | return true; |
109 | } |
110 | return false; |
111 | } |
112 | |
113 | static struct mlx5_cmd_work_ent * |
114 | cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in, |
115 | struct mlx5_cmd_msg *out, void *uout, int uout_size, |
116 | mlx5_cmd_cbk_t cbk, void *context, int page_queue) |
117 | { |
118 | gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL; |
119 | struct mlx5_cmd_work_ent *ent; |
120 | |
121 | ent = kzalloc(sizeof(*ent), alloc_flags); |
122 | if (!ent) |
123 | return ERR_PTR(-ENOMEM); |
124 | |
125 | ent->idx = -EINVAL; |
126 | ent->in = in; |
127 | ent->out = out; |
128 | ent->uout = uout; |
129 | ent->uout_size = uout_size; |
130 | ent->callback = cbk; |
131 | ent->context = context; |
132 | ent->cmd = cmd; |
133 | ent->page_queue = page_queue; |
134 | ent->op = in_to_opcode(in->first.data); |
135 | refcount_set(&ent->refcnt, 1); |
136 | |
137 | return ent; |
138 | } |
139 | |
140 | static void cmd_free_ent(struct mlx5_cmd_work_ent *ent) |
141 | { |
142 | kfree(ent); |
143 | } |
144 | |
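/* Allocate a non-zero token; the token is copied into the command
 * descriptor and each mailbox block so a completion can be matched
 * against the entry that issued it.
 */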
145 | static u8 alloc_token(struct mlx5_cmd *cmd) |
146 | { |
147 | u8 token; |
148 | |
149 | spin_lock(&cmd->token_lock); |
150 | cmd->token++; |
151 | if (cmd->token == 0) |
152 | cmd->token++; |
153 | token = cmd->token; |
154 | spin_unlock(&cmd->token_lock); |
155 | |
156 | return token; |
157 | } |
158 | |
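/* Reserve a free command queue slot (a bit in the bitmask) for this
 * entry; returns -ENOMEM if all regular command slots are in use.
 */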
159 | static int cmd_alloc_index(struct mlx5_cmd *cmd, struct mlx5_cmd_work_ent *ent) |
160 | { |
161 | unsigned long flags; |
162 | int ret; |
163 | |
164 | spin_lock_irqsave(&cmd->alloc_lock, flags); |
165 | ret = find_first_bit(&cmd->vars.bitmask, cmd->vars.max_reg_cmds); |
166 | if (ret < cmd->vars.max_reg_cmds) { |
167 | clear_bit(ret, &cmd->vars.bitmask); |
168 | ent->idx = ret; |
169 | cmd->ent_arr[ent->idx] = ent; |
170 | } |
171 | spin_unlock_irqrestore(&cmd->alloc_lock, flags); |
172 | |
173 | return ret < cmd->vars.max_reg_cmds ? ret : -ENOMEM; |
174 | } |
175 | |
176 | static void cmd_free_index(struct mlx5_cmd *cmd, int idx) |
177 | { |
178 | lockdep_assert_held(&cmd->alloc_lock); |
179 | set_bit(idx, &cmd->vars.bitmask); |
180 | } |
181 | |
182 | static void cmd_ent_get(struct mlx5_cmd_work_ent *ent) |
183 | { |
184 | refcount_inc(&ent->refcnt); |
185 | } |
186 | |
187 | static void cmd_ent_put(struct mlx5_cmd_work_ent *ent) |
188 | { |
189 | struct mlx5_cmd *cmd = ent->cmd; |
190 | unsigned long flags; |
191 | |
192 | spin_lock_irqsave(&cmd->alloc_lock, flags); |
193 | if (!refcount_dec_and_test(&ent->refcnt)) |
194 | goto out; |
195 | |
196 | if (ent->idx >= 0) { |
197 | cmd_free_index(cmd, ent->idx); |
198 | up(ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem); |
199 | } |
200 | |
201 | cmd_free_ent(ent); |
202 | out: |
203 | spin_unlock_irqrestore(&cmd->alloc_lock, flags); |
204 | } |
205 | |
206 | static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx) |
207 | { |
208 | return cmd->cmd_buf + (idx << cmd->vars.log_stride); |
209 | } |
210 | |
211 | static int mlx5_calc_cmd_blocks(struct mlx5_cmd_msg *msg) |
212 | { |
213 | int size = msg->len; |
214 | int blen = size - min_t(int, sizeof(msg->first.data), size); |
215 | |
216 | return DIV_ROUND_UP(blen, MLX5_CMD_DATA_BLOCK_SIZE); |
217 | } |
218 | |
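/* XOR all bytes in [offset, offset + len); used to compute and verify
 * the 8-bit signatures of command descriptors and mailbox blocks.
 */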
219 | static u8 xor8_buf(void *buf, size_t offset, int len) |
220 | { |
221 | u8 *ptr = buf; |
222 | u8 sum = 0; |
223 | int i; |
224 | int end = len + offset; |
225 | |
226 | for (i = offset; i < end; i++) |
227 | sum ^= ptr[i]; |
228 | |
229 | return sum; |
230 | } |
231 | |
232 | static int verify_block_sig(struct mlx5_cmd_prot_block *block) |
233 | { |
234 | size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0); |
235 | int xor_len = sizeof(*block) - sizeof(block->data) - 1; |
236 | |
237 | if (xor8_buf(block, rsvd0_off, xor_len) != 0xff) |
238 | return -EHWPOISON; |
239 | |
240 | if (xor8_buf(block, 0, sizeof(*block)) != 0xff) |
241 | return -EHWPOISON; |
242 | |
243 | return 0; |
244 | } |
245 | |
246 | static void calc_block_sig(struct mlx5_cmd_prot_block *block) |
247 | { |
248 | int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2; |
249 | size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0); |
250 | |
251 | block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len); |
252 | block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1); |
253 | } |
254 | |
255 | static void calc_chain_sig(struct mlx5_cmd_msg *msg) |
256 | { |
257 | struct mlx5_cmd_mailbox *next = msg->next; |
258 | int n = mlx5_calc_cmd_blocks(msg); |
259 | int i = 0; |
260 | |
261 | for (i = 0; i < n && next; i++) { |
262 | calc_block_sig(next->buf); |
263 | next = next->next; |
264 | } |
265 | } |
266 | |
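/* Sign the command descriptor and, when checksumming is enabled, every
 * mailbox block of the input and output message chains.
 */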
267 | static void set_signature(struct mlx5_cmd_work_ent *ent, int csum) |
268 | { |
269 | ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay)); |
270 | if (csum) { |
271 | calc_chain_sig(ent->in); |
272 | calc_chain_sig(ent->out); |
273 | } |
274 | } |
275 | |
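/* Busy-poll the ownership bit until firmware hands the descriptor back
 * to software, giving up after the command timeout plus a 1s grace period.
 */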
276 | static void poll_timeout(struct mlx5_cmd_work_ent *ent) |
277 | { |
278 | struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev, cmd); |
279 | u64 cmd_to_ms = mlx5_tout_ms(dev, CMD); |
280 | unsigned long poll_end; |
281 | u8 own; |
282 | |
283 | poll_end = jiffies + msecs_to_jiffies(cmd_to_ms + 1000); |
284 | |
285 | do { |
286 | own = READ_ONCE(ent->lay->status_own); |
287 | if (!(own & CMD_OWNER_HW)) { |
288 | ent->ret = 0; |
289 | return; |
290 | } |
291 | cond_resched(); |
292 | } while (time_before(jiffies, poll_end)); |
293 | |
294 | ent->ret = -ETIMEDOUT; |
295 | } |
296 | |
297 | static int verify_signature(struct mlx5_cmd_work_ent *ent) |
298 | { |
299 | struct mlx5_cmd_mailbox *next = ent->out->next; |
300 | int n = mlx5_calc_cmd_blocks(ent->out); |
301 | int err; |
302 | u8 sig; |
303 | int i = 0; |
304 | |
305 | sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay)); |
306 | if (sig != 0xff) |
307 | return -EHWPOISON; |
308 | |
309 | for (i = 0; i < n && next; i++) { |
310 | err = verify_block_sig(next->buf); |
311 | if (err) |
312 | return -EHWPOISON; |
313 | |
314 | next = next->next; |
315 | } |
316 | |
317 | return 0; |
318 | } |
319 | |
320 | static void dump_buf(void *buf, int size, int data_only, int offset, int idx) |
321 | { |
322 | __be32 *p = buf; |
323 | int i; |
324 | |
325 | for (i = 0; i < size; i += 16) { |
326 | pr_debug("cmd[%d]: %03x: %08x %08x %08x %08x\n", idx, offset, |
327 | be32_to_cpu(p[0]), be32_to_cpu(p[1]), |
328 | be32_to_cpu(p[2]), be32_to_cpu(p[3])); |
329 | p += 4; |
330 | offset += 16; |
331 | } |
332 | if (!data_only) |
333 | pr_debug("\n"); |
334 | } |
335 | |
336 | static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, |
337 | u32 *synd, u8 *status) |
338 | { |
339 | *synd = 0; |
340 | *status = 0; |
341 | |
342 | switch (op) { |
343 | case MLX5_CMD_OP_TEARDOWN_HCA: |
344 | case MLX5_CMD_OP_DISABLE_HCA: |
345 | case MLX5_CMD_OP_MANAGE_PAGES: |
346 | case MLX5_CMD_OP_DESTROY_MKEY: |
347 | case MLX5_CMD_OP_DESTROY_EQ: |
348 | case MLX5_CMD_OP_DESTROY_CQ: |
349 | case MLX5_CMD_OP_DESTROY_QP: |
350 | case MLX5_CMD_OP_DESTROY_PSV: |
351 | case MLX5_CMD_OP_DESTROY_SRQ: |
352 | case MLX5_CMD_OP_DESTROY_XRC_SRQ: |
353 | case MLX5_CMD_OP_DESTROY_XRQ: |
354 | case MLX5_CMD_OP_DESTROY_DCT: |
355 | case MLX5_CMD_OP_DEALLOC_Q_COUNTER: |
356 | case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT: |
357 | case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT: |
358 | case MLX5_CMD_OP_DEALLOC_PD: |
359 | case MLX5_CMD_OP_DEALLOC_UAR: |
360 | case MLX5_CMD_OP_DETACH_FROM_MCG: |
361 | case MLX5_CMD_OP_DEALLOC_XRCD: |
362 | case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN: |
363 | case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT: |
364 | case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY: |
365 | case MLX5_CMD_OP_DESTROY_LAG: |
366 | case MLX5_CMD_OP_DESTROY_VPORT_LAG: |
367 | case MLX5_CMD_OP_DESTROY_TIR: |
368 | case MLX5_CMD_OP_DESTROY_SQ: |
369 | case MLX5_CMD_OP_DESTROY_RQ: |
370 | case MLX5_CMD_OP_DESTROY_RMP: |
371 | case MLX5_CMD_OP_DESTROY_TIS: |
372 | case MLX5_CMD_OP_DESTROY_RQT: |
373 | case MLX5_CMD_OP_DESTROY_FLOW_TABLE: |
374 | case MLX5_CMD_OP_DESTROY_FLOW_GROUP: |
375 | case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY: |
376 | case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER: |
377 | case MLX5_CMD_OP_2ERR_QP: |
378 | case MLX5_CMD_OP_2RST_QP: |
379 | case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT: |
380 | case MLX5_CMD_OP_MODIFY_FLOW_TABLE: |
381 | case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: |
382 | case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT: |
383 | case MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT: |
384 | case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT: |
385 | case MLX5_CMD_OP_FPGA_DESTROY_QP: |
386 | case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT: |
387 | case MLX5_CMD_OP_DEALLOC_MEMIC: |
388 | case MLX5_CMD_OP_PAGE_FAULT_RESUME: |
389 | case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS: |
390 | case MLX5_CMD_OP_DEALLOC_SF: |
391 | case MLX5_CMD_OP_DESTROY_UCTX: |
392 | case MLX5_CMD_OP_DESTROY_UMEM: |
393 | case MLX5_CMD_OP_MODIFY_RQT: |
394 | return MLX5_CMD_STAT_OK; |
395 | |
396 | case MLX5_CMD_OP_QUERY_HCA_CAP: |
397 | case MLX5_CMD_OP_QUERY_ADAPTER: |
398 | case MLX5_CMD_OP_INIT_HCA: |
399 | case MLX5_CMD_OP_ENABLE_HCA: |
400 | case MLX5_CMD_OP_QUERY_PAGES: |
401 | case MLX5_CMD_OP_SET_HCA_CAP: |
402 | case MLX5_CMD_OP_QUERY_ISSI: |
403 | case MLX5_CMD_OP_SET_ISSI: |
404 | case MLX5_CMD_OP_CREATE_MKEY: |
405 | case MLX5_CMD_OP_QUERY_MKEY: |
406 | case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS: |
407 | case MLX5_CMD_OP_CREATE_EQ: |
408 | case MLX5_CMD_OP_QUERY_EQ: |
409 | case MLX5_CMD_OP_GEN_EQE: |
410 | case MLX5_CMD_OP_CREATE_CQ: |
411 | case MLX5_CMD_OP_QUERY_CQ: |
412 | case MLX5_CMD_OP_MODIFY_CQ: |
413 | case MLX5_CMD_OP_CREATE_QP: |
414 | case MLX5_CMD_OP_RST2INIT_QP: |
415 | case MLX5_CMD_OP_INIT2RTR_QP: |
416 | case MLX5_CMD_OP_RTR2RTS_QP: |
417 | case MLX5_CMD_OP_RTS2RTS_QP: |
418 | case MLX5_CMD_OP_SQERR2RTS_QP: |
419 | case MLX5_CMD_OP_QUERY_QP: |
420 | case MLX5_CMD_OP_SQD_RTS_QP: |
421 | case MLX5_CMD_OP_INIT2INIT_QP: |
422 | case MLX5_CMD_OP_CREATE_PSV: |
423 | case MLX5_CMD_OP_CREATE_SRQ: |
424 | case MLX5_CMD_OP_QUERY_SRQ: |
425 | case MLX5_CMD_OP_ARM_RQ: |
426 | case MLX5_CMD_OP_CREATE_XRC_SRQ: |
427 | case MLX5_CMD_OP_QUERY_XRC_SRQ: |
428 | case MLX5_CMD_OP_ARM_XRC_SRQ: |
429 | case MLX5_CMD_OP_CREATE_XRQ: |
430 | case MLX5_CMD_OP_QUERY_XRQ: |
431 | case MLX5_CMD_OP_ARM_XRQ: |
432 | case MLX5_CMD_OP_CREATE_DCT: |
433 | case MLX5_CMD_OP_DRAIN_DCT: |
434 | case MLX5_CMD_OP_QUERY_DCT: |
435 | case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION: |
436 | case MLX5_CMD_OP_QUERY_VPORT_STATE: |
437 | case MLX5_CMD_OP_MODIFY_VPORT_STATE: |
438 | case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT: |
439 | case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT: |
440 | case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT: |
441 | case MLX5_CMD_OP_QUERY_ROCE_ADDRESS: |
442 | case MLX5_CMD_OP_SET_ROCE_ADDRESS: |
443 | case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT: |
444 | case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT: |
445 | case MLX5_CMD_OP_QUERY_HCA_VPORT_GID: |
446 | case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY: |
447 | case MLX5_CMD_OP_QUERY_VNIC_ENV: |
448 | case MLX5_CMD_OP_QUERY_VPORT_COUNTER: |
449 | case MLX5_CMD_OP_ALLOC_Q_COUNTER: |
450 | case MLX5_CMD_OP_QUERY_Q_COUNTER: |
451 | case MLX5_CMD_OP_SET_MONITOR_COUNTER: |
452 | case MLX5_CMD_OP_ARM_MONITOR_COUNTER: |
453 | case MLX5_CMD_OP_SET_PP_RATE_LIMIT: |
454 | case MLX5_CMD_OP_QUERY_RATE_LIMIT: |
455 | case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT: |
456 | case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT: |
457 | case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT: |
458 | case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT: |
459 | case MLX5_CMD_OP_ALLOC_PD: |
460 | case MLX5_CMD_OP_ALLOC_UAR: |
461 | case MLX5_CMD_OP_CONFIG_INT_MODERATION: |
462 | case MLX5_CMD_OP_ACCESS_REG: |
463 | case MLX5_CMD_OP_ATTACH_TO_MCG: |
464 | case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG: |
465 | case MLX5_CMD_OP_MAD_IFC: |
466 | case MLX5_CMD_OP_QUERY_MAD_DEMUX: |
467 | case MLX5_CMD_OP_SET_MAD_DEMUX: |
468 | case MLX5_CMD_OP_NOP: |
469 | case MLX5_CMD_OP_ALLOC_XRCD: |
470 | case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN: |
471 | case MLX5_CMD_OP_QUERY_CONG_STATUS: |
472 | case MLX5_CMD_OP_MODIFY_CONG_STATUS: |
473 | case MLX5_CMD_OP_QUERY_CONG_PARAMS: |
474 | case MLX5_CMD_OP_MODIFY_CONG_PARAMS: |
475 | case MLX5_CMD_OP_QUERY_CONG_STATISTICS: |
476 | case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT: |
477 | case MLX5_CMD_OP_SET_L2_TABLE_ENTRY: |
478 | case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY: |
479 | case MLX5_CMD_OP_CREATE_LAG: |
480 | case MLX5_CMD_OP_MODIFY_LAG: |
481 | case MLX5_CMD_OP_QUERY_LAG: |
482 | case MLX5_CMD_OP_CREATE_VPORT_LAG: |
483 | case MLX5_CMD_OP_CREATE_TIR: |
484 | case MLX5_CMD_OP_MODIFY_TIR: |
485 | case MLX5_CMD_OP_QUERY_TIR: |
486 | case MLX5_CMD_OP_CREATE_SQ: |
487 | case MLX5_CMD_OP_MODIFY_SQ: |
488 | case MLX5_CMD_OP_QUERY_SQ: |
489 | case MLX5_CMD_OP_CREATE_RQ: |
490 | case MLX5_CMD_OP_MODIFY_RQ: |
491 | case MLX5_CMD_OP_QUERY_RQ: |
492 | case MLX5_CMD_OP_CREATE_RMP: |
493 | case MLX5_CMD_OP_MODIFY_RMP: |
494 | case MLX5_CMD_OP_QUERY_RMP: |
495 | case MLX5_CMD_OP_CREATE_TIS: |
496 | case MLX5_CMD_OP_MODIFY_TIS: |
497 | case MLX5_CMD_OP_QUERY_TIS: |
498 | case MLX5_CMD_OP_CREATE_RQT: |
499 | case MLX5_CMD_OP_QUERY_RQT: |
500 | |
501 | case MLX5_CMD_OP_CREATE_FLOW_TABLE: |
502 | case MLX5_CMD_OP_QUERY_FLOW_TABLE: |
503 | case MLX5_CMD_OP_CREATE_FLOW_GROUP: |
504 | case MLX5_CMD_OP_QUERY_FLOW_GROUP: |
505 | case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY: |
506 | case MLX5_CMD_OP_ALLOC_FLOW_COUNTER: |
507 | case MLX5_CMD_OP_QUERY_FLOW_COUNTER: |
508 | case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT: |
509 | case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT: |
510 | case MLX5_CMD_OP_FPGA_CREATE_QP: |
511 | case MLX5_CMD_OP_FPGA_MODIFY_QP: |
512 | case MLX5_CMD_OP_FPGA_QUERY_QP: |
513 | case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS: |
514 | case MLX5_CMD_OP_CREATE_GENERAL_OBJECT: |
515 | case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT: |
516 | case MLX5_CMD_OP_QUERY_GENERAL_OBJECT: |
517 | case MLX5_CMD_OP_CREATE_UCTX: |
518 | case MLX5_CMD_OP_CREATE_UMEM: |
519 | case MLX5_CMD_OP_ALLOC_MEMIC: |
520 | case MLX5_CMD_OP_MODIFY_XRQ: |
521 | case MLX5_CMD_OP_RELEASE_XRQ_ERROR: |
522 | case MLX5_CMD_OP_QUERY_VHCA_STATE: |
523 | case MLX5_CMD_OP_MODIFY_VHCA_STATE: |
524 | case MLX5_CMD_OP_ALLOC_SF: |
525 | case MLX5_CMD_OP_SUSPEND_VHCA: |
526 | case MLX5_CMD_OP_RESUME_VHCA: |
527 | case MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE: |
528 | case MLX5_CMD_OP_SAVE_VHCA_STATE: |
529 | case MLX5_CMD_OP_LOAD_VHCA_STATE: |
530 | case MLX5_CMD_OP_SYNC_CRYPTO: |
531 | case MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS: |
532 | *status = MLX5_DRIVER_STATUS_ABORTED; |
533 | *synd = MLX5_DRIVER_SYND; |
534 | return -ENOLINK; |
535 | default: |
536 | mlx5_core_err(dev, "Unknown FW command (%d)\n", op); |
537 | return -EINVAL; |
538 | } |
539 | } |
540 | |
541 | const char *mlx5_command_str(int command) |
542 | { |
543 | #define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd |
544 | |
545 | switch (command) { |
546 | MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP); |
547 | MLX5_COMMAND_STR_CASE(QUERY_ADAPTER); |
548 | MLX5_COMMAND_STR_CASE(INIT_HCA); |
549 | MLX5_COMMAND_STR_CASE(TEARDOWN_HCA); |
550 | MLX5_COMMAND_STR_CASE(ENABLE_HCA); |
551 | MLX5_COMMAND_STR_CASE(DISABLE_HCA); |
552 | MLX5_COMMAND_STR_CASE(QUERY_PAGES); |
553 | MLX5_COMMAND_STR_CASE(MANAGE_PAGES); |
554 | MLX5_COMMAND_STR_CASE(SET_HCA_CAP); |
555 | MLX5_COMMAND_STR_CASE(QUERY_ISSI); |
556 | MLX5_COMMAND_STR_CASE(SET_ISSI); |
557 | MLX5_COMMAND_STR_CASE(SET_DRIVER_VERSION); |
558 | MLX5_COMMAND_STR_CASE(CREATE_MKEY); |
559 | MLX5_COMMAND_STR_CASE(QUERY_MKEY); |
560 | MLX5_COMMAND_STR_CASE(DESTROY_MKEY); |
561 | MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS); |
562 | MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME); |
563 | MLX5_COMMAND_STR_CASE(CREATE_EQ); |
564 | MLX5_COMMAND_STR_CASE(DESTROY_EQ); |
565 | MLX5_COMMAND_STR_CASE(QUERY_EQ); |
566 | MLX5_COMMAND_STR_CASE(GEN_EQE); |
567 | MLX5_COMMAND_STR_CASE(CREATE_CQ); |
568 | MLX5_COMMAND_STR_CASE(DESTROY_CQ); |
569 | MLX5_COMMAND_STR_CASE(QUERY_CQ); |
570 | MLX5_COMMAND_STR_CASE(MODIFY_CQ); |
571 | MLX5_COMMAND_STR_CASE(CREATE_QP); |
572 | MLX5_COMMAND_STR_CASE(DESTROY_QP); |
573 | MLX5_COMMAND_STR_CASE(RST2INIT_QP); |
574 | MLX5_COMMAND_STR_CASE(INIT2RTR_QP); |
575 | MLX5_COMMAND_STR_CASE(RTR2RTS_QP); |
576 | MLX5_COMMAND_STR_CASE(RTS2RTS_QP); |
577 | MLX5_COMMAND_STR_CASE(SQERR2RTS_QP); |
578 | MLX5_COMMAND_STR_CASE(2ERR_QP); |
579 | MLX5_COMMAND_STR_CASE(2RST_QP); |
580 | MLX5_COMMAND_STR_CASE(QUERY_QP); |
581 | MLX5_COMMAND_STR_CASE(SQD_RTS_QP); |
582 | MLX5_COMMAND_STR_CASE(INIT2INIT_QP); |
583 | MLX5_COMMAND_STR_CASE(CREATE_PSV); |
584 | MLX5_COMMAND_STR_CASE(DESTROY_PSV); |
585 | MLX5_COMMAND_STR_CASE(CREATE_SRQ); |
586 | MLX5_COMMAND_STR_CASE(DESTROY_SRQ); |
587 | MLX5_COMMAND_STR_CASE(QUERY_SRQ); |
588 | MLX5_COMMAND_STR_CASE(ARM_RQ); |
589 | MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ); |
590 | MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ); |
591 | MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ); |
592 | MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ); |
593 | MLX5_COMMAND_STR_CASE(CREATE_DCT); |
594 | MLX5_COMMAND_STR_CASE(DESTROY_DCT); |
595 | MLX5_COMMAND_STR_CASE(DRAIN_DCT); |
596 | MLX5_COMMAND_STR_CASE(QUERY_DCT); |
597 | MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION); |
598 | MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE); |
599 | MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE); |
600 | MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT); |
601 | MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT); |
602 | MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT); |
603 | MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT); |
604 | MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS); |
605 | MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS); |
606 | MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT); |
607 | MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT); |
608 | MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID); |
609 | MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY); |
610 | MLX5_COMMAND_STR_CASE(QUERY_VNIC_ENV); |
611 | MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER); |
612 | MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER); |
613 | MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER); |
614 | MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER); |
615 | MLX5_COMMAND_STR_CASE(SET_MONITOR_COUNTER); |
616 | MLX5_COMMAND_STR_CASE(ARM_MONITOR_COUNTER); |
617 | MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT); |
618 | MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT); |
619 | MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT); |
620 | MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT); |
621 | MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT); |
622 | MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT); |
623 | MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT); |
624 | MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT); |
625 | MLX5_COMMAND_STR_CASE(ALLOC_PD); |
626 | MLX5_COMMAND_STR_CASE(DEALLOC_PD); |
627 | MLX5_COMMAND_STR_CASE(ALLOC_UAR); |
628 | MLX5_COMMAND_STR_CASE(DEALLOC_UAR); |
629 | MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION); |
630 | MLX5_COMMAND_STR_CASE(ACCESS_REG); |
631 | MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG); |
632 | MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG); |
633 | MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG); |
634 | MLX5_COMMAND_STR_CASE(MAD_IFC); |
635 | MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX); |
636 | MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX); |
637 | MLX5_COMMAND_STR_CASE(NOP); |
638 | MLX5_COMMAND_STR_CASE(ALLOC_XRCD); |
639 | MLX5_COMMAND_STR_CASE(DEALLOC_XRCD); |
640 | MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN); |
641 | MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN); |
642 | MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS); |
643 | MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS); |
644 | MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS); |
645 | MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS); |
646 | MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS); |
647 | MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT); |
648 | MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT); |
649 | MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY); |
650 | MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY); |
651 | MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY); |
652 | MLX5_COMMAND_STR_CASE(SET_WOL_ROL); |
653 | MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL); |
654 | MLX5_COMMAND_STR_CASE(CREATE_LAG); |
655 | MLX5_COMMAND_STR_CASE(MODIFY_LAG); |
656 | MLX5_COMMAND_STR_CASE(QUERY_LAG); |
657 | MLX5_COMMAND_STR_CASE(DESTROY_LAG); |
658 | MLX5_COMMAND_STR_CASE(CREATE_VPORT_LAG); |
659 | MLX5_COMMAND_STR_CASE(DESTROY_VPORT_LAG); |
660 | MLX5_COMMAND_STR_CASE(CREATE_TIR); |
661 | MLX5_COMMAND_STR_CASE(MODIFY_TIR); |
662 | MLX5_COMMAND_STR_CASE(DESTROY_TIR); |
663 | MLX5_COMMAND_STR_CASE(QUERY_TIR); |
664 | MLX5_COMMAND_STR_CASE(CREATE_SQ); |
665 | MLX5_COMMAND_STR_CASE(MODIFY_SQ); |
666 | MLX5_COMMAND_STR_CASE(DESTROY_SQ); |
667 | MLX5_COMMAND_STR_CASE(QUERY_SQ); |
668 | MLX5_COMMAND_STR_CASE(CREATE_RQ); |
669 | MLX5_COMMAND_STR_CASE(MODIFY_RQ); |
670 | MLX5_COMMAND_STR_CASE(DESTROY_RQ); |
671 | MLX5_COMMAND_STR_CASE(QUERY_RQ); |
672 | MLX5_COMMAND_STR_CASE(CREATE_RMP); |
673 | MLX5_COMMAND_STR_CASE(MODIFY_RMP); |
674 | MLX5_COMMAND_STR_CASE(DESTROY_RMP); |
675 | MLX5_COMMAND_STR_CASE(QUERY_RMP); |
676 | MLX5_COMMAND_STR_CASE(CREATE_TIS); |
677 | MLX5_COMMAND_STR_CASE(MODIFY_TIS); |
678 | MLX5_COMMAND_STR_CASE(DESTROY_TIS); |
679 | MLX5_COMMAND_STR_CASE(QUERY_TIS); |
680 | MLX5_COMMAND_STR_CASE(CREATE_RQT); |
681 | MLX5_COMMAND_STR_CASE(MODIFY_RQT); |
682 | MLX5_COMMAND_STR_CASE(DESTROY_RQT); |
683 | MLX5_COMMAND_STR_CASE(QUERY_RQT); |
684 | MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT); |
685 | MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE); |
686 | MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE); |
687 | MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE); |
688 | MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP); |
689 | MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP); |
690 | MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP); |
691 | MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY); |
692 | MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY); |
693 | MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY); |
694 | MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER); |
695 | MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER); |
696 | MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER); |
697 | MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE); |
698 | MLX5_COMMAND_STR_CASE(ALLOC_PACKET_REFORMAT_CONTEXT); |
699 | MLX5_COMMAND_STR_CASE(DEALLOC_PACKET_REFORMAT_CONTEXT); |
700 | MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT); |
701 | MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT); |
702 | MLX5_COMMAND_STR_CASE(FPGA_CREATE_QP); |
703 | MLX5_COMMAND_STR_CASE(FPGA_MODIFY_QP); |
704 | MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP); |
705 | MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP_COUNTERS); |
706 | MLX5_COMMAND_STR_CASE(FPGA_DESTROY_QP); |
707 | MLX5_COMMAND_STR_CASE(CREATE_XRQ); |
708 | MLX5_COMMAND_STR_CASE(DESTROY_XRQ); |
709 | MLX5_COMMAND_STR_CASE(QUERY_XRQ); |
710 | MLX5_COMMAND_STR_CASE(ARM_XRQ); |
711 | MLX5_COMMAND_STR_CASE(CREATE_GENERAL_OBJECT); |
712 | MLX5_COMMAND_STR_CASE(DESTROY_GENERAL_OBJECT); |
713 | MLX5_COMMAND_STR_CASE(MODIFY_GENERAL_OBJECT); |
714 | MLX5_COMMAND_STR_CASE(QUERY_GENERAL_OBJECT); |
715 | MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT); |
716 | MLX5_COMMAND_STR_CASE(ALLOC_MEMIC); |
717 | MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC); |
718 | MLX5_COMMAND_STR_CASE(QUERY_ESW_FUNCTIONS); |
719 | MLX5_COMMAND_STR_CASE(CREATE_UCTX); |
720 | MLX5_COMMAND_STR_CASE(DESTROY_UCTX); |
721 | MLX5_COMMAND_STR_CASE(CREATE_UMEM); |
722 | MLX5_COMMAND_STR_CASE(DESTROY_UMEM); |
723 | MLX5_COMMAND_STR_CASE(RELEASE_XRQ_ERROR); |
724 | MLX5_COMMAND_STR_CASE(MODIFY_XRQ); |
725 | MLX5_COMMAND_STR_CASE(QUERY_VHCA_STATE); |
726 | MLX5_COMMAND_STR_CASE(MODIFY_VHCA_STATE); |
727 | MLX5_COMMAND_STR_CASE(ALLOC_SF); |
728 | MLX5_COMMAND_STR_CASE(DEALLOC_SF); |
729 | MLX5_COMMAND_STR_CASE(SUSPEND_VHCA); |
730 | MLX5_COMMAND_STR_CASE(RESUME_VHCA); |
731 | MLX5_COMMAND_STR_CASE(QUERY_VHCA_MIGRATION_STATE); |
732 | MLX5_COMMAND_STR_CASE(SAVE_VHCA_STATE); |
733 | MLX5_COMMAND_STR_CASE(LOAD_VHCA_STATE); |
734 | MLX5_COMMAND_STR_CASE(SYNC_CRYPTO); |
735 | MLX5_COMMAND_STR_CASE(ALLOW_OTHER_VHCA_ACCESS); |
736 | default: return "unknown command opcode"; |
737 | } |
738 | } |
739 | |
740 | static const char *cmd_status_str(u8 status) |
741 | { |
742 | switch (status) { |
743 | case MLX5_CMD_STAT_OK: |
744 | return "OK"; |
745 | case MLX5_CMD_STAT_INT_ERR: |
746 | return "internal error"; |
747 | case MLX5_CMD_STAT_BAD_OP_ERR: |
748 | return "bad operation"; |
749 | case MLX5_CMD_STAT_BAD_PARAM_ERR: |
750 | return "bad parameter"; |
751 | case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: |
752 | return "bad system state"; |
753 | case MLX5_CMD_STAT_BAD_RES_ERR: |
754 | return "bad resource"; |
755 | case MLX5_CMD_STAT_RES_BUSY: |
756 | return "resource busy"; |
757 | case MLX5_CMD_STAT_LIM_ERR: |
758 | return "limits exceeded"; |
759 | case MLX5_CMD_STAT_BAD_RES_STATE_ERR: |
760 | return "bad resource state"; |
761 | case MLX5_CMD_STAT_IX_ERR: |
762 | return "bad index"; |
763 | case MLX5_CMD_STAT_NO_RES_ERR: |
764 | return "no resources"; |
765 | case MLX5_CMD_STAT_BAD_INP_LEN_ERR: |
766 | return "bad input length"; |
767 | case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: |
768 | return "bad output length"; |
769 | case MLX5_CMD_STAT_BAD_QP_STATE_ERR: |
770 | return "bad QP state"; |
771 | case MLX5_CMD_STAT_BAD_PKT_ERR: |
772 | return "bad packet (discarded)"; |
773 | case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: |
774 | return "bad size too many outstanding CQEs"; |
775 | default: |
776 | return "unknown status"; |
777 | } |
778 | } |
779 | |
780 | static int cmd_status_to_err(u8 status) |
781 | { |
782 | switch (status) { |
783 | case MLX5_CMD_STAT_OK: return 0; |
784 | case MLX5_CMD_STAT_INT_ERR: return -EIO; |
785 | case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL; |
786 | case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL; |
787 | case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; |
788 | case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL; |
789 | case MLX5_CMD_STAT_RES_BUSY: return -EBUSY; |
790 | case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM; |
791 | case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; |
792 | case MLX5_CMD_STAT_IX_ERR: return -EINVAL; |
793 | case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN; |
794 | case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO; |
795 | case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO; |
796 | case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL; |
797 | case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL; |
798 | case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL; |
799 | default: return -EIO; |
800 | } |
801 | } |
802 | |
803 | void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out) |
804 | { |
805 | u32 syndrome = MLX5_GET(mbox_out, out, syndrome); |
806 | u8 status = MLX5_GET(mbox_out, out, status); |
807 | |
808 | mlx5_core_err_rl(dev, |
809 | "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n", |
810 | mlx5_command_str(opcode), opcode, op_mod, |
811 | cmd_status_str(status), status, syndrome, cmd_status_to_err(status)); |
812 | } |
813 | EXPORT_SYMBOL(mlx5_cmd_out_err); |
814 | |
815 | static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out) |
816 | { |
817 | u16 opcode, op_mod; |
818 | u16 uid; |
819 | |
820 | opcode = in_to_opcode(in); |
821 | op_mod = MLX5_GET(mbox_in, in, op_mod); |
822 | uid = MLX5_GET(mbox_in, in, uid); |
823 | |
824 | if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY && |
825 | opcode != MLX5_CMD_OP_CREATE_UCTX) |
826 | mlx5_cmd_out_err(dev, opcode, op_mod, out); |
827 | } |
828 | |
829 | int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out) |
830 | { |
831 | /* aborted due to PCI error or via reset flow mlx5_cmd_trigger_completions() */ |
832 | if (err == -ENXIO) { |
833 | u16 opcode = in_to_opcode(in); |
834 | u32 syndrome; |
835 | u8 status; |
836 | |
837 | /* PCI Error, emulate command return status, for smooth reset */ |
838 | err = mlx5_internal_err_ret_value(dev, opcode, &syndrome, &status); |
839 | MLX5_SET(mbox_out, out, status, status); |
840 | MLX5_SET(mbox_out, out, syndrome, syndrome); |
841 | if (!err) |
842 | return 0; |
843 | } |
844 | |
845 | /* driver or FW delivery error */ |
846 | if (err != -EREMOTEIO && err) |
847 | return err; |
848 | |
849 | /* check outbox status */ |
850 | err = cmd_status_to_err(MLX5_GET(mbox_out, out, status)); |
851 | if (err) |
852 | cmd_status_print(dev, in, out); |
853 | |
854 | return err; |
855 | } |
856 | EXPORT_SYMBOL(mlx5_cmd_check); |
857 | |
858 | static void dump_command(struct mlx5_core_dev *dev, |
859 | struct mlx5_cmd_work_ent *ent, int input) |
860 | { |
861 | struct mlx5_cmd_msg *msg = input ? ent->in : ent->out; |
862 | struct mlx5_cmd_mailbox *next = msg->next; |
863 | int n = mlx5_calc_cmd_blocks(msg); |
864 | u16 op = ent->op; |
865 | int data_only; |
866 | u32 offset = 0; |
867 | int dump_len; |
868 | int i; |
869 | |
870 | mlx5_core_dbg(dev, "cmd[%d]: start dump\n", ent->idx); |
871 | data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA)); |
872 | |
873 | if (data_only) |
874 | mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA, |
875 | "cmd[%d]: dump command data %s(0x%x) %s\n", |
876 | ent->idx, mlx5_command_str(op), op, |
877 | input ? "INPUT" : "OUTPUT"); |
878 | else |
879 | mlx5_core_dbg(dev, "cmd[%d]: dump command %s(0x%x) %s\n", |
880 | ent->idx, mlx5_command_str(op), op, |
881 | input ? "INPUT" : "OUTPUT"); |
882 | |
883 | if (data_only) { |
884 | if (input) { |
885 | dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset, ent->idx); |
886 | offset += sizeof(ent->lay->in); |
887 | } else { |
888 | dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset, ent->idx); |
889 | offset += sizeof(ent->lay->out); |
890 | } |
891 | } else { |
892 | dump_buf(ent->lay, sizeof(*ent->lay), 0, offset, ent->idx); |
893 | offset += sizeof(*ent->lay); |
894 | } |
895 | |
896 | for (i = 0; i < n && next; i++) { |
897 | if (data_only) { |
898 | dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset); |
899 | dump_buf(next->buf, dump_len, 1, offset, ent->idx); |
900 | offset += MLX5_CMD_DATA_BLOCK_SIZE; |
901 | } else { |
902 | mlx5_core_dbg(dev, "cmd[%d]: command block:\n", ent->idx); |
903 | dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset, |
904 | ent->idx); |
905 | offset += sizeof(struct mlx5_cmd_prot_block); |
906 | } |
907 | next = next->next; |
908 | } |
909 | |
910 | if (data_only) |
911 | pr_debug("\n"); |
912 | |
913 | mlx5_core_dbg(dev, "cmd[%d]: end dump\n", ent->idx); |
914 | } |
915 | |
916 | static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced); |
917 | |
918 | static void cb_timeout_handler(struct work_struct *work) |
919 | { |
920 | struct delayed_work *dwork = container_of(work, struct delayed_work, |
921 | work); |
922 | struct mlx5_cmd_work_ent *ent = container_of(dwork, |
923 | struct mlx5_cmd_work_ent, |
924 | cb_timeout_work); |
925 | struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev, |
926 | cmd); |
927 | |
928 | mlx5_cmd_eq_recover(dev); |
929 | |
930 | /* Maybe got handled by eq recover ? */ |
931 | if (!test_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state)) { |
932 | mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, recovered after timeout\n", ent->idx, |
933 | mlx5_command_str(ent->op), ent->op); |
934 | goto out; /* phew, already handled */ |
935 | } |
936 | |
937 | ent->ret = -ETIMEDOUT; |
938 | mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n", |
939 | ent->idx, mlx5_command_str(ent->op), ent->op); |
940 | mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true); |
941 | |
942 | out: |
943 | cmd_ent_put(ent); /* for the cmd_ent_get() took on schedule delayed work */ |
944 | } |
945 | |
946 | static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg); |
947 | static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, |
948 | struct mlx5_cmd_msg *msg); |
949 | |
950 | static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode) |
951 | { |
952 | if (cmd->allowed_opcode == CMD_ALLOWED_OPCODE_ALL) |
953 | return true; |
954 | |
955 | return cmd->allowed_opcode == opcode; |
956 | } |
957 | |
958 | bool mlx5_cmd_is_down(struct mlx5_core_dev *dev) |
959 | { |
960 | return pci_channel_offline(dev->pdev) || |
961 | dev->cmd.state != MLX5_CMDIF_STATE_UP || |
962 | dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR; |
963 | } |
964 | |
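/* Worker that claims a command slot, builds the hardware descriptor from
 * the entry and rings the doorbell; in polling mode it also waits here
 * for the firmware to complete the command.
 */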
965 | static void cmd_work_handler(struct work_struct *work) |
966 | { |
967 | struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); |
968 | struct mlx5_cmd *cmd = ent->cmd; |
969 | bool poll_cmd = ent->polling; |
970 | struct mlx5_cmd_layout *lay; |
971 | struct mlx5_core_dev *dev; |
972 | unsigned long cb_timeout; |
973 | struct semaphore *sem; |
974 | unsigned long flags; |
975 | int alloc_ret; |
976 | int cmd_mode; |
977 | |
978 | dev = container_of(cmd, struct mlx5_core_dev, cmd); |
979 | cb_timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD)); |
980 | |
981 | complete(&ent->handling); |
982 | sem = ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem; |
983 | down(sem); |
984 | if (!ent->page_queue) { |
985 | alloc_ret = cmd_alloc_index(cmd, ent); |
986 | if (alloc_ret < 0) { |
987 | mlx5_core_err_rl(dev, "failed to allocate command entry\n"); |
988 | if (ent->callback) { |
989 | ent->callback(-EAGAIN, ent->context); |
990 | mlx5_free_cmd_msg(dev, ent->out); |
991 | free_msg(dev, ent->in); |
992 | cmd_ent_put(ent); |
993 | } else { |
994 | ent->ret = -EAGAIN; |
995 | complete(&ent->done); |
996 | } |
997 | up(sem); |
998 | return; |
999 | } |
1000 | } else { |
1001 | ent->idx = cmd->vars.max_reg_cmds; |
1002 | spin_lock_irqsave(&cmd->alloc_lock, flags); |
1003 | clear_bit(ent->idx, &cmd->vars.bitmask); |
1004 | cmd->ent_arr[ent->idx] = ent; |
1005 | spin_unlock_irqrestore(&cmd->alloc_lock, flags); |
1006 | } |
1007 | |
1008 | lay = get_inst(cmd, ent->idx); |
1009 | ent->lay = lay; |
1010 | memset(lay, 0, sizeof(*lay)); |
1011 | memcpy(lay->in, ent->in->first.data, sizeof(lay->in)); |
1012 | if (ent->in->next) |
1013 | lay->in_ptr = cpu_to_be64(ent->in->next->dma); |
1014 | lay->inlen = cpu_to_be32(ent->in->len); |
1015 | if (ent->out->next) |
1016 | lay->out_ptr = cpu_to_be64(ent->out->next->dma); |
1017 | lay->outlen = cpu_to_be32(ent->out->len); |
1018 | lay->type = MLX5_PCI_CMD_XPORT; |
1019 | lay->token = ent->token; |
1020 | lay->status_own = CMD_OWNER_HW; |
1021 | set_signature(ent, !cmd->checksum_disabled); |
1022 | dump_command(dev, ent, 1); |
1023 | ent->ts1 = ktime_get_ns(); |
1024 | cmd_mode = cmd->mode; |
1025 | |
1026 | if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, cb_timeout)) |
1027 | cmd_ent_get(ent); |
1028 | set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state); |
1029 | |
1030 | cmd_ent_get(ent); /* for the _real_ FW event on completion */ |
1031 | /* Skip sending command to fw if internal error */ |
1032 | if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) { |
1033 | ent->ret = -ENXIO; |
1034 | mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true); |
1035 | return; |
1036 | } |
1037 | |
1038 | /* ring doorbell after the descriptor is valid */ |
1039 | mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); |
1040 | wmb(); |
1041 | iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell); |
1042 | /* if not in polling don't use ent after this point */ |
1043 | if (cmd_mode == CMD_MODE_POLLING || poll_cmd) { |
1044 | poll_timeout(ent); |
1045 | /* make sure we read the descriptor after ownership is SW */ |
1046 | rmb(); |
1047 | mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT)); |
1048 | } |
1049 | } |
1050 | |
1051 | static int deliv_status_to_err(u8 status) |
1052 | { |
1053 | switch (status) { |
1054 | case MLX5_CMD_DELIVERY_STAT_OK: |
1055 | case MLX5_DRIVER_STATUS_ABORTED: |
1056 | return 0; |
1057 | case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR: |
1058 | case MLX5_CMD_DELIVERY_STAT_TOK_ERR: |
1059 | return -EBADR; |
1060 | case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR: |
1061 | case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR: |
1062 | case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR: |
1063 | return -EFAULT; /* Bad address */ |
1064 | case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR: |
1065 | case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR: |
1066 | case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR: |
1067 | case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR: |
1068 | return -ENOMSG; |
1069 | case MLX5_CMD_DELIVERY_STAT_FW_ERR: |
1070 | return -EIO; |
1071 | default: |
1072 | return -EINVAL; |
1073 | } |
1074 | } |
1075 | |
1076 | static const char *deliv_status_to_str(u8 status) |
1077 | { |
1078 | switch (status) { |
1079 | case MLX5_CMD_DELIVERY_STAT_OK: |
1080 | return "no errors"; |
1081 | case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR: |
1082 | return "signature error"; |
1083 | case MLX5_CMD_DELIVERY_STAT_TOK_ERR: |
1084 | return "token error"; |
1085 | case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR: |
1086 | return "bad block number"; |
1087 | case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR: |
1088 | return "output pointer not aligned to block size"; |
1089 | case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR: |
1090 | return "input pointer not aligned to block size"; |
1091 | case MLX5_CMD_DELIVERY_STAT_FW_ERR: |
1092 | return "firmware internal error"; |
1093 | case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR: |
1094 | return "command input length error"; |
1095 | case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR: |
1096 | return "command output length error"; |
1097 | case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR: |
1098 | return "reserved fields not cleared"; |
1099 | case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR: |
1100 | return "bad command descriptor type"; |
1101 | default: |
1102 | return "unknown status code"; |
1103 | } |
1104 | } |
1105 | |
1106 | enum { |
1107 | MLX5_CMD_TIMEOUT_RECOVER_MSEC = 5 * 1000, |
1108 | }; |
1109 | |
1110 | static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev, |
1111 | struct mlx5_cmd_work_ent *ent) |
1112 | { |
1113 | unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_RECOVER_MSEC); |
1114 | |
1115 | mlx5_cmd_eq_recover(dev); |
1116 | |
1117 | /* Re-wait on the ent->done after executing the recovery flow. If the |
1118 | * recovery flow (or any other recovery flow running simultaneously) |
1119 | * has recovered an EQE, it should cause the entry to be completed by |
1120 | * the command interface. |
1121 | */ |
1122 | if (wait_for_completion_timeout(&ent->done, timeout)) { |
1123 | mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) recovered after timeout\n", ent->idx, |
1124 | mlx5_command_str(ent->op), ent->op); |
1125 | return; |
1126 | } |
1127 | |
1128 | mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) No done completion\n", ent->idx, |
1129 | mlx5_command_str(ent->op), ent->op); |
1130 | |
1131 | ent->ret = -ETIMEDOUT; |
1132 | mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true); |
1133 | } |
1134 | |
1135 | static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) |
1136 | { |
1137 | unsigned long timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD)); |
1138 | struct mlx5_cmd *cmd = &dev->cmd; |
1139 | int err; |
1140 | |
1141 | if (!wait_for_completion_timeout(&ent->handling, timeout) && |
1142 | cancel_work_sync(&ent->work)) { |
1143 | ent->ret = -ECANCELED; |
1144 | goto out_err; |
1145 | } |
1146 | if (cmd->mode == CMD_MODE_POLLING || ent->polling) |
1147 | wait_for_completion(&ent->done); |
1148 | else if (!wait_for_completion_timeout(&ent->done, timeout)) |
1149 | wait_func_handle_exec_timeout(dev, ent); |
1150 | |
1151 | out_err: |
1152 | err = ent->ret; |
1153 | |
1154 | if (err == -ETIMEDOUT) { |
1155 | mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", |
1156 | mlx5_command_str(ent->op), ent->op); |
1157 | } else if (err == -ECANCELED) { |
1158 | mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n", |
1159 | mlx5_command_str(ent->op), ent->op); |
1160 | } |
1161 | mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", |
1162 | err, deliv_status_to_str(ent->status), ent->status); |
1163 | |
1164 | return err; |
1165 | } |
1166 | |
1167 | /* Notes: |
1168 | * 1. Callback functions may not sleep |
1169 | * 2. page queue commands do not support asynchronous completion |
1170 | * |
1171 | * return value in case (!callback): |
1172 | * ret < 0 : Command execution couldn't be submitted by driver |
1173 | * ret > 0 : Command execution couldn't be performed by firmware |
1174 | * ret == 0: Command was executed by FW, Caller must check FW outbox status. |
1175 | * |
1176 | * return value in case (callback): |
1177 | * ret < 0 : Command execution couldn't be submitted by driver |
1178 | * ret == 0: Command will be submitted to FW for execution |
1179 | * and the callback will be called for further status updates |
1180 | */ |
1181 | static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, |
1182 | struct mlx5_cmd_msg *out, void *uout, int uout_size, |
1183 | mlx5_cmd_cbk_t callback, |
1184 | void *context, int page_queue, |
1185 | u8 token, bool force_polling) |
1186 | { |
1187 | struct mlx5_cmd *cmd = &dev->cmd; |
1188 | struct mlx5_cmd_work_ent *ent; |
1189 | struct mlx5_cmd_stats *stats; |
1190 | u8 status = 0; |
1191 | int err = 0; |
1192 | s64 ds; |
1193 | |
1194 | if (callback && page_queue) |
1195 | return -EINVAL; |
1196 | |
1197 | ent = cmd_alloc_ent(cmd, in, out, uout, uout_size, |
1198 | callback, context, page_queue); |
1199 | if (IS_ERR(ent)) |
1200 | return PTR_ERR(ent); |
1201 | |
1202 | /* put for this ent is when consumed, depending on the use case |
1203 | * 1) (!callback) blocking flow: by caller after wait_func completes |
1204 | * 2) (callback) flow: by mlx5_cmd_comp_handler() when ent is handled |
1205 | */ |
1206 | |
1207 | ent->token = token; |
1208 | ent->polling = force_polling; |
1209 | |
1210 | init_completion(&ent->handling); |
1211 | if (!callback) |
1212 | init_completion(&ent->done); |
1213 | |
1214 | INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler); |
1215 | INIT_WORK(&ent->work, cmd_work_handler); |
1216 | if (page_queue) { |
1217 | cmd_work_handler(&ent->work); |
1218 | } else if (!queue_work(cmd->wq, &ent->work)) { |
1219 | mlx5_core_warn(dev, "failed to queue work\n"); |
1220 | err = -EALREADY; |
1221 | goto out_free; |
1222 | } |
1223 | |
1224 | if (callback) |
1225 | return 0; /* mlx5_cmd_comp_handler() will put(ent) */ |
1226 | |
1227 | err = wait_func(dev, ent); |
1228 | if (err == -ETIMEDOUT || err == -ECANCELED) |
1229 | goto out_free; |
1230 | |
1231 | ds = ent->ts2 - ent->ts1; |
1232 | stats = xa_load(&cmd->stats, ent->op); |
1233 | if (stats) { |
1234 | spin_lock_irq(&stats->lock); |
1235 | stats->sum += ds; |
1236 | ++stats->n; |
1237 | spin_unlock_irq(&stats->lock); |
1238 | } |
1239 | mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME, |
1240 | "fw exec time for %s is %lld nsec\n", |
1241 | mlx5_command_str(ent->op), ds); |
1242 | |
1243 | out_free: |
1244 | status = ent->status; |
1245 | cmd_ent_put(ent); |
1246 | return err ? : status; |
1247 | } |
1248 | |
1249 | static ssize_t dbg_write(struct file *filp, const char __user *buf, |
1250 | size_t count, loff_t *pos) |
1251 | { |
1252 | struct mlx5_core_dev *dev = filp->private_data; |
1253 | struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; |
1254 | char lbuf[3]; |
1255 | int err; |
1256 | |
1257 | if (!dbg->in_msg || !dbg->out_msg) |
1258 | return -ENOMEM; |
1259 | |
1260 | if (count < sizeof(lbuf) - 1) |
1261 | return -EINVAL; |
1262 | |
1263 | if (copy_from_user(lbuf, buf, sizeof(lbuf) - 1)) |
1264 | return -EFAULT; |
1265 | |
1266 | lbuf[sizeof(lbuf) - 1] = 0; |
1267 | |
1268 | if (strcmp(lbuf, "go")) |
1269 | return -EINVAL; |
1270 | |
1271 | err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen); |
1272 | |
1273 | return err ? err : count; |
1274 | } |
1275 | |
1276 | static const struct file_operations fops = { |
1277 | .owner = THIS_MODULE, |
1278 | .open = simple_open, |
1279 | .write = dbg_write, |
1280 | }; |
1281 | |
1282 | static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size, |
1283 | u8 token) |
1284 | { |
1285 | struct mlx5_cmd_prot_block *block; |
1286 | struct mlx5_cmd_mailbox *next; |
1287 | int copy; |
1288 | |
1289 | if (!to || !from) |
1290 | return -ENOMEM; |
1291 | |
1292 | copy = min_t(int, size, sizeof(to->first.data)); |
1293 | memcpy(to->first.data, from, copy); |
1294 | size -= copy; |
1295 | from += copy; |
1296 | |
1297 | next = to->next; |
1298 | while (size) { |
1299 | if (!next) { |
1300 | /* this is a BUG */ |
1301 | return -ENOMEM; |
1302 | } |
1303 | |
1304 | copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE); |
1305 | block = next->buf; |
1306 | memcpy(block->data, from, copy); |
1307 | from += copy; |
1308 | size -= copy; |
1309 | block->token = token; |
1310 | next = next->next; |
1311 | } |
1312 | |
1313 | return 0; |
1314 | } |
1315 | |
1316 | static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size) |
1317 | { |
1318 | struct mlx5_cmd_prot_block *block; |
1319 | struct mlx5_cmd_mailbox *next; |
1320 | int copy; |
1321 | |
1322 | if (!to || !from) |
1323 | return -ENOMEM; |
1324 | |
1325 | copy = min_t(int, size, sizeof(from->first.data)); |
1326 | memcpy(to, from->first.data, copy); |
1327 | size -= copy; |
1328 | to += copy; |
1329 | |
1330 | next = from->next; |
1331 | while (size) { |
1332 | if (!next) { |
1333 | /* this is a BUG */ |
1334 | return -ENOMEM; |
1335 | } |
1336 | |
1337 | copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE); |
1338 | block = next->buf; |
1339 | |
1340 | memcpy(to, block->data, copy); |
1341 | to += copy; |
1342 | size -= copy; |
1343 | next = next->next; |
1344 | } |
1345 | |
1346 | return 0; |
1347 | } |
1348 | |
1349 | static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev, |
1350 | gfp_t flags) |
1351 | { |
1352 | struct mlx5_cmd_mailbox *mailbox; |
1353 | |
1354 | mailbox = kmalloc(sizeof(*mailbox), flags); |
1355 | if (!mailbox) |
1356 | return ERR_PTR(-ENOMEM); |
1357 | |
1358 | mailbox->buf = dma_pool_zalloc(dev->cmd.pool, flags, |
1359 | &mailbox->dma); |
1360 | if (!mailbox->buf) { |
1361 | mlx5_core_dbg(dev, "failed allocation\n"); |
1362 | kfree(mailbox); |
1363 | return ERR_PTR(-ENOMEM); |
1364 | } |
1365 | mailbox->next = NULL; |
1366 | |
1367 | return mailbox; |
1368 | } |
1369 | |
1370 | static void free_cmd_box(struct mlx5_core_dev *dev, |
1371 | struct mlx5_cmd_mailbox *mailbox) |
1372 | { |
1373 | dma_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma); |
1374 | kfree(mailbox); |
1375 | } |
1376 | |
1377 | static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, |
1378 | gfp_t flags, int size, |
1379 | u8 token) |
1380 | { |
1381 | struct mlx5_cmd_mailbox *tmp, *head = NULL; |
1382 | struct mlx5_cmd_prot_block *block; |
1383 | struct mlx5_cmd_msg *msg; |
1384 | int err; |
1385 | int n; |
1386 | int i; |
1387 | |
1388 | msg = kzalloc(sizeof(*msg), flags); |
1389 | if (!msg) |
1390 | return ERR_PTR(-ENOMEM); |
1391 | |
1392 | msg->len = size; |
1393 | n = mlx5_calc_cmd_blocks(msg); |
1394 | |
1395 | for (i = 0; i < n; i++) { |
1396 | tmp = alloc_cmd_box(dev, flags); |
1397 | if (IS_ERR(tmp)) { |
1398 | mlx5_core_warn(dev, "failed allocating block\n"); |
1399 | err = PTR_ERR(tmp); |
1400 | goto err_alloc; |
1401 | } |
1402 | |
1403 | block = tmp->buf; |
1404 | tmp->next = head; |
1405 | block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0); |
1406 | block->block_num = cpu_to_be32(n - i - 1); |
1407 | block->token = token; |
1408 | head = tmp; |
1409 | } |
1410 | msg->next = head; |
1411 | return msg; |
1412 | |
1413 | err_alloc: |
1414 | while (head) { |
1415 | tmp = head->next; |
1416 | free_cmd_box(dev, head); |
1417 | head = tmp; |
1418 | } |
1419 | kfree(msg); |
1420 | |
1421 | return ERR_PTR(err); |
1422 | } |
1423 | |
1424 | static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, |
1425 | struct mlx5_cmd_msg *msg) |
1426 | { |
1427 | struct mlx5_cmd_mailbox *head = msg->next; |
1428 | struct mlx5_cmd_mailbox *next; |
1429 | |
1430 | while (head) { |
1431 | next = head->next; |
1432 | free_cmd_box(dev, head); |
1433 | head = next; |
1434 | } |
1435 | kfree(msg); |
1436 | } |
1437 | |
1438 | static ssize_t data_write(struct file *filp, const char __user *buf, |
1439 | size_t count, loff_t *pos) |
1440 | { |
1441 | struct mlx5_core_dev *dev = filp->private_data; |
1442 | struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; |
1443 | void *ptr; |
1444 | |
1445 | if (*pos != 0) |
1446 | return -EINVAL; |
1447 | |
1448 | kfree(dbg->in_msg); |
1449 | dbg->in_msg = NULL; |
1450 | dbg->inlen = 0; |
1451 | ptr = memdup_user(buf, count); |
1452 | if (IS_ERR(ptr)) |
1453 | return PTR_ERR(ptr); |
1454 | dbg->in_msg = ptr; |
1455 | dbg->inlen = count; |
1456 | |
1457 | *pos = count; |
1458 | |
1459 | return count; |
1460 | } |
1461 | |
1462 | static ssize_t data_read(struct file *filp, char __user *buf, size_t count, |
1463 | loff_t *pos) |
1464 | { |
1465 | struct mlx5_core_dev *dev = filp->private_data; |
1466 | struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; |
1467 | |
1468 | if (!dbg->out_msg) |
1469 | return -ENOMEM; |
1470 | |
1471 | return simple_read_from_buffer(buf, count, pos, dbg->out_msg, |
1472 | dbg->outlen); |
1473 | } |
1474 | |
1475 | static const struct file_operations dfops = { |
1476 | .owner = THIS_MODULE, |
1477 | .open = simple_open, |
1478 | .write = data_write, |
1479 | .read = data_read, |
1480 | }; |
1481 | |
1482 | static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count, |
1483 | loff_t *pos) |
1484 | { |
1485 | struct mlx5_core_dev *dev = filp->private_data; |
1486 | struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; |
1487 | char outlen[8]; |
1488 | int err; |
1489 | |
1490 | err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen); |
1491 | if (err < 0) |
1492 | return err; |
1493 | |
1494 | return simple_read_from_buffer(buf, count, pos, outlen, err); |
1495 | } |
1496 | |
1497 | static ssize_t outlen_write(struct file *filp, const char __user *buf, |
1498 | size_t count, loff_t *pos) |
1499 | { |
1500 | struct mlx5_core_dev *dev = filp->private_data; |
1501 | struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; |
1502 | char outlen_str[8] = {0}; |
1503 | int outlen; |
1504 | void *ptr; |
1505 | int err; |
1506 | |
1507 | if (*pos != 0 || count > 6) |
1508 | return -EINVAL; |
1509 | |
1510 | kfree(dbg->out_msg); |
1511 | dbg->out_msg = NULL; |
1512 | dbg->outlen = 0; |
1513 | |
1514 | if (copy_from_user(outlen_str, buf, count)) |
1515 | return -EFAULT; |
1516 | |
1517 | err = sscanf(outlen_str, "%d", &outlen); |
1518 | if (err != 1) |
1519 | return -EINVAL; |
1520 | |
1521 | ptr = kzalloc(outlen, GFP_KERNEL); |
1522 | if (!ptr) |
1523 | return -ENOMEM; |
1524 | |
1525 | dbg->out_msg = ptr; |
1526 | dbg->outlen = outlen; |
1527 | |
1528 | *pos = count; |
1529 | |
1530 | return count; |
1531 | } |
1532 | |
1533 | static const struct file_operations olfops = { |
1534 | .owner = THIS_MODULE, |
1535 | .open = simple_open, |
1536 | .write = outlen_write, |
1537 | .read = outlen_read, |
1538 | }; |
1539 | |
1540 | static void set_wqname(struct mlx5_core_dev *dev) |
1541 | { |
1542 | struct mlx5_cmd *cmd = &dev->cmd; |
1543 | |
1544 | snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s", |
1545 | dev_name(dev->device)); |
1546 | } |
1547 | |
1548 | static void clean_debug_files(struct mlx5_core_dev *dev) |
1549 | { |
1550 | struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; |
1551 | |
1552 | if (!mlx5_debugfs_root) |
1553 | return; |
1554 | |
1555 | debugfs_remove_recursive(dbg->dbg_root); |
1556 | } |
1557 | |
1558 | static void create_debugfs_files(struct mlx5_core_dev *dev) |
1559 | { |
1560 | struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; |
1561 | |
1562 | dbg->dbg_root = debugfs_create_dir("cmd", mlx5_debugfs_get_dev_root(dev)); |
1563 | |
1564 | debugfs_create_file("in", 0400, dbg->dbg_root, dev, &dfops); |
1565 | debugfs_create_file("out", 0200, dbg->dbg_root, dev, &dfops); |
1566 | debugfs_create_file("out_len", 0600, dbg->dbg_root, dev, &olfops); |
1567 | debugfs_create_u8("status", 0600, dbg->dbg_root, &dbg->status); |
1568 | debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops); |
1569 | } |
1570 | |
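/* Restrict the command interface to a single opcode (or re-open it with
 * CMD_ALLOWED_OPCODE_ALL): take every regular command slot plus the pages
 * slot so nothing is in flight, update allowed_opcode, then release them.
 */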
1571 | void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode) |
1572 | { |
1573 | struct mlx5_cmd *cmd = &dev->cmd; |
1574 | int i; |
1575 | |
	for (i = 0; i < cmd->vars.max_reg_cmds; i++)
		down(&cmd->vars.sem);
	down(&cmd->vars.pages_sem);

	cmd->allowed_opcode = opcode;

	up(&cmd->vars.pages_sem);
	for (i = 0; i < cmd->vars.max_reg_cmds; i++)
		up(&cmd->vars.sem);
1585 | } |
1586 | |
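/* Switch the command interface between polling and event-driven completion
 * modes while no commands are in flight, using the same quiesce pattern as
 * mlx5_cmd_allowed_opcode().
 */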
1587 | static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode) |
1588 | { |
1589 | struct mlx5_cmd *cmd = &dev->cmd; |
1590 | int i; |
1591 | |
	for (i = 0; i < cmd->vars.max_reg_cmds; i++)
		down(&cmd->vars.sem);
	down(&cmd->vars.pages_sem);

	cmd->mode = mode;

	up(&cmd->vars.pages_sem);
	for (i = 0; i < cmd->vars.max_reg_cmds; i++)
		up(&cmd->vars.sem);
1601 | } |
1602 | |
1603 | static int cmd_comp_notifier(struct notifier_block *nb, |
1604 | unsigned long type, void *data) |
1605 | { |
1606 | struct mlx5_core_dev *dev; |
1607 | struct mlx5_cmd *cmd; |
1608 | struct mlx5_eqe *eqe; |
1609 | |
1610 | cmd = mlx5_nb_cof(nb, struct mlx5_cmd, nb); |
1611 | dev = container_of(cmd, struct mlx5_core_dev, cmd); |
1612 | eqe = data; |
1613 | |
	mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
1615 | |
1616 | return NOTIFY_OK; |
}

void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	MLX5_NB_INIT(&dev->cmd.nb, cmd_comp_notifier, CMD);
	mlx5_eq_notifier_register(dev, &dev->cmd.nb);
	mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
1623 | } |
1624 | |
1625 | void mlx5_cmd_use_polling(struct mlx5_core_dev *dev) |
1626 | { |
	mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
	mlx5_eq_notifier_unregister(dev, &dev->cmd.nb);
1629 | } |
1630 | |
1631 | static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) |
1632 | { |
1633 | unsigned long flags; |
1634 | |
1635 | if (msg->parent) { |
1636 | spin_lock_irqsave(&msg->parent->lock, flags); |
		list_add_tail(&msg->list, &msg->parent->head);
		spin_unlock_irqrestore(&msg->parent->lock, flags);
1639 | } else { |
1640 | mlx5_free_cmd_msg(dev, msg); |
1641 | } |
1642 | } |
1643 | |
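/* Completion handler for the command interface. For every entry set in @vec:
 * skip entries whose completion already ran (e.g. after a timeout), copy the
 * inline outbox out of the command layout, verify the signature when enabled,
 * then either run the asynchronous callback or wake the synchronous waiter.
 * @forced marks driver-triggered completions, for which the entry return code
 * is set to -ENXIO.
 */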
1644 | static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced) |
1645 | { |
1646 | struct mlx5_cmd *cmd = &dev->cmd; |
1647 | struct mlx5_cmd_work_ent *ent; |
1648 | mlx5_cmd_cbk_t callback; |
1649 | void *context; |
1650 | int err; |
1651 | int i; |
1652 | s64 ds; |
1653 | struct mlx5_cmd_stats *stats; |
1654 | unsigned long flags; |
1655 | unsigned long vector; |
1656 | |
1657 | /* there can be at most 32 command queues */ |
1658 | vector = vec & 0xffffffff; |
1659 | for (i = 0; i < (1 << cmd->vars.log_sz); i++) { |
1660 | if (test_bit(i, &vector)) { |
1661 | ent = cmd->ent_arr[i]; |
1662 | |
1663 | /* if we already completed the command, ignore it */ |
			if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
						&ent->state)) {
				/* only real completion can free the cmd slot */
				if (!forced) {
					mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
1669 | ent->idx); |
1670 | cmd_ent_put(ent); |
1671 | } |
1672 | continue; |
1673 | } |
1674 | |
			if (ent->callback && cancel_delayed_work(&ent->cb_timeout_work))
1676 | cmd_ent_put(ent); /* timeout work was canceled */ |
1677 | |
1678 | if (!forced || /* Real FW completion */ |
1679 | mlx5_cmd_is_down(dev) || /* No real FW completion is expected */ |
			    !opcode_allowed(cmd, ent->op))
1681 | cmd_ent_put(ent); |
1682 | |
1683 | ent->ts2 = ktime_get_ns(); |
1684 | memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out)); |
			dump_command(dev, ent, 0);
1686 | |
1687 | if (vec & MLX5_TRIGGERED_CMD_COMP) |
1688 | ent->ret = -ENXIO; |
1689 | |
1690 | if (!ent->ret) { /* Command completed by FW */ |
1691 | if (!cmd->checksum_disabled) |
1692 | ent->ret = verify_signature(ent); |
1693 | |
1694 | ent->status = ent->lay->status_own >> 1; |
1695 | |
				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
1697 | ent->ret, deliv_status_to_str(ent->status), ent->status); |
1698 | } |
1699 | |
1700 | if (ent->callback) { |
1701 | ds = ent->ts2 - ent->ts1; |
				stats = xa_load(&cmd->stats, ent->op);
1703 | if (stats) { |
1704 | spin_lock_irqsave(&stats->lock, flags); |
1705 | stats->sum += ds; |
1706 | ++stats->n; |
					spin_unlock_irqrestore(&stats->lock, flags);
1708 | } |
1709 | |
1710 | callback = ent->callback; |
1711 | context = ent->context; |
1712 | err = ent->ret ? : ent->status; |
1713 | if (err > 0) /* Failed in FW, command didn't execute */ |
					err = deliv_status_to_err(err);
1715 | |
1716 | if (!err) |
					err = mlx5_copy_from_msg(ent->uout,
								 ent->out,
								 ent->uout_size);
1720 | |
				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);
1723 | |
1724 | /* final consumer is done, release ent */ |
1725 | cmd_ent_put(ent); |
1726 | callback(err, context); |
1727 | } else { |
1728 | /* release wait_func() so mlx5_cmd_invoke() |
1729 | * can make the final ent_put() |
1730 | */ |
1731 | complete(&ent->done); |
1732 | } |
1733 | } |
1734 | } |
1735 | } |
1736 | |
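/* Force completion of all currently outstanding command entries. Used when
 * firmware is not expected to answer anymore; references are taken under
 * alloc_lock so the entries cannot be freed while their completions run.
 */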
1737 | static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev) |
1738 | { |
1739 | struct mlx5_cmd *cmd = &dev->cmd; |
1740 | unsigned long bitmask; |
1741 | unsigned long flags; |
1742 | u64 vector; |
1743 | int i; |
1744 | |
1745 | /* wait for pending handlers to complete */ |
1746 | mlx5_eq_synchronize_cmd_irq(dev); |
1747 | spin_lock_irqsave(&dev->cmd.alloc_lock, flags); |
1748 | vector = ~dev->cmd.vars.bitmask & ((1ul << (1 << dev->cmd.vars.log_sz)) - 1); |
1749 | if (!vector) |
1750 | goto no_trig; |
1751 | |
1752 | bitmask = vector; |
1753 | /* we must increment the allocated entries refcount before triggering the completions |
1754 | * to guarantee pending commands will not get freed in the meanwhile. |
1755 | * For that reason, it also has to be done inside the alloc_lock. |
1756 | */ |
1757 | for_each_set_bit(i, &bitmask, (1 << cmd->vars.log_sz)) |
		cmd_ent_get(cmd->ent_arr[i]);
	vector |= MLX5_TRIGGERED_CMD_COMP;
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);

	mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
	mlx5_cmd_comp_handler(dev, vector, true);
	for_each_set_bit(i, &bitmask, (1 << cmd->vars.log_sz))
		cmd_ent_put(cmd->ent_arr[i]);
1766 | return; |
1767 | |
1768 | no_trig: |
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
1770 | } |
1771 | |
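/* Drain the command interface: keep triggering forced completions until every
 * command slot (and the pages slot) can be acquired, then release them all,
 * leaving the interface idle.
 */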
1772 | void mlx5_cmd_flush(struct mlx5_core_dev *dev) |
1773 | { |
1774 | struct mlx5_cmd *cmd = &dev->cmd; |
1775 | int i; |
1776 | |
1777 | for (i = 0; i < cmd->vars.max_reg_cmds; i++) { |
		while (down_trylock(&cmd->vars.sem)) {
1779 | mlx5_cmd_trigger_completions(dev); |
1780 | cond_resched(); |
1781 | } |
1782 | } |
1783 | |
	while (down_trylock(&cmd->vars.pages_sem)) {
1785 | mlx5_cmd_trigger_completions(dev); |
1786 | cond_resched(); |
1787 | } |
1788 | |
1789 | /* Unlock cmdif */ |
	up(&cmd->vars.pages_sem);
1791 | for (i = 0; i < cmd->vars.max_reg_cmds; i++) |
		up(&cmd->vars.sem);
1793 | } |
1794 | |
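/* Pick the smallest message cache tier whose inbox size fits @in_size and
 * reuse a cached message when one is available; otherwise allocate a fresh
 * message. Inboxes of at most 16 bytes bypass the cache entirely.
 */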
1795 | static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size, |
1796 | gfp_t gfp) |
1797 | { |
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
1799 | struct cmd_msg_cache *ch = NULL; |
1800 | struct mlx5_cmd *cmd = &dev->cmd; |
1801 | int i; |
1802 | |
1803 | if (in_size <= 16) |
1804 | goto cache_miss; |
1805 | |
1806 | for (i = 0; i < dev->profile.num_cmd_caches; i++) { |
1807 | ch = &cmd->cache[i]; |
1808 | if (in_size > ch->max_inbox_size) |
1809 | continue; |
		spin_lock_irq(&ch->lock);
		if (list_empty(&ch->head)) {
			spin_unlock_irq(&ch->lock);
1813 | continue; |
1814 | } |
1815 | msg = list_entry(ch->head.next, typeof(*msg), list); |
1816 | /* For cached lists, we must explicitly state what is |
1817 | * the real size |
1818 | */ |
1819 | msg->len = in_size; |
		list_del(&msg->list);
		spin_unlock_irq(&ch->lock);
1822 | break; |
1823 | } |
1824 | |
	if (!IS_ERR(msg))
1826 | return msg; |
1827 | |
1828 | cache_miss: |
	msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
1830 | return msg; |
1831 | } |
1832 | |
1833 | static int is_manage_pages(void *in) |
1834 | { |
1835 | return in_to_opcode(in) == MLX5_CMD_OP_MANAGE_PAGES; |
1836 | } |
1837 | |
1838 | /* Notes: |
1839 | * 1. Callback functions may not sleep |
 * 2. Page queue commands do not support asynchronous completion
1841 | */ |
1842 | static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, |
1843 | int out_size, mlx5_cmd_cbk_t callback, void *context, |
1844 | bool force_polling) |
1845 | { |
1846 | struct mlx5_cmd_msg *inb, *outb; |
1847 | u16 opcode = in_to_opcode(in); |
1848 | bool throttle_op; |
1849 | int pages_queue; |
1850 | gfp_t gfp; |
1851 | u8 token; |
1852 | int err; |
1853 | |
	if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode))
1855 | return -ENXIO; |
1856 | |
	throttle_op = mlx5_cmd_is_throttle_opcode(opcode);
1858 | if (throttle_op) { |
1859 | /* atomic context may not sleep */ |
1860 | if (callback) |
1861 | return -EINVAL; |
		down(&dev->cmd.vars.throttle_sem);
1863 | } |
1864 | |
1865 | pages_queue = is_manage_pages(in); |
1866 | gfp = callback ? GFP_ATOMIC : GFP_KERNEL; |
1867 | |
1868 | inb = alloc_msg(dev, in_size, gfp); |
1869 | if (IS_ERR(inb)) { |
1870 | err = PTR_ERR(inb); |
1871 | goto out_up; |
1872 | } |
1873 | |
	token = alloc_token(&dev->cmd);

	err = mlx5_copy_to_msg(inb, in, in_size, token);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
1879 | goto out_in; |
1880 | } |
1881 | |
	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
1883 | if (IS_ERR(outb)) { |
1884 | err = PTR_ERR(outb); |
1885 | goto out_in; |
1886 | } |
1887 | |
	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
			      pages_queue, token, force_polling);
1890 | if (callback) |
1891 | return err; |
1892 | |
1893 | if (err > 0) /* Failed in FW, command didn't execute */ |
		err = deliv_status_to_err(err);
1895 | |
1896 | if (err) |
1897 | goto out_out; |
1898 | |
1899 | /* command completed by FW */ |
	err = mlx5_copy_from_msg(out, outb, out_size);
1901 | out_out: |
1902 | mlx5_free_cmd_msg(dev, outb); |
1903 | out_in: |
1904 | free_msg(dev, inb); |
1905 | out_up: |
1906 | if (throttle_op) |
		up(&dev->cmd.vars.throttle_sem);
1908 | return err; |
1909 | } |
1910 | |
1911 | static void mlx5_cmd_err_trace(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out) |
1912 | { |
1913 | u32 syndrome = MLX5_GET(mbox_out, out, syndrome); |
1914 | u8 status = MLX5_GET(mbox_out, out, status); |
1915 | |
	trace_mlx5_cmd(mlx5_command_str(opcode), opcode, op_mod,
		       cmd_status_str(status), status, syndrome,
		       cmd_status_to_err(status));
1919 | } |
1920 | |
1921 | static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status, |
1922 | u32 syndrome, int err) |
1923 | { |
	const char *namep = mlx5_command_str(opcode);
	struct mlx5_cmd_stats *stats;
	unsigned long flags;

	if (!err || !(strcmp(namep, "unknown command opcode")))
		return;

	stats = xa_load(&dev->cmd.stats, opcode);
1932 | if (!stats) |
1933 | return; |
1934 | spin_lock_irqsave(&stats->lock, flags); |
1935 | stats->failed++; |
1936 | if (err < 0) |
1937 | stats->last_failed_errno = -err; |
1938 | if (err == -EREMOTEIO) { |
1939 | stats->failed_mbox_status++; |
1940 | stats->last_failed_mbox_status = status; |
1941 | stats->last_failed_syndrome = syndrome; |
1942 | } |
	spin_unlock_irqrestore(&stats->lock, flags);
1944 | } |
1945 | |
1946 | /* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */ |
1947 | static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, u16 op_mod, void *out) |
1948 | { |
1949 | u32 syndrome = MLX5_GET(mbox_out, out, syndrome); |
1950 | u8 status = MLX5_GET(mbox_out, out, status); |
1951 | |
1952 | if (err == -EREMOTEIO) /* -EREMOTEIO is preserved */ |
1953 | err = -EIO; |
1954 | |
1955 | if (!err && status != MLX5_CMD_STAT_OK) { |
1956 | err = -EREMOTEIO; |
1957 | mlx5_cmd_err_trace(dev, opcode, op_mod, out); |
1958 | } |
1959 | |
1960 | cmd_status_log(dev, opcode, status, syndrome, err); |
1961 | return err; |
1962 | } |
1963 | |
1964 | /** |
 * mlx5_cmd_do - Executes a fw command, waits for completion.
1966 | * Unlike mlx5_cmd_exec, this function will not translate or intercept |
1967 | * outbox.status and will return -EREMOTEIO when |
1968 | * outbox.status != MLX5_CMD_STAT_OK |
1969 | * |
1970 | * @dev: mlx5 core device |
1971 | * @in: inbox mlx5_ifc command buffer |
1972 | * @in_size: inbox buffer size |
1973 | * @out: outbox mlx5_ifc buffer |
1974 | * @out_size: outbox size |
1975 | * |
1976 | * @return: |
1977 | * -EREMOTEIO : Command executed by FW, outbox.status != MLX5_CMD_STAT_OK. |
1978 | * Caller must check FW outbox status. |
1979 | * 0 : Command execution successful, outbox.status == MLX5_CMD_STAT_OK. |
1980 | * < 0 : Command execution couldn't be performed by firmware or driver |
1981 | */ |
1982 | int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size) |
1983 | { |
	int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
1985 | u16 op_mod = MLX5_GET(mbox_in, in, op_mod); |
1986 | u16 opcode = in_to_opcode(in); |
1987 | |
1988 | return cmd_status_err(dev, err, opcode, op_mod, out); |
1989 | } |
1990 | EXPORT_SYMBOL(mlx5_cmd_do); |
1991 | |
1992 | /** |
 * mlx5_cmd_exec - Executes a fw command, waits for completion
1994 | * |
1995 | * @dev: mlx5 core device |
1996 | * @in: inbox mlx5_ifc command buffer |
1997 | * @in_size: inbox buffer size |
1998 | * @out: outbox mlx5_ifc buffer |
1999 | * @out_size: outbox size |
2000 | * |
2001 | * @return: 0 if no error, FW command execution was successful |
2002 | * and outbox status is ok. |
2003 | */ |
2004 | int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, |
2005 | int out_size) |
2006 | { |
2007 | int err = mlx5_cmd_do(dev, in, in_size, out, out_size); |
2008 | |
2009 | return mlx5_cmd_check(dev, err, in, out); |
2010 | } |
2011 | EXPORT_SYMBOL(mlx5_cmd_exec); |
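
/* Typical call pattern (illustrative sketch only, not part of this file;
 * assumes a caller holding a struct mlx5_core_dev *dev): build the inbox with
 * the mlx5_ifc accessors and let mlx5_cmd_exec() translate any firmware
 * failure into an errno, e.g. for ENABLE_HCA:
 *
 *	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
 *	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
 *	int err;
 *
 *	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
 *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 */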
2012 | |
2013 | /** |
 * mlx5_cmd_exec_polling - Executes a fw command, polls for completion
2015 | * Needed for driver force teardown, when command completion EQ |
2016 | * will not be available to complete the command |
2017 | * |
2018 | * @dev: mlx5 core device |
2019 | * @in: inbox mlx5_ifc command buffer |
2020 | * @in_size: inbox buffer size |
2021 | * @out: outbox mlx5_ifc buffer |
2022 | * @out_size: outbox size |
2023 | * |
2024 | * @return: 0 if no error, FW command execution was successful |
2025 | * and outbox status is ok. |
2026 | */ |
2027 | int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, |
2028 | void *out, int out_size) |
2029 | { |
	int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
2031 | u16 op_mod = MLX5_GET(mbox_in, in, op_mod); |
2032 | u16 opcode = in_to_opcode(in); |
2033 | |
2034 | err = cmd_status_err(dev, err, opcode, op_mod, out); |
2035 | return mlx5_cmd_check(dev, err, in, out); |
2036 | } |
2037 | EXPORT_SYMBOL(mlx5_cmd_exec_polling); |
2038 | |
2039 | void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev, |
2040 | struct mlx5_async_ctx *ctx) |
2041 | { |
2042 | ctx->dev = dev; |
2043 | /* Starts at 1 to avoid doing wake_up if we are not cleaning up */ |
	atomic_set(&ctx->num_inflight, 1);
	init_completion(&ctx->inflight_done);
2046 | } |
2047 | EXPORT_SYMBOL(mlx5_cmd_init_async_ctx); |
2048 | |
2049 | /** |
2050 | * mlx5_cmd_cleanup_async_ctx - Clean up an async_ctx |
2051 | * @ctx: The ctx to clean |
2052 | * |
2053 | * Upon return all callbacks given to mlx5_cmd_exec_cb() have been called. The |
2054 | * caller must ensure that mlx5_cmd_exec_cb() is not called during or after |
 * the call to mlx5_cmd_cleanup_async_ctx().
2056 | */ |
2057 | void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx) |
2058 | { |
	if (!atomic_dec_and_test(&ctx->num_inflight))
2060 | wait_for_completion(&ctx->inflight_done); |
2061 | } |
2062 | EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx); |
2063 | |
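/* Completion handler for asynchronous commands: translate the firmware status
 * for the caller, invoke the user callback, and drop the async context
 * in-flight reference; the owner waiting in mlx5_cmd_cleanup_async_ctx() is
 * released once the count reaches zero.
 */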
2064 | static void mlx5_cmd_exec_cb_handler(int status, void *_work) |
2065 | { |
2066 | struct mlx5_async_work *work = _work; |
2067 | struct mlx5_async_ctx *ctx; |
2068 | |
2069 | ctx = work->ctx; |
	status = cmd_status_err(ctx->dev, status, work->opcode, work->op_mod, work->out);
	work->user_callback(status, work);
	if (atomic_dec_and_test(&ctx->num_inflight))
2073 | complete(&ctx->inflight_done); |
2074 | } |
2075 | |
2076 | int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, |
2077 | void *out, int out_size, mlx5_async_cbk_t callback, |
2078 | struct mlx5_async_work *work) |
2079 | { |
2080 | int ret; |
2081 | |
2082 | work->ctx = ctx; |
2083 | work->user_callback = callback; |
2084 | work->opcode = in_to_opcode(in); |
2085 | work->op_mod = MLX5_GET(mbox_in, in, op_mod); |
2086 | work->out = out; |
2087 | if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight))) |
2088 | return -EIO; |
	ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
		       mlx5_cmd_exec_cb_handler, work, false);
	if (ret && atomic_dec_and_test(&ctx->num_inflight))
2092 | complete(&ctx->inflight_done); |
2093 | |
2094 | return ret; |
2095 | } |
2096 | EXPORT_SYMBOL(mlx5_cmd_exec_cb); |
2097 | |
2098 | int mlx5_cmd_allow_other_vhca_access(struct mlx5_core_dev *dev, |
2099 | struct mlx5_cmd_allow_other_vhca_access_attr *attr) |
2100 | { |
2101 | u32 out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {}; |
2102 | u32 in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {}; |
2103 | void *key; |
2104 | |
2105 | MLX5_SET(allow_other_vhca_access_in, |
2106 | in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS); |
2107 | MLX5_SET(allow_other_vhca_access_in, |
2108 | in, object_type_to_be_accessed, attr->obj_type); |
2109 | MLX5_SET(allow_other_vhca_access_in, |
2110 | in, object_id_to_be_accessed, attr->obj_id); |
2111 | |
2112 | key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key); |
2113 | memcpy(key, attr->access_key, sizeof(attr->access_key)); |
2114 | |
2115 | return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); |
2116 | } |
2117 | |
2118 | int mlx5_cmd_alias_obj_create(struct mlx5_core_dev *dev, |
2119 | struct mlx5_cmd_alias_obj_create_attr *alias_attr, |
2120 | u32 *obj_id) |
2121 | { |
2122 | u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {}; |
2123 | u32 in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {}; |
2124 | void *param; |
2125 | void *attr; |
2126 | void *key; |
2127 | int ret; |
2128 | |
2129 | attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr); |
2130 | MLX5_SET(general_obj_in_cmd_hdr, |
2131 | attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); |
2132 | MLX5_SET(general_obj_in_cmd_hdr, |
2133 | attr, obj_type, alias_attr->obj_type); |
2134 | param = MLX5_ADDR_OF(general_obj_in_cmd_hdr, in, op_param); |
2135 | MLX5_SET(general_obj_create_param, param, alias_object, 1); |
2136 | |
2137 | attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx); |
2138 | MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id); |
2139 | MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id); |
2140 | |
2141 | key = MLX5_ADDR_OF(alias_context, attr, access_key); |
2142 | memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key)); |
2143 | |
2144 | ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); |
2145 | if (ret) |
2146 | return ret; |
2147 | |
2148 | *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); |
2149 | |
2150 | return 0; |
2151 | } |
2152 | |
2153 | int mlx5_cmd_alias_obj_destroy(struct mlx5_core_dev *dev, u32 obj_id, |
2154 | u16 obj_type) |
2155 | { |
2156 | u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {}; |
2157 | u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {}; |
2158 | |
2159 | MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT); |
2160 | MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, obj_type); |
2161 | MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id); |
2162 | |
2163 | return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); |
2164 | } |
2165 | |
2166 | static void destroy_msg_cache(struct mlx5_core_dev *dev) |
2167 | { |
2168 | struct cmd_msg_cache *ch; |
2169 | struct mlx5_cmd_msg *msg; |
2170 | struct mlx5_cmd_msg *n; |
2171 | int i; |
2172 | |
2173 | for (i = 0; i < dev->profile.num_cmd_caches; i++) { |
2174 | ch = &dev->cmd.cache[i]; |
2175 | list_for_each_entry_safe(msg, n, &ch->head, list) { |
			list_del(&msg->list);
2177 | mlx5_free_cmd_msg(dev, msg); |
2178 | } |
2179 | } |
2180 | } |
2181 | |
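/* Sizing of the command message cache tiers: how many messages to preallocate
 * per tier and the largest inbox (in bytes) each tier serves.
 */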
2182 | static unsigned cmd_cache_num_ent[MLX5_NUM_COMMAND_CACHES] = { |
2183 | 512, 32, 16, 8, 2 |
2184 | }; |
2185 | |
2186 | static unsigned cmd_cache_ent_size[MLX5_NUM_COMMAND_CACHES] = { |
2187 | 16 + MLX5_CMD_DATA_BLOCK_SIZE, |
2188 | 16 + MLX5_CMD_DATA_BLOCK_SIZE * 2, |
2189 | 16 + MLX5_CMD_DATA_BLOCK_SIZE * 16, |
2190 | 16 + MLX5_CMD_DATA_BLOCK_SIZE * 256, |
2191 | 16 + MLX5_CMD_DATA_BLOCK_SIZE * 512, |
2192 | }; |
2193 | |
2194 | static void create_msg_cache(struct mlx5_core_dev *dev) |
2195 | { |
2196 | struct mlx5_cmd *cmd = &dev->cmd; |
2197 | struct cmd_msg_cache *ch; |
2198 | struct mlx5_cmd_msg *msg; |
2199 | int i; |
2200 | int k; |
2201 | |
2202 | /* Initialize and fill the caches with initial entries */ |
2203 | for (k = 0; k < dev->profile.num_cmd_caches; k++) { |
2204 | ch = &cmd->cache[k]; |
2205 | spin_lock_init(&ch->lock); |
		INIT_LIST_HEAD(&ch->head);
2207 | ch->num_ent = cmd_cache_num_ent[k]; |
2208 | ch->max_inbox_size = cmd_cache_ent_size[k]; |
2209 | for (i = 0; i < ch->num_ent; i++) { |
			msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL | __GFP_NOWARN,
						 ch->max_inbox_size, 0);
			if (IS_ERR(msg))
				break;
			msg->parent = ch;
			list_add_tail(&msg->list, &ch->head);
2216 | } |
2217 | } |
2218 | } |
2219 | |
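/* Allocate the DMA'able command queue page. The queue address programmed into
 * the device must be 4K-aligned, so if the coherent allocation is not already
 * aligned, reallocate with enough slack to align the buffer manually.
 */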
2220 | static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) |
2221 | { |
	cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE,
						&cmd->alloc_dma, GFP_KERNEL);
2224 | if (!cmd->cmd_alloc_buf) |
2225 | return -ENOMEM; |
2226 | |
2227 | /* make sure it is aligned to 4K */ |
2228 | if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) { |
2229 | cmd->cmd_buf = cmd->cmd_alloc_buf; |
2230 | cmd->dma = cmd->alloc_dma; |
2231 | cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE; |
2232 | return 0; |
2233 | } |
2234 | |
	dma_free_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
	cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev),
						2 * MLX5_ADAPTER_PAGE_SIZE - 1,
						&cmd->alloc_dma, GFP_KERNEL);
2240 | if (!cmd->cmd_alloc_buf) |
2241 | return -ENOMEM; |
2242 | |
2243 | cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE); |
2244 | cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE); |
2245 | cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1; |
2246 | return 0; |
2247 | } |
2248 | |
2249 | static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) |
2250 | { |
	dma_free_coherent(mlx5_core_dma_dev(dev), cmd->alloc_size, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
2253 | } |
2254 | |
2255 | static u16 cmdif_rev(struct mlx5_core_dev *dev) |
2256 | { |
2257 | return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; |
2258 | } |
2259 | |
2260 | int mlx5_cmd_init(struct mlx5_core_dev *dev) |
2261 | { |
2262 | struct mlx5_cmd *cmd = &dev->cmd; |
2263 | |
2264 | cmd->checksum_disabled = 1; |
2265 | |
2266 | spin_lock_init(&cmd->alloc_lock); |
2267 | spin_lock_init(&cmd->token_lock); |
2268 | |
2269 | set_wqname(dev); |
2270 | cmd->wq = create_singlethread_workqueue(cmd->wq_name); |
2271 | if (!cmd->wq) { |
		mlx5_core_err(dev, "failed to create command workqueue\n");
2273 | return -ENOMEM; |
2274 | } |
2275 | |
2276 | mlx5_cmdif_debugfs_init(dev); |
2277 | |
2278 | return 0; |
2279 | } |
2280 | |
2281 | void mlx5_cmd_cleanup(struct mlx5_core_dev *dev) |
2282 | { |
2283 | struct mlx5_cmd *cmd = &dev->cmd; |
2284 | |
2285 | mlx5_cmdif_debugfs_cleanup(dev); |
	destroy_workqueue(cmd->wq);
2287 | } |
2288 | |
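/* Bring up the command interface hardware state: validate the command
 * interface revision against CMD_IF_REV, read the queue geometry from the
 * initialization segment, allocate and program the command queue page, and
 * start in polling mode with all opcodes allowed.
 */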
2289 | int mlx5_cmd_enable(struct mlx5_core_dev *dev) |
2290 | { |
2291 | int size = sizeof(struct mlx5_cmd_prot_block); |
2292 | int align = roundup_pow_of_two(size); |
2293 | struct mlx5_cmd *cmd = &dev->cmd; |
2294 | u32 cmd_h, cmd_l; |
2295 | int err; |
2296 | |
2297 | memset(&cmd->vars, 0, sizeof(cmd->vars)); |
2298 | cmd->vars.cmdif_rev = cmdif_rev(dev); |
2299 | if (cmd->vars.cmdif_rev != CMD_IF_REV) { |
2300 | mlx5_core_err(dev, |
			      "Driver cmdif rev(%d) differs from firmware's(%d)\n",
2302 | CMD_IF_REV, cmd->vars.cmdif_rev); |
2303 | return -EINVAL; |
2304 | } |
2305 | |
2306 | cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff; |
2307 | cmd->vars.log_sz = cmd_l >> 4 & 0xf; |
2308 | cmd->vars.log_stride = cmd_l & 0xf; |
2309 | if (1 << cmd->vars.log_sz > MLX5_MAX_COMMANDS) { |
		mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n",
2311 | 1 << cmd->vars.log_sz); |
2312 | return -EINVAL; |
2313 | } |
2314 | |
2315 | if (cmd->vars.log_sz + cmd->vars.log_stride > MLX5_ADAPTER_PAGE_SHIFT) { |
		mlx5_core_err(dev, "command queue size overflow\n");
2317 | return -EINVAL; |
2318 | } |
2319 | |
2320 | cmd->state = MLX5_CMDIF_STATE_DOWN; |
2321 | cmd->vars.max_reg_cmds = (1 << cmd->vars.log_sz) - 1; |
2322 | cmd->vars.bitmask = (1UL << cmd->vars.max_reg_cmds) - 1; |
2323 | |
	sema_init(&cmd->vars.sem, cmd->vars.max_reg_cmds);
	sema_init(&cmd->vars.pages_sem, 1);
	sema_init(&cmd->vars.throttle_sem, DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2));
2327 | |
	cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
2329 | if (!cmd->pool) |
2330 | return -ENOMEM; |
2331 | |
2332 | err = alloc_cmd_page(dev, cmd); |
2333 | if (err) |
2334 | goto err_free_pool; |
2335 | |
2336 | cmd_h = (u32)((u64)(cmd->dma) >> 32); |
2337 | cmd_l = (u32)(cmd->dma); |
2338 | if (cmd_l & 0xfff) { |
		mlx5_core_err(dev, "invalid command queue address\n");
2340 | err = -ENOMEM; |
2341 | goto err_cmd_page; |
2342 | } |
2343 | |
2344 | iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h); |
2345 | iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz); |
2346 | |
2347 | /* Make sure firmware sees the complete address before we proceed */ |
2348 | wmb(); |
2349 | |
	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));
2351 | |
2352 | cmd->mode = CMD_MODE_POLLING; |
2353 | cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL; |
2354 | |
2355 | create_msg_cache(dev); |
2356 | create_debugfs_files(dev); |
2357 | |
2358 | return 0; |
2359 | |
2360 | err_cmd_page: |
2361 | free_cmd_page(dev, cmd); |
2362 | err_free_pool: |
	dma_pool_destroy(cmd->pool);
2364 | return err; |
2365 | } |
2366 | |
2367 | void mlx5_cmd_disable(struct mlx5_core_dev *dev) |
2368 | { |
2369 | struct mlx5_cmd *cmd = &dev->cmd; |
2370 | |
2371 | flush_workqueue(cmd->wq); |
2372 | clean_debug_files(dev); |
2373 | destroy_msg_cache(dev); |
2374 | free_cmd_page(dev, cmd); |
	dma_pool_destroy(cmd->pool);
2376 | } |
2377 | |
2378 | void mlx5_cmd_set_state(struct mlx5_core_dev *dev, |
2379 | enum mlx5_cmdif_state cmdif_state) |
2380 | { |
2381 | dev->cmd.state = cmdif_state; |
2382 | } |
2383 | |