// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/security.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <linux/mutex.h>
#include <asm/unaligned.h>
#include <cxlpci.h>
#include <cxlmem.h>
#include <cxl.h>

#include "core.h"
#include "trace.h"

static bool cxl_raw_allow_all;

/**
 * DOC: cxl mbox
 *
 * Core implementation of the CXL 2.0 Type-3 Memory Device Mailbox. The
 * implementation is used by the cxl_pci driver to initialize the device
 * and implement the cxl_mem.h IOCTL UAPI. It also implements the
 * backend of the cxl_pmem_ctl() transport for LIBNVDIMM.
 */

#define cxl_for_each_cmd(cmd)                                                  \
	for ((cmd) = &cxl_mem_commands[0];                                     \
	     ((cmd) - cxl_mem_commands) < ARRAY_SIZE(cxl_mem_commands); (cmd)++)

#define CXL_CMD(_id, sin, sout, _flags)                                        \
	[CXL_MEM_COMMAND_ID_##_id] = {                                         \
		.info = {                                                      \
			.id = CXL_MEM_COMMAND_ID_##_id,                        \
			.size_in = sin,                                        \
			.size_out = sout,                                      \
		},                                                             \
		.opcode = CXL_MBOX_OP_##_id,                                   \
		.flags = _flags,                                               \
	}

#define CXL_VARIABLE_PAYLOAD	~0U
/*
 * This table defines the supported mailbox commands for the driver. This table
 * is made up of a UAPI structure. Sizes in the table other than
 * CXL_VARIABLE_PAYLOAD are validated against the user's input. For example, if
 * size_in is 0, and the user passed in 1, it is an error.
 */
static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
	CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
#ifdef CONFIG_CXL_MEM_RAW_COMMANDS
	CXL_CMD(RAW, CXL_VARIABLE_PAYLOAD, CXL_VARIABLE_PAYLOAD, 0),
#endif
	CXL_CMD(GET_SUPPORTED_LOGS, 0, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
	CXL_CMD(GET_FW_INFO, 0, 0x50, 0),
	CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0),
	CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0),
	CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
	CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
	CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0),
	CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0),
	CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0),
	CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0),
	CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
	CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
	CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
	CXL_CMD(GET_TIMESTAMP, 0, 0x8, 0),
};
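
/*
 * For reference, a sketch of what one CXL_CMD() entry above expands to
 * (illustrative only):
 *
 *	[CXL_MEM_COMMAND_ID_IDENTIFY] = {
 *		.info = {
 *			.id = CXL_MEM_COMMAND_ID_IDENTIFY,
 *			.size_in = 0,
 *			.size_out = 0x43,
 *		},
 *		.opcode = CXL_MBOX_OP_IDENTIFY,
 *		.flags = CXL_CMD_FLAG_FORCE_ENABLE,
 *	}
 */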

/*
 * Commands that RAW doesn't permit. The rationale for each:
 *
 * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment /
 * coordination of transaction timeout values at the root bridge level.
 *
 * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live
 * and needs to be coordinated with HDM updates.
 *
 * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the
 * driver and any writes from userspace invalidate those contents.
 *
 * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes
 * to the device after it is marked clean, userspace cannot make that
 * assertion.
 *
 * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that
 * is kept up to date with patrol notifications and error management.
 *
 * CXL_MBOX_OP_[GET_,INJECT_,CLEAR_]POISON: These commands require kernel
 * driver orchestration for safety.
 */
static u16 cxl_disabled_raw_commands[] = {
	CXL_MBOX_OP_ACTIVATE_FW,
	CXL_MBOX_OP_SET_PARTITION_INFO,
	CXL_MBOX_OP_SET_LSA,
	CXL_MBOX_OP_SET_SHUTDOWN_STATE,
	CXL_MBOX_OP_SCAN_MEDIA,
	CXL_MBOX_OP_GET_SCAN_MEDIA,
	CXL_MBOX_OP_GET_POISON,
	CXL_MBOX_OP_INJECT_POISON,
	CXL_MBOX_OP_CLEAR_POISON,
};

/*
 * Command sets that RAW doesn't permit. All opcodes in this set are
 * disabled because they pass plain text security payloads over the
 * user/kernel boundary. This functionality is intended to be wrapped
 * behind the keys ABI, which allows for encrypted payloads in the UAPI.
 */
static u8 security_command_sets[] = {
	0x44, /* Sanitize */
	0x45, /* Persistent Memory Data-at-rest Security */
	0x46, /* Security Passthrough */
};

static bool cxl_is_security_command(u16 opcode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(security_command_sets); i++)
		if (security_command_sets[i] == (opcode >> 8))
			return true;
	return false;
}

static void cxl_set_security_cmd_enabled(struct cxl_security_state *security,
					 u16 opcode)
{
	switch (opcode) {
	case CXL_MBOX_OP_SANITIZE:
		set_bit(CXL_SEC_ENABLED_SANITIZE, security->enabled_cmds);
		break;
	case CXL_MBOX_OP_SECURE_ERASE:
		set_bit(CXL_SEC_ENABLED_SECURE_ERASE,
			security->enabled_cmds);
		break;
	case CXL_MBOX_OP_GET_SECURITY_STATE:
		set_bit(CXL_SEC_ENABLED_GET_SECURITY_STATE,
			security->enabled_cmds);
		break;
	case CXL_MBOX_OP_SET_PASSPHRASE:
		set_bit(CXL_SEC_ENABLED_SET_PASSPHRASE,
			security->enabled_cmds);
		break;
	case CXL_MBOX_OP_DISABLE_PASSPHRASE:
		set_bit(CXL_SEC_ENABLED_DISABLE_PASSPHRASE,
			security->enabled_cmds);
		break;
	case CXL_MBOX_OP_UNLOCK:
		set_bit(CXL_SEC_ENABLED_UNLOCK, security->enabled_cmds);
		break;
	case CXL_MBOX_OP_FREEZE_SECURITY:
		set_bit(CXL_SEC_ENABLED_FREEZE_SECURITY,
			security->enabled_cmds);
		break;
	case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
		set_bit(CXL_SEC_ENABLED_PASSPHRASE_SECURE_ERASE,
			security->enabled_cmds);
		break;
	default:
		break;
	}
}

static bool cxl_is_poison_command(u16 opcode)
{
#define CXL_MBOX_OP_POISON_CMDS 0x43

	if ((opcode >> 8) == CXL_MBOX_OP_POISON_CMDS)
		return true;

	return false;
}

static void cxl_set_poison_cmd_enabled(struct cxl_poison_state *poison,
				       u16 opcode)
{
	switch (opcode) {
	case CXL_MBOX_OP_GET_POISON:
		set_bit(CXL_POISON_ENABLED_LIST, poison->enabled_cmds);
		break;
	case CXL_MBOX_OP_INJECT_POISON:
		set_bit(CXL_POISON_ENABLED_INJECT, poison->enabled_cmds);
		break;
	case CXL_MBOX_OP_CLEAR_POISON:
		set_bit(CXL_POISON_ENABLED_CLEAR, poison->enabled_cmds);
		break;
	case CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS:
		set_bit(CXL_POISON_ENABLED_SCAN_CAPS, poison->enabled_cmds);
		break;
	case CXL_MBOX_OP_SCAN_MEDIA:
		set_bit(CXL_POISON_ENABLED_SCAN_MEDIA, poison->enabled_cmds);
		break;
	case CXL_MBOX_OP_GET_SCAN_MEDIA:
		set_bit(CXL_POISON_ENABLED_SCAN_RESULTS, poison->enabled_cmds);
		break;
	default:
		break;
	}
}

static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
{
	struct cxl_mem_command *c;

	cxl_for_each_cmd(c)
		if (c->opcode == opcode)
			return c;

	return NULL;
}

static const char *cxl_mem_opcode_to_name(u16 opcode)
{
	struct cxl_mem_command *c;

	c = cxl_mem_find_command(opcode);
	if (!c)
		return NULL;

	return cxl_command_names[c->info.id].name;
}

/**
 * cxl_internal_send_cmd() - Kernel internal interface to send a mailbox command
 * @mds: The driver data for the operation
 * @mbox_cmd: initialized command to execute
 *
 * Context: Any context.
 * Return:
 *  * %0	- Success, any output payload is in @mbox_cmd->payload_out.
 *  * %-E2BIG	- Payload is too large for hardware.
 *  * %-EBUSY	- Couldn't acquire exclusive mailbox access.
 *  * %-EFAULT	- Hardware error occurred.
 *  * %-ENXIO	- Command completed, but device reported an error.
 *  * %-EIO	- Unexpected output size.
 *
 * Mailbox commands may execute successfully yet the device itself reported an
 * error. While this distinction can be useful for commands from userspace, the
 * kernel will only be able to use results when both are successful.
 */
int cxl_internal_send_cmd(struct cxl_memdev_state *mds,
			  struct cxl_mbox_cmd *mbox_cmd)
{
	size_t out_size, min_out;
	int rc;

	if (mbox_cmd->size_in > mds->payload_size ||
	    mbox_cmd->size_out > mds->payload_size)
		return -E2BIG;

	out_size = mbox_cmd->size_out;
	min_out = mbox_cmd->min_out;
	rc = mds->mbox_send(mds, mbox_cmd);
	/*
	 * EIO is reserved for a payload size mismatch and mbox_send()
	 * may not return this error.
	 */
	if (WARN_ONCE(rc == -EIO, "Bad return code: -EIO"))
		return -ENXIO;
	if (rc)
		return rc;

	if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS &&
	    mbox_cmd->return_code != CXL_MBOX_CMD_RC_BACKGROUND)
		return cxl_mbox_cmd_rc2errno(mbox_cmd);

	if (!out_size)
		return 0;

	/*
	 * Variable sized output needs to at least satisfy the caller's
	 * minimum if not the fully requested size.
	 */
	if (min_out == 0)
		min_out = out_size;

	if (mbox_cmd->size_out < min_out)
		return -EIO;
	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_internal_send_cmd, CXL);
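
/*
 * Usage sketch (illustrative, not part of the driver): a kernel-internal
 * caller reading the one-byte shutdown state via the exported interface.
 * The shape follows the other fixed-size commands in this file.
 */
static int __maybe_unused example_get_shutdown_state(struct cxl_memdev_state *mds,
						     u8 *state)
{
	struct cxl_mbox_cmd mbox_cmd = {
		.opcode = CXL_MBOX_OP_GET_SHUTDOWN_STATE,
		.size_out = sizeof(*state),
		.payload_out = state,
	};

	/* 0 on success, negative errno on transport or device error */
	return cxl_internal_send_cmd(mds, &mbox_cmd);
}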

static bool cxl_mem_raw_command_allowed(u16 opcode)
{
	int i;

	if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
		return false;

	if (security_locked_down(LOCKDOWN_PCI_ACCESS))
		return false;

	if (cxl_raw_allow_all)
		return true;

	if (cxl_is_security_command(opcode))
		return false;

	for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++)
		if (cxl_disabled_raw_commands[i] == opcode)
			return false;

	return true;
}

/**
 * cxl_payload_from_user_allowed() - Check contents of in_payload.
 * @opcode: The mailbox command opcode.
 * @payload_in: Pointer to the input payload passed in from user space.
 *
 * Return:
 *  * true	- payload_in passes check for @opcode.
 *  * false	- payload_in contains invalid or unsupported values.
 *
 * The driver may inspect payload contents before sending a mailbox
 * command from user space to the device. The intent is to reject
 * commands with input payloads that are known to be unsafe. This
 * check is not intended to replace the user's careful selection of
 * mailbox command parameters and makes no guarantee that the user
 * command will succeed, nor that it is appropriate.
 *
 * The specific checks are determined by the opcode.
 */
static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
{
	switch (opcode) {
	case CXL_MBOX_OP_SET_PARTITION_INFO: {
		struct cxl_mbox_set_partition_info *pi = payload_in;

		if (pi->flags & CXL_SET_PARTITION_IMMEDIATE_FLAG)
			return false;
		break;
	}
	default:
		break;
	}
	return true;
}

static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox,
			     struct cxl_memdev_state *mds, u16 opcode,
			     size_t in_size, size_t out_size, u64 in_payload)
{
	*mbox = (struct cxl_mbox_cmd) {
		.opcode = opcode,
		.size_in = in_size,
	};

	if (in_size) {
		mbox->payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
						in_size);
		if (IS_ERR(mbox->payload_in))
			return PTR_ERR(mbox->payload_in);

		if (!cxl_payload_from_user_allowed(opcode, mbox->payload_in)) {
			dev_dbg(mds->cxlds.dev, "%s: input payload not allowed\n",
				cxl_mem_opcode_to_name(opcode));
			kvfree(mbox->payload_in);
			return -EBUSY;
		}
	}

	/* Prepare to handle a full payload for variable sized output */
	if (out_size == CXL_VARIABLE_PAYLOAD)
		mbox->size_out = mds->payload_size;
	else
		mbox->size_out = out_size;

	if (mbox->size_out) {
		mbox->payload_out = kvzalloc(mbox->size_out, GFP_KERNEL);
		if (!mbox->payload_out) {
			kvfree(mbox->payload_in);
			return -ENOMEM;
		}
	}
	return 0;
}

static void cxl_mbox_cmd_dtor(struct cxl_mbox_cmd *mbox)
{
	kvfree(mbox->payload_in);
	kvfree(mbox->payload_out);
}
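
/*
 * Lifecycle sketch (illustrative): a successful cxl_mbox_cmd_ctor() must
 * be paired with cxl_mbox_cmd_dtor() to release the bounce buffers, as
 * handle_mailbox_cmd_from_user() does below:
 *
 *	rc = cxl_mbox_cmd_ctor(&mbox, mds, opcode, in_size, out_size,
 *			       in_payload);
 *	if (rc)
 *		return rc;
 *	rc = mds->mbox_send(mds, &mbox);
 *	...
 *	cxl_mbox_cmd_dtor(&mbox);
 */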

static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
			      const struct cxl_send_command *send_cmd,
			      struct cxl_memdev_state *mds)
{
	if (send_cmd->raw.rsvd)
		return -EINVAL;

	/*
	 * Unlike supported commands, the output size of RAW commands
	 * gets passed along without further checking, so it must be
	 * validated here.
	 */
	if (send_cmd->out.size > mds->payload_size)
		return -EINVAL;

	if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
		return -EPERM;

	dev_WARN_ONCE(mds->cxlds.dev, true, "raw command path used\n");

	*mem_cmd = (struct cxl_mem_command) {
		.info = {
			.id = CXL_MEM_COMMAND_ID_RAW,
			.size_in = send_cmd->in.size,
			.size_out = send_cmd->out.size,
		},
		.opcode = send_cmd->raw.opcode
	};

	return 0;
}

static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
			  const struct cxl_send_command *send_cmd,
			  struct cxl_memdev_state *mds)
{
	struct cxl_mem_command *c = &cxl_mem_commands[send_cmd->id];
	const struct cxl_command_info *info = &c->info;

	if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK)
		return -EINVAL;

	if (send_cmd->rsvd)
		return -EINVAL;

	if (send_cmd->in.rsvd || send_cmd->out.rsvd)
		return -EINVAL;

	/* Check that the command is enabled for hardware */
	if (!test_bit(info->id, mds->enabled_cmds))
		return -ENOTTY;

	/* Check that the command is not claimed for exclusive kernel use */
	if (test_bit(info->id, mds->exclusive_cmds))
		return -EBUSY;

	/* Check the input buffer is the expected size */
	if ((info->size_in != CXL_VARIABLE_PAYLOAD) &&
	    (info->size_in != send_cmd->in.size))
		return -ENOMEM;

	/* Check the output buffer is at least large enough */
	if ((info->size_out != CXL_VARIABLE_PAYLOAD) &&
	    (send_cmd->out.size < info->size_out))
		return -ENOMEM;

	*mem_cmd = (struct cxl_mem_command) {
		.info = {
			.id = info->id,
			.flags = info->flags,
			.size_in = send_cmd->in.size,
			.size_out = send_cmd->out.size,
		},
		.opcode = c->opcode
	};

	return 0;
}

/**
 * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
 * @mbox_cmd: Sanitized and populated &struct cxl_mbox_cmd.
 * @mds: The driver data for the operation
 * @send_cmd: &struct cxl_send_command copied in from userspace.
 *
 * Return:
 *  * %0	- @mbox_cmd is ready to send.
 *  * %-ENOTTY	- Invalid command specified.
 *  * %-EINVAL	- Reserved fields or invalid values were used.
 *  * %-ENOMEM	- Input or output buffer wasn't sized properly.
 *  * %-EPERM	- Attempted to use a protected command.
 *  * %-EBUSY	- Kernel has claimed exclusive access to this opcode
 *
 * The result of this command is a fully validated command in @mbox_cmd that is
 * safe to send to the hardware.
 */
static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
				      struct cxl_memdev_state *mds,
				      const struct cxl_send_command *send_cmd)
{
	struct cxl_mem_command mem_cmd;
	int rc;

	if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX)
		return -ENOTTY;

	/*
	 * The user can never specify an input payload larger than what hardware
	 * supports, but output can be arbitrarily large (simply write out as
	 * much data as the hardware provides).
	 */
	if (send_cmd->in.size > mds->payload_size)
		return -EINVAL;

	/* Sanitize and construct a cxl_mem_command */
	if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW)
		rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, mds);
	else
		rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, mds);

	if (rc)
		return rc;

	/* Sanitize and construct a cxl_mbox_cmd */
	return cxl_mbox_cmd_ctor(mbox_cmd, mds, mem_cmd.opcode,
				 mem_cmd.info.size_in, mem_cmd.info.size_out,
				 send_cmd->in.payload);
}

int cxl_query_cmd(struct cxl_memdev *cxlmd,
		  struct cxl_mem_query_commands __user *q)
{
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
	struct device *dev = &cxlmd->dev;
	struct cxl_mem_command *cmd;
	u32 n_commands;
	int j = 0;

	dev_dbg(dev, "Query IOCTL\n");

	if (get_user(n_commands, &q->n_commands))
		return -EFAULT;

	/* Returns the total number of commands if 0 elements are requested. */
	if (n_commands == 0)
		return put_user(ARRAY_SIZE(cxl_mem_commands), &q->n_commands);

	/*
	 * Otherwise, return min(n_commands, total commands) cxl_command_info
	 * structures.
	 */
	cxl_for_each_cmd(cmd) {
		struct cxl_command_info info = cmd->info;

		if (test_bit(info.id, mds->enabled_cmds))
			info.flags |= CXL_MEM_COMMAND_FLAG_ENABLED;
		if (test_bit(info.id, mds->exclusive_cmds))
			info.flags |= CXL_MEM_COMMAND_FLAG_EXCLUSIVE;

		if (copy_to_user(&q->commands[j++], &info, sizeof(info)))
			return -EFAULT;

		if (j == n_commands)
			break;
	}

	return 0;
}
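
/*
 * Userspace sketch (illustrative): the two-call pattern the query ioctl
 * supports, first sizing the array and then filling it. Error handling
 * is elided; see the cxl_mem.h UAPI for the definitive structures.
 *
 *	struct cxl_mem_query_commands *query;
 *	struct cxl_mem_query_commands probe = { .n_commands = 0 };
 *
 *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, &probe);
 *	query = calloc(1, sizeof(*query) +
 *			  probe.n_commands * sizeof(query->commands[0]));
 *	query->n_commands = probe.n_commands;
 *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, query);
 */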

/**
 * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
 * @mds: The driver data for the operation
 * @mbox_cmd: The validated mailbox command.
 * @out_payload: Pointer to userspace's output payload.
 * @size_out: (Input) Max payload size to copy out.
 *            (Output) Payload size hardware generated.
 * @retval: Hardware generated return code from the operation.
 *
 * Return:
 *  * %0	- Mailbox transaction succeeded. This implies the mailbox
 *		  protocol completed successfully not that the operation itself
 *		  was successful.
 *  * %-ENOMEM	- Couldn't allocate a bounce buffer.
 *  * %-EFAULT	- Something happened with copy_to/from_user.
 *  * %-EINTR	- Mailbox acquisition interrupted.
 *  * %-EXXX	- Transaction level failures.
 *
 * Dispatches a mailbox command on behalf of a userspace request.
 * The output payload is copied to userspace.
 *
 * See cxl_send_cmd().
 */
static int handle_mailbox_cmd_from_user(struct cxl_memdev_state *mds,
					struct cxl_mbox_cmd *mbox_cmd,
					u64 out_payload, s32 *size_out,
					u32 *retval)
{
	struct device *dev = mds->cxlds.dev;
	int rc;

	dev_dbg(dev,
		"Submitting %s command for user\n"
		"\topcode: %x\n"
		"\tsize: %zx\n",
		cxl_mem_opcode_to_name(mbox_cmd->opcode),
		mbox_cmd->opcode, mbox_cmd->size_in);

	rc = mds->mbox_send(mds, mbox_cmd);
	if (rc)
		goto out;

	/*
	 * @size_out contains the max size that's allowed to be written back
	 * out to userspace. While the payload may contain more output than
	 * this, the excess is ignored.
	 */
	if (mbox_cmd->size_out) {
		dev_WARN_ONCE(dev, mbox_cmd->size_out > *size_out,
			      "Invalid return size\n");
		if (copy_to_user(u64_to_user_ptr(out_payload),
				 mbox_cmd->payload_out, mbox_cmd->size_out)) {
			rc = -EFAULT;
			goto out;
		}
	}

	*size_out = mbox_cmd->size_out;
	*retval = mbox_cmd->return_code;

out:
	cxl_mbox_cmd_dtor(mbox_cmd);
	return rc;
}

int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
{
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
	struct device *dev = &cxlmd->dev;
	struct cxl_send_command send;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	dev_dbg(dev, "Send IOCTL\n");

	if (copy_from_user(&send, s, sizeof(send)))
		return -EFAULT;

	rc = cxl_validate_cmd_from_user(&mbox_cmd, mds, &send);
	if (rc)
		return rc;

	rc = handle_mailbox_cmd_from_user(mds, &mbox_cmd, send.out.payload,
					  &send.out.size, &send.retval);
	if (rc)
		return rc;

	if (copy_to_user(s, &send, sizeof(send)))
		return -EFAULT;

	return 0;
}
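
/*
 * Userspace sketch (illustrative): sending IDENTIFY through the ioctl
 * path above. A real caller should also inspect send.retval for the
 * device's own status code.
 *
 *	char id_buf[0x43];
 *	struct cxl_send_command send = {
 *		.id = CXL_MEM_COMMAND_ID_IDENTIFY,
 *		.out.size = sizeof(id_buf),
 *		.out.payload = (__u64)(uintptr_t)id_buf,
 *	};
 *
 *	ioctl(fd, CXL_MEM_SEND_COMMAND, &send);
 */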

static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid,
			u32 *size, u8 *out)
{
	u32 remaining = *size;
	u32 offset = 0;

	while (remaining) {
		u32 xfer_size = min_t(u32, remaining, mds->payload_size);
		struct cxl_mbox_cmd mbox_cmd;
		struct cxl_mbox_get_log log;
		int rc;

		log = (struct cxl_mbox_get_log) {
			.uuid = *uuid,
			.offset = cpu_to_le32(offset),
			.length = cpu_to_le32(xfer_size),
		};

		mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_GET_LOG,
			.size_in = sizeof(log),
			.payload_in = &log,
			.size_out = xfer_size,
			.payload_out = out,
		};

		rc = cxl_internal_send_cmd(mds, &mbox_cmd);

		/*
		 * The output payload length that indicates the number
		 * of valid bytes can be smaller than the Log buffer
		 * size.
		 */
		if (rc == -EIO && mbox_cmd.size_out < xfer_size) {
			offset += mbox_cmd.size_out;
			break;
		}

		if (rc < 0)
			return rc;

		out += xfer_size;
		remaining -= xfer_size;
		offset += xfer_size;
	}

	*size = offset;

	return 0;
}
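
/*
 * Worked example (illustrative): with a hypothetical 4KiB mailbox
 * payload and a 10KiB log, cxl_xfer_log() issues three Get Log commands
 * with (offset, length) of (0, 4096), (4096, 4096) and (8192, 2048),
 * advancing the output buffer by each chunk.
 */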

/**
 * cxl_walk_cel() - Walk through the Command Effects Log.
 * @mds: The driver data for the operation
 * @size: Length of the Command Effects Log.
 * @cel: CEL
 *
 * Iterate over each entry in the CEL and determine if the driver supports the
 * command. If so, the command is enabled for the device and can be used later.
 */
static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
{
	struct cxl_cel_entry *cel_entry;
	const int cel_entries = size / sizeof(*cel_entry);
	struct device *dev = mds->cxlds.dev;
	int i;

	cel_entry = (struct cxl_cel_entry *) cel;

	for (i = 0; i < cel_entries; i++) {
		u16 opcode = le16_to_cpu(cel_entry[i].opcode);
		struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
		int enabled = 0;

		if (cmd) {
			set_bit(cmd->info.id, mds->enabled_cmds);
			enabled++;
		}

		if (cxl_is_poison_command(opcode)) {
			cxl_set_poison_cmd_enabled(&mds->poison, opcode);
			enabled++;
		}

		if (cxl_is_security_command(opcode)) {
			cxl_set_security_cmd_enabled(&mds->security, opcode);
			enabled++;
		}

		dev_dbg(dev, "Opcode 0x%04x %s\n", opcode,
			enabled ? "enabled" : "unsupported by driver");
	}
}

static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_memdev_state *mds)
{
	struct cxl_mbox_get_supported_logs *ret;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	ret = kvmalloc(mds->payload_size, GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_SUPPORTED_LOGS,
		.size_out = mds->payload_size,
		.payload_out = ret,
		/* At least the record number field must be valid */
		.min_out = 2,
	};
	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
	if (rc < 0) {
		kvfree(ret);
		return ERR_PTR(rc);
	}

	return ret;
}

enum {
	CEL_UUID,
	VENDOR_DEBUG_UUID,
};

/* See CXL 2.0 Table 170. Get Log Input Payload */
static const uuid_t log_uuid[] = {
	[CEL_UUID] = DEFINE_CXL_CEL_UUID,
	[VENDOR_DEBUG_UUID] = DEFINE_CXL_VENDOR_DEBUG_UUID,
};

/**
 * cxl_enumerate_cmds() - Enumerate commands for a device.
 * @mds: The driver data for the operation
 *
 * Returns 0 if enumerate completed successfully.
 *
 * CXL devices have optional support for certain commands. This function will
 * determine the set of supported commands for the hardware and update the
 * enabled_cmds bitmap in the @mds.
 */
int cxl_enumerate_cmds(struct cxl_memdev_state *mds)
{
	struct cxl_mbox_get_supported_logs *gsl;
	struct device *dev = mds->cxlds.dev;
	struct cxl_mem_command *cmd;
	int i, rc;

	gsl = cxl_get_gsl(mds);
	if (IS_ERR(gsl))
		return PTR_ERR(gsl);

	rc = -ENOENT;
	for (i = 0; i < le16_to_cpu(gsl->entries); i++) {
		u32 size = le32_to_cpu(gsl->entry[i].size);
		uuid_t uuid = gsl->entry[i].uuid;
		u8 *log;

		dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size);

		if (!uuid_equal(&uuid, &log_uuid[CEL_UUID]))
			continue;

		log = kvmalloc(size, GFP_KERNEL);
		if (!log) {
			rc = -ENOMEM;
			goto out;
		}

		rc = cxl_xfer_log(mds, &uuid, &size, log);
		if (rc) {
			kvfree(log);
			goto out;
		}

		cxl_walk_cel(mds, size, log);
		kvfree(log);

		/* In case CEL was bogus, enable some default commands. */
		cxl_for_each_cmd(cmd)
			if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
				set_bit(cmd->info.id, mds->enabled_cmds);

		/* Found the required CEL */
		rc = 0;
	}
out:
	kvfree(gsl);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);

void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
			    enum cxl_event_log_type type,
			    enum cxl_event_type event_type,
			    const uuid_t *uuid, union cxl_event *evt)
{
	if (event_type == CXL_CPER_EVENT_GEN_MEDIA)
		trace_cxl_general_media(cxlmd, type, &evt->gen_media);
	else if (event_type == CXL_CPER_EVENT_DRAM)
		trace_cxl_dram(cxlmd, type, &evt->dram);
	else if (event_type == CXL_CPER_EVENT_MEM_MODULE)
		trace_cxl_memory_module(cxlmd, type, &evt->mem_module);
	else
		trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic);
}
EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, CXL);

static void __cxl_event_trace_record(const struct cxl_memdev *cxlmd,
				     enum cxl_event_log_type type,
				     struct cxl_event_record_raw *record)
{
	enum cxl_event_type ev_type = CXL_CPER_EVENT_GENERIC;
	const uuid_t *uuid = &record->id;

	if (uuid_equal(uuid, &CXL_EVENT_GEN_MEDIA_UUID))
		ev_type = CXL_CPER_EVENT_GEN_MEDIA;
	else if (uuid_equal(uuid, &CXL_EVENT_DRAM_UUID))
		ev_type = CXL_CPER_EVENT_DRAM;
	else if (uuid_equal(uuid, &CXL_EVENT_MEM_MODULE_UUID))
		ev_type = CXL_CPER_EVENT_MEM_MODULE;

	cxl_event_trace_record(cxlmd, type, ev_type, uuid, &record->event);
}

static int cxl_clear_event_record(struct cxl_memdev_state *mds,
				  enum cxl_event_log_type log,
				  struct cxl_get_event_payload *get_pl)
{
	struct cxl_mbox_clear_event_payload *payload;
	u16 total = le16_to_cpu(get_pl->record_count);
	u8 max_handles = CXL_CLEAR_EVENT_MAX_HANDLES;
	size_t pl_size = struct_size(payload, handles, max_handles);
	struct cxl_mbox_cmd mbox_cmd;
	u16 cnt;
	int rc = 0;
	int i;

	/* Payload size may limit the max handles */
	if (pl_size > mds->payload_size) {
		max_handles = (mds->payload_size - sizeof(*payload)) /
			      sizeof(__le16);
		pl_size = struct_size(payload, handles, max_handles);
	}

	payload = kvzalloc(pl_size, GFP_KERNEL);
	if (!payload)
		return -ENOMEM;

	*payload = (struct cxl_mbox_clear_event_payload) {
		.event_log = log,
	};

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_CLEAR_EVENT_RECORD,
		.payload_in = payload,
		.size_in = pl_size,
	};

	/*
	 * Clear Event Records uses u8 for the handle cnt while Get Event
	 * Record can return up to 0xffff records.
	 */
	i = 0;
	for (cnt = 0; cnt < total; cnt++) {
		struct cxl_event_record_raw *raw = &get_pl->records[cnt];
		struct cxl_event_generic *gen = &raw->event.generic;

		payload->handles[i++] = gen->hdr.handle;
		dev_dbg(mds->cxlds.dev, "Event log '%d': Clearing %u\n", log,
			le16_to_cpu(payload->handles[i - 1]));

		if (i == max_handles) {
			payload->nr_recs = i;
			rc = cxl_internal_send_cmd(mds, &mbox_cmd);
			if (rc)
				goto free_pl;
			i = 0;
		}
	}

	/* Clear what is left if any */
	if (i) {
		payload->nr_recs = i;
		mbox_cmd.size_in = struct_size(payload, handles, i);
		rc = cxl_internal_send_cmd(mds, &mbox_cmd);
		if (rc)
			goto free_pl;
	}

free_pl:
	kvfree(payload);
	return rc;
}

static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
				    enum cxl_event_log_type type)
{
	struct cxl_memdev *cxlmd = mds->cxlds.cxlmd;
	struct device *dev = mds->cxlds.dev;
	struct cxl_get_event_payload *payload;
	struct cxl_mbox_cmd mbox_cmd;
	u8 log_type = type;
	u16 nr_rec;

	mutex_lock(&mds->event.log_lock);
	payload = mds->event.buf;

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
		.payload_in = &log_type,
		.size_in = sizeof(log_type),
		.payload_out = payload,
		.min_out = struct_size(payload, records, 0),
	};

	do {
		int rc, i;

		mbox_cmd.size_out = mds->payload_size;

		rc = cxl_internal_send_cmd(mds, &mbox_cmd);
		if (rc) {
			dev_err_ratelimited(dev,
				"Event log '%d': Failed to query event records : %d",
				type, rc);
			break;
		}

		nr_rec = le16_to_cpu(payload->record_count);
		if (!nr_rec)
			break;

		for (i = 0; i < nr_rec; i++)
			__cxl_event_trace_record(cxlmd, type,
						 &payload->records[i]);

		if (payload->flags & CXL_GET_EVENT_FLAG_OVERFLOW)
			trace_cxl_overflow(cxlmd, type, payload);

		rc = cxl_clear_event_record(mds, type, payload);
		if (rc) {
			dev_err_ratelimited(dev,
				"Event log '%d': Failed to clear events : %d",
				type, rc);
			break;
		}
	} while (nr_rec);

	mutex_unlock(&mds->event.log_lock);
}

/**
 * cxl_mem_get_event_records - Get Event Records from the device
 * @mds: The driver data for the operation
 * @status: Event Status register value identifying which events are available.
 *
 * Retrieve all event records available on the device, report them as trace
 * events, and clear them.
 *
 * See CXL rev 3.0 @8.2.9.2.2 Get Event Records
 * See CXL rev 3.0 @8.2.9.2.3 Clear Event Records
 */
void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status)
{
	dev_dbg(mds->cxlds.dev, "Reading event logs: %x\n", status);

	if (status & CXLDEV_EVENT_STATUS_FATAL)
		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FATAL);
	if (status & CXLDEV_EVENT_STATUS_FAIL)
		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FAIL);
	if (status & CXLDEV_EVENT_STATUS_WARN)
		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_WARN);
	if (status & CXLDEV_EVENT_STATUS_INFO)
		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_INFO);
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, CXL);

/**
 * cxl_mem_get_partition_info - Get partition info
 * @mds: The driver data for the operation
 *
 * Retrieve the current partition info for the device specified. The active
 * values are the current capacity in bytes. If not 0, the 'next' values are
 * the pending values, in bytes, which take effect on next cold reset.
 *
 * Return: 0 if no error; otherwise the result of the mailbox command.
 *
 * See CXL @8.2.9.5.2.1 Get Partition Info
 */
static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds)
{
	struct cxl_mbox_get_partition_info pi;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_PARTITION_INFO,
		.size_out = sizeof(pi),
		.payload_out = &pi,
	};
	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
	if (rc)
		return rc;

	mds->active_volatile_bytes =
		le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
	mds->active_persistent_bytes =
		le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER;
	mds->next_volatile_bytes =
		le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
	mds->next_persistent_bytes =
		le64_to_cpu(pi.next_persistent_cap) * CXL_CAPACITY_MULTIPLIER;

	return 0;
}

/**
 * cxl_dev_state_identify() - Send the IDENTIFY command to the device.
 * @mds: The driver data for the operation
 *
 * Return: 0 if identify was executed successfully or media not ready.
 *
 * This will dispatch the identify command to the device and on success populate
 * structures to be exported to sysfs.
 */
int cxl_dev_state_identify(struct cxl_memdev_state *mds)
{
	/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
	struct cxl_mbox_identify id;
	struct cxl_mbox_cmd mbox_cmd;
	u32 val;
	int rc;

	if (!mds->cxlds.media_ready)
		return 0;

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_IDENTIFY,
		.size_out = sizeof(id),
		.payload_out = &id,
	};
	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
	if (rc < 0)
		return rc;

	mds->total_bytes =
		le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER;
	mds->volatile_only_bytes =
		le64_to_cpu(id.volatile_capacity) * CXL_CAPACITY_MULTIPLIER;
	mds->persistent_only_bytes =
		le64_to_cpu(id.persistent_capacity) * CXL_CAPACITY_MULTIPLIER;
	mds->partition_align_bytes =
		le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;

	mds->lsa_size = le32_to_cpu(id.lsa_size);
	memcpy(mds->firmware_version, id.fw_revision,
	       sizeof(id.fw_revision));

	if (test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds)) {
		val = get_unaligned_le24(id.poison_list_max_mer);
		mds->poison.max_errors = min_t(u32, val, CXL_POISON_LIST_MAX);
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);

static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
{
	int rc;
	u32 sec_out = 0;
	struct cxl_get_security_output {
		__le32 flags;
	} out;
	struct cxl_mbox_cmd sec_cmd = {
		.opcode = CXL_MBOX_OP_GET_SECURITY_STATE,
		.payload_out = &out,
		.size_out = sizeof(out),
	};
	struct cxl_mbox_cmd mbox_cmd = { .opcode = cmd };
	struct cxl_dev_state *cxlds = &mds->cxlds;

	if (cmd != CXL_MBOX_OP_SANITIZE && cmd != CXL_MBOX_OP_SECURE_ERASE)
		return -EINVAL;

	rc = cxl_internal_send_cmd(mds, &sec_cmd);
	if (rc < 0) {
		dev_err(cxlds->dev, "Failed to get security state : %d", rc);
		return rc;
	}

	/*
	 * Prior to using these commands, any security applied to
	 * the user data areas of the device shall be DISABLED (or
	 * UNLOCKED for secure erase case).
	 */
	sec_out = le32_to_cpu(out.flags);
	if (sec_out & CXL_PMEM_SEC_STATE_USER_PASS_SET)
		return -EINVAL;

	if (cmd == CXL_MBOX_OP_SECURE_ERASE &&
	    sec_out & CXL_PMEM_SEC_STATE_LOCKED)
		return -EINVAL;

	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
	if (rc < 0) {
		dev_err(cxlds->dev, "Failed to sanitize device : %d", rc);
		return rc;
	}

	return 0;
}

/**
 * cxl_mem_sanitize() - Send a sanitization command to the device.
 * @cxlmd: The device for the operation
 * @cmd: The specific sanitization command opcode
 *
 * Return: 0 if the command was executed successfully, regardless of
 * whether or not the actual security operation is done in the background,
 * such as for the Sanitize case.
 * Error return values can be the result of the mailbox command, -EINVAL
 * when security requirements are not met or the context is invalid, or
 * -EBUSY if the sanitize operation is already in flight.
 *
 * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase.
 */
int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
{
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
	struct cxl_port *endpoint;
	int rc;

	/* synchronize with cxl_mem_probe() and decoder write operations */
	device_lock(&cxlmd->dev);
	endpoint = cxlmd->endpoint;
	down_read(&cxl_region_rwsem);
	/*
	 * Require an endpoint to be safe otherwise the driver can not
	 * be sure that the device is unmapped.
	 */
	if (endpoint && cxl_num_decoders_committed(endpoint) == 0)
		rc = __cxl_mem_sanitize(mds, cmd);
	else
		rc = -EBUSY;
	up_read(&cxl_region_rwsem);
	device_unlock(&cxlmd->dev);

	return rc;
}

static int add_dpa_res(struct device *dev, struct resource *parent,
		       struct resource *res, resource_size_t start,
		       resource_size_t size, const char *type)
{
	int rc;

	res->name = type;
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM;
	if (resource_size(res) == 0) {
		dev_dbg(dev, "DPA(%s): no capacity\n", res->name);
		return 0;
	}
	rc = request_resource(parent, res);
	if (rc) {
		dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name,
			res, rc);
		return rc;
	}

	dev_dbg(dev, "DPA(%s): %pr\n", res->name, res);

	return 0;
}

int cxl_mem_create_range_info(struct cxl_memdev_state *mds)
{
	struct cxl_dev_state *cxlds = &mds->cxlds;
	struct device *dev = cxlds->dev;
	int rc;

	if (!cxlds->media_ready) {
		cxlds->dpa_res = DEFINE_RES_MEM(0, 0);
		cxlds->ram_res = DEFINE_RES_MEM(0, 0);
		cxlds->pmem_res = DEFINE_RES_MEM(0, 0);
		return 0;
	}

	cxlds->dpa_res = DEFINE_RES_MEM(0, mds->total_bytes);

	if (mds->partition_align_bytes == 0) {
		rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
				 mds->volatile_only_bytes, "ram");
		if (rc)
			return rc;
		return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
				   mds->volatile_only_bytes,
				   mds->persistent_only_bytes, "pmem");
	}

	rc = cxl_mem_get_partition_info(mds);
	if (rc) {
		dev_err(dev, "Failed to query partition information\n");
		return rc;
	}

	rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
			 mds->active_volatile_bytes, "ram");
	if (rc)
		return rc;
	return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
			   mds->active_volatile_bytes,
			   mds->active_persistent_bytes, "pmem");
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL);
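
/*
 * Example layout (illustrative): a hypothetical device with 256MB of
 * volatile and 256MB of persistent capacity and no partitioning support
 * ends up with:
 *
 *	dpa_res:  [mem 0x00000000-0x1fffffff]
 *	ram_res:  [mem 0x00000000-0x0fffffff]
 *	pmem_res: [mem 0x10000000-0x1fffffff]
 */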

int cxl_set_timestamp(struct cxl_memdev_state *mds)
{
	struct cxl_mbox_cmd mbox_cmd;
	struct cxl_mbox_set_timestamp_in pi;
	int rc;

	pi.timestamp = cpu_to_le64(ktime_get_real_ns());
	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_SET_TIMESTAMP,
		.size_in = sizeof(pi),
		.payload_in = &pi,
	};

	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
	/*
	 * Command is optional. Devices may have another way of providing
	 * a timestamp, or may return all 0s in timestamp fields.
	 * Don't report an error if this command isn't supported.
	 */
	if (rc && (mbox_cmd.return_code != CXL_MBOX_CMD_RC_UNSUPPORTED))
		return rc;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_set_timestamp, CXL);

int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
		       struct cxl_region *cxlr)
{
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
	struct cxl_mbox_poison_out *po;
	struct cxl_mbox_poison_in pi;
	struct cxl_mbox_cmd mbox_cmd;
	int nr_records = 0;
	int rc;

	rc = mutex_lock_interruptible(&mds->poison.lock);
	if (rc)
		return rc;

	po = mds->poison.list_out;
	pi.offset = cpu_to_le64(offset);
	pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT);

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_POISON,
		.size_in = sizeof(pi),
		.payload_in = &pi,
		.size_out = mds->payload_size,
		.payload_out = po,
		.min_out = struct_size(po, record, 0),
	};

	do {
		rc = cxl_internal_send_cmd(mds, &mbox_cmd);
		if (rc)
			break;

		for (int i = 0; i < le16_to_cpu(po->count); i++)
			trace_cxl_poison(cxlmd, cxlr, &po->record[i],
					 po->flags, po->overflow_ts,
					 CXL_POISON_TRACE_LIST);

		/* Protect against an uncleared _FLAG_MORE */
		nr_records = nr_records + le16_to_cpu(po->count);
		if (nr_records >= mds->poison.max_errors) {
			dev_dbg(&cxlmd->dev, "Max Error Records reached: %d\n",
				nr_records);
			break;
		}
	} while (po->flags & CXL_POISON_FLAG_MORE);

	mutex_unlock(&mds->poison.lock);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_get_poison, CXL);

static void free_poison_buf(void *buf)
{
	kvfree(buf);
}

/* Get Poison List output buffer is protected by mds->poison.lock */
static int cxl_poison_alloc_buf(struct cxl_memdev_state *mds)
{
	mds->poison.list_out = kvmalloc(mds->payload_size, GFP_KERNEL);
	if (!mds->poison.list_out)
		return -ENOMEM;

	return devm_add_action_or_reset(mds->cxlds.dev, free_poison_buf,
					mds->poison.list_out);
}

int cxl_poison_state_init(struct cxl_memdev_state *mds)
{
	int rc;

	if (!test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds))
		return 0;

	rc = cxl_poison_alloc_buf(mds);
	if (rc) {
		clear_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds);
		return rc;
	}

	mutex_init(&mds->poison.lock);
	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, CXL);

struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
{
	struct cxl_memdev_state *mds;

	mds = devm_kzalloc(dev, sizeof(*mds), GFP_KERNEL);
	if (!mds) {
		dev_err(dev, "No memory available\n");
		return ERR_PTR(-ENOMEM);
	}

	mutex_init(&mds->mbox_mutex);
	mutex_init(&mds->event.log_lock);
	mds->cxlds.dev = dev;
	mds->cxlds.reg_map.host = dev;
	mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
	mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
	mds->ram_perf.qos_class = CXL_QOS_CLASS_INVALID;
	mds->pmem_perf.qos_class = CXL_QOS_CLASS_INVALID;

	return mds;
}
EXPORT_SYMBOL_NS_GPL(cxl_memdev_state_create, CXL);
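
/*
 * Initialization sketch (illustrative): the typical probe-time ordering
 * a caller such as cxl_pci follows with the exports in this file. Error
 * handling is elided and the mbox_send callback is driver specific
 * (my_mbox_send below is hypothetical).
 *
 *	mds = cxl_memdev_state_create(dev);
 *	mds->mbox_send = my_mbox_send;
 *	cxl_enumerate_cmds(mds);
 *	cxl_set_timestamp(mds);
 *	cxl_poison_state_init(mds);
 *	cxl_dev_state_identify(mds);
 *	cxl_mem_create_range_info(mds);
 */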

void __init cxl_mbox_init(void)
{
	struct dentry *mbox_debugfs;

	mbox_debugfs = cxl_debugfs_create_dir("mbox");
	debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
			    &cxl_raw_allow_all);
}