// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/security.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <linux/mutex.h>
#include <linux/unaligned.h>
#include <cxlpci.h>
#include <cxlmem.h>
#include <cxl.h>

#include "core.h"
#include "trace.h"
#include "mce.h"

static bool cxl_raw_allow_all;

/**
 * DOC: cxl mbox
 *
 * Core implementation of the CXL 2.0 Type-3 Memory Device Mailbox. The
 * implementation is used by the cxl_pci driver to initialize the device
 * and implement the cxl_mem.h IOCTL UAPI. It also implements the
 * backend of the cxl_pmem_ctl() transport for LIBNVDIMM.
 */

#define cxl_for_each_cmd(cmd)                                                  \
	for ((cmd) = &cxl_mem_commands[0];                                     \
	     ((cmd) - cxl_mem_commands) < ARRAY_SIZE(cxl_mem_commands); (cmd)++)

#define CXL_CMD(_id, sin, sout, _flags)                                        \
	[CXL_MEM_COMMAND_ID_##_id] = {                                         \
		.info =	{                                                      \
			.id = CXL_MEM_COMMAND_ID_##_id,                        \
			.size_in = sin,                                        \
			.size_out = sout,                                      \
		},                                                             \
		.opcode = CXL_MBOX_OP_##_id,                                   \
		.flags = _flags,                                               \
	}

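/*
 * For illustration (not part of the build): the first table entry below,
 * CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE), expands to an
 * element at index CXL_MEM_COMMAND_ID_IDENTIFY with
 * .opcode = CXL_MBOX_OP_IDENTIFY, .info.size_in = 0 and
 * .info.size_out = 0x43.
 */
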
#define CXL_VARIABLE_PAYLOAD	~0U
/*
 * This table defines the supported mailbox commands for the driver. This table
 * is made up of a UAPI structure. Non-negative values as parameters in the
 * table will be validated against the user's input. For example, if size_in is
 * 0, and the user passed in 1, it is an error.
 */
static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
	CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
#ifdef CONFIG_CXL_MEM_RAW_COMMANDS
	CXL_CMD(RAW, CXL_VARIABLE_PAYLOAD, CXL_VARIABLE_PAYLOAD, 0),
#endif
	CXL_CMD(GET_SUPPORTED_LOGS, 0, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
	CXL_CMD(GET_FW_INFO, 0, 0x50, 0),
	CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0),
	CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0),
	CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
	CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
	CXL_CMD(GET_LOG_CAPS, 0x10, 0x4, 0),
	CXL_CMD(CLEAR_LOG, 0x10, 0, 0),
	CXL_CMD(GET_SUP_LOG_SUBLIST, 0x2, CXL_VARIABLE_PAYLOAD, 0),
	CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0),
	CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0),
	CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0),
	CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0),
	CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
	CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
	CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
	CXL_CMD(GET_TIMESTAMP, 0, 0x8, 0),
};

/*
 * Commands that RAW doesn't permit. The rationale for each:
 *
 * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment /
 * coordination of transaction timeout values at the root bridge level.
 *
 * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live
 * and needs to be coordinated with HDM updates.
 *
 * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the
 * driver and any writes from userspace invalidate those contents.
 *
 * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes
 * to the device after it is marked clean; userspace cannot make that
 * assertion.
 *
 * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that
 * is kept up to date with patrol notifications and error management.
 *
 * CXL_MBOX_OP_[GET_,INJECT_,CLEAR_]POISON: These commands require kernel
 * driver orchestration for safety.
 */
static u16 cxl_disabled_raw_commands[] = {
	CXL_MBOX_OP_ACTIVATE_FW,
	CXL_MBOX_OP_SET_PARTITION_INFO,
	CXL_MBOX_OP_SET_LSA,
	CXL_MBOX_OP_SET_SHUTDOWN_STATE,
	CXL_MBOX_OP_SCAN_MEDIA,
	CXL_MBOX_OP_GET_SCAN_MEDIA,
	CXL_MBOX_OP_GET_POISON,
	CXL_MBOX_OP_INJECT_POISON,
	CXL_MBOX_OP_CLEAR_POISON,
};

/*
 * Command sets that RAW doesn't permit. All opcodes in this set are
 * disabled because they pass plain text security payloads over the
 * user/kernel boundary. This functionality is intended to be wrapped
 * behind the keys ABI, which allows for encrypted payloads in the UAPI.
 */
static u8 security_command_sets[] = {
	0x44, /* Sanitize */
	0x45, /* Persistent Memory Data-at-rest Security */
	0x46, /* Security Passthrough */
};

static bool cxl_is_security_command(u16 opcode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(security_command_sets); i++)
		if (security_command_sets[i] == (opcode >> 8))
			return true;
	return false;
}

static void cxl_set_security_cmd_enabled(struct cxl_security_state *security,
					 u16 opcode)
{
	switch (opcode) {
	case CXL_MBOX_OP_SANITIZE:
		set_bit(CXL_SEC_ENABLED_SANITIZE, security->enabled_cmds);
		break;
	case CXL_MBOX_OP_SECURE_ERASE:
		set_bit(CXL_SEC_ENABLED_SECURE_ERASE,
			security->enabled_cmds);
		break;
	case CXL_MBOX_OP_GET_SECURITY_STATE:
		set_bit(CXL_SEC_ENABLED_GET_SECURITY_STATE,
			security->enabled_cmds);
		break;
	case CXL_MBOX_OP_SET_PASSPHRASE:
		set_bit(CXL_SEC_ENABLED_SET_PASSPHRASE,
			security->enabled_cmds);
		break;
	case CXL_MBOX_OP_DISABLE_PASSPHRASE:
		set_bit(CXL_SEC_ENABLED_DISABLE_PASSPHRASE,
			security->enabled_cmds);
		break;
	case CXL_MBOX_OP_UNLOCK:
		set_bit(CXL_SEC_ENABLED_UNLOCK, security->enabled_cmds);
		break;
	case CXL_MBOX_OP_FREEZE_SECURITY:
		set_bit(CXL_SEC_ENABLED_FREEZE_SECURITY,
			security->enabled_cmds);
		break;
	case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
		set_bit(CXL_SEC_ENABLED_PASSPHRASE_SECURE_ERASE,
			security->enabled_cmds);
		break;
	default:
		break;
	}
}

static bool cxl_is_poison_command(u16 opcode)
{
#define CXL_MBOX_OP_POISON_CMDS 0x43

	if ((opcode >> 8) == CXL_MBOX_OP_POISON_CMDS)
		return true;

	return false;
}

static void cxl_set_poison_cmd_enabled(struct cxl_poison_state *poison,
				       u16 opcode)
{
	switch (opcode) {
	case CXL_MBOX_OP_GET_POISON:
		set_bit(CXL_POISON_ENABLED_LIST, poison->enabled_cmds);
		break;
	case CXL_MBOX_OP_INJECT_POISON:
		set_bit(CXL_POISON_ENABLED_INJECT, poison->enabled_cmds);
		break;
	case CXL_MBOX_OP_CLEAR_POISON:
		set_bit(CXL_POISON_ENABLED_CLEAR, poison->enabled_cmds);
		break;
	case CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS:
		set_bit(CXL_POISON_ENABLED_SCAN_CAPS, poison->enabled_cmds);
		break;
	case CXL_MBOX_OP_SCAN_MEDIA:
		set_bit(CXL_POISON_ENABLED_SCAN_MEDIA, poison->enabled_cmds);
		break;
	case CXL_MBOX_OP_GET_SCAN_MEDIA:
		set_bit(CXL_POISON_ENABLED_SCAN_RESULTS, poison->enabled_cmds);
		break;
	default:
		break;
	}
}

static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
{
	struct cxl_mem_command *c;

	cxl_for_each_cmd(c)
		if (c->opcode == opcode)
			return c;

	return NULL;
}

static const char *cxl_mem_opcode_to_name(u16 opcode)
{
	struct cxl_mem_command *c;

	c = cxl_mem_find_command(opcode);
	if (!c)
		return NULL;

	return cxl_command_names[c->info.id].name;
}

/**
 * cxl_internal_send_cmd() - Kernel internal interface to send a mailbox command
 * @cxl_mbox: CXL mailbox context
 * @mbox_cmd: initialized command to execute
 *
 * Context: Any context.
 * Return:
 *  * %>=0	- Number of bytes returned in @out.
 *  * %-E2BIG	- Payload is too large for hardware.
 *  * %-EBUSY	- Couldn't acquire exclusive mailbox access.
 *  * %-EFAULT	- Hardware error occurred.
 *  * %-ENXIO	- Command completed, but device reported an error.
 *  * %-EIO	- Unexpected output size.
 *
 * Mailbox commands may execute successfully yet the device itself reported an
 * error. While this distinction can be useful for commands from userspace, the
 * kernel will only be able to use results when both are successful.
 */
int cxl_internal_send_cmd(struct cxl_mailbox *cxl_mbox,
			  struct cxl_mbox_cmd *mbox_cmd)
{
	size_t out_size, min_out;
	int rc;

	if (mbox_cmd->size_in > cxl_mbox->payload_size ||
	    mbox_cmd->size_out > cxl_mbox->payload_size)
		return -E2BIG;

	out_size = mbox_cmd->size_out;
	min_out = mbox_cmd->min_out;
	rc = cxl_mbox->mbox_send(cxl_mbox, mbox_cmd);
	/*
	 * EIO is reserved for a payload size mismatch and mbox_send()
	 * may not return this error.
	 */
	if (WARN_ONCE(rc == -EIO, "Bad return code: -EIO"))
		return -ENXIO;
	if (rc)
		return rc;

	if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS &&
	    mbox_cmd->return_code != CXL_MBOX_CMD_RC_BACKGROUND)
		return cxl_mbox_cmd_rc2errno(mbox_cmd);

	if (!out_size)
		return 0;

	/*
	 * Variable sized output needs to at least satisfy the caller's
	 * minimum if not the fully requested size.
	 */
	if (min_out == 0)
		min_out = out_size;

	if (mbox_cmd->size_out < min_out)
		return -EIO;
	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_internal_send_cmd, "CXL");
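
/*
 * Illustrative sketch (not part of the build): a typical internal caller
 * stack-allocates a struct cxl_mbox_cmd, points payload_out at a buffer no
 * larger than cxl_mbox->payload_size, and checks the return code. This
 * mirrors callers later in this file, e.g. cxl_get_dirty_count():
 *
 *	struct cxl_mbox_get_health_info_out hi;
 *	struct cxl_mbox_cmd mbox_cmd = {
 *		.opcode = CXL_MBOX_OP_GET_HEALTH_INFO,
 *		.size_out = sizeof(hi),
 *		.payload_out = &hi,
 *	};
 *	int rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
 */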

static bool cxl_mem_raw_command_allowed(u16 opcode)
{
	int i;

	if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
		return false;

	if (security_locked_down(LOCKDOWN_PCI_ACCESS))
		return false;

	if (cxl_raw_allow_all)
		return true;

	if (cxl_is_security_command(opcode))
		return false;

	for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++)
		if (cxl_disabled_raw_commands[i] == opcode)
			return false;

	return true;
}
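
/*
 * Note: cxl_raw_allow_all is the debugfs boolean created in cxl_mbox_init()
 * below ("raw_allow_all" under the "mbox" directory, typically surfaced at
 * /sys/kernel/debug/cxl/mbox/raw_allow_all). Setting it bypasses the
 * security-command and disabled-opcode filtering above, but not the
 * CONFIG_CXL_MEM_RAW_COMMANDS or kernel-lockdown checks.
 */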

/**
 * cxl_payload_from_user_allowed() - Check contents of in_payload.
 * @opcode: The mailbox command opcode.
 * @payload_in: Pointer to the input payload passed in from user space.
 *
 * Return:
 *  * true	- payload_in passes check for @opcode.
 *  * false	- payload_in contains invalid or unsupported values.
 *
 * The driver may inspect payload contents before sending a mailbox
 * command from user space to the device. The intent is to reject
 * commands with input payloads that are known to be unsafe. This
 * check is not intended to replace the user's careful selection of
 * mailbox command parameters and makes no guarantee that the user
 * command will succeed, nor that it is appropriate.
 *
 * The specific checks are determined by the opcode.
 */
static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
{
	switch (opcode) {
	case CXL_MBOX_OP_SET_PARTITION_INFO: {
		struct cxl_mbox_set_partition_info *pi = payload_in;

		if (pi->flags & CXL_SET_PARTITION_IMMEDIATE_FLAG)
			return false;
		break;
	}
	case CXL_MBOX_OP_CLEAR_LOG: {
		const uuid_t *uuid = (uuid_t *)payload_in;

		/*
		 * Restrict the 'Clear Log' action to only apply to
		 * Vendor debug logs.
		 */
		return uuid_equal(uuid, &DEFINE_CXL_VENDOR_DEBUG_UUID);
	}
	default:
		break;
	}
	return true;
}

static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox_cmd,
			     struct cxl_mailbox *cxl_mbox, u16 opcode,
			     size_t in_size, size_t out_size, u64 in_payload)
{
	*mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = opcode,
		.size_in = in_size,
	};

	if (in_size) {
		mbox_cmd->payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
						    in_size);
		if (IS_ERR(mbox_cmd->payload_in))
			return PTR_ERR(mbox_cmd->payload_in);

		if (!cxl_payload_from_user_allowed(opcode, mbox_cmd->payload_in)) {
			dev_dbg(cxl_mbox->host, "%s: input payload not allowed\n",
				cxl_mem_opcode_to_name(opcode));
			kvfree(mbox_cmd->payload_in);
			return -EBUSY;
		}
	}

	/* Prepare to handle a full payload for variable sized output */
	if (out_size == CXL_VARIABLE_PAYLOAD)
		mbox_cmd->size_out = cxl_mbox->payload_size;
	else
		mbox_cmd->size_out = out_size;

	if (mbox_cmd->size_out) {
		mbox_cmd->payload_out = kvzalloc(mbox_cmd->size_out, GFP_KERNEL);
		if (!mbox_cmd->payload_out) {
			kvfree(mbox_cmd->payload_in);
			return -ENOMEM;
		}
	}
	return 0;
}

static void cxl_mbox_cmd_dtor(struct cxl_mbox_cmd *mbox)
{
	kvfree(mbox->payload_in);
	kvfree(mbox->payload_out);
}

static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
			      const struct cxl_send_command *send_cmd,
			      struct cxl_mailbox *cxl_mbox)
{
	if (send_cmd->raw.rsvd)
		return -EINVAL;

	/*
	 * Unlike supported commands, the output size of RAW commands
	 * gets passed along without further checking, so it must be
	 * validated here.
	 */
	if (send_cmd->out.size > cxl_mbox->payload_size)
		return -EINVAL;

	if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
		return -EPERM;

	dev_WARN_ONCE(cxl_mbox->host, true, "raw command path used\n");

	*mem_cmd = (struct cxl_mem_command) {
		.info = {
			.id = CXL_MEM_COMMAND_ID_RAW,
			.size_in = send_cmd->in.size,
			.size_out = send_cmd->out.size,
		},
		.opcode = send_cmd->raw.opcode
	};

	return 0;
}

static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
			  const struct cxl_send_command *send_cmd,
			  struct cxl_mailbox *cxl_mbox)
{
	struct cxl_mem_command *c = &cxl_mem_commands[send_cmd->id];
	const struct cxl_command_info *info = &c->info;

	if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK)
		return -EINVAL;

	if (send_cmd->rsvd)
		return -EINVAL;

	if (send_cmd->in.rsvd || send_cmd->out.rsvd)
		return -EINVAL;

	/* Check that the command is enabled for hardware */
	if (!test_bit(info->id, cxl_mbox->enabled_cmds))
		return -ENOTTY;

	/* Check that the command is not claimed for exclusive kernel use */
	if (test_bit(info->id, cxl_mbox->exclusive_cmds))
		return -EBUSY;

	/* Check the input buffer is the expected size */
	if ((info->size_in != CXL_VARIABLE_PAYLOAD) &&
	    (info->size_in != send_cmd->in.size))
		return -ENOMEM;

	/* Check the output buffer is at least large enough */
	if ((info->size_out != CXL_VARIABLE_PAYLOAD) &&
	    (send_cmd->out.size < info->size_out))
		return -ENOMEM;

	*mem_cmd = (struct cxl_mem_command) {
		.info = {
			.id = info->id,
			.flags = info->flags,
			.size_in = send_cmd->in.size,
			.size_out = send_cmd->out.size,
		},
		.opcode = c->opcode
	};

	return 0;
}

/**
 * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
 * @mbox_cmd: Sanitized and populated &struct cxl_mbox_cmd.
 * @cxl_mbox: CXL mailbox context
 * @send_cmd: &struct cxl_send_command copied in from userspace.
 *
 * Return:
 *  * %0	- @mbox_cmd is ready to send.
 *  * %-ENOTTY	- Invalid command specified.
 *  * %-EINVAL	- Reserved fields or invalid values were used.
 *  * %-ENOMEM	- Input or output buffer wasn't sized properly.
 *  * %-EPERM	- Attempted to use a protected command.
 *  * %-EBUSY	- Kernel has claimed exclusive access to this opcode
 *
 * The result of this command is a fully validated command in @mbox_cmd that is
 * safe to send to the hardware.
 */
static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
				      struct cxl_mailbox *cxl_mbox,
				      const struct cxl_send_command *send_cmd)
{
	struct cxl_mem_command mem_cmd;
	int rc;

	if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX)
		return -ENOTTY;

	/*
	 * The user can never specify an input payload larger than what hardware
	 * supports, but output can be arbitrarily large (simply write out as
	 * much data as the hardware provides).
	 */
	if (send_cmd->in.size > cxl_mbox->payload_size)
		return -EINVAL;

	/* Sanitize and construct a cxl_mem_command */
	if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW)
		rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, cxl_mbox);
	else
		rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, cxl_mbox);

	if (rc)
		return rc;

	/* Sanitize and construct a cxl_mbox_cmd */
	return cxl_mbox_cmd_ctor(mbox_cmd, cxl_mbox, mem_cmd.opcode,
				 mem_cmd.info.size_in, mem_cmd.info.size_out,
				 send_cmd->in.payload);
}

int cxl_query_cmd(struct cxl_mailbox *cxl_mbox,
		  struct cxl_mem_query_commands __user *q)
{
	struct device *dev = cxl_mbox->host;
	struct cxl_mem_command *cmd;
	u32 n_commands;
	int j = 0;

	dev_dbg(dev, "Query IOCTL\n");

	if (get_user(n_commands, &q->n_commands))
		return -EFAULT;

	/* returns the total number if 0 elements are requested. */
	if (n_commands == 0)
		return put_user(ARRAY_SIZE(cxl_mem_commands), &q->n_commands);

	/*
	 * otherwise, return min(n_commands, total commands) cxl_command_info
	 * structures.
	 */
	cxl_for_each_cmd(cmd) {
		struct cxl_command_info info = cmd->info;

		if (test_bit(info.id, cxl_mbox->enabled_cmds))
			info.flags |= CXL_MEM_COMMAND_FLAG_ENABLED;
		if (test_bit(info.id, cxl_mbox->exclusive_cmds))
			info.flags |= CXL_MEM_COMMAND_FLAG_EXCLUSIVE;

		if (copy_to_user(&q->commands[j++], &info, sizeof(info)))
			return -EFAULT;

		if (j == n_commands)
			break;
	}

	return 0;
}
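
/*
 * Illustrative userspace sketch for the query path (assumes the
 * include/uapi/linux/cxl_mem.h definitions and a /dev/cxl/memN character
 * device; not part of the build):
 *
 *	struct cxl_mem_query_commands *q;
 *	int fd = open("/dev/cxl/mem0", O_RDWR);
 *
 *	q = calloc(1, sizeof(*q));
 *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, q);	// writes total into n_commands
 *	q = realloc(q, sizeof(*q) + q->n_commands * sizeof(q->commands[0]));
 *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, q);	// fills q->commands[]
 */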

/**
 * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
 * @cxl_mbox: The mailbox context for the operation.
 * @mbox_cmd: The validated mailbox command.
 * @out_payload: Pointer to userspace's output payload.
 * @size_out: (Input) Max payload size to copy out.
 *            (Output) Payload size hardware generated.
 * @retval: Hardware generated return code from the operation.
 *
 * Return:
 *  * %0	- Mailbox transaction succeeded. This implies the mailbox
 *		  protocol completed successfully, not that the operation itself
 *		  was successful.
 *  * %-ENOMEM	- Couldn't allocate a bounce buffer.
 *  * %-EFAULT	- Something happened with copy_to/from_user.
 *  * %-EINTR	- Mailbox acquisition interrupted.
 *  * %-EXXX	- Transaction level failures.
 *
 * Dispatches a mailbox command on behalf of a userspace request.
 * The output payload is copied to userspace.
 *
 * See cxl_send_cmd().
 */
static int handle_mailbox_cmd_from_user(struct cxl_mailbox *cxl_mbox,
					struct cxl_mbox_cmd *mbox_cmd,
					u64 out_payload, s32 *size_out,
					u32 *retval)
{
	struct device *dev = cxl_mbox->host;
	int rc;

	dev_dbg(dev,
		"Submitting %s command for user\n"
		"\topcode: %x\n"
		"\tsize: %zx\n",
		cxl_mem_opcode_to_name(mbox_cmd->opcode),
		mbox_cmd->opcode, mbox_cmd->size_in);

	rc = cxl_mbox->mbox_send(cxl_mbox, mbox_cmd);
	if (rc)
		goto out;

	/*
	 * @size_out contains the max size that's allowed to be written back out
	 * to userspace. While the device may have produced more output than
	 * this, the excess is ignored.
	 */
	if (mbox_cmd->size_out) {
		dev_WARN_ONCE(dev, mbox_cmd->size_out > *size_out,
			      "Invalid return size\n");
		if (copy_to_user(u64_to_user_ptr(out_payload),
				 mbox_cmd->payload_out, mbox_cmd->size_out)) {
			rc = -EFAULT;
			goto out;
		}
	}

	*size_out = mbox_cmd->size_out;
	*retval = mbox_cmd->return_code;

out:
	cxl_mbox_cmd_dtor(mbox_cmd);
	return rc;
}

int cxl_send_cmd(struct cxl_mailbox *cxl_mbox, struct cxl_send_command __user *s)
{
	struct device *dev = cxl_mbox->host;
	struct cxl_send_command send;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	dev_dbg(dev, "Send IOCTL\n");

	if (copy_from_user(&send, s, sizeof(send)))
		return -EFAULT;

	rc = cxl_validate_cmd_from_user(&mbox_cmd, cxl_mbox, &send);
	if (rc)
		return rc;

	rc = handle_mailbox_cmd_from_user(cxl_mbox, &mbox_cmd, send.out.payload,
					  &send.out.size, &send.retval);
	if (rc)
		return rc;

	if (copy_to_user(s, &send, sizeof(send)))
		return -EFAULT;

	return 0;
}
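
/*
 * Illustrative userspace sketch for the send path (assumes the
 * include/uapi/linux/cxl_mem.h definitions; not part of the build):
 *
 *	struct cxl_send_command cmd = {
 *		.id = CXL_MEM_COMMAND_ID_IDENTIFY,
 *		.out.size = sizeof(id_buf),
 *		.out.payload = (__u64)(uintptr_t)id_buf,
 *	};
 *
 *	if (ioctl(fd, CXL_MEM_SEND_COMMAND, &cmd) == 0) {
 *		// cmd.retval holds the device return code and
 *		// cmd.out.size the number of bytes written to id_buf
 *	}
 */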

static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid,
			u32 *size, u8 *out)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	u32 remaining = *size;
	u32 offset = 0;

	while (remaining) {
		u32 xfer_size = min_t(u32, remaining, cxl_mbox->payload_size);
		struct cxl_mbox_cmd mbox_cmd;
		struct cxl_mbox_get_log log;
		int rc;

		log = (struct cxl_mbox_get_log) {
			.uuid = *uuid,
			.offset = cpu_to_le32(offset),
			.length = cpu_to_le32(xfer_size),
		};

		mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_GET_LOG,
			.size_in = sizeof(log),
			.payload_in = &log,
			.size_out = xfer_size,
			.payload_out = out,
		};

		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);

		/*
		 * The output payload length that indicates the number
		 * of valid bytes can be smaller than the Log buffer
		 * size.
		 */
		if (rc == -EIO && mbox_cmd.size_out < xfer_size) {
			offset += mbox_cmd.size_out;
			break;
		}

		if (rc < 0)
			return rc;

		out += xfer_size;
		remaining -= xfer_size;
		offset += xfer_size;
	}

	*size = offset;

	return 0;
}

static int check_features_opcodes(u16 opcode, int *ro_cmds, int *wr_cmds)
{
	switch (opcode) {
	case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
	case CXL_MBOX_OP_GET_FEATURE:
		(*ro_cmds)++;
		return 1;
	case CXL_MBOX_OP_SET_FEATURE:
		(*wr_cmds)++;
		return 1;
	default:
		return 0;
	}
}

/* 'Get Supported Features' and 'Get Feature' */
#define MAX_FEATURES_READ_CMDS 2
static void set_features_cap(struct cxl_mailbox *cxl_mbox,
			     int ro_cmds, int wr_cmds)
{
	/* Setting up Features capability while walking the CEL */
	if (ro_cmds == MAX_FEATURES_READ_CMDS) {
		if (wr_cmds)
			cxl_mbox->feat_cap = CXL_FEATURES_RW;
		else
			cxl_mbox->feat_cap = CXL_FEATURES_RO;
	}
}

/**
 * cxl_walk_cel() - Walk through the Command Effects Log.
 * @mds: The driver data for the operation
 * @size: Length of the Command Effects Log.
 * @cel: CEL
 *
 * Iterate over each entry in the CEL and determine if the driver supports the
 * command. If so, the command is enabled for the device and can be used later.
 */
static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_cel_entry *cel_entry;
	const int cel_entries = size / sizeof(*cel_entry);
	struct device *dev = mds->cxlds.dev;
	int i, ro_cmds = 0, wr_cmds = 0;

	cel_entry = (struct cxl_cel_entry *) cel;

	for (i = 0; i < cel_entries; i++) {
		u16 opcode = le16_to_cpu(cel_entry[i].opcode);
		struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
		int enabled = 0;

		if (cmd) {
			set_bit(cmd->info.id, cxl_mbox->enabled_cmds);
			enabled++;
		}

		enabled += check_features_opcodes(opcode, &ro_cmds,
						  &wr_cmds);

		if (cxl_is_poison_command(opcode)) {
			cxl_set_poison_cmd_enabled(&mds->poison, opcode);
			enabled++;
		}

		if (cxl_is_security_command(opcode)) {
			cxl_set_security_cmd_enabled(&mds->security, opcode);
			enabled++;
		}

		dev_dbg(dev, "Opcode 0x%04x %s\n", opcode,
			enabled ? "enabled" : "unsupported by driver");
	}

	set_features_cap(cxl_mbox, ro_cmds, wr_cmds);
}

static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_memdev_state *mds)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_mbox_get_supported_logs *ret;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	ret = kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_SUPPORTED_LOGS,
		.size_out = cxl_mbox->payload_size,
		.payload_out = ret,
		/* At least the record number field must be valid */
		.min_out = 2,
	};
	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
	if (rc < 0) {
		kvfree(ret);
		return ERR_PTR(rc);
	}

	return ret;
}

enum {
	CEL_UUID,
	VENDOR_DEBUG_UUID,
};

/* See CXL 2.0 Table 170. Get Log Input Payload */
static const uuid_t log_uuid[] = {
	[CEL_UUID] = DEFINE_CXL_CEL_UUID,
	[VENDOR_DEBUG_UUID] = DEFINE_CXL_VENDOR_DEBUG_UUID,
};

/**
 * cxl_enumerate_cmds() - Enumerate commands for a device.
 * @mds: The driver data for the operation
 *
 * Returns 0 if enumerate completed successfully.
 *
 * CXL devices have optional support for certain commands. This function will
 * determine the set of supported commands for the hardware and update the
 * enabled_cmds bitmap in the @mds.
 */
int cxl_enumerate_cmds(struct cxl_memdev_state *mds)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_mbox_get_supported_logs *gsl;
	struct device *dev = mds->cxlds.dev;
	struct cxl_mem_command *cmd;
	int i, rc;

	gsl = cxl_get_gsl(mds);
	if (IS_ERR(gsl))
		return PTR_ERR(gsl);

	rc = -ENOENT;
	for (i = 0; i < le16_to_cpu(gsl->entries); i++) {
		u32 size = le32_to_cpu(gsl->entry[i].size);
		uuid_t uuid = gsl->entry[i].uuid;
		u8 *log;

		dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size);

		if (!uuid_equal(&uuid, &log_uuid[CEL_UUID]))
			continue;

		log = kvmalloc(size, GFP_KERNEL);
		if (!log) {
			rc = -ENOMEM;
			goto out;
		}

		rc = cxl_xfer_log(mds, &uuid, &size, log);
		if (rc) {
			kvfree(log);
			goto out;
		}

		cxl_walk_cel(mds, size, log);
		kvfree(log);

		/* In case CEL was bogus, enable some default commands. */
		cxl_for_each_cmd(cmd)
			if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
				set_bit(cmd->info.id, cxl_mbox->enabled_cmds);

		/* Found the required CEL */
		rc = 0;
	}
out:
	kvfree(gsl);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, "CXL");

void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
			    enum cxl_event_log_type type,
			    enum cxl_event_type event_type,
			    const uuid_t *uuid, union cxl_event *evt)
{
	if (event_type == CXL_CPER_EVENT_MEM_MODULE) {
		trace_cxl_memory_module(cxlmd, type, &evt->mem_module);
		return;
	}
	if (event_type == CXL_CPER_EVENT_GENERIC) {
		trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic);
		return;
	}

	if (trace_cxl_general_media_enabled() || trace_cxl_dram_enabled()) {
		u64 dpa, hpa = ULLONG_MAX, hpa_alias = ULLONG_MAX;
		struct cxl_region *cxlr;

		/*
		 * These trace points are annotated with HPA and region
		 * translations. Take topology mutation locks and lookup
		 * { HPA, REGION } from { DPA, MEMDEV } in the event record.
		 */
		guard(rwsem_read)(&cxl_region_rwsem);
		guard(rwsem_read)(&cxl_dpa_rwsem);

		dpa = le64_to_cpu(evt->media_hdr.phys_addr) & CXL_DPA_MASK;
		cxlr = cxl_dpa_to_region(cxlmd, dpa);
		if (cxlr) {
			u64 cache_size = cxlr->params.cache_size;

			hpa = cxl_dpa_to_hpa(cxlr, cxlmd, dpa);
			if (cache_size)
				hpa_alias = hpa - cache_size;
		}

		if (event_type == CXL_CPER_EVENT_GEN_MEDIA) {
			if (cxl_store_rec_gen_media((struct cxl_memdev *)cxlmd, evt))
				dev_dbg(&cxlmd->dev, "CXL store rec_gen_media failed\n");

			trace_cxl_general_media(cxlmd, type, cxlr, hpa,
						hpa_alias, &evt->gen_media);
		} else if (event_type == CXL_CPER_EVENT_DRAM) {
			if (cxl_store_rec_dram((struct cxl_memdev *)cxlmd, evt))
				dev_dbg(&cxlmd->dev, "CXL store rec_dram failed\n");

			trace_cxl_dram(cxlmd, type, cxlr, hpa, hpa_alias,
				       &evt->dram);
		}
	}
}
EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, "CXL");

static void __cxl_event_trace_record(const struct cxl_memdev *cxlmd,
				     enum cxl_event_log_type type,
				     struct cxl_event_record_raw *record)
{
	enum cxl_event_type ev_type = CXL_CPER_EVENT_GENERIC;
	const uuid_t *uuid = &record->id;

	if (uuid_equal(uuid, &CXL_EVENT_GEN_MEDIA_UUID))
		ev_type = CXL_CPER_EVENT_GEN_MEDIA;
	else if (uuid_equal(uuid, &CXL_EVENT_DRAM_UUID))
		ev_type = CXL_CPER_EVENT_DRAM;
	else if (uuid_equal(uuid, &CXL_EVENT_MEM_MODULE_UUID))
		ev_type = CXL_CPER_EVENT_MEM_MODULE;

	cxl_event_trace_record(cxlmd, type, ev_type, uuid, &record->event);
}

static int cxl_clear_event_record(struct cxl_memdev_state *mds,
				  enum cxl_event_log_type log,
				  struct cxl_get_event_payload *get_pl)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_mbox_clear_event_payload *payload;
	u16 total = le16_to_cpu(get_pl->record_count);
	u8 max_handles = CXL_CLEAR_EVENT_MAX_HANDLES;
	size_t pl_size = struct_size(payload, handles, max_handles);
	struct cxl_mbox_cmd mbox_cmd;
	u16 cnt;
	int rc = 0;
	int i;

	/* Payload size may limit the max handles */
	if (pl_size > cxl_mbox->payload_size) {
		max_handles = (cxl_mbox->payload_size - sizeof(*payload)) /
			      sizeof(__le16);
		pl_size = struct_size(payload, handles, max_handles);
	}

	payload = kvzalloc(pl_size, GFP_KERNEL);
	if (!payload)
		return -ENOMEM;

	*payload = (struct cxl_mbox_clear_event_payload) {
		.event_log = log,
	};

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_CLEAR_EVENT_RECORD,
		.payload_in = payload,
		.size_in = pl_size,
	};

	/*
	 * Clear Event Records uses u8 for the handle cnt while Get Event
	 * Record can return up to 0xffff records.
	 */
	i = 0;
	for (cnt = 0; cnt < total; cnt++) {
		struct cxl_event_record_raw *raw = &get_pl->records[cnt];
		struct cxl_event_generic *gen = &raw->event.generic;

		payload->handles[i++] = gen->hdr.handle;
		dev_dbg(mds->cxlds.dev, "Event log '%d': Clearing %u\n", log,
			le16_to_cpu(payload->handles[i - 1]));

		if (i == max_handles) {
			payload->nr_recs = i;
			rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
			if (rc)
				goto free_pl;
			i = 0;
		}
	}

	/* Clear what is left if any */
	if (i) {
		payload->nr_recs = i;
		mbox_cmd.size_in = struct_size(payload, handles, i);
		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc)
			goto free_pl;
	}

free_pl:
	kvfree(payload);
	return rc;
}

static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
				    enum cxl_event_log_type type)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_memdev *cxlmd = mds->cxlds.cxlmd;
	struct device *dev = mds->cxlds.dev;
	struct cxl_get_event_payload *payload;
	u8 log_type = type;
	u16 nr_rec;

	mutex_lock(&mds->event.log_lock);
	payload = mds->event.buf;

	do {
		int rc, i;
		struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
			.payload_in = &log_type,
			.size_in = sizeof(log_type),
			.payload_out = payload,
			.size_out = cxl_mbox->payload_size,
			.min_out = struct_size(payload, records, 0),
		};

		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc) {
			dev_err_ratelimited(dev,
				"Event log '%d': Failed to query event records : %d",
				type, rc);
			break;
		}

		nr_rec = le16_to_cpu(payload->record_count);
		if (!nr_rec)
			break;

		for (i = 0; i < nr_rec; i++)
			__cxl_event_trace_record(cxlmd, type,
						 &payload->records[i]);

		if (payload->flags & CXL_GET_EVENT_FLAG_OVERFLOW)
			trace_cxl_overflow(cxlmd, type, payload);

		rc = cxl_clear_event_record(mds, type, payload);
		if (rc) {
			dev_err_ratelimited(dev,
				"Event log '%d': Failed to clear events : %d",
				type, rc);
			break;
		}
	} while (nr_rec);

	mutex_unlock(&mds->event.log_lock);
}

/**
 * cxl_mem_get_event_records - Get Event Records from the device
 * @mds: The driver data for the operation
 * @status: Event Status register value identifying which events are available.
 *
 * Retrieve all event records available on the device, report them as trace
 * events, and clear them.
 *
 * See CXL rev 3.0 @8.2.9.2.2 Get Event Records
 * See CXL rev 3.0 @8.2.9.2.3 Clear Event Records
 */
void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status)
{
	dev_dbg(mds->cxlds.dev, "Reading event logs: %x\n", status);

	if (status & CXLDEV_EVENT_STATUS_FATAL)
		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FATAL);
	if (status & CXLDEV_EVENT_STATUS_FAIL)
		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FAIL);
	if (status & CXLDEV_EVENT_STATUS_WARN)
		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_WARN);
	if (status & CXLDEV_EVENT_STATUS_INFO)
		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_INFO);
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, "CXL");

/**
 * cxl_mem_get_partition_info - Get partition info
 * @mds: The driver data for the operation
 *
 * Retrieve the current partition info for the device specified. The active
 * values are the current capacity in bytes. If not 0, the 'next' values are
 * the pending values, in bytes, which take effect on next cold reset.
 *
 * Return: 0 if no error; otherwise the result of the mailbox command.
 *
 * See CXL @8.2.9.5.2.1 Get Partition Info
 */
static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_mbox_get_partition_info pi;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_PARTITION_INFO,
		.size_out = sizeof(pi),
		.payload_out = &pi,
	};
	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
	if (rc)
		return rc;

	mds->active_volatile_bytes =
		le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
	mds->active_persistent_bytes =
		le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER;

	return 0;
}

/**
 * cxl_dev_state_identify() - Send the IDENTIFY command to the device.
 * @mds: The driver data for the operation
 *
 * Return: 0 if identify was executed successfully or media not ready.
 *
 * This will dispatch the identify command to the device and on success populate
 * structures to be exported to sysfs.
 */
int cxl_dev_state_identify(struct cxl_memdev_state *mds)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
	struct cxl_mbox_identify id;
	struct cxl_mbox_cmd mbox_cmd;
	u32 val;
	int rc;

	if (!mds->cxlds.media_ready)
		return 0;

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_IDENTIFY,
		.size_out = sizeof(id),
		.payload_out = &id,
	};
	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
	if (rc < 0)
		return rc;

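	/*
	 * Capacity fields in the Identify payload are expressed in units of
	 * CXL_CAPACITY_MULTIPLIER (256MB per the CXL specification), so scale
	 * them to bytes here.
	 */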
	mds->total_bytes =
		le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER;
	mds->volatile_only_bytes =
		le64_to_cpu(id.volatile_capacity) * CXL_CAPACITY_MULTIPLIER;
	mds->persistent_only_bytes =
		le64_to_cpu(id.persistent_capacity) * CXL_CAPACITY_MULTIPLIER;
	mds->partition_align_bytes =
		le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;

	mds->lsa_size = le32_to_cpu(id.lsa_size);
	memcpy(mds->firmware_version, id.fw_revision,
	       sizeof(id.fw_revision));

	if (test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds)) {
		val = get_unaligned_le24(id.poison_list_max_mer);
		mds->poison.max_errors = min_t(u32, val, CXL_POISON_LIST_MAX);
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, "CXL");

static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	int rc;
	u32 sec_out = 0;
	struct cxl_get_security_output {
		__le32 flags;
	} out;
	struct cxl_mbox_cmd sec_cmd = {
		.opcode = CXL_MBOX_OP_GET_SECURITY_STATE,
		.payload_out = &out,
		.size_out = sizeof(out),
	};
	struct cxl_mbox_cmd mbox_cmd = { .opcode = cmd };

	if (cmd != CXL_MBOX_OP_SANITIZE && cmd != CXL_MBOX_OP_SECURE_ERASE)
		return -EINVAL;

	rc = cxl_internal_send_cmd(cxl_mbox, &sec_cmd);
	if (rc < 0) {
		dev_err(cxl_mbox->host, "Failed to get security state : %d", rc);
		return rc;
	}

	/*
	 * Prior to using these commands, any security applied to
	 * the user data areas of the device shall be DISABLED (or
	 * UNLOCKED for secure erase case).
	 */
	sec_out = le32_to_cpu(out.flags);
	if (sec_out & CXL_PMEM_SEC_STATE_USER_PASS_SET)
		return -EINVAL;

	if (cmd == CXL_MBOX_OP_SECURE_ERASE &&
	    sec_out & CXL_PMEM_SEC_STATE_LOCKED)
		return -EINVAL;

	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
	if (rc < 0) {
		dev_err(cxl_mbox->host, "Failed to sanitize device : %d", rc);
		return rc;
	}

	return 0;
}

/**
 * cxl_mem_sanitize() - Send a sanitization command to the device.
 * @cxlmd: The device for the operation
 * @cmd: The specific sanitization command opcode
 *
 * Return: 0 if the command was executed successfully, regardless of
 * whether or not the actual security operation is done in the background,
 * such as for the Sanitize case.
 * Error return values can be the result of the mailbox command, -EINVAL
 * when security requirements are not met or invalid contexts, or -EBUSY
 * if the sanitize operation is already in flight.
 *
 * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase.
 */
int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
{
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
	struct cxl_port *endpoint;

	/* synchronize with cxl_mem_probe() and decoder write operations */
	guard(device)(&cxlmd->dev);
	endpoint = cxlmd->endpoint;
	guard(rwsem_read)(&cxl_region_rwsem);
	/*
	 * Require an endpoint to be safe otherwise the driver can not
	 * be sure that the device is unmapped.
	 */
	if (endpoint && cxl_num_decoders_committed(endpoint) == 0)
		return __cxl_mem_sanitize(mds, cmd);

	return -EBUSY;
}

static void add_part(struct cxl_dpa_info *info, u64 start, u64 size, enum cxl_partition_mode mode)
{
	int i = info->nr_partitions;

	if (size == 0)
		return;

	info->part[i].range = (struct range) {
		.start = start,
		.end = start + size - 1,
	};
	info->part[i].mode = mode;
	info->nr_partitions++;
}

int cxl_mem_dpa_fetch(struct cxl_memdev_state *mds, struct cxl_dpa_info *info)
{
	struct cxl_dev_state *cxlds = &mds->cxlds;
	struct device *dev = cxlds->dev;
	int rc;

	if (!cxlds->media_ready) {
		info->size = 0;
		return 0;
	}

	info->size = mds->total_bytes;

	if (mds->partition_align_bytes == 0) {
		add_part(info, 0, mds->volatile_only_bytes, CXL_PARTMODE_RAM);
		add_part(info, mds->volatile_only_bytes,
			 mds->persistent_only_bytes, CXL_PARTMODE_PMEM);
		return 0;
	}

	rc = cxl_mem_get_partition_info(mds);
	if (rc) {
		dev_err(dev, "Failed to query partition information\n");
		return rc;
	}

	add_part(info, 0, mds->active_volatile_bytes, CXL_PARTMODE_RAM);
	add_part(info, mds->active_volatile_bytes, mds->active_persistent_bytes,
		 CXL_PARTMODE_PMEM);

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_dpa_fetch, "CXL");

int cxl_get_dirty_count(struct cxl_memdev_state *mds, u32 *count)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_mbox_get_health_info_out hi;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_GET_HEALTH_INFO,
		.size_out = sizeof(hi),
		.payload_out = &hi,
	};

	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
	if (!rc)
		*count = le32_to_cpu(hi.dirty_shutdown_cnt);

	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_get_dirty_count, "CXL");

int cxl_arm_dirty_shutdown(struct cxl_memdev_state *mds)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_mbox_cmd mbox_cmd;
	struct cxl_mbox_set_shutdown_state_in in = {
		.state = 1
	};

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_SET_SHUTDOWN_STATE,
		.size_in = sizeof(in),
		.payload_in = &in,
	};

	return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_arm_dirty_shutdown, "CXL");

int cxl_set_timestamp(struct cxl_memdev_state *mds)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	struct cxl_mbox_cmd mbox_cmd;
	struct cxl_mbox_set_timestamp_in pi;
	int rc;

	pi.timestamp = cpu_to_le64(ktime_get_real_ns());
	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_SET_TIMESTAMP,
		.size_in = sizeof(pi),
		.payload_in = &pi,
	};

	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
	/*
	 * Command is optional. Devices may have another way of providing
	 * a timestamp, or may return all 0s in timestamp fields.
	 * Don't report an error if this command isn't supported
	 */
	if (rc && (mbox_cmd.return_code != CXL_MBOX_CMD_RC_UNSUPPORTED))
		return rc;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_set_timestamp, "CXL");

int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
		       struct cxl_region *cxlr)
{
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
	struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
	struct cxl_mbox_poison_out *po;
	struct cxl_mbox_poison_in pi;
	int nr_records = 0;
	int rc;

	rc = mutex_lock_interruptible(&mds->poison.lock);
	if (rc)
		return rc;

	po = mds->poison.list_out;
	pi.offset = cpu_to_le64(offset);
	pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT);

	do {
		struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) {
			.opcode = CXL_MBOX_OP_GET_POISON,
			.size_in = sizeof(pi),
			.payload_in = &pi,
			.size_out = cxl_mbox->payload_size,
			.payload_out = po,
			.min_out = struct_size(po, record, 0),
		};

		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
		if (rc)
			break;

		for (int i = 0; i < le16_to_cpu(po->count); i++)
			trace_cxl_poison(cxlmd, cxlr, &po->record[i],
					 po->flags, po->overflow_ts,
					 CXL_POISON_TRACE_LIST);

		/* Protect against an uncleared _FLAG_MORE */
		nr_records = nr_records + le16_to_cpu(po->count);
		if (nr_records >= mds->poison.max_errors) {
			dev_dbg(&cxlmd->dev, "Max Error Records reached: %d\n",
				nr_records);
			break;
		}
	} while (po->flags & CXL_POISON_FLAG_MORE);

	mutex_unlock(&mds->poison.lock);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_get_poison, "CXL");

static void free_poison_buf(void *buf)
{
	kvfree(buf);
}

/* Get Poison List output buffer is protected by mds->poison.lock */
static int cxl_poison_alloc_buf(struct cxl_memdev_state *mds)
{
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;

	mds->poison.list_out = kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
	if (!mds->poison.list_out)
		return -ENOMEM;

	return devm_add_action_or_reset(mds->cxlds.dev, free_poison_buf,
					mds->poison.list_out);
}

int cxl_poison_state_init(struct cxl_memdev_state *mds)
{
	int rc;

	if (!test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds))
		return 0;

	rc = cxl_poison_alloc_buf(mds);
	if (rc) {
		clear_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds);
		return rc;
	}

	mutex_init(&mds->poison.lock);
	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, "CXL");

int cxl_mailbox_init(struct cxl_mailbox *cxl_mbox, struct device *host)
{
	if (!cxl_mbox || !host)
		return -EINVAL;

	cxl_mbox->host = host;
	mutex_init(&cxl_mbox->mbox_mutex);
	rcuwait_init(&cxl_mbox->mbox_wait);

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_mailbox_init, "CXL");

struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
{
	struct cxl_memdev_state *mds;
	int rc;

	mds = devm_kzalloc(dev, sizeof(*mds), GFP_KERNEL);
	if (!mds) {
		dev_err(dev, "No memory available\n");
		return ERR_PTR(-ENOMEM);
	}

	mutex_init(&mds->event.log_lock);
	mds->cxlds.dev = dev;
	mds->cxlds.reg_map.host = dev;
	mds->cxlds.cxl_mbox.host = dev;
	mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
	mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;

	rc = devm_cxl_register_mce_notifier(dev, &mds->mce_notifier);
	if (rc == -EOPNOTSUPP)
		dev_warn(dev, "CXL MCE unsupported\n");
	else if (rc)
		return ERR_PTR(rc);

	return mds;
}
EXPORT_SYMBOL_NS_GPL(cxl_memdev_state_create, "CXL");

void __init cxl_mbox_init(void)
{
	struct dentry *mbox_debugfs;

	mbox_debugfs = cxl_debugfs_create_dir("mbox");
	debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
			    &cxl_raw_allow_all);
}
