1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
2 | /* Copyright(c) 2020-2021 Intel Corporation. */ |
3 | #ifndef __CXL_MEM_H__ |
4 | #define __CXL_MEM_H__ |
5 | #include <uapi/linux/cxl_mem.h> |
6 | #include <linux/pci.h> |
7 | #include <linux/cdev.h> |
8 | #include <linux/uuid.h> |
9 | #include <linux/node.h> |
10 | #include <cxl/event.h> |
11 | #include <cxl/mailbox.h> |
12 | #include "cxl.h" |
13 | |
14 | /* CXL 2.0 8.2.8.5.1.1 Memory Device Status Register */ |
15 | #define CXLMDEV_STATUS_OFFSET 0x0 |
16 | #define CXLMDEV_DEV_FATAL BIT(0) |
17 | #define CXLMDEV_FW_HALT BIT(1) |
18 | #define CXLMDEV_STATUS_MEDIA_STATUS_MASK GENMASK(3, 2) |
19 | #define CXLMDEV_MS_NOT_READY 0 |
20 | #define CXLMDEV_MS_READY 1 |
21 | #define CXLMDEV_MS_ERROR 2 |
22 | #define CXLMDEV_MS_DISABLED 3 |
23 | #define CXLMDEV_READY(status) \ |
24 | (FIELD_GET(CXLMDEV_STATUS_MEDIA_STATUS_MASK, status) == \ |
25 | CXLMDEV_MS_READY) |
26 | #define CXLMDEV_MBOX_IF_READY BIT(4) |
27 | #define CXLMDEV_RESET_NEEDED_MASK GENMASK(7, 5) |
28 | #define CXLMDEV_RESET_NEEDED_NOT 0 |
29 | #define CXLMDEV_RESET_NEEDED_COLD 1 |
30 | #define CXLMDEV_RESET_NEEDED_WARM 2 |
31 | #define CXLMDEV_RESET_NEEDED_HOT 3 |
32 | #define CXLMDEV_RESET_NEEDED_CXL 4 |
33 | #define CXLMDEV_RESET_NEEDED(status) \ |
34 | (FIELD_GET(CXLMDEV_RESET_NEEDED_MASK, status) != \ |
35 | CXLMDEV_RESET_NEEDED_NOT) |
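/*
 * Illustrative sketch (assumptions: 'cxlds' is the struct cxl_dev_state
 * declared later in this header and cxlds->regs.memdev maps the device
 * status registers): a probe path can decode the status register with the
 * helpers above.
 *
 *	u64 md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);
 *
 *	if (!CXLMDEV_READY(md_status))
 *		return -EIO;
 *	if (CXLMDEV_RESET_NEEDED(md_status))
 *		dev_warn(cxlds->dev, "device requests reset\n");
 */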
36 | |
37 | /** |
38 | * struct cxl_memdev - CXL bus object representing a Type-3 Memory Device |
39 | * @dev: driver core device object |
40 | * @cdev: char dev core object for ioctl operations |
41 | * @cxlds: The device state backing this device |
42 | * @detach_work: active memdev lost a port in its ancestry |
43 | * @cxl_nvb: coordinate removal of @cxl_nvd if present |
44 | * @cxl_nvd: optional bridge to an nvdimm if the device supports pmem |
45 | * @endpoint: connection to the CXL port topology for this memory device |
46 | * @id: id number of this memdev instance. |
47 | * @depth: endpoint port depth |
48 | * @scrub_cycle: current scrub cycle set for this device |
49 | * @scrub_region_id: id number of a backed region (if any) for which the current scrub cycle is set |
50 | * @err_rec_array: List of xarrays that store the memdev error records, used to |
51 | *                 check that attributes for a memory repair operation are from the |
52 | *                 current boot. |
53 | */ |
54 | struct cxl_memdev { |
55 | struct device dev; |
56 | struct cdev cdev; |
57 | struct cxl_dev_state *cxlds; |
58 | struct work_struct detach_work; |
59 | struct cxl_nvdimm_bridge *cxl_nvb; |
60 | struct cxl_nvdimm *cxl_nvd; |
61 | struct cxl_port *endpoint; |
62 | int id; |
63 | int depth; |
64 | u8 scrub_cycle; |
65 | int scrub_region_id; |
66 | void *err_rec_array; |
67 | }; |
68 | |
69 | static inline struct cxl_memdev *to_cxl_memdev(struct device *dev) |
70 | { |
71 | return container_of(dev, struct cxl_memdev, dev); |
72 | } |
73 | |
74 | static inline struct cxl_port *cxled_to_port(struct cxl_endpoint_decoder *cxled) |
75 | { |
76 | return to_cxl_port(cxled->cxld.dev.parent); |
77 | } |
78 | |
79 | static inline struct cxl_port *cxlrd_to_port(struct cxl_root_decoder *cxlrd) |
80 | { |
81 | return to_cxl_port(cxlrd->cxlsd.cxld.dev.parent); |
82 | } |
83 | |
84 | static inline struct cxl_memdev * |
85 | cxled_to_memdev(struct cxl_endpoint_decoder *cxled) |
86 | { |
87 | struct cxl_port *port = to_cxl_port(cxled->cxld.dev.parent); |
88 | |
89 | return to_cxl_memdev(port->uport_dev); |
90 | } |
91 | |
92 | bool is_cxl_memdev(const struct device *dev); |
93 | static inline bool is_cxl_endpoint(struct cxl_port *port) |
94 | { |
95 | return is_cxl_memdev(port->uport_dev); |
96 | } |
97 | |
98 | struct cxl_memdev *devm_cxl_add_memdev(struct device *host, |
99 | struct cxl_dev_state *cxlds); |
100 | int devm_cxl_sanitize_setup_notifier(struct device *host, |
101 | struct cxl_memdev *cxlmd); |
102 | struct cxl_memdev_state; |
103 | int devm_cxl_setup_fw_upload(struct device *host, struct cxl_memdev_state *mds); |
104 | int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled, |
105 | resource_size_t base, resource_size_t len, |
106 | resource_size_t skipped); |
107 | |
108 | #define CXL_NR_PARTITIONS_MAX 2 |
109 | |
110 | struct cxl_dpa_info { |
111 | u64 size; |
112 | struct cxl_dpa_part_info { |
113 | struct range range; |
114 | enum cxl_partition_mode mode; |
115 | } part[CXL_NR_PARTITIONS_MAX]; |
116 | int nr_partitions; |
117 | }; |
118 | |
119 | int cxl_dpa_setup(struct cxl_dev_state *cxlds, const struct cxl_dpa_info *info); |
120 | |
121 | static inline struct cxl_ep *cxl_ep_load(struct cxl_port *port, |
122 | struct cxl_memdev *cxlmd) |
123 | { |
124 | if (!port) |
125 | return NULL; |
126 | |
127 | return xa_load(&port->endpoints, (unsigned long)&cxlmd->dev); |
128 | } |
129 | |
130 | /* |
131 | * Per CXL 3.0 Section 8.2.8.4.5.1 |
132 | */ |
133 | #define CMD_CMD_RC_TABLE \ |
134 | C(SUCCESS, 0, NULL), \ |
135 | C(BACKGROUND, -ENXIO, "background cmd started successfully"), \ |
136 | C(INPUT, -ENXIO, "cmd input was invalid"), \ |
137 | C(UNSUPPORTED, -ENXIO, "cmd is not supported"), \ |
138 | C(INTERNAL, -ENXIO, "internal device error"), \ |
139 | C(RETRY, -ENXIO, "temporary error, retry once"), \ |
140 | C(BUSY, -ENXIO, "ongoing background operation"), \ |
141 | C(MEDIADISABLED, -ENXIO, "media access is disabled"), \ |
142 | C(FWINPROGRESS, -ENXIO, "one FW package can be transferred at a time"), \ |
143 | C(FWOOO, -ENXIO, "FW package content was transferred out of order"), \ |
144 | C(FWAUTH, -ENXIO, "FW package authentication failed"), \ |
145 | C(FWSLOT, -ENXIO, "FW slot is not supported for requested operation"), \ |
146 | C(FWROLLBACK, -ENXIO, "rolled back to the previous active FW"), \ |
147 | C(FWRESET, -ENXIO, "FW failed to activate, needs cold reset"), \ |
148 | C(HANDLE, -ENXIO, "one or more Event Record Handles were invalid"), \ |
149 | C(PADDR, -EFAULT, "physical address specified is invalid"), \ |
150 | C(POISONLMT, -EBUSY, "poison injection limit has been reached"), \ |
151 | C(MEDIAFAILURE, -ENXIO, "permanent issue with the media"), \ |
152 | C(ABORT, -ENXIO, "background cmd was aborted by device"), \ |
153 | C(SECURITY, -ENXIO, "not valid in the current security state"), \ |
154 | C(PASSPHRASE, -ENXIO, "phrase doesn't match current set passphrase"), \ |
155 | C(MBUNSUPPORTED, -ENXIO, "unsupported on the mailbox it was issued on"),\ |
156 | C(PAYLOADLEN, -ENXIO, "invalid payload length"), \ |
157 | C(LOG, -ENXIO, "invalid or unsupported log page"), \ |
158 | C(INTERRUPTED, -ENXIO, "asynchronous event occurred"), \ |
159 | C(FEATUREVERSION, -ENXIO, "unsupported feature version"), \ |
160 | C(FEATURESELVALUE, -ENXIO, "unsupported feature selection value"), \ |
161 | C(FEATURETRANSFERIP, -ENXIO, "feature transfer in progress"), \ |
162 | C(FEATURETRANSFEROOO, -ENXIO, "feature transfer out of order"), \ |
163 | C(RESOURCEEXHAUSTED, -ENXIO, "resources are exhausted"), \ |
164 | C(EXTLIST, -ENXIO, "invalid Extent List"), \ |
165 | |
166 | #undef C |
167 | #define C(a, b, c) CXL_MBOX_CMD_RC_##a |
168 | enum { CMD_CMD_RC_TABLE }; |
169 | #undef C |
170 | #define C(a, b, c) { b, c } |
171 | struct cxl_mbox_cmd_rc { |
172 | int err; |
173 | const char *desc; |
174 | }; |
175 | |
176 | static const |
177 | struct cxl_mbox_cmd_rc cxl_mbox_cmd_rctable[] = { CMD_CMD_RC_TABLE }; |
178 | #undef C |
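/*
 * The C() x-macro above expands CMD_CMD_RC_TABLE twice: once with
 * C(a, b, c) := CXL_MBOX_CMD_RC_##a to generate the enum of return codes,
 * and once with C(a, b, c) := { b, c } to generate the errno/description
 * table. For example,
 *
 *	C(BUSY, -ENXIO, "ongoing background operation")
 *
 * produces both CXL_MBOX_CMD_RC_BUSY and the table entry
 * { -ENXIO, "ongoing background operation" } at the same index, which is
 * what lets the rc2str()/rc2errno() helpers below index
 * cxl_mbox_cmd_rctable[] directly by a command's return code.
 */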
179 | |
180 | static inline const char *cxl_mbox_cmd_rc2str(struct cxl_mbox_cmd *mbox_cmd) |
181 | { |
182 | return cxl_mbox_cmd_rctable[mbox_cmd->return_code].desc; |
183 | } |
184 | |
185 | static inline int cxl_mbox_cmd_rc2errno(struct cxl_mbox_cmd *mbox_cmd) |
186 | { |
187 | return cxl_mbox_cmd_rctable[mbox_cmd->return_code].err; |
188 | } |
189 | |
190 | /* |
191 | * CXL 2.0 - Memory capacity multiplier |
192 | * See Section 8.2.9.5 |
193 | * |
194 | * Volatile, Persistent, and Partition capacities are specified to be in |
195 | * multiples of 256MB - define a multiplier to convert to/from bytes. |
196 | */ |
197 | #define CXL_CAPACITY_MULTIPLIER SZ_256M |
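/*
 * For example, an Identify payload reporting total_capacity == 16 describes
 * 16 * CXL_CAPACITY_MULTIPLIER == 4GB; the driver converts such fields with
 * le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER.
 */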
198 | |
199 | /* |
200 | * Event Interrupt Policy |
201 | * |
202 | * CXL rev 3.0 section 8.2.9.2.4; Table 8-52 |
203 | */ |
204 | enum cxl_event_int_mode { |
205 | CXL_INT_NONE = 0x00, |
206 | CXL_INT_MSI_MSIX = 0x01, |
207 | CXL_INT_FW = 0x02 |
208 | }; |
209 | struct cxl_event_interrupt_policy { |
210 | u8 info_settings; |
211 | u8 warn_settings; |
212 | u8 failure_settings; |
213 | u8 fatal_settings; |
214 | } __packed; |
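/*
 * Illustrative sketch of programming all four event logs to signal via
 * MSI/MSI-X ('cxl_mbox' is the device's struct cxl_mailbox; the interrupt
 * message number bits of each settings byte are omitted here):
 *
 *	struct cxl_event_interrupt_policy policy = {
 *		.info_settings = CXL_INT_MSI_MSIX,
 *		.warn_settings = CXL_INT_MSI_MSIX,
 *		.failure_settings = CXL_INT_MSI_MSIX,
 *		.fatal_settings = CXL_INT_MSI_MSIX,
 *	};
 *	struct cxl_mbox_cmd mbox_cmd = {
 *		.opcode = CXL_MBOX_OP_SET_EVT_INT_POLICY,
 *		.payload_in = &policy,
 *		.size_in = sizeof(policy),
 *	};
 *
 *	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
 */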
215 | |
216 | /** |
217 | * struct cxl_event_state - Event log driver state |
218 | * |
219 | * @buf: Buffer to receive event data |
220 | * @log_lock: Serialize event_buf and log use |
221 | */ |
222 | struct cxl_event_state { |
223 | struct cxl_get_event_payload *buf; |
224 | struct mutex log_lock; |
225 | }; |
226 | |
227 | /* Device enabled poison commands */ |
228 | enum poison_cmd_enabled_bits { |
229 | CXL_POISON_ENABLED_LIST, |
230 | CXL_POISON_ENABLED_INJECT, |
231 | CXL_POISON_ENABLED_CLEAR, |
232 | CXL_POISON_ENABLED_SCAN_CAPS, |
233 | CXL_POISON_ENABLED_SCAN_MEDIA, |
234 | CXL_POISON_ENABLED_SCAN_RESULTS, |
235 | CXL_POISON_ENABLED_MAX |
236 | }; |
237 | |
238 | /* Device enabled security commands */ |
239 | enum security_cmd_enabled_bits { |
240 | CXL_SEC_ENABLED_SANITIZE, |
241 | CXL_SEC_ENABLED_SECURE_ERASE, |
242 | CXL_SEC_ENABLED_GET_SECURITY_STATE, |
243 | CXL_SEC_ENABLED_SET_PASSPHRASE, |
244 | CXL_SEC_ENABLED_DISABLE_PASSPHRASE, |
245 | CXL_SEC_ENABLED_UNLOCK, |
246 | CXL_SEC_ENABLED_FREEZE_SECURITY, |
247 | CXL_SEC_ENABLED_PASSPHRASE_SECURE_ERASE, |
248 | CXL_SEC_ENABLED_MAX |
249 | }; |
250 | |
251 | /** |
252 | * struct cxl_poison_state - Driver poison state info |
253 | * |
254 | * @max_errors: Maximum media error records held in device cache |
255 | * @enabled_cmds: All poison commands enabled in the CEL |
256 | * @list_out: The poison list payload returned by device |
257 | * @lock: Protect reads of the poison list |
258 | * |
259 | * Reads of the poison list are synchronized to ensure that a reader |
260 | * does not get an incomplete list because their request overlapped |
261 | * (was interrupted or preceded by) another read request of the same |
262 | * DPA range. CXL Spec 3.0 Section 8.2.9.8.4.1 |
263 | */ |
264 | struct cxl_poison_state { |
265 | u32 max_errors; |
266 | DECLARE_BITMAP(enabled_cmds, CXL_POISON_ENABLED_MAX); |
267 | struct cxl_mbox_poison_out *list_out; |
268 | struct mutex lock; /* Protect reads of poison list */ |
269 | }; |
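/*
 * Illustrative: per the note above, a reader holds mds->poison.lock across
 * the whole Get Poison List exchange so that mds->poison.list_out is not
 * overwritten mid-walk ('mds' being the owning struct cxl_memdev_state):
 *
 *	mutex_lock(&mds->poison.lock);
 *	... issue CXL_MBOX_OP_GET_POISON, walk mds->poison.list_out ...
 *	mutex_unlock(&mds->poison.lock);
 */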
270 | |
271 | /* |
272 | * Get FW Info |
273 | * CXL rev 3.0 section 8.2.9.3.1; Table 8-56 |
274 | */ |
275 | struct cxl_mbox_get_fw_info { |
276 | u8 num_slots; |
277 | u8 slot_info; |
278 | u8 activation_cap; |
279 | u8 reserved[13]; |
280 | char slot_1_revision[16]; |
281 | char slot_2_revision[16]; |
282 | char slot_3_revision[16]; |
283 | char slot_4_revision[16]; |
284 | } __packed; |
285 | |
286 | #define CXL_FW_INFO_SLOT_INFO_CUR_MASK GENMASK(2, 0) |
287 | #define CXL_FW_INFO_SLOT_INFO_NEXT_MASK GENMASK(5, 3) |
288 | #define CXL_FW_INFO_SLOT_INFO_NEXT_SHIFT 3 |
289 | #define CXL_FW_INFO_ACTIVATION_CAP_HAS_LIVE_ACTIVATE BIT(0) |
290 | |
291 | /* |
292 | * Transfer FW Input Payload |
293 | * CXL rev 3.0 section 8.2.9.3.2; Table 8-57 |
294 | */ |
295 | struct cxl_mbox_transfer_fw { |
296 | u8 action; |
297 | u8 slot; |
298 | u8 reserved[2]; |
299 | __le32 offset; |
300 | u8 reserved2[0x78]; |
301 | u8 data[]; |
302 | } __packed; |
303 | |
304 | #define CXL_FW_TRANSFER_ACTION_FULL 0x0 |
305 | #define CXL_FW_TRANSFER_ACTION_INITIATE 0x1 |
306 | #define CXL_FW_TRANSFER_ACTION_CONTINUE 0x2 |
307 | #define CXL_FW_TRANSFER_ACTION_END 0x3 |
308 | #define CXL_FW_TRANSFER_ACTION_ABORT 0x4 |
309 | |
310 | /* |
311 | * CXL rev 3.0 section 8.2.9.3.2 mandates 128-byte alignment for FW packages |
312 | * and for each part transferred in a Transfer FW command. |
313 | */ |
314 | #define CXL_FW_TRANSFER_ALIGNMENT 128 |
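/*
 * Illustrative sketch ('byte_offset', 'chunk' and 'chunk_size' are
 * placeholders): when an image is split across several Transfer FW commands,
 * each part is a multiple of CXL_FW_TRANSFER_ALIGNMENT bytes and the offset
 * field is expressed in those 128-byte units:
 *
 *	transfer->action = CXL_FW_TRANSFER_ACTION_CONTINUE;
 *	transfer->offset = cpu_to_le32(byte_offset / CXL_FW_TRANSFER_ALIGNMENT);
 *	memcpy(transfer->data, chunk, chunk_size);
 */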
315 | |
316 | /* |
317 | * Activate FW Input Payload |
318 | * CXL rev 3.0 section 8.2.9.3.3; Table 8-58 |
319 | */ |
320 | struct cxl_mbox_activate_fw { |
321 | u8 action; |
322 | u8 slot; |
323 | } __packed; |
324 | |
325 | #define CXL_FW_ACTIVATE_ONLINE 0x0 |
326 | #define CXL_FW_ACTIVATE_OFFLINE 0x1 |
327 | |
328 | /* FW state bits */ |
329 | #define CXL_FW_STATE_BITS 32 |
330 | #define CXL_FW_CANCEL 0 |
331 | |
332 | /** |
333 | * struct cxl_fw_state - Firmware upload / activation state |
334 | * |
335 | * @state: fw_uploader state bitmask |
336 | * @oneshot: whether the fw upload fits in a single transfer |
337 | * @num_slots: Number of FW slots available |
338 | * @cur_slot: Slot number currently active |
339 | * @next_slot: Slot number for the new firmware |
340 | */ |
341 | struct cxl_fw_state { |
342 | DECLARE_BITMAP(state, CXL_FW_STATE_BITS); |
343 | bool oneshot; |
344 | int num_slots; |
345 | int cur_slot; |
346 | int next_slot; |
347 | }; |
348 | |
349 | /** |
350 | * struct cxl_security_state - Device security state |
351 | * |
352 | * @state: state of last security operation |
353 | * @enabled_cmds: All security commands enabled in the CEL |
354 | * @poll_tmo_secs: polling timeout |
355 | * @sanitize_active: sanitize completion pending |
356 | * @poll_dwork: polling work item |
357 | * @sanitize_node: sanitation sysfs file to notify |
358 | */ |
359 | struct cxl_security_state { |
360 | unsigned long state; |
361 | DECLARE_BITMAP(enabled_cmds, CXL_SEC_ENABLED_MAX); |
362 | int poll_tmo_secs; |
363 | bool sanitize_active; |
364 | struct delayed_work poll_dwork; |
365 | struct kernfs_node *sanitize_node; |
366 | }; |
367 | |
368 | /* |
369 | * enum cxl_devtype - delineate type-2 from a generic type-3 device |
370 | * @CXL_DEVTYPE_DEVMEM - Vendor specific CXL Type-2 device implementing HDM-D or |
371 | * HDM-DB, no requirement that this device implements a |
372 | * mailbox, or other memory-device-standard manageability |
373 | * flows. |
374 | * @CXL_DEVTYPE_CLASSMEM - Common class definition of a CXL Type-3 device with |
375 | * HDM-H and class-mandatory memory device registers |
376 | */ |
377 | enum cxl_devtype { |
378 | CXL_DEVTYPE_DEVMEM, |
379 | CXL_DEVTYPE_CLASSMEM, |
380 | }; |
381 | |
382 | /** |
383 | * struct cxl_dpa_perf - DPA performance property entry |
384 | * @dpa_range: range for DPA address |
385 | * @coord: QoS performance data (i.e. latency, bandwidth) |
386 | * @cdat_coord: raw QoS performance data from CDAT |
387 | * @qos_class: QoS Class cookies |
388 | */ |
389 | struct cxl_dpa_perf { |
390 | struct range dpa_range; |
391 | struct access_coordinate coord[ACCESS_COORDINATE_MAX]; |
392 | struct access_coordinate cdat_coord[ACCESS_COORDINATE_MAX]; |
393 | int qos_class; |
394 | }; |
395 | |
396 | /** |
397 | * struct cxl_dpa_partition - DPA partition descriptor |
398 | * @res: shortcut to the partition in the DPA resource tree (cxlds->dpa_res) |
399 | * @perf: performance attributes of the partition from CDAT |
400 | * @mode: operation mode for the DPA capacity, e.g. ram, pmem, dynamic... |
401 | */ |
402 | struct cxl_dpa_partition { |
403 | struct resource res; |
404 | struct cxl_dpa_perf perf; |
405 | enum cxl_partition_mode mode; |
406 | }; |
407 | |
408 | /** |
409 | * struct cxl_dev_state - The driver device state |
410 | * |
411 | * cxl_dev_state represents the CXL driver/device state. It provides an |
412 | * interface to mailbox commands as well as some cached data about the device. |
413 | * Currently only memory devices are represented. |
414 | * |
415 | * @dev: The device associated with this CXL state |
416 | * @cxlmd: The device representing the CXL.mem capabilities of @dev |
417 | * @reg_map: component and ras register mapping parameters |
418 | * @regs: Parsed register blocks |
419 | * @cxl_dvsec: Offset to the PCIe device DVSEC |
420 | * @rcd: operating in RCD mode (CXL 3.0 9.11.8 CXL Devices Attached to an RCH) |
421 | * @media_ready: Indicate whether the device media is usable |
422 | * @dpa_res: Overall DPA resource tree for the device |
423 | * @part: DPA partition array |
424 | * @nr_partitions: Number of DPA partitions |
425 | * @serial: PCIe Device Serial Number |
426 | * @type: Generic Memory Class device or Vendor Specific Memory device |
427 | * @cxl_mbox: CXL mailbox context |
428 | * @cxlfs: CXL features context |
429 | */ |
430 | struct cxl_dev_state { |
431 | struct device *dev; |
432 | struct cxl_memdev *cxlmd; |
433 | struct cxl_register_map reg_map; |
434 | struct cxl_regs regs; |
435 | int cxl_dvsec; |
436 | bool rcd; |
437 | bool media_ready; |
438 | struct resource dpa_res; |
439 | struct cxl_dpa_partition part[CXL_NR_PARTITIONS_MAX]; |
440 | unsigned int nr_partitions; |
441 | u64 serial; |
442 | enum cxl_devtype type; |
443 | struct cxl_mailbox cxl_mbox; |
444 | #ifdef CONFIG_CXL_FEATURES |
445 | struct cxl_features_state *cxlfs; |
446 | #endif |
447 | }; |
448 | |
449 | static inline resource_size_t cxl_pmem_size(struct cxl_dev_state *cxlds) |
450 | { |
451 | /* |
452 | * Static PMEM may be at partition index 0 when there is no static RAM |
453 | * capacity. |
454 | */ |
455 | for (int i = 0; i < cxlds->nr_partitions; i++) |
456 | if (cxlds->part[i].mode == CXL_PARTMODE_PMEM) |
457 | return resource_size(&cxlds->part[i].res); |
458 | return 0; |
459 | } |
460 | |
461 | static inline struct cxl_dev_state *mbox_to_cxlds(struct cxl_mailbox *cxl_mbox) |
462 | { |
463 | return dev_get_drvdata(cxl_mbox->host); |
464 | } |
465 | |
466 | /** |
467 | * struct cxl_memdev_state - Generic Type-3 Memory Device Class driver data |
468 | * |
469 | * CXL 8.1.12.1 PCI Header - Class Code Register Memory Device defines |
470 | * common memory device functionality, such as the presence of a mailbox and |
471 | * the commands that come with it, e.g. Identify Memory Device and Get |
472 | * Partition Info. |
473 | * @cxlds: Core driver state common across Type-2 and Type-3 devices |
474 | * @lsa_size: Size of Label Storage Area |
475 | * (CXL 2.0 8.2.9.5.1.1 Identify Memory Device) |
476 | * @firmware_version: Firmware version for the memory device. |
477 | * @total_bytes: sum of all possible capacities |
478 | * @volatile_only_bytes: hard volatile capacity |
479 | * @persistent_only_bytes: hard persistent capacity |
480 | * @partition_align_bytes: alignment size for partition-able capacity |
481 | * @active_volatile_bytes: sum of hard + soft volatile |
482 | * @active_persistent_bytes: sum of hard + soft persistent |
483 | * @event: event log driver state |
484 | * @poison: poison driver state info |
485 | * @security: security driver state info |
486 | * @fw: firmware upload / activation state |
487 | * @mce_notifier: MCE notifier |
488 | * |
489 | * See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for |
490 | * details on capacity parameters. |
491 | */ |
492 | struct cxl_memdev_state { |
493 | struct cxl_dev_state cxlds; |
494 | size_t lsa_size; |
495 | char firmware_version[0x10]; |
496 | u64 total_bytes; |
497 | u64 volatile_only_bytes; |
498 | u64 persistent_only_bytes; |
499 | u64 partition_align_bytes; |
500 | u64 active_volatile_bytes; |
501 | u64 active_persistent_bytes; |
502 | |
503 | struct cxl_event_state event; |
504 | struct cxl_poison_state poison; |
505 | struct cxl_security_state security; |
506 | struct cxl_fw_state fw; |
507 | struct notifier_block mce_notifier; |
508 | }; |
509 | |
510 | static inline struct cxl_memdev_state * |
511 | to_cxl_memdev_state(struct cxl_dev_state *cxlds) |
512 | { |
513 | if (cxlds->type != CXL_DEVTYPE_CLASSMEM) |
514 | return NULL; |
515 | return container_of(cxlds, struct cxl_memdev_state, cxlds); |
516 | } |
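/*
 * Illustrative: callers needing memory-device-class state downcast through
 * this helper and treat NULL as "no class mailbox state" (i.e. a
 * CXL_DEVTYPE_DEVMEM accelerator):
 *
 *	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
 *
 *	if (!mds)
 *		return -ENODEV;
 */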
517 | |
518 | enum cxl_opcode { |
519 | CXL_MBOX_OP_INVALID = 0x0000, |
520 | CXL_MBOX_OP_RAW = CXL_MBOX_OP_INVALID, |
521 | CXL_MBOX_OP_GET_EVENT_RECORD = 0x0100, |
522 | CXL_MBOX_OP_CLEAR_EVENT_RECORD = 0x0101, |
523 | CXL_MBOX_OP_GET_EVT_INT_POLICY = 0x0102, |
524 | CXL_MBOX_OP_SET_EVT_INT_POLICY = 0x0103, |
525 | CXL_MBOX_OP_GET_FW_INFO = 0x0200, |
526 | CXL_MBOX_OP_TRANSFER_FW = 0x0201, |
527 | CXL_MBOX_OP_ACTIVATE_FW = 0x0202, |
528 | CXL_MBOX_OP_GET_TIMESTAMP = 0x0300, |
529 | CXL_MBOX_OP_SET_TIMESTAMP = 0x0301, |
530 | CXL_MBOX_OP_GET_SUPPORTED_LOGS = 0x0400, |
531 | CXL_MBOX_OP_GET_LOG = 0x0401, |
532 | CXL_MBOX_OP_GET_LOG_CAPS = 0x0402, |
533 | CXL_MBOX_OP_CLEAR_LOG = 0x0403, |
534 | CXL_MBOX_OP_GET_SUP_LOG_SUBLIST = 0x0405, |
535 | CXL_MBOX_OP_GET_SUPPORTED_FEATURES = 0x0500, |
536 | CXL_MBOX_OP_GET_FEATURE = 0x0501, |
537 | CXL_MBOX_OP_SET_FEATURE = 0x0502, |
538 | CXL_MBOX_OP_DO_MAINTENANCE = 0x0600, |
539 | CXL_MBOX_OP_IDENTIFY = 0x4000, |
540 | CXL_MBOX_OP_GET_PARTITION_INFO = 0x4100, |
541 | CXL_MBOX_OP_SET_PARTITION_INFO = 0x4101, |
542 | CXL_MBOX_OP_GET_LSA = 0x4102, |
543 | CXL_MBOX_OP_SET_LSA = 0x4103, |
544 | CXL_MBOX_OP_GET_HEALTH_INFO = 0x4200, |
545 | CXL_MBOX_OP_GET_ALERT_CONFIG = 0x4201, |
546 | CXL_MBOX_OP_SET_ALERT_CONFIG = 0x4202, |
547 | CXL_MBOX_OP_GET_SHUTDOWN_STATE = 0x4203, |
548 | CXL_MBOX_OP_SET_SHUTDOWN_STATE = 0x4204, |
549 | CXL_MBOX_OP_GET_POISON = 0x4300, |
550 | CXL_MBOX_OP_INJECT_POISON = 0x4301, |
551 | CXL_MBOX_OP_CLEAR_POISON = 0x4302, |
552 | CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS = 0x4303, |
553 | CXL_MBOX_OP_SCAN_MEDIA = 0x4304, |
554 | CXL_MBOX_OP_GET_SCAN_MEDIA = 0x4305, |
555 | CXL_MBOX_OP_SANITIZE = 0x4400, |
556 | CXL_MBOX_OP_SECURE_ERASE = 0x4401, |
557 | CXL_MBOX_OP_GET_SECURITY_STATE = 0x4500, |
558 | CXL_MBOX_OP_SET_PASSPHRASE = 0x4501, |
559 | CXL_MBOX_OP_DISABLE_PASSPHRASE = 0x4502, |
560 | CXL_MBOX_OP_UNLOCK = 0x4503, |
561 | CXL_MBOX_OP_FREEZE_SECURITY = 0x4504, |
562 | CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE = 0x4505, |
563 | CXL_MBOX_OP_MAX = 0x10000 |
564 | }; |
565 | |
566 | #define DEFINE_CXL_CEL_UUID \ |
567 | UUID_INIT(0xda9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79, 0x96, 0xb1, 0x62, \ |
568 | 0x3b, 0x3f, 0x17) |
569 | |
570 | #define DEFINE_CXL_VENDOR_DEBUG_UUID \ |
571 | UUID_INIT(0x5e1819d9, 0x11a9, 0x400c, 0x81, 0x1f, 0xd6, 0x07, 0x19, \ |
572 | 0x40, 0x3d, 0x86) |
573 | |
574 | struct cxl_mbox_get_supported_logs { |
575 | __le16 entries; |
576 | u8 rsvd[6]; |
577 | struct cxl_gsl_entry { |
578 | uuid_t uuid; |
579 | __le32 size; |
580 | } __packed entry[]; |
581 | } __packed; |
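/*
 * Illustrative sketch of consuming this payload ('gsl' points at the command
 * output): walk the entries and match against a known log UUID, e.g. the
 * Command Effects Log defined above, before fetching it with Get Log.
 *
 *	static const uuid_t cel_uuid = DEFINE_CXL_CEL_UUID;
 *	u32 cel_size = 0;
 *	int i;
 *
 *	for (i = 0; i < le16_to_cpu(gsl->entries); i++)
 *		if (uuid_equal(&gsl->entry[i].uuid, &cel_uuid))
 *			cel_size = le32_to_cpu(gsl->entry[i].size);
 */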
582 | |
583 | struct cxl_cel_entry { |
584 | __le16 opcode; |
585 | __le16 effect; |
586 | } __packed; |
587 | |
588 | struct cxl_mbox_get_log { |
589 | uuid_t uuid; |
590 | __le32 offset; |
591 | __le32 length; |
592 | } __packed; |
593 | |
594 | /* See CXL 2.0 Table 175 Identify Memory Device Output Payload */ |
595 | struct cxl_mbox_identify { |
596 | char fw_revision[0x10]; |
597 | __le64 total_capacity; |
598 | __le64 volatile_capacity; |
599 | __le64 persistent_capacity; |
600 | __le64 partition_align; |
601 | __le16 info_event_log_size; |
602 | __le16 warning_event_log_size; |
603 | __le16 failure_event_log_size; |
604 | __le16 fatal_event_log_size; |
605 | __le32 lsa_size; |
606 | u8 poison_list_max_mer[3]; |
607 | __le16 inject_poison_limit; |
608 | u8 poison_caps; |
609 | u8 qos_telemetry_caps; |
610 | } __packed; |
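/*
 * Illustrative sketch of issuing Identify through the mailbox context, along
 * the lines of cxl_dev_state_identify(); note the capacity fields come back
 * in CXL_CAPACITY_MULTIPLIER units:
 *
 *	struct cxl_mbox_identify id;
 *	struct cxl_mbox_cmd mbox_cmd = {
 *		.opcode = CXL_MBOX_OP_IDENTIFY,
 *		.size_out = sizeof(id),
 *		.payload_out = &id,
 *	};
 *	int rc = cxl_internal_send_cmd(&cxlds->cxl_mbox, &mbox_cmd);
 *
 *	if (!rc)
 *		total_bytes = le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER;
 */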
611 | |
612 | /* |
613 | * General Media Event Record UUID |
614 | * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43 |
615 | */ |
616 | #define CXL_EVENT_GEN_MEDIA_UUID \ |
617 | UUID_INIT(0xfbcd0a77, 0xc260, 0x417f, 0x85, 0xa9, 0x08, 0x8b, 0x16, \ |
618 | 0x21, 0xeb, 0xa6) |
619 | |
620 | /* |
621 | * DRAM Event Record UUID |
622 | * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44 |
623 | */ |
624 | #define CXL_EVENT_DRAM_UUID \ |
625 | UUID_INIT(0x601dcbb3, 0x9c06, 0x4eab, 0xb8, 0xaf, 0x4e, 0x9b, 0xfb, \ |
626 | 0x5c, 0x96, 0x24) |
627 | |
628 | /* |
629 | * Memory Module Event Record UUID |
630 | * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45 |
631 | */ |
632 | #define CXL_EVENT_MEM_MODULE_UUID \ |
633 | UUID_INIT(0xfe927475, 0xdd59, 0x4339, 0xa5, 0x86, 0x79, 0xba, 0xb1, \ |
634 | 0x13, 0xb7, 0x74) |
635 | |
636 | /* |
637 | * Get Event Records output payload |
638 | * CXL rev 3.0 section 8.2.9.2.2; Table 8-50 |
639 | */ |
640 | #define CXL_GET_EVENT_FLAG_OVERFLOW BIT(0) |
641 | #define CXL_GET_EVENT_FLAG_MORE_RECORDS BIT(1) |
642 | struct cxl_get_event_payload { |
643 | u8 flags; |
644 | u8 reserved1; |
645 | __le16 overflow_err_count; |
646 | __le64 first_overflow_timestamp; |
647 | __le64 last_overflow_timestamp; |
648 | __le16 record_count; |
649 | u8 reserved2[10]; |
650 | struct cxl_event_record_raw records[]; |
651 | } __packed; |
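/*
 * Illustrative: an event log is drained by looping on Get Event Records
 * until the device stops setting the "more records" flag, clearing each
 * batch by handle as it is consumed; roughly:
 *
 *	do {
 *		... issue CXL_MBOX_OP_GET_EVENT_RECORD into 'payload' ...
 *		... process le16_to_cpu(payload->record_count) records ...
 *		... issue CXL_MBOX_OP_CLEAR_EVENT_RECORD with their handles ...
 *	} while (payload->flags & CXL_GET_EVENT_FLAG_MORE_RECORDS);
 */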
652 | |
653 | /* |
654 | * CXL rev 3.0 section 8.2.9.2.2; Table 8-49 |
655 | */ |
656 | enum cxl_event_log_type { |
657 | CXL_EVENT_TYPE_INFO = 0x00, |
658 | CXL_EVENT_TYPE_WARN, |
659 | CXL_EVENT_TYPE_FAIL, |
660 | CXL_EVENT_TYPE_FATAL, |
661 | CXL_EVENT_TYPE_MAX |
662 | }; |
663 | |
664 | /* |
665 | * Clear Event Records input payload |
666 | * CXL rev 3.0 section 8.2.9.2.3; Table 8-51 |
667 | */ |
668 | struct cxl_mbox_clear_event_payload { |
669 | u8 event_log; /* enum cxl_event_log_type */ |
670 | u8 clear_flags; |
671 | u8 nr_recs; |
672 | u8 reserved[3]; |
673 | __le16 handles[]; |
674 | } __packed; |
675 | #define CXL_CLEAR_EVENT_MAX_HANDLES U8_MAX |
676 | |
677 | struct cxl_mbox_get_partition_info { |
678 | __le64 active_volatile_cap; |
679 | __le64 active_persistent_cap; |
680 | __le64 next_volatile_cap; |
681 | __le64 next_persistent_cap; |
682 | } __packed; |
683 | |
684 | struct cxl_mbox_get_lsa { |
685 | __le32 offset; |
686 | __le32 length; |
687 | } __packed; |
688 | |
689 | struct cxl_mbox_set_lsa { |
690 | __le32 offset; |
691 | __le32 reserved; |
692 | u8 data[]; |
693 | } __packed; |
694 | |
695 | struct cxl_mbox_set_partition_info { |
696 | __le64 volatile_capacity; |
697 | u8 flags; |
698 | } __packed; |
699 | |
700 | #define CXL_SET_PARTITION_IMMEDIATE_FLAG BIT(0) |
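/*
 * Illustrative sketch ('volatile_bytes' is a placeholder): the volatile
 * capacity field is expressed in CXL_CAPACITY_MULTIPLIER units, and the
 * immediate flag asks the device to apply the new partitioning without
 * waiting for a reset:
 *
 *	struct cxl_mbox_set_partition_info pi = {
 *		.volatile_capacity = cpu_to_le64(volatile_bytes / CXL_CAPACITY_MULTIPLIER),
 *		.flags = CXL_SET_PARTITION_IMMEDIATE_FLAG,
 *	};
 */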
701 | |
702 | /* Get Health Info Output Payload CXL 3.2 Spec 8.2.10.9.3.1 Table 8-148 */ |
703 | struct cxl_mbox_get_health_info_out { |
704 | u8 health_status; |
705 | u8 media_status; |
706 | u8 additional_status; |
707 | u8 life_used; |
708 | __le16 device_temperature; |
709 | __le32 dirty_shutdown_cnt; |
710 | __le32 corrected_volatile_error_cnt; |
711 | __le32 corrected_persistent_error_cnt; |
712 | } __packed; |
713 | |
714 | /* Set Shutdown State Input Payload CXL 3.2 Spec 8.2.10.9.3.5 Table 8-152 */ |
715 | struct cxl_mbox_set_shutdown_state_in { |
716 | u8 state; |
717 | } __packed; |
718 | |
719 | /* Set Timestamp CXL 3.0 Spec 8.2.9.4.2 */ |
720 | struct cxl_mbox_set_timestamp_in { |
721 | __le64 timestamp; |
722 | |
723 | } __packed; |
724 | |
725 | /* Get Poison List CXL 3.0 Spec 8.2.9.8.4.1 */ |
726 | struct cxl_mbox_poison_in { |
727 | __le64 offset; |
728 | __le64 length; |
729 | } __packed; |
730 | |
731 | struct cxl_mbox_poison_out { |
732 | u8 flags; |
733 | u8 rsvd1; |
734 | __le64 overflow_ts; |
735 | __le16 count; |
736 | u8 rsvd2[20]; |
737 | struct cxl_poison_record { |
738 | __le64 address; |
739 | __le32 length; |
740 | __le32 rsvd; |
741 | } __packed record[]; |
742 | } __packed; |
743 | |
744 | /* |
745 | * Get Poison List address field encodes the starting |
746 | * address of poison, and the source of the poison. |
747 | */ |
748 | #define CXL_POISON_START_MASK GENMASK_ULL(63, 6) |
749 | #define CXL_POISON_SOURCE_MASK GENMASK(2, 0) |
750 | |
751 | /* Get Poison List record length is in units of 64 bytes */ |
752 | #define CXL_POISON_LEN_MULT 64 |
753 | |
754 | /* Kernel defined maximum for a list of poison errors */ |
755 | #define CXL_POISON_LIST_MAX 1024 |
756 | |
757 | /* Get Poison List: Payload out flags */ |
758 | #define CXL_POISON_FLAG_MORE BIT(0) |
759 | #define CXL_POISON_FLAG_OVERFLOW BIT(1) |
760 | #define CXL_POISON_FLAG_SCANNING BIT(2) |
761 | |
762 | /* Get Poison List: Poison Source */ |
763 | #define CXL_POISON_SOURCE_UNKNOWN 0 |
764 | #define CXL_POISON_SOURCE_EXTERNAL 1 |
765 | #define CXL_POISON_SOURCE_INTERNAL 2 |
766 | #define CXL_POISON_SOURCE_INJECTED 3 |
767 | #define CXL_POISON_SOURCE_VENDOR 7 |
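/*
 * Illustrative decode of one returned cxl_poison_record ('rec') using the
 * encodings above:
 *
 *	u64 addr = le64_to_cpu(rec->address);
 *	u64 dpa = addr & CXL_POISON_START_MASK;
 *	u8 source = FIELD_GET(CXL_POISON_SOURCE_MASK, addr);
 *	u64 len = le32_to_cpu(rec->length) * CXL_POISON_LEN_MULT;
 */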
768 | |
769 | /* Inject & Clear Poison CXL 3.0 Spec 8.2.9.8.4.2/3 */ |
770 | struct cxl_mbox_inject_poison { |
771 | __le64 address; |
772 | }; |
773 | |
774 | /* Clear Poison CXL 3.0 Spec 8.2.9.8.4.3 */ |
775 | struct cxl_mbox_clear_poison { |
776 | __le64 address; |
777 | u8 write_data[CXL_POISON_LEN_MULT]; |
778 | } __packed; |
779 | |
780 | /** |
781 | * struct cxl_mem_command - Driver representation of a memory device command |
782 | * @info: Command information as it exists for the UAPI |
783 | * @opcode: The actual bits used for the mailbox protocol |
784 | * @flags: Set of flags affecting driver behavior. |
785 | * |
786 | * * %CXL_CMD_FLAG_FORCE_ENABLE: In cases of error, commands with this flag |
787 | * will be enabled by the driver regardless of what hardware may have |
788 | * advertised. |
789 | * |
790 | * The cxl_mem_command is the driver's internal representation of commands that |
791 | * are supported by the driver. Some of these commands may not be supported by |
792 | * the hardware. The driver will use @info to validate the fields passed in by |
793 | * the user then submit the @opcode to the hardware. |
794 | * |
795 | * See struct cxl_command_info. |
796 | */ |
797 | struct cxl_mem_command { |
798 | struct cxl_command_info info; |
799 | enum cxl_opcode opcode; |
800 | u32 flags; |
801 | #define CXL_CMD_FLAG_FORCE_ENABLE BIT(0) |
802 | }; |
803 | |
804 | #define CXL_PMEM_SEC_STATE_USER_PASS_SET 0x01 |
805 | #define CXL_PMEM_SEC_STATE_MASTER_PASS_SET 0x02 |
806 | #define CXL_PMEM_SEC_STATE_LOCKED 0x04 |
807 | #define CXL_PMEM_SEC_STATE_FROZEN 0x08 |
808 | #define CXL_PMEM_SEC_STATE_USER_PLIMIT 0x10 |
809 | #define CXL_PMEM_SEC_STATE_MASTER_PLIMIT 0x20 |
810 | |
811 | /* set passphrase input payload */ |
812 | struct cxl_set_pass { |
813 | u8 type; |
814 | u8 reserved[31]; |
815 | /* CXL field using NVDIMM define, same length */ |
816 | u8 old_pass[NVDIMM_PASSPHRASE_LEN]; |
817 | u8 new_pass[NVDIMM_PASSPHRASE_LEN]; |
818 | } __packed; |
819 | |
820 | /* disable passphrase input payload */ |
821 | struct cxl_disable_pass { |
822 | u8 type; |
823 | u8 reserved[31]; |
824 | u8 pass[NVDIMM_PASSPHRASE_LEN]; |
825 | } __packed; |
826 | |
827 | /* passphrase secure erase payload */ |
828 | struct cxl_pass_erase { |
829 | u8 type; |
830 | u8 reserved[31]; |
831 | u8 pass[NVDIMM_PASSPHRASE_LEN]; |
832 | } __packed; |
833 | |
834 | enum { |
835 | CXL_PMEM_SEC_PASS_MASTER = 0, |
836 | CXL_PMEM_SEC_PASS_USER, |
837 | }; |
838 | |
839 | int cxl_internal_send_cmd(struct cxl_mailbox *cxl_mbox, |
840 | struct cxl_mbox_cmd *cmd); |
841 | int cxl_dev_state_identify(struct cxl_memdev_state *mds); |
842 | int cxl_await_media_ready(struct cxl_dev_state *cxlds); |
843 | int cxl_enumerate_cmds(struct cxl_memdev_state *mds); |
844 | int cxl_mem_dpa_fetch(struct cxl_memdev_state *mds, struct cxl_dpa_info *info); |
845 | struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev); |
846 | void set_exclusive_cxl_commands(struct cxl_memdev_state *mds, |
847 | unsigned long *cmds); |
848 | void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds, |
849 | unsigned long *cmds); |
850 | void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status); |
851 | void cxl_event_trace_record(const struct cxl_memdev *cxlmd, |
852 | enum cxl_event_log_type type, |
853 | enum cxl_event_type event_type, |
854 | const uuid_t *uuid, union cxl_event *evt); |
855 | int cxl_get_dirty_count(struct cxl_memdev_state *mds, u32 *count); |
856 | int cxl_arm_dirty_shutdown(struct cxl_memdev_state *mds); |
857 | int cxl_set_timestamp(struct cxl_memdev_state *mds); |
858 | int cxl_poison_state_init(struct cxl_memdev_state *mds); |
859 | int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len, |
860 | struct cxl_region *cxlr); |
861 | int cxl_trigger_poison_list(struct cxl_memdev *cxlmd); |
862 | int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa); |
863 | int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa); |
864 | |
865 | #ifdef CONFIG_CXL_EDAC_MEM_FEATURES |
866 | int devm_cxl_memdev_edac_register(struct cxl_memdev *cxlmd); |
867 | int devm_cxl_region_edac_register(struct cxl_region *cxlr); |
868 | int cxl_store_rec_gen_media(struct cxl_memdev *cxlmd, union cxl_event *evt); |
869 | int cxl_store_rec_dram(struct cxl_memdev *cxlmd, union cxl_event *evt); |
870 | void devm_cxl_memdev_edac_release(struct cxl_memdev *cxlmd); |
871 | #else |
872 | static inline int devm_cxl_memdev_edac_register(struct cxl_memdev *cxlmd) |
873 | { return 0; } |
874 | static inline int devm_cxl_region_edac_register(struct cxl_region *cxlr) |
875 | { return 0; } |
876 | static inline int cxl_store_rec_gen_media(struct cxl_memdev *cxlmd, |
877 | union cxl_event *evt) |
878 | { return 0; } |
879 | static inline int cxl_store_rec_dram(struct cxl_memdev *cxlmd, |
880 | union cxl_event *evt) |
881 | { return 0; } |
882 | static inline void devm_cxl_memdev_edac_release(struct cxl_memdev *cxlmd) |
883 | { return; } |
884 | #endif |
885 | |
886 | #ifdef CONFIG_CXL_SUSPEND |
887 | void cxl_mem_active_inc(void); |
888 | void cxl_mem_active_dec(void); |
889 | #else |
890 | static inline void cxl_mem_active_inc(void) |
891 | { |
892 | } |
893 | static inline void cxl_mem_active_dec(void) |
894 | { |
895 | } |
896 | #endif |
897 | |
898 | int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd); |
899 | |
900 | /** |
901 | * struct cxl_hdm - HDM Decoder registers and cached / decoded capabilities |
902 | * @regs: mapped registers, see devm_cxl_setup_hdm() |
903 | * @decoder_count: number of decoders for this port |
904 | * @target_count: for switch decoders, max downstream port targets |
905 | * @interleave_mask: interleave granularity capability, see check_interleave_cap() |
906 | * @iw_cap_mask: bitmask of supported interleave ways, see check_interleave_cap() |
907 | * @port: mapped cxl_port, see devm_cxl_setup_hdm() |
908 | */ |
909 | struct cxl_hdm { |
910 | struct cxl_component_regs regs; |
911 | unsigned int decoder_count; |
912 | unsigned int target_count; |
913 | unsigned int interleave_mask; |
914 | unsigned long iw_cap_mask; |
915 | struct cxl_port *port; |
916 | }; |
917 | |
918 | struct seq_file; |
919 | struct dentry *cxl_debugfs_create_dir(const char *dir); |
920 | void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds); |
921 | #endif /* __CXL_MEM_H__ */ |
922 |