1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
2 | /* Copyright(c) 2020 Intel Corporation. */ |
3 | |
4 | #ifndef __CXL_H__ |
5 | #define __CXL_H__ |
6 | |
7 | #include <linux/libnvdimm.h> |
8 | #include <linux/bitfield.h> |
9 | #include <linux/notifier.h> |
10 | #include <linux/bitops.h> |
11 | #include <linux/log2.h> |
12 | #include <linux/node.h> |
13 | #include <linux/io.h> |
14 | |
15 | /** |
16 | * DOC: cxl objects |
17 | * |
18 | * The CXL core objects like ports, decoders, and regions are shared |
19 | * between the subsystem drivers cxl_acpi, cxl_pci, and core drivers |
20 | * (port-driver, region-driver, nvdimm object-drivers... etc). |
21 | */ |
22 | |
23 | /* CXL 2.0 8.2.4 CXL Component Register Layout and Definition */ |
24 | #define CXL_COMPONENT_REG_BLOCK_SIZE SZ_64K |
25 | |
/* CXL 2.0 8.2.5 CXL.cache and CXL.mem Registers */
27 | #define CXL_CM_OFFSET 0x1000 |
28 | #define CXL_CM_CAP_HDR_OFFSET 0x0 |
29 | #define CXL_CM_CAP_HDR_ID_MASK GENMASK(15, 0) |
30 | #define CM_CAP_HDR_CAP_ID 1 |
31 | #define CXL_CM_CAP_HDR_VERSION_MASK GENMASK(19, 16) |
32 | #define CM_CAP_HDR_CAP_VERSION 1 |
33 | #define CXL_CM_CAP_HDR_CACHE_MEM_VERSION_MASK GENMASK(23, 20) |
34 | #define CM_CAP_HDR_CACHE_MEM_VERSION 1 |
35 | #define CXL_CM_CAP_HDR_ARRAY_SIZE_MASK GENMASK(31, 24) |
36 | #define CXL_CM_CAP_PTR_MASK GENMASK(31, 20) |
37 | |
38 | #define CXL_CM_CAP_CAP_ID_RAS 0x2 |
39 | #define CXL_CM_CAP_CAP_ID_HDM 0x5 |
40 | #define CXL_CM_CAP_CAP_HDM_VERSION 1 |
41 | |
42 | /* HDM decoders CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure */ |
43 | #define CXL_HDM_DECODER_CAP_OFFSET 0x0 |
44 | #define CXL_HDM_DECODER_COUNT_MASK GENMASK(3, 0) |
45 | #define CXL_HDM_DECODER_TARGET_COUNT_MASK GENMASK(7, 4) |
46 | #define CXL_HDM_DECODER_INTERLEAVE_11_8 BIT(8) |
47 | #define CXL_HDM_DECODER_INTERLEAVE_14_12 BIT(9) |
48 | #define CXL_HDM_DECODER_CTRL_OFFSET 0x4 |
49 | #define CXL_HDM_DECODER_ENABLE BIT(1) |
50 | #define CXL_HDM_DECODER0_BASE_LOW_OFFSET(i) (0x20 * (i) + 0x10) |
51 | #define CXL_HDM_DECODER0_BASE_HIGH_OFFSET(i) (0x20 * (i) + 0x14) |
52 | #define CXL_HDM_DECODER0_SIZE_LOW_OFFSET(i) (0x20 * (i) + 0x18) |
53 | #define CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(i) (0x20 * (i) + 0x1c) |
54 | #define CXL_HDM_DECODER0_CTRL_OFFSET(i) (0x20 * (i) + 0x20) |
55 | #define CXL_HDM_DECODER0_CTRL_IG_MASK GENMASK(3, 0) |
56 | #define CXL_HDM_DECODER0_CTRL_IW_MASK GENMASK(7, 4) |
57 | #define CXL_HDM_DECODER0_CTRL_LOCK BIT(8) |
58 | #define CXL_HDM_DECODER0_CTRL_COMMIT BIT(9) |
59 | #define CXL_HDM_DECODER0_CTRL_COMMITTED BIT(10) |
60 | #define CXL_HDM_DECODER0_CTRL_COMMIT_ERROR BIT(11) |
61 | #define CXL_HDM_DECODER0_CTRL_HOSTONLY BIT(12) |
62 | #define CXL_HDM_DECODER0_TL_LOW(i) (0x20 * (i) + 0x24) |
63 | #define CXL_HDM_DECODER0_TL_HIGH(i) (0x20 * (i) + 0x28) |
64 | #define CXL_HDM_DECODER0_SKIP_LOW(i) CXL_HDM_DECODER0_TL_LOW(i) |
65 | #define CXL_HDM_DECODER0_SKIP_HIGH(i) CXL_HDM_DECODER0_TL_HIGH(i) |
66 | |
67 | /* HDM decoder control register constants CXL 3.0 8.2.5.19.7 */ |
68 | #define CXL_DECODER_MIN_GRANULARITY 256 |
69 | #define CXL_DECODER_MAX_ENCODED_IG 6 |
70 | |
71 | static inline int cxl_hdm_decoder_count(u32 cap_hdr) |
72 | { |
73 | int val = FIELD_GET(CXL_HDM_DECODER_COUNT_MASK, cap_hdr); |
74 | |
75 | return val ? val * 2 : 1; |
76 | } |
77 | |
78 | /* Encode defined in CXL 2.0 8.2.5.12.7 HDM Decoder Control Register */ |
79 | static inline int eig_to_granularity(u16 eig, unsigned int *granularity) |
80 | { |
81 | if (eig > CXL_DECODER_MAX_ENCODED_IG) |
82 | return -EINVAL; |
83 | *granularity = CXL_DECODER_MIN_GRANULARITY << eig; |
84 | return 0; |
85 | } |
86 | |
87 | /* Encode defined in CXL ECN "3, 6, 12 and 16-way memory Interleaving" */ |
88 | static inline int eiw_to_ways(u8 eiw, unsigned int *ways) |
89 | { |
90 | switch (eiw) { |
91 | case 0 ... 4: |
92 | *ways = 1 << eiw; |
93 | break; |
94 | case 8 ... 10: |
95 | *ways = 3 << (eiw - 8); |
96 | break; |
97 | default: |
98 | return -EINVAL; |
99 | } |
100 | |
101 | return 0; |
102 | } |
103 | |
104 | static inline int granularity_to_eig(int granularity, u16 *eig) |
105 | { |
106 | if (granularity > SZ_16K || granularity < CXL_DECODER_MIN_GRANULARITY || |
107 | !is_power_of_2(n: granularity)) |
108 | return -EINVAL; |
109 | *eig = ilog2(granularity) - 8; |
110 | return 0; |
111 | } |
112 | |
113 | static inline int ways_to_eiw(unsigned int ways, u8 *eiw) |
114 | { |
115 | if (ways > 16) |
116 | return -EINVAL; |
117 | if (is_power_of_2(n: ways)) { |
118 | *eiw = ilog2(ways); |
119 | return 0; |
120 | } |
121 | if (ways % 3) |
122 | return -EINVAL; |
123 | ways /= 3; |
124 | if (!is_power_of_2(n: ways)) |
125 | return -EINVAL; |
126 | *eiw = ilog2(ways) + 8; |
127 | return 0; |
128 | } |
129 | |
/* RAS Registers CXL 2.0 8.2.5.9 CXL RAS Capability Structure */
#define CXL_RAS_UNCORRECTABLE_STATUS_OFFSET 0x0
#define CXL_RAS_UNCORRECTABLE_STATUS_MASK (GENMASK(16, 14) | GENMASK(11, 0))
#define CXL_RAS_UNCORRECTABLE_MASK_OFFSET 0x4
#define CXL_RAS_UNCORRECTABLE_MASK_MASK (GENMASK(16, 14) | GENMASK(11, 0))
#define CXL_RAS_UNCORRECTABLE_MASK_F256B_MASK BIT(8)
#define CXL_RAS_UNCORRECTABLE_SEVERITY_OFFSET 0x8
#define CXL_RAS_UNCORRECTABLE_SEVERITY_MASK (GENMASK(16, 14) | GENMASK(11, 0))
#define CXL_RAS_CORRECTABLE_STATUS_OFFSET 0xC
#define CXL_RAS_CORRECTABLE_STATUS_MASK GENMASK(6, 0)
#define CXL_RAS_CORRECTABLE_MASK_OFFSET 0x10
#define CXL_RAS_CORRECTABLE_MASK_MASK GENMASK(6, 0)
#define CXL_RAS_CAP_CONTROL_OFFSET 0x14
#define CXL_RAS_CAP_CONTROL_FE_MASK GENMASK(5, 0)
#define CXL_RAS_HEADER_LOG_OFFSET 0x18
#define CXL_RAS_CAPABILITY_LENGTH 0x58
#define CXL_HEADERLOG_SIZE SZ_512
#define CXL_HEADERLOG_SIZE_U32 (SZ_512 / sizeof(u32))
148 | |
149 | /* CXL 2.0 8.2.8.1 Device Capabilities Array Register */ |
150 | #define CXLDEV_CAP_ARRAY_OFFSET 0x0 |
151 | #define CXLDEV_CAP_ARRAY_CAP_ID 0 |
152 | #define CXLDEV_CAP_ARRAY_ID_MASK GENMASK_ULL(15, 0) |
153 | #define CXLDEV_CAP_ARRAY_COUNT_MASK GENMASK_ULL(47, 32) |
154 | /* CXL 2.0 8.2.8.2 CXL Device Capability Header Register */ |
155 | #define CXLDEV_CAP_HDR_CAP_ID_MASK GENMASK(15, 0) |
156 | /* CXL 2.0 8.2.8.2.1 CXL Device Capabilities */ |
157 | #define CXLDEV_CAP_CAP_ID_DEVICE_STATUS 0x1 |
158 | #define CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX 0x2 |
159 | #define CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX 0x3 |
160 | #define CXLDEV_CAP_CAP_ID_MEMDEV 0x4000 |
161 | |
162 | /* CXL 3.0 8.2.8.3.1 Event Status Register */ |
163 | #define CXLDEV_DEV_EVENT_STATUS_OFFSET 0x00 |
164 | #define CXLDEV_EVENT_STATUS_INFO BIT(0) |
165 | #define CXLDEV_EVENT_STATUS_WARN BIT(1) |
166 | #define CXLDEV_EVENT_STATUS_FAIL BIT(2) |
167 | #define CXLDEV_EVENT_STATUS_FATAL BIT(3) |
168 | |
169 | #define CXLDEV_EVENT_STATUS_ALL (CXLDEV_EVENT_STATUS_INFO | \ |
170 | CXLDEV_EVENT_STATUS_WARN | \ |
171 | CXLDEV_EVENT_STATUS_FAIL | \ |
172 | CXLDEV_EVENT_STATUS_FATAL) |
173 | |
174 | /* CXL rev 3.0 section 8.2.9.2.4; Table 8-52 */ |
175 | #define CXLDEV_EVENT_INT_MODE_MASK GENMASK(1, 0) |
176 | #define CXLDEV_EVENT_INT_MSGNUM_MASK GENMASK(7, 4) |
177 | |
178 | /* CXL 2.0 8.2.8.4 Mailbox Registers */ |
179 | #define CXLDEV_MBOX_CAPS_OFFSET 0x00 |
180 | #define CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK GENMASK(4, 0) |
181 | #define CXLDEV_MBOX_CAP_BG_CMD_IRQ BIT(6) |
182 | #define CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK GENMASK(10, 7) |
183 | #define CXLDEV_MBOX_CTRL_OFFSET 0x04 |
184 | #define CXLDEV_MBOX_CTRL_DOORBELL BIT(0) |
185 | #define CXLDEV_MBOX_CTRL_BG_CMD_IRQ BIT(2) |
186 | #define CXLDEV_MBOX_CMD_OFFSET 0x08 |
187 | #define CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK GENMASK_ULL(15, 0) |
188 | #define CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK GENMASK_ULL(36, 16) |
189 | #define CXLDEV_MBOX_STATUS_OFFSET 0x10 |
190 | #define CXLDEV_MBOX_STATUS_BG_CMD BIT(0) |
191 | #define CXLDEV_MBOX_STATUS_RET_CODE_MASK GENMASK_ULL(47, 32) |
192 | #define CXLDEV_MBOX_BG_CMD_STATUS_OFFSET 0x18 |
193 | #define CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK GENMASK_ULL(15, 0) |
194 | #define CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK GENMASK_ULL(22, 16) |
195 | #define CXLDEV_MBOX_BG_CMD_COMMAND_RC_MASK GENMASK_ULL(47, 32) |
196 | #define CXLDEV_MBOX_BG_CMD_COMMAND_VENDOR_MASK GENMASK_ULL(63, 48) |
197 | #define CXLDEV_MBOX_PAYLOAD_OFFSET 0x20 |
198 | |
199 | /* |
200 | * Using struct_group() allows for per register-block-type helper routines, |
201 | * without requiring block-type agnostic code to include the prefix. |
202 | */ |
203 | struct cxl_regs { |
204 | /* |
205 | * Common set of CXL Component register block base pointers |
206 | * @hdm_decoder: CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure |
207 | * @ras: CXL 2.0 8.2.5.9 CXL RAS Capability Structure |
208 | */ |
209 | struct_group_tagged(cxl_component_regs, component, |
210 | void __iomem *hdm_decoder; |
211 | void __iomem *ras; |
212 | ); |
213 | /* |
214 | * Common set of CXL Device register block base pointers |
215 | * @status: CXL 2.0 8.2.8.3 Device Status Registers |
216 | * @mbox: CXL 2.0 8.2.8.4 Mailbox Registers |
217 | * @memdev: CXL 2.0 8.2.8.5 Memory Device Registers |
218 | */ |
219 | struct_group_tagged(cxl_device_regs, device_regs, |
220 | void __iomem *status, *mbox, *memdev; |
221 | ); |
222 | |
223 | struct_group_tagged(cxl_pmu_regs, pmu_regs, |
224 | void __iomem *pmu; |
225 | ); |
226 | |
227 | /* |
228 | * RCH downstream port specific RAS register |
229 | * @aer: CXL 3.0 8.2.1.1 RCH Downstream Port RCRB |
230 | */ |
231 | struct_group_tagged(cxl_rch_regs, rch_regs, |
232 | void __iomem *dport_aer; |
233 | ); |
234 | }; |
235 | |
/* Offset/size of one register block capability within a mapped BAR */
struct cxl_reg_map {
	bool valid;		/* block was found during probe */
	int id;			/* capability id of the block */
	unsigned long offset;
	unsigned long size;
};

/* Capabilities discovered while probing a Component register block */
struct cxl_component_reg_map {
	struct cxl_reg_map hdm_decoder;
	struct cxl_reg_map ras;
};

/* Capabilities discovered while probing a Device register block */
struct cxl_device_reg_map {
	struct cxl_reg_map status;
	struct cxl_reg_map mbox;
	struct cxl_reg_map memdev;
};

/* Register map for a CXL Performance Monitoring Unit instance */
struct cxl_pmu_reg_map {
	struct cxl_reg_map pmu;
};
257 | |
258 | /** |
259 | * struct cxl_register_map - DVSEC harvested register block mapping parameters |
260 | * @host: device for devm operations and logging |
261 | * @base: virtual base of the register-block-BAR + @block_offset |
262 | * @resource: physical resource base of the register block |
263 | * @max_size: maximum mapping size to perform register search |
264 | * @reg_type: see enum cxl_regloc_type |
265 | * @component_map: cxl_reg_map for component registers |
266 | * @device_map: cxl_reg_maps for device registers |
267 | * @pmu_map: cxl_reg_maps for CXL Performance Monitoring Units |
268 | */ |
269 | struct cxl_register_map { |
270 | struct device *host; |
271 | void __iomem *base; |
272 | resource_size_t resource; |
273 | resource_size_t max_size; |
274 | u8 reg_type; |
275 | union { |
276 | struct cxl_component_reg_map component_map; |
277 | struct cxl_device_reg_map device_map; |
278 | struct cxl_pmu_reg_map pmu_map; |
279 | }; |
280 | }; |
281 | |
282 | void cxl_probe_component_regs(struct device *dev, void __iomem *base, |
283 | struct cxl_component_reg_map *map); |
284 | void cxl_probe_device_regs(struct device *dev, void __iomem *base, |
285 | struct cxl_device_reg_map *map); |
286 | int cxl_map_component_regs(const struct cxl_register_map *map, |
287 | struct cxl_component_regs *regs, |
288 | unsigned long map_mask); |
289 | int cxl_map_device_regs(const struct cxl_register_map *map, |
290 | struct cxl_device_regs *regs); |
291 | int cxl_map_pmu_regs(struct cxl_register_map *map, struct cxl_pmu_regs *regs); |
292 | |
293 | enum cxl_regloc_type; |
294 | int cxl_count_regblock(struct pci_dev *pdev, enum cxl_regloc_type type); |
295 | int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type, |
296 | struct cxl_register_map *map, int index); |
297 | int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type, |
298 | struct cxl_register_map *map); |
299 | int cxl_setup_regs(struct cxl_register_map *map); |
300 | struct cxl_dport; |
301 | resource_size_t cxl_rcd_component_reg_phys(struct device *dev, |
302 | struct cxl_dport *dport); |
303 | |
304 | #define CXL_RESOURCE_NONE ((resource_size_t) -1) |
305 | #define CXL_TARGET_STRLEN 20 |
306 | |
307 | /* |
308 | * cxl_decoder flags that define the type of memory / devices this |
309 | * decoder supports as well as configuration lock status See "CXL 2.0 |
310 | * 8.2.5.12.7 CXL HDM Decoder 0 Control Register" for details. |
311 | * Additionally indicate whether decoder settings were autodetected, |
312 | * user customized. |
313 | */ |
314 | #define CXL_DECODER_F_RAM BIT(0) |
315 | #define CXL_DECODER_F_PMEM BIT(1) |
316 | #define CXL_DECODER_F_TYPE2 BIT(2) |
317 | #define CXL_DECODER_F_TYPE3 BIT(3) |
318 | #define CXL_DECODER_F_LOCK BIT(4) |
319 | #define CXL_DECODER_F_ENABLE BIT(5) |
320 | #define CXL_DECODER_F_MASK GENMASK(5, 0) |
321 | |
/* Decoder target type: accelerator (type2) vs expander (type3) selector */
enum cxl_decoder_type {
	CXL_DECODER_DEVMEM = 2,		/* device / accelerator (type2) memory */
	CXL_DECODER_HOSTONLYMEM = 3,	/* host-only / expander (type3) memory */
};
326 | |
327 | /* |
328 | * Current specification goes up to 8, double that seems a reasonable |
329 | * software max for the foreseeable future |
330 | */ |
331 | #define CXL_DECODER_MAX_INTERLEAVE 16 |
332 | |
333 | #define CXL_QOS_CLASS_INVALID -1 |
334 | |
335 | /** |
336 | * struct cxl_decoder - Common CXL HDM Decoder Attributes |
337 | * @dev: this decoder's device |
338 | * @id: kernel device name id |
339 | * @hpa_range: Host physical address range mapped by this decoder |
340 | * @interleave_ways: number of cxl_dports in this decode |
341 | * @interleave_granularity: data stride per dport |
342 | * @target_type: accelerator vs expander (type2 vs type3) selector |
343 | * @region: currently assigned region for this decoder |
344 | * @flags: memory type capabilities and locking |
345 | * @commit: device/decoder-type specific callback to commit settings to hw |
346 | * @reset: device/decoder-type specific callback to reset hw settings |
347 | */ |
348 | struct cxl_decoder { |
349 | struct device dev; |
350 | int id; |
351 | struct range hpa_range; |
352 | int interleave_ways; |
353 | int interleave_granularity; |
354 | enum cxl_decoder_type target_type; |
355 | struct cxl_region *region; |
356 | unsigned long flags; |
357 | int (*commit)(struct cxl_decoder *cxld); |
358 | int (*reset)(struct cxl_decoder *cxld); |
359 | }; |
360 | |
361 | /* |
362 | * CXL_DECODER_DEAD prevents endpoints from being reattached to regions |
363 | * while cxld_unregister() is running |
364 | */ |
365 | enum cxl_decoder_mode { |
366 | CXL_DECODER_NONE, |
367 | CXL_DECODER_RAM, |
368 | CXL_DECODER_PMEM, |
369 | CXL_DECODER_MIXED, |
370 | CXL_DECODER_DEAD, |
371 | }; |
372 | |
373 | static inline const char *cxl_decoder_mode_name(enum cxl_decoder_mode mode) |
374 | { |
375 | static const char * const names[] = { |
376 | [CXL_DECODER_NONE] = "none" , |
377 | [CXL_DECODER_RAM] = "ram" , |
378 | [CXL_DECODER_PMEM] = "pmem" , |
379 | [CXL_DECODER_MIXED] = "mixed" , |
380 | }; |
381 | |
382 | if (mode >= CXL_DECODER_NONE && mode <= CXL_DECODER_MIXED) |
383 | return names[mode]; |
384 | return "mixed" ; |
385 | } |
386 | |
387 | /* |
388 | * Track whether this decoder is reserved for region autodiscovery, or |
389 | * free for userspace provisioning. |
390 | */ |
391 | enum cxl_decoder_state { |
392 | CXL_DECODER_STATE_MANUAL, |
393 | CXL_DECODER_STATE_AUTO, |
394 | }; |
395 | |
396 | /** |
397 | * struct cxl_endpoint_decoder - Endpoint / SPA to DPA decoder |
398 | * @cxld: base cxl_decoder_object |
399 | * @dpa_res: actively claimed DPA span of this decoder |
400 | * @skip: offset into @dpa_res where @cxld.hpa_range maps |
401 | * @mode: which memory type / access-mode-partition this decoder targets |
402 | * @state: autodiscovery state |
403 | * @pos: interleave position in @cxld.region |
404 | */ |
405 | struct cxl_endpoint_decoder { |
406 | struct cxl_decoder cxld; |
407 | struct resource *dpa_res; |
408 | resource_size_t skip; |
409 | enum cxl_decoder_mode mode; |
410 | enum cxl_decoder_state state; |
411 | int pos; |
412 | }; |
413 | |
414 | /** |
415 | * struct cxl_switch_decoder - Switch specific CXL HDM Decoder |
416 | * @cxld: base cxl_decoder object |
417 | * @nr_targets: number of elements in @target |
418 | * @target: active ordered target list in current decoder configuration |
419 | * |
420 | * The 'switch' decoder type represents the decoder instances of cxl_port's that |
421 | * route from the root of a CXL memory decode topology to the endpoints. They |
422 | * come in two flavors, root-level decoders, statically defined by platform |
423 | * firmware, and mid-level decoders, where interleave-granularity, |
424 | * interleave-width, and the target list are mutable. |
425 | */ |
426 | struct cxl_switch_decoder { |
427 | struct cxl_decoder cxld; |
428 | int nr_targets; |
429 | struct cxl_dport *target[]; |
430 | }; |
431 | |
432 | struct cxl_root_decoder; |
433 | typedef struct cxl_dport *(*cxl_calc_hb_fn)(struct cxl_root_decoder *cxlrd, |
434 | int pos); |
435 | |
436 | /** |
437 | * struct cxl_root_decoder - Static platform CXL address decoder |
438 | * @res: host / parent resource for region allocations |
439 | * @region_id: region id for next region provisioning event |
440 | * @calc_hb: which host bridge covers the n'th position by granularity |
441 | * @platform_data: platform specific configuration data |
442 | * @range_lock: sync region autodiscovery by address range |
443 | * @qos_class: QoS performance class cookie |
444 | * @cxlsd: base cxl switch decoder |
445 | */ |
446 | struct cxl_root_decoder { |
447 | struct resource *res; |
448 | atomic_t region_id; |
449 | cxl_calc_hb_fn calc_hb; |
450 | void *platform_data; |
451 | struct mutex range_lock; |
452 | int qos_class; |
453 | struct cxl_switch_decoder cxlsd; |
454 | }; |
455 | |
456 | /* |
457 | * enum cxl_config_state - State machine for region configuration |
458 | * @CXL_CONFIG_IDLE: Any sysfs attribute can be written freely |
459 | * @CXL_CONFIG_INTERLEAVE_ACTIVE: region size has been set, no more |
460 | * changes to interleave_ways or interleave_granularity |
 * @CXL_CONFIG_ACTIVE: All targets have been added; the region is now
462 | * active |
463 | * @CXL_CONFIG_RESET_PENDING: see commit_store() |
464 | * @CXL_CONFIG_COMMIT: Soft-config has been committed to hardware |
465 | */ |
466 | enum cxl_config_state { |
467 | CXL_CONFIG_IDLE, |
468 | CXL_CONFIG_INTERLEAVE_ACTIVE, |
469 | CXL_CONFIG_ACTIVE, |
470 | CXL_CONFIG_RESET_PENDING, |
471 | CXL_CONFIG_COMMIT, |
472 | }; |
473 | |
474 | /** |
475 | * struct cxl_region_params - region settings |
476 | * @state: allow the driver to lockdown further parameter changes |
477 | * @uuid: unique id for persistent regions |
478 | * @interleave_ways: number of endpoints in the region |
479 | * @interleave_granularity: capacity each endpoint contributes to a stripe |
480 | * @res: allocated iomem capacity for this region |
481 | * @targets: active ordered targets in current decoder configuration |
482 | * @nr_targets: number of targets |
483 | * |
484 | * State transitions are protected by the cxl_region_rwsem |
485 | */ |
486 | struct cxl_region_params { |
487 | enum cxl_config_state state; |
488 | uuid_t uuid; |
489 | int interleave_ways; |
490 | int interleave_granularity; |
491 | struct resource *res; |
492 | struct cxl_endpoint_decoder *targets[CXL_DECODER_MAX_INTERLEAVE]; |
493 | int nr_targets; |
494 | }; |
495 | |
496 | /* |
497 | * Indicate whether this region has been assembled by autodetection or |
498 | * userspace assembly. Prevent endpoint decoders outside of automatic |
499 | * detection from being added to the region. |
500 | */ |
501 | #define CXL_REGION_F_AUTO 0 |
502 | |
503 | /* |
504 | * Require that a committed region successfully complete a teardown once |
505 | * any of its associated decoders have been torn down. This maintains |
506 | * the commit state for the region since there are committed decoders, |
507 | * but blocks cxl_region_probe(). |
508 | */ |
509 | #define CXL_REGION_F_NEEDS_RESET 1 |
510 | |
511 | /** |
512 | * struct cxl_region - CXL region |
513 | * @dev: This region's device |
514 | * @id: This region's id. Id is globally unique across all regions |
515 | * @mode: Endpoint decoder allocation / access mode |
516 | * @type: Endpoint decoder target type |
517 | * @cxl_nvb: nvdimm bridge for coordinating @cxlr_pmem setup / shutdown |
518 | * @cxlr_pmem: (for pmem regions) cached copy of the nvdimm bridge |
519 | * @flags: Region state flags |
520 | * @params: active + config params for the region |
521 | * @coord: QoS access coordinates for the region |
522 | * @memory_notifier: notifier for setting the access coordinates to node |
523 | */ |
524 | struct cxl_region { |
525 | struct device dev; |
526 | int id; |
527 | enum cxl_decoder_mode mode; |
528 | enum cxl_decoder_type type; |
529 | struct cxl_nvdimm_bridge *cxl_nvb; |
530 | struct cxl_pmem_region *cxlr_pmem; |
531 | unsigned long flags; |
532 | struct cxl_region_params params; |
533 | struct access_coordinate coord[ACCESS_COORDINATE_MAX]; |
534 | struct notifier_block memory_notifier; |
535 | }; |
536 | |
/* Bridge object connecting a CXL port to the LIBNVDIMM subsystem */
struct cxl_nvdimm_bridge {
	int id;
	struct device dev;
	struct cxl_port *port;
	struct nvdimm_bus *nvdimm_bus;
	struct nvdimm_bus_descriptor nd_desc;
};

#define CXL_DEV_ID_LEN 19

/* Proxy device representing a CXL memdev to the LIBNVDIMM subsystem */
struct cxl_nvdimm {
	struct device dev;
	struct cxl_memdev *cxlmd;
	u8 dev_id[CXL_DEV_ID_LEN]; /* for nvdimm, string of 'serial' */
};

/* One memdev's span and interleave position within a pmem region */
struct cxl_pmem_region_mapping {
	struct cxl_memdev *cxlmd;
	struct cxl_nvdimm *cxl_nvd;
	u64 start;
	u64 size;
	int position;
};

/* LIBNVDIMM representation of a CXL persistent memory region */
struct cxl_pmem_region {
	struct device dev;
	struct cxl_region *cxlr;	/* backing CXL region */
	struct nd_region *nd_region;
	struct range hpa_range;
	int nr_mappings;		/* entries in @mapping */
	struct cxl_pmem_region_mapping mapping[];
};

/* DAX representation of a CXL region */
struct cxl_dax_region {
	struct device dev;
	struct cxl_region *cxlr;	/* backing CXL region */
	struct range hpa_range;
};
575 | |
576 | /** |
577 | * struct cxl_port - logical collection of upstream port devices and |
578 | * downstream port devices to construct a CXL memory |
579 | * decode hierarchy. |
580 | * @dev: this port's device |
581 | * @uport_dev: PCI or platform device implementing the upstream port capability |
582 | * @host_bridge: Shortcut to the platform attach point for this port |
583 | * @id: id for port device-name |
584 | * @dports: cxl_dport instances referenced by decoders |
585 | * @endpoints: cxl_ep instances, endpoints that are a descendant of this port |
586 | * @regions: cxl_region_ref instances, regions mapped by this port |
587 | * @parent_dport: dport that points to this port in the parent |
588 | * @decoder_ida: allocator for decoder ids |
589 | * @reg_map: component and ras register mapping parameters |
590 | * @nr_dports: number of entries in @dports |
591 | * @hdm_end: track last allocated HDM decoder instance for allocation ordering |
592 | * @commit_end: cursor to track highest committed decoder for commit ordering |
593 | * @dead: last ep has been removed, force port re-creation |
594 | * @depth: How deep this port is relative to the root. depth 0 is the root. |
595 | * @cdat: Cached CDAT data |
596 | * @cdat_available: Should a CDAT attribute be available in sysfs |
597 | * @pci_latency: Upstream latency in picoseconds |
598 | */ |
599 | struct cxl_port { |
600 | struct device dev; |
601 | struct device *uport_dev; |
602 | struct device *host_bridge; |
603 | int id; |
604 | struct xarray dports; |
605 | struct xarray endpoints; |
606 | struct xarray regions; |
607 | struct cxl_dport *parent_dport; |
608 | struct ida decoder_ida; |
609 | struct cxl_register_map reg_map; |
610 | int nr_dports; |
611 | int hdm_end; |
612 | int commit_end; |
613 | bool dead; |
614 | unsigned int depth; |
615 | struct cxl_cdat { |
616 | void *table; |
617 | size_t length; |
618 | } cdat; |
619 | bool cdat_available; |
620 | long pci_latency; |
621 | }; |
622 | |
623 | /** |
624 | * struct cxl_root - logical collection of root cxl_port items |
625 | * |
626 | * @port: cxl_port member |
627 | * @ops: cxl root operations |
628 | */ |
629 | struct cxl_root { |
630 | struct cxl_port port; |
631 | const struct cxl_root_ops *ops; |
632 | }; |
633 | |
/* Container cast: a root port is embedded in its struct cxl_root */
static inline struct cxl_root *
to_cxl_root(const struct cxl_port *port)
{
	return container_of(port, struct cxl_root, port);
}
639 | |
640 | struct cxl_root_ops { |
641 | int (*qos_class)(struct cxl_root *cxl_root, |
642 | struct access_coordinate *coord, int entries, |
643 | int *qos_class); |
644 | }; |
645 | |
646 | static inline struct cxl_dport * |
647 | cxl_find_dport_by_dev(struct cxl_port *port, const struct device *dport_dev) |
648 | { |
649 | return xa_load(&port->dports, index: (unsigned long)dport_dev); |
650 | } |
651 | |
652 | struct cxl_rcrb_info { |
653 | resource_size_t base; |
654 | u16 aer_cap; |
655 | }; |
656 | |
657 | /** |
658 | * struct cxl_dport - CXL downstream port |
659 | * @dport_dev: PCI bridge or firmware device representing the downstream link |
660 | * @reg_map: component and ras register mapping parameters |
661 | * @port_id: unique hardware identifier for dport in decoder target list |
662 | * @rcrb: Data about the Root Complex Register Block layout |
663 | * @rch: Indicate whether this dport was enumerated in RCH or VH mode |
664 | * @port: reference to cxl_port that contains this downstream port |
665 | * @regs: Dport parsed register blocks |
666 | * @coord: access coordinates (bandwidth and latency performance attributes) |
667 | * @link_latency: calculated PCIe downstream latency |
668 | */ |
669 | struct cxl_dport { |
670 | struct device *dport_dev; |
671 | struct cxl_register_map reg_map; |
672 | int port_id; |
673 | struct cxl_rcrb_info rcrb; |
674 | bool rch; |
675 | struct cxl_port *port; |
676 | struct cxl_regs regs; |
677 | struct access_coordinate coord[ACCESS_COORDINATE_MAX]; |
678 | long link_latency; |
679 | }; |
680 | |
681 | /** |
682 | * struct cxl_ep - track an endpoint's interest in a port |
683 | * @ep: device that hosts a generic CXL endpoint (expander or accelerator) |
684 | * @dport: which dport routes to this endpoint on @port |
685 | * @next: cxl switch port across the link attached to @dport NULL if |
686 | * attached to an endpoint |
687 | */ |
688 | struct cxl_ep { |
689 | struct device *ep; |
690 | struct cxl_dport *dport; |
691 | struct cxl_port *next; |
692 | }; |
693 | |
694 | /** |
695 | * struct cxl_region_ref - track a region's interest in a port |
696 | * @port: point in topology to install this reference |
697 | * @decoder: decoder assigned for @region in @port |
698 | * @region: region for this reference |
699 | * @endpoints: cxl_ep references for region members beneath @port |
700 | * @nr_targets_set: track how many targets have been programmed during setup |
701 | * @nr_eps: number of endpoints beneath @port |
702 | * @nr_targets: number of distinct targets needed to reach @nr_eps |
703 | */ |
704 | struct cxl_region_ref { |
705 | struct cxl_port *port; |
706 | struct cxl_decoder *decoder; |
707 | struct cxl_region *region; |
708 | struct xarray endpoints; |
709 | int nr_targets_set; |
710 | int nr_eps; |
711 | int nr_targets; |
712 | }; |
713 | |
714 | /* |
715 | * The platform firmware device hosting the root is also the top of the |
716 | * CXL port topology. All other CXL ports have another CXL port as their |
717 | * parent and their ->uport_dev / host device is out-of-line of the port |
718 | * ancestry. |
719 | */ |
720 | static inline bool is_cxl_root(struct cxl_port *port) |
721 | { |
722 | return port->uport_dev == port->dev.parent; |
723 | } |
724 | |
725 | int cxl_num_decoders_committed(struct cxl_port *port); |
726 | bool is_cxl_port(const struct device *dev); |
727 | struct cxl_port *to_cxl_port(const struct device *dev); |
728 | struct pci_bus; |
729 | int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev, |
730 | struct pci_bus *bus); |
731 | struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port); |
732 | struct cxl_port *devm_cxl_add_port(struct device *host, |
733 | struct device *uport_dev, |
734 | resource_size_t component_reg_phys, |
735 | struct cxl_dport *parent_dport); |
736 | struct cxl_root *devm_cxl_add_root(struct device *host, |
737 | const struct cxl_root_ops *ops); |
738 | struct cxl_root *find_cxl_root(struct cxl_port *port); |
739 | void put_cxl_root(struct cxl_root *cxl_root); |
740 | DEFINE_FREE(put_cxl_root, struct cxl_root *, if (_T) put_cxl_root(_T)) |
741 | |
742 | int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd); |
743 | void cxl_bus_rescan(void); |
744 | void cxl_bus_drain(void); |
745 | struct cxl_port *cxl_pci_find_port(struct pci_dev *pdev, |
746 | struct cxl_dport **dport); |
747 | struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd, |
748 | struct cxl_dport **dport); |
749 | bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd); |
750 | |
751 | struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port, |
752 | struct device *dport, int port_id, |
753 | resource_size_t component_reg_phys); |
754 | struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port, |
755 | struct device *dport_dev, int port_id, |
756 | resource_size_t rcrb); |
757 | |
#ifdef CONFIG_PCIEAER_CXL
void cxl_setup_parent_dport(struct device *host, struct cxl_dport *dport);
#else
/* Without CXL-aware AER support, parent dport setup is a no-op */
static inline void cxl_setup_parent_dport(struct device *host,
					  struct cxl_dport *dport) { }
#endif
764 | |
765 | struct cxl_decoder *to_cxl_decoder(struct device *dev); |
766 | struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev); |
767 | struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev); |
768 | struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev); |
769 | bool is_root_decoder(struct device *dev); |
770 | bool is_switch_decoder(struct device *dev); |
771 | bool is_endpoint_decoder(struct device *dev); |
772 | struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port, |
773 | unsigned int nr_targets, |
774 | cxl_calc_hb_fn calc_hb); |
775 | struct cxl_dport *cxl_hb_modulo(struct cxl_root_decoder *cxlrd, int pos); |
776 | struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port, |
777 | unsigned int nr_targets); |
778 | int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map); |
779 | struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port); |
780 | int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map); |
781 | int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld); |
782 | int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint); |
783 | |
784 | /** |
785 | * struct cxl_endpoint_dvsec_info - Cached DVSEC info |
786 | * @mem_enabled: cached value of mem_enabled in the DVSEC at init time |
787 | * @ranges: Number of active HDM ranges this device uses. |
788 | * @port: endpoint port associated with this info instance |
789 | * @dvsec_range: cached attributes of the ranges in the DVSEC, PCIE_DEVICE |
790 | */ |
791 | struct cxl_endpoint_dvsec_info { |
792 | bool mem_enabled; |
793 | int ranges; |
794 | struct cxl_port *port; |
795 | struct range dvsec_range[2]; |
796 | }; |
797 | |
798 | struct cxl_hdm; |
799 | struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port, |
800 | struct cxl_endpoint_dvsec_info *info); |
801 | int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm, |
802 | struct cxl_endpoint_dvsec_info *info); |
803 | int devm_cxl_add_passthrough_decoder(struct cxl_port *port); |
804 | int cxl_dvsec_rr_decode(struct device *dev, int dvsec, |
805 | struct cxl_endpoint_dvsec_info *info); |
806 | |
807 | bool is_cxl_region(struct device *dev); |
808 | |
809 | extern struct bus_type cxl_bus_type; |
810 | |
811 | struct cxl_driver { |
812 | const char *name; |
813 | int (*probe)(struct device *dev); |
814 | void (*remove)(struct device *dev); |
815 | struct device_driver drv; |
816 | int id; |
817 | }; |
818 | |
/* Container cast: retrieve the cxl_driver wrapping a device_driver */
static inline struct cxl_driver *to_cxl_drv(struct device_driver *drv)
{
	return container_of(drv, struct cxl_driver, drv);
}
823 | |
824 | int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner, |
825 | const char *modname); |
826 | #define cxl_driver_register(x) __cxl_driver_register(x, THIS_MODULE, KBUILD_MODNAME) |
827 | void cxl_driver_unregister(struct cxl_driver *cxl_drv); |
828 | |
829 | #define module_cxl_driver(__cxl_driver) \ |
830 | module_driver(__cxl_driver, cxl_driver_register, cxl_driver_unregister) |
831 | |
832 | #define CXL_DEVICE_NVDIMM_BRIDGE 1 |
833 | #define CXL_DEVICE_NVDIMM 2 |
834 | #define CXL_DEVICE_PORT 3 |
835 | #define CXL_DEVICE_ROOT 4 |
836 | #define CXL_DEVICE_MEMORY_EXPANDER 5 |
837 | #define CXL_DEVICE_REGION 6 |
838 | #define CXL_DEVICE_PMEM_REGION 7 |
839 | #define CXL_DEVICE_DAX_REGION 8 |
840 | #define CXL_DEVICE_PMU 9 |
841 | |
842 | #define MODULE_ALIAS_CXL(type) MODULE_ALIAS("cxl:t" __stringify(type) "*") |
843 | #define CXL_MODALIAS_FMT "cxl:t%d" |
844 | |
845 | struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev); |
846 | struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host, |
847 | struct cxl_port *port); |
848 | struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev); |
849 | bool is_cxl_nvdimm(struct device *dev); |
850 | bool is_cxl_nvdimm_bridge(struct device *dev); |
851 | int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd); |
852 | struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd); |
853 | |
#ifdef CONFIG_CXL_REGION
bool is_cxl_pmem_region(struct device *dev);
struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev);
int cxl_add_to_region(struct cxl_port *root,
		      struct cxl_endpoint_decoder *cxled);
struct cxl_dax_region *to_cxl_dax_region(struct device *dev);
#else
/*
 * Region support compiled out: no device is a region object, casts yield
 * NULL, and region assembly succeeds as a no-op.
 */
static inline bool is_cxl_pmem_region(struct device *dev)
{
	return false;
}
static inline struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
{
	return NULL;
}
static inline int cxl_add_to_region(struct cxl_port *root,
				    struct cxl_endpoint_decoder *cxled)
{
	return 0;
}
static inline struct cxl_dax_region *to_cxl_dax_region(struct device *dev)
{
	return NULL;
}
#endif
879 | |
880 | void cxl_endpoint_parse_cdat(struct cxl_port *port); |
881 | void cxl_switch_parse_cdat(struct cxl_port *port); |
882 | |
883 | int cxl_endpoint_get_perf_coordinates(struct cxl_port *port, |
884 | struct access_coordinate *coord); |
885 | void cxl_region_perf_data_calculate(struct cxl_region *cxlr, |
886 | struct cxl_endpoint_decoder *cxled); |
887 | |
888 | void cxl_memdev_update_perf(struct cxl_memdev *cxlmd); |
889 | |
890 | void cxl_coordinates_combine(struct access_coordinate *out, |
891 | struct access_coordinate *c1, |
892 | struct access_coordinate *c2); |
893 | |
894 | /* |
895 | * Unit test builds overrides this to __weak, find the 'strong' version |
896 | * of these symbols in tools/testing/cxl/. |
897 | */ |
898 | #ifndef __mock |
899 | #define __mock static |
900 | #endif |
901 | |
902 | #endif /* __CXL_H__ */ |
903 | |