// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/platform_device.h>
#include <linux/memregion.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/node.h>
#include <cxl/einj.h>
#include <cxlmem.h>
#include <cxlpci.h>
#include <cxl.h>
#include "core.h"

/**
 * DOC: cxl core
 *
 * The CXL core provides a set of interfaces that can be consumed by CXL aware
 * drivers. The interfaces allow for creation, modification, and destruction of
 * regions, memory devices, ports, and decoders. CXL aware drivers must register
 * with the CXL core via these interfaces in order to be able to participate in
 * cross-device interleave coordination. The CXL core also establishes and
 * maintains the bridge to the nvdimm subsystem.
 *
 * The CXL core introduces a sysfs hierarchy to control the devices that are
 * instantiated by the core.
 */

/*
 * All changes to the interleave configuration occur with this lock held
 * for write.
 */
DECLARE_RWSEM(cxl_region_rwsem);

static DEFINE_IDA(cxl_port_ida);
static DEFINE_XARRAY(cxl_root_buses);

int cxl_num_decoders_committed(struct cxl_port *port)
{
	lockdep_assert_held(&cxl_region_rwsem);

	return port->commit_end + 1;
}

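/*
 * Illustrative only (not part of the driver logic): callers of
 * cxl_num_decoders_committed() are expected to hold cxl_region_rwsem for
 * read. A minimal sketch of the expected call pattern, assuming a caller
 * that already holds a valid @port reference:
 *
 *	guard(rwsem_read)(&cxl_region_rwsem);
 *	committed = cxl_num_decoders_committed(port);
 */
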
static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%s\n", dev->type->name);
}
static DEVICE_ATTR_RO(devtype);

static int cxl_device_id(const struct device *dev)
{
	if (dev->type == &cxl_nvdimm_bridge_type)
		return CXL_DEVICE_NVDIMM_BRIDGE;
	if (dev->type == &cxl_nvdimm_type)
		return CXL_DEVICE_NVDIMM;
	if (dev->type == CXL_PMEM_REGION_TYPE())
		return CXL_DEVICE_PMEM_REGION;
	if (dev->type == CXL_DAX_REGION_TYPE())
		return CXL_DEVICE_DAX_REGION;
	if (is_cxl_port(dev)) {
		if (is_cxl_root(to_cxl_port(dev)))
			return CXL_DEVICE_ROOT;
		return CXL_DEVICE_PORT;
	}
	if (is_cxl_memdev(dev))
		return CXL_DEVICE_MEMORY_EXPANDER;
	if (dev->type == CXL_REGION_TYPE())
		return CXL_DEVICE_REGION;
	if (dev->type == &cxl_pmu_type)
		return CXL_DEVICE_PMU;
	return 0;
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sysfs_emit(buf, CXL_MODALIAS_FMT "\n", cxl_device_id(dev));
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *cxl_base_attributes[] = {
	&dev_attr_devtype.attr,
	&dev_attr_modalias.attr,
	NULL,
};

struct attribute_group cxl_base_attribute_group = {
	.attrs = cxl_base_attributes,
};

static ssize_t start_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%#llx\n", cxld->hpa_range.start);
}
static DEVICE_ATTR_ADMIN_RO(start);

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%#llx\n", range_len(&cxld->hpa_range));
}
static DEVICE_ATTR_RO(size);

#define CXL_DECODER_FLAG_ATTR(name, flag)                            \
static ssize_t name##_show(struct device *dev,                       \
			   struct device_attribute *attr, char *buf) \
{                                                                    \
	struct cxl_decoder *cxld = to_cxl_decoder(dev);              \
	                                                             \
	return sysfs_emit(buf, "%s\n",                               \
			  (cxld->flags & (flag)) ? "1" : "0");       \
}                                                                    \
static DEVICE_ATTR_RO(name)

CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM);
CXL_DECODER_FLAG_ATTR(cap_ram, CXL_DECODER_F_RAM);
CXL_DECODER_FLAG_ATTR(cap_type2, CXL_DECODER_F_TYPE2);
CXL_DECODER_FLAG_ATTR(cap_type3, CXL_DECODER_F_TYPE3);
CXL_DECODER_FLAG_ATTR(locked, CXL_DECODER_F_LOCK);

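/*
 * For reference (illustrative expansion, not compiled separately): each
 * CXL_DECODER_FLAG_ATTR() use above defines a read-only sysfs attribute
 * keyed off a decoder flag. For example,
 * CXL_DECODER_FLAG_ATTR(cap_pmem, CXL_DECODER_F_PMEM) yields roughly:
 *
 *	static ssize_t cap_pmem_show(struct device *dev,
 *				     struct device_attribute *attr, char *buf)
 *	{
 *		struct cxl_decoder *cxld = to_cxl_decoder(dev);
 *
 *		return sysfs_emit(buf, "%s\n",
 *				  (cxld->flags & CXL_DECODER_F_PMEM) ? "1" : "0");
 *	}
 *	static DEVICE_ATTR_RO(cap_pmem);
 */
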
static ssize_t target_type_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	switch (cxld->target_type) {
	case CXL_DECODER_DEVMEM:
		return sysfs_emit(buf, "accelerator\n");
	case CXL_DECODER_HOSTONLYMEM:
		return sysfs_emit(buf, "expander\n");
	}
	return -ENXIO;
}
static DEVICE_ATTR_RO(target_type);

static ssize_t emit_target_list(struct cxl_switch_decoder *cxlsd, char *buf)
{
	struct cxl_decoder *cxld = &cxlsd->cxld;
	ssize_t offset = 0;
	int i, rc = 0;

	for (i = 0; i < cxld->interleave_ways; i++) {
		struct cxl_dport *dport = cxlsd->target[i];
		struct cxl_dport *next = NULL;

		if (!dport)
			break;

		if (i + 1 < cxld->interleave_ways)
			next = cxlsd->target[i + 1];
		rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id,
				   next ? "," : "");
		if (rc < 0)
			return rc;
		offset += rc;
	}

	return offset;
}

static ssize_t target_list_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
	ssize_t offset;
	int rc;

	guard(rwsem_read)(&cxl_region_rwsem);
	rc = emit_target_list(cxlsd, buf);
	if (rc < 0)
		return rc;
	offset = rc;

	rc = sysfs_emit_at(buf, offset, "\n");
	if (rc < 0)
		return rc;

	return offset + rc;
}
static DEVICE_ATTR_RO(target_list);

static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	/* without @cxl_dpa_rwsem, make sure @part is not reloaded */
	int part = READ_ONCE(cxled->part);
	const char *desc;

	if (part < 0)
		desc = "none";
	else
		desc = cxlds->part[part].res.name;

	return sysfs_emit(buf, "%s\n", desc);
}

static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	enum cxl_partition_mode mode;
	ssize_t rc;

	if (sysfs_streq(buf, "pmem"))
		mode = CXL_PARTMODE_PMEM;
	else if (sysfs_streq(buf, "ram"))
		mode = CXL_PARTMODE_RAM;
	else
		return -EINVAL;

	rc = cxl_dpa_set_part(cxled, mode);
	if (rc)
		return rc;

	return len;
}
static DEVICE_ATTR_RW(mode);

static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *attr,
				 char *buf)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);

	guard(rwsem_read)(&cxl_dpa_rwsem);
	return sysfs_emit(buf, "%#llx\n", (u64)cxl_dpa_resource_start(cxled));
}
static DEVICE_ATTR_RO(dpa_resource);

static ssize_t dpa_size_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	resource_size_t size = cxl_dpa_size(cxled);

	return sysfs_emit(buf, "%pa\n", &size);
}

static ssize_t dpa_size_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);
	unsigned long long size;
	ssize_t rc;

	rc = kstrtoull(buf, 0, &size);
	if (rc)
		return rc;

	if (!IS_ALIGNED(size, SZ_256M))
		return -EINVAL;

	rc = cxl_dpa_free(cxled);
	if (rc)
		return rc;

	if (size == 0)
		return len;

	rc = cxl_dpa_alloc(cxled, size);
	if (rc)
		return rc;

	return len;
}
static DEVICE_ATTR_RW(dpa_size);

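/*
 * Illustrative only: dpa_size_store() expects a 256MB-aligned byte count,
 * and any existing allocation is freed before a new one is attempted. A
 * hypothetical shell session against an endpoint decoder (the sysfs path
 * is an example, not guaranteed on a given system):
 *
 *	# echo ram > /sys/bus/cxl/devices/decoder2.0/mode
 *	# echo 0x10000000 > /sys/bus/cxl/devices/decoder2.0/dpa_size
 */
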
static ssize_t interleave_granularity_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%d\n", cxld->interleave_granularity);
}

static DEVICE_ATTR_RO(interleave_granularity);

static ssize_t interleave_ways_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);

	return sysfs_emit(buf, "%d\n", cxld->interleave_ways);
}

static DEVICE_ATTR_RO(interleave_ways);

static ssize_t qos_class_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);

	return sysfs_emit(buf, "%d\n", cxlrd->qos_class);
}
static DEVICE_ATTR_RO(qos_class);

static struct attribute *cxl_decoder_base_attrs[] = {
	&dev_attr_start.attr,
	&dev_attr_size.attr,
	&dev_attr_locked.attr,
	&dev_attr_interleave_granularity.attr,
	&dev_attr_interleave_ways.attr,
	NULL,
};

static struct attribute_group cxl_decoder_base_attribute_group = {
	.attrs = cxl_decoder_base_attrs,
};

static struct attribute *cxl_decoder_root_attrs[] = {
	&dev_attr_cap_pmem.attr,
	&dev_attr_cap_ram.attr,
	&dev_attr_cap_type2.attr,
	&dev_attr_cap_type3.attr,
	&dev_attr_target_list.attr,
	&dev_attr_qos_class.attr,
	SET_CXL_REGION_ATTR(create_pmem_region)
	SET_CXL_REGION_ATTR(create_ram_region)
	SET_CXL_REGION_ATTR(delete_region)
	NULL,
};

static bool can_create_pmem(struct cxl_root_decoder *cxlrd)
{
	unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_PMEM;

	return (cxlrd->cxlsd.cxld.flags & flags) == flags;
}

static bool can_create_ram(struct cxl_root_decoder *cxlrd)
{
	unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_RAM;

	return (cxlrd->cxlsd.cxld.flags & flags) == flags;
}

static umode_t cxl_root_decoder_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);

	if (a == CXL_REGION_ATTR(create_pmem_region) && !can_create_pmem(cxlrd))
		return 0;

	if (a == CXL_REGION_ATTR(create_ram_region) && !can_create_ram(cxlrd))
		return 0;

	if (a == CXL_REGION_ATTR(delete_region) &&
	    !(can_create_pmem(cxlrd) || can_create_ram(cxlrd)))
		return 0;

	return a->mode;
}

static struct attribute_group cxl_decoder_root_attribute_group = {
	.attrs = cxl_decoder_root_attrs,
	.is_visible = cxl_root_decoder_visible,
};

static const struct attribute_group *cxl_decoder_root_attribute_groups[] = {
	&cxl_decoder_root_attribute_group,
	&cxl_decoder_base_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};

static struct attribute *cxl_decoder_switch_attrs[] = {
	&dev_attr_target_type.attr,
	&dev_attr_target_list.attr,
	SET_CXL_REGION_ATTR(region)
	NULL,
};

static struct attribute_group cxl_decoder_switch_attribute_group = {
	.attrs = cxl_decoder_switch_attrs,
};

static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = {
	&cxl_decoder_switch_attribute_group,
	&cxl_decoder_base_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};

static struct attribute *cxl_decoder_endpoint_attrs[] = {
	&dev_attr_target_type.attr,
	&dev_attr_mode.attr,
	&dev_attr_dpa_size.attr,
	&dev_attr_dpa_resource.attr,
	SET_CXL_REGION_ATTR(region)
	NULL,
};

static struct attribute_group cxl_decoder_endpoint_attribute_group = {
	.attrs = cxl_decoder_endpoint_attrs,
};

static const struct attribute_group *cxl_decoder_endpoint_attribute_groups[] = {
	&cxl_decoder_base_attribute_group,
	&cxl_decoder_endpoint_attribute_group,
	&cxl_base_attribute_group,
	NULL,
};

static void __cxl_decoder_release(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);

	ida_free(&port->decoder_ida, cxld->id);
	put_device(&port->dev);
}

static void cxl_endpoint_decoder_release(struct device *dev)
{
	struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev);

	__cxl_decoder_release(&cxled->cxld);
	kfree(cxled);
}

static void cxl_switch_decoder_release(struct device *dev)
{
	struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);

	__cxl_decoder_release(&cxlsd->cxld);
	kfree(cxlsd);
}

struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_root_decoder(dev),
			  "not a cxl_root_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_root_decoder, cxlsd.cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_root_decoder, "CXL");

static void cxl_root_decoder_release(struct device *dev)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);

	if (atomic_read(&cxlrd->region_id) >= 0)
		memregion_free(atomic_read(&cxlrd->region_id));
	__cxl_decoder_release(&cxlrd->cxlsd.cxld);
	kfree(cxlrd);
}

static const struct device_type cxl_decoder_endpoint_type = {
	.name = "cxl_decoder_endpoint",
	.release = cxl_endpoint_decoder_release,
	.groups = cxl_decoder_endpoint_attribute_groups,
};

static const struct device_type cxl_decoder_switch_type = {
	.name = "cxl_decoder_switch",
	.release = cxl_switch_decoder_release,
	.groups = cxl_decoder_switch_attribute_groups,
};

static const struct device_type cxl_decoder_root_type = {
	.name = "cxl_decoder_root",
	.release = cxl_root_decoder_release,
	.groups = cxl_decoder_root_attribute_groups,
};

bool is_endpoint_decoder(struct device *dev)
{
	return dev->type == &cxl_decoder_endpoint_type;
}
EXPORT_SYMBOL_NS_GPL(is_endpoint_decoder, "CXL");

bool is_root_decoder(struct device *dev)
{
	return dev->type == &cxl_decoder_root_type;
}
EXPORT_SYMBOL_NS_GPL(is_root_decoder, "CXL");

bool is_switch_decoder(struct device *dev)
{
	return is_root_decoder(dev) || dev->type == &cxl_decoder_switch_type;
}
EXPORT_SYMBOL_NS_GPL(is_switch_decoder, "CXL");

struct cxl_decoder *to_cxl_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev,
			  !is_switch_decoder(dev) && !is_endpoint_decoder(dev),
			  "not a cxl_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_decoder, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, "CXL");

struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_endpoint_decoder(dev),
			  "not a cxl_endpoint_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_endpoint_decoder, cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_endpoint_decoder, "CXL");

struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_switch_decoder(dev),
			  "not a cxl_switch_decoder device\n"))
		return NULL;
	return container_of(dev, struct cxl_switch_decoder, cxld.dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_switch_decoder, "CXL");

static void cxl_ep_release(struct cxl_ep *ep)
{
	put_device(ep->ep);
	kfree(ep);
}

static void cxl_ep_remove(struct cxl_port *port, struct cxl_ep *ep)
{
	if (!ep)
		return;
	xa_erase(&port->endpoints, (unsigned long) ep->ep);
	cxl_ep_release(ep);
}

static void cxl_port_release(struct device *dev)
{
	struct cxl_port *port = to_cxl_port(dev);
	unsigned long index;
	struct cxl_ep *ep;

	xa_for_each(&port->endpoints, index, ep)
		cxl_ep_remove(port, ep);
	xa_destroy(&port->endpoints);
	xa_destroy(&port->dports);
	xa_destroy(&port->regions);
	ida_free(&cxl_port_ida, port->id);
	if (is_cxl_root(port))
		kfree(to_cxl_root(port));
	else
		kfree(port);
}

static ssize_t decoders_committed_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct cxl_port *port = to_cxl_port(dev);

	guard(rwsem_read)(&cxl_region_rwsem);
	return sysfs_emit(buf, "%d\n", cxl_num_decoders_committed(port));
}

static DEVICE_ATTR_RO(decoders_committed);

static struct attribute *cxl_port_attrs[] = {
	&dev_attr_decoders_committed.attr,
	NULL,
};

static struct attribute_group cxl_port_attribute_group = {
	.attrs = cxl_port_attrs,
};

static const struct attribute_group *cxl_port_attribute_groups[] = {
	&cxl_base_attribute_group,
	&cxl_port_attribute_group,
	NULL,
};

static const struct device_type cxl_port_type = {
	.name = "cxl_port",
	.release = cxl_port_release,
	.groups = cxl_port_attribute_groups,
};

bool is_cxl_port(const struct device *dev)
{
	return dev->type == &cxl_port_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_port, "CXL");

struct cxl_port *to_cxl_port(const struct device *dev)
{
	if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
			  "not a cxl_port device\n"))
		return NULL;
	return container_of(dev, struct cxl_port, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_port, "CXL");

struct cxl_port *parent_port_of(struct cxl_port *port)
{
	if (!port || !port->parent_dport)
		return NULL;
	return port->parent_dport->port;
}

static void unregister_port(void *_port)
{
	struct cxl_port *port = _port;
	struct cxl_port *parent = parent_port_of(port);
	struct device *lock_dev;

	/*
	 * The CXL root port and the first level of ports are unregistered
	 * under the platform firmware device lock; all other ports are
	 * unregistered while holding their parent port lock.
	 */
	if (!parent)
		lock_dev = port->uport_dev;
	else if (is_cxl_root(parent))
		lock_dev = parent->uport_dev;
	else
		lock_dev = &parent->dev;

	device_lock_assert(lock_dev);
	port->dead = true;
	device_unregister(&port->dev);
}

static void cxl_unlink_uport(void *_port)
{
	struct cxl_port *port = _port;

	sysfs_remove_link(&port->dev.kobj, "uport");
}

static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
{
	int rc;

	rc = sysfs_create_link(&port->dev.kobj, &port->uport_dev->kobj,
			       "uport");
	if (rc)
		return rc;
	return devm_add_action_or_reset(host, cxl_unlink_uport, port);
}

static void cxl_unlink_parent_dport(void *_port)
{
	struct cxl_port *port = _port;

	sysfs_remove_link(&port->dev.kobj, "parent_dport");
}

static int devm_cxl_link_parent_dport(struct device *host,
				      struct cxl_port *port,
				      struct cxl_dport *parent_dport)
{
	int rc;

	if (!parent_dport)
		return 0;

	rc = sysfs_create_link(&port->dev.kobj, &parent_dport->dport_dev->kobj,
			       "parent_dport");
	if (rc)
		return rc;
	return devm_add_action_or_reset(host, cxl_unlink_parent_dport, port);
}

static struct lock_class_key cxl_port_key;

static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
				       struct cxl_dport *parent_dport)
{
	struct cxl_root *cxl_root __free(kfree) = NULL;
	struct cxl_port *port, *_port __free(kfree) = NULL;
	struct device *dev;
	int rc;

	/* No parent_dport, root cxl_port */
	if (!parent_dport) {
		cxl_root = kzalloc(sizeof(*cxl_root), GFP_KERNEL);
		if (!cxl_root)
			return ERR_PTR(-ENOMEM);
	} else {
		_port = kzalloc(sizeof(*port), GFP_KERNEL);
		if (!_port)
			return ERR_PTR(-ENOMEM);
	}

	rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
	if (rc < 0)
		return ERR_PTR(rc);

	if (cxl_root)
		port = &no_free_ptr(cxl_root)->port;
	else
		port = no_free_ptr(_port);

	port->id = rc;
	port->uport_dev = uport_dev;

	/*
	 * The top-level cxl_port "cxl_root" does not have a cxl_port as
	 * its parent and it does not have any corresponding component
	 * registers as its decode is described by a fixed platform
	 * description.
	 */
	dev = &port->dev;
	if (parent_dport) {
		struct cxl_port *parent_port = parent_dport->port;
		struct cxl_port *iter;

		dev->parent = &parent_port->dev;
		port->depth = parent_port->depth + 1;
		port->parent_dport = parent_dport;

		/*
		 * walk to the host bridge, or the first ancestor that knows
		 * the host bridge
		 */
		iter = port;
		while (!iter->host_bridge &&
		       !is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);
		if (iter->host_bridge)
			port->host_bridge = iter->host_bridge;
		else if (parent_dport->rch)
			port->host_bridge = parent_dport->dport_dev;
		else
			port->host_bridge = iter->uport_dev;
		dev_dbg(uport_dev, "host-bridge: %s\n",
			dev_name(port->host_bridge));
	} else
		dev->parent = uport_dev;

	ida_init(&port->decoder_ida);
	port->hdm_end = -1;
	port->commit_end = -1;
	xa_init(&port->dports);
	xa_init(&port->endpoints);
	xa_init(&port->regions);

	device_initialize(dev);
	lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth);
	device_set_pm_not_required(dev);
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_port_type;

	return port;
}

static int cxl_setup_comp_regs(struct device *host, struct cxl_register_map *map,
			       resource_size_t component_reg_phys)
{
	*map = (struct cxl_register_map) {
		.host = host,
		.reg_type = CXL_REGLOC_RBI_EMPTY,
		.resource = component_reg_phys,
	};

	if (component_reg_phys == CXL_RESOURCE_NONE)
		return 0;

	map->reg_type = CXL_REGLOC_RBI_COMPONENT;
	map->max_size = CXL_COMPONENT_REG_BLOCK_SIZE;

	return cxl_setup_regs(map);
}

static int cxl_port_setup_regs(struct cxl_port *port,
			       resource_size_t component_reg_phys)
{
	if (dev_is_platform(port->uport_dev))
		return 0;
	return cxl_setup_comp_regs(&port->dev, &port->reg_map,
				   component_reg_phys);
}

static int cxl_dport_setup_regs(struct device *host, struct cxl_dport *dport,
				resource_size_t component_reg_phys)
{
	int rc;

	if (dev_is_platform(dport->dport_dev))
		return 0;

	/*
	 * use @dport->dport_dev for the context for error messages during
	 * register probing, and fixup @host after the fact, since @host may be
	 * NULL.
	 */
	rc = cxl_setup_comp_regs(dport->dport_dev, &dport->reg_map,
				 component_reg_phys);
	dport->reg_map.host = host;
	return rc;
}

DEFINE_SHOW_ATTRIBUTE(einj_cxl_available_error_type);

static int cxl_einj_inject(void *data, u64 type)
{
	struct cxl_dport *dport = data;

	if (dport->rch)
		return einj_cxl_inject_rch_error(dport->rcrb.base, type);

	return einj_cxl_inject_error(to_pci_dev(dport->dport_dev), type);
}
DEFINE_DEBUGFS_ATTRIBUTE(cxl_einj_inject_fops, NULL, cxl_einj_inject,
			 "0x%llx\n");

static void cxl_debugfs_create_dport_dir(struct cxl_dport *dport)
{
	struct dentry *dir;

	if (!einj_cxl_is_initialized())
		return;

	/*
	 * dport_dev needs to be a PCIe port for CXL 2.0+ ports because
	 * EINJ expects a dport SBDF to be specified for 2.0 error injection.
	 */
	if (!dport->rch && !dev_is_pci(dport->dport_dev))
		return;

	dir = cxl_debugfs_create_dir(dev_name(dport->dport_dev));

	debugfs_create_file("einj_inject", 0200, dir, dport,
			    &cxl_einj_inject_fops);
}

static int cxl_port_add(struct cxl_port *port,
			resource_size_t component_reg_phys,
			struct cxl_dport *parent_dport)
{
	struct device *dev __free(put_device) = &port->dev;
	int rc;

	if (is_cxl_memdev(port->uport_dev)) {
		struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
		struct cxl_dev_state *cxlds = cxlmd->cxlds;

		rc = dev_set_name(dev, "endpoint%d", port->id);
		if (rc)
			return rc;

		/*
		 * The endpoint driver already enumerated the component and RAS
		 * registers. Reuse that enumeration while prepping them to be
		 * mapped by the cxl_port driver.
		 */
		port->reg_map = cxlds->reg_map;
		port->reg_map.host = &port->dev;
		cxlmd->endpoint = port;
	} else if (parent_dport) {
		rc = dev_set_name(dev, "port%d", port->id);
		if (rc)
			return rc;

		rc = cxl_port_setup_regs(port, component_reg_phys);
		if (rc)
			return rc;
	} else {
		rc = dev_set_name(dev, "root%d", port->id);
		if (rc)
			return rc;
	}

	rc = device_add(dev);
	if (rc)
		return rc;

	/* Inhibit the cleanup function invoked */
	dev = NULL;
	return 0;
}

static struct cxl_port *__devm_cxl_add_port(struct device *host,
					    struct device *uport_dev,
					    resource_size_t component_reg_phys,
					    struct cxl_dport *parent_dport)
{
	struct cxl_port *port;
	int rc;

	port = cxl_port_alloc(uport_dev, parent_dport);
	if (IS_ERR(port))
		return port;

	rc = cxl_port_add(port, component_reg_phys, parent_dport);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_add_action_or_reset(host, unregister_port, port);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_cxl_link_uport(host, port);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_cxl_link_parent_dport(host, port, parent_dport);
	if (rc)
		return ERR_PTR(rc);

	if (parent_dport && dev_is_pci(uport_dev))
		port->pci_latency = cxl_pci_get_latency(to_pci_dev(uport_dev));

	return port;
}

/**
 * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
 * @host: host device for devm operations
 * @uport_dev: "physical" device implementing this upstream port
 * @component_reg_phys: (optional) for configurable cxl_port instances
 * @parent_dport: next hop up in the CXL memory decode hierarchy
 */
struct cxl_port *devm_cxl_add_port(struct device *host,
				   struct device *uport_dev,
				   resource_size_t component_reg_phys,
				   struct cxl_dport *parent_dport)
{
	struct cxl_port *port, *parent_port;

	port = __devm_cxl_add_port(host, uport_dev, component_reg_phys,
				   parent_dport);

	parent_port = parent_dport ? parent_dport->port : NULL;
	if (IS_ERR(port)) {
		dev_dbg(uport_dev, "Failed to add%s%s%s: %ld\n",
			parent_port ? " port to " : "",
			parent_port ? dev_name(&parent_port->dev) : "",
			parent_port ? "" : " root port",
			PTR_ERR(port));
	} else {
		dev_dbg(uport_dev, "%s added%s%s%s\n",
			dev_name(&port->dev),
			parent_port ? " to " : "",
			parent_port ? dev_name(&parent_port->dev) : "",
			parent_port ? "" : " (root port)");
	}

	return port;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, "CXL");

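/*
 * Illustrative only (hypothetical caller, not part of this file): a
 * platform or host-bridge driver is expected to chain ports from the root
 * downward, roughly as follows, where @hb_dev, @port_id, and
 * @component_reg_phys are placeholders supplied by the caller:
 *
 *	struct cxl_port *root_port, *hb_port;
 *	struct cxl_dport *dport;
 *
 *	root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
 *	dport = devm_cxl_add_dport(root_port, hb_dev, port_id,
 *				   CXL_RESOURCE_NONE);
 *	hb_port = devm_cxl_add_port(host, hb_dev, component_reg_phys, dport);
 */
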
struct cxl_root *devm_cxl_add_root(struct device *host,
				   const struct cxl_root_ops *ops)
{
	struct cxl_root *cxl_root;
	struct cxl_port *port;

	port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
	if (IS_ERR(port))
		return ERR_CAST(port);

	cxl_root = to_cxl_root(port);
	cxl_root->ops = ops;
	return cxl_root;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_root, "CXL");

struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port)
{
	/* There is no pci_bus associated with a CXL platform-root port */
	if (is_cxl_root(port))
		return NULL;

	if (dev_is_pci(port->uport_dev)) {
		struct pci_dev *pdev = to_pci_dev(port->uport_dev);

		return pdev->subordinate;
	}

	return xa_load(&cxl_root_buses, (unsigned long)port->uport_dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_port_to_pci_bus, "CXL");

static void unregister_pci_bus(void *uport_dev)
{
	xa_erase(&cxl_root_buses, (unsigned long)uport_dev);
}

int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev,
			      struct pci_bus *bus)
{
	int rc;

	if (dev_is_pci(uport_dev))
		return -EINVAL;

	rc = xa_insert(&cxl_root_buses, (unsigned long)uport_dev, bus,
		       GFP_KERNEL);
	if (rc)
		return rc;
	return devm_add_action_or_reset(host, unregister_pci_bus, uport_dev);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_register_pci_bus, "CXL");

static bool dev_is_cxl_root_child(struct device *dev)
{
	struct cxl_port *port, *parent;

	if (!is_cxl_port(dev))
		return false;

	port = to_cxl_port(dev);
	if (is_cxl_root(port))
		return false;

	parent = to_cxl_port(port->dev.parent);
	if (is_cxl_root(parent))
		return true;

	return false;
}

struct cxl_root *find_cxl_root(struct cxl_port *port)
{
	struct cxl_port *iter = port;

	while (iter && !is_cxl_root(iter))
		iter = to_cxl_port(iter->dev.parent);

	if (!iter)
		return NULL;
	get_device(&iter->dev);
	return to_cxl_root(iter);
}
EXPORT_SYMBOL_NS_GPL(find_cxl_root, "CXL");

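/*
 * Illustrative only: find_cxl_root() takes a reference on the returned
 * root port, so a hypothetical caller is expected to drop it when done,
 * e.g.:
 *
 *	struct cxl_root *cxl_root = find_cxl_root(port);
 *
 *	if (!cxl_root)
 *		return -ENXIO;
 *	...
 *	put_device(&cxl_root->port.dev);
 */
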
static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
	struct cxl_dport *dport;
	unsigned long index;

	device_lock_assert(&port->dev);
	xa_for_each(&port->dports, index, dport)
		if (dport->port_id == id)
			return dport;
	return NULL;
}

static int add_dport(struct cxl_port *port, struct cxl_dport *dport)
{
	struct cxl_dport *dup;
	int rc;

	device_lock_assert(&port->dev);
	dup = find_dport(port, dport->port_id);
	if (dup) {
		dev_err(&port->dev,
			"unable to add dport%d-%s non-unique port id (%s)\n",
			dport->port_id, dev_name(dport->dport_dev),
			dev_name(dup->dport_dev));
		return -EBUSY;
	}

	rc = xa_insert(&port->dports, (unsigned long)dport->dport_dev, dport,
		       GFP_KERNEL);
	if (rc)
		return rc;

	port->nr_dports++;
	return 0;
}

/*
 * Since root-level CXL dports cannot be enumerated by PCI they are not
 * enumerated by the common port driver that acquires the port lock over
 * dport add/remove. Instead, root dports are manually added by a
 * platform driver and cond_cxl_root_lock() is used to take the missing
 * port lock in that case.
 */
static void cond_cxl_root_lock(struct cxl_port *port)
{
	if (is_cxl_root(port))
		device_lock(&port->dev);
}

static void cond_cxl_root_unlock(struct cxl_port *port)
{
	if (is_cxl_root(port))
		device_unlock(&port->dev);
}

static void cxl_dport_remove(void *data)
{
	struct cxl_dport *dport = data;
	struct cxl_port *port = dport->port;

	xa_erase(&port->dports, (unsigned long) dport->dport_dev);
	put_device(dport->dport_dev);
}

static void cxl_dport_unlink(void *data)
{
	struct cxl_dport *dport = data;
	struct cxl_port *port = dport->port;
	char link_name[CXL_TARGET_STRLEN];

	sprintf(link_name, "dport%d", dport->port_id);
	sysfs_remove_link(&port->dev.kobj, link_name);
}

static struct cxl_dport *
__devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
		     int port_id, resource_size_t component_reg_phys,
		     resource_size_t rcrb)
{
	char link_name[CXL_TARGET_STRLEN];
	struct cxl_dport *dport;
	struct device *host;
	int rc;

	if (is_cxl_root(port))
		host = port->uport_dev;
	else
		host = &port->dev;

	if (!host->driver) {
		dev_WARN_ONCE(&port->dev, 1, "dport:%s bad devm context\n",
			      dev_name(dport_dev));
		return ERR_PTR(-ENXIO);
	}

	if (snprintf(link_name, CXL_TARGET_STRLEN, "dport%d", port_id) >=
	    CXL_TARGET_STRLEN)
		return ERR_PTR(-EINVAL);

	dport = devm_kzalloc(host, sizeof(*dport), GFP_KERNEL);
	if (!dport)
		return ERR_PTR(-ENOMEM);

	dport->dport_dev = dport_dev;
	dport->port_id = port_id;
	dport->port = port;

	if (rcrb == CXL_RESOURCE_NONE) {
		rc = cxl_dport_setup_regs(&port->dev, dport,
					  component_reg_phys);
		if (rc)
			return ERR_PTR(rc);
	} else {
		dport->rcrb.base = rcrb;
		component_reg_phys = __rcrb_to_component(dport_dev, &dport->rcrb,
							 CXL_RCRB_DOWNSTREAM);
		if (component_reg_phys == CXL_RESOURCE_NONE) {
			dev_warn(dport_dev, "Invalid Component Registers in RCRB");
			return ERR_PTR(-ENXIO);
		}

		/*
		 * RCH @dport is not ready to map until associated with its
		 * memdev
		 */
		rc = cxl_dport_setup_regs(NULL, dport, component_reg_phys);
		if (rc)
			return ERR_PTR(rc);

		dport->rch = true;
	}

	if (component_reg_phys != CXL_RESOURCE_NONE)
		dev_dbg(dport_dev, "Component Registers found for dport: %pa\n",
			&component_reg_phys);

	cond_cxl_root_lock(port);
	rc = add_dport(port, dport);
	cond_cxl_root_unlock(port);
	if (rc)
		return ERR_PTR(rc);

	get_device(dport_dev);
	rc = devm_add_action_or_reset(host, cxl_dport_remove, dport);
	if (rc)
		return ERR_PTR(rc);

	rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name);
	if (rc)
		return ERR_PTR(rc);

	rc = devm_add_action_or_reset(host, cxl_dport_unlink, dport);
	if (rc)
		return ERR_PTR(rc);

	if (dev_is_pci(dport_dev))
		dport->link_latency = cxl_pci_get_latency(to_pci_dev(dport_dev));

	cxl_debugfs_create_dport_dir(dport);

	return dport;
}

/**
 * devm_cxl_add_dport - append VH downstream port data to a cxl_port
 * @port: the cxl_port that references this dport
 * @dport_dev: firmware or PCI device representing the dport
 * @port_id: identifier for this dport in a decoder's target list
 * @component_reg_phys: optional location of CXL component registers
 *
 * Note that dports are appended to the devm release actions of either
 * the port's host (for root ports), or the port itself (for switch
 * ports).
 */
struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
				     struct device *dport_dev, int port_id,
				     resource_size_t component_reg_phys)
{
	struct cxl_dport *dport;

	dport = __devm_cxl_add_dport(port, dport_dev, port_id,
				     component_reg_phys, CXL_RESOURCE_NONE);
	if (IS_ERR(dport)) {
		dev_dbg(dport_dev, "failed to add dport to %s: %ld\n",
			dev_name(&port->dev), PTR_ERR(dport));
	} else {
		dev_dbg(dport_dev, "dport added to %s\n",
			dev_name(&port->dev));
	}

	return dport;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, "CXL");

/**
 * devm_cxl_add_rch_dport - append RCH downstream port data to a cxl_port
 * @port: the cxl_port that references this dport
 * @dport_dev: firmware or PCI device representing the dport
 * @port_id: identifier for this dport in a decoder's target list
 * @rcrb: mandatory location of a Root Complex Register Block
 *
 * See CXL 3.0 9.11.8 CXL Devices Attached to an RCH
 */
struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port,
					 struct device *dport_dev, int port_id,
					 resource_size_t rcrb)
{
	struct cxl_dport *dport;

	if (rcrb == CXL_RESOURCE_NONE) {
		dev_dbg(&port->dev, "failed to add RCH dport, missing RCRB\n");
		return ERR_PTR(-EINVAL);
	}

	dport = __devm_cxl_add_dport(port, dport_dev, port_id,
				     CXL_RESOURCE_NONE, rcrb);
	if (IS_ERR(dport)) {
		dev_dbg(dport_dev, "failed to add RCH dport to %s: %ld\n",
			dev_name(&port->dev), PTR_ERR(dport));
	} else {
		dev_dbg(dport_dev, "RCH dport added to %s\n",
			dev_name(&port->dev));
	}

	return dport;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_rch_dport, "CXL");

static int add_ep(struct cxl_ep *new)
{
	struct cxl_port *port = new->dport->port;

	guard(device)(&port->dev);
	if (port->dead)
		return -ENXIO;

	return xa_insert(&port->endpoints, (unsigned long)new->ep,
			 new, GFP_KERNEL);
}

/**
 * cxl_add_ep - register an endpoint's interest in a port
 * @dport: the dport that routes to @ep_dev
 * @ep_dev: device representing the endpoint
 *
 * Intermediate CXL ports are scanned based on the arrival of endpoints.
 * When those endpoints depart the port can be destroyed once all
 * endpoints that care about that port have been removed.
 */
static int cxl_add_ep(struct cxl_dport *dport, struct device *ep_dev)
{
	struct cxl_ep *ep;
	int rc;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->ep = get_device(ep_dev);
	ep->dport = dport;

	rc = add_ep(ep);
	if (rc)
		cxl_ep_release(ep);
	return rc;
}

struct cxl_find_port_ctx {
	const struct device *dport_dev;
	const struct cxl_port *parent_port;
	struct cxl_dport **dport;
};

static int match_port_by_dport(struct device *dev, const void *data)
{
	const struct cxl_find_port_ctx *ctx = data;
	struct cxl_dport *dport;
	struct cxl_port *port;

	if (!is_cxl_port(dev))
		return 0;
	if (ctx->parent_port && dev->parent != &ctx->parent_port->dev)
		return 0;

	port = to_cxl_port(dev);
	dport = cxl_find_dport_by_dev(port, ctx->dport_dev);
	if (ctx->dport)
		*ctx->dport = dport;
	return dport != NULL;
}

static struct cxl_port *__find_cxl_port(struct cxl_find_port_ctx *ctx)
{
	struct device *dev;

	if (!ctx->dport_dev)
		return NULL;

	dev = bus_find_device(&cxl_bus_type, NULL, ctx, match_port_by_dport);
	if (dev)
		return to_cxl_port(dev);
	return NULL;
}

static struct cxl_port *find_cxl_port(struct device *dport_dev,
				      struct cxl_dport **dport)
{
	struct cxl_find_port_ctx ctx = {
		.dport_dev = dport_dev,
		.dport = dport,
	};
	struct cxl_port *port;

	port = __find_cxl_port(&ctx);
	return port;
}

static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port,
					 struct device *dport_dev,
					 struct cxl_dport **dport)
{
	struct cxl_find_port_ctx ctx = {
		.dport_dev = dport_dev,
		.parent_port = parent_port,
		.dport = dport,
	};
	struct cxl_port *port;

	port = __find_cxl_port(&ctx);
	return port;
}

/*
 * All users of grandparent() are using it to walk PCIe-like switch port
 * hierarchy. A PCIe switch is comprised of a bridge device representing the
 * upstream switch port and N bridges representing downstream switch ports. When
 * bridges stack the grand-parent of a downstream switch port is another
 * downstream switch port in the immediate ancestor switch.
 */
static struct device *grandparent(struct device *dev)
{
	if (dev && dev->parent)
		return dev->parent->parent;
	return NULL;
}

static struct device *endpoint_host(struct cxl_port *endpoint)
{
	struct cxl_port *port = to_cxl_port(endpoint->dev.parent);

	if (is_cxl_root(port))
		return port->uport_dev;
	return &port->dev;
}

static void delete_endpoint(void *data)
{
	struct cxl_memdev *cxlmd = data;
	struct cxl_port *endpoint = cxlmd->endpoint;
	struct device *host = endpoint_host(endpoint);

	scoped_guard(device, host) {
		if (host->driver && !endpoint->dead) {
			devm_release_action(host, cxl_unlink_parent_dport, endpoint);
			devm_release_action(host, cxl_unlink_uport, endpoint);
			devm_release_action(host, unregister_port, endpoint);
		}
		cxlmd->endpoint = NULL;
	}
	put_device(&endpoint->dev);
	put_device(host);
}

int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
{
	struct device *host = endpoint_host(endpoint);
	struct device *dev = &cxlmd->dev;

	get_device(host);
	get_device(&endpoint->dev);
	cxlmd->depth = endpoint->depth;
	return devm_add_action_or_reset(dev, delete_endpoint, cxlmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, "CXL");

/*
 * The natural end of life of a non-root 'cxl_port' is when its parent port goes
 * through a ->remove() event ("top-down" unregistration). The unnatural trigger
 * for a port to be unregistered is when all memdevs beneath that port have gone
 * through ->remove(). This "bottom-up" removal selectively removes individual
 * child ports manually. This depends on devm_cxl_add_port() to not change its
 * devm action registration order, and for dports to have already been
 * destroyed by reap_dports().
 */
static void delete_switch_port(struct cxl_port *port)
{
	devm_release_action(port->dev.parent, cxl_unlink_parent_dport, port);
	devm_release_action(port->dev.parent, cxl_unlink_uport, port);
	devm_release_action(port->dev.parent, unregister_port, port);
}

static void reap_dports(struct cxl_port *port)
{
	struct cxl_dport *dport;
	unsigned long index;

	device_lock_assert(&port->dev);

	xa_for_each(&port->dports, index, dport) {
		devm_release_action(&port->dev, cxl_dport_unlink, dport);
		devm_release_action(&port->dev, cxl_dport_remove, dport);
		devm_kfree(&port->dev, dport);
	}
}

struct detach_ctx {
	struct cxl_memdev *cxlmd;
	int depth;
};

static int port_has_memdev(struct device *dev, const void *data)
{
	const struct detach_ctx *ctx = data;
	struct cxl_port *port;

	if (!is_cxl_port(dev))
		return 0;

	port = to_cxl_port(dev);
	if (port->depth != ctx->depth)
		return 0;

	return !!cxl_ep_load(port, ctx->cxlmd);
}

static void cxl_detach_ep(void *data)
{
	struct cxl_memdev *cxlmd = data;

	for (int i = cxlmd->depth - 1; i >= 1; i--) {
		struct cxl_port *port, *parent_port;
		struct detach_ctx ctx = {
			.cxlmd = cxlmd,
			.depth = i,
		};
		struct cxl_ep *ep;
		bool died = false;

		struct device *dev __free(put_device) =
			bus_find_device(&cxl_bus_type, NULL, &ctx, port_has_memdev);
		if (!dev)
			continue;
		port = to_cxl_port(dev);

		parent_port = to_cxl_port(port->dev.parent);
		device_lock(&parent_port->dev);
		device_lock(&port->dev);
		ep = cxl_ep_load(port, cxlmd);
		dev_dbg(&cxlmd->dev, "disconnect %s from %s\n",
			ep ? dev_name(ep->ep) : "", dev_name(&port->dev));
		cxl_ep_remove(port, ep);
		if (ep && !port->dead && xa_empty(&port->endpoints) &&
		    !is_cxl_root(parent_port) && parent_port->dev.driver) {
			/*
			 * This was the last ep attached to a dynamically
			 * enumerated port. Block new cxl_add_ep() and garbage
			 * collect the port.
			 */
			died = true;
			port->dead = true;
			reap_dports(port);
		}
		device_unlock(&port->dev);

		if (died) {
			dev_dbg(&cxlmd->dev, "delete %s\n",
				dev_name(&port->dev));
			delete_switch_port(port);
		}
		device_unlock(&parent_port->dev);
	}
}

static resource_size_t find_component_registers(struct device *dev)
{
	struct cxl_register_map map;
	struct pci_dev *pdev;

	/*
	 * Theoretically, CXL component registers can be hosted on a
	 * non-PCI device; in practice, only cxl_test hits this case.
	 */
	if (!dev_is_pci(dev))
		return CXL_RESOURCE_NONE;

	pdev = to_pci_dev(dev);

	cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
	return map.resource;
}

static int add_port_attach_ep(struct cxl_memdev *cxlmd,
			      struct device *uport_dev,
			      struct device *dport_dev)
{
	struct device *dparent = grandparent(dport_dev);
	struct cxl_dport *dport, *parent_dport;
	resource_size_t component_reg_phys;
	int rc;

	if (!dparent) {
		/*
		 * The iteration reached the topology root without finding the
		 * CXL-root 'cxl_port' on a previous iteration, fail for now to
		 * be re-probed after platform driver attaches.
		 */
		dev_dbg(&cxlmd->dev, "%s is a root dport\n",
			dev_name(dport_dev));
		return -ENXIO;
	}

	struct cxl_port *parent_port __free(put_cxl_port) =
		find_cxl_port(dparent, &parent_dport);
	if (!parent_port) {
		/* iterate to create this parent_port */
		return -EAGAIN;
	}

	/*
	 * Defined with __free() here so that the port's device reference is
	 * dropped before the parent_port reference is released.
	 */
	struct cxl_port *port __free(put_cxl_port) = NULL;
	scoped_guard(device, &parent_port->dev) {
		if (!parent_port->dev.driver) {
			dev_warn(&cxlmd->dev,
				 "port %s:%s disabled, failed to enumerate CXL.mem\n",
				 dev_name(&parent_port->dev), dev_name(uport_dev));
			return -ENXIO;
		}

		port = find_cxl_port_at(parent_port, dport_dev, &dport);
		if (!port) {
			component_reg_phys = find_component_registers(uport_dev);
			port = devm_cxl_add_port(&parent_port->dev, uport_dev,
						 component_reg_phys, parent_dport);
			if (IS_ERR(port))
				return PTR_ERR(port);

			/* retry find to pick up the new dport information */
			port = find_cxl_port_at(parent_port, dport_dev, &dport);
			if (!port)
				return -ENXIO;
		}
	}

	dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
		dev_name(&port->dev), dev_name(port->uport_dev));
	rc = cxl_add_ep(dport, &cxlmd->dev);
	if (rc == -EBUSY) {
		/*
		 * "can't" happen, but this error code means
		 * something to the caller, so translate it.
		 */
		rc = -ENXIO;
	}

	return rc;
}

int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd)
{
	struct device *dev = &cxlmd->dev;
	struct device *iter;
	int rc;

	/*
	 * Skip intermediate port enumeration in the RCH case, there
	 * are no ports in between a host bridge and an endpoint.
	 */
	if (cxlmd->cxlds->rcd)
		return 0;

	rc = devm_add_action_or_reset(&cxlmd->dev, cxl_detach_ep, cxlmd);
	if (rc)
		return rc;

	/*
	 * Scan for and add all cxl_ports in this device's ancestry.
	 * Repeat until no more ports are added. Abort if a port add
	 * attempt fails.
	 */
retry:
	for (iter = dev; iter; iter = grandparent(iter)) {
		struct device *dport_dev = grandparent(iter);
		struct device *uport_dev;
		struct cxl_dport *dport;

		/*
		 * The terminal "grandparent" in PCI is NULL and @platform_bus
		 * for platform devices
		 */
		if (!dport_dev || dport_dev == &platform_bus)
			return 0;

		uport_dev = dport_dev->parent;
		if (!uport_dev) {
			dev_warn(dev, "at %s no parent for dport: %s\n",
				 dev_name(iter), dev_name(dport_dev));
			return -ENXIO;
		}

		dev_dbg(dev, "scan: iter: %s dport_dev: %s parent: %s\n",
			dev_name(iter), dev_name(dport_dev),
			dev_name(uport_dev));
		struct cxl_port *port __free(put_cxl_port) =
			find_cxl_port(dport_dev, &dport);
		if (port) {
			dev_dbg(&cxlmd->dev,
				"found already registered port %s:%s\n",
				dev_name(&port->dev),
				dev_name(port->uport_dev));
			rc = cxl_add_ep(dport, &cxlmd->dev);

			/*
			 * If the endpoint already exists in the port's list,
			 * that's ok, it was added on a previous pass.
			 * Otherwise, retry in add_port_attach_ep() after taking
			 * the parent_port lock as the current port may be being
			 * reaped.
			 */
			if (rc && rc != -EBUSY)
				return rc;

			cxl_gpf_port_setup(dport);

			/* Any more ports to add between this one and the root? */
			if (!dev_is_cxl_root_child(&port->dev))
				continue;

			return 0;
		}

		rc = add_port_attach_ep(cxlmd, uport_dev, dport_dev);
		/* port missing, try to add parent */
		if (rc == -EAGAIN)
			continue;
		/* failed to add ep or port */
		if (rc)
			return rc;
		/* port added, new descendants possible, start over */
		goto retry;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, "CXL");

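/*
 * Illustrative only: for a hypothetical topology of
 * memdev -> endpoint -> switch downstream port -> switch upstream port ->
 * host bridge, the loop above walks grandparent() hops up from the memdev,
 * registers an endpoint interest in each already-known port, adds a
 * cxl_port for any upstream device it has not seen before, and restarts
 * from the memdev ("goto retry") whenever a new port is added so newly
 * reachable intermediate ports are picked up on the next pass.
 */
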
struct cxl_port *cxl_pci_find_port(struct pci_dev *pdev,
				   struct cxl_dport **dport)
{
	return find_cxl_port(pdev->dev.parent, dport);
}
EXPORT_SYMBOL_NS_GPL(cxl_pci_find_port, "CXL");

struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
				   struct cxl_dport **dport)
{
	return find_cxl_port(grandparent(&cxlmd->dev), dport);
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, "CXL");

static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
				    struct cxl_port *port, int *target_map)
{
	int i;

	if (!target_map)
		return 0;

	device_lock_assert(&port->dev);

	if (xa_empty(&port->dports))
		return -EINVAL;

	guard(rwsem_write)(&cxl_region_rwsem);
	for (i = 0; i < cxlsd->cxld.interleave_ways; i++) {
		struct cxl_dport *dport = find_dport(port, target_map[i]);

		if (!dport)
			return -ENXIO;
		cxlsd->target[i] = dport;
	}

	return 0;
}

static struct lock_class_key cxl_decoder_key;

1739 | /** |
1740 | * cxl_decoder_init - Common decoder setup / initialization |
1741 | * @port: owning port of this decoder |
1742 | * @cxld: common decoder properties to initialize |
1743 | * |
1744 | * A port may contain one or more decoders. Each of those decoders |
1745 | * enables some address space for CXL.mem utilization. A decoder is |
1746 | * expected to be configured by the caller before registering it via |
1747 | * cxl_decoder_add(). |
1748 | */ |
1749 | static int cxl_decoder_init(struct cxl_port *port, struct cxl_decoder *cxld) |
1750 | { |
1751 | struct device *dev; |
1752 | int rc; |
1753 | |
1754 | rc = ida_alloc(ida: &port->decoder_ida, GFP_KERNEL); |
1755 | if (rc < 0) |
1756 | return rc; |
1757 | |
1758 | /* need parent to stick around to release the id */ |
1759 | get_device(dev: &port->dev); |
1760 | cxld->id = rc; |
1761 | |
1762 | dev = &cxld->dev; |
1763 | device_initialize(dev); |
1764 | lockdep_set_class(&dev->mutex, &cxl_decoder_key); |
1765 | device_set_pm_not_required(dev); |
1766 | dev->parent = &port->dev; |
1767 | dev->bus = &cxl_bus_type; |
1768 | |
1769 | /* Pre-initialize an "empty" decoder */ |
1770 | cxld->interleave_ways = 1; |
1771 | cxld->interleave_granularity = PAGE_SIZE; |
1772 | cxld->target_type = CXL_DECODER_HOSTONLYMEM; |
1773 | cxld->hpa_range = (struct range) { |
1774 | .start = 0, |
1775 | .end = -1, |
1776 | }; |
1777 | |
1778 | return 0; |
1779 | } |
1780 | |
1781 | static int cxl_switch_decoder_init(struct cxl_port *port, |
1782 | struct cxl_switch_decoder *cxlsd, |
1783 | int nr_targets) |
1784 | { |
1785 | if (nr_targets > CXL_DECODER_MAX_INTERLEAVE) |
1786 | return -EINVAL; |
1787 | |
1788 | cxlsd->nr_targets = nr_targets; |
1789 | return cxl_decoder_init(port, cxld: &cxlsd->cxld); |
1790 | } |
1791 | |
1792 | /** |
1793 | * cxl_root_decoder_alloc - Allocate a root level decoder |
1794 | * @port: owning CXL root of this decoder |
1795 | * @nr_targets: static number of downstream targets |
1796 | * |
1797 | * Return: A new cxl decoder to be registered by cxl_decoder_add(). A |
1798 | * 'CXL root' decoder is one that decodes from a top-level / static platform |
1799 | * firmware description of CXL resources into a CXL standard decode |
1800 | * topology. |
1801 | */ |
1802 | struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port, |
1803 | unsigned int nr_targets) |
1804 | { |
1805 | struct cxl_root_decoder *cxlrd; |
1806 | struct cxl_switch_decoder *cxlsd; |
1807 | struct cxl_decoder *cxld; |
1808 | int rc; |
1809 | |
1810 | if (!is_cxl_root(port)) |
1811 | return ERR_PTR(error: -EINVAL); |
1812 | |
1813 | cxlrd = kzalloc(struct_size(cxlrd, cxlsd.target, nr_targets), |
1814 | GFP_KERNEL); |
1815 | if (!cxlrd) |
1816 | return ERR_PTR(error: -ENOMEM); |
1817 | |
1818 | cxlsd = &cxlrd->cxlsd; |
1819 | rc = cxl_switch_decoder_init(port, cxlsd, nr_targets); |
1820 | if (rc) { |
1821 | kfree(objp: cxlrd); |
1822 | return ERR_PTR(error: rc); |
1823 | } |
1824 | |
1825 | mutex_init(&cxlrd->range_lock); |
1826 | |
1827 | cxld = &cxlsd->cxld; |
1828 | cxld->dev.type = &cxl_decoder_root_type; |
1829 | /* |
1830 | * cxl_root_decoder_release() special cases negative ids to |
1831 | * detect memregion_alloc() failures. |
1832 | */ |
1833 | atomic_set(v: &cxlrd->region_id, i: -1); |
1834 | rc = memregion_alloc(GFP_KERNEL); |
1835 | if (rc < 0) { |
1836 | put_device(dev: &cxld->dev); |
1837 | return ERR_PTR(error: rc); |
1838 | } |
1839 | |
1840 | atomic_set(v: &cxlrd->region_id, i: rc); |
1841 | cxlrd->qos_class = CXL_QOS_CLASS_INVALID; |
1842 | return cxlrd; |
1843 | } |
1844 | EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, "CXL"); |
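/*
 * Example (illustrative sketch, not the actual cxl_acpi implementation):
 * turning a firmware-described host memory window into a root decoder. The
 * window values, the dport id, and the @root_port variable are invented for
 * the example.
 *
 *	struct cxl_root_decoder *cxlrd;
 *	struct cxl_decoder *cxld;
 *	int target_map[1] = { 7 };	// dport id of the lone host bridge
 *	int rc;
 *
 *	cxlrd = cxl_root_decoder_alloc(root_port, 1);
 *	if (IS_ERR(cxlrd))
 *		return PTR_ERR(cxlrd);
 *
 *	cxld = &cxlrd->cxlsd.cxld;
 *	cxld->flags = CXL_DECODER_F_RAM | CXL_DECODER_F_ENABLE;
 *	cxld->hpa_range = (struct range) {
 *		.start = 0x1000000000ULL,
 *		.end = 0x17ffffffffULL,		// a 32GB window
 *	};
 *	cxld->interleave_ways = 1;
 *	cxld->interleave_granularity = 256;
 *
 *	rc = cxl_decoder_add(cxld, target_map);	// registration, see below
 *	if (rc)
 *		put_device(&cxld->dev);
 */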
1845 | |
1846 | /** |
1847 | * cxl_switch_decoder_alloc - Allocate a switch level decoder |
1848 | * @port: owning CXL switch port of this decoder |
1849 | * @nr_targets: max number of dynamically addressable downstream targets |
1850 | * |
1851 | * Return: A new cxl decoder to be registered by cxl_decoder_add(). A |
1852 | * 'switch' decoder is any decoder that can be enumerated by PCIe |
1853 | * topology and the HDM Decoder Capability. This includes the decoders |
1854 | * that sit between Switch Upstream Ports / Switch Downstream Ports and |
1855 | * Host Bridges / Root Ports. |
1856 | */ |
1857 | struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port, |
1858 | unsigned int nr_targets) |
1859 | { |
1860 | struct cxl_switch_decoder *cxlsd; |
1861 | struct cxl_decoder *cxld; |
1862 | int rc; |
1863 | |
1864 | if (is_cxl_root(port) || is_cxl_endpoint(port)) |
1865 | return ERR_PTR(error: -EINVAL); |
1866 | |
1867 | cxlsd = kzalloc(struct_size(cxlsd, target, nr_targets), GFP_KERNEL); |
1868 | if (!cxlsd) |
1869 | return ERR_PTR(error: -ENOMEM); |
1870 | |
1871 | rc = cxl_switch_decoder_init(port, cxlsd, nr_targets); |
1872 | if (rc) { |
1873 | kfree(objp: cxlsd); |
1874 | return ERR_PTR(error: rc); |
1875 | } |
1876 | |
1877 | cxld = &cxlsd->cxld; |
1878 | cxld->dev.type = &cxl_decoder_switch_type; |
1879 | return cxlsd; |
1880 | } |
1881 | EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, "CXL"); |
1882 | |
1883 | /** |
1884 | * cxl_endpoint_decoder_alloc - Allocate an endpoint decoder |
1885 | * @port: owning port of this decoder |
1886 | * |
1887 | * Return: A new cxl decoder to be registered by cxl_decoder_add() |
1888 | */ |
1889 | struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port) |
1890 | { |
1891 | struct cxl_endpoint_decoder *cxled; |
1892 | struct cxl_decoder *cxld; |
1893 | int rc; |
1894 | |
1895 | if (!is_cxl_endpoint(port)) |
1896 | return ERR_PTR(error: -EINVAL); |
1897 | |
1898 | cxled = kzalloc(sizeof(*cxled), GFP_KERNEL); |
1899 | if (!cxled) |
1900 | return ERR_PTR(error: -ENOMEM); |
1901 | |
1902 | cxled->pos = -1; |
1903 | cxled->part = -1; |
1904 | cxld = &cxled->cxld; |
1905 | rc = cxl_decoder_init(port, cxld); |
1906 | if (rc) { |
1907 | kfree(objp: cxled); |
1908 | return ERR_PTR(error: rc); |
1909 | } |
1910 | |
1911 | cxld->dev.type = &cxl_decoder_endpoint_type; |
1912 | return cxled; |
1913 | } |
1914 | EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, "CXL"); |
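/*
 * Example (illustrative sketch): endpoint decoders carry no downstream
 * target list, so a caller only allocates, configures, and registers them;
 * DPA capacity is associated later during region assembly. The
 * @endpoint_port variable is a placeholder for the endpoint's cxl_port.
 *
 *	struct cxl_endpoint_decoder *cxled;
 *	int rc;
 *
 *	cxled = cxl_endpoint_decoder_alloc(endpoint_port);
 *	if (IS_ERR(cxled))
 *		return PTR_ERR(cxled);
 *
 *	// ...fill cxled->cxld from the endpoint's HDM decoder registers...
 *
 *	rc = cxl_decoder_add(&cxled->cxld, NULL);	// NULL: no target_map
 */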
1915 | |
1916 | /** |
1917 | * cxl_decoder_add_locked - Add a decoder with targets |
1918 | * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc() |
1919 | * @target_map: A list of downstream ports that this decoder can direct memory |
1920 | * traffic to. These numbers should correspond to the port numbers |
1921 | * in the PCIe Link Capabilities structure; see the worked example below. |
1922 | * |
1923 | * Certain types of decoders may not have any targets. The main example of this |
1924 | * is an endpoint device. A more awkward example is a host bridge whose root |
1925 | * ports get hot-added (technically possible, though unlikely). |
1926 | * |
1927 | * This is the locked variant of cxl_decoder_add(). |
1928 | * |
1929 | * Context: Process context. Expects the device lock of the port that owns the |
1930 | * @cxld to be held. |
1931 | * |
1932 | * Return: Negative error code if the decoder wasn't properly configured; else |
1933 | * returns 0. |
1934 | */ |
1935 | int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map) |
1936 | { |
1937 | struct cxl_port *port; |
1938 | struct device *dev; |
1939 | int rc; |
1940 | |
1941 | if (WARN_ON_ONCE(!cxld)) |
1942 | return -EINVAL; |
1943 | |
1944 | if (WARN_ON_ONCE(IS_ERR(cxld))) |
1945 | return PTR_ERR(ptr: cxld); |
1946 | |
1947 | if (cxld->interleave_ways < 1) |
1948 | return -EINVAL; |
1949 | |
1950 | dev = &cxld->dev; |
1951 | |
1952 | port = to_cxl_port(cxld->dev.parent); |
1953 | if (!is_endpoint_decoder(dev)) { |
1954 | struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev); |
1955 | |
1956 | rc = decoder_populate_targets(cxlsd, port, target_map); |
1957 | if (rc && (cxld->flags & CXL_DECODER_F_ENABLE)) { |
1958 | dev_err(&port->dev, |
1959 | "Failed to populate active decoder targets\n"); |
1960 | return rc; |
1961 | } |
1962 | } |
1963 | |
1964 | rc = dev_set_name(dev, name: "decoder%d.%d", port->id, cxld->id); |
1965 | if (rc) |
1966 | return rc; |
1967 | |
1968 | return device_add(dev); |
1969 | } |
1970 | EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, "CXL"); |
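/*
 * Worked example of the @target_map contract, with made-up port numbers:
 * consider a 2-way switch decoder on a port that has two dports whose
 * hardware port ids (the PCIe Link Capabilities "Port Number" values passed
 * to devm_cxl_add_dport()) are 3 and 1.
 *
 *	int target_map[2] = { 3, 1 };
 *
 * Interleave position 0 then targets the dport registered with id 3 and
 * position 1 targets the dport with id 1. decoder_populate_targets()
 * resolves each entry via find_dport() and returns -ENXIO if an id has no
 * matching registered dport.
 */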
1971 | |
1972 | /** |
1973 | * cxl_decoder_add - Add a decoder with targets |
1974 | * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc() |
1975 | * @target_map: A list of downstream ports that this decoder can direct memory |
1976 | * traffic to. These numbers should correspond to the port numbers |
1977 | * in the PCIe Link Capabilities structure. |
1978 | * |
1979 | * This is the unlocked variant of cxl_decoder_add_locked(). |
1980 | * See cxl_decoder_add_locked(). |
1981 | * |
1982 | * Context: Process context. Takes and releases the device lock of the port that |
1983 | * owns the @cxld. |
1984 | */ |
1985 | int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map) |
1986 | { |
1987 | struct cxl_port *port; |
1988 | |
1989 | if (WARN_ON_ONCE(!cxld)) |
1990 | return -EINVAL; |
1991 | |
1992 | if (WARN_ON_ONCE(IS_ERR(cxld))) |
1993 | return PTR_ERR(ptr: cxld); |
1994 | |
1995 | port = to_cxl_port(cxld->dev.parent); |
1996 | |
1997 | guard(device)(T: &port->dev); |
1998 | return cxl_decoder_add_locked(cxld, target_map); |
1999 | } |
2000 | EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, "CXL"); |
2001 | |
2002 | static void cxld_unregister(void *dev) |
2003 | { |
2004 | struct cxl_endpoint_decoder *cxled; |
2005 | |
2006 | if (is_endpoint_decoder(dev)) { |
2007 | cxled = to_cxl_endpoint_decoder(dev); |
2008 | cxl_decoder_kill_region(cxled); |
2009 | } |
2010 | |
2011 | device_unregister(dev); |
2012 | } |
2013 | |
2014 | int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld) |
2015 | { |
2016 | return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev); |
2017 | } |
2018 | EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, "CXL"); |
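/*
 * Example (illustrative sketch): the expected allocate / configure /
 * register / autoremove sequence for a switch decoder. @port, @host,
 * @nr_dports and @target_map are placeholders supplied by the caller.
 *
 *	struct cxl_switch_decoder *cxlsd;
 *	int rc;
 *
 *	cxlsd = cxl_switch_decoder_alloc(port, nr_dports);
 *	if (IS_ERR(cxlsd))
 *		return PTR_ERR(cxlsd);
 *
 *	// ...fill cxlsd->cxld from the port's HDM decoder registers...
 *
 *	rc = cxl_decoder_add(&cxlsd->cxld, target_map);
 *	if (rc) {
 *		put_device(&cxlsd->cxld.dev);
 *		return rc;
 *	}
 *
 *	// unregister automatically when @host is unbound
 *	return cxl_decoder_autoremove(host, &cxlsd->cxld);
 */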
2019 | |
2020 | /** |
2021 | * __cxl_driver_register - register a driver for the cxl bus |
2022 | * @cxl_drv: cxl driver structure to attach |
2023 | * @owner: owning module/driver |
2024 | * @modname: KBUILD_MODNAME for parent driver |
2025 | */ |
2026 | int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner, |
2027 | const char *modname) |
2028 | { |
2029 | if (!cxl_drv->probe) { |
2030 | pr_debug("%s ->probe() must be specified\n", modname); |
2031 | return -EINVAL; |
2032 | } |
2033 | |
2034 | if (!cxl_drv->name) { |
2035 | pr_debug("%s ->name must be specified\n", modname); |
2036 | return -EINVAL; |
2037 | } |
2038 | |
2039 | if (!cxl_drv->id) { |
2040 | pr_debug("%s ->id must be specified\n", modname); |
2041 | return -EINVAL; |
2042 | } |
2043 | |
2044 | cxl_drv->drv.bus = &cxl_bus_type; |
2045 | cxl_drv->drv.owner = owner; |
2046 | cxl_drv->drv.mod_name = modname; |
2047 | cxl_drv->drv.name = cxl_drv->name; |
2048 | |
2049 | return driver_register(drv: &cxl_drv->drv); |
2050 | } |
2051 | EXPORT_SYMBOL_NS_GPL(__cxl_driver_register, "CXL"); |
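/*
 * Example (illustrative sketch): drivers normally reach this through the
 * cxl_driver_register() / module_cxl_driver() wrappers in cxl.h, which
 * supply THIS_MODULE and KBUILD_MODNAME. A hypothetical driver binding to
 * switch and endpoint ports might look roughly like:
 *
 *	static int my_port_probe(struct device *dev)
 *	{
 *		struct cxl_port *port = to_cxl_port(dev);
 *
 *		dev_dbg(dev, "probing %s\n", dev_name(port->uport_dev));
 *		return 0;
 *	}
 *
 *	static struct cxl_driver my_port_driver = {
 *		.name = "my_cxl_port",
 *		.probe = my_port_probe,
 *		.id = CXL_DEVICE_PORT,
 *	};
 *	module_cxl_driver(my_port_driver);
 *	MODULE_IMPORT_NS("CXL");
 */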
2052 | |
2053 | void cxl_driver_unregister(struct cxl_driver *cxl_drv) |
2054 | { |
2055 | driver_unregister(drv: &cxl_drv->drv); |
2056 | } |
2057 | EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, "CXL"); |
2058 | |
2059 | static int cxl_bus_uevent(const struct device *dev, struct kobj_uevent_env *env) |
2060 | { |
2061 | return add_uevent_var(env, format: "MODALIAS="CXL_MODALIAS_FMT, |
2062 | cxl_device_id(dev)); |
2063 | } |
2064 | |
2065 | static int cxl_bus_match(struct device *dev, const struct device_driver *drv) |
2066 | { |
2067 | return cxl_device_id(dev) == to_cxl_drv(drv)->id; |
2068 | } |
2069 | |
2070 | static int cxl_bus_probe(struct device *dev) |
2071 | { |
2072 | int rc; |
2073 | |
2074 | rc = to_cxl_drv(dev->driver)->probe(dev); |
2075 | dev_dbg(dev, "probe: %d\n", rc); |
2076 | return rc; |
2077 | } |
2078 | |
2079 | static void cxl_bus_remove(struct device *dev) |
2080 | { |
2081 | struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver); |
2082 | |
2083 | if (cxl_drv->remove) |
2084 | cxl_drv->remove(dev); |
2085 | } |
2086 | |
2087 | static struct workqueue_struct *cxl_bus_wq; |
2088 | |
2089 | static int cxl_rescan_attach(struct device *dev, void *data) |
2090 | { |
2091 | int rc = device_attach(dev); |
2092 | |
2093 | dev_vdbg(dev, "rescan: %s\n", rc ? "attach": "detached"); |
2094 | |
2095 | return 0; |
2096 | } |
2097 | |
2098 | static void cxl_bus_rescan_queue(struct work_struct *w) |
2099 | { |
2100 | bus_for_each_dev(bus: &cxl_bus_type, NULL, NULL, fn: cxl_rescan_attach); |
2101 | } |
2102 | |
2103 | void cxl_bus_rescan(void) |
2104 | { |
2105 | static DECLARE_WORK(rescan_work, cxl_bus_rescan_queue); |
2106 | |
2107 | queue_work(wq: cxl_bus_wq, work: &rescan_work); |
2108 | } |
2109 | EXPORT_SYMBOL_NS_GPL(cxl_bus_rescan, "CXL"); |
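/*
 * Example: the rescan runs asynchronously on cxl_bus_wq. A typical use is
 * for the platform driver to kick it once new topology (e.g. root decoders)
 * has been published, so devices that previously failed to attach get
 * another device_attach() pass:
 *
 *	// after registering the root port and its decoders
 *	cxl_bus_rescan();
 *	// user tooling can later synchronize via /sys/bus/cxl/flush
 */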
2110 | |
2111 | void cxl_bus_drain(void) |
2112 | { |
2113 | drain_workqueue(wq: cxl_bus_wq); |
2114 | } |
2115 | EXPORT_SYMBOL_NS_GPL(cxl_bus_drain, "CXL"); |
2116 | |
2117 | bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd) |
2118 | { |
2119 | return queue_work(wq: cxl_bus_wq, work: &cxlmd->detach_work); |
2120 | } |
2121 | EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, "CXL"); |
2122 | |
2123 | static void add_latency(struct access_coordinate *c, long latency) |
2124 | { |
2125 | for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) { |
2126 | c[i].write_latency += latency; |
2127 | c[i].read_latency += latency; |
2128 | } |
2129 | } |
2130 | |
2131 | static bool coordinates_valid(struct access_coordinate *c) |
2132 | { |
2133 | for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) { |
2134 | if (c[i].read_bandwidth && c[i].write_bandwidth && |
2135 | c[i].read_latency && c[i].write_latency) |
2136 | continue; |
2137 | return false; |
2138 | } |
2139 | |
2140 | return true; |
2141 | } |
2142 | |
2143 | static void set_min_bandwidth(struct access_coordinate *c, unsigned int bw) |
2144 | { |
2145 | for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) { |
2146 | c[i].write_bandwidth = min(c[i].write_bandwidth, bw); |
2147 | c[i].read_bandwidth = min(c[i].read_bandwidth, bw); |
2148 | } |
2149 | } |
2150 | |
2151 | static void set_access_coordinates(struct access_coordinate *out, |
2152 | struct access_coordinate *in) |
2153 | { |
2154 | for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) |
2155 | out[i] = in[i]; |
2156 | } |
2157 | |
2158 | static bool parent_port_is_cxl_root(struct cxl_port *port) |
2159 | { |
2160 | return is_cxl_root(port: to_cxl_port(port->dev.parent)); |
2161 | } |
2162 | |
2163 | /** |
2164 | * cxl_endpoint_get_perf_coordinates - Retrieve performance numbers stored in dports |
2165 | * of the CXL path |
2166 | * @port: endpoint cxl_port |
2167 | * @coord: output performance data |
2168 | * |
2169 | * Return: errno on failure, 0 on success. |
2170 | */ |
2171 | int cxl_endpoint_get_perf_coordinates(struct cxl_port *port, |
2172 | struct access_coordinate *coord) |
2173 | { |
2174 | struct cxl_memdev *cxlmd = to_cxl_memdev(dev: port->uport_dev); |
2175 | struct access_coordinate c[] = { |
2176 | { |
2177 | .read_bandwidth = UINT_MAX, |
2178 | .write_bandwidth = UINT_MAX, |
2179 | }, |
2180 | { |
2181 | .read_bandwidth = UINT_MAX, |
2182 | .write_bandwidth = UINT_MAX, |
2183 | }, |
2184 | }; |
2185 | struct cxl_port *iter = port; |
2186 | struct cxl_dport *dport; |
2187 | struct pci_dev *pdev; |
2188 | struct device *dev; |
2189 | unsigned int bw; |
2190 | bool is_cxl_root; |
2191 | |
2192 | if (!is_cxl_endpoint(port)) |
2193 | return -EINVAL; |
2194 | |
2195 | /* |
2196 | * Skip the calculation for RCD. The expectation is that HMAT already |
2197 | * covers the RCD case, since an RCH does not support hotplug. |
2198 | */ |
2199 | if (cxlmd->cxlds->rcd) |
2200 | return 0; |
2201 | |
2202 | /* |
2203 | * Exit the loop when the parent port of the current iter port is the |
2204 | * CXL root. The loop starts at the endpoint and, on each iteration, |
2205 | * gathers the latency of the CXL link from the current device/port to |
2206 | * the connected downstream port. |
2207 | */ |
2208 | do { |
2209 | dport = iter->parent_dport; |
2210 | iter = to_cxl_port(iter->dev.parent); |
2211 | is_cxl_root = parent_port_is_cxl_root(port: iter); |
2212 | |
2213 | /* |
2214 | * There is no valid access_coordinate for a root port since RPs do not |
2215 | * have CDAT; the root port hop is therefore skipped. |
2216 | */ |
2217 | if (!is_cxl_root) { |
2218 | if (!coordinates_valid(c: dport->coord)) |
2219 | return -EINVAL; |
2220 | cxl_coordinates_combine(out: c, c1: c, c2: dport->coord); |
2221 | } |
2222 | add_latency(c, latency: dport->link_latency); |
2223 | } while (!is_cxl_root); |
2224 | |
2225 | dport = iter->parent_dport; |
2226 | /* Retrieve host bridge (HB) coordinates */ |
2227 | if (!coordinates_valid(c: dport->coord)) |
2228 | return -EINVAL; |
2229 | cxl_coordinates_combine(out: c, c1: c, c2: dport->coord); |
2230 | |
2231 | dev = port->uport_dev->parent; |
2232 | if (!dev_is_pci(dev)) |
2233 | return -ENODEV; |
2234 | |
2235 | /* Get the calculated bandwidth of the PCIe path */ |
2236 | pdev = to_pci_dev(dev); |
2237 | bw = pcie_bandwidth_available(dev: pdev, NULL, NULL, NULL); |
2238 | if (bw == 0) |
2239 | return -ENXIO; |
2240 | bw /= BITS_PER_BYTE; |
2241 | |
2242 | set_min_bandwidth(c, bw); |
2243 | set_access_coordinates(out: coord, in: c); |
2244 | |
2245 | return 0; |
2246 | } |
2247 | EXPORT_SYMBOL_NS_GPL(cxl_endpoint_get_perf_coordinates, "CXL"); |
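/*
 * Worked example with invented numbers (units omitted) for a two-hop path
 * endpoint -> switch DSP -> root port -> host bridge. Each loop iteration
 * folds in the dport's CDAT coordinates via cxl_coordinates_combine()
 * (latencies add, bandwidths take the per-hop minimum) and then adds the
 * link latency; the root port hop contributes only link latency since root
 * ports have no CDAT:
 *
 *	hop 1 (endpoint -> switch DSP): DSP CDAT latency 150, bandwidth 16000;
 *					link latency 200
 *	hop 2 (switch USP -> root port): link latency 100, no CDAT
 *	host bridge CDAT (folded after the loop): latency 50, bandwidth 32000
 *
 *	latency   = 150 + 200 + 100 + 50 = 500
 *	bandwidth = min(16000, 32000) = 16000, further clamped by the PCIe
 *	path bandwidth that pcie_bandwidth_available() reports, divided by
 *	BITS_PER_BYTE to convert Mb/s to MB/s
 */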
2248 | |
2249 | int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port, |
2250 | struct access_coordinate *c) |
2251 | { |
2252 | struct cxl_dport *dport = port->parent_dport; |
2253 | |
2254 | /* Check that this port is connected to a switch DSP and not an RP */ |
2255 | if (parent_port_is_cxl_root(port: to_cxl_port(port->dev.parent))) |
2256 | return -ENODEV; |
2257 | |
2258 | if (!coordinates_valid(c: dport->coord)) |
2259 | return -EINVAL; |
2260 | |
2261 | for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) { |
2262 | c[i].read_bandwidth = dport->coord[i].read_bandwidth; |
2263 | c[i].write_bandwidth = dport->coord[i].write_bandwidth; |
2264 | } |
2265 | |
2266 | return 0; |
2267 | } |
2268 | |
2269 | /* for user tooling to ensure port disable work has completed */ |
2270 | static ssize_t flush_store(const struct bus_type *bus, const char *buf, size_t count) |
2271 | { |
2272 | if (sysfs_streq(s1: buf, s2: "1")) { |
2273 | flush_workqueue(cxl_bus_wq); |
2274 | return count; |
2275 | } |
2276 | |
2277 | return -EINVAL; |
2278 | } |
2279 | |
2280 | static BUS_ATTR_WO(flush); |
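/*
 * Example: the attribute appears as a write-only file on the bus object, so
 * user tooling that tears down ports can synchronize with the ordered
 * cxl_bus_wq by writing "1" to it:
 *
 *	echo 1 > /sys/bus/cxl/flush
 */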
2281 | |
2282 | static struct attribute *cxl_bus_attributes[] = { |
2283 | &bus_attr_flush.attr, |
2284 | NULL, |
2285 | }; |
2286 | |
2287 | static struct attribute_group cxl_bus_attribute_group = { |
2288 | .attrs = cxl_bus_attributes, |
2289 | }; |
2290 | |
2291 | static const struct attribute_group *cxl_bus_attribute_groups[] = { |
2292 | &cxl_bus_attribute_group, |
2293 | NULL, |
2294 | }; |
2295 | |
2296 | struct bus_type cxl_bus_type = { |
2297 | .name = "cxl", |
2298 | .uevent = cxl_bus_uevent, |
2299 | .match = cxl_bus_match, |
2300 | .probe = cxl_bus_probe, |
2301 | .remove = cxl_bus_remove, |
2302 | .bus_groups = cxl_bus_attribute_groups, |
2303 | }; |
2304 | EXPORT_SYMBOL_NS_GPL(cxl_bus_type, "CXL"); |
2305 | |
2306 | static struct dentry *cxl_debugfs; |
2307 | |
2308 | struct dentry *cxl_debugfs_create_dir(const char *dir) |
2309 | { |
2310 | return debugfs_create_dir(name: dir, parent: cxl_debugfs); |
2311 | } |
2312 | EXPORT_SYMBOL_NS_GPL(cxl_debugfs_create_dir, "CXL"); |
2313 | |
2314 | static __init int cxl_core_init(void) |
2315 | { |
2316 | int rc; |
2317 | |
2318 | cxl_debugfs = debugfs_create_dir(name: "cxl", NULL); |
2319 | |
2320 | if (einj_cxl_is_initialized()) |
2321 | debugfs_create_file("einj_types", 0400, cxl_debugfs, NULL, |
2322 | &einj_cxl_available_error_type_fops); |
2323 | |
2324 | cxl_mbox_init(); |
2325 | |
2326 | rc = cxl_memdev_init(); |
2327 | if (rc) |
2328 | return rc; |
2329 | |
2330 | cxl_bus_wq = alloc_ordered_workqueue("cxl_port", 0); |
2331 | if (!cxl_bus_wq) { |
2332 | rc = -ENOMEM; |
2333 | goto err_wq; |
2334 | } |
2335 | |
2336 | rc = bus_register(bus: &cxl_bus_type); |
2337 | if (rc) |
2338 | goto err_bus; |
2339 | |
2340 | rc = cxl_region_init(); |
2341 | if (rc) |
2342 | goto err_region; |
2343 | |
2344 | rc = cxl_ras_init(); |
2345 | if (rc) |
2346 | goto err_ras; |
2347 | |
2348 | return 0; |
2349 | |
2350 | err_ras: |
2351 | cxl_region_exit(); |
2352 | err_region: |
2353 | bus_unregister(bus: &cxl_bus_type); |
2354 | err_bus: |
2355 | destroy_workqueue(wq: cxl_bus_wq); |
2356 | err_wq: |
2357 | cxl_memdev_exit(); |
2358 | return rc; |
2359 | } |
2360 | |
2361 | static void cxl_core_exit(void) |
2362 | { |
2363 | cxl_ras_exit(); |
2364 | cxl_region_exit(); |
2365 | bus_unregister(bus: &cxl_bus_type); |
2366 | destroy_workqueue(wq: cxl_bus_wq); |
2367 | cxl_memdev_exit(); |
2368 | debugfs_remove_recursive(dentry: cxl_debugfs); |
2369 | } |
2370 | |
2371 | subsys_initcall(cxl_core_init); |
2372 | module_exit(cxl_core_exit); |
2373 | MODULE_DESCRIPTION("CXL: Core Compute Express Link support"); |
2374 | MODULE_LICENSE("GPL v2"); |
2375 | MODULE_IMPORT_NS("CXL"); |
2376 |