1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* Copyright(c) 2022 Intel Corporation. All rights reserved. */ |
3 | #include <linux/debugfs.h> |
4 | #include <linux/device.h> |
5 | #include <linux/module.h> |
6 | #include <linux/pci.h> |
7 | |
8 | #include "cxlmem.h" |
9 | #include "cxlpci.h" |
10 | |
11 | /** |
12 | * DOC: cxl mem |
13 | * |
14 | * CXL memory endpoint devices and switches are CXL capable devices that are |
15 | * participating in CXL.mem protocol. Their functionality builds on top of the |
16 | * CXL.io protocol that allows enumerating and configuring components via |
17 | * standard PCI mechanisms. |
18 | * |
19 | * The cxl_mem driver owns kicking off the enumeration of this CXL.mem |
20 | * capability. With the detection of a CXL capable endpoint, the driver will |
21 | * walk up to find the platform specific port it is connected to, and determine |
22 | * if there are intervening switches in the path. If there are switches, a |
23 | * secondary action is to enumerate those (implemented in cxl_core). Finally the |
24 | * cxl_mem driver adds the device it is bound to as a CXL endpoint-port for use |
25 | * in higher level operations. |
26 | */ |
27 | |
/*
 * devm action callback: drop the suspend-block reference taken by
 * cxl_mem_active_inc() at the end of cxl_mem_probe(). @data is unused.
 */
static void enable_suspend(void *data)
{
	cxl_mem_active_dec();
}
32 | |
/*
 * devm action callback: tear down the per-memdev debugfs directory created
 * in cxl_mem_probe(). @dentry is the directory returned by
 * cxl_debugfs_create_dir().
 */
static void remove_debugfs(void *dentry)
{
	debugfs_remove_recursive(dentry);
}
37 | |
38 | static int cxl_mem_dpa_show(struct seq_file *file, void *data) |
39 | { |
40 | struct device *dev = file->private; |
41 | struct cxl_memdev *cxlmd = to_cxl_memdev(dev); |
42 | |
43 | cxl_dpa_debug(file, cxlds: cxlmd->cxlds); |
44 | |
45 | return 0; |
46 | } |
47 | |
48 | static int devm_cxl_add_endpoint(struct device *host, struct cxl_memdev *cxlmd, |
49 | struct cxl_dport *parent_dport) |
50 | { |
51 | struct cxl_port *parent_port = parent_dport->port; |
52 | struct cxl_port *endpoint, *iter, *down; |
53 | int rc; |
54 | |
55 | /* |
56 | * Now that the path to the root is established record all the |
57 | * intervening ports in the chain. |
58 | */ |
59 | for (iter = parent_port, down = NULL; !is_cxl_root(port: iter); |
60 | down = iter, iter = to_cxl_port(dev: iter->dev.parent)) { |
61 | struct cxl_ep *ep; |
62 | |
63 | ep = cxl_ep_load(port: iter, cxlmd); |
64 | ep->next = down; |
65 | } |
66 | |
67 | /* Note: endpoint port component registers are derived from @cxlds */ |
68 | endpoint = devm_cxl_add_port(host, uport_dev: &cxlmd->dev, CXL_RESOURCE_NONE, |
69 | parent_dport); |
70 | if (IS_ERR(ptr: endpoint)) |
71 | return PTR_ERR(ptr: endpoint); |
72 | |
73 | rc = cxl_endpoint_autoremove(cxlmd, endpoint); |
74 | if (rc) |
75 | return rc; |
76 | |
77 | if (!endpoint->dev.driver) { |
78 | dev_err(&cxlmd->dev, "%s failed probe\n" , |
79 | dev_name(&endpoint->dev)); |
80 | return -ENXIO; |
81 | } |
82 | |
83 | return 0; |
84 | } |
85 | |
86 | static int cxl_debugfs_poison_inject(void *data, u64 dpa) |
87 | { |
88 | struct cxl_memdev *cxlmd = data; |
89 | |
90 | return cxl_inject_poison(cxlmd, dpa); |
91 | } |
92 | |
93 | DEFINE_DEBUGFS_ATTRIBUTE(cxl_poison_inject_fops, NULL, |
94 | cxl_debugfs_poison_inject, "%llx\n" ); |
95 | |
96 | static int cxl_debugfs_poison_clear(void *data, u64 dpa) |
97 | { |
98 | struct cxl_memdev *cxlmd = data; |
99 | |
100 | return cxl_clear_poison(cxlmd, dpa); |
101 | } |
102 | |
103 | DEFINE_DEBUGFS_ATTRIBUTE(cxl_poison_clear_fops, NULL, |
104 | cxl_debugfs_poison_clear, "%llx\n" ); |
105 | |
106 | static int cxl_mem_probe(struct device *dev) |
107 | { |
108 | struct cxl_memdev *cxlmd = to_cxl_memdev(dev); |
109 | struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds: cxlmd->cxlds); |
110 | struct cxl_dev_state *cxlds = cxlmd->cxlds; |
111 | struct device *endpoint_parent; |
112 | struct cxl_port *parent_port; |
113 | struct cxl_dport *dport; |
114 | struct dentry *dentry; |
115 | int rc; |
116 | |
117 | if (!cxlds->media_ready) |
118 | return -EBUSY; |
119 | |
120 | /* |
121 | * Someone is trying to reattach this device after it lost its port |
122 | * connection (an endpoint port previously registered by this memdev was |
123 | * disabled). This racy check is ok because if the port is still gone, |
124 | * no harm done, and if the port hierarchy comes back it will re-trigger |
125 | * this probe. Port rescan and memdev detach work share the same |
126 | * single-threaded workqueue. |
127 | */ |
128 | if (work_pending(&cxlmd->detach_work)) |
129 | return -EBUSY; |
130 | |
131 | dentry = cxl_debugfs_create_dir(dir: dev_name(dev)); |
132 | debugfs_create_devm_seqfile(dev, name: "dpamem" , parent: dentry, read_fn: cxl_mem_dpa_show); |
133 | |
134 | if (test_bit(CXL_POISON_ENABLED_INJECT, mds->poison.enabled_cmds)) |
135 | debugfs_create_file(name: "inject_poison" , mode: 0200, parent: dentry, data: cxlmd, |
136 | fops: &cxl_poison_inject_fops); |
137 | if (test_bit(CXL_POISON_ENABLED_CLEAR, mds->poison.enabled_cmds)) |
138 | debugfs_create_file(name: "clear_poison" , mode: 0200, parent: dentry, data: cxlmd, |
139 | fops: &cxl_poison_clear_fops); |
140 | |
141 | rc = devm_add_action_or_reset(dev, remove_debugfs, dentry); |
142 | if (rc) |
143 | return rc; |
144 | |
145 | rc = devm_cxl_enumerate_ports(cxlmd); |
146 | if (rc) |
147 | return rc; |
148 | |
149 | parent_port = cxl_mem_find_port(cxlmd, dport: &dport); |
150 | if (!parent_port) { |
151 | dev_err(dev, "CXL port topology not found\n" ); |
152 | return -ENXIO; |
153 | } |
154 | |
155 | if (dport->rch) |
156 | endpoint_parent = parent_port->uport_dev; |
157 | else |
158 | endpoint_parent = &parent_port->dev; |
159 | |
160 | cxl_setup_parent_dport(host: dev, dport); |
161 | |
162 | device_lock(dev: endpoint_parent); |
163 | if (!endpoint_parent->driver) { |
164 | dev_err(dev, "CXL port topology %s not enabled\n" , |
165 | dev_name(endpoint_parent)); |
166 | rc = -ENXIO; |
167 | goto unlock; |
168 | } |
169 | |
170 | rc = devm_cxl_add_endpoint(host: endpoint_parent, cxlmd, parent_dport: dport); |
171 | unlock: |
172 | device_unlock(dev: endpoint_parent); |
173 | put_device(dev: &parent_port->dev); |
174 | if (rc) |
175 | return rc; |
176 | |
177 | if (resource_size(res: &cxlds->pmem_res) && IS_ENABLED(CONFIG_CXL_PMEM)) { |
178 | rc = devm_cxl_add_nvdimm(cxlmd); |
179 | if (rc == -ENODEV) |
180 | dev_info(dev, "PMEM disabled by platform\n" ); |
181 | else |
182 | return rc; |
183 | } |
184 | |
185 | /* |
186 | * The kernel may be operating out of CXL memory on this device, |
187 | * there is no spec defined way to determine whether this device |
188 | * preserves contents over suspend, and there is no simple way |
189 | * to arrange for the suspend image to avoid CXL memory which |
190 | * would setup a circular dependency between PCI resume and save |
191 | * state restoration. |
192 | * |
193 | * TODO: support suspend when all the regions this device is |
194 | * hosting are locked and covered by the system address map, |
195 | * i.e. platform firmware owns restoring the HDM configuration |
196 | * that it locked. |
197 | */ |
198 | cxl_mem_active_inc(); |
199 | return devm_add_action_or_reset(dev, enable_suspend, NULL); |
200 | } |
201 | |
202 | static ssize_t trigger_poison_list_store(struct device *dev, |
203 | struct device_attribute *attr, |
204 | const char *buf, size_t len) |
205 | { |
206 | bool trigger; |
207 | int rc; |
208 | |
209 | if (kstrtobool(s: buf, res: &trigger) || !trigger) |
210 | return -EINVAL; |
211 | |
212 | rc = cxl_trigger_poison_list(cxlmd: to_cxl_memdev(dev)); |
213 | |
214 | return rc ? rc : len; |
215 | } |
216 | static DEVICE_ATTR_WO(trigger_poison_list); |
217 | |
218 | static umode_t cxl_mem_visible(struct kobject *kobj, struct attribute *a, int n) |
219 | { |
220 | struct device *dev = kobj_to_dev(kobj); |
221 | struct cxl_memdev *cxlmd = to_cxl_memdev(dev); |
222 | struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds: cxlmd->cxlds); |
223 | |
224 | if (a == &dev_attr_trigger_poison_list.attr) |
225 | if (!test_bit(CXL_POISON_ENABLED_LIST, |
226 | mds->poison.enabled_cmds)) |
227 | return 0; |
228 | |
229 | return a->mode; |
230 | } |
231 | |
/* sysfs attributes published on each cxl_memdev bound to this driver */
static struct attribute *cxl_mem_attrs[] = {
	&dev_attr_trigger_poison_list.attr,
	NULL
};

/* visibility is gated per-device by cxl_mem_visible() */
static struct attribute_group cxl_mem_group = {
	.attrs = cxl_mem_attrs,
	.is_visible = cxl_mem_visible,
};

/* generates cxl_mem_groups[] for the driver's dev_groups below */
__ATTRIBUTE_GROUPS(cxl_mem);
243 | |
/* Binds to CXL memory expander devices enumerated on the cxl bus */
static struct cxl_driver cxl_mem_driver = {
	.name = "cxl_mem" ,
	.probe = cxl_mem_probe,
	.id = CXL_DEVICE_MEMORY_EXPANDER,
	.drv = {
		.dev_groups = cxl_mem_groups,
	},
};

module_cxl_driver(cxl_mem_driver);
254 | |
255 | MODULE_LICENSE("GPL v2" ); |
256 | MODULE_IMPORT_NS(CXL); |
257 | MODULE_ALIAS_CXL(CXL_DEVICE_MEMORY_EXPANDER); |
258 | /* |
259 | * create_endpoint() wants to validate port driver attach immediately after |
260 | * endpoint registration. |
261 | */ |
262 | MODULE_SOFTDEP("pre: cxl_port" ); |
263 | |