// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "pmem.h"
#include "nd.h"

static DEFINE_IDA(dimm_ida);

/*
 * Retrieve bus and dimm handle and return if this bus supports
 * get_config_data commands
 */
int nvdimm_check_config_data(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->cmd_mask ||
	    !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
		if (test_bit(NDD_LABELING, &nvdimm->flags))
			return -ENXIO;
		else
			return -ENOTTY;
	}

	return 0;
}

static int validate_dimm(struct nvdimm_drvdata *ndd)
{
	int rc;

	if (!ndd)
		return -EINVAL;

	rc = nvdimm_check_config_data(ndd->dev);
	if (rc)
		dev_dbg(ndd->dev, "%ps: %s error: %d\n",
				__builtin_return_address(0), __func__, rc);
	return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm to initialize
 *
 * Returns: %0 if the area is already valid, -errno on error
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
	struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc;
	int rc = validate_dimm(ndd);
	int cmd_rc = 0;

	if (rc)
		return rc;

	if (cmd->config_size)
		return 0; /* already valid */

	memset(cmd, 0, sizeof(*cmd));
	nd_desc = nvdimm_bus->nd_desc;
	rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
			ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
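
/*
 * Illustrative call sequence (a sketch of an assumed caller, not code
 * from this file): label handling is expected to size the config area
 * first and then read it, e.g. with a caller-provided @buf of at least
 * ndd->nsarea.config_size bytes:
 *
 *	rc = nvdimm_init_nsarea(ndd);
 *	if (rc == 0 && ndd->nsarea.config_size)
 *		rc = nvdimm_get_config_data(ndd, buf, 0,
 *				ndd->nsarea.config_size);
 */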

int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
			   size_t offset, size_t len)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nd_cmd_get_config_data_hdr *cmd;
	size_t max_cmd_size, buf_offset;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len;
	     len -= cmd->in_length, buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);

		cmd_size = sizeof(*cmd) + cmd->in_length;

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_GET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}

		/* out_buf should be valid, copy it into our output buffer */
		memcpy(buf + buf_offset, cmd->out_buf, cmd->in_length);
	}
	kvfree(cmd);

	return rc;
}

int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
		void *buf, size_t len)
{
	size_t max_cmd_size, buf_offset;
	struct nd_cmd_set_config_hdr *cmd;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len; len -= cmd->in_length,
			buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);
		memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

		/* status is output in the last 4-bytes of the command buffer */
		cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}
	}
	kvfree(cmd);

	return rc;
}
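
/*
 * Layout of the ND_CMD_SET_CONFIG_DATA buffer built above, sketched for
 * a single loop iteration (offsets relative to @cmd):
 *
 *	+-------------------------------+ 0
 *	| struct nd_cmd_set_config_hdr	| in_offset, in_length
 *	+-------------------------------+ sizeof(*cmd)
 *	| in_buf[in_length]		| payload chunk, capped at max_xfer
 *	+-------------------------------+ sizeof(*cmd) + in_length
 *	| u32 status			| output, last 4 bytes of the buffer
 *	+-------------------------------+
 */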

void nvdimm_set_labeling(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LABELING, &nvdimm->flags);
}

void nvdimm_set_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LOCKED, &nvdimm->flags);
}

void nvdimm_clear_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	clear_bit(NDD_LOCKED, &nvdimm->flags);
}

static void nvdimm_release(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	ida_free(&dimm_ida, nvdimm->id);
	kfree(nvdimm);
}

struct nvdimm *to_nvdimm(struct device *dev)
{
	struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

	WARN_ON(!is_nvdimm(dev));
	return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

void nvdimm_drvdata_release(struct kref *kref)
{
	struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
	struct device *dev = ndd->dev;
	struct resource *res, *_r;

	dev_dbg(dev, "trace\n");
	nvdimm_bus_lock(dev);
	for_each_dpa_resource_safe(ndd, res, _r)
		nvdimm_free_dpa(ndd, res);
	nvdimm_bus_unlock(dev);

	kvfree(ndd->data);
	kfree(ndd);
	put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
	kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
	if (ndd)
		kref_put(&ndd->kref, nvdimm_drvdata_release);
}
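
/*
 * Reference-count pattern for the drvdata (illustrative sketch of an
 * assumed caller, not code from this file):
 *
 *	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
 *
 *	get_ndd(ndd);
 *	... access ndd->data and dpa resources ...
 *	put_ndd(ndd);	// final put invokes nvdimm_drvdata_release()
 */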

const char *nvdimm_name(struct nvdimm *nvdimm)
{
	return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
	return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
	return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
	if (nvdimm)
		return nvdimm->provider_data;
	return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int cmd, len = 0;

	if (!nvdimm->cmd_mask)
		return sprintf(buf, "\n");

	for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%s%s\n",
			test_bit(NDD_LABELING, &nvdimm->flags) ? "label " : "",
			test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	/*
	 * The state may be in the process of changing, userspace should
	 * quiesce probing if it wants a static answer
	 */
	nvdimm_bus_lock(dev);
	nvdimm_bus_unlock(dev);
	return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
			? "active" : "idle");
}
static DEVICE_ATTR_RO(state);

static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf)
{
	struct device *dev;
	ssize_t rc;
	u32 nfree;

	if (!ndd)
		return -ENXIO;

	dev = ndd->dev;
	nvdimm_bus_lock(dev);
	nfree = nd_label_nfree(ndd);
	if (nfree - 1 > nfree) {
		dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
		nfree = 0;
	} else
		nfree--;
	rc = sprintf(buf, "%d\n", nfree);
	nvdimm_bus_unlock(dev);
	return rc;
}
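
/*
 * Userspace view (illustrative, with an assumed device name and count):
 * the value computed above surfaces as a per-dimm sysfs attribute, e.g.:
 *
 *	$ cat /sys/bus/nd/devices/nmem0/available_slots
 *	1015
 *
 * The "nfree - 1 > nfree" test above is an unsigned-underflow guard for
 * the decrement that holds one label slot back; tripping it means the
 * last slot was unexpectedly consumed, hence the WARN.
 */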

static ssize_t available_slots_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	ssize_t rc;

	device_lock(dev);
	rc = __available_slots_show(dev_get_drvdata(dev), buf);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(available_slots);

static ssize_t security_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	/*
	 * For the test version we need to poll the "hardware" in order
	 * to get the updated status for unlock testing.
	 */
	if (IS_ENABLED(CONFIG_NVDIMM_SECURITY_TEST))
		nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);

	if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
		return sprintf(buf, "overwrite\n");
	if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
		return sprintf(buf, "disabled\n");
	if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags))
		return sprintf(buf, "unlocked\n");
	if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags))
		return sprintf(buf, "locked\n");
	return -ENOTTY;
}

static ssize_t frozen_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", test_bit(NVDIMM_SECURITY_FROZEN,
				&nvdimm->sec.flags));
}
static DEVICE_ATTR_RO(frozen);

static ssize_t security_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	ssize_t rc;

	/*
	 * Require all userspace triggered security management to be
	 * done while probing is idle and the DIMM is not in active use
	 * in any region.
	 */
	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = nvdimm_security_store(dev, buf, len);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(security);

static struct attribute *nvdimm_attributes[] = {
	&dev_attr_state.attr,
	&dev_attr_flags.attr,
	&dev_attr_commands.attr,
	&dev_attr_available_slots.attr,
	&dev_attr_security.attr,
	&dev_attr_frozen.attr,
	NULL,
};

static umode_t nvdimm_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (a != &dev_attr_security.attr && a != &dev_attr_frozen.attr)
		return a->mode;
	if (!nvdimm->sec.flags)
		return 0;

	if (a == &dev_attr_security.attr) {
		/* Are there any state mutation ops (make writable)? */
		if (nvdimm->sec.ops->freeze || nvdimm->sec.ops->disable
				|| nvdimm->sec.ops->change_key
				|| nvdimm->sec.ops->erase
				|| nvdimm->sec.ops->overwrite)
			return a->mode;
		return 0444;
	}

	if (nvdimm->sec.ops->freeze)
		return a->mode;
	return 0;
}

static const struct attribute_group nvdimm_attribute_group = {
	.attrs = nvdimm_attributes,
	.is_visible = nvdimm_visible,
};

static ssize_t result_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_result result;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	nvdimm_bus_lock(dev);
	result = nvdimm->fw_ops->activate_result(nvdimm);
	nvdimm_bus_unlock(dev);

	switch (result) {
	case NVDIMM_FWA_RESULT_NONE:
		return sprintf(buf, "none\n");
	case NVDIMM_FWA_RESULT_SUCCESS:
		return sprintf(buf, "success\n");
	case NVDIMM_FWA_RESULT_FAIL:
		return sprintf(buf, "fail\n");
	case NVDIMM_FWA_RESULT_NOTSTAGED:
		return sprintf(buf, "not_staged\n");
	case NVDIMM_FWA_RESULT_NEEDRESET:
		return sprintf(buf, "need_reset\n");
	default:
		return -ENXIO;
	}
}
static DEVICE_ATTR_ADMIN_RO(result);

static ssize_t activate_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_state state;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	nvdimm_bus_lock(dev);
	state = nvdimm->fw_ops->activate_state(nvdimm);
	nvdimm_bus_unlock(dev);

	switch (state) {
	case NVDIMM_FWA_IDLE:
		return sprintf(buf, "idle\n");
	case NVDIMM_FWA_BUSY:
		return sprintf(buf, "busy\n");
	case NVDIMM_FWA_ARMED:
		return sprintf(buf, "armed\n");
	default:
		return -ENXIO;
	}
}

static ssize_t activate_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_trigger arg;
	int rc;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	if (sysfs_streq(buf, "arm"))
		arg = NVDIMM_FWA_ARM;
	else if (sysfs_streq(buf, "disarm"))
		arg = NVDIMM_FWA_DISARM;
	else
		return -EINVAL;

	nvdimm_bus_lock(dev);
	rc = nvdimm->fw_ops->arm(nvdimm, arg);
	nvdimm_bus_unlock(dev);

	if (rc < 0)
		return rc;
	return len;
}
static DEVICE_ATTR_ADMIN_RW(activate);

static struct attribute *nvdimm_firmware_attributes[] = {
	&dev_attr_activate.attr,
	&dev_attr_result.attr,
	NULL,
};

static umode_t nvdimm_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_capability cap;

	if (!nd_desc->fw_ops)
		return 0;
	if (!nvdimm->fw_ops)
		return 0;

	nvdimm_bus_lock(dev);
	cap = nd_desc->fw_ops->capability(nd_desc);
	nvdimm_bus_unlock(dev);

	if (cap < NVDIMM_FWA_CAP_QUIESCE)
		return 0;

	return a->mode;
}

static const struct attribute_group nvdimm_firmware_attribute_group = {
	.name = "firmware",
	.attrs = nvdimm_firmware_attributes,
	.is_visible = nvdimm_firmware_visible,
};

static const struct attribute_group *nvdimm_attribute_groups[] = {
	&nd_device_attribute_group,
	&nvdimm_attribute_group,
	&nvdimm_firmware_attribute_group,
	NULL,
};

static const struct device_type nvdimm_device_type = {
	.name = "nvdimm",
	.release = nvdimm_release,
	.groups = nvdimm_attribute_groups,
};

bool is_nvdimm(const struct device *dev)
{
	return dev->type == &nvdimm_device_type;
}

static struct lock_class_key nvdimm_key;

struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
		void *provider_data, const struct attribute_group **groups,
		unsigned long flags, unsigned long cmd_mask, int num_flush,
		struct resource *flush_wpq, const char *dimm_id,
		const struct nvdimm_security_ops *sec_ops,
		const struct nvdimm_fw_ops *fw_ops)
{
	struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
	struct device *dev;

	if (!nvdimm)
		return NULL;

	nvdimm->id = ida_alloc(&dimm_ida, GFP_KERNEL);
	if (nvdimm->id < 0) {
		kfree(nvdimm);
		return NULL;
	}

	nvdimm->dimm_id = dimm_id;
	nvdimm->provider_data = provider_data;
	nvdimm->flags = flags;
	nvdimm->cmd_mask = cmd_mask;
	nvdimm->num_flush = num_flush;
	nvdimm->flush_wpq = flush_wpq;
	atomic_set(&nvdimm->busy, 0);
	dev = &nvdimm->dev;
	dev_set_name(dev, "nmem%d", nvdimm->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = &nvdimm_device_type;
	dev->devt = MKDEV(nvdimm_major, nvdimm->id);
	dev->groups = groups;
	nvdimm->sec.ops = sec_ops;
	nvdimm->fw_ops = fw_ops;
	nvdimm->sec.overwrite_tmo = 0;
	INIT_DELAYED_WORK(&nvdimm->dwork, nvdimm_security_overwrite_query);
	/*
	 * Security state must be initialized before device_add() for
	 * attribute visibility.
	 */
	/* get security state and extended (master) state */
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &nvdimm_key);
	if (test_bit(NDD_REGISTER_SYNC, &flags))
		nd_device_register_sync(dev);
	else
		nd_device_register(dev);

	return nvdimm;
}
EXPORT_SYMBOL_GPL(__nvdimm_create);

void nvdimm_delete(struct nvdimm *nvdimm)
{
	struct device *dev = &nvdimm->dev;
	bool dev_put = false;

	/* We are shutting down. Make state frozen artificially. */
	nvdimm_bus_lock(dev);
	set_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags);
	if (test_and_clear_bit(NDD_WORK_PENDING, &nvdimm->flags))
		dev_put = true;
	nvdimm_bus_unlock(dev);
	cancel_delayed_work_sync(&nvdimm->dwork);
	if (dev_put)
		put_device(dev);
	nd_device_unregister(dev, ND_SYNC);
}
EXPORT_SYMBOL_GPL(nvdimm_delete);

static void shutdown_security_notify(void *data)
{
	struct nvdimm *nvdimm = data;

	sysfs_put(nvdimm->sec.overwrite_state);
}

int nvdimm_security_setup_events(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->sec.flags || !nvdimm->sec.ops
			|| !nvdimm->sec.ops->overwrite)
		return 0;
	nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security");
	if (!nvdimm->sec.overwrite_state)
		return -ENOMEM;

	return devm_add_action_or_reset(dev, shutdown_security_notify, nvdimm);
}
EXPORT_SYMBOL_GPL(nvdimm_security_setup_events);

int nvdimm_in_overwrite(struct nvdimm *nvdimm)
{
	return test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_in_overwrite);

int nvdimm_security_freeze(struct nvdimm *nvdimm)
{
	int rc;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->freeze)
		return -EOPNOTSUPP;

	if (!nvdimm->sec.flags)
		return -EIO;

	if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
		dev_warn(&nvdimm->dev, "Overwrite operation in progress.\n");
		return -EBUSY;
	}

	rc = nvdimm->sec.ops->freeze(nvdimm);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);

	return rc;
}

static unsigned long dpa_align(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;

	if (dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev),
			"bus lock required for capacity provision\n"))
		return 0;
	if (dev_WARN_ONCE(dev, !nd_region->ndr_mappings || nd_region->align
			% nd_region->ndr_mappings,
			"invalid region align %#lx mappings: %d\n",
			nd_region->align, nd_region->ndr_mappings))
		return 0;
	return nd_region->align / nd_region->ndr_mappings;
}
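
/*
 * Worked example (illustrative numbers only): a region with a 16M
 * allocation alignment interleaved across 4 dimms yields a 16M / 4 == 4M
 * per-dimm allocation unit, i.e. every 16M of region capacity consumes
 * 4M of device-physical-address (dpa) space on each mapping.
 */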

/**
 * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
 *			contiguous unallocated dpa range.
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 *
 * Returns: %0 if there is an alignment error, otherwise the max
 * unallocated dpa range
 */
resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
					   struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nvdimm_bus *nvdimm_bus;
	resource_size_t max = 0;
	struct resource *res;
	unsigned long align;

	/* if a dimm is disabled the available capacity is zero */
	if (!ndd)
		return 0;

	align = dpa_align(nd_region);
	if (!align)
		return 0;

	nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
		return 0;
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		if (strcmp(res->name, "pmem-reserve") != 0)
			continue;
		/* trim free space relative to current alignment setting */
		start = ALIGN(res->start, align);
		end = ALIGN_DOWN(res->end + 1, align) - 1;
		if (end < start)
			continue;
		if (end - start + 1 > max)
			max = end - start + 1;
	}
	release_free_pmem(nvdimm_bus, nd_mapping);
	return max;
}

/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_mapping: container of dpa-resource-root + labels
 * @nd_region: constrain available space check to this reference region
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set.
 *
 * Returns: %0 if there is an alignment error, otherwise the unallocated dpa
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
				      struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	resource_size_t map_start, map_end, busy = 0;
	struct resource *res;
	unsigned long align;

	if (!ndd)
		return 0;

	align = dpa_align(nd_region);
	if (!align)
		return 0;

	map_start = nd_mapping->start;
	map_end = map_start + nd_mapping->size - 1;
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		start = ALIGN_DOWN(res->start, align);
		end = ALIGN(res->end + 1, align) - 1;
		if (start >= map_start && start < map_end) {
			if (end > map_end) {
				nd_dbg_dpa(nd_region, ndd, res,
					   "misaligned to iset\n");
				return 0;
			}
			busy += end - start + 1;
		} else if (end >= map_start && end <= map_end) {
			busy += end - start + 1;
		} else if (map_start > start && map_start < end) {
			/* total eclipse of the mapping */
			busy += nd_mapping->size;
		}
	}

	if (busy < nd_mapping->size)
		return ALIGN_DOWN(nd_mapping->size - busy, align);
	return 0;
}
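
/*
 * Sketch of the overlap cases accounted above (mapping shown as |====|,
 * an aligned allocated resource R as |----|):
 *
 *	R starts inside the mapping:	map:    |==========|
 *					R:          |----|
 *	R ends inside the mapping:	map:        |==========|
 *					R:      |----|
 *	R eclipses the mapping:		map:        |====|
 *					R:      |------------|
 *
 * In the eclipse case the entire mapping size is counted as busy.
 */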

void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	kfree(res->name);
	__release_region(&ndd->dpa, res->start, resource_size(res));
}

struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n)
{
	char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
	struct resource *res;

	if (!name)
		return NULL;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	res = __request_region(&ndd->dpa, start, n, name, 0);
	if (!res)
		kfree(name);
	return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form pmem-<human readable uuid>
 *
 * Returns: sum of the dpa allocated to the label_id
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id)
{
	resource_size_t allocated = 0;
	struct resource *res;

	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id->id) == 0)
			allocated += resource_size(res);

	return allocated;
}

static int count_dimms(struct device *dev, void *c)
{
	int *count = c;

	if (is_nvdimm(dev))
		(*count)++;
	return 0;
}

int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
	int count = 0;

	/* Flush any possible dimm registration failures */
	nd_synchronize();

	device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
	dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
	if (count != dimm_count)
		return -ENXIO;
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);

void __exit nvdimm_devs_exit(void)
{
	ida_destroy(&dimm_ida);
}
891 | |