1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * Core registration and callback routines for MTD |
4 | * drivers and users. |
5 | * |
6 | * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> |
7 | * Copyright © 2006 Red Hat UK Limited |
8 | */ |
9 | |
10 | #include <linux/module.h> |
11 | #include <linux/kernel.h> |
12 | #include <linux/ptrace.h> |
13 | #include <linux/seq_file.h> |
14 | #include <linux/string.h> |
15 | #include <linux/timer.h> |
16 | #include <linux/major.h> |
17 | #include <linux/fs.h> |
18 | #include <linux/err.h> |
19 | #include <linux/ioctl.h> |
20 | #include <linux/init.h> |
21 | #include <linux/of.h> |
22 | #include <linux/proc_fs.h> |
23 | #include <linux/idr.h> |
24 | #include <linux/backing-dev.h> |
25 | #include <linux/gfp.h> |
26 | #include <linux/random.h> |
27 | #include <linux/slab.h> |
28 | #include <linux/reboot.h> |
29 | #include <linux/leds.h> |
30 | #include <linux/debugfs.h> |
31 | #include <linux/nvmem-provider.h> |
32 | #include <linux/root_dev.h> |
33 | #include <linux/error-injection.h> |
34 | |
35 | #include <linux/mtd/mtd.h> |
36 | #include <linux/mtd/partitions.h> |
37 | |
38 | #include "mtdcore.h" |
39 | |
40 | struct backing_dev_info *mtd_bdi; |
41 | |
42 | #ifdef CONFIG_PM_SLEEP |
43 | |
44 | static int mtd_cls_suspend(struct device *dev) |
45 | { |
46 | struct mtd_info *mtd = dev_get_drvdata(dev); |
47 | |
48 | return mtd ? mtd_suspend(mtd) : 0; |
49 | } |
50 | |
51 | static int mtd_cls_resume(struct device *dev) |
52 | { |
53 | struct mtd_info *mtd = dev_get_drvdata(dev); |
54 | |
55 | if (mtd) |
56 | mtd_resume(mtd); |
57 | return 0; |
58 | } |
59 | |
60 | static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume); |
61 | #define MTD_CLS_PM_OPS (&mtd_cls_pm_ops) |
62 | #else |
63 | #define MTD_CLS_PM_OPS NULL |
64 | #endif |
65 | |
66 | static struct class mtd_class = { |
67 | .name = "mtd", |
68 | .pm = MTD_CLS_PM_OPS, |
69 | }; |
70 | |
71 | static DEFINE_IDR(mtd_idr); |
72 | |
73 | /* These are exported solely for the purpose of mtd_blkdevs.c. You |
74 | should not use them for _anything_ else */ |
75 | DEFINE_MUTEX(mtd_table_mutex); |
76 | EXPORT_SYMBOL_GPL(mtd_table_mutex); |
77 | |
78 | struct mtd_info *__mtd_next_device(int i) |
79 | { |
80 | return idr_get_next(&mtd_idr, &i); |
81 | } |
82 | EXPORT_SYMBOL_GPL(__mtd_next_device); |
83 | |
84 | static LIST_HEAD(mtd_notifiers); |
85 | |
86 | |
87 | #define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2) |
88 | |
89 | /* REVISIT once MTD uses the driver model better, whoever allocates |
90 | * the mtd_info will probably want to use the release() hook... |
91 | */ |
92 | static void mtd_release(struct device *dev) |
93 | { |
94 | struct mtd_info *mtd = dev_get_drvdata(dev); |
95 | dev_t index = MTD_DEVT(mtd->index); |
96 | |
97 | idr_remove(&mtd_idr, mtd->index); |
98 | of_node_put(mtd_get_of_node(mtd)); |
99 | |
100 | if (mtd_is_partition(mtd)) |
101 | release_mtd_partition(mtd); |
102 | |
103 | /* remove /dev/mtdXro node */ |
104 | device_destroy(&mtd_class, index + 1); |
105 | } |
106 | |
107 | static void mtd_device_release(struct kref *kref) |
108 | { |
109 | struct mtd_info *mtd = container_of(kref, struct mtd_info, refcnt); |
110 | bool is_partition = mtd_is_partition(mtd); |
111 | |
112 | debugfs_remove_recursive(mtd->dbg.dfs_dir); |
113 | |
114 | /* Try to remove the NVMEM provider */ |
115 | nvmem_unregister(mtd->nvmem); |
116 | |
117 | device_unregister(&mtd->dev); |
118 | |
119 | /* |
120 | * Clear dev so mtd can be safely re-registered later if desired. |
121 | * Should not be done for partitions, as they were already |
122 | * destroyed in device_unregister(). |
123 | */ |
124 | if (!is_partition) |
125 | memset(&mtd->dev, 0, sizeof(mtd->dev)); |
126 | |
127 | module_put(THIS_MODULE); |
128 | } |
129 | |
130 | #define MTD_DEVICE_ATTR_RO(name) \ |
131 | static DEVICE_ATTR(name, 0444, mtd_##name##_show, NULL) |
132 | |
133 | #define MTD_DEVICE_ATTR_RW(name) \ |
134 | static DEVICE_ATTR(name, 0644, mtd_##name##_show, mtd_##name##_store) |
135 | |
136 | static ssize_t mtd_type_show(struct device *dev, |
137 | struct device_attribute *attr, char *buf) |
138 | { |
139 | struct mtd_info *mtd = dev_get_drvdata(dev); |
140 | char *type; |
141 | |
142 | switch (mtd->type) { |
143 | case MTD_ABSENT: |
144 | type = "absent"; |
145 | break; |
146 | case MTD_RAM: |
147 | type = "ram"; |
148 | break; |
149 | case MTD_ROM: |
150 | type = "rom"; |
151 | break; |
152 | case MTD_NORFLASH: |
153 | type = "nor"; |
154 | break; |
155 | case MTD_NANDFLASH: |
156 | type = "nand"; |
157 | break; |
158 | case MTD_DATAFLASH: |
159 | type = "dataflash"; |
160 | break; |
161 | case MTD_UBIVOLUME: |
162 | type = "ubi"; |
163 | break; |
164 | case MTD_MLCNANDFLASH: |
165 | type = "mlc-nand"; |
166 | break; |
167 | default: |
168 | type = "unknown"; |
169 | } |
170 | |
171 | return sysfs_emit(buf, "%s\n", type); |
172 | } |
173 | MTD_DEVICE_ATTR_RO(type); |
174 | |
175 | static ssize_t mtd_flags_show(struct device *dev, |
176 | struct device_attribute *attr, char *buf) |
177 | { |
178 | struct mtd_info *mtd = dev_get_drvdata(dev); |
179 | |
180 | return sysfs_emit(buf, "0x%lx\n", (unsigned long)mtd->flags); |
181 | } |
182 | MTD_DEVICE_ATTR_RO(flags); |
183 | |
184 | static ssize_t mtd_size_show(struct device *dev, |
185 | struct device_attribute *attr, char *buf) |
186 | { |
187 | struct mtd_info *mtd = dev_get_drvdata(dev); |
188 | |
189 | return sysfs_emit(buf, "%llu\n", (unsigned long long)mtd->size); |
190 | } |
191 | MTD_DEVICE_ATTR_RO(size); |
192 | |
193 | static ssize_t mtd_erasesize_show(struct device *dev, |
194 | struct device_attribute *attr, char *buf) |
195 | { |
196 | struct mtd_info *mtd = dev_get_drvdata(dev); |
197 | |
198 | return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->erasesize); |
199 | } |
200 | MTD_DEVICE_ATTR_RO(erasesize); |
201 | |
202 | static ssize_t mtd_writesize_show(struct device *dev, |
203 | struct device_attribute *attr, char *buf) |
204 | { |
205 | struct mtd_info *mtd = dev_get_drvdata(dev); |
206 | |
207 | return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->writesize); |
208 | } |
209 | MTD_DEVICE_ATTR_RO(writesize); |
210 | |
211 | static ssize_t mtd_subpagesize_show(struct device *dev, |
212 | struct device_attribute *attr, char *buf) |
213 | { |
214 | struct mtd_info *mtd = dev_get_drvdata(dev); |
215 | unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft; |
216 | |
217 | return sysfs_emit(buf, "%u\n", subpagesize); |
218 | } |
219 | MTD_DEVICE_ATTR_RO(subpagesize); |
220 | |
221 | static ssize_t mtd_oobsize_show(struct device *dev, |
222 | struct device_attribute *attr, char *buf) |
223 | { |
224 | struct mtd_info *mtd = dev_get_drvdata(dev); |
225 | |
226 | return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->oobsize); |
227 | } |
228 | MTD_DEVICE_ATTR_RO(oobsize); |
229 | |
230 | static ssize_t mtd_oobavail_show(struct device *dev, |
231 | struct device_attribute *attr, char *buf) |
232 | { |
233 | struct mtd_info *mtd = dev_get_drvdata(dev); |
234 | |
235 | return sysfs_emit(buf, "%u\n", mtd->oobavail); |
236 | } |
237 | MTD_DEVICE_ATTR_RO(oobavail); |
238 | |
239 | static ssize_t mtd_numeraseregions_show(struct device *dev, |
240 | struct device_attribute *attr, char *buf) |
241 | { |
242 | struct mtd_info *mtd = dev_get_drvdata(dev); |
243 | |
244 | return sysfs_emit(buf, "%u\n", mtd->numeraseregions); |
245 | } |
246 | MTD_DEVICE_ATTR_RO(numeraseregions); |
247 | |
248 | static ssize_t mtd_name_show(struct device *dev, |
249 | struct device_attribute *attr, char *buf) |
250 | { |
251 | struct mtd_info *mtd = dev_get_drvdata(dev); |
252 | |
253 | return sysfs_emit(buf, "%s\n", mtd->name); |
254 | } |
255 | MTD_DEVICE_ATTR_RO(name); |
256 | |
257 | static ssize_t mtd_ecc_strength_show(struct device *dev, |
258 | struct device_attribute *attr, char *buf) |
259 | { |
260 | struct mtd_info *mtd = dev_get_drvdata(dev); |
261 | |
262 | return sysfs_emit(buf, "%u\n", mtd->ecc_strength); |
263 | } |
264 | MTD_DEVICE_ATTR_RO(ecc_strength); |
265 | |
266 | static ssize_t mtd_bitflip_threshold_show(struct device *dev, |
267 | struct device_attribute *attr, |
268 | char *buf) |
269 | { |
270 | struct mtd_info *mtd = dev_get_drvdata(dev); |
271 | |
272 | return sysfs_emit(buf, "%u\n", mtd->bitflip_threshold); |
273 | } |
274 | |
275 | static ssize_t mtd_bitflip_threshold_store(struct device *dev, |
276 | struct device_attribute *attr, |
277 | const char *buf, size_t count) |
278 | { |
279 | struct mtd_info *mtd = dev_get_drvdata(dev); |
280 | unsigned int bitflip_threshold; |
281 | int retval; |
282 | |
283 | retval = kstrtouint(buf, 0, &bitflip_threshold); |
284 | if (retval) |
285 | return retval; |
286 | |
287 | mtd->bitflip_threshold = bitflip_threshold; |
288 | return count; |
289 | } |
290 | MTD_DEVICE_ATTR_RW(bitflip_threshold); |
291 | |
292 | static ssize_t mtd_ecc_step_size_show(struct device *dev, |
293 | struct device_attribute *attr, char *buf) |
294 | { |
295 | struct mtd_info *mtd = dev_get_drvdata(dev); |
296 | |
297 | return sysfs_emit(buf, "%u\n", mtd->ecc_step_size); |
298 | |
299 | } |
300 | MTD_DEVICE_ATTR_RO(ecc_step_size); |
301 | |
302 | static ssize_t mtd_corrected_bits_show(struct device *dev, |
303 | struct device_attribute *attr, char *buf) |
304 | { |
305 | struct mtd_info *mtd = dev_get_drvdata(dev); |
306 | struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats; |
307 | |
308 | return sysfs_emit(buf, "%u\n", ecc_stats->corrected); |
309 | } |
310 | MTD_DEVICE_ATTR_RO(corrected_bits); /* ecc stats corrected */ |
311 | |
312 | static ssize_t mtd_ecc_failures_show(struct device *dev, |
313 | struct device_attribute *attr, char *buf) |
314 | { |
315 | struct mtd_info *mtd = dev_get_drvdata(dev); |
316 | struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats; |
317 | |
318 | return sysfs_emit(buf, "%u\n", ecc_stats->failed); |
319 | } |
320 | MTD_DEVICE_ATTR_RO(ecc_failures); /* ecc stats errors */ |
321 | |
322 | static ssize_t mtd_bad_blocks_show(struct device *dev, |
323 | struct device_attribute *attr, char *buf) |
324 | { |
325 | struct mtd_info *mtd = dev_get_drvdata(dev); |
326 | struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats; |
327 | |
328 | return sysfs_emit(buf, "%u\n", ecc_stats->badblocks); |
329 | } |
330 | MTD_DEVICE_ATTR_RO(bad_blocks); |
331 | |
332 | static ssize_t mtd_bbt_blocks_show(struct device *dev, |
333 | struct device_attribute *attr, char *buf) |
334 | { |
335 | struct mtd_info *mtd = dev_get_drvdata(dev); |
336 | struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats; |
337 | |
338 | return sysfs_emit(buf, "%u\n", ecc_stats->bbtblocks); |
339 | } |
340 | MTD_DEVICE_ATTR_RO(bbt_blocks); |
341 | |
342 | static struct attribute *mtd_attrs[] = { |
343 | &dev_attr_type.attr, |
344 | &dev_attr_flags.attr, |
345 | &dev_attr_size.attr, |
346 | &dev_attr_erasesize.attr, |
347 | &dev_attr_writesize.attr, |
348 | &dev_attr_subpagesize.attr, |
349 | &dev_attr_oobsize.attr, |
350 | &dev_attr_oobavail.attr, |
351 | &dev_attr_numeraseregions.attr, |
352 | &dev_attr_name.attr, |
353 | &dev_attr_ecc_strength.attr, |
354 | &dev_attr_ecc_step_size.attr, |
355 | &dev_attr_corrected_bits.attr, |
356 | &dev_attr_ecc_failures.attr, |
357 | &dev_attr_bad_blocks.attr, |
358 | &dev_attr_bbt_blocks.attr, |
359 | &dev_attr_bitflip_threshold.attr, |
360 | NULL, |
361 | }; |
362 | ATTRIBUTE_GROUPS(mtd); |
363 | |
364 | static const struct device_type mtd_devtype = { |
365 | .name = "mtd", |
366 | .groups = mtd_groups, |
367 | .release = mtd_release, |
368 | }; |
369 | |
370 | static bool mtd_expert_analysis_mode; |
371 | |
372 | #ifdef CONFIG_DEBUG_FS |
373 | bool mtd_check_expert_analysis_mode(void) |
374 | { |
375 | const char *mtd_expert_analysis_warning = |
376 | "Bad block checks have been entirely disabled.\n" |
377 | "This is only reserved for post-mortem forensics and debug purposes.\n" |
378 | "Never enable this mode if you do not know what you are doing!\n"; |
379 | |
380 | return WARN_ONCE(mtd_expert_analysis_mode, mtd_expert_analysis_warning); |
381 | } |
382 | EXPORT_SYMBOL_GPL(mtd_check_expert_analysis_mode); |
383 | #endif |
384 | |
385 | static struct dentry *dfs_dir_mtd; |
386 | |
387 | static void mtd_debugfs_populate(struct mtd_info *mtd) |
388 | { |
389 | struct device *dev = &mtd->dev; |
390 | |
391 | if (IS_ERR_OR_NULL(dfs_dir_mtd)) |
392 | return; |
393 | |
394 | mtd->dbg.dfs_dir = debugfs_create_dir(dev_name(dev), dfs_dir_mtd); |
395 | } |
396 | |
397 | #ifndef CONFIG_MMU |
398 | unsigned mtd_mmap_capabilities(struct mtd_info *mtd) |
399 | { |
400 | switch (mtd->type) { |
401 | case MTD_RAM: |
402 | return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC | |
403 | NOMMU_MAP_READ | NOMMU_MAP_WRITE; |
404 | case MTD_ROM: |
405 | return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC | |
406 | NOMMU_MAP_READ; |
407 | default: |
408 | return NOMMU_MAP_COPY; |
409 | } |
410 | } |
411 | EXPORT_SYMBOL_GPL(mtd_mmap_capabilities); |
412 | #endif |
413 | |
414 | static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state, |
415 | void *cmd) |
416 | { |
417 | struct mtd_info *mtd; |
418 | |
419 | mtd = container_of(n, struct mtd_info, reboot_notifier); |
420 | mtd->_reboot(mtd); |
421 | |
422 | return NOTIFY_DONE; |
423 | } |
424 | |
425 | /** |
426 | * mtd_wunit_to_pairing_info - get pairing information of a wunit |
427 | * @mtd: pointer to new MTD device info structure |
428 | * @wunit: write unit we are interested in |
429 | * @info: returned pairing information |
430 | * |
431 | * Retrieve pairing information associated with the wunit. |
432 | * This is mainly useful when dealing with MLC/TLC NANDs where pages can be |
433 | * paired together, and where programming a page may influence the page it is |
434 | * paired with. |
435 | * The notion of page is replaced by the term wunit (write-unit) to stay |
436 | * consistent with the ->writesize field. |
437 | * |
438 | * The @wunit argument can be extracted from an absolute offset using |
439 | * mtd_offset_to_wunit(). @info is filled with the pairing information attached |
440 | * to @wunit. |
441 | * |
442 | * From the pairing info the MTD user can find all the wunits paired with |
443 | * @wunit using the following loop: |
444 | * |
445 | * for (i = 0; i < mtd_pairing_groups(mtd); i++) { |
446 | * info.pair = i; |
447 | * mtd_pairing_info_to_wunit(mtd, &info); |
448 | * ... |
449 | * } |
450 | */ |
451 | int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit, |
452 | struct mtd_pairing_info *info) |
453 | { |
454 | struct mtd_info *master = mtd_get_master(mtd); |
455 | int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master); |
456 | |
457 | if (wunit < 0 || wunit >= npairs) |
458 | return -EINVAL; |
459 | |
460 | if (master->pairing && master->pairing->get_info) |
461 | return master->pairing->get_info(master, wunit, info); |
462 | |
463 | info->group = 0; |
464 | info->pair = wunit; |
465 | |
466 | return 0; |
467 | } |
468 | EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info); |
469 | |
470 | /** |
471 | * mtd_pairing_info_to_wunit - get wunit from pairing information |
472 | * @mtd: pointer to new MTD device info structure |
473 | * @info: pairing information struct |
474 | * |
475 | * Returns a non-negative number representing the wunit associated with the info |
476 | * struct, or a negative error code. |
477 | * |
478 | * This is the reverse of mtd_wunit_to_pairing_info(), and can help one to |
479 | * iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info() |
480 | * doc). |
481 | * |
482 | * It can also be used to only program the first page of each pair (i.e. |
483 | * page attached to group 0), which allows one to use an MLC NAND in |
484 | * software-emulated SLC mode: |
485 | * |
486 | * info.group = 0; |
487 | * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd); |
488 | * for (info.pair = 0; info.pair < npairs; info.pair++) { |
489 | * wunit = mtd_pairing_info_to_wunit(mtd, &info); |
490 | * mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit), |
491 | * mtd->writesize, &retlen, buf + (i * mtd->writesize)); |
492 | * } |
493 | */ |
494 | int mtd_pairing_info_to_wunit(struct mtd_info *mtd, |
495 | const struct mtd_pairing_info *info) |
496 | { |
497 | struct mtd_info *master = mtd_get_master(mtd); |
498 | int ngroups = mtd_pairing_groups(master); |
499 | int npairs = mtd_wunit_per_eb(master) / ngroups; |
500 | |
501 | if (!info || info->pair < 0 || info->pair >= npairs || |
502 | info->group < 0 || info->group >= ngroups) |
503 | return -EINVAL; |
504 | |
505 | if (master->pairing && master->pairing->get_wunit) |
506 | return mtd->pairing->get_wunit(master, info); |
507 | |
508 | return info->pair; |
509 | } |
510 | EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit); |
511 | |
512 | /** |
513 | * mtd_pairing_groups - get the number of pairing groups |
514 | * @mtd: pointer to new MTD device info structure |
515 | * |
516 | * Returns the number of pairing groups. |
517 | * |
518 | * This number is usually equal to the number of bits exposed by a single |
519 | * cell, and can be used in conjunction with mtd_pairing_info_to_wunit() |
520 | * to iterate over all pages of a given pair. |
521 | */ |
522 | int mtd_pairing_groups(struct mtd_info *mtd) |
523 | { |
524 | struct mtd_info *master = mtd_get_master(mtd); |
525 | |
526 | if (!master->pairing || !master->pairing->ngroups) |
527 | return 1; |
528 | |
529 | return master->pairing->ngroups; |
530 | } |
531 | EXPORT_SYMBOL_GPL(mtd_pairing_groups); |
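/*
 * Illustrative sketch (editor's example, not part of the kernel-doc above):
 * walking every wunit that belongs to pair @pair inside the erase block at
 * @base, e.g. to inspect all pages sharing the same cells on an MLC device.
 *
 *	struct mtd_pairing_info info = { .pair = pair };
 *	int group, wunit;
 *
 *	for (group = 0; group < mtd_pairing_groups(mtd); group++) {
 *		info.group = group;
 *		wunit = mtd_pairing_info_to_wunit(mtd, &info);
 *		if (wunit < 0)
 *			break;
 *		... access the page at mtd_wunit_to_offset(mtd, base, wunit) ...
 *	}
 */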
532 | |
533 | static int mtd_nvmem_reg_read(void *priv, unsigned int offset, |
534 | void *val, size_t bytes) |
535 | { |
536 | struct mtd_info *mtd = priv; |
537 | size_t retlen; |
538 | int err; |
539 | |
540 | err = mtd_read(mtd, offset, bytes, &retlen, val); |
541 | if (err && err != -EUCLEAN) |
542 | return err; |
543 | |
544 | return retlen == bytes ? 0 : -EIO; |
545 | } |
546 | |
547 | static int mtd_nvmem_add(struct mtd_info *mtd) |
548 | { |
549 | struct device_node *node = mtd_get_of_node(mtd); |
550 | struct nvmem_config config = {}; |
551 | |
552 | config.id = NVMEM_DEVID_NONE; |
553 | config.dev = &mtd->dev; |
554 | config.name = dev_name(&mtd->dev); |
555 | config.owner = THIS_MODULE; |
556 | config.add_legacy_fixed_of_cells = of_device_is_compatible(node, "nvmem-cells"); |
557 | config.reg_read = mtd_nvmem_reg_read; |
558 | config.size = mtd->size; |
559 | config.word_size = 1; |
560 | config.stride = 1; |
561 | config.read_only = true; |
562 | config.root_only = true; |
563 | config.ignore_wp = true; |
564 | config.priv = mtd; |
565 | |
566 | mtd->nvmem = nvmem_register(&config); |
567 | if (IS_ERR(mtd->nvmem)) { |
568 | /* Just ignore if there is no NVMEM support in the kernel */ |
569 | if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP) |
570 | mtd->nvmem = NULL; |
571 | else |
572 | return dev_err_probe(&mtd->dev, PTR_ERR(mtd->nvmem), |
573 | "Failed to register NVMEM device\n"); |
574 | } |
575 | |
576 | return 0; |
577 | } |
578 | |
579 | static void mtd_check_of_node(struct mtd_info *mtd) |
580 | { |
581 | struct device_node *partitions, *parent_dn, *mtd_dn = NULL; |
582 | const char *pname, *prefix = "partition-"; |
583 | int plen, mtd_name_len, offset, prefix_len; |
584 | |
585 | /* Check if MTD already has a device node */ |
586 | if (mtd_get_of_node(mtd)) |
587 | return; |
588 | |
589 | if (!mtd_is_partition(mtd)) |
590 | return; |
591 | |
592 | parent_dn = of_node_get(mtd_get_of_node(mtd->parent)); |
593 | if (!parent_dn) |
594 | return; |
595 | |
596 | if (mtd_is_partition(mtd->parent)) |
597 | partitions = of_node_get(parent_dn); |
598 | else |
599 | partitions = of_get_child_by_name(parent_dn, "partitions"); |
600 | if (!partitions) |
601 | goto exit_parent; |
602 | |
603 | prefix_len = strlen(prefix); |
604 | mtd_name_len = strlen(mtd->name); |
605 | |
606 | /* Search if a partition is defined with the same name */ |
607 | for_each_child_of_node(partitions, mtd_dn) { |
608 | /* Skip partition with no/wrong prefix */ |
609 | if (!of_node_name_prefix(mtd_dn, prefix)) |
610 | continue; |
611 | |
612 | /* Labels have priority. Check them first */ |
613 | if (!of_property_read_string(mtd_dn, "label", &pname)) { |
614 | offset = 0; |
615 | } else { |
616 | pname = mtd_dn->name; |
617 | offset = prefix_len; |
618 | } |
619 | |
620 | plen = strlen(pname) - offset; |
621 | if (plen == mtd_name_len && |
622 | !strncmp(mtd->name, pname + offset, plen)) { |
623 | mtd_set_of_node(mtd, mtd_dn); |
624 | of_node_put(mtd_dn); |
625 | break; |
626 | } |
627 | } |
628 | |
629 | of_node_put(partitions); |
630 | exit_parent: |
631 | of_node_put(parent_dn); |
632 | } |
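/*
 * Illustrative device tree fragment (editor's example; exact bindings depend
 * on the partition parser in use): for partitions created dynamically and
 * thus lacking an OF node of their own, mtd_check_of_node() above matches a
 * "partition-<name>" child node, or one whose "label" equals mtd->name, and
 * attaches it to the partition so that e.g. NVMEM cells defined there work.
 *
 *	partitions {
 *		compatible = "qcom,smem-part";
 *
 *		partition-art {
 *			label = "0:ART";
 *		};
 *	};
 */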
633 | |
634 | /** |
635 | * add_mtd_device - register an MTD device |
636 | * @mtd: pointer to new MTD device info structure |
637 | * |
638 | * Add a device to the list of MTD devices present in the system, and |
639 | * notify each currently active MTD 'user' of its arrival. Returns |
640 | * zero on success or non-zero on failure. |
641 | */ |
642 | |
643 | int add_mtd_device(struct mtd_info *mtd) |
644 | { |
645 | struct device_node *np = mtd_get_of_node(mtd); |
646 | struct mtd_info *master = mtd_get_master(mtd); |
647 | struct mtd_notifier *not; |
648 | int i, error, ofidx; |
649 | |
650 | /* |
651 | * May occur, for instance, on buggy drivers which call |
652 | * mtd_device_parse_register() multiple times on the same master MTD, |
653 | * especially with CONFIG_MTD_PARTITIONED_MASTER=y. |
654 | */ |
655 | if (WARN_ONCE(mtd->dev.type, "MTD already registered\n")) |
656 | return -EEXIST; |
657 | |
658 | BUG_ON(mtd->writesize == 0); |
659 | |
660 | /* |
661 | * MTD drivers should implement ->_{write,read}() or |
662 | * ->_{write,read}_oob(), but not both. |
663 | */ |
664 | if (WARN_ON((mtd->_write && mtd->_write_oob) || |
665 | (mtd->_read && mtd->_read_oob))) |
666 | return -EINVAL; |
667 | |
668 | if (WARN_ON((!mtd->erasesize || !master->_erase) && |
669 | !(mtd->flags & MTD_NO_ERASE))) |
670 | return -EINVAL; |
671 | |
672 | /* |
673 | * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the |
674 | * master is an MLC NAND and has a proper pairing scheme defined. |
675 | * We also reject masters that implement ->_writev() for now, because |
676 | * NAND controller drivers don't implement this hook, and adding the |
677 | * SLC -> MLC address/length conversion to this path is useless if we |
678 | * don't have a user. |
679 | */ |
680 | if (mtd->flags & MTD_SLC_ON_MLC_EMULATION && |
681 | (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH || |
682 | !master->pairing || master->_writev)) |
683 | return -EINVAL; |
684 | |
685 | mutex_lock(&mtd_table_mutex); |
686 | |
687 | ofidx = -1; |
688 | if (np) |
689 | ofidx = of_alias_get_id(np, "mtd"); |
690 | if (ofidx >= 0) |
691 | i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL); |
692 | else |
693 | i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL); |
694 | if (i < 0) { |
695 | error = i; |
696 | goto fail_locked; |
697 | } |
698 | |
699 | mtd->index = i; |
700 | kref_init(&mtd->refcnt); |
701 | |
702 | /* default value if not set by driver */ |
703 | if (mtd->bitflip_threshold == 0) |
704 | mtd->bitflip_threshold = mtd->ecc_strength; |
705 | |
706 | if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) { |
707 | int ngroups = mtd_pairing_groups(master); |
708 | |
709 | mtd->erasesize /= ngroups; |
710 | mtd->size = (u64)mtd_div_by_eb(mtd->size, master) * |
711 | mtd->erasesize; |
712 | } |
713 | |
714 | if (is_power_of_2(mtd->erasesize)) |
715 | mtd->erasesize_shift = ffs(mtd->erasesize) - 1; |
716 | else |
717 | mtd->erasesize_shift = 0; |
718 | |
719 | if (is_power_of_2(mtd->writesize)) |
720 | mtd->writesize_shift = ffs(mtd->writesize) - 1; |
721 | else |
722 | mtd->writesize_shift = 0; |
723 | |
724 | mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1; |
725 | mtd->writesize_mask = (1 << mtd->writesize_shift) - 1; |
726 | |
727 | /* Some chips always power up locked. Unlock them now */ |
728 | if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) { |
729 | error = mtd_unlock(mtd, 0, mtd->size); |
730 | if (error && error != -EOPNOTSUPP) |
731 | printk(KERN_WARNING |
732 | "%s: unlock failed, writes may not work\n" , |
733 | mtd->name); |
734 | /* Ignore unlock failures? */ |
735 | error = 0; |
736 | } |
737 | |
738 | /* Caller should have set dev.parent to match the |
739 | * physical device, if appropriate. |
740 | */ |
741 | mtd->dev.type = &mtd_devtype; |
742 | mtd->dev.class = &mtd_class; |
743 | mtd->dev.devt = MTD_DEVT(i); |
744 | dev_set_name(&mtd->dev, "mtd%d", i); |
745 | dev_set_drvdata(&mtd->dev, mtd); |
746 | mtd_check_of_node(mtd); |
747 | of_node_get(mtd_get_of_node(mtd)); |
748 | error = device_register(&mtd->dev); |
749 | if (error) { |
750 | put_device(&mtd->dev); |
751 | goto fail_added; |
752 | } |
753 | |
754 | /* Add the nvmem provider */ |
755 | error = mtd_nvmem_add(mtd); |
756 | if (error) |
757 | goto fail_nvmem_add; |
758 | |
759 | mtd_debugfs_populate(mtd); |
760 | |
761 | device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL, |
762 | "mtd%dro", i); |
763 | |
764 | pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name); |
765 | /* No need to get a refcount on the module containing |
766 | the notifier, since we hold the mtd_table_mutex */ |
767 | list_for_each_entry(not, &mtd_notifiers, list) |
768 | not->add(mtd); |
769 | |
770 | mutex_unlock(&mtd_table_mutex); |
771 | |
772 | if (of_property_read_bool(mtd_get_of_node(mtd), "linux,rootfs")) { |
773 | if (IS_BUILTIN(CONFIG_MTD)) { |
774 | pr_info("mtd: setting mtd%d (%s) as root device\n", mtd->index, mtd->name); |
775 | ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index); |
776 | } else { |
777 | pr_warn("mtd: can't set mtd%d (%s) as root device - mtd must be builtin\n", |
778 | mtd->index, mtd->name); |
779 | } |
780 | } |
781 | |
782 | /* We _know_ we aren't being removed, because |
783 | our caller is still holding us here. So none |
784 | of this try_ nonsense, and no bitching about it |
785 | either. :) */ |
786 | __module_get(THIS_MODULE); |
787 | return 0; |
788 | |
789 | fail_nvmem_add: |
790 | device_unregister(&mtd->dev); |
791 | fail_added: |
792 | of_node_put(mtd_get_of_node(mtd)); |
793 | idr_remove(&mtd_idr, i); |
794 | fail_locked: |
795 | mutex_unlock(&mtd_table_mutex); |
796 | return error; |
797 | } |
798 | |
799 | /** |
800 | * del_mtd_device - unregister an MTD device |
801 | * @mtd: pointer to MTD device info structure |
802 | * |
803 | * Remove a device from the list of MTD devices present in the system, |
804 | * and notify each currently active MTD 'user' of its departure. |
805 | * Returns zero on success or a negative error code on failure, which |
806 | * currently happens if the requested device is not present in the list. |
807 | */ |
808 | |
809 | int del_mtd_device(struct mtd_info *mtd) |
810 | { |
811 | int ret; |
812 | struct mtd_notifier *not; |
813 | |
814 | mutex_lock(&mtd_table_mutex); |
815 | |
816 | if (idr_find(&mtd_idr, mtd->index) != mtd) { |
817 | ret = -ENODEV; |
818 | goto out_error; |
819 | } |
820 | |
821 | /* No need to get a refcount on the module containing |
822 | the notifier, since we hold the mtd_table_mutex */ |
823 | list_for_each_entry(not, &mtd_notifiers, list) |
824 | not->remove(mtd); |
825 | |
826 | kref_put(&mtd->refcnt, mtd_device_release); |
827 | ret = 0; |
828 | |
829 | out_error: |
830 | mutex_unlock(&mtd_table_mutex); |
831 | return ret; |
832 | } |
833 | |
834 | /* |
835 | * Set a few defaults based on the parent devices, if not provided by the |
836 | * driver |
837 | */ |
838 | static void mtd_set_dev_defaults(struct mtd_info *mtd) |
839 | { |
840 | if (mtd->dev.parent) { |
841 | if (!mtd->owner && mtd->dev.parent->driver) |
842 | mtd->owner = mtd->dev.parent->driver->owner; |
843 | if (!mtd->name) |
844 | mtd->name = dev_name(mtd->dev.parent); |
845 | } else { |
846 | pr_debug("mtd device won't show a device symlink in sysfs\n"); |
847 | } |
848 | |
849 | INIT_LIST_HEAD(&mtd->partitions); |
850 | mutex_init(&mtd->master.partitions_lock); |
851 | mutex_init(&mtd->master.chrdev_lock); |
852 | } |
853 | |
854 | static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user) |
855 | { |
856 | struct otp_info *info; |
857 | ssize_t size = 0; |
858 | unsigned int i; |
859 | size_t retlen; |
860 | int ret; |
861 | |
862 | info = kmalloc(PAGE_SIZE, GFP_KERNEL); |
863 | if (!info) |
864 | return -ENOMEM; |
865 | |
866 | if (is_user) |
867 | ret = mtd_get_user_prot_info(mtd, PAGE_SIZE, &retlen, info); |
868 | else |
869 | ret = mtd_get_fact_prot_info(mtd, PAGE_SIZE, &retlen, info); |
870 | if (ret) |
871 | goto err; |
872 | |
873 | for (i = 0; i < retlen / sizeof(*info); i++) |
874 | size += info[i].length; |
875 | |
876 | kfree(info); |
877 | return size; |
878 | |
879 | err: |
880 | kfree(info); |
881 | |
882 | /* ENODATA means there is no OTP region. */ |
883 | return ret == -ENODATA ? 0 : ret; |
884 | } |
885 | |
886 | static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd, |
887 | const char *compatible, |
888 | int size, |
889 | nvmem_reg_read_t reg_read) |
890 | { |
891 | struct nvmem_device *nvmem = NULL; |
892 | struct nvmem_config config = {}; |
893 | struct device_node *np; |
894 | |
895 | /* DT binding is optional */ |
896 | np = of_get_compatible_child(mtd->dev.of_node, compatible); |
897 | |
898 | /* OTP nvmem will be registered on the physical device */ |
899 | config.dev = mtd->dev.parent; |
900 | config.name = compatible; |
901 | config.id = NVMEM_DEVID_AUTO; |
902 | config.owner = THIS_MODULE; |
903 | config.add_legacy_fixed_of_cells = true; |
904 | config.type = NVMEM_TYPE_OTP; |
905 | config.root_only = true; |
906 | config.ignore_wp = true; |
907 | config.reg_read = reg_read; |
908 | config.size = size; |
909 | config.of_node = np; |
910 | config.priv = mtd; |
911 | |
912 | nvmem = nvmem_register(&config); |
913 | /* Just ignore if there is no NVMEM support in the kernel */ |
914 | if (IS_ERR(nvmem) && PTR_ERR(nvmem) == -EOPNOTSUPP) |
915 | nvmem = NULL; |
916 | |
917 | of_node_put(np); |
918 | |
919 | return nvmem; |
920 | } |
921 | |
922 | static int mtd_nvmem_user_otp_reg_read(void *priv, unsigned int offset, |
923 | void *val, size_t bytes) |
924 | { |
925 | struct mtd_info *mtd = priv; |
926 | size_t retlen; |
927 | int ret; |
928 | |
929 | ret = mtd_read_user_prot_reg(mtd, offset, bytes, &retlen, val); |
930 | if (ret) |
931 | return ret; |
932 | |
933 | return retlen == bytes ? 0 : -EIO; |
934 | } |
935 | |
936 | static int mtd_nvmem_fact_otp_reg_read(void *priv, unsigned int offset, |
937 | void *val, size_t bytes) |
938 | { |
939 | struct mtd_info *mtd = priv; |
940 | size_t retlen; |
941 | int ret; |
942 | |
943 | ret = mtd_read_fact_prot_reg(mtd, offset, bytes, &retlen, val); |
944 | if (ret) |
945 | return ret; |
946 | |
947 | return retlen == bytes ? 0 : -EIO; |
948 | } |
949 | |
950 | static int mtd_otp_nvmem_add(struct mtd_info *mtd) |
951 | { |
952 | struct device *dev = mtd->dev.parent; |
953 | struct nvmem_device *nvmem; |
954 | ssize_t size; |
955 | int err; |
956 | |
957 | if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) { |
958 | size = mtd_otp_size(mtd, true); |
959 | if (size < 0) |
960 | return size; |
961 | |
962 | if (size > 0) { |
963 | nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size, |
964 | mtd_nvmem_user_otp_reg_read); |
965 | if (IS_ERR(nvmem)) { |
966 | err = PTR_ERR(nvmem); |
967 | goto err; |
968 | } |
969 | mtd->otp_user_nvmem = nvmem; |
970 | } |
971 | } |
972 | |
973 | if (mtd->_get_fact_prot_info && mtd->_read_fact_prot_reg) { |
974 | size = mtd_otp_size(mtd, false); |
975 | if (size < 0) { |
976 | err = size; |
977 | goto err; |
978 | } |
979 | |
980 | if (size > 0) { |
981 | /* |
982 | * The factory OTP contains things such as a unique serial |
983 | * number and is small, so let's read it out and put it |
984 | * into the entropy pool. |
985 | */ |
986 | void *otp; |
987 | |
988 | otp = kmalloc(size, GFP_KERNEL); |
989 | if (!otp) { |
990 | err = -ENOMEM; |
991 | goto err; |
992 | } |
993 | err = mtd_nvmem_fact_otp_reg_read(mtd, 0, otp, size); |
994 | if (err < 0) { |
995 | kfree(otp); |
996 | goto err; |
997 | } |
998 | add_device_randomness(otp, err); |
999 | kfree(otp); |
1000 | |
1001 | nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size, |
1002 | mtd_nvmem_fact_otp_reg_read); |
1003 | if (IS_ERR(nvmem)) { |
1004 | err = PTR_ERR(nvmem); |
1005 | goto err; |
1006 | } |
1007 | mtd->otp_factory_nvmem = nvmem; |
1008 | } |
1009 | } |
1010 | |
1011 | return 0; |
1012 | |
1013 | err: |
1014 | nvmem_unregister(mtd->otp_user_nvmem); |
1015 | return dev_err_probe(dev, err, "Failed to register OTP NVMEM device\n"); |
1016 | } |
1017 | |
1018 | /** |
1019 | * mtd_device_parse_register - parse partitions and register an MTD device. |
1020 | * |
1021 | * @mtd: the MTD device to register |
1022 | * @types: the list of MTD partition probes to try, see |
1023 | * 'parse_mtd_partitions()' for more information |
1024 | * @parser_data: MTD partition parser-specific data |
1025 | * @parts: fallback partition information to register, if parsing fails; |
1026 | * only valid if %nr_parts > %0 |
1027 | * @nr_parts: the number of partitions in parts, if zero then the full |
1028 | * MTD device is registered if no partition info is found |
1029 | * |
1030 | * This function aggregates MTD partitions parsing (done by |
1031 | * 'parse_mtd_partitions()') and MTD device and partitions registering. It |
1032 | * basically follows the most common pattern found in many MTD drivers: |
1033 | * |
1034 | * * If the MTD_PARTITIONED_MASTER option is set, then the device as a whole is |
1035 | * registered first. |
1036 | * * Then it tries to probe partitions on MTD device @mtd using parsers |
1037 | * specified in @types (if @types is %NULL, then the default list of parsers |
1038 | * is used, see 'parse_mtd_partitions()' for more information). If none are |
1039 | * found this function tries to fall back to information specified in |
1040 | * @parts/@nr_parts. |
1041 | * * If no partitions were found this function just registers the MTD device |
1042 | * @mtd and exits. |
1043 | * |
1044 | * Returns zero in case of success and a negative error code in case of failure. |
1045 | */ |
1046 | int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types, |
1047 | struct mtd_part_parser_data *parser_data, |
1048 | const struct mtd_partition *parts, |
1049 | int nr_parts) |
1050 | { |
1051 | int ret; |
1052 | |
1053 | mtd_set_dev_defaults(mtd); |
1054 | |
1055 | ret = mtd_otp_nvmem_add(mtd); |
1056 | if (ret) |
1057 | goto out; |
1058 | |
1059 | if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) { |
1060 | ret = add_mtd_device(mtd); |
1061 | if (ret) |
1062 | goto out; |
1063 | } |
1064 | |
1065 | /* Prefer parsed partitions over driver-provided fallback */ |
1066 | ret = parse_mtd_partitions(mtd, types, parser_data); |
1067 | if (ret == -EPROBE_DEFER) |
1068 | goto out; |
1069 | |
1070 | if (ret > 0) |
1071 | ret = 0; |
1072 | else if (nr_parts) |
1073 | ret = add_mtd_partitions(mtd, parts, nr_parts); |
1074 | else if (!device_is_registered(&mtd->dev)) |
1075 | ret = add_mtd_device(mtd); |
1076 | else |
1077 | ret = 0; |
1078 | |
1079 | if (ret) |
1080 | goto out; |
1081 | |
1082 | /* |
1083 | * FIXME: some drivers unfortunately call this function more than once. |
1084 | * So we have to check if we've already assigned the reboot notifier. |
1085 | * |
1086 | * Generally, we can make multiple calls work for most cases, but it |
1087 | * does cause problems with parse_mtd_partitions() above (e.g., |
1088 | * cmdlineparts will register partitions more than once). |
1089 | */ |
1090 | WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call, |
1091 | "MTD already registered\n"); |
1092 | if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) { |
1093 | mtd->reboot_notifier.notifier_call = mtd_reboot_notifier; |
1094 | register_reboot_notifier(&mtd->reboot_notifier); |
1095 | } |
1096 | |
1097 | out: |
1098 | if (ret) { |
1099 | nvmem_unregister(mtd->otp_user_nvmem); |
1100 | nvmem_unregister(mtd->otp_factory_nvmem); |
1101 | } |
1102 | |
1103 | if (ret && device_is_registered(&mtd->dev)) |
1104 | del_mtd_device(mtd); |
1105 | |
1106 | return ret; |
1107 | } |
1108 | EXPORT_SYMBOL_GPL(mtd_device_parse_register); |
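/*
 * Illustrative usage sketch (editor's example; partition names and sizes
 * below are made up): a typical flash driver registers its master MTD from
 * probe with the default parsers and a static fallback table, and tears it
 * down again with mtd_device_unregister() on remove.
 *
 *	static const struct mtd_partition example_parts[] = {
 *		{ .name = "boot",   .offset = 0,                  .size = SZ_1M },
 *		{ .name = "rootfs", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	mtd->dev.parent = &pdev->dev;
 *	mtd_set_of_node(mtd, pdev->dev.of_node);
 *	ret = mtd_device_parse_register(mtd, NULL, NULL, example_parts,
 *					ARRAY_SIZE(example_parts));
 *	if (ret)
 *		return ret;
 *	...
 *	WARN_ON(mtd_device_unregister(mtd));
 */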
1109 | |
1110 | /** |
1111 | * mtd_device_unregister - unregister an existing MTD device. |
1112 | * |
1113 | * @master: the MTD device to unregister. This will unregister both the master |
1114 | * and any partitions if registered. |
1115 | */ |
1116 | int mtd_device_unregister(struct mtd_info *master) |
1117 | { |
1118 | int err; |
1119 | |
1120 | if (master->_reboot) { |
1121 | unregister_reboot_notifier(&master->reboot_notifier); |
1122 | memset(&master->reboot_notifier, 0, sizeof(master->reboot_notifier)); |
1123 | } |
1124 | |
1125 | nvmem_unregister(master->otp_user_nvmem); |
1126 | nvmem_unregister(master->otp_factory_nvmem); |
1127 | |
1128 | err = del_mtd_partitions(master); |
1129 | if (err) |
1130 | return err; |
1131 | |
1132 | if (!device_is_registered(&master->dev)) |
1133 | return 0; |
1134 | |
1135 | return del_mtd_device(master); |
1136 | } |
1137 | EXPORT_SYMBOL_GPL(mtd_device_unregister); |
1138 | |
1139 | /** |
1140 | * register_mtd_user - register a 'user' of MTD devices. |
1141 | * @new: pointer to notifier info structure |
1142 | * |
1143 | * Registers a pair of callback functions to be called upon addition |
1144 | * or removal of MTD devices. Causes the 'add' callback to be immediately |
1145 | * invoked for each MTD device currently present in the system. |
1146 | */ |
1147 | void register_mtd_user (struct mtd_notifier *new) |
1148 | { |
1149 | struct mtd_info *mtd; |
1150 | |
1151 | mutex_lock(&mtd_table_mutex); |
1152 | |
1153 | list_add(&new->list, &mtd_notifiers); |
1154 | |
1155 | __module_get(THIS_MODULE); |
1156 | |
1157 | mtd_for_each_device(mtd) |
1158 | new->add(mtd); |
1159 | |
1160 | mutex_unlock(&mtd_table_mutex); |
1161 | } |
1162 | EXPORT_SYMBOL_GPL(register_mtd_user); |
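/*
 * Illustrative sketch (editor's example): a layered driver such as a block
 * translation layer registers a notifier like the one below so it learns
 * about every MTD device, both those already present and future arrivals.
 *
 *	static void example_add_mtd(struct mtd_info *mtd)
 *	{
 *		... create the upper-layer device for @mtd ...
 *	}
 *
 *	static void example_remove_mtd(struct mtd_info *mtd)
 *	{
 *		... tear the upper-layer device down ...
 *	}
 *
 *	static struct mtd_notifier example_notifier = {
 *		.add = example_add_mtd,
 *		.remove = example_remove_mtd,
 *	};
 *
 *	register_mtd_user(&example_notifier);
 */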
1163 | |
1164 | /** |
1165 | * unregister_mtd_user - unregister a 'user' of MTD devices. |
1166 | * @old: pointer to notifier info structure |
1167 | * |
1168 | * Removes a callback function pair from the list of 'users' to be |
1169 | * notified upon addition or removal of MTD devices. Causes the |
1170 | * 'remove' callback to be immediately invoked for each MTD device |
1171 | * currently present in the system. |
1172 | */ |
1173 | int unregister_mtd_user (struct mtd_notifier *old) |
1174 | { |
1175 | struct mtd_info *mtd; |
1176 | |
1177 | mutex_lock(&mtd_table_mutex); |
1178 | |
1179 | module_put(THIS_MODULE); |
1180 | |
1181 | mtd_for_each_device(mtd) |
1182 | old->remove(mtd); |
1183 | |
1184 | list_del(&old->list); |
1185 | mutex_unlock(&mtd_table_mutex); |
1186 | return 0; |
1187 | } |
1188 | EXPORT_SYMBOL_GPL(unregister_mtd_user); |
1189 | |
1190 | /** |
1191 | * get_mtd_device - obtain a validated handle for an MTD device |
1192 | * @mtd: last known address of the required MTD device |
1193 | * @num: internal device number of the required MTD device |
1194 | * |
1195 | * Given a number and NULL address, return the num'th entry in the device |
1196 | * table, if any. Given an address and num == -1, search the device table |
1197 | * for a device with that address and return if it's still present. Given |
1198 | * both, return the num'th driver only if its address matches. Return |
1199 | * error code if not. |
1200 | */ |
1201 | struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num) |
1202 | { |
1203 | struct mtd_info *ret = NULL, *other; |
1204 | int err = -ENODEV; |
1205 | |
1206 | mutex_lock(&mtd_table_mutex); |
1207 | |
1208 | if (num == -1) { |
1209 | mtd_for_each_device(other) { |
1210 | if (other == mtd) { |
1211 | ret = mtd; |
1212 | break; |
1213 | } |
1214 | } |
1215 | } else if (num >= 0) { |
1216 | ret = idr_find(&mtd_idr, num); |
1217 | if (mtd && mtd != ret) |
1218 | ret = NULL; |
1219 | } |
1220 | |
1221 | if (!ret) { |
1222 | ret = ERR_PTR(err); |
1223 | goto out; |
1224 | } |
1225 | |
1226 | err = __get_mtd_device(ret); |
1227 | if (err) |
1228 | ret = ERR_PTR(err); |
1229 | out: |
1230 | mutex_unlock(&mtd_table_mutex); |
1231 | return ret; |
1232 | } |
1233 | EXPORT_SYMBOL_GPL(get_mtd_device); |
1234 | |
1235 | |
1236 | int __get_mtd_device(struct mtd_info *mtd) |
1237 | { |
1238 | struct mtd_info *master = mtd_get_master(mtd); |
1239 | int err; |
1240 | |
1241 | if (master->_get_device) { |
1242 | err = master->_get_device(mtd); |
1243 | if (err) |
1244 | return err; |
1245 | } |
1246 | |
1247 | if (!try_module_get(master->owner)) { |
1248 | if (master->_put_device) |
1249 | master->_put_device(master); |
1250 | return -ENODEV; |
1251 | } |
1252 | |
1253 | while (mtd) { |
1254 | if (mtd != master) |
1255 | kref_get(&mtd->refcnt); |
1256 | mtd = mtd->parent; |
1257 | } |
1258 | |
1259 | if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) |
1260 | kref_get(&master->refcnt); |
1261 | |
1262 | return 0; |
1263 | } |
1264 | EXPORT_SYMBOL_GPL(__get_mtd_device); |
1265 | |
1266 | /** |
1267 | * of_get_mtd_device_by_node - obtain an MTD device associated with a given node |
1268 | * |
1269 | * @np: device tree node |
1270 | */ |
1271 | struct mtd_info *of_get_mtd_device_by_node(struct device_node *np) |
1272 | { |
1273 | struct mtd_info *mtd = NULL; |
1274 | struct mtd_info *tmp; |
1275 | int err; |
1276 | |
1277 | mutex_lock(&mtd_table_mutex); |
1278 | |
1279 | err = -EPROBE_DEFER; |
1280 | mtd_for_each_device(tmp) { |
1281 | if (mtd_get_of_node(tmp) == np) { |
1282 | mtd = tmp; |
1283 | err = __get_mtd_device(mtd); |
1284 | break; |
1285 | } |
1286 | } |
1287 | |
1288 | mutex_unlock(&mtd_table_mutex); |
1289 | |
1290 | return err ? ERR_PTR(err) : mtd; |
1291 | } |
1292 | EXPORT_SYMBOL_GPL(of_get_mtd_device_by_node); |
1293 | |
1294 | /** |
1295 | * get_mtd_device_nm - obtain a validated handle for an MTD device by |
1296 | * device name |
1297 | * @name: MTD device name to open |
1298 | * |
1299 | * This function returns MTD device description structure in case of |
1300 | * success and an error code in case of failure. |
1301 | */ |
1302 | struct mtd_info *get_mtd_device_nm(const char *name) |
1303 | { |
1304 | int err = -ENODEV; |
1305 | struct mtd_info *mtd = NULL, *other; |
1306 | |
1307 | mutex_lock(&mtd_table_mutex); |
1308 | |
1309 | mtd_for_each_device(other) { |
1310 | if (!strcmp(name, other->name)) { |
1311 | mtd = other; |
1312 | break; |
1313 | } |
1314 | } |
1315 | |
1316 | if (!mtd) |
1317 | goto out_unlock; |
1318 | |
1319 | err = __get_mtd_device(mtd); |
1320 | if (err) |
1321 | goto out_unlock; |
1322 | |
1323 | mutex_unlock(&mtd_table_mutex); |
1324 | return mtd; |
1325 | |
1326 | out_unlock: |
1327 | mutex_unlock(&mtd_table_mutex); |
1328 | return ERR_PTR(err); |
1329 | } |
1330 | EXPORT_SYMBOL_GPL(get_mtd_device_nm); |
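/*
 * Illustrative sketch (editor's example): looking an MTD device up by name
 * and dropping the reference once done. Note the ERR_PTR() convention: the
 * return value must be checked with IS_ERR(), not against NULL.
 *
 *	struct mtd_info *mtd = get_mtd_device_nm("rootfs");
 *
 *	if (IS_ERR(mtd))
 *		return PTR_ERR(mtd);
 *	... use mtd_read()/mtd_write() on mtd ...
 *	put_mtd_device(mtd);
 */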
1331 | |
1332 | void put_mtd_device(struct mtd_info *mtd) |
1333 | { |
1334 | mutex_lock(&mtd_table_mutex); |
1335 | __put_mtd_device(mtd); |
1336 | mutex_unlock(&mtd_table_mutex); |
1337 | |
1338 | } |
1339 | EXPORT_SYMBOL_GPL(put_mtd_device); |
1340 | |
1341 | void __put_mtd_device(struct mtd_info *mtd) |
1342 | { |
1343 | struct mtd_info *master = mtd_get_master(mtd); |
1344 | |
1345 | while (mtd) { |
1346 | /* kref_put() can release mtd, so keep a reference to mtd->parent */ |
1347 | struct mtd_info *parent = mtd->parent; |
1348 | |
1349 | if (mtd != master) |
1350 | kref_put(&mtd->refcnt, mtd_device_release); |
1351 | mtd = parent; |
1352 | } |
1353 | |
1354 | if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) |
1355 | kref_put(&master->refcnt, mtd_device_release); |
1356 | |
1357 | module_put(master->owner); |
1358 | |
1359 | /* must be the last as master can be freed in the _put_device */ |
1360 | if (master->_put_device) |
1361 | master->_put_device(master); |
1362 | } |
1363 | EXPORT_SYMBOL_GPL(__put_mtd_device); |
1364 | |
1365 | /* |
1366 | * Erase is a synchronous operation. Device drivers are expected to return a |
1367 | * negative error code if the operation failed and update instr->fail_addr |
1368 | * to point to the portion that was not properly erased. |
1369 | */ |
1370 | int mtd_erase(struct mtd_info *mtd, struct erase_info *instr) |
1371 | { |
1372 | struct mtd_info *master = mtd_get_master(mtd); |
1373 | u64 mst_ofs = mtd_get_master_ofs(mtd, 0); |
1374 | struct erase_info adjinstr; |
1375 | int ret; |
1376 | |
1377 | instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN; |
1378 | adjinstr = *instr; |
1379 | |
1380 | if (!mtd->erasesize || !master->_erase) |
1381 | return -ENOTSUPP; |
1382 | |
1383 | if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr) |
1384 | return -EINVAL; |
1385 | if (!(mtd->flags & MTD_WRITEABLE)) |
1386 | return -EROFS; |
1387 | |
1388 | if (!instr->len) |
1389 | return 0; |
1390 | |
1391 | ledtrig_mtd_activity(); |
1392 | |
1393 | if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) { |
1394 | adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) * |
1395 | master->erasesize; |
1396 | adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) * |
1397 | master->erasesize) - |
1398 | adjinstr.addr; |
1399 | } |
1400 | |
1401 | adjinstr.addr += mst_ofs; |
1402 | |
1403 | ret = master->_erase(master, &adjinstr); |
1404 | |
1405 | if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) { |
1406 | instr->fail_addr = adjinstr.fail_addr - mst_ofs; |
1407 | if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) { |
1408 | instr->fail_addr = mtd_div_by_eb(instr->fail_addr, |
1409 | master); |
1410 | instr->fail_addr *= mtd->erasesize; |
1411 | } |
1412 | } |
1413 | |
1414 | return ret; |
1415 | } |
1416 | EXPORT_SYMBOL_GPL(mtd_erase); |
1417 | ALLOW_ERROR_INJECTION(mtd_erase, ERRNO); |
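/*
 * Illustrative sketch (editor's example): erasing a single erase block at
 * @offs. The caller fills an erase_info describing the range; on failure,
 * instr.fail_addr (when known) points at the block that did not erase.
 *
 *	struct erase_info instr = {
 *		.addr = offs,
 *		.len = mtd->erasesize,
 *	};
 *	int err = mtd_erase(mtd, &instr);
 *
 *	if (err)
 *		pr_warn("erase failed at 0x%llx\n",
 *			(unsigned long long)instr.fail_addr);
 */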
1418 | |
1419 | /* |
1420 | * This stuff is for eXecute-In-Place. phys is optional and may be set to NULL. |
1421 | */ |
1422 | int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, |
1423 | void **virt, resource_size_t *phys) |
1424 | { |
1425 | struct mtd_info *master = mtd_get_master(mtd); |
1426 | |
1427 | *retlen = 0; |
1428 | *virt = NULL; |
1429 | if (phys) |
1430 | *phys = 0; |
1431 | if (!master->_point) |
1432 | return -EOPNOTSUPP; |
1433 | if (from < 0 || from >= mtd->size || len > mtd->size - from) |
1434 | return -EINVAL; |
1435 | if (!len) |
1436 | return 0; |
1437 | |
1438 | from = mtd_get_master_ofs(mtd, from); |
1439 | return master->_point(master, from, len, retlen, virt, phys); |
1440 | } |
1441 | EXPORT_SYMBOL_GPL(mtd_point); |
1442 | |
1443 | /* We probably shouldn't allow XIP if the unpoint isn't NULL */ |
1444 | int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len) |
1445 | { |
1446 | struct mtd_info *master = mtd_get_master(mtd); |
1447 | |
1448 | if (!master->_unpoint) |
1449 | return -EOPNOTSUPP; |
1450 | if (from < 0 || from >= mtd->size || len > mtd->size - from) |
1451 | return -EINVAL; |
1452 | if (!len) |
1453 | return 0; |
1454 | return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len); |
1455 | } |
1456 | EXPORT_SYMBOL_GPL(mtd_unpoint); |
1457 | |
1458 | /* |
1459 | * Allow NOMMU mmap() to directly map the device (if not NULL) |
1460 | * - return the address to which the offset maps |
1461 | * - return -ENOSYS to indicate refusal to do the mapping |
1462 | */ |
1463 | unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len, |
1464 | unsigned long offset, unsigned long flags) |
1465 | { |
1466 | size_t retlen; |
1467 | void *virt; |
1468 | int ret; |
1469 | |
1470 | ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL); |
1471 | if (ret) |
1472 | return ret; |
1473 | if (retlen != len) { |
1474 | mtd_unpoint(mtd, offset, retlen); |
1475 | return -ENOSYS; |
1476 | } |
1477 | return (unsigned long)virt; |
1478 | } |
1479 | EXPORT_SYMBOL_GPL(mtd_get_unmapped_area); |
1480 | |
1481 | static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master, |
1482 | const struct mtd_ecc_stats *old_stats) |
1483 | { |
1484 | struct mtd_ecc_stats diff; |
1485 | |
1486 | if (master == mtd) |
1487 | return; |
1488 | |
1489 | diff = master->ecc_stats; |
1490 | diff.failed -= old_stats->failed; |
1491 | diff.corrected -= old_stats->corrected; |
1492 | |
1493 | while (mtd->parent) { |
1494 | mtd->ecc_stats.failed += diff.failed; |
1495 | mtd->ecc_stats.corrected += diff.corrected; |
1496 | mtd = mtd->parent; |
1497 | } |
1498 | } |
1499 | |
1500 | int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, |
1501 | u_char *buf) |
1502 | { |
1503 | struct mtd_oob_ops ops = { |
1504 | .len = len, |
1505 | .datbuf = buf, |
1506 | }; |
1507 | int ret; |
1508 | |
1509 | ret = mtd_read_oob(mtd, from, &ops); |
1510 | *retlen = ops.retlen; |
1511 | |
1512 | WARN_ON_ONCE(*retlen != len && mtd_is_bitflip_or_eccerr(ret)); |
1513 | |
1514 | return ret; |
1515 | } |
1516 | EXPORT_SYMBOL_GPL(mtd_read); |
1517 | ALLOW_ERROR_INJECTION(mtd_read, ERRNO); |
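/*
 * Illustrative sketch (editor's example): -EUCLEAN from mtd_read() only
 * means bitflips at or above mtd->bitflip_threshold were corrected, so
 * callers usually treat it as success but schedule the block for cleanup
 * (scrubbing/torture in upper layers such as UBI).
 *
 *	size_t retlen;
 *	int err = mtd_read(mtd, from, len, &retlen, buf);
 *
 *	if (err == -EUCLEAN)
 *		... data in buf is valid, consider moving/scrubbing the block ...
 *	else if (err < 0)
 *		return err;
 */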
1518 | |
1519 | int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, |
1520 | const u_char *buf) |
1521 | { |
1522 | struct mtd_oob_ops ops = { |
1523 | .len = len, |
1524 | .datbuf = (u8 *)buf, |
1525 | }; |
1526 | int ret; |
1527 | |
1528 | ret = mtd_write_oob(mtd, to, &ops); |
1529 | *retlen = ops.retlen; |
1530 | |
1531 | return ret; |
1532 | } |
1533 | EXPORT_SYMBOL_GPL(mtd_write); |
1534 | ALLOW_ERROR_INJECTION(mtd_write, ERRNO); |
1535 | |
1536 | /* |
1537 | * In blackbox flight recorder like scenarios we want to make successful writes |
1538 | * in interrupt context. panic_write() is only intended to be called when it is |
1539 | * known the kernel is about to panic and we need the write to succeed. Since |
1540 | * the kernel is not going to be running for much longer, this function can |
1541 | * break locks and delay to ensure the write succeeds (but not sleep). |
1542 | */ |
1543 | int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, |
1544 | const u_char *buf) |
1545 | { |
1546 | struct mtd_info *master = mtd_get_master(mtd); |
1547 | |
1548 | *retlen = 0; |
1549 | if (!master->_panic_write) |
1550 | return -EOPNOTSUPP; |
1551 | if (to < 0 || to >= mtd->size || len > mtd->size - to) |
1552 | return -EINVAL; |
1553 | if (!(mtd->flags & MTD_WRITEABLE)) |
1554 | return -EROFS; |
1555 | if (!len) |
1556 | return 0; |
1557 | if (!master->oops_panic_write) |
1558 | master->oops_panic_write = true; |
1559 | |
1560 | return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len, |
1561 | retlen, buf); |
1562 | } |
1563 | EXPORT_SYMBOL_GPL(mtd_panic_write); |
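/*
 * Illustrative sketch (editor's example; "in_panic_path" is the caller's own
 * state flag): a blackbox/oops logger switches to mtd_panic_write() once the
 * system is dying, since a regular mtd_write() may sleep or take locks that
 * are no longer safe to take.
 *
 *	if (in_interrupt() || in_panic_path)
 *		err = mtd_panic_write(mtd, to, len, &retlen, buf);
 *	else
 *		err = mtd_write(mtd, to, len, &retlen, buf);
 */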
1564 | |
1565 | static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs, |
1566 | struct mtd_oob_ops *ops) |
1567 | { |
1568 | /* |
1569 | * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving |
1570 | * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in |
1571 | * this case. |
1572 | */ |
1573 | if (!ops->datbuf) |
1574 | ops->len = 0; |
1575 | |
1576 | if (!ops->oobbuf) |
1577 | ops->ooblen = 0; |
1578 | |
1579 | if (offs < 0 || offs + ops->len > mtd->size) |
1580 | return -EINVAL; |
1581 | |
1582 | if (ops->ooblen) { |
1583 | size_t maxooblen; |
1584 | |
1585 | if (ops->ooboffs >= mtd_oobavail(mtd, ops)) |
1586 | return -EINVAL; |
1587 | |
1588 | maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) - |
1589 | mtd_div_by_ws(offs, mtd)) * |
1590 | mtd_oobavail(mtd, ops)) - ops->ooboffs; |
1591 | if (ops->ooblen > maxooblen) |
1592 | return -EINVAL; |
1593 | } |
1594 | |
1595 | return 0; |
1596 | } |
1597 | |
1598 | static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from, |
1599 | struct mtd_oob_ops *ops) |
1600 | { |
1601 | struct mtd_info *master = mtd_get_master(mtd); |
1602 | int ret; |
1603 | |
1604 | from = mtd_get_master_ofs(mtd, from); |
1605 | if (master->_read_oob) |
1606 | ret = master->_read_oob(master, from, ops); |
1607 | else |
1608 | ret = master->_read(master, from, ops->len, &ops->retlen, |
1609 | ops->datbuf); |
1610 | |
1611 | return ret; |
1612 | } |
1613 | |
1614 | static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to, |
1615 | struct mtd_oob_ops *ops) |
1616 | { |
1617 | struct mtd_info *master = mtd_get_master(mtd); |
1618 | int ret; |
1619 | |
1620 | to = mtd_get_master_ofs(mtd, to); |
1621 | if (master->_write_oob) |
1622 | ret = master->_write_oob(master, to, ops); |
1623 | else |
1624 | ret = master->_write(master, to, ops->len, &ops->retlen, |
1625 | ops->datbuf); |
1626 | |
1627 | return ret; |
1628 | } |
1629 | |
1630 | static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read, |
1631 | struct mtd_oob_ops *ops) |
1632 | { |
1633 | struct mtd_info *master = mtd_get_master(mtd); |
1634 | int ngroups = mtd_pairing_groups(master); |
1635 | int npairs = mtd_wunit_per_eb(master) / ngroups; |
1636 | struct mtd_oob_ops adjops = *ops; |
1637 | unsigned int wunit, oobavail; |
1638 | struct mtd_pairing_info info; |
1639 | int max_bitflips = 0; |
1640 | u32 ebofs, pageofs; |
1641 | loff_t base, pos; |
1642 | |
1643 | ebofs = mtd_mod_by_eb(start, mtd); |
1644 | base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize; |
1645 | info.group = 0; |
1646 | info.pair = mtd_div_by_ws(ebofs, mtd); |
1647 | pageofs = mtd_mod_by_ws(ebofs, mtd); |
1648 | oobavail = mtd_oobavail(mtd, ops); |
1649 | |
1650 | while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) { |
1651 | int ret; |
1652 | |
1653 | if (info.pair >= npairs) { |
1654 | info.pair = 0; |
1655 | base += master->erasesize; |
1656 | } |
1657 | |
1658 | wunit = mtd_pairing_info_to_wunit(master, &info); |
1659 | pos = mtd_wunit_to_offset(mtd, base, wunit); |
1660 | |
1661 | adjops.len = ops->len - ops->retlen; |
1662 | if (adjops.len > mtd->writesize - pageofs) |
1663 | adjops.len = mtd->writesize - pageofs; |
1664 | |
1665 | adjops.ooblen = ops->ooblen - ops->oobretlen; |
1666 | if (adjops.ooblen > oobavail - adjops.ooboffs) |
1667 | adjops.ooblen = oobavail - adjops.ooboffs; |
1668 | |
1669 | if (read) { |
1670 | ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops); |
1671 | if (ret > 0) |
1672 | max_bitflips = max(max_bitflips, ret); |
1673 | } else { |
1674 | ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops); |
1675 | } |
1676 | |
1677 | if (ret < 0) |
1678 | return ret; |
1679 | |
1680 | max_bitflips = max(max_bitflips, ret); |
1681 | ops->retlen += adjops.retlen; |
1682 | ops->oobretlen += adjops.oobretlen; |
1683 | adjops.datbuf += adjops.retlen; |
1684 | adjops.oobbuf += adjops.oobretlen; |
1685 | adjops.ooboffs = 0; |
1686 | pageofs = 0; |
1687 | info.pair++; |
1688 | } |
1689 | |
1690 | return max_bitflips; |
1691 | } |
1692 | |
1693 | int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) |
1694 | { |
1695 | struct mtd_info *master = mtd_get_master(mtd); |
1696 | struct mtd_ecc_stats old_stats = master->ecc_stats; |
1697 | int ret_code; |
1698 | |
1699 | ops->retlen = ops->oobretlen = 0; |
1700 | |
1701 | ret_code = mtd_check_oob_ops(mtd, from, ops); |
1702 | if (ret_code) |
1703 | return ret_code; |
1704 | |
1705 | ledtrig_mtd_activity(); |
1706 | |
1707 | /* Check the validity of a potential fallback on mtd->_read */ |
1708 | if (!master->_read_oob && (!master->_read || ops->oobbuf)) |
1709 | return -EOPNOTSUPP; |
1710 | |
1711 | if (ops->stats) |
1712 | memset(ops->stats, 0, sizeof(*ops->stats)); |
1713 | |
1714 | if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) |
1715 | ret_code = mtd_io_emulated_slc(mtd, from, true, ops); |
1716 | else |
1717 | ret_code = mtd_read_oob_std(mtd, from, ops); |
1718 | |
1719 | mtd_update_ecc_stats(mtd, master, &old_stats); |
1720 | |
1721 | /* |
1722 | * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics |
1723 | * similar to mtd->_read(), returning a non-negative integer |
1724 | * representing max bitflips. In other cases, mtd->_read_oob() may |
1725 | * return -EUCLEAN. In all cases, perform similar logic to mtd_read(). |
1726 | */ |
1727 | if (unlikely(ret_code < 0)) |
1728 | return ret_code; |
1729 | if (mtd->ecc_strength == 0) |
1730 | return 0; /* device lacks ecc */ |
1731 | if (ops->stats) |
1732 | ops->stats->max_bitflips = ret_code; |
1733 | return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0; |
1734 | } |
1735 | EXPORT_SYMBOL_GPL(mtd_read_oob); |
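
/*
 * Example (illustrative only; 'databuf', 'oobbuf', 'offs' and the
 * schedule_scrub() helper are hypothetical caller-side names): a typical
 * user of mtd_read_oob() reads one page together with its free OOB bytes
 * and treats -EUCLEAN as "data still valid, but the block needs scrubbing":
 *
 *	struct mtd_oob_ops ops = {
 *		.mode	= MTD_OPS_AUTO_OOB,
 *		.len	= mtd->writesize,
 *		.ooblen	= mtd->oobavail,
 *		.datbuf	= databuf,
 *		.oobbuf	= oobbuf,
 *	};
 *	int err = mtd_read_oob(mtd, offs, &ops);
 *
 *	if (err == -EUCLEAN)
 *		schedule_scrub(offs);
 *	else if (err < 0)
 *		return err;
 */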
1736 | |
1737 | int mtd_write_oob(struct mtd_info *mtd, loff_t to, |
1738 | struct mtd_oob_ops *ops) |
1739 | { |
1740 | struct mtd_info *master = mtd_get_master(mtd); |
1741 | int ret; |
1742 | |
1743 | ops->retlen = ops->oobretlen = 0; |
1744 | |
1745 | if (!(mtd->flags & MTD_WRITEABLE)) |
1746 | return -EROFS; |
1747 | |
1748 | ret = mtd_check_oob_ops(mtd, to, ops);
1749 | if (ret) |
1750 | return ret; |
1751 | |
1752 | ledtrig_mtd_activity(); |
1753 | |
1754 | /* Check the validity of a potential fallback on mtd->_write */ |
1755 | if (!master->_write_oob && (!master->_write || ops->oobbuf)) |
1756 | return -EOPNOTSUPP; |
1757 | |
1758 | if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) |
1759 | return mtd_io_emulated_slc(mtd, to, false, ops);
1760 | |
1761 | return mtd_write_oob_std(mtd, to, ops); |
1762 | } |
1763 | EXPORT_SYMBOL_GPL(mtd_write_oob); |
1764 | |
1765 | /** |
1766 | * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section |
1767 | * @mtd: MTD device structure |
1768 | * @section: ECC section. Depending on the layout you may have all the ECC |
1769 | * bytes stored in a single contiguous section, or one section |
1770 | * per ECC chunk (and sometimes several sections for a single
1771 | * ECC chunk)
1772 | * @oobecc: OOB region struct filled with the appropriate ECC position |
1773 | * information |
1774 | * |
1775 | * This function returns ECC section information in the OOB area. If you want |
1776 | * to get all the ECC bytes information, then you should call |
1777 | * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE. |
1778 | * |
1779 | * Returns zero on success, a negative error code otherwise. |
1780 | */ |
1781 | int mtd_ooblayout_ecc(struct mtd_info *mtd, int section, |
1782 | struct mtd_oob_region *oobecc) |
1783 | { |
1784 | struct mtd_info *master = mtd_get_master(mtd); |
1785 | |
1786 | memset(oobecc, 0, sizeof(*oobecc)); |
1787 | |
1788 | if (!master || section < 0) |
1789 | return -EINVAL; |
1790 | |
1791 | if (!master->ooblayout || !master->ooblayout->ecc) |
1792 | return -ENOTSUPP; |
1793 | |
1794 | return master->ooblayout->ecc(master, section, oobecc); |
1795 | } |
1796 | EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc); |
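
/*
 * Example (illustrative only): walking every ECC section as described above,
 * stopping once the layout callback returns -ERANGE:
 *
 *	struct mtd_oob_region oobecc;
 *	int section = 0;
 *
 *	while (!mtd_ooblayout_ecc(mtd, section, &oobecc)) {
 *		pr_debug("ECC section %d: offset %u, length %u\n",
 *			 section, oobecc.offset, oobecc.length);
 *		section++;
 *	}
 */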
1797 | |
1798 | /** |
1799 | * mtd_ooblayout_free - Get the OOB region definition of a specific free |
1800 | * section |
1801 | * @mtd: MTD device structure |
1802 | * @section: Free section you are interested in. Depending on the layout |
1803 | * you may have all the free bytes stored in a single contiguous |
1804 | * section, or one section per ECC chunk plus an extra section |
1805 | * for the remaining bytes (or other funky layout). |
1806 | * @oobfree: OOB region struct filled with the appropriate free position |
1807 | * information |
1808 | * |
1809 | * This function returns free bytes position in the OOB area. If you want |
1810 | * to get all the free bytes information, then you should call |
1811 | * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE. |
1812 | * |
1813 | * Returns zero on success, a negative error code otherwise. |
1814 | */ |
1815 | int mtd_ooblayout_free(struct mtd_info *mtd, int section, |
1816 | struct mtd_oob_region *oobfree) |
1817 | { |
1818 | struct mtd_info *master = mtd_get_master(mtd); |
1819 | |
1820 | memset(oobfree, 0, sizeof(*oobfree)); |
1821 | |
1822 | if (!master || section < 0) |
1823 | return -EINVAL; |
1824 | |
1825 | if (!master->ooblayout || !master->ooblayout->free) |
1826 | return -ENOTSUPP; |
1827 | |
1828 | return master->ooblayout->free(master, section, oobfree); |
1829 | } |
1830 | EXPORT_SYMBOL_GPL(mtd_ooblayout_free); |
1831 | |
1832 | /** |
1833 | * mtd_ooblayout_find_region - Find the region attached to a specific byte |
1834 | * @mtd: mtd info structure |
1835 | * @byte: the byte we are searching for |
1836 | * @sectionp: pointer where the section id will be stored |
1837 | * @oobregion: used to retrieve the ECC position |
1838 | * @iter: iterator function. Should be either mtd_ooblayout_free or |
1839 | * mtd_ooblayout_ecc depending on the region type you're searching for |
1840 | * |
1841 | * This function returns the section id and oobregion information of a |
1842 | * specific byte. For example, say you want to know where the 4th ECC byte is |
1843 | * stored, you'll use: |
1844 | * |
1845 | * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
1846 | * |
1847 | * Returns zero on success, a negative error code otherwise. |
1848 | */ |
1849 | static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte, |
1850 | int *sectionp, struct mtd_oob_region *oobregion, |
1851 | int (*iter)(struct mtd_info *, |
1852 | int section, |
1853 | struct mtd_oob_region *oobregion)) |
1854 | { |
1855 | int pos = 0, ret, section = 0; |
1856 | |
1857 | memset(oobregion, 0, sizeof(*oobregion)); |
1858 | |
1859 | while (1) { |
1860 | ret = iter(mtd, section, oobregion); |
1861 | if (ret) |
1862 | return ret; |
1863 | |
1864 | if (pos + oobregion->length > byte) |
1865 | break; |
1866 | |
1867 | pos += oobregion->length; |
1868 | section++; |
1869 | } |
1870 | |
1871 | /* |
1872 | * Adjust region info to make it start at the beginning of the
1873 | * requested byte.
1874 | */ |
1875 | oobregion->offset += byte - pos; |
1876 | oobregion->length -= byte - pos; |
1877 | *sectionp = section; |
1878 | |
1879 | return 0; |
1880 | } |
1881 | |
1882 | /** |
1883 | * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific |
1884 | * ECC byte |
1885 | * @mtd: mtd info structure |
1886 | * @eccbyte: the byte we are searching for |
1887 | * @section: pointer where the section id will be stored |
1888 | * @oobregion: OOB region information |
1889 | * |
1890 | * Works like mtd_ooblayout_find_region() except it searches for a specific ECC |
1891 | * byte. |
1892 | * |
1893 | * Returns zero on success, a negative error code otherwise. |
1894 | */ |
1895 | int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte, |
1896 | int *section, |
1897 | struct mtd_oob_region *oobregion) |
1898 | { |
1899 | return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
1900 | mtd_ooblayout_ecc);
1901 | } |
1902 | EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion); |
1903 | |
1904 | /** |
1905 | * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer |
1906 | * @mtd: mtd info structure |
1907 | * @buf: destination buffer to store OOB bytes |
1908 | * @oobbuf: OOB buffer |
1909 | * @start: first byte to retrieve |
1910 | * @nbytes: number of bytes to retrieve |
1911 | * @iter: section iterator |
1912 | * |
1913 | * Extract bytes attached to a specific category (ECC or free) |
1914 | * from the OOB buffer and copy them into buf. |
1915 | * |
1916 | * Returns zero on success, a negative error code otherwise. |
1917 | */ |
1918 | static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf, |
1919 | const u8 *oobbuf, int start, int nbytes, |
1920 | int (*iter)(struct mtd_info *, |
1921 | int section, |
1922 | struct mtd_oob_region *oobregion)) |
1923 | { |
1924 | struct mtd_oob_region oobregion; |
1925 | int section, ret; |
1926 | |
1927 | ret = mtd_ooblayout_find_region(mtd, start, &section,
1928 | &oobregion, iter);
1929 | |
1930 | while (!ret) { |
1931 | int cnt; |
1932 | |
1933 | cnt = min_t(int, nbytes, oobregion.length); |
1934 | memcpy(buf, oobbuf + oobregion.offset, cnt); |
1935 | buf += cnt; |
1936 | nbytes -= cnt; |
1937 | |
1938 | if (!nbytes) |
1939 | break; |
1940 | |
1941 | ret = iter(mtd, ++section, &oobregion); |
1942 | } |
1943 | |
1944 | return ret; |
1945 | } |
1946 | |
1947 | /** |
1948 | * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer |
1949 | * @mtd: mtd info structure |
1950 | * @buf: source buffer to get OOB bytes from |
1951 | * @oobbuf: OOB buffer |
1952 | * @start: first OOB byte to set |
1953 | * @nbytes: number of OOB bytes to set |
1954 | * @iter: section iterator |
1955 | * |
1956 | * Fill the OOB buffer with data provided in buf. The category (ECC or free) |
1957 | * is selected by passing the appropriate iterator. |
1958 | * |
1959 | * Returns zero on success, a negative error code otherwise. |
1960 | */ |
1961 | static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf, |
1962 | u8 *oobbuf, int start, int nbytes, |
1963 | int (*iter)(struct mtd_info *, |
1964 | int section, |
1965 | struct mtd_oob_region *oobregion)) |
1966 | { |
1967 | struct mtd_oob_region oobregion; |
1968 | int section, ret; |
1969 | |
1970 | ret = mtd_ooblayout_find_region(mtd, start, &section,
1971 | &oobregion, iter);
1972 | |
1973 | while (!ret) { |
1974 | int cnt; |
1975 | |
1976 | cnt = min_t(int, nbytes, oobregion.length); |
1977 | memcpy(oobbuf + oobregion.offset, buf, cnt); |
1978 | buf += cnt; |
1979 | nbytes -= cnt; |
1980 | |
1981 | if (!nbytes) |
1982 | break; |
1983 | |
1984 | ret = iter(mtd, ++section, &oobregion); |
1985 | } |
1986 | |
1987 | return ret; |
1988 | } |
1989 | |
1990 | /** |
1991 | * mtd_ooblayout_count_bytes - count the number of bytes in a OOB category |
1992 | * @mtd: mtd info structure |
1993 | * @iter: category iterator |
1994 | * |
1995 | * Count the number of bytes in a given category. |
1996 | * |
1997 | * Returns a positive value on success, a negative error code otherwise. |
1998 | */ |
1999 | static int mtd_ooblayout_count_bytes(struct mtd_info *mtd, |
2000 | int (*iter)(struct mtd_info *, |
2001 | int section, |
2002 | struct mtd_oob_region *oobregion)) |
2003 | { |
2004 | struct mtd_oob_region oobregion; |
2005 | int section = 0, ret, nbytes = 0; |
2006 | |
2007 | while (1) { |
2008 | ret = iter(mtd, section++, &oobregion); |
2009 | if (ret) { |
2010 | if (ret == -ERANGE) |
2011 | ret = nbytes; |
2012 | break; |
2013 | } |
2014 | |
2015 | nbytes += oobregion.length; |
2016 | } |
2017 | |
2018 | return ret; |
2019 | } |
2020 | |
2021 | /** |
2022 | * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer |
2023 | * @mtd: mtd info structure |
2024 | * @eccbuf: destination buffer to store ECC bytes |
2025 | * @oobbuf: OOB buffer |
2026 | * @start: first ECC byte to retrieve |
2027 | * @nbytes: number of ECC bytes to retrieve |
2028 | * |
2029 | * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes. |
2030 | * |
2031 | * Returns zero on success, a negative error code otherwise. |
2032 | */ |
2033 | int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf, |
2034 | const u8 *oobbuf, int start, int nbytes) |
2035 | { |
2036 | return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
2037 | mtd_ooblayout_ecc);
2038 | } |
2039 | EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes); |
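
/*
 * Example (illustrative only; 'oobbuf' and 'eccbuf' are hypothetical
 * caller-provided buffers): after a raw OOB read, the ECC engine's bytes
 * can be extracted from the OOB buffer according to the layout:
 *
 *	int necc = mtd_ooblayout_count_eccbytes(mtd);
 *
 *	if (necc > 0) {
 *		int err = mtd_ooblayout_get_eccbytes(mtd, eccbuf, oobbuf,
 *						     0, necc);
 *		if (err)
 *			return err;
 *	}
 */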
2040 | |
2041 | /** |
2042 | * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer |
2043 | * @mtd: mtd info structure |
2044 | * @eccbuf: source buffer to get ECC bytes from |
2045 | * @oobbuf: OOB buffer |
2046 | * @start: first ECC byte to set |
2047 | * @nbytes: number of ECC bytes to set |
2048 | * |
2049 | * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes. |
2050 | * |
2051 | * Returns zero on success, a negative error code otherwise. |
2052 | */ |
2053 | int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf, |
2054 | u8 *oobbuf, int start, int nbytes) |
2055 | { |
2056 | return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
2057 | mtd_ooblayout_ecc);
2058 | } |
2059 | EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes); |
2060 | |
2061 | /** |
2062 | * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer |
2063 | * @mtd: mtd info structure |
2064 | * @databuf: destination buffer to store data bytes
2065 | * @oobbuf: OOB buffer
2066 | * @start: first data byte to retrieve
2067 | * @nbytes: number of data bytes to retrieve
2068 | * |
2069 | * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes. |
2070 | * |
2071 | * Returns zero on success, a negative error code otherwise. |
2072 | */ |
2073 | int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf, |
2074 | const u8 *oobbuf, int start, int nbytes) |
2075 | { |
2076 | return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
2077 | mtd_ooblayout_free);
2078 | } |
2079 | EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes); |
2080 | |
2081 | /** |
2082 | * mtd_ooblayout_set_databytes - set data bytes into the oob buffer |
2083 | * @mtd: mtd info structure |
2084 | * @databuf: source buffer to get data bytes from |
2085 | * @oobbuf: OOB buffer |
2086 | * @start: first data byte to set
2087 | * @nbytes: number of data bytes to set
2088 | * |
2089 | * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes. |
2090 | * |
2091 | * Returns zero on success, a negative error code otherwise. |
2092 | */ |
2093 | int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf, |
2094 | u8 *oobbuf, int start, int nbytes) |
2095 | { |
2096 | return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
2097 | mtd_ooblayout_free);
2098 | } |
2099 | EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes); |
2100 | |
2101 | /** |
2102 | * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB |
2103 | * @mtd: mtd info structure |
2104 | * |
2105 | * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
2106 | *
2107 | * Returns the number of free bytes on success, a negative error code otherwise.
2108 | */ |
2109 | int mtd_ooblayout_count_freebytes(struct mtd_info *mtd) |
2110 | { |
2111 | return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
2112 | } |
2113 | EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes); |
2114 | |
2115 | /** |
2116 | * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB |
2117 | * @mtd: mtd info structure |
2118 | * |
2119 | * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
2120 | *
2121 | * Returns the number of ECC bytes on success, a negative error code otherwise.
2122 | */ |
2123 | int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd) |
2124 | { |
2125 | return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
2126 | } |
2127 | EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes); |
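
/*
 * Example (illustrative only): the two counters above let a caller split the
 * OOB area into its ECC and free budgets, e.g. to sanity-check a layout:
 *
 *	int eccbytes = mtd_ooblayout_count_eccbytes(mtd);
 *	int freebytes = mtd_ooblayout_count_freebytes(mtd);
 *
 *	if (eccbytes < 0 || freebytes < 0 ||
 *	    eccbytes + freebytes > mtd->oobsize)
 *		pr_warn("inconsistent OOB layout\n");
 */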
2128 | |
2129 | /* |
2130 | * Method to access the protection register area, present in some flash |
2131 | * devices. The user data is one-time programmable but the factory data is read
2132 | * only. |
2133 | */ |
2134 | int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen, |
2135 | struct otp_info *buf) |
2136 | { |
2137 | struct mtd_info *master = mtd_get_master(mtd); |
2138 | |
2139 | if (!master->_get_fact_prot_info) |
2140 | return -EOPNOTSUPP; |
2141 | if (!len) |
2142 | return 0; |
2143 | return master->_get_fact_prot_info(master, len, retlen, buf); |
2144 | } |
2145 | EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info); |
2146 | |
2147 | int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, |
2148 | size_t *retlen, u_char *buf) |
2149 | { |
2150 | struct mtd_info *master = mtd_get_master(mtd); |
2151 | |
2152 | *retlen = 0; |
2153 | if (!master->_read_fact_prot_reg) |
2154 | return -EOPNOTSUPP; |
2155 | if (!len) |
2156 | return 0; |
2157 | return master->_read_fact_prot_reg(master, from, len, retlen, buf); |
2158 | } |
2159 | EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg); |
2160 | |
2161 | int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen, |
2162 | struct otp_info *buf) |
2163 | { |
2164 | struct mtd_info *master = mtd_get_master(mtd); |
2165 | |
2166 | if (!master->_get_user_prot_info) |
2167 | return -EOPNOTSUPP; |
2168 | if (!len) |
2169 | return 0; |
2170 | return master->_get_user_prot_info(master, len, retlen, buf); |
2171 | } |
2172 | EXPORT_SYMBOL_GPL(mtd_get_user_prot_info); |
2173 | |
2174 | int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, |
2175 | size_t *retlen, u_char *buf) |
2176 | { |
2177 | struct mtd_info *master = mtd_get_master(mtd); |
2178 | |
2179 | *retlen = 0; |
2180 | if (!master->_read_user_prot_reg) |
2181 | return -EOPNOTSUPP; |
2182 | if (!len) |
2183 | return 0; |
2184 | return master->_read_user_prot_reg(master, from, len, retlen, buf); |
2185 | } |
2186 | EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg); |
2187 | |
2188 | int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len, |
2189 | size_t *retlen, const u_char *buf) |
2190 | { |
2191 | struct mtd_info *master = mtd_get_master(mtd); |
2192 | int ret; |
2193 | |
2194 | *retlen = 0; |
2195 | if (!master->_write_user_prot_reg) |
2196 | return -EOPNOTSUPP; |
2197 | if (!len) |
2198 | return 0; |
2199 | ret = master->_write_user_prot_reg(master, to, len, retlen, buf); |
2200 | if (ret) |
2201 | return ret; |
2202 | |
2203 | /* |
2204 | * If no data could be written at all, the user OTP area is exhausted
2205 | * and we must return -ENOSPC.
2206 | */ |
2207 | return (*retlen) ? 0 : -ENOSPC; |
2208 | } |
2209 | EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg); |
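
/*
 * Example (illustrative only; 'buf' is a hypothetical caller buffer, the
 * array size of 8 is arbitrary, and the device is assumed to report at
 * least one user OTP region): reading back the first user OTP region:
 *
 *	struct otp_info info[8];
 *	size_t retlen;
 *	int err;
 *
 *	err = mtd_get_user_prot_info(mtd, sizeof(info), &retlen, info);
 *	if (err)
 *		return err;
 *
 *	err = mtd_read_user_prot_reg(mtd, info[0].start, info[0].length,
 *				     &retlen, buf);
 */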
2210 | |
2211 | int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len) |
2212 | { |
2213 | struct mtd_info *master = mtd_get_master(mtd); |
2214 | |
2215 | if (!master->_lock_user_prot_reg) |
2216 | return -EOPNOTSUPP; |
2217 | if (!len) |
2218 | return 0; |
2219 | return master->_lock_user_prot_reg(master, from, len); |
2220 | } |
2221 | EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg); |
2222 | |
2223 | int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len) |
2224 | { |
2225 | struct mtd_info *master = mtd_get_master(mtd); |
2226 | |
2227 | if (!master->_erase_user_prot_reg) |
2228 | return -EOPNOTSUPP; |
2229 | if (!len) |
2230 | return 0; |
2231 | return master->_erase_user_prot_reg(master, from, len); |
2232 | } |
2233 | EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg); |
2234 | |
2235 | /* Chip-supported device locking */ |
2236 | int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
2237 | { |
2238 | struct mtd_info *master = mtd_get_master(mtd); |
2239 | |
2240 | if (!master->_lock) |
2241 | return -EOPNOTSUPP; |
2242 | if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs) |
2243 | return -EINVAL; |
2244 | if (!len) |
2245 | return 0; |
2246 | |
2247 | if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) { |
2248 | ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2249 | len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2250 | } |
2251 | |
2252 | return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len); |
2253 | } |
2254 | EXPORT_SYMBOL_GPL(mtd_lock); |
2255 | |
2256 | int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
2257 | { |
2258 | struct mtd_info *master = mtd_get_master(mtd); |
2259 | |
2260 | if (!master->_unlock) |
2261 | return -EOPNOTSUPP; |
2262 | if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs) |
2263 | return -EINVAL; |
2264 | if (!len) |
2265 | return 0; |
2266 | |
2267 | if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) { |
2268 | ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2269 | len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2270 | } |
2271 | |
2272 | return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len); |
2273 | } |
2274 | EXPORT_SYMBOL_GPL(mtd_unlock); |
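
/*
 * Example (illustrative only; 'ofs', 'len' and 'buf' are hypothetical): NOR
 * flash often powers up with its blocks locked, so a writer typically
 * unlocks the range first and may re-lock it afterwards:
 *
 *	size_t retlen;
 *	int err;
 *
 *	err = mtd_unlock(mtd, ofs, len);
 *	if (err && err != -EOPNOTSUPP)
 *		return err;
 *
 *	err = mtd_write(mtd, ofs, len, &retlen, buf);
 */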
2275 | |
2276 | int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
2277 | { |
2278 | struct mtd_info *master = mtd_get_master(mtd); |
2279 | |
2280 | if (!master->_is_locked) |
2281 | return -EOPNOTSUPP; |
2282 | if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs) |
2283 | return -EINVAL; |
2284 | if (!len) |
2285 | return 0; |
2286 | |
2287 | if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) { |
2288 | ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2289 | len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2290 | } |
2291 | |
2292 | return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len); |
2293 | } |
2294 | EXPORT_SYMBOL_GPL(mtd_is_locked); |
2295 | |
2296 | int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs) |
2297 | { |
2298 | struct mtd_info *master = mtd_get_master(mtd); |
2299 | |
2300 | if (ofs < 0 || ofs >= mtd->size) |
2301 | return -EINVAL; |
2302 | if (!master->_block_isreserved) |
2303 | return 0; |
2304 | |
2305 | if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) |
2306 | ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2307 | |
2308 | return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs)); |
2309 | } |
2310 | EXPORT_SYMBOL_GPL(mtd_block_isreserved); |
2311 | |
2312 | int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs) |
2313 | { |
2314 | struct mtd_info *master = mtd_get_master(mtd); |
2315 | |
2316 | if (ofs < 0 || ofs >= mtd->size) |
2317 | return -EINVAL; |
2318 | if (!master->_block_isbad) |
2319 | return 0; |
2320 | |
2321 | if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) |
2322 | ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2323 | |
2324 | return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs)); |
2325 | } |
2326 | EXPORT_SYMBOL_GPL(mtd_block_isbad); |
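
/*
 * Example (illustrative only): scanning a device or partition for bad
 * eraseblocks, one block at a time:
 *
 *	loff_t ofs;
 *
 *	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize) {
 *		int ret = mtd_block_isbad(mtd, ofs);
 *
 *		if (ret < 0)
 *			return ret;
 *		if (ret)
 *			pr_debug("bad block at 0x%llx\n",
 *				 (unsigned long long)ofs);
 *	}
 */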
2327 | |
2328 | int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs) |
2329 | { |
2330 | struct mtd_info *master = mtd_get_master(mtd); |
2331 | int ret; |
2332 | |
2333 | if (!master->_block_markbad) |
2334 | return -EOPNOTSUPP; |
2335 | if (ofs < 0 || ofs >= mtd->size) |
2336 | return -EINVAL; |
2337 | if (!(mtd->flags & MTD_WRITEABLE)) |
2338 | return -EROFS; |
2339 | |
2340 | if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) |
2341 | ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2342 | |
2343 | ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs)); |
2344 | if (ret) |
2345 | return ret; |
2346 | |
2347 | while (mtd->parent) { |
2348 | mtd->ecc_stats.badblocks++; |
2349 | mtd = mtd->parent; |
2350 | } |
2351 | |
2352 | return 0; |
2353 | } |
2354 | EXPORT_SYMBOL_GPL(mtd_block_markbad); |
2355 | ALLOW_ERROR_INJECTION(mtd_block_markbad, ERRNO); |
2356 | |
2357 | /* |
2358 | * default_mtd_writev - the default writev method |
2359 | * @mtd: mtd device description object pointer |
2360 | * @vecs: the vectors to write |
2361 | * @count: count of vectors in @vecs |
2362 | * @to: the MTD device offset to write to |
2363 | * @retlen: on exit contains the count of bytes written to the MTD device. |
2364 | * |
2365 | * This function returns zero in case of success and a negative error code in |
2366 | * case of failure. |
2367 | */ |
2368 | static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs, |
2369 | unsigned long count, loff_t to, size_t *retlen) |
2370 | { |
2371 | unsigned long i; |
2372 | size_t totlen = 0, thislen; |
2373 | int ret = 0; |
2374 | |
2375 | for (i = 0; i < count; i++) { |
2376 | if (!vecs[i].iov_len) |
2377 | continue; |
2378 | ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen, |
2379 | vecs[i].iov_base); |
2380 | totlen += thislen; |
2381 | if (ret || thislen != vecs[i].iov_len) |
2382 | break; |
2383 | to += vecs[i].iov_len; |
2384 | } |
2385 | *retlen = totlen; |
2386 | return ret; |
2387 | } |
2388 | |
2389 | /* |
2390 | * mtd_writev - the vector-based MTD write method |
2391 | * @mtd: mtd device description object pointer |
2392 | * @vecs: the vectors to write |
2393 | * @count: count of vectors in @vecs |
2394 | * @to: the MTD device offset to write to |
2395 | * @retlen: on exit contains the count of bytes written to the MTD device. |
2396 | * |
2397 | * This function returns zero in case of success and a negative error code in |
2398 | * case of failure. |
2399 | */ |
2400 | int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs, |
2401 | unsigned long count, loff_t to, size_t *retlen) |
2402 | { |
2403 | struct mtd_info *master = mtd_get_master(mtd); |
2404 | |
2405 | *retlen = 0; |
2406 | if (!(mtd->flags & MTD_WRITEABLE)) |
2407 | return -EROFS; |
2408 | |
2409 | if (!master->_writev) |
2410 | return default_mtd_writev(mtd, vecs, count, to, retlen); |
2411 | |
2412 | return master->_writev(master, vecs, count, |
2413 | mtd_get_master_ofs(mtd, to), retlen);
2414 | } |
2415 | EXPORT_SYMBOL_GPL(mtd_writev); |
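
/*
 * Example (illustrative only; 'hdr', 'payload', 'payload_len' and 'to' are
 * hypothetical): writing a header and a payload with a single call:
 *
 *	struct kvec vecs[2] = {
 *		{ .iov_base = hdr,	.iov_len = sizeof(*hdr) },
 *		{ .iov_base = payload,	.iov_len = payload_len },
 *	};
 *	size_t retlen;
 *	int err = mtd_writev(mtd, vecs, ARRAY_SIZE(vecs), to, &retlen);
 */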
2416 | |
2417 | /** |
2418 | * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size |
2419 | * @mtd: mtd device description object pointer |
2420 | * @size: a pointer to the ideal or maximum size of the allocation, points |
2421 | * to the actual allocation size on success. |
2422 | * |
2423 | * This routine attempts to allocate a contiguous kernel buffer up to |
2424 | * the specified size, backing off the size of the request exponentially |
2425 | * until the request succeeds or until the allocation size falls below |
2426 | * the system page size. This attempts to make sure it does not adversely |
2427 | * impact system performance, so when allocating more than one page, we |
2428 | * ask the memory allocator to avoid re-trying, swapping, writing back |
2429 | * or performing I/O. |
2430 | * |
2431 | * Note, this function also makes sure that the allocated buffer is aligned to |
2432 | * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value. |
2433 | * |
2434 | * This is called, for example by mtd_{read,write} and jffs2_scan_medium, |
2435 | * to handle smaller (i.e. degraded) buffer allocations under low- or |
2436 | * fragmented-memory situations where such reduced allocations, from a |
2437 | * requested ideal, are allowed. |
2438 | * |
2439 | * Returns a pointer to the allocated buffer on success; otherwise, NULL. |
2440 | */ |
2441 | void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size) |
2442 | { |
2443 | gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY; |
2444 | size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE); |
2445 | void *kbuf; |
2446 | |
2447 | *size = min_t(size_t, *size, KMALLOC_MAX_SIZE); |
2448 | |
2449 | while (*size > min_alloc) { |
2450 | kbuf = kmalloc(*size, flags);
2451 | if (kbuf) |
2452 | return kbuf; |
2453 | |
2454 | *size >>= 1; |
2455 | *size = ALIGN(*size, mtd->writesize); |
2456 | } |
2457 | |
2458 | /* |
2459 | * For the last resort allocation allow 'kmalloc()' to do all sorts of |
2460 | * things (write-back, dropping caches, etc) by using GFP_KERNEL. |
2461 | */ |
2462 | return kmalloc(*size, GFP_KERNEL);
2463 | } |
2464 | EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to); |
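
/*
 * Example (illustrative only): ask for one eraseblock worth of memory,
 * accept whatever smaller contiguous size the allocator can provide, and
 * issue I/O in chunks of at most that size:
 *
 *	size_t size = mtd->erasesize;
 *	void *buf = mtd_kmalloc_up_to(mtd, &size);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	... read or write at most 'size' bytes per call ...
 *	kfree(buf);
 */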
2465 | |
2466 | #ifdef CONFIG_PROC_FS |
2467 | |
2468 | /*====================================================================*/ |
2469 | /* Support for /proc/mtd */ |
2470 | |
2471 | static int mtd_proc_show(struct seq_file *m, void *v) |
2472 | { |
2473 | struct mtd_info *mtd; |
2474 | |
2475 | seq_puts(m, "dev:    size   erasesize  name\n");
2476 | mutex_lock(&mtd_table_mutex); |
2477 | mtd_for_each_device(mtd) { |
2478 | seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
2479 | mtd->index, (unsigned long long)mtd->size, |
2480 | mtd->erasesize, mtd->name); |
2481 | } |
2482 | mutex_unlock(&mtd_table_mutex);
2483 | return 0; |
2484 | } |
2485 | #endif /* CONFIG_PROC_FS */ |
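
/*
 * For reference, the resulting /proc/mtd output looks roughly like this
 * (device names and sizes are example values):
 *
 *	dev:    size   erasesize  name
 *	mtd0: 00100000 00020000 "u-boot"
 *	mtd1: 0ff00000 00020000 "rootfs"
 */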
2486 | |
2487 | /*====================================================================*/ |
2488 | /* Init code */ |
2489 | |
2490 | static struct backing_dev_info * __init mtd_bdi_init(const char *name) |
2491 | { |
2492 | struct backing_dev_info *bdi; |
2493 | int ret; |
2494 | |
2495 | bdi = bdi_alloc(NUMA_NO_NODE); |
2496 | if (!bdi) |
2497 | return ERR_PTR(-ENOMEM);
2498 | bdi->ra_pages = 0; |
2499 | bdi->io_pages = 0; |
2500 | |
2501 | /* |
2502 | * We append a '-0' suffix to the name to preserve the name format used
2503 | * previously. Since this is called only once, the name is unique.
2504 | */ |
2505 | ret = bdi_register(bdi, "%.28s-0", name);
2506 | if (ret) |
2507 | bdi_put(bdi); |
2508 | |
2509 | return ret ? ERR_PTR(ret) : bdi;
2510 | } |
2511 | |
2512 | static struct proc_dir_entry *proc_mtd; |
2513 | |
2514 | static int __init init_mtd(void) |
2515 | { |
2516 | int ret; |
2517 | |
2518 | ret = class_register(&mtd_class);
2519 | if (ret) |
2520 | goto err_reg; |
2521 | |
2522 | mtd_bdi = mtd_bdi_init("mtd");
2523 | if (IS_ERR(mtd_bdi)) {
2524 | ret = PTR_ERR(mtd_bdi);
2525 | goto err_bdi; |
2526 | } |
2527 | |
2528 | proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);
2529 | |
2530 | ret = init_mtdchar(); |
2531 | if (ret) |
2532 | goto out_procfs; |
2533 | |
2534 | dfs_dir_mtd = debugfs_create_dir("mtd", NULL);
2535 | debugfs_create_bool("expert_analysis_mode", 0600, dfs_dir_mtd,
2536 | &mtd_expert_analysis_mode);
2537 | |
2538 | return 0; |
2539 | |
2540 | out_procfs: |
2541 | if (proc_mtd) |
2542 | remove_proc_entry("mtd", NULL);
2543 | bdi_unregister(mtd_bdi);
2544 | bdi_put(mtd_bdi);
2545 | err_bdi: |
2546 | class_unregister(&mtd_class);
2547 | err_reg: |
2548 | pr_err("Error registering mtd class or bdi: %d\n", ret);
2549 | return ret; |
2550 | } |
2551 | |
2552 | static void __exit cleanup_mtd(void) |
2553 | { |
2554 | debugfs_remove_recursive(dfs_dir_mtd);
2555 | cleanup_mtdchar(); |
2556 | if (proc_mtd) |
2557 | remove_proc_entry("mtd", NULL);
2558 | class_unregister(&mtd_class);
2559 | bdi_unregister(mtd_bdi);
2560 | bdi_put(mtd_bdi);
2561 | idr_destroy(&mtd_idr); |
2562 | } |
2563 | |
2564 | module_init(init_mtd); |
2565 | module_exit(cleanup_mtd); |
2566 | |
2567 | MODULE_LICENSE("GPL");
2568 | MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
2569 | MODULE_DESCRIPTION("Core MTD registration and access routines");
2570 | |