// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>

struct nvmem_device {
	struct module *owner;
	struct device dev;
	int stride;
	int word_size;
	int id;
	struct kref refcnt;
	size_t size;
	bool read_only;
	bool root_only;
	int flags;
	enum nvmem_type type;
	struct bin_attribute eeprom;
	struct device *base_dev;
	struct list_head cells;
	const struct nvmem_keepout *keepout;
	unsigned int nkeepout;
	nvmem_reg_read_t reg_read;
	nvmem_reg_write_t reg_write;
	struct gpio_desc *wp_gpio;
	struct nvmem_layout *layout;
	void *priv;
};

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

#define FLAG_COMPAT BIT(0)
struct nvmem_cell_entry {
	const char *name;
	int offset;
	size_t raw_len;
	int bytes;
	int bit_offset;
	int nbits;
	nvmem_cell_post_process_t read_post_process;
	void *priv;
	struct device_node *np;
	struct nvmem_device *nvmem;
	struct list_head node;
};

struct nvmem_cell {
	struct nvmem_cell_entry *entry;
	const char *id;
	int index;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static DEFINE_SPINLOCK(nvmem_layout_lock);
static LIST_HEAD(nvmem_layouts);

static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			    void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			     void *val, size_t bytes)
{
	int ret;

	if (nvmem->reg_write) {
		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
		return ret;
	}

	return -EINVAL;
}

static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
				      unsigned int offset, void *val,
				      size_t bytes, int write)
{
	unsigned int end = offset + bytes;
	unsigned int kend, ksize;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
	int rc;

	/*
	 * Skip all keepouts before the range being accessed.
	 * Keepouts are sorted.
	 */
	while ((keepout < keepoutend) && (keepout->end <= offset))
		keepout++;

	while ((offset < end) && (keepout < keepoutend)) {
		/* Access the valid portion before the keepout. */
		if (offset < keepout->start) {
			kend = min(end, keepout->start);
			ksize = kend - offset;
			if (write)
				rc = __nvmem_reg_write(nvmem, offset, val, ksize);
			else
				rc = __nvmem_reg_read(nvmem, offset, val, ksize);

			if (rc)
				return rc;

			offset += ksize;
			val += ksize;
		}

		/*
		 * Now we're aligned to the start of this keepout zone. Go
		 * through it.
		 */
		kend = min(end, keepout->end);
		ksize = kend - offset;
		if (!write)
			memset(val, keepout->value, ksize);

		val += ksize;
		offset += ksize;
		keepout++;
	}

	/*
	 * If we ran out of keepouts but there's still stuff to do, send it
	 * down directly
	 */
	if (offset < end) {
		ksize = end - offset;
		if (write)
			return __nvmem_reg_write(nvmem, offset, val, ksize);
		else
			return __nvmem_reg_read(nvmem, offset, val, ksize);
	}

	return 0;
}

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_read(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_write(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
}
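
/*
 * Editor's illustrative sketch (not part of the original file): a provider
 * with a factory-locked span at bytes 0x10-0x1f could describe it with a
 * keepout entry like the one below. Reads of that span then return 0xff
 * instead of touching the device, and writes silently skip it. The array
 * name is hypothetical.
 */
static const struct nvmem_keepout example_keepouts[] __maybe_unused = {
	{ .start = 0x10, .end = 0x20, .value = 0xff },	/* end is exclusive */
};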

#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
	[NVMEM_TYPE_FRAM] = "FRAM",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

static struct attribute *nvmem_attrs[] = {
	&dev_attr_type.attr,
	NULL,
};

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf,
				   loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_read)
		return -EPERM;

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return -EFBIG;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_write)
		return -EPERM;

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
	umode_t mode = 0400;

	if (!nvmem->root_only)
		mode |= 0044;

	if (!nvmem->read_only)
		mode |= 0200;

	if (!nvmem->reg_write)
		mode &= ~0200;

	if (!nvmem->reg_read)
		mode &= ~0444;

	return mode;
}
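
/*
 * Editor's note (not part of the original file): the resulting mode is the
 * intersection of policy and capability. For example, a device with both
 * reg_read and reg_write hooks and root_only unset yields 0644; setting
 * root_only withholds the 0044 group/other read bits, giving 0600; and a
 * missing reg_write hook clears the write bit regardless of read_only.
 */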

static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
					 struct bin_attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	attr->size = nvmem->size;

	return nvmem_bin_attr_get_umode(nvmem);
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr = {
		.name = "nvmem",
		.mode = 0644,
	},
	.read = bin_attr_nvmem_read,
	.write = bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_group = {
	.bin_attrs = nvmem_bin_attributes,
	.attrs = nvmem_attrs,
	.is_bin_visible = nvmem_bin_attr_is_visible,
};

static const struct attribute_group *nvmem_dev_groups[] = {
	&nvmem_bin_group,
	NULL,
};

static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
	.attr = {
		.name = "eeprom",
	},
	.read = bin_attr_nvmem_read,
	.write = bin_attr_nvmem_write,
};

/*
 * nvmem_sysfs_setup_compat() - Create an additional binary entry in the
 * driver's sysfs directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	int rval;

	if (!config->compat)
		return 0;

	if (!config->base_dev)
		return -EINVAL;

	if (config->type == NVMEM_TYPE_FRAM)
		bin_attr_nvmem_eeprom_compat.attr.name = "fram";

	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}

#else /* CONFIG_NVMEM_SYSFS */

static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_free(&nvmem_ida, nvmem->id);
	gpiod_put(nvmem->wp_gpio);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release = nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name = "nvmem",
};

static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell_entry *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_entry_drop(cell);
}

static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
					const struct nvmem_cell_info *info,
					struct nvmem_cell_entry *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->raw_len = info->raw_len ?: info->bytes;
	cell->bytes = info->bytes;
	cell->name = info->name;
	cell->read_post_process = info->read_post_process;
	cell->priv = info->priv;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;
	cell->np = info->np;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name ?: "<unknown>", nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
				const struct nvmem_cell_info *info,
				struct nvmem_cell_entry *cell)
{
	int err;

	err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
	if (err)
		return err;

	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	return 0;
}

/**
 * nvmem_add_one_cell() - Add one cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 *
 * Return: 0 or negative error code on failure.
 */
int nvmem_add_one_cell(struct nvmem_device *nvmem,
		       const struct nvmem_cell_info *info)
{
	struct nvmem_cell_entry *cell;
	int rval;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return -ENOMEM;

	rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
	if (rval) {
		kfree(cell);
		return rval;
	}

	nvmem_cell_entry_add(cell);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_add_one_cell);
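
/*
 * Editor's illustrative sketch (not part of the original file): a provider
 * could publish a single 8-byte serial-number cell at offset 0x40 like
 * this. The function and cell names are hypothetical.
 */
static int __maybe_unused example_add_serial_cell(struct nvmem_device *nvmem)
{
	struct nvmem_cell_info info = {
		.name	= "serial-number",
		.offset	= 0x40,
		.bytes	= 8,
	};

	return nvmem_add_one_cell(nvmem, &info);
}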

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	int i, rval;

	for (i = 0; i < ncells; i++) {
		rval = nvmem_add_one_cell(nvmem, &info[i]);
		if (rval)
			return rval;
	}

	return 0;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
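
/*
 * Editor's illustrative sketch (not part of the original file): a consumer
 * that must react to late-arriving providers can watch for NVMEM_ADD
 * events. The callback and variable names are hypothetical.
 */
static int example_nvmem_event(struct notifier_block *nb, unsigned long event,
			       void *data)
{
	if (event == NVMEM_ADD)
		pr_debug("nvmem device %s added\n", nvmem_dev_name(data));

	return NOTIFY_OK;
}

static struct notifier_block example_nvmem_nb __maybe_unused = {
	.notifier_call = example_nvmem_event,
};
/* ... nvmem_register_notifier(&example_nvmem_nb); */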

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell_entry *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_entry_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
{
	unsigned int cur = 0;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;

	while (keepout < keepoutend) {
		/* Ensure keepouts are sorted and don't overlap. */
		if (keepout->start < cur) {
			dev_err(&nvmem->dev,
				"Keepout regions aren't sorted or overlap.\n");

			return -ERANGE;
		}

		if (keepout->end < keepout->start) {
			dev_err(&nvmem->dev,
				"Invalid keepout region.\n");

			return -EINVAL;
		}

		/*
		 * Validate keepouts (and holes between) don't violate
		 * word_size constraints.
		 */
		if ((keepout->end - keepout->start < nvmem->word_size) ||
		    ((keepout->start != cur) &&
		     (keepout->start - cur < nvmem->word_size))) {

			dev_err(&nvmem->dev,
				"Keepout regions violate word_size constraints.\n");

			return -ERANGE;
		}

		/* Validate keepouts don't violate stride (alignment). */
		if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
		    !IS_ALIGNED(keepout->end, nvmem->stride)) {

			dev_err(&nvmem->dev,
				"Keepout regions violate stride.\n");

			return -EINVAL;
		}

		cur = keepout->end;
		keepout++;
	}

	return 0;
}

static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_layout *layout = nvmem->layout;
	struct device *dev = &nvmem->dev;
	struct device_node *child;
	const __be32 *addr;
	int len, ret;

	for_each_child_of_node(np, child) {
		struct nvmem_cell_info info = {0};

		addr = of_get_property(child, "reg", &len);
		if (!addr)
			continue;
		if (len < 2 * sizeof(u32)) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			of_node_put(child);
			return -EINVAL;
		}

		info.offset = be32_to_cpup(addr++);
		info.bytes = be32_to_cpup(addr);
		info.name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			info.bit_offset = be32_to_cpup(addr++);
			info.nbits = be32_to_cpup(addr);
		}

		info.np = of_node_get(child);

		if (layout && layout->fixup_cell_info)
			layout->fixup_cell_info(nvmem, layout, &info);

		ret = nvmem_add_one_cell(nvmem, &info);
		kfree(info.name);
		if (ret) {
			of_node_put(child);
			return ret;
		}
	}

	return 0;
}
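
/*
 * Editor's illustrative sketch (not part of the original file): the loop
 * above parses fixed cells described in devicetree roughly like this
 * (hypothetical node names):
 *
 *	eeprom@52 {
 *		...
 *		mac-address@24 {
 *			reg = <0x24 0x6>;
 *		};
 *		board-rev@90 {
 *			reg = <0x90 0x1>;
 *			bits = <0 4>;	// bit_offset 0, nbits 4
 *		};
 *	};
 */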

static int nvmem_add_cells_from_legacy_of(struct nvmem_device *nvmem)
{
	return nvmem_add_cells_from_dt(nvmem, nvmem->dev.of_node);
}

static int nvmem_add_cells_from_fixed_layout(struct nvmem_device *nvmem)
{
	struct device_node *layout_np;
	int err = 0;

	layout_np = of_nvmem_layout_get_container(nvmem);
	if (!layout_np)
		return 0;

	if (of_device_is_compatible(layout_np, "fixed-layout"))
		err = nvmem_add_cells_from_dt(nvmem, layout_np);

	of_node_put(layout_np);

	return err;
}

int __nvmem_layout_register(struct nvmem_layout *layout, struct module *owner)
{
	layout->owner = owner;

	spin_lock(&nvmem_layout_lock);
	list_add(&layout->node, &nvmem_layouts);
	spin_unlock(&nvmem_layout_lock);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_LAYOUT_ADD, layout);

	return 0;
}
EXPORT_SYMBOL_GPL(__nvmem_layout_register);

void nvmem_layout_unregister(struct nvmem_layout *layout)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_LAYOUT_REMOVE, layout);

	spin_lock(&nvmem_layout_lock);
	list_del(&layout->node);
	spin_unlock(&nvmem_layout_lock);
}
EXPORT_SYMBOL_GPL(nvmem_layout_unregister);

static struct nvmem_layout *nvmem_layout_get(struct nvmem_device *nvmem)
{
	struct device_node *layout_np;
	struct nvmem_layout *l, *layout = ERR_PTR(-EPROBE_DEFER);

	layout_np = of_nvmem_layout_get_container(nvmem);
	if (!layout_np)
		return NULL;

	/*
	 * In case the nvmem device was built-in while the layout was built as a
	 * module, we shall manually request the layout driver loading otherwise
	 * we'll never have any match.
	 */
	of_request_module(layout_np);

	spin_lock(&nvmem_layout_lock);

	list_for_each_entry(l, &nvmem_layouts, node) {
		if (of_match_node(l->of_match_table, layout_np)) {
			if (try_module_get(l->owner))
				layout = l;

			break;
		}
	}

	spin_unlock(&nvmem_layout_lock);
	of_node_put(layout_np);

	return layout;
}

static void nvmem_layout_put(struct nvmem_layout *layout)
{
	if (layout)
		module_put(layout->owner);
}

static int nvmem_add_cells_from_layout(struct nvmem_device *nvmem)
{
	struct nvmem_layout *layout = nvmem->layout;
	int ret;

	if (layout && layout->add_cells) {
		ret = layout->add_cells(&nvmem->dev, nvmem, layout);
		if (ret)
			return ret;
	}

	return 0;
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_layout_get_container() - Get the OF node of the layout container.
 *
 * @nvmem: nvmem device.
 *
 * Return: a node pointer with refcount incremented or NULL if no
 * container exists. Use of_node_put() on it when done.
 */
struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem)
{
	return of_get_child_by_name(nvmem->dev.of_node, "nvmem-layout");
}
EXPORT_SYMBOL_GPL(of_nvmem_layout_get_container);
#endif

const void *nvmem_layout_get_match_data(struct nvmem_device *nvmem,
					struct nvmem_layout *layout)
{
	struct device_node __maybe_unused *layout_np;
	const struct of_device_id *match;

	layout_np = of_nvmem_layout_get_container(nvmem);
	match = of_match_node(layout->of_match_table, layout_np);

	return match ? match->data : NULL;
}
EXPORT_SYMBOL_GPL(nvmem_layout_get_match_data);

/**
 * nvmem_register() - Register an nvmem device for the given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	if (!config->reg_read && !config->reg_write)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	nvmem->id = rval;

	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;

	device_initialize(&nvmem->dev);

	if (!config->ignore_wp)
		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(nvmem->wp_gpio)) {
		rval = PTR_ERR(nvmem->wp_gpio);
		nvmem->wp_gpio = NULL;
		goto err_put_device;
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);

	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->root_only = config->root_only;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	nvmem->keepout = config->keepout;
	nvmem->nkeepout = config->nkeepout;
	if (config->of_node)
		nvmem->dev.of_node = config->of_node;
	else
		nvmem->dev.of_node = config->dev->of_node;

	switch (config->id) {
	case NVMEM_DEVID_NONE:
		rval = dev_set_name(&nvmem->dev, "%s", config->name);
		break;
	case NVMEM_DEVID_AUTO:
		rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
		break;
	default:
		rval = dev_set_name(&nvmem->dev, "%s%d",
				    config->name ? : "nvmem",
				    config->name ? config->id : nvmem->id);
		break;
	}

	if (rval)
		goto err_put_device;

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

#ifdef CONFIG_NVMEM_SYSFS
	nvmem->dev.groups = nvmem_dev_groups;
#endif

	if (nvmem->nkeepout) {
		rval = nvmem_validate_keepouts(nvmem);
		if (rval)
			goto err_put_device;
	}

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_put_device;
	}

	/*
	 * If the driver supplied a layout by config->layout, the module
	 * pointer will be NULL and nvmem_layout_put() will be a noop.
	 */
	nvmem->layout = config->layout ?: nvmem_layout_get(nvmem);
	if (IS_ERR(nvmem->layout)) {
		rval = PTR_ERR(nvmem->layout);
		nvmem->layout = NULL;

		if (rval == -EPROBE_DEFER)
			goto err_teardown_compat;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_remove_cells;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	if (config->add_legacy_fixed_of_cells) {
		rval = nvmem_add_cells_from_legacy_of(nvmem);
		if (rval)
			goto err_remove_cells;
	}

	rval = nvmem_add_cells_from_fixed_layout(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_layout(nvmem);
	if (rval)
		goto err_remove_cells;

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_remove_cells;

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
	nvmem_layout_put(nvmem->layout);
err_teardown_compat:
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
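
/*
 * Editor's illustrative sketch (not part of the original file): a minimal
 * read-only provider backed by a buffer already held in driver data. All
 * names are hypothetical; a real driver would size the buffer from its
 * hardware.
 */
static int example_reg_read(void *priv, unsigned int offset, void *val,
			    size_t bytes)
{
	/* priv: a 256-byte shadow copy of the OTP contents */
	memcpy(val, (u8 *)priv + offset, bytes);

	return 0;
}

static struct nvmem_device *__maybe_unused
example_register(struct device *dev, void *shadow)
{
	struct nvmem_config config = {
		.dev		= dev,
		.name		= "example-otp",
		.id		= NVMEM_DEVID_NONE,
		.type		= NVMEM_TYPE_OTP,
		.read_only	= true,
		.size		= 256,
		.word_size	= 1,
		.stride		= 1,
		.priv		= shadow,
		.reg_read	= example_reg_read,
	};

	return nvmem_register(&config);
}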

static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	nvmem_layout_put(nvmem->layout);
	device_unregister(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	if (nvmem)
		kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_unregister(void *nvmem)
{
	nvmem_unregister(nvmem);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int ret;

	nvmem = nvmem_register(config);
	if (IS_ERR(nvmem))
		return nvmem;

	ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem);
	if (ret)
		return ERR_PTR(ret);

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	struct nvmem_device *nvmem;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;

	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put a previously obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put a previously obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device of device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
					    const char *id, int index)
{
	struct nvmem_cell *cell;
	const char *name = NULL;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return ERR_PTR(-ENOMEM);

	if (id) {
		name = kstrdup_const(id, GFP_KERNEL);
		if (!name) {
			kfree(cell);
			return ERR_PTR(-ENOMEM);
		}
	}

	cell->id = name;
	cell->entry = entry;
	cell->index = index;

	return cell;
}

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell_entry = nvmem_find_cell_entry_by_name(nvmem,
								   lookup->cell_name);
			if (!cell_entry) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			} else {
				cell = nvmem_create_cell(cell_entry, con_id, 0);
				if (IS_ERR(cell))
					__nvmem_device_put(nvmem);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

/**
 * of_nvmem_cell_get() - Get an nvmem cell from a given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 * for the cell at index 0 (the lone cell with no accompanying
 * nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell should be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell;
	struct of_phandle_args cell_spec;
	int index = 0;
	int cell_index = 0;
	int ret;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	ret = of_parse_phandle_with_optional_args(np, "nvmem-cells",
						  "#nvmem-cell-cells",
						  index, &cell_spec);
	if (ret)
		return ERR_PTR(-ENOENT);

	if (cell_spec.args_count > 1)
		return ERR_PTR(-EINVAL);

	cell_np = cell_spec.np;
	if (cell_spec.args_count)
		cell_index = cell_spec.args[0];

	nvmem_np = of_get_parent(cell_np);
	if (!nvmem_np) {
		of_node_put(cell_np);
		return ERR_PTR(-EINVAL);
	}

	/* nvmem layouts produce cells within the nvmem-layout container */
	if (of_node_name_eq(nvmem_np, "nvmem-layout")) {
		nvmem_np = of_get_next_parent(nvmem_np);
		if (!nvmem_np) {
			of_node_put(cell_np);
			return ERR_PTR(-EINVAL);
		}
	}

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem)) {
		of_node_put(cell_np);
		return ERR_CAST(nvmem);
	}

	cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
	of_node_put(cell_np);
	if (!cell_entry) {
		__nvmem_device_put(nvmem);
		return ERR_PTR(-ENOENT);
	}

	cell = nvmem_create_cell(cell_entry, id, cell_index);
	if (IS_ERR(cell))
		__nvmem_device_put(nvmem);

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get an nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 * nvmem-cell-names property for DT systems and with the con_id from
 * the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell should be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed automatically
 * once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->entry->nvmem;

	if (cell->id)
		kfree_const(cell->id);

	kfree(cell);
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in less bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}
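
/*
 * Editor's worked example (not part of the original file): for a cell with
 * bit_offset = 2 and nbits = 12, cell->bytes is DIV_ROUND_UP(14, 8) = 2.
 * Each byte is shifted right by two and topped up with the low bits of the
 * following byte; no trailing bytes are trimmed (extra = 0); and since
 * 12 % 8 = 4, the last byte is masked with GENMASK(3, 0).
 */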

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell_entry *cell,
			     void *buf, size_t *len, const char *id, int index)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->raw_len);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (cell->read_post_process) {
		rc = cell->read_post_process(cell->priv, id, index,
					     cell->offset, buf, cell->raw_len);
		if (rc)
			return rc;
	}

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_cell_entry *entry = cell->entry;
	struct nvmem_device *nvmem = entry->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(max_t(size_t, entry->raw_len, entry->bytes), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id, cell->index);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);
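
/*
 * Editor's illustrative sketch (not part of the original file): a typical
 * consumer read of a six-byte "mac-address" cell. The cell name and the
 * function are hypothetical.
 */
static int __maybe_unused example_read_mac(struct device *dev, u8 mac[6])
{
	struct nvmem_cell *cell;
	size_t len;
	void *buf;

	cell = nvmem_cell_get(dev, "mac-address");
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	if (len != 6) {
		kfree(buf);
		return -EINVAL;
	}

	memcpy(mac, buf, 6);
	kfree(buf);

	return 0;
}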

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the byte if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it's not end on byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;

	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	/*
	 * Any cells which have a read_post_process hook are read-only because
	 * we cannot reverse the operation and it might affect other cells,
	 * too.
	 */
	if (cell->read_post_process)
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	return __nvmem_cell_entry_write(cell->entry, buf, len);
}

EXPORT_SYMBOL_GPL(nvmem_cell_write);

static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
				  void *val, size_t count)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != count) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, count);
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}

/**
 * nvmem_cell_read_u8() - Read a cell value as a u8
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
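
/*
 * Editor's illustrative sketch (not part of the original file): the
 * fixed-width helpers above reduce the common "read one register-sized
 * value" pattern to a single call. The cell name is hypothetical.
 */
static int __maybe_unused example_read_calibration(struct device *dev,
						   u32 *out)
{
	/* Reads a 4-byte cell named "calibration"; fails if the size differs. */
	return nvmem_cell_read_u32(dev, "calibration", out);
}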

static const void *nvmem_cell_read_variable_common(struct device *dev,
						   const char *cell_id,
						   size_t max_len, size_t *len)
{
	struct nvmem_cell *cell;
	int nbits;
	void *buf;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return cell;

	nbits = cell->entry->nbits;
	buf = nvmem_cell_read(cell, len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return buf;

	/*
	 * If nbits is set then nvmem_cell_read() can significantly exaggerate
	 * the length of the real data. Throw away the extra junk.
	 */
	if (nbits)
		*len = DIV_ROUND_UP(nbits, 8);

	if (*len > max_len) {
		kfree(buf);
		return ERR_PTR(-ERANGE);
	}

	return buf;
}

/**
 * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
				    u32 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);

/**
 * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
				    u64 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= (uint64_t)buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: number of bytes read on success and negative error code on
 * error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL, 0);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	return __nvmem_cell_entry_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: number of bytes read on success and negative error code on
 * error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
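
/*
 * Editor's illustrative sketch (not part of the original file): board code
 * on a non-DT platform could attach cells to a provider by device name.
 * All names are hypothetical.
 */
static struct nvmem_cell_info example_board_cells[] = {
	{
		.name	= "board-rev",
		.offset	= 0x0,
		.bytes	= 1,
	},
};

static struct nvmem_cell_table example_board_cell_table __maybe_unused = {
	.nvmem_name	= "example-otp0",
	.cells		= example_board_cells,
	.ncells		= ARRAY_SIZE(example_board_cells),
};
/* ... nvmem_add_cell_table(&example_board_cell_table); */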

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
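
/*
 * Editor's illustrative sketch (not part of the original file): a lookup
 * entry routes a consumer's nvmem_cell_get(dev, "mac-address") to a named
 * cell on a named provider when there is no devicetree. All names are
 * hypothetical.
 */
static struct nvmem_cell_lookup example_lookups[] __maybe_unused = {
	{
		.nvmem_name	= "example-otp0",
		.cell_name	= "mac-address",
		.dev_id		= "example-eth.0",
		.con_id		= "mac-address",
	},
};
/* ... nvmem_add_cell_lookups(example_lookups, ARRAY_SIZE(example_lookups)); */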

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
