// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>

#include "internals.h"

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

#define FLAG_COMPAT		BIT(0)
struct nvmem_cell_entry {
	const char		*name;
	int			offset;
	size_t			raw_len;
	int			bytes;
	int			bit_offset;
	int			nbits;
	nvmem_cell_post_process_t read_post_process;
	void			*priv;
	struct device_node	*np;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

struct nvmem_cell {
	struct nvmem_cell_entry *entry;
	const char		*id;
	int			index;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			    void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			     void *val, size_t bytes)
{
	int ret;

	if (nvmem->reg_write) {
		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
		return ret;
	}

	return -EINVAL;
}

static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
				      unsigned int offset, void *val,
				      size_t bytes, int write)
{
	unsigned int end = offset + bytes;
	unsigned int kend, ksize;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
	int rc;

	/*
	 * Skip all keepouts before the range being accessed.
	 * Keepouts are sorted.
	 */
	while ((keepout < keepoutend) && (keepout->end <= offset))
		keepout++;

	while ((offset < end) && (keepout < keepoutend)) {
		/* Access the valid portion before the keepout. */
		if (offset < keepout->start) {
			kend = min(end, keepout->start);
			ksize = kend - offset;
			if (write)
				rc = __nvmem_reg_write(nvmem, offset, val, ksize);
			else
				rc = __nvmem_reg_read(nvmem, offset, val, ksize);

			if (rc)
				return rc;

			offset += ksize;
			val += ksize;
		}

		/*
		 * Now we're aligned to the start of this keepout zone. Go
		 * through it.
		 */
		kend = min(end, keepout->end);
		ksize = kend - offset;
		if (!write)
			memset(val, keepout->value, ksize);

		val += ksize;
		offset += ksize;
		keepout++;
	}

	/*
	 * If we ran out of keepouts but there's still stuff to do, send it
	 * down directly.
	 */
	if (offset < end) {
		ksize = end - offset;
		if (write)
			return __nvmem_reg_write(nvmem, offset, val, ksize);
		else
			return __nvmem_reg_read(nvmem, offset, val, ksize);
	}

	return 0;
}

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_read(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_write(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
}

#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
	[NVMEM_TYPE_FRAM] = "FRAM",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sysfs_emit(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sysfs_emit(buf, "%d\n", nvmem->read_only);
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);
	int ret = kstrtobool(buf, &nvmem->read_only);

	if (ret < 0)
		return ret;

	return count;
}

static DEVICE_ATTR_RW(force_ro);

static struct attribute *nvmem_attrs[] = {
	&dev_attr_force_ro.attr,
	&dev_attr_type.attr,
	NULL,
};

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   const struct bin_attribute *attr, char *buf,
				   loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_read)
		return -EPERM;

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    const struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_write || nvmem->read_only)
		return -EPERM;

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
	umode_t mode = 0400;

	if (!nvmem->root_only)
		mode |= 0044;

	if (!nvmem->read_only)
		mode |= 0200;

	if (!nvmem->reg_write)
		mode &= ~0200;

	if (!nvmem->reg_read)
		mode &= ~0444;

	return mode;
}

static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
					 const struct bin_attribute *attr,
					 int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return nvmem_bin_attr_get_umode(nvmem);
}

static size_t nvmem_bin_attr_size(struct kobject *kobj,
				  const struct bin_attribute *attr,
				  int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return nvmem->size;
}

static umode_t nvmem_attr_is_visible(struct kobject *kobj,
				     struct attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);
	/*
	 * A device without a .reg_write operation can never be made
	 * writable, so do not expose the 'force_ro' attribute for it.
	 * A device that is merely configured read-only can still be
	 * switched to read-write mode through 'force_ro'.
	 */
	if (attr == &dev_attr_force_ro.attr && !nvmem->reg_write)
		return 0;	/* Attribute not visible */

	return attr->mode;
}

static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
					    const char *id, int index);

static ssize_t nvmem_cell_attr_read(struct file *filp, struct kobject *kobj,
				    const struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct nvmem_cell_entry *entry;
	struct nvmem_cell *cell = NULL;
	size_t cell_sz, read_len;
	void *content;

	entry = attr->private;
	cell = nvmem_create_cell(entry, entry->name, 0);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	if (!cell)
		return -EINVAL;

	content = nvmem_cell_read(cell, &cell_sz);
	if (IS_ERR(content)) {
		read_len = PTR_ERR(content);
		goto destroy_cell;
	}

	read_len = min_t(unsigned int, cell_sz - pos, count);
	memcpy(buf, content + pos, read_len);
	kfree(content);

destroy_cell:
	kfree_const(cell->id);
	kfree(cell);

	return read_len;
}

/* default read/write permissions */
static const struct bin_attribute bin_attr_rw_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= 0644,
	},
	.read_new	= bin_attr_nvmem_read,
	.write_new	= bin_attr_nvmem_write,
};

static const struct bin_attribute *const nvmem_bin_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_group = {
	.bin_attrs_new	= nvmem_bin_attributes,
	.attrs		= nvmem_attrs,
	.is_bin_visible = nvmem_bin_attr_is_visible,
	.bin_size	= nvmem_bin_attr_size,
	.is_visible	= nvmem_attr_is_visible,
};

static const struct attribute_group *nvmem_dev_groups[] = {
	&nvmem_bin_group,
	NULL,
};

static const struct bin_attribute bin_attr_nvmem_eeprom_compat = {
	.attr	= {
		.name	= "eeprom",
	},
	.read_new	= bin_attr_nvmem_read,
	.write_new	= bin_attr_nvmem_write,
};

/*
 * nvmem_sysfs_setup_compat() - Create an additional binary entry in the
 * driver's sysfs directory, to remain backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	int rval;

	if (!config->compat)
		return 0;

	if (!config->base_dev)
		return -EINVAL;

	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
	if (config->type == NVMEM_TYPE_FRAM)
		nvmem->eeprom.attr.name = "fram";
	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}

static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
{
	struct attribute_group group = {
		.name	= "cells",
	};
	struct nvmem_cell_entry *entry;
	const struct bin_attribute **pattrs;
	struct bin_attribute *attrs;
	unsigned int ncells = 0, i = 0;
	int ret = 0;

	mutex_lock(&nvmem_mutex);

	if (list_empty(&nvmem->cells) || nvmem->sysfs_cells_populated)
		goto unlock_mutex;

	/* Allocate an array of attributes with a sentinel */
	ncells = list_count_nodes(&nvmem->cells);
	pattrs = devm_kcalloc(&nvmem->dev, ncells + 1,
			      sizeof(struct bin_attribute *), GFP_KERNEL);
	if (!pattrs) {
		ret = -ENOMEM;
		goto unlock_mutex;
	}

	attrs = devm_kcalloc(&nvmem->dev, ncells, sizeof(struct bin_attribute), GFP_KERNEL);
	if (!attrs) {
		ret = -ENOMEM;
		goto unlock_mutex;
	}

	/* Initialize each attribute to take the name and size of the cell */
	list_for_each_entry(entry, &nvmem->cells, node) {
		sysfs_bin_attr_init(&attrs[i]);
		attrs[i].attr.name = devm_kasprintf(&nvmem->dev, GFP_KERNEL,
						    "%s@%x,%x", entry->name,
						    entry->offset,
						    entry->bit_offset);
		attrs[i].attr.mode = 0444 & nvmem_bin_attr_get_umode(nvmem);
		attrs[i].size = entry->bytes;
		attrs[i].read_new = &nvmem_cell_attr_read;
		attrs[i].private = entry;
		if (!attrs[i].attr.name) {
			ret = -ENOMEM;
			goto unlock_mutex;
		}

		pattrs[i] = &attrs[i];
		i++;
	}

	group.bin_attrs_new = pattrs;

	ret = device_add_group(&nvmem->dev, &group);
	if (ret)
		goto unlock_mutex;

	nvmem->sysfs_cells_populated = true;

unlock_mutex:
	mutex_unlock(&nvmem_mutex);

	return ret;
}

#else /* CONFIG_NVMEM_SYSFS */

static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_free(&nvmem_ida, nvmem->id);
	gpiod_put(nvmem->wp_gpio);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell_entry *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_entry_drop(cell);
}

static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
						     const struct nvmem_cell_info *info,
						     struct nvmem_cell_entry *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->raw_len = info->raw_len ?: info->bytes;
	cell->bytes = info->bytes;
	cell->name = info->name;
	cell->read_post_process = info->read_post_process;
	cell->priv = info->priv;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;
	cell->np = info->np;

	if (cell->nbits) {
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);
		cell->raw_len = ALIGN(cell->bytes, nvmem->word_size);
	}

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name ?: "<unknown>", nvmem->stride);
		return -EINVAL;
	}

	if (!IS_ALIGNED(cell->raw_len, nvmem->word_size)) {
		dev_err(&nvmem->dev,
			"cell %s raw len %zd unaligned to nvmem word size %d\n",
			cell->name ?: "<unknown>", cell->raw_len,
			nvmem->word_size);

		if (info->raw_len)
			return -EINVAL;

		cell->raw_len = ALIGN(cell->raw_len, nvmem->word_size);
	}

	return 0;
}

static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
					       const struct nvmem_cell_info *info,
					       struct nvmem_cell_entry *cell)
{
	int err;

	err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
	if (err)
		return err;

	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	return 0;
}

/**
 * nvmem_add_one_cell() - Add a single cell's information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 *
 * Return: 0 or negative error code on failure.
 */
int nvmem_add_one_cell(struct nvmem_device *nvmem,
		       const struct nvmem_cell_info *info)
{
	struct nvmem_cell_entry *cell;
	int rval;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return -ENOMEM;

	rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
	if (rval) {
		kfree(cell);
		return rval;
	}

	nvmem_cell_entry_add(cell);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_add_one_cell);
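
/*
 * Example (illustrative sketch, not part of the driver): a provider adding
 * a single cell at probe time. The cell name, offset and size below are
 * hypothetical.
 *
 *	struct nvmem_cell_info info = {
 *		.name	= "mac-address",
 *		.offset	= 0x40,
 *		.bytes	= 6,
 *	};
 *
 *	err = nvmem_add_one_cell(nvmem, &info);
 */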

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	int i, rval;

	for (i = 0; i < ncells; i++) {
		rval = nvmem_add_one_cell(nvmem, &info[i]);
		if (rval)
			return rval;
	}

	return 0;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
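
/*
 * Example (illustrative sketch): reacting to provider arrival. The callback
 * is invoked with the events used in this file (NVMEM_ADD, NVMEM_REMOVE,
 * NVMEM_CELL_ADD, NVMEM_CELL_REMOVE); the foo_*() names are hypothetical.
 *
 *	static int foo_nvmem_notify(struct notifier_block *nb,
 *				    unsigned long event, void *data)
 *	{
 *		if (event == NVMEM_ADD)
 *			pr_debug("nvmem provider registered\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nvmem_nb = {
 *		.notifier_call = foo_nvmem_notify,
 *	};
 *
 *	err = nvmem_register_notifier(&foo_nvmem_nb);
 */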

static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
{
	unsigned int cur = 0;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;

	while (keepout < keepoutend) {
		/* Ensure keepouts are sorted and don't overlap. */
		if (keepout->start < cur) {
			dev_err(&nvmem->dev,
				"Keepout regions aren't sorted or overlap.\n");

			return -ERANGE;
		}

		if (keepout->end < keepout->start) {
			dev_err(&nvmem->dev,
				"Invalid keepout region.\n");

			return -EINVAL;
		}

		/*
		 * Validate keepouts (and holes between) don't violate
		 * word_size constraints.
		 */
		if ((keepout->end - keepout->start < nvmem->word_size) ||
		    ((keepout->start != cur) &&
		     (keepout->start - cur < nvmem->word_size))) {

			dev_err(&nvmem->dev,
				"Keepout regions violate word_size constraints.\n");

			return -ERANGE;
		}

		/* Validate keepouts don't violate stride (alignment). */
		if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
		    !IS_ALIGNED(keepout->end, nvmem->stride)) {

			dev_err(&nvmem->dev,
				"Keepout regions violate stride.\n");

			return -EINVAL;
		}

		cur = keepout->end;
		keepout++;
	}

	return 0;
}
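
/*
 * Example (illustrative sketch): a keepout table that passes the validation
 * above for a provider with word_size = 4 and stride = 4. The regions are
 * sorted, non-overlapping and stride-aligned; reads that fall inside them
 * are filled with .value instead of touching the hardware.
 *
 *	static const struct nvmem_keepout foo_keepouts[] = {
 *		{ .start = 0x10, .end = 0x20, .value = 0x00 },
 *		{ .start = 0x40, .end = 0x44, .value = 0xff },
 *	};
 *
 *	config.keepout = foo_keepouts;
 *	config.nkeepout = ARRAY_SIZE(foo_keepouts);
 */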

static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_node *np)
{
	struct device *dev = &nvmem->dev;
	struct device_node *child;
	const __be32 *addr;
	int len, ret;

	for_each_child_of_node(np, child) {
		struct nvmem_cell_info info = {0};

		addr = of_get_property(child, "reg", &len);
		if (!addr)
			continue;
		if (len < 2 * sizeof(u32)) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			of_node_put(child);
			return -EINVAL;
		}

		info.offset = be32_to_cpup(addr++);
		info.bytes = be32_to_cpup(addr);
		info.name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			info.bit_offset = be32_to_cpup(addr++);
			info.nbits = be32_to_cpup(addr);
			if (info.bit_offset >= BITS_PER_BYTE * info.bytes ||
			    info.nbits < 1 ||
			    info.bit_offset + info.nbits > BITS_PER_BYTE * info.bytes) {
				dev_err(dev, "nvmem: invalid bits on %pOF\n", child);
				of_node_put(child);
				return -EINVAL;
			}
		}

		info.np = of_node_get(child);

		if (nvmem->fixup_dt_cell_info)
			nvmem->fixup_dt_cell_info(nvmem, &info);

		ret = nvmem_add_one_cell(nvmem, &info);
		kfree(info.name);
		if (ret) {
			of_node_put(child);
			return ret;
		}
	}

	return 0;
}
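
/*
 * Example (illustrative sketch) of the cell nodes parsed above: "reg" holds
 * <offset size> in bytes and the optional "bits" property holds
 * <bit_offset nbits> within that byte range. The node names are
 * hypothetical.
 *
 *	eeprom@52 {
 *		calib: calibration@10 {
 *			reg = <0x10 0x2>;
 *			bits = <4 10>;
 *		};
 *	};
 */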

static int nvmem_add_cells_from_legacy_of(struct nvmem_device *nvmem)
{
	return nvmem_add_cells_from_dt(nvmem, nvmem->dev.of_node);
}

static int nvmem_add_cells_from_fixed_layout(struct nvmem_device *nvmem)
{
	struct device_node *layout_np;
	int err = 0;

	layout_np = of_nvmem_layout_get_container(nvmem);
	if (!layout_np)
		return 0;

	if (of_device_is_compatible(layout_np, "fixed-layout"))
		err = nvmem_add_cells_from_dt(nvmem, layout_np);

	of_node_put(layout_np);

	return err;
}

int nvmem_layout_register(struct nvmem_layout *layout)
{
	int ret;

	if (!layout->add_cells)
		return -EINVAL;

	/* Populate the cells */
	ret = layout->add_cells(layout);
	if (ret)
		return ret;

#ifdef CONFIG_NVMEM_SYSFS
	ret = nvmem_populate_sysfs_cells(layout->nvmem);
	if (ret) {
		nvmem_device_remove_all_cells(layout->nvmem);
		return ret;
	}
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_layout_register);

void nvmem_layout_unregister(struct nvmem_layout *layout)
{
	/* Keep the API even with an empty stub in case we need it later */
}
EXPORT_SYMBOL_GPL(nvmem_layout_unregister);

/**
 * nvmem_register() - Register an nvmem device for the given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	if (!config->reg_read && !config->reg_write)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	nvmem->id = rval;

	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;

	device_initialize(&nvmem->dev);

	if (!config->ignore_wp)
		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(nvmem->wp_gpio)) {
		rval = PTR_ERR(nvmem->wp_gpio);
		nvmem->wp_gpio = NULL;
		goto err_put_device;
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);
	nvmem->fixup_dt_cell_info = config->fixup_dt_cell_info;

	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->root_only = config->root_only;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	nvmem->keepout = config->keepout;
	nvmem->nkeepout = config->nkeepout;
	if (config->of_node)
		nvmem->dev.of_node = config->of_node;
	else
		nvmem->dev.of_node = config->dev->of_node;

	switch (config->id) {
	case NVMEM_DEVID_NONE:
		rval = dev_set_name(&nvmem->dev, "%s", config->name);
		break;
	case NVMEM_DEVID_AUTO:
		rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
		break;
	default:
		rval = dev_set_name(&nvmem->dev, "%s%d",
				    config->name ? : "nvmem",
				    config->name ? config->id : nvmem->id);
		break;
	}

	if (rval)
		goto err_put_device;

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

#ifdef CONFIG_NVMEM_SYSFS
	nvmem->dev.groups = nvmem_dev_groups;
#endif

	if (nvmem->nkeepout) {
		rval = nvmem_validate_keepouts(nvmem);
		if (rval)
			goto err_put_device;
	}

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_put_device;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_remove_cells;
	}

	if (config->add_legacy_fixed_of_cells) {
		rval = nvmem_add_cells_from_legacy_of(nvmem);
		if (rval)
			goto err_remove_cells;
	}

	rval = nvmem_add_cells_from_fixed_layout(nvmem);
	if (rval)
		goto err_remove_cells;

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_populate_layout(nvmem);
	if (rval)
		goto err_remove_dev;

#ifdef CONFIG_NVMEM_SYSFS
	rval = nvmem_populate_sysfs_cells(nvmem);
	if (rval)
		goto err_destroy_layout;
#endif

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

#ifdef CONFIG_NVMEM_SYSFS
err_destroy_layout:
	nvmem_destroy_layout(nvmem);
#endif
err_remove_dev:
	device_del(&nvmem->dev);
err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
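
/*
 * Example (illustrative sketch): a minimal provider registration. The
 * foo_*() names, sizes and backing buffer are hypothetical; at least one of
 * .reg_read and .reg_write must be set, and stride/word_size default to 1
 * as applied above.
 *
 *	static int foo_reg_read(void *priv, unsigned int offset, void *val,
 *				size_t bytes)
 *	{
 *		memcpy(val, (u8 *)priv + offset, bytes);
 *		return 0;
 *	}
 *
 *	struct nvmem_config config = {
 *		.dev		= &pdev->dev,
 *		.name		= "foo-efuse",
 *		.id		= NVMEM_DEVID_AUTO,
 *		.read_only	= true,
 *		.size		= 256,
 *		.reg_read	= foo_reg_read,
 *		.priv		= foo_shadow,
 *	};
 *
 *	nvmem = devm_nvmem_register(&pdev->dev, &config);
 */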

static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	nvmem_destroy_layout(nvmem);
	device_unregister(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	if (nvmem)
		kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_unregister(void *nvmem)
{
	nvmem_unregister(nvmem);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int ret;

	nvmem = nvmem_register(config);
	if (IS_ERR(nvmem))
		return nvmem;

	ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem);
	if (ret)
		return ERR_PTR(ret);

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	struct nvmem_device *nvmem;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get an nvmem device for a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
					    const char *id, int index)
{
	struct nvmem_cell *cell;
	const char *name = NULL;

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell)
		return ERR_PTR(-ENOMEM);

	if (id) {
		name = kstrdup_const(id, GFP_KERNEL);
		if (!name) {
			kfree(cell);
			return ERR_PTR(-ENOMEM);
		}
	}

	cell->id = name;
	cell->entry = entry;
	cell->index = index;

	return cell;
}

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell_entry = nvmem_find_cell_entry_by_name(nvmem,
								   lookup->cell_name);
			if (!cell_entry) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			} else {
				cell = nvmem_create_cell(cell_entry, con_id, 0);
				if (IS_ERR(cell))
					__nvmem_device_put(nvmem);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

static void nvmem_layout_module_put(struct nvmem_device *nvmem)
{
	if (nvmem->layout && nvmem->layout->dev.driver)
		module_put(nvmem->layout->dev.driver->owner);
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell_entry *
nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell_entry *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_layout_module_get_optional(struct nvmem_device *nvmem)
{
	if (!nvmem->layout)
		return 0;

	if (!nvmem->layout->dev.driver ||
	    !try_module_get(nvmem->layout->dev.driver->owner))
		return -EPROBE_DEFER;

	return 0;
}

/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *      for the cell at index 0 (the lone cell with no accompanying
 *      nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell_entry *cell_entry;
	struct nvmem_cell *cell;
	struct of_phandle_args cell_spec;
	int index = 0;
	int cell_index = 0;
	int ret;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	ret = of_parse_phandle_with_optional_args(np, "nvmem-cells",
						  "#nvmem-cell-cells",
						  index, &cell_spec);
	if (ret)
		return ERR_PTR(-ENOENT);

	if (cell_spec.args_count > 1)
		return ERR_PTR(-EINVAL);

	cell_np = cell_spec.np;
	if (cell_spec.args_count)
		cell_index = cell_spec.args[0];

	nvmem_np = of_get_parent(cell_np);
	if (!nvmem_np) {
		of_node_put(cell_np);
		return ERR_PTR(-EINVAL);
	}

	/* nvmem layouts produce cells within the nvmem-layout container */
	if (of_node_name_eq(nvmem_np, "nvmem-layout")) {
		nvmem_np = of_get_next_parent(nvmem_np);
		if (!nvmem_np) {
			of_node_put(cell_np);
			return ERR_PTR(-EINVAL);
		}
	}

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem)) {
		of_node_put(cell_np);
		return ERR_CAST(nvmem);
	}

	ret = nvmem_layout_module_get_optional(nvmem);
	if (ret) {
		of_node_put(cell_np);
		__nvmem_device_put(nvmem);
		return ERR_PTR(ret);
	}

	cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
	of_node_put(cell_np);
	if (!cell_entry) {
		__nvmem_device_put(nvmem);
		nvmem_layout_module_put(nvmem);
		if (nvmem->layout)
			return ERR_PTR(-EPROBE_DEFER);
		else
			return ERR_PTR(-ENOENT);
	}

	cell = nvmem_create_cell(cell_entry, id, cell_index);
	if (IS_ERR(cell)) {
		__nvmem_device_put(nvmem);
		nvmem_layout_module_put(nvmem);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif
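
/*
 * Example (illustrative sketch) of the consumer references resolved by
 * of_nvmem_cell_get(): "nvmem-cells" points at cell nodes and
 * "nvmem-cell-names" supplies the lookup ids. The node names are
 * hypothetical.
 *
 *	device {
 *		nvmem-cells = <&calib>;
 *		nvmem-cell-names = "calibration";
 *	};
 */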

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be released
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release a previously allocated nvmem cell
 * obtained with devm_nvmem_cell_get().
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->entry->nvmem;

	if (cell->id)
		kfree_const(cell->id);

	kfree(cell);
	__nvmem_device_put(nvmem);
	nvmem_layout_module_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bytes_offset;
	int bit_offset = cell->bit_offset;

	p = b = buf;

	bytes_offset = bit_offset / BITS_PER_BYTE;
	b += bytes_offset;
	bit_offset %= BITS_PER_BYTE;

	if (bit_offset % BITS_PER_BYTE) {
		/* First shift */
		*p = *b++ >> bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p++ |= *b << (BITS_PER_BYTE - bit_offset);

			*p = *b++ >> bit_offset;
		}
	} else if (p != b) {
		memmove(p, b, cell->bytes - bytes_offset);
		p += cell->bytes - 1;
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in fewer bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}
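
/*
 * Worked example (illustrative): for a cell with bit_offset = 2 and
 * nbits = 10 (so bytes = 2), raw data { 0b11111100, 0b00001111 } is shifted
 * down in place to { 0b11111111, 0b00000011 }: the ten valid bits end up
 * right-aligned and the leftover msb bits of the last byte are cleared.
 */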

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell_entry *cell,
			     void *buf, size_t *len, const char *id, int index)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->raw_len);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (cell->read_post_process) {
		rc = cell->read_post_process(cell->priv, id, index,
					     cell->offset, buf, cell->raw_len);
		if (rc)
			return rc;
	}

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_cell_entry *entry = cell->entry;
	struct nvmem_device *nvmem = entry->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(max_t(size_t, entry->raw_len, entry->bytes), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id, cell->index);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);
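
/*
 * Example (illustrative sketch) of the usual consumer sequence; the cell
 * name is hypothetical and the returned buffer belongs to the caller.
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *mac;
 *
 *	cell = nvmem_cell_get(dev, "mac-address");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	mac = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(mac))
 *		return PTR_ERR(mac);
 *
 *	... use mac[0..len - 1] ...
 *	kfree(mac);
 */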

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the byte if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if the cell does not end on a byte boundary */
1744 | if ((nbits + bit_offset) % BITS_PER_BYTE) { |
1745 | /* setup the last byte with msb bits from nvmem */ |
1746 | rc = nvmem_reg_read(nvmem, |
1747 | offset: cell->offset + cell->bytes - 1, val: &v, bytes: 1); |
1748 | if (rc) |
1749 | goto err; |
1750 | *p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v; |
1751 | |
1752 | } |
1753 | |
1754 | return buf; |
1755 | err: |
1756 | kfree(objp: buf); |
1757 | return ERR_PTR(error: rc); |
1758 | } |
1759 | |
1760 | static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len) |
1761 | { |
1762 | struct nvmem_device *nvmem = cell->nvmem; |
1763 | int rc; |
1764 | |
1765 | if (!nvmem || nvmem->read_only || |
1766 | (cell->bit_offset == 0 && len != cell->bytes)) |
1767 | return -EINVAL; |
1768 | |
1769 | /* |
1770 | * Any cells which have a read_post_process hook are read-only because |
1771 | * we cannot reverse the operation and it might affect other cells, |
1772 | * too. |
1773 | */ |
1774 | if (cell->read_post_process) |
1775 | return -EINVAL; |
1776 | |
1777 | if (cell->bit_offset || cell->nbits) { |
1778 | if (len != BITS_TO_BYTES(cell->nbits) && len != cell->bytes) |
1779 | return -EINVAL; |
1780 | buf = nvmem_cell_prepare_write_buffer(cell, buf: buf, len); |
1781 | if (IS_ERR(ptr: buf)) |
1782 | return PTR_ERR(ptr: buf); |
1783 | } |
1784 | |
1785 | rc = nvmem_reg_write(nvmem, offset: cell->offset, val: buf, bytes: cell->bytes); |
1786 | |
1787 | /* free the tmp buffer */ |
1788 | if (cell->bit_offset || cell->nbits) |
1789 | kfree(objp: buf); |
1790 | |
1791 | if (rc) |
1792 | return rc; |
1793 | |
1794 | return len; |
1795 | } |
1796 | |
1797 | /** |
1798 | * nvmem_cell_write() - Write to a given nvmem cell |
1799 | * |
1800 | * @cell: nvmem cell to be written. |
1801 | * @buf: Buffer to be written. |
1802 | * @len: length of buffer to be written to nvmem cell. |
1803 | * |
1804 | * Return: length of bytes written or negative on failure. |
1805 | */ |
1806 | int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len) |
1807 | { |
1808 | return __nvmem_cell_entry_write(cell: cell->entry, buf, len); |
1809 | } |
1810 | |
1811 | EXPORT_SYMBOL_GPL(nvmem_cell_write); |
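
/*
 * Illustrative sketch (not part of the framework): writing a cell
 * previously obtained with nvmem_cell_get().  The payload is
 * hypothetical; len must match the cell size (or the nbits-derived
 * size for bit-level cells):
 *
 *	u8 serial[4] = { 0xde, 0xad, 0xbe, 0xef };
 *	int ret;
 *
 *	ret = nvmem_cell_write(cell, serial, sizeof(serial));
 *	if (ret != sizeof(serial))
 *		return ret < 0 ? ret : -EIO;
 */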

static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
				  void *val, size_t count)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != count) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, count);
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}

/**
 * nvmem_cell_read_u8() - Read a cell value as a u8
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
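
/*
 * Illustrative sketch (not part of the framework): the sized helpers
 * above combine get + read + put and require the cell length to match
 * the requested width exactly.  "calibration" is a hypothetical cell
 * name resolved against the calling device:
 *
 *	u32 calib;
 *	int ret;
 *
 *	ret = nvmem_cell_read_u32(&pdev->dev, "calibration", &calib);
 *	if (ret)
 *		return ret;
 */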

static const void *nvmem_cell_read_variable_common(struct device *dev,
						   const char *cell_id,
						   size_t max_len, size_t *len)
{
	struct nvmem_cell *cell;
	int nbits;
	void *buf;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return cell;

	nbits = cell->entry->nbits;
	buf = nvmem_cell_read(cell, len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return buf;

	/*
	 * If nbits is set then nvmem_cell_read() can significantly exaggerate
	 * the length of the real data. Throw away the extra junk.
	 */
	if (nbits)
		*len = DIV_ROUND_UP(nbits, 8);

	if (*len > max_len) {
		kfree(buf);
		return ERR_PTR(-ERANGE);
	}

	return buf;
}

/**
 * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
				    u32 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy with implicit endian conversion; cast keeps the shift unsigned */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= (u32)buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);

/**
 * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
				    u64 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy with implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= (u64)buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);
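
/*
 * Illustrative sketch (not part of the framework): unlike the
 * fixed-size helpers, the variable_le variants accept any cell up to
 * the output width and zero-extend the little-endian value.  The
 * "speed-bin" cell name is hypothetical:
 *
 *	u64 speed_bin;
 *	int ret;
 *
 *	ret = nvmem_cell_read_variable_le_u64(dev, "speed-bin", &speed_bin);
 *	if (ret)
 *		return ret;
 */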

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: number of bytes read on success, or a negative error code on
 * failure.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL, 0);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);
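
/*
 * Illustrative sketch (not part of the framework): reading an ad-hoc
 * range through a caller-built struct nvmem_cell_info rather than a
 * registered cell.  The name, offset and size are hypothetical:
 *
 *	struct nvmem_cell_info info = {
 *		.name	= "board-id",
 *		.offset	= 0x10,
 *		.bytes	= 4,
 *	};
 *	u8 id[4];
 *	ssize_t len;
 *
 *	len = nvmem_device_cell_read(nvmem, &info, id);
 *	if (len < 0)
 *		return len;
 */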

/**
 * nvmem_device_cell_write() - Write a cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: number of bytes written, or a negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell_entry cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	return __nvmem_cell_entry_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);
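
/*
 * Illustrative sketch (not part of the framework): the write-side
 * counterpart consumes exactly info.bytes bytes from the buffer.
 * Again, the descriptor values are hypothetical:
 *
 *	struct nvmem_cell_info info = {
 *		.name	= "board-id",
 *		.offset	= 0x10,
 *		.bytes	= 4,
 *	};
 *	u8 id[4] = { 0x01, 0x02, 0x03, 0x04 };
 *	int ret;
 *
 *	ret = nvmem_device_cell_write(nvmem, &info, id);
 *	if (ret < 0)
 *		return ret;
 */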

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: number of bytes read on success, or a negative error code on
 * failure.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);
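
/*
 * Illustrative sketch (not part of the framework): raw reads against a
 * handle from nvmem_device_get(); the hypothetical offset and length
 * must stay within nvmem_dev_size():
 *
 *	u8 hdr[16];
 *	int ret;
 *
 *	ret = nvmem_device_read(nvmem, 0, sizeof(hdr), hdr);
 *	if (ret != sizeof(hdr))
 *		return ret < 0 ? ret : -EIO;
 */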

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: number of bytes written, or a negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);
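
/*
 * Illustrative sketch (not part of the framework): the raw write path;
 * the offset and payload are hypothetical, and any keepout ranges the
 * provider registered are handled transparently by nvmem_reg_write():
 *
 *	u8 magic[4] = { 'N', 'V', 'M', '1' };
 *	int ret;
 *
 *	ret = nvmem_device_write(nvmem, 0x40, sizeof(magic), magic);
 *	if (ret < 0)
 *		return ret;
 */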

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
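
/*
 * Illustrative sketch (not part of the framework): a machine/board
 * file registering a lookup so a non-devicetree consumer can resolve a
 * cell by con_id.  All names are hypothetical:
 *
 *	static struct nvmem_cell_lookup board_nvmem_lookup[] = {
 *		{
 *			.nvmem_name	= "at24-0",
 *			.cell_name	= "mac-address",
 *			.dev_id		= "eth0.0",
 *			.con_id		= "mac-address",
 *		},
 *	};
 *
 *	nvmem_add_cell_lookups(board_nvmem_lookup,
 *			       ARRAY_SIZE(board_nvmem_lookup));
 */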

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

/**
 * nvmem_dev_size() - Get the size of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: size of the nvmem device.
 */
size_t nvmem_dev_size(struct nvmem_device *nvmem)
{
	return nvmem->size;
}
EXPORT_SYMBOL_GPL(nvmem_dev_size);

static int __init nvmem_init(void)
{
	int ret;

	ret = bus_register(&nvmem_bus_type);
	if (ret)
		return ret;

	ret = nvmem_layout_bus_register();
	if (ret)
		bus_unregister(&nvmem_bus_type);

	return ret;
}

static void __exit nvmem_exit(void)
{
	nvmem_layout_bus_unregister();
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");