// SPDX-License-Identifier: GPL-2.0+
/*
 * Device tree based initialization code for reserved memory.
 *
 * Copyright (c) 2013, 2015 The Linux Foundation. All Rights Reserved.
 * Copyright (c) 2013,2014 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 * Author: Marek Szyprowski <m.szyprowski@samsung.com>
 * Author: Josh Cartwright <joshc@codeaurora.org>
 */

#define pr_fmt(fmt)	"OF: reserved mem: " fmt

#include <linux/err.h>
#include <linux/libfdt.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/of_reserved_mem.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/kmemleak.h>
#include <linux/cma.h>

#include "of_private.h"

#define MAX_RESERVED_REGIONS	64
static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
static int reserved_mem_count;

static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	phys_addr_t base;
	int err = 0;

	end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
	align = !align ? SMP_CACHE_BYTES : align;
	base = memblock_phys_alloc_range(size, align, start, end);
	if (!base)
		return -ENOMEM;

	*res_base = base;
	if (nomap) {
		err = memblock_mark_nomap(base, size);
		if (err)
			memblock_phys_free(base, size);
	}

	kmemleak_ignore_phys(base);

	return err;
}

/*
 * fdt_reserved_mem_save_node() - save fdt node for second pass initialization
 */
static void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
					      phys_addr_t base, phys_addr_t size)
{
	struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];

	if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) {
		pr_err("not enough space for all defined regions.\n");
		return;
	}

	rmem->fdt_node = node;
	rmem->name = uname;
	rmem->base = base;
	rmem->size = size;

	reserved_mem_count++;
}

static int __init early_init_dt_reserve_memory(phys_addr_t base,
					       phys_addr_t size, bool nomap)
{
	if (nomap) {
		/*
		 * If the memory is already reserved (by another region), we
		 * should not allow it to be marked nomap, but don't worry
		 * if the region isn't memory as it won't be mapped.
		 */
		if (memblock_overlaps_region(&memblock.memory, base, size) &&
		    memblock_is_region_reserved(base, size))
			return -EBUSY;

		return memblock_mark_nomap(base, size);
	}
	return memblock_reserve(base, size);
}

/*
 * __reserved_mem_reserve_reg() - reserve all memory described in 'reg' property
 */
static int __init __reserved_mem_reserve_reg(unsigned long node,
					     const char *uname)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	phys_addr_t base, size;
	int len;
	const __be32 *prop;
	int first = 1;
	bool nomap;

	prop = of_get_flat_dt_prop(node, "reg", &len);
	if (!prop)
		return -ENOENT;

	if (len && len % t_len != 0) {
		pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
		       uname);
		return -EINVAL;
	}

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	while (len >= t_len) {
		base = dt_mem_next_cell(dt_root_addr_cells, &prop);
		size = dt_mem_next_cell(dt_root_size_cells, &prop);

		if (size &&
		    early_init_dt_reserve_memory(base, size, nomap) == 0)
			pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
				 uname, &base, (unsigned long)(size / SZ_1M));
		else
			pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
			       uname, &base, (unsigned long)(size / SZ_1M));

		len -= t_len;
		if (first) {
			fdt_reserved_mem_save_node(node, uname, base, size);
			first = 0;
		}
	}
	return 0;
}
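
/*
 * Illustrative only (node and label names are made up): a static region is
 * described with a 'reg' property under /reserved-memory and is picked up
 * by __reserved_mem_reserve_reg() above, e.g.:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		fb_reserved: framebuffer@78000000 {
 *			reg = <0x78000000 0x800000>;
 *			no-map;
 *		};
 *	};
 */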

/*
 * __reserved_mem_check_root() - check that the #size-cells and
 * #address-cells provided in /reserved-memory match the values supported by
 * the current implementation, and that a 'ranges' property is present
 */
static int __init __reserved_mem_check_root(unsigned long node)
{
	const __be32 *prop;

	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
	if (!prop || be32_to_cpup(prop) != dt_root_size_cells)
		return -EINVAL;

	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
	if (!prop || be32_to_cpup(prop) != dt_root_addr_cells)
		return -EINVAL;

	prop = of_get_flat_dt_prop(node, "ranges", NULL);
	if (!prop)
		return -EINVAL;
	return 0;
}

/*
 * fdt_scan_reserved_mem() - scan the /reserved-memory node, reserve the
 * statically defined regions and save all child nodes for the second pass
 */
int __init fdt_scan_reserved_mem(void)
{
	int node, child;
	const void *fdt = initial_boot_params;

	node = fdt_path_offset(fdt, "/reserved-memory");
	if (node < 0)
		return -ENODEV;

	if (__reserved_mem_check_root(node) != 0) {
		pr_err("Reserved memory: unsupported node format, ignoring\n");
		return -EINVAL;
	}

	fdt_for_each_subnode(child, fdt, node) {
		const char *uname;
		int err;

		if (!of_fdt_device_is_available(fdt, child))
			continue;

		uname = fdt_get_name(fdt, child, NULL);

		err = __reserved_mem_reserve_reg(child, uname);
		if (err == -ENOENT && of_get_flat_dt_prop(child, "size", NULL))
			fdt_reserved_mem_save_node(child, uname, 0, 0);
	}
	return 0;
}

/*
 * __reserved_mem_alloc_in_range() - allocate reserved memory described with
 *	'alloc-ranges'. Choose bottom-up/top-down depending on nearby existing
 *	reserved regions to keep the reserved memory contiguous if possible.
 */
static int __init __reserved_mem_alloc_in_range(phys_addr_t size,
	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
	phys_addr_t *res_base)
{
	bool prev_bottom_up = memblock_bottom_up();
	bool bottom_up = false, top_down = false;
	int ret, i;

	for (i = 0; i < reserved_mem_count; i++) {
		struct reserved_mem *rmem = &reserved_mem[i];

		/* Skip regions that were not reserved yet */
		if (rmem->size == 0)
			continue;

		/*
		 * If range starts next to an existing reservation, use bottom-up:
		 *	|....RRRR................RRRRRRRR..............|
		 *	       --RRRR------
		 */
		if (start >= rmem->base && start <= (rmem->base + rmem->size))
			bottom_up = true;

		/*
		 * If range ends next to an existing reservation, use top-down:
		 *	|....RRRR................RRRRRRRR..............|
		 *	              -------RRRR-----
		 */
		if (end >= rmem->base && end <= (rmem->base + rmem->size))
			top_down = true;
	}

	/* Change setting only if either bottom-up or top-down was selected */
	if (bottom_up != top_down)
		memblock_set_bottom_up(bottom_up);

	ret = early_init_dt_alloc_reserved_memory_arch(size, align,
			start, end, nomap, res_base);

	/* Restore old setting if needed */
	if (bottom_up != top_down)
		memblock_set_bottom_up(prev_bottom_up);

	return ret;
}

/*
 * __reserved_mem_alloc_size() - allocate reserved memory described by
 *	'size', 'alignment' and 'alloc-ranges' properties.
 */
static int __init __reserved_mem_alloc_size(unsigned long node,
	const char *uname, phys_addr_t *res_base, phys_addr_t *res_size)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	phys_addr_t start = 0, end = 0;
	phys_addr_t base = 0, align = 0, size;
	int len;
	const __be32 *prop;
	bool nomap;
	int ret;

	prop = of_get_flat_dt_prop(node, "size", &len);
	if (!prop)
		return -EINVAL;

	if (len != dt_root_size_cells * sizeof(__be32)) {
		pr_err("invalid size property in '%s' node.\n", uname);
		return -EINVAL;
	}
	size = dt_mem_next_cell(dt_root_size_cells, &prop);

	prop = of_get_flat_dt_prop(node, "alignment", &len);
	if (prop) {
		if (len != dt_root_addr_cells * sizeof(__be32)) {
			pr_err("invalid alignment property in '%s' node.\n",
			       uname);
			return -EINVAL;
		}
		align = dt_mem_next_cell(dt_root_addr_cells, &prop);
	}

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	/* Need to adjust the alignment to satisfy the CMA requirement */
	if (IS_ENABLED(CONFIG_CMA)
	    && of_flat_dt_is_compatible(node, "shared-dma-pool")
	    && of_get_flat_dt_prop(node, "reusable", NULL)
	    && !nomap)
		align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES);

	prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
	if (prop) {
		if (len % t_len != 0) {
			pr_err("invalid alloc-ranges property in '%s', skipping node.\n",
			       uname);
			return -EINVAL;
		}

		base = 0;

		while (len > 0) {
			start = dt_mem_next_cell(dt_root_addr_cells, &prop);
			end = start + dt_mem_next_cell(dt_root_size_cells,
						       &prop);

			ret = __reserved_mem_alloc_in_range(size, align,
					start, end, nomap, &base);
			if (ret == 0) {
				pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
					 uname, &base,
					 (unsigned long)(size / SZ_1M));
				break;
			}
			len -= t_len;
		}
	} else {
		ret = early_init_dt_alloc_reserved_memory_arch(size, align,
				0, 0, nomap, &base);
		if (ret == 0)
			pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n",
				 uname, &base, (unsigned long)(size / SZ_1M));
	}

	if (base == 0) {
		pr_err("failed to allocate memory for node '%s': size %lu MiB\n",
		       uname, (unsigned long)(size / SZ_1M));
		return -ENOMEM;
	}

	*res_base = base;
	*res_size = size;

	return 0;
}
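
/*
 * Illustrative only (label and values are made up): a dynamic region omits
 * 'reg' and is carved out here from 'size', optionally bounded by
 * 'alignment' and 'alloc-ranges', e.g.:
 *
 *	multimedia_reserved: multimedia {
 *		compatible = "shared-dma-pool";
 *		reusable;
 *		size = <0x4000000>;
 *		alignment = <0x400000>;
 *		alloc-ranges = <0xa0000000 0x20000000>;
 *	};
 */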

static const struct of_device_id __rmem_of_table_sentinel
	__used __section("__reservedmem_of_table_end");

/*
 * __reserved_mem_init_node() - call region specific reserved memory init code
 */
static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
{
	extern const struct of_device_id __reservedmem_of_table[];
	const struct of_device_id *i;
	int ret = -ENOENT;

	for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
		reservedmem_of_init_fn initfn = i->data;
		const char *compat = i->compatible;

		if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
			continue;

		ret = initfn(rmem);
		if (ret == 0) {
			pr_info("initialized node %s, compatible id %s\n",
				rmem->name, compat);
			break;
		}
	}
	return ret;
}
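
/*
 * Sketch only (names are hypothetical): entries land in
 * __reservedmem_of_table via the RESERVEDMEM_OF_DECLARE() macro from
 * <linux/of_reserved_mem.h>, e.g.:
 *
 *	static int __init my_pool_setup(struct reserved_mem *rmem)
 *	{
 *		rmem->ops = &my_pool_ops;	// device_init/device_release
 *		return 0;
 *	}
 *	RESERVEDMEM_OF_DECLARE(my_pool, "vendor,my-pool", my_pool_setup);
 */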

static int __init __rmem_cmp(const void *a, const void *b)
{
	const struct reserved_mem *ra = a, *rb = b;

	if (ra->base < rb->base)
		return -1;

	if (ra->base > rb->base)
		return 1;

	/*
	 * Put the dynamic allocations (address == 0, size == 0) before static
	 * allocations at address 0x0 so that overlap detection works
	 * correctly.
	 */
	if (ra->size < rb->size)
		return -1;
	if (ra->size > rb->size)
		return 1;

	if (ra->fdt_node < rb->fdt_node)
		return -1;
	if (ra->fdt_node > rb->fdt_node)
		return 1;

	return 0;
}

static void __init __rmem_check_for_overlap(void)
{
	int i;

	if (reserved_mem_count < 2)
		return;

	sort(reserved_mem, reserved_mem_count, sizeof(reserved_mem[0]),
	     __rmem_cmp, NULL);
	for (i = 0; i < reserved_mem_count - 1; i++) {
		struct reserved_mem *this, *next;

		this = &reserved_mem[i];
		next = &reserved_mem[i + 1];

		if (this->base + this->size > next->base) {
			phys_addr_t this_end, next_end;

			this_end = this->base + this->size;
			next_end = next->base + next->size;
			pr_err("OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n",
			       this->name, &this->base, &this_end,
			       next->name, &next->base, &next_end);
		}
	}
}

/**
 * fdt_init_reserved_mem() - allocate and init all saved reserved memory regions
 */
void __init fdt_init_reserved_mem(void)
{
	int i;

	/* check for overlapping reserved regions */
	__rmem_check_for_overlap();

	for (i = 0; i < reserved_mem_count; i++) {
		struct reserved_mem *rmem = &reserved_mem[i];
		unsigned long node = rmem->fdt_node;
		int len;
		const __be32 *prop;
		int err = 0;
		bool nomap;

		nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
		prop = of_get_flat_dt_prop(node, "phandle", &len);
		if (!prop)
			prop = of_get_flat_dt_prop(node, "linux,phandle", &len);
		if (prop)
			rmem->phandle = of_read_number(prop, len/4);

		if (rmem->size == 0)
			err = __reserved_mem_alloc_size(node, rmem->name,
						&rmem->base, &rmem->size);
		if (err == 0) {
			err = __reserved_mem_init_node(rmem);
			if (err != 0 && err != -ENOENT) {
				pr_info("node %s: compatible matching failed\n",
					rmem->name);
				if (nomap)
					memblock_clear_nomap(rmem->base, rmem->size);
				else
					memblock_phys_free(rmem->base,
							   rmem->size);
			} else {
				phys_addr_t end = rmem->base + rmem->size - 1;
				bool reusable =
					(of_get_flat_dt_prop(node, "reusable", NULL)) != NULL;

				pr_info("%pa..%pa (%lu KiB) %s %s %s\n",
					&rmem->base, &end, (unsigned long)(rmem->size / SZ_1K),
					nomap ? "nomap" : "map",
					reusable ? "reusable" : "non-reusable",
					rmem->name ? rmem->name : "unknown");
			}
		}
	}
}
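
/*
 * Illustrative only (addresses are made up): with the pr_fmt prefix above,
 * a successfully set up region is logged at boot roughly as:
 *
 *	OF: reserved mem: 0x78000000..0x787fffff (8192 KiB) nomap non-reusable framebuffer@78000000
 */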

static inline struct reserved_mem *__find_rmem(struct device_node *node)
{
	unsigned int i;

	if (!node->phandle)
		return NULL;

	for (i = 0; i < reserved_mem_count; i++)
		if (reserved_mem[i].phandle == node->phandle)
			return &reserved_mem[i];
	return NULL;
}

struct rmem_assigned_device {
	struct device *dev;
	struct reserved_mem *rmem;
	struct list_head list;
};

static LIST_HEAD(of_rmem_assigned_device_list);
static DEFINE_MUTEX(of_rmem_assigned_device_mutex);

/**
 * of_reserved_mem_device_init_by_idx() - assign reserved memory region to
 *					  given device
 * @dev:	Pointer to the device to configure
 * @np:		Pointer to the device_node with 'memory-region' property
 * @idx:	Index of selected region
 *
 * This function assigns the respective DMA-mapping operations to the @dev
 * device, based on the reserved memory region specified by the
 * 'memory-region' property in the @np node. When a driver needs to use more
 * than one reserved memory region, it should allocate child devices and
 * initialize one region per child device.
 *
 * Returns error code or zero on success.
 */
int of_reserved_mem_device_init_by_idx(struct device *dev,
				       struct device_node *np, int idx)
{
	struct rmem_assigned_device *rd;
	struct device_node *target;
	struct reserved_mem *rmem;
	int ret;

	if (!np || !dev)
		return -EINVAL;

	target = of_parse_phandle(np, "memory-region", idx);
	if (!target)
		return -ENODEV;

	if (!of_device_is_available(target)) {
		of_node_put(target);
		return 0;
	}

	rmem = __find_rmem(target);
	of_node_put(target);

	if (!rmem || !rmem->ops || !rmem->ops->device_init)
		return -EINVAL;

	rd = kmalloc(sizeof(struct rmem_assigned_device), GFP_KERNEL);
	if (!rd)
		return -ENOMEM;

	ret = rmem->ops->device_init(rmem, dev);
	if (ret == 0) {
		rd->dev = dev;
		rd->rmem = rmem;

		mutex_lock(&of_rmem_assigned_device_mutex);
		list_add(&rd->list, &of_rmem_assigned_device_list);
		mutex_unlock(&of_rmem_assigned_device_mutex);

		dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
	} else {
		kfree(rd);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_idx);
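
/*
 * Illustrative only (node names are made up): a consumer references the
 * region with a 'memory-region' phandle and calls this helper from probe():
 *
 *	codec: codec@12345678 {
 *		memory-region = <&multimedia_reserved>;
 *	};
 *
 *	ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, 0);
 */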

/**
 * of_reserved_mem_device_init_by_name() - assign named reserved memory region
 *					   to given device
 * @dev:	pointer to the device to configure
 * @np:		pointer to the device node with 'memory-region' property
 * @name:	name of the selected memory region
 *
 * Returns: 0 on success or a negative error-code on failure.
 */
int of_reserved_mem_device_init_by_name(struct device *dev,
					struct device_node *np,
					const char *name)
{
	int idx = of_property_match_string(np, "memory-region-names", name);

	return of_reserved_mem_device_init_by_idx(dev, np, idx);
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_name);
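
/*
 * Illustrative only (labels and names are made up): the name is resolved
 * against the 'memory-region-names' property, which must stay in sync with
 * 'memory-region':
 *
 *	memory-region = <&fb_reserved>, <&multimedia_reserved>;
 *	memory-region-names = "framebuffer", "multimedia";
 *
 *	ret = of_reserved_mem_device_init_by_name(dev, dev->of_node,
 *						  "multimedia");
 */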

/**
 * of_reserved_mem_device_release() - release reserved memory device structures
 * @dev:	Pointer to the device to deconfigure
 *
 * This function releases structures allocated for memory region handling for
 * the given device.
 */
void of_reserved_mem_device_release(struct device *dev)
{
	struct rmem_assigned_device *rd, *tmp;
	LIST_HEAD(release_list);

	mutex_lock(&of_rmem_assigned_device_mutex);
	list_for_each_entry_safe(rd, tmp, &of_rmem_assigned_device_list, list) {
		if (rd->dev == dev)
			list_move_tail(&rd->list, &release_list);
	}
	mutex_unlock(&of_rmem_assigned_device_mutex);

	list_for_each_entry_safe(rd, tmp, &release_list, list) {
		if (rd->rmem && rd->rmem->ops && rd->rmem->ops->device_release)
			rd->rmem->ops->device_release(rd->rmem, dev);

		kfree(rd);
	}
}
EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);

/**
 * of_reserved_mem_lookup() - acquire reserved_mem from a device node
 * @np:		node pointer of the desired reserved-memory region
 *
 * This function allows drivers to acquire a reference to the reserved_mem
 * struct based on a device node handle.
 *
 * Returns a reserved_mem reference, or NULL on error.
 */
struct reserved_mem *of_reserved_mem_lookup(struct device_node *np)
{
	const char *name;
	int i;

	if (!np->full_name)
		return NULL;

	name = kbasename(np->full_name);
	for (i = 0; i < reserved_mem_count; i++)
		if (!strcmp(reserved_mem[i].name, name))
			return &reserved_mem[i];

	return NULL;
}
EXPORT_SYMBOL_GPL(of_reserved_mem_lookup);
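
/*
 * Sketch only (error handling trimmed): drivers that manage the region
 * themselves can resolve the 'memory-region' phandle and look up the
 * base/size directly:
 *
 *	struct device_node *mnp;
 *	struct reserved_mem *rmem;
 *
 *	mnp = of_parse_phandle(dev->of_node, "memory-region", 0);
 *	rmem = of_reserved_mem_lookup(mnp);
 *	of_node_put(mnp);
 *	if (rmem)
 *		dev_dbg(dev, "rmem base %pa size %pa\n",
 *			&rmem->base, &rmem->size);
 */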