// SPDX-License-Identifier: GPL-2.0
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/of_address.h>

enum devm_ioremap_type {
	DEVM_IOREMAP = 0,
	DEVM_IOREMAP_UC,
	DEVM_IOREMAP_WC,
	DEVM_IOREMAP_NP,
};

void devm_ioremap_release(struct device *dev, void *res)
{
	iounmap(*(void __iomem **)res);
}

static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset,
				    resource_size_t size,
				    enum devm_ioremap_type type)
{
	void __iomem **ptr, *addr = NULL;

	ptr = devres_alloc_node(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL,
				dev_to_node(dev));
	if (!ptr)
		return NULL;

	switch (type) {
	case DEVM_IOREMAP:
		addr = ioremap(offset, size);
		break;
	case DEVM_IOREMAP_UC:
		addr = ioremap_uc(offset, size);
		break;
	case DEVM_IOREMAP_WC:
		addr = ioremap_wc(offset, size);
		break;
	case DEVM_IOREMAP_NP:
		addr = ioremap_np(offset, size);
		break;
	}

	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}

/**
 * devm_ioremap - Managed ioremap()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap(). Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
			   resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP);
}
EXPORT_SYMBOL(devm_ioremap);
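
/*
 * Illustrative sketch (not part of this file's API): a hypothetical probe()
 * that maps a fixed MMIO window with devm_ioremap(). The FOO_* names and
 * the 0x1000 window size are assumptions for the example only. Because the
 * mapping is device-managed, no iounmap() is needed in remove() or on a
 * later probe() error path.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		void __iomem *regs;
 *
 *		regs = devm_ioremap(&pdev->dev, FOO_PHYS_BASE, 0x1000);
 *		if (!regs)
 *			return -ENOMEM;
 *
 *		writel(FOO_ENABLE, regs + FOO_CTRL);
 *		return 0;
 *	}
 */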

/**
 * devm_ioremap_uc - Managed ioremap_uc()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap_uc(). Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
			      resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_UC);
}
EXPORT_SYMBOL_GPL(devm_ioremap_uc);

/**
 * devm_ioremap_wc - Managed ioremap_wc()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap_wc(). Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
			      resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_WC);
}
EXPORT_SYMBOL(devm_ioremap_wc);

/**
 * devm_iounmap - Managed iounmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed iounmap(). @addr must have been mapped using devm_ioremap*().
 */
void devm_iounmap(struct device *dev, void __iomem *addr)
{
	WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
			       (__force void *)addr));
	iounmap(addr);
}
EXPORT_SYMBOL(devm_iounmap);
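
/*
 * Illustrative sketch: an explicit devm_iounmap() is only needed when a
 * mapping must be torn down before driver detach, e.g. when handing the
 * region back to firmware mid-lifetime. The "foo" names are assumptions.
 *
 *	devm_iounmap(&pdev->dev, foo->regs);
 *	foo->regs = NULL;
 */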

static void __iomem *
__devm_ioremap_resource(struct device *dev, const struct resource *res,
			enum devm_ioremap_type type)
{
	resource_size_t size;
	void __iomem *dest_ptr;
	char *pretty_name;

	BUG_ON(!dev);

	if (!res || resource_type(res) != IORESOURCE_MEM) {
		dev_err(dev, "invalid resource %pR\n", res);
		return IOMEM_ERR_PTR(-EINVAL);
	}

	if (type == DEVM_IOREMAP && res->flags & IORESOURCE_MEM_NONPOSTED)
		type = DEVM_IOREMAP_NP;

	size = resource_size(res);

	if (res->name)
		pretty_name = devm_kasprintf(dev, GFP_KERNEL, "%s %s",
					     dev_name(dev), res->name);
	else
		pretty_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
	if (!pretty_name) {
		dev_err(dev, "can't generate pretty name for resource %pR\n", res);
		return IOMEM_ERR_PTR(-ENOMEM);
	}

	if (!devm_request_mem_region(dev, res->start, size, pretty_name)) {
		dev_err(dev, "can't request region for resource %pR\n", res);
		return IOMEM_ERR_PTR(-EBUSY);
	}

	dest_ptr = __devm_ioremap(dev, res->start, size, type);
	if (!dest_ptr) {
		dev_err(dev, "ioremap failed for resource %pR\n", res);
		devm_release_mem_region(dev, res->start, size);
		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
	}

	return dest_ptr;
}

/**
 * devm_ioremap_resource() - check, request region, and ioremap resource
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it. All operations are managed and will be undone
 * on driver detach.
 *
 * Usage example:
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_ioremap_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *devm_ioremap_resource(struct device *dev,
				    const struct resource *res)
{
	return __devm_ioremap_resource(dev, res, DEVM_IOREMAP);
}
EXPORT_SYMBOL(devm_ioremap_resource);

/**
 * devm_ioremap_resource_wc() - write-combined variant of
 *				devm_ioremap_resource()
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *devm_ioremap_resource_wc(struct device *dev,
				       const struct resource *res)
{
	return __devm_ioremap_resource(dev, res, DEVM_IOREMAP_WC);
}
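
/*
 * Illustrative sketch: the write-combined variant follows the same pattern
 * as the devm_ioremap_resource() example above and suits framebuffer-like
 * memory where batched, posted writes are acceptable. The fb_base name is
 * an assumption.
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	fb_base = devm_ioremap_resource_wc(&pdev->dev, res);
 *	if (IS_ERR(fb_base))
 *		return PTR_ERR(fb_base);
 */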

/**
 * devm_of_iomap - Requests a resource and maps the memory mapped IO
 *		   for a given device_node managed by a given device
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it. All operations are managed and will be undone
 * on driver detach of the device.
 *
 * This is to be used when a device requests/maps resources described
 * by other device tree nodes (children or otherwise).
 *
 * @dev: The device "managing" the resource
 * @node: The device-tree node where the resource resides
 * @index: index of the MMIO range in the "reg" property
 * @size: Returns the size of the resource (pass NULL if not needed)
 *
 * Usage example:
 *
 *	base = devm_of_iomap(&pdev->dev, node, 0, NULL);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 *
 * Please Note: This is not a one-to-one replacement for of_iomap() because the
 * of_iomap() function does not track whether the region is already mapped. If
 * two drivers try to map the same memory, the of_iomap() function will succeed
 * but the devm_of_iomap() function will return -EBUSY.
 *
 * Return: a pointer to the requested and mapped memory or an ERR_PTR() encoded
 * error code on failure.
 */
void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
			    resource_size_t *size)
{
	struct resource res;

	if (of_address_to_resource(node, index, &res))
		return IOMEM_ERR_PTR(-EINVAL);
	if (size)
		*size = resource_size(&res);
	return devm_ioremap_resource(dev, &res);
}
EXPORT_SYMBOL(devm_of_iomap);

#ifdef CONFIG_HAS_IOPORT_MAP
/*
 * Generic iomap devres
 */
static void devm_ioport_map_release(struct device *dev, void *res)
{
	ioport_unmap(*(void __iomem **)res);
}

static int devm_ioport_map_match(struct device *dev, void *res,
				 void *match_data)
{
	return *(void **)res == match_data;
}

/**
 * devm_ioport_map - Managed ioport_map()
 * @dev: Generic device to map ioport for
 * @port: Port to map
 * @nr: Number of ports to map
 *
 * Managed ioport_map(). Map is automatically unmapped on driver
 * detach.
 *
 * Return: a pointer to the remapped memory or NULL on failure.
 */
void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
			      unsigned int nr)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc_node(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL,
				dev_to_node(dev));
	if (!ptr)
		return NULL;

	addr = ioport_map(port, nr);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioport_map);
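
/*
 * Illustrative sketch: mapping a legacy I/O-port range so it can be
 * accessed through the ioread*()/iowrite*() accessors. The 0x378 base
 * and length of 8 are assumptions for the example.
 *
 *	void __iomem *base;
 *
 *	base = devm_ioport_map(&pdev->dev, 0x378, 8);
 *	if (!base)
 *		return -ENOMEM;
 *	iowrite8(0xff, base);
 */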

/**
 * devm_ioport_unmap - Managed ioport_unmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed ioport_unmap(). @addr must have been mapped using
 * devm_ioport_map().
 */
void devm_ioport_unmap(struct device *dev, void __iomem *addr)
{
	ioport_unmap(addr);
	WARN_ON(devres_destroy(dev, devm_ioport_map_release,
			       devm_ioport_map_match, (__force void *)addr));
}
EXPORT_SYMBOL(devm_ioport_unmap);
#endif /* CONFIG_HAS_IOPORT_MAP */

static void devm_arch_phys_ac_add_release(struct device *dev, void *res)
{
	arch_phys_wc_del(*((int *)res));
}

/**
 * devm_arch_phys_wc_add - Managed arch_phys_wc_add()
 * @dev: Managed device
 * @base: Memory base address
 * @size: Size of memory range
 *
 * Adds a WC MTRR using arch_phys_wc_add() and sets up a release callback.
 * See arch_phys_wc_add() for more information.
 */
int devm_arch_phys_wc_add(struct device *dev, unsigned long base, unsigned long size)
{
	int *mtrr;
	int ret;

	mtrr = devres_alloc_node(devm_arch_phys_ac_add_release, sizeof(*mtrr), GFP_KERNEL,
				 dev_to_node(dev));
	if (!mtrr)
		return -ENOMEM;

	ret = arch_phys_wc_add(base, size);
	if (ret < 0) {
		devres_free(mtrr);
		return ret;
	}

	*mtrr = ret;
	devres_add(dev, mtrr);

	return ret;
}
EXPORT_SYMBOL(devm_arch_phys_wc_add);
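
/*
 * Illustrative sketch: a graphics driver requesting write combining over
 * its framebuffer aperture; the info->fb_* fields are assumptions. A
 * negative return is commonly treated as non-fatal, since the MTRR is
 * only a performance hint.
 *
 *	ret = devm_arch_phys_wc_add(&pdev->dev, info->fb_base, info->fb_size);
 *	if (ret < 0)
 *		dev_warn(&pdev->dev, "no WC MTRR for framebuffer\n");
 */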

struct arch_io_reserve_memtype_wc_devres {
	resource_size_t start;
	resource_size_t size;
};

static void devm_arch_io_free_memtype_wc_release(struct device *dev, void *res)
{
	const struct arch_io_reserve_memtype_wc_devres *this = res;

	arch_io_free_memtype_wc(this->start, this->size);
}

/**
 * devm_arch_io_reserve_memtype_wc - Managed arch_io_reserve_memtype_wc()
 * @dev: Managed device
 * @start: Memory base address
 * @size: Size of memory range
 *
 * Reserves a memory range with WC caching using arch_io_reserve_memtype_wc()
 * and sets up a release callback. See arch_io_reserve_memtype_wc() for more
 * information.
 */
int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start,
				    resource_size_t size)
{
	struct arch_io_reserve_memtype_wc_devres *dr;
	int ret;

	dr = devres_alloc_node(devm_arch_io_free_memtype_wc_release, sizeof(*dr), GFP_KERNEL,
			       dev_to_node(dev));
	if (!dr)
		return -ENOMEM;

	ret = arch_io_reserve_memtype_wc(start, size);
	if (ret < 0) {
		devres_free(dr);
		return ret;
	}

	dr->start = start;
	dr->size = size;
	devres_add(dev, dr);

	return ret;
}
EXPORT_SYMBOL(devm_arch_io_reserve_memtype_wc);
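
/*
 * Illustrative sketch: reserving WC memtype over a prefetchable BAR
 * before mapping it write-combined; bar_start/bar_len are assumptions.
 * The reservation is dropped automatically on driver detach.
 *
 *	ret = devm_arch_io_reserve_memtype_wc(&pdev->dev, bar_start, bar_len);
 *	if (ret)
 *		return ret;
 *	fb = devm_ioremap_wc(&pdev->dev, bar_start, bar_len);
 *	if (!fb)
 *		return -ENOMEM;
 */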