/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/device.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/ioremap.h>

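/*
 * Default write-back remap helper, used when the architecture does not
 * provide its own arch_memremap_wb(). It prefers ioremap_cache() when the
 * arch defines it and falls back to plain ioremap() otherwise.
 */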
#ifndef arch_memremap_wb
static void *arch_memremap_wb(resource_size_t offset, unsigned long size,
			      unsigned long flags)
{
#ifdef ioremap_cache
	return (__force void *)ioremap_cache(offset, size);
#else
	return (__force void *)ioremap(offset, size);
#endif
}
#endif

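/*
 * Default policy hook: unless the architecture overrides
 * arch_memremap_can_ram_remap(), any System RAM range may be satisfied
 * directly from the kernel's linear (direct) mapping.
 */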
#ifndef arch_memremap_can_ram_remap
static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
					unsigned long flags)
{
	return true;
}
#endif

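/*
 * For MEMREMAP_WB requests that target System RAM, try to reuse the
 * existing direct mapping instead of establishing a new one. Returns NULL
 * when the page is not in the direct map (e.g. highmem) or the arch vetoes
 * the reuse, in which case the caller falls back to arch_memremap_wb().
 */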
static void *try_ram_remap(resource_size_t offset, size_t size,
			   unsigned long flags)
{
	unsigned long pfn = PHYS_PFN(offset);

	/* In the simple case just return the existing linear address */
	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)) &&
	    arch_memremap_can_ram_remap(offset, size, flags))
		return __va(offset);

	return NULL; /* fallback to arch_memremap_wb */
}

/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: any of MEMREMAP_WB, MEMREMAP_WT, MEMREMAP_WC,
 *	   MEMREMAP_ENC, MEMREMAP_DEC
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable. In the case of multiple flags, the different
 * mapping types will be attempted in the order listed below until one of
 * them succeeds (see the usage sketch after this function).
 *
 * MEMREMAP_WB - matches the default mapping for System RAM on
 * the architecture. This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is
 * RAM, memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility. Attempts to
 * map System RAM with this mapping type will fail.
 *
 * MEMREMAP_WC - establish a write-combining mapping, whereby writes may
 * be coalesced together (e.g. in the CPU's write buffers) but the mapping
 * is otherwise uncached. Attempts to map System RAM with this mapping
 * type will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	int is_ram = region_intersects(offset, size,
				       IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
	void *addr = NULL;

	if (!flags)
		return NULL;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
			  &offset, (unsigned long) size);
		return NULL;
	}

	/* Try all mapping types requested until one returns non-NULL */
	if (flags & MEMREMAP_WB) {
		/*
		 * MEMREMAP_WB is special in that it can be satisfied
		 * from the direct map. Some archs depend on the
		 * capability of memremap() to autodetect cases where
		 * the requested range is potentially in System RAM.
		 */
		if (is_ram == REGION_INTERSECTS)
			addr = try_ram_remap(offset, size, flags);
		if (!addr)
			addr = arch_memremap_wb(offset, size, flags);
	}

	/*
	 * If we don't have a mapping yet and other request flags are
	 * present then we will be attempting to establish a new virtual
	 * address mapping. Enforce that this mapping is not aliasing
	 * System RAM.
	 */
	if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
			  &offset, (unsigned long) size);
		return NULL;
	}

	if (!addr && (flags & MEMREMAP_WT))
		addr = ioremap_wt(offset, size);

	if (!addr && (flags & MEMREMAP_WC))
		addr = ioremap_wc(offset, size);

	return addr;
}
EXPORT_SYMBOL(memremap);
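
/*
 * Usage sketch (illustrative only, not compiled here): a hypothetical
 * driver maps a firmware-described, side-effect-free range as cacheable
 * memory, reads from it with plain loads (no __iomem accessors), and
 * tears the mapping down. The resource, buffer and length names are
 * assumptions for the example.
 *
 *	void *virt = memremap(res->start, resource_size(res), MEMREMAP_WB);
 *
 *	if (!virt)
 *		return -ENOMEM;
 *	memcpy(buf, virt, len);		// plain pointer, not __iomem
 *	memunmap(virt);
 */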

void memunmap(void *addr)
{
	if (is_ioremap_addr(addr))
		iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);

static void devm_memremap_release(struct device *dev, void *res)
{
	memunmap(*(void **)res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

void *devm_memremap(struct device *dev, resource_size_t offset,
		    size_t size, unsigned long flags)
{
	void **ptr, *addr;

	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
				dev_to_node(dev));
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	addr = memremap(offset, size, flags);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
		return ERR_PTR(-ENXIO);
	}

	return addr;
}
EXPORT_SYMBOL(devm_memremap);
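
/*
 * Usage sketch (illustrative only, not compiled here): the devres-managed
 * variant as it might appear in a probe() routine; the mapping is released
 * automatically on driver detach. Note the ERR_PTR()-style return, unlike
 * plain memremap() which returns NULL on failure. The resource name is an
 * assumption for the example.
 *
 *	void *virt = devm_memremap(dev, res->start, resource_size(res),
 *				   MEMREMAP_WB);
 *	if (IS_ERR(virt))
 *		return PTR_ERR(virt);
 */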

void devm_memunmap(struct device *dev, void *addr)
{
	WARN_ON(devres_release(dev, devm_memremap_release,
			       devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);