/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_CMPXCHG_H
#define __ASM_ARM_CMPXCHG_H

#include <linux/irqflags.h>
#include <linux/prefetch.h>
#include <asm/barrier.h>

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif
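
/*
 * The interrupt-disabling emulation chosen above is implemented by the
 * swp_is_buggy branch of __arch_xchg() below.
 */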

static inline unsigned long
__arch_xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	prefetchw((const void *)ptr);

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
#ifndef CONFIG_CPU_V6	/* MIN ARCH >= V6K */
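	/*
	 * The ldrex/strex pairs below form an atomic read-modify-write
	 * loop: ldrex loads the value and marks the address for
	 * exclusive access; strex stores only if the exclusive monitor
	 * is still held, writing 0 to its status register on success.
	 * A non-zero status means the reservation was lost, so retry.
	 */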
	case 1:
		asm volatile("@ __xchg1\n"
			     "1: ldrexb %0, [%3]\n"
			     " strexb %1, %2, [%3]\n"
			     " teq %1, #0\n"
			     " bne 1b"
			     : "=&r"(ret), "=&r"(tmp)
			     : "r"(x), "r"(ptr)
			     : "memory", "cc");
		break;
	case 2:
		asm volatile("@ __xchg2\n"
			     "1: ldrexh %0, [%3]\n"
			     " strexh %1, %2, [%3]\n"
			     " teq %1, #0\n"
			     " bne 1b"
			     : "=&r"(ret), "=&r"(tmp)
			     : "r"(x), "r"(ptr)
			     : "memory", "cc");
		break;
#endif
	case 4:
		asm volatile("@ __xchg4\n"
			     "1: ldrex %0, [%3]\n"
			     " strex %1, %2, [%3]\n"
			     " teq %1, #0\n"
			     " bne 1b"
			     : "=&r"(ret), "=&r"(tmp)
			     : "r"(x), "r"(ptr)
			     : "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
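	/*
	 * UP-only emulation: with interrupts disabled on this CPU, the
	 * plain load and store below cannot be interleaved with another
	 * context, which is sufficient on a non-SMP system.
	 */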
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@ __xchg1\n"
			     " swpb %0, %1, [%2]"
			     : "=&r"(ret)
			     : "r"(x), "r"(ptr)
			     : "memory", "cc");
		break;
	case 4:
		asm volatile("@ __xchg4\n"
			     " swp %0, %1, [%2]"
			     : "=&r"(ret)
			     : "r"(x), "r"(ptr)
			     : "memory", "cc");
		break;
#endif
	default:
		/* Cause a link-time error; the xchg() size is not supported */
		__bad_xchg(ptr, size), ret = 0;
		break;
	}

	return ret;
}

#define arch_xchg_relaxed(ptr, x) ({					\
	(__typeof__(*(ptr)))__arch_xchg((unsigned long)(x), (ptr),	\
					sizeof(*(ptr)));		\
})
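
/*
 * Illustrative use (hypothetical caller, not part of this header):
 *
 *	unsigned long old = arch_xchg_relaxed(&flag, 1UL);
 *
 * old receives the previous value of flag; the swap happens as one
 * atomic operation, with no memory-ordering guarantees implied.
 */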

#include <asm-generic/cmpxchg-local.h>

#if __LINUX_ARM_ARCH__ < 6
/* min ARCH < ARMv6 */

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif

#define arch_xchg arch_xchg_relaxed

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define arch_cmpxchg_local(ptr, o, n) ({				\
	(__typeof(*ptr))__generic_cmpxchg_local((ptr),			\
						(unsigned long)(o),	\
						(unsigned long)(n),	\
						sizeof(*(ptr)));	\
})

#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))

#include <asm-generic/cmpxchg.h>

#else	/* min ARCH >= ARMv6 */

extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
 * cmpxchg only supports 32-bit operands on ARMv6.
 */
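
/*
 * Byte and halfword exclusives (ldrexb/strexb, ldrexh/strexh) only
 * exist from ARMv6K onwards, hence the CONFIG_CPU_V6 guards below.
 */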

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval, res;

	prefetchw((const void *)ptr);

	switch (size) {
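	/*
	 * The store-exclusives below are predicated (strexbeq/strexheq/
	 * strexeq): %0 is preset to 0 and the store is attempted only
	 * when the comparison succeeds, so the C-level loop retries
	 * solely when an exclusive reservation is lost, never on a
	 * value mismatch.
	 */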
#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
	case 1:
		do {
			asm volatile("@ __cmpxchg1\n"
				     " ldrexb %1, [%2]\n"
				     " mov %0, #0\n"
				     " teq %1, %3\n"
				     " strexbeq %0, %4, [%2]\n"
				     : "=&r"(res), "=&r"(oldval)
				     : "r"(ptr), "Ir"(old), "r"(new)
				     : "memory", "cc");
		} while (res);
		break;
	case 2:
		do {
			asm volatile("@ __cmpxchg2\n"
181 | " ldrexh %1, [%2]\n" |
182 | " mov %0, #0\n" |
183 | " teq %1, %3\n" |
184 | " strexheq %0, %4, [%2]\n" |
185 | : "=&r"(res), "=&r"(oldval) |
186 | : "r"(ptr), "Ir"(old), "r"(new) |
187 | : "memory", "cc"); |
188 | } while (res); |
189 | break; |
190 | #endif |
191 | case 4: |
192 | do { |
193 | asm volatile("@ __cmpxchg4\n" |
194 | " ldrex %1, [%2]\n" |
195 | " mov %0, #0\n" |
196 | " teq %1, %3\n" |
197 | " strexeq %0, %4, [%2]\n" |
198 | : "=&r"(res), "=&r"(oldval) |
199 | : "r"(ptr), "Ir"(old), "r"(new) |
200 | : "memory", "cc"); |
201 | } while (res); |
202 | break; |
203 | default: |
204 | __bad_cmpxchg(ptr, size); |
205 | oldval = 0; |
206 | } |
207 | |
208 | return oldval; |
209 | } |
210 | |
211 | #define arch_cmpxchg_relaxed(ptr,o,n) ({ \ |
212 | (__typeof__(*(ptr)))__cmpxchg((ptr), \ |
213 | (unsigned long)(o), \ |
214 | (unsigned long)(n), \ |
215 | sizeof(*(ptr))); \ |
216 | }) |
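
/*
 * Illustrative use (hypothetical caller, not part of this header):
 *
 *	unsigned int old, cur = READ_ONCE(*ctr);
 *
 *	do {
 *		old = cur;
 *		cur = arch_cmpxchg_relaxed(ctr, old, old + 1);
 *	} while (cur != old);
 *
 * The loop retries until the compare-and-swap observes the expected
 * value, giving a lock-free increment of *ctr.
 */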

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long ret;

	switch (size) {
#ifdef CONFIG_CPU_V6	/* min ARCH == ARMv6 */
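	/*
	 * Plain ARMv6 lacks byte/halfword exclusives, so the local
	 * (current-CPU-only) variant falls back to the generic
	 * interrupt-disabling implementation for those sizes.
	 */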
	case 1:
	case 2:
		ret = __generic_cmpxchg_local(ptr, old, new, size);
		break;
#endif
	default:
		ret = __cmpxchg(ptr, old, new, size);
	}

	return ret;
}

#define arch_cmpxchg_local(ptr, o, n) ({				\
	(__typeof(*ptr))__cmpxchg_local((ptr),				\
					(unsigned long)(o),		\
					(unsigned long)(n),		\
					sizeof(*(ptr)));		\
})

static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	unsigned long long oldval;
	unsigned long res;

	prefetchw(ptr);

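	/*
	 * ldrexd/strexd operate on an even/odd register pair; the %H
	 * operand modifier names the high register of the pair, so
	 * %1/%H1 together hold the 64-bit loaded value.
	 */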
	__asm__ __volatile__(
	"1: ldrexd %1, %H1, [%3]\n"
	" teq %1, %4\n"
	" teqeq %H1, %H4\n"
	" bne 2f\n"
	" strexd %0, %5, %H5, [%3]\n"
	" teq %0, #0\n"
	" bne 1b\n"
	"2:"
	: "=&r"(res), "=&r"(oldval), "+Qo"(*ptr)
	: "r"(ptr), "r"(old), "r"(new)
	: "cc");

	return oldval;
}

#define arch_cmpxchg64_relaxed(ptr, o, n) ({				\
	(__typeof__(*(ptr)))__cmpxchg64((ptr),				\
					(unsigned long long)(o),	\
					(unsigned long long)(n));	\
})
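
/*
 * Illustrative use (hypothetical caller, not part of this header):
 *
 *	unsigned long long seen;
 *
 *	seen = arch_cmpxchg64_relaxed(&val64, 0ULL, 1ULL);
 *
 * seen == 0 means this caller installed the 1; any other value means
 * val64 had already been claimed.
 */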

#define arch_cmpxchg64_local(ptr, o, n) arch_cmpxchg64_relaxed((ptr), (o), (n))

#endif	/* __LINUX_ARM_ARCH__ >= 6 */

#endif	/* __ASM_ARM_CMPXCHG_H */