1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef _ASM_X86_PERCPU_H |
3 | #define _ASM_X86_PERCPU_H |
4 | |
5 | #ifdef CONFIG_X86_64 |
6 | #define __percpu_seg gs |
7 | #define __percpu_rel (%rip) |
8 | #else |
9 | #define __percpu_seg fs |
10 | #define __percpu_rel |
11 | #endif |
12 | |
13 | #ifdef __ASSEMBLY__ |
14 | |
15 | #ifdef CONFIG_SMP |
16 | #define __percpu %__percpu_seg: |
17 | #else |
18 | #define __percpu |
19 | #endif |
20 | |
21 | #define PER_CPU_VAR(var) __percpu(var)__percpu_rel |
22 | |
23 | #ifdef CONFIG_X86_64_SMP |
24 | #define INIT_PER_CPU_VAR(var) init_per_cpu__##var |
25 | #else |
26 | #define INIT_PER_CPU_VAR(var) var |
27 | #endif |
28 | |
29 | #else /* ...!ASSEMBLY */ |
30 | |
31 | #include <linux/build_bug.h> |
32 | #include <linux/stringify.h> |
33 | #include <asm/asm.h> |
34 | |
35 | #ifdef CONFIG_SMP |
36 | |
37 | #ifdef CONFIG_CC_HAS_NAMED_AS |
38 | |
39 | #ifdef __CHECKER__ |
40 | #define __seg_gs __attribute__((address_space(__seg_gs))) |
41 | #define __seg_fs __attribute__((address_space(__seg_fs))) |
42 | #endif |
43 | |
44 | #ifdef CONFIG_X86_64 |
45 | #define __percpu_seg_override __seg_gs |
46 | #else |
47 | #define __percpu_seg_override __seg_fs |
48 | #endif |
49 | |
50 | #define __percpu_prefix "" |
51 | |
52 | #else /* CONFIG_CC_HAS_NAMED_AS */ |
53 | |
54 | #define __percpu_seg_override |
55 | #define __percpu_prefix "%%"__stringify(__percpu_seg)":" |
56 | |
57 | #endif /* CONFIG_CC_HAS_NAMED_AS */ |
58 | |
59 | #define __force_percpu_prefix "%%"__stringify(__percpu_seg)":" |
60 | #define __my_cpu_offset this_cpu_read(this_cpu_off) |
61 | |
62 | #ifdef CONFIG_USE_X86_SEG_SUPPORT |
63 | /* |
64 | * Efficient implementation for cases in which the compiler supports |
 * named address spaces.  This allows the compiler to perform additional
 * optimizations and save instructions.
67 | */ |
68 | #define arch_raw_cpu_ptr(ptr) \ |
69 | ({ \ |
70 | unsigned long tcp_ptr__; \ |
71 | tcp_ptr__ = __raw_cpu_read(, this_cpu_off); \ |
72 | \ |
73 | tcp_ptr__ += (unsigned long)(ptr); \ |
74 | (typeof(*(ptr)) __kernel __force *)tcp_ptr__; \ |
75 | }) |
76 | #else /* CONFIG_USE_X86_SEG_SUPPORT */ |
77 | /* |
78 | * Compared to the generic __my_cpu_offset version, the following |
79 | * saves one instruction and avoids clobbering a temp register. |
80 | */ |
81 | #define arch_raw_cpu_ptr(ptr) \ |
82 | ({ \ |
83 | unsigned long tcp_ptr__; \ |
84 | asm ("mov " __percpu_arg(1) ", %0" \ |
85 | : "=r" (tcp_ptr__) \ |
86 | : "m" (__my_cpu_var(this_cpu_off))); \ |
87 | \ |
88 | tcp_ptr__ += (unsigned long)(ptr); \ |
89 | (typeof(*(ptr)) __kernel __force *)tcp_ptr__; \ |
90 | }) |
91 | #endif /* CONFIG_USE_X86_SEG_SUPPORT */ |
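
/*
 * Usage sketch (illustrative only, hypothetical variable name):
 * arch_raw_cpu_ptr() is what backs the generic raw_cpu_ptr() and
 * this_cpu_ptr() helpers.  Given a per-CPU variable such as
 *
 *	DEFINE_PER_CPU(struct foo, foo_stats);
 *
 * a caller obtains a pointer to the local CPU's instance with
 *
 *	struct foo *f = this_cpu_ptr(&foo_stats);
 */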
92 | |
93 | #define PER_CPU_VAR(var) %__percpu_seg:(var)__percpu_rel |
94 | |
95 | #else /* CONFIG_SMP */ |
96 | #define __percpu_seg_override |
97 | #define __percpu_prefix "" |
98 | #define __force_percpu_prefix "" |
99 | |
100 | #define PER_CPU_VAR(var) (var)__percpu_rel |
101 | |
102 | #endif /* CONFIG_SMP */ |
103 | |
104 | #define __my_cpu_type(var) typeof(var) __percpu_seg_override |
105 | #define __my_cpu_ptr(ptr) (__my_cpu_type(*ptr) *)(uintptr_t)(ptr) |
106 | #define __my_cpu_var(var) (*__my_cpu_ptr(&var)) |
107 | #define __percpu_arg(x) __percpu_prefix "%" #x |
108 | #define __force_percpu_arg(x) __force_percpu_prefix "%" #x |
109 | |
110 | /* |
111 | * Initialized pointers to per-cpu variables needed for the boot |
112 | * processor need to use these macros to get the proper address |
113 | * offset from __per_cpu_load on SMP. |
114 | * |
 * There must also be a matching entry in arch/x86/kernel/vmlinux.lds.S.
116 | */ |
117 | #define DECLARE_INIT_PER_CPU(var) \ |
118 | extern typeof(var) init_per_cpu_var(var) |
119 | |
120 | #ifdef CONFIG_X86_64_SMP |
121 | #define init_per_cpu_var(var) init_per_cpu__##var |
122 | #else |
123 | #define init_per_cpu_var(var) var |
124 | #endif |
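
/*
 * Illustrative sketch (hedged example; gdt_page stands in for any such
 * early-referenced per-CPU variable): a boot-time reference is declared
 * and used as
 *
 *	DECLARE_INIT_PER_CPU(struct gdt_page, gdt_page);
 *	... = &init_per_cpu_var(gdt_page);
 *
 * together with a matching INIT_PER_CPU() entry in the linker script so
 * that the init_per_cpu__gdt_page alias resolves to the boot CPU's copy.
 */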
125 | |
/*
 * For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though).
 */
128 | |
129 | #define __pcpu_type_1 u8 |
130 | #define __pcpu_type_2 u16 |
131 | #define __pcpu_type_4 u32 |
132 | #define __pcpu_type_8 u64 |
133 | |
134 | #define __pcpu_cast_1(val) ((u8)(((unsigned long) val) & 0xff)) |
135 | #define __pcpu_cast_2(val) ((u16)(((unsigned long) val) & 0xffff)) |
136 | #define __pcpu_cast_4(val) ((u32)(((unsigned long) val) & 0xffffffff)) |
137 | #define __pcpu_cast_8(val) ((u64)(val)) |
138 | |
139 | #define __pcpu_op1_1(op, dst) op "b " dst |
140 | #define __pcpu_op1_2(op, dst) op "w " dst |
141 | #define __pcpu_op1_4(op, dst) op "l " dst |
142 | #define __pcpu_op1_8(op, dst) op "q " dst |
143 | |
144 | #define __pcpu_op2_1(op, src, dst) op "b " src ", " dst |
145 | #define __pcpu_op2_2(op, src, dst) op "w " src ", " dst |
146 | #define __pcpu_op2_4(op, src, dst) op "l " src ", " dst |
147 | #define __pcpu_op2_8(op, src, dst) op "q " src ", " dst |
148 | |
149 | #define __pcpu_reg_1(mod, x) mod "q" (x) |
150 | #define __pcpu_reg_2(mod, x) mod "r" (x) |
151 | #define __pcpu_reg_4(mod, x) mod "r" (x) |
152 | #define __pcpu_reg_8(mod, x) mod "r" (x) |
153 | |
154 | #define __pcpu_reg_imm_1(x) "qi" (x) |
155 | #define __pcpu_reg_imm_2(x) "ri" (x) |
156 | #define __pcpu_reg_imm_4(x) "ri" (x) |
157 | #define __pcpu_reg_imm_8(x) "re" (x) |
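
/*
 * Illustrative expansion (approximate, not compiled): the size-keyed helpers
 * above let the operation macros below build one correctly suffixed
 * instruction per access width.  A 4-byte percpu_to_op(4, , "add", var, val)
 * roughly becomes
 *
 *	asm("addl %[val], %%gs:%[var]" : [var] "+m" (var) : [val] "ri" (val));
 *
 * with the exact segment prefix and addressing depending on configuration.
 */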
158 | |
159 | #define percpu_to_op(size, qual, op, _var, _val) \ |
160 | do { \ |
161 | __pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val); \ |
162 | if (0) { \ |
163 | typeof(_var) pto_tmp__; \ |
164 | pto_tmp__ = (_val); \ |
165 | (void)pto_tmp__; \ |
166 | } \ |
167 | asm qual(__pcpu_op2_##size(op, "%[val]", __percpu_arg([var])) \ |
168 | : [var] "+m" (__my_cpu_var(_var)) \ |
169 | : [val] __pcpu_reg_imm_##size(pto_val__)); \ |
170 | } while (0) |
171 | |
172 | #define percpu_unary_op(size, qual, op, _var) \ |
173 | ({ \ |
174 | asm qual (__pcpu_op1_##size(op, __percpu_arg([var])) \ |
175 | : [var] "+m" (__my_cpu_var(_var))); \ |
176 | }) |
177 | |
178 | /* |
 * Generate a per-CPU add-to-memory instruction, optimizing it to inc/dec
 * when the constant 1 or -1 is added.
181 | */ |
182 | #define percpu_add_op(size, qual, var, val) \ |
183 | do { \ |
184 | const int pao_ID__ = (__builtin_constant_p(val) && \ |
185 | ((val) == 1 || (val) == -1)) ? \ |
186 | (int)(val) : 0; \ |
187 | if (0) { \ |
188 | typeof(var) pao_tmp__; \ |
189 | pao_tmp__ = (val); \ |
190 | (void)pao_tmp__; \ |
191 | } \ |
192 | if (pao_ID__ == 1) \ |
193 | percpu_unary_op(size, qual, "inc", var); \ |
194 | else if (pao_ID__ == -1) \ |
195 | percpu_unary_op(size, qual, "dec", var); \ |
196 | else \ |
197 | percpu_to_op(size, qual, "add", var, val); \ |
198 | } while (0) |
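
/*
 * Illustrative sketch (hypothetical variable, actual codegen may differ):
 * adding a compile-time constant 1 or -1 is strength-reduced to inc/dec,
 * so
 *
 *	this_cpu_add_4(foo_count, 1);
 *
 * typically assembles to "incl %gs:foo_count(%rip)" instead of an addl
 * with an immediate operand.
 */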
199 | |
200 | #define percpu_from_op(size, qual, op, _var) \ |
201 | ({ \ |
202 | __pcpu_type_##size pfo_val__; \ |
203 | asm qual (__pcpu_op2_##size(op, __percpu_arg([var]), "%[val]") \ |
204 | : [val] __pcpu_reg_##size("=", pfo_val__) \ |
205 | : [var] "m" (__my_cpu_var(_var))); \ |
206 | (typeof(_var))(unsigned long) pfo_val__; \ |
207 | }) |
208 | |
209 | #define percpu_stable_op(size, op, _var) \ |
210 | ({ \ |
211 | __pcpu_type_##size pfo_val__; \ |
212 | asm(__pcpu_op2_##size(op, __force_percpu_arg(a[var]), "%[val]") \ |
213 | : [val] __pcpu_reg_##size("=", pfo_val__) \ |
214 | : [var] "i" (&(_var))); \ |
215 | (typeof(_var))(unsigned long) pfo_val__; \ |
216 | }) |
217 | |
218 | /* |
219 | * Add return operation |
220 | */ |
221 | #define percpu_add_return_op(size, qual, _var, _val) \ |
222 | ({ \ |
223 | __pcpu_type_##size paro_tmp__ = __pcpu_cast_##size(_val); \ |
224 | asm qual (__pcpu_op2_##size("xadd", "%[tmp]", \ |
225 | __percpu_arg([var])) \ |
226 | : [tmp] __pcpu_reg_##size("+", paro_tmp__), \ |
227 | [var] "+m" (__my_cpu_var(_var)) \ |
228 | : : "memory"); \ |
229 | (typeof(_var))(unsigned long) (paro_tmp__ + _val); \ |
230 | }) |
231 | |
232 | /* |
 * The per-CPU xchg is implemented as a cmpxchg loop rather than a real
 * xchg instruction: xchg always carries an implied lock prefix, which is
 * expensive and prevents the processor from prefetching cachelines.
236 | */ |
237 | #define percpu_xchg_op(size, qual, _var, _nval) \ |
238 | ({ \ |
239 | __pcpu_type_##size pxo_old__; \ |
240 | __pcpu_type_##size pxo_new__ = __pcpu_cast_##size(_nval); \ |
241 | asm qual (__pcpu_op2_##size("mov", __percpu_arg([var]), \ |
242 | "%[oval]") \ |
243 | "\n1:\t" \ |
244 | __pcpu_op2_##size("cmpxchg", "%[nval]", \ |
245 | __percpu_arg([var])) \ |
246 | "\n\tjnz 1b" \ |
247 | : [oval] "=&a" (pxo_old__), \ |
248 | [var] "+m" (__my_cpu_var(_var)) \ |
249 | : [nval] __pcpu_reg_##size(, pxo_new__) \ |
250 | : "memory"); \ |
251 | (typeof(_var))(unsigned long) pxo_old__; \ |
252 | }) |
253 | |
254 | /* |
 * cmpxchg carries no such implied lock semantics, so it is much more
 * efficient for CPU-local operations.
257 | */ |
258 | #define percpu_cmpxchg_op(size, qual, _var, _oval, _nval) \ |
259 | ({ \ |
260 | __pcpu_type_##size pco_old__ = __pcpu_cast_##size(_oval); \ |
261 | __pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval); \ |
262 | asm qual (__pcpu_op2_##size("cmpxchg", "%[nval]", \ |
263 | __percpu_arg([var])) \ |
264 | : [oval] "+a" (pco_old__), \ |
265 | [var] "+m" (__my_cpu_var(_var)) \ |
266 | : [nval] __pcpu_reg_##size(, pco_new__) \ |
267 | : "memory"); \ |
268 | (typeof(_var))(unsigned long) pco_old__; \ |
269 | }) |
270 | |
271 | #define percpu_try_cmpxchg_op(size, qual, _var, _ovalp, _nval) \ |
272 | ({ \ |
273 | bool success; \ |
274 | __pcpu_type_##size *pco_oval__ = (__pcpu_type_##size *)(_ovalp); \ |
275 | __pcpu_type_##size pco_old__ = *pco_oval__; \ |
276 | __pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval); \ |
277 | asm qual (__pcpu_op2_##size("cmpxchg", "%[nval]", \ |
278 | __percpu_arg([var])) \ |
279 | CC_SET(z) \ |
280 | : CC_OUT(z) (success), \ |
281 | [oval] "+a" (pco_old__), \ |
282 | [var] "+m" (__my_cpu_var(_var)) \ |
283 | : [nval] __pcpu_reg_##size(, pco_new__) \ |
284 | : "memory"); \ |
285 | if (unlikely(!success)) \ |
286 | *pco_oval__ = pco_old__; \ |
287 | likely(success); \ |
288 | }) |
289 | |
290 | #if defined(CONFIG_X86_32) && !defined(CONFIG_UML) |
291 | #define percpu_cmpxchg64_op(size, qual, _var, _oval, _nval) \ |
292 | ({ \ |
293 | union { \ |
294 | u64 var; \ |
295 | struct { \ |
296 | u32 low, high; \ |
297 | }; \ |
298 | } old__, new__; \ |
299 | \ |
300 | old__.var = _oval; \ |
301 | new__.var = _nval; \ |
302 | \ |
303 | asm qual (ALTERNATIVE("call this_cpu_cmpxchg8b_emu", \ |
304 | "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \ |
305 | : [var] "+m" (__my_cpu_var(_var)), \ |
306 | "+a" (old__.low), \ |
307 | "+d" (old__.high) \ |
308 | : "b" (new__.low), \ |
309 | "c" (new__.high), \ |
310 | "S" (&(_var)) \ |
311 | : "memory"); \ |
312 | \ |
313 | old__.var; \ |
314 | }) |
315 | |
316 | #define raw_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg64_op(8, , pcp, oval, nval) |
317 | #define this_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg64_op(8, volatile, pcp, oval, nval) |
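
/*
 * Illustrative note (hypothetical variable): the default path calls
 * this_cpu_cmpxchg8b_emu and ALTERNATIVE() patches in CMPXCHG8B when
 * X86_FEATURE_CX8 is available; callers use the 8-byte form exactly like
 * the narrower sizes, e.g.
 *
 *	seq = this_cpu_cmpxchg64(foo_seq, old_seq, new_seq);
 */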
318 | |
319 | #define percpu_try_cmpxchg64_op(size, qual, _var, _ovalp, _nval) \ |
320 | ({ \ |
321 | bool success; \ |
322 | u64 *_oval = (u64 *)(_ovalp); \ |
323 | union { \ |
324 | u64 var; \ |
325 | struct { \ |
326 | u32 low, high; \ |
327 | }; \ |
328 | } old__, new__; \ |
329 | \ |
330 | old__.var = *_oval; \ |
331 | new__.var = _nval; \ |
332 | \ |
333 | asm qual (ALTERNATIVE("call this_cpu_cmpxchg8b_emu", \ |
334 | "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \ |
335 | CC_SET(z) \ |
336 | : CC_OUT(z) (success), \ |
337 | [var] "+m" (__my_cpu_var(_var)), \ |
338 | "+a" (old__.low), \ |
339 | "+d" (old__.high) \ |
340 | : "b" (new__.low), \ |
341 | "c" (new__.high), \ |
342 | "S" (&(_var)) \ |
343 | : "memory"); \ |
344 | if (unlikely(!success)) \ |
345 | *_oval = old__.var; \ |
346 | likely(success); \ |
347 | }) |
348 | |
349 | #define raw_cpu_try_cmpxchg64(pcp, ovalp, nval) percpu_try_cmpxchg64_op(8, , pcp, ovalp, nval) |
350 | #define this_cpu_try_cmpxchg64(pcp, ovalp, nval) percpu_try_cmpxchg64_op(8, volatile, pcp, ovalp, nval) |
351 | #endif |
352 | |
353 | #ifdef CONFIG_X86_64 |
#define raw_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg_op(8, , pcp, oval, nval)
#define this_cpu_cmpxchg64(pcp, oval, nval) percpu_cmpxchg_op(8, volatile, pcp, oval, nval)

#define raw_cpu_try_cmpxchg64(pcp, ovalp, nval) percpu_try_cmpxchg_op(8, , pcp, ovalp, nval)
#define this_cpu_try_cmpxchg64(pcp, ovalp, nval) percpu_try_cmpxchg_op(8, volatile, pcp, ovalp, nval)
359 | |
360 | #define percpu_cmpxchg128_op(size, qual, _var, _oval, _nval) \ |
361 | ({ \ |
362 | union { \ |
363 | u128 var; \ |
364 | struct { \ |
365 | u64 low, high; \ |
366 | }; \ |
367 | } old__, new__; \ |
368 | \ |
369 | old__.var = _oval; \ |
370 | new__.var = _nval; \ |
371 | \ |
372 | asm qual (ALTERNATIVE("call this_cpu_cmpxchg16b_emu", \ |
373 | "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \ |
374 | : [var] "+m" (__my_cpu_var(_var)), \ |
375 | "+a" (old__.low), \ |
376 | "+d" (old__.high) \ |
377 | : "b" (new__.low), \ |
378 | "c" (new__.high), \ |
379 | "S" (&(_var)) \ |
380 | : "memory"); \ |
381 | \ |
382 | old__.var; \ |
383 | }) |
384 | |
385 | #define raw_cpu_cmpxchg128(pcp, oval, nval) percpu_cmpxchg128_op(16, , pcp, oval, nval) |
386 | #define this_cpu_cmpxchg128(pcp, oval, nval) percpu_cmpxchg128_op(16, volatile, pcp, oval, nval) |
387 | |
388 | #define percpu_try_cmpxchg128_op(size, qual, _var, _ovalp, _nval) \ |
389 | ({ \ |
390 | bool success; \ |
391 | u128 *_oval = (u128 *)(_ovalp); \ |
392 | union { \ |
393 | u128 var; \ |
394 | struct { \ |
395 | u64 low, high; \ |
396 | }; \ |
397 | } old__, new__; \ |
398 | \ |
399 | old__.var = *_oval; \ |
400 | new__.var = _nval; \ |
401 | \ |
402 | asm qual (ALTERNATIVE("call this_cpu_cmpxchg16b_emu", \ |
403 | "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \ |
404 | CC_SET(z) \ |
405 | : CC_OUT(z) (success), \ |
406 | [var] "+m" (__my_cpu_var(_var)), \ |
407 | "+a" (old__.low), \ |
408 | "+d" (old__.high) \ |
409 | : "b" (new__.low), \ |
410 | "c" (new__.high), \ |
411 | "S" (&(_var)) \ |
412 | : "memory"); \ |
413 | if (unlikely(!success)) \ |
414 | *_oval = old__.var; \ |
415 | likely(success); \ |
416 | }) |
417 | |
418 | #define raw_cpu_try_cmpxchg128(pcp, ovalp, nval) percpu_try_cmpxchg128_op(16, , pcp, ovalp, nval) |
419 | #define this_cpu_try_cmpxchg128(pcp, ovalp, nval) percpu_try_cmpxchg128_op(16, volatile, pcp, ovalp, nval) |
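
/*
 * Usage sketch (illustrative, hypothetical u128 per-CPU variable): the
 * 16-byte forms behave like the smaller sizes, using CMPXCHG16B or the
 * this_cpu_cmpxchg16b_emu fallback selected by ALTERNATIVE():
 *
 *	ok = this_cpu_try_cmpxchg128(foo_pair, &old, new);
 *
 * where old and new are u128 values and old is updated on failure.
 */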
420 | #endif |
421 | |
422 | /* |
423 | * this_cpu_read() makes gcc load the percpu variable every time it is |
424 | * accessed while this_cpu_read_stable() allows the value to be cached. |
 * this_cpu_read_stable() is more efficient and can be used if the value
 * is guaranteed to be valid across CPUs.  The current users include
427 | * pcpu_hot.current_task and pcpu_hot.top_of_stack, both of which are |
428 | * actually per-thread variables implemented as per-CPU variables and |
429 | * thus stable for the duration of the respective task. |
430 | */ |
431 | #define this_cpu_read_stable_1(pcp) percpu_stable_op(1, "mov", pcp) |
432 | #define this_cpu_read_stable_2(pcp) percpu_stable_op(2, "mov", pcp) |
433 | #define this_cpu_read_stable_4(pcp) percpu_stable_op(4, "mov", pcp) |
434 | #define this_cpu_read_stable_8(pcp) percpu_stable_op(8, "mov", pcp) |
435 | #define this_cpu_read_stable(pcp) __pcpu_size_call_return(this_cpu_read_stable_, pcp) |
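
/*
 * Usage sketch (illustrative): the canonical user mentioned above is the
 * current task pointer, which is per-thread and therefore stable regardless
 * of which CPU the caller runs on, along the lines of
 *
 *	struct task_struct *t = this_cpu_read_stable(pcpu_hot.current_task);
 */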
436 | |
437 | #ifdef CONFIG_USE_X86_SEG_SUPPORT |
438 | |
439 | #define __raw_cpu_read(qual, pcp) \ |
440 | ({ \ |
441 | *(qual __my_cpu_type(pcp) *)__my_cpu_ptr(&(pcp)); \ |
442 | }) |
443 | |
444 | #define __raw_cpu_write(qual, pcp, val) \ |
445 | do { \ |
446 | *(qual __my_cpu_type(pcp) *)__my_cpu_ptr(&(pcp)) = (val); \ |
447 | } while (0) |
448 | |
449 | #define raw_cpu_read_1(pcp) __raw_cpu_read(, pcp) |
450 | #define raw_cpu_read_2(pcp) __raw_cpu_read(, pcp) |
451 | #define raw_cpu_read_4(pcp) __raw_cpu_read(, pcp) |
452 | #define raw_cpu_write_1(pcp, val) __raw_cpu_write(, pcp, val) |
453 | #define raw_cpu_write_2(pcp, val) __raw_cpu_write(, pcp, val) |
454 | #define raw_cpu_write_4(pcp, val) __raw_cpu_write(, pcp, val) |
455 | |
456 | #define this_cpu_read_1(pcp) __raw_cpu_read(volatile, pcp) |
457 | #define this_cpu_read_2(pcp) __raw_cpu_read(volatile, pcp) |
458 | #define this_cpu_read_4(pcp) __raw_cpu_read(volatile, pcp) |
459 | #define this_cpu_write_1(pcp, val) __raw_cpu_write(volatile, pcp, val) |
460 | #define this_cpu_write_2(pcp, val) __raw_cpu_write(volatile, pcp, val) |
461 | #define this_cpu_write_4(pcp, val) __raw_cpu_write(volatile, pcp, val) |
462 | |
463 | #ifdef CONFIG_X86_64 |
464 | #define raw_cpu_read_8(pcp) __raw_cpu_read(, pcp) |
465 | #define raw_cpu_write_8(pcp, val) __raw_cpu_write(, pcp, val) |
466 | |
467 | #define this_cpu_read_8(pcp) __raw_cpu_read(volatile, pcp) |
468 | #define this_cpu_write_8(pcp, val) __raw_cpu_write(volatile, pcp, val) |
469 | #endif |
470 | |
471 | #define this_cpu_read_const(pcp) __raw_cpu_read(, pcp) |
472 | #else /* CONFIG_USE_X86_SEG_SUPPORT */ |
473 | |
474 | #define raw_cpu_read_1(pcp) percpu_from_op(1, , "mov", pcp) |
475 | #define raw_cpu_read_2(pcp) percpu_from_op(2, , "mov", pcp) |
476 | #define raw_cpu_read_4(pcp) percpu_from_op(4, , "mov", pcp) |
477 | #define raw_cpu_write_1(pcp, val) percpu_to_op(1, , "mov", (pcp), val) |
478 | #define raw_cpu_write_2(pcp, val) percpu_to_op(2, , "mov", (pcp), val) |
479 | #define raw_cpu_write_4(pcp, val) percpu_to_op(4, , "mov", (pcp), val) |
480 | |
481 | #define this_cpu_read_1(pcp) percpu_from_op(1, volatile, "mov", pcp) |
482 | #define this_cpu_read_2(pcp) percpu_from_op(2, volatile, "mov", pcp) |
483 | #define this_cpu_read_4(pcp) percpu_from_op(4, volatile, "mov", pcp) |
484 | #define this_cpu_write_1(pcp, val) percpu_to_op(1, volatile, "mov", (pcp), val) |
485 | #define this_cpu_write_2(pcp, val) percpu_to_op(2, volatile, "mov", (pcp), val) |
486 | #define this_cpu_write_4(pcp, val) percpu_to_op(4, volatile, "mov", (pcp), val) |
487 | |
488 | #ifdef CONFIG_X86_64 |
489 | #define raw_cpu_read_8(pcp) percpu_from_op(8, , "mov", pcp) |
490 | #define raw_cpu_write_8(pcp, val) percpu_to_op(8, , "mov", (pcp), val) |
491 | |
492 | #define this_cpu_read_8(pcp) percpu_from_op(8, volatile, "mov", pcp) |
493 | #define this_cpu_write_8(pcp, val) percpu_to_op(8, volatile, "mov", (pcp), val) |
494 | #endif |
495 | |
496 | /* |
 * The generic per-cpu infrastructure is not suitable for
498 | * reading const-qualified variables. |
499 | */ |
500 | #define this_cpu_read_const(pcp) ({ BUILD_BUG(); (typeof(pcp))0; }) |
501 | #endif /* CONFIG_USE_X86_SEG_SUPPORT */ |
502 | |
503 | #define raw_cpu_add_1(pcp, val) percpu_add_op(1, , (pcp), val) |
504 | #define raw_cpu_add_2(pcp, val) percpu_add_op(2, , (pcp), val) |
505 | #define raw_cpu_add_4(pcp, val) percpu_add_op(4, , (pcp), val) |
506 | #define raw_cpu_and_1(pcp, val) percpu_to_op(1, , "and", (pcp), val) |
507 | #define raw_cpu_and_2(pcp, val) percpu_to_op(2, , "and", (pcp), val) |
508 | #define raw_cpu_and_4(pcp, val) percpu_to_op(4, , "and", (pcp), val) |
509 | #define raw_cpu_or_1(pcp, val) percpu_to_op(1, , "or", (pcp), val) |
510 | #define raw_cpu_or_2(pcp, val) percpu_to_op(2, , "or", (pcp), val) |
511 | #define raw_cpu_or_4(pcp, val) percpu_to_op(4, , "or", (pcp), val) |
512 | |
513 | /* |
514 | * raw_cpu_xchg() can use a load-store since it is not required to be |
515 | * IRQ-safe. |
516 | */ |
517 | #define raw_percpu_xchg_op(var, nval) \ |
518 | ({ \ |
519 | typeof(var) pxo_ret__ = raw_cpu_read(var); \ |
520 | raw_cpu_write(var, (nval)); \ |
521 | pxo_ret__; \ |
522 | }) |
523 | |
524 | #define raw_cpu_xchg_1(pcp, val) raw_percpu_xchg_op(pcp, val) |
525 | #define raw_cpu_xchg_2(pcp, val) raw_percpu_xchg_op(pcp, val) |
526 | #define raw_cpu_xchg_4(pcp, val) raw_percpu_xchg_op(pcp, val) |
527 | |
528 | #define this_cpu_add_1(pcp, val) percpu_add_op(1, volatile, (pcp), val) |
529 | #define this_cpu_add_2(pcp, val) percpu_add_op(2, volatile, (pcp), val) |
530 | #define this_cpu_add_4(pcp, val) percpu_add_op(4, volatile, (pcp), val) |
531 | #define this_cpu_and_1(pcp, val) percpu_to_op(1, volatile, "and", (pcp), val) |
532 | #define this_cpu_and_2(pcp, val) percpu_to_op(2, volatile, "and", (pcp), val) |
533 | #define this_cpu_and_4(pcp, val) percpu_to_op(4, volatile, "and", (pcp), val) |
534 | #define this_cpu_or_1(pcp, val) percpu_to_op(1, volatile, "or", (pcp), val) |
535 | #define this_cpu_or_2(pcp, val) percpu_to_op(2, volatile, "or", (pcp), val) |
536 | #define this_cpu_or_4(pcp, val) percpu_to_op(4, volatile, "or", (pcp), val) |
537 | #define this_cpu_xchg_1(pcp, nval) percpu_xchg_op(1, volatile, pcp, nval) |
538 | #define this_cpu_xchg_2(pcp, nval) percpu_xchg_op(2, volatile, pcp, nval) |
539 | #define this_cpu_xchg_4(pcp, nval) percpu_xchg_op(4, volatile, pcp, nval) |
540 | |
541 | #define raw_cpu_add_return_1(pcp, val) percpu_add_return_op(1, , pcp, val) |
542 | #define raw_cpu_add_return_2(pcp, val) percpu_add_return_op(2, , pcp, val) |
543 | #define raw_cpu_add_return_4(pcp, val) percpu_add_return_op(4, , pcp, val) |
544 | #define raw_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(1, , pcp, oval, nval) |
545 | #define raw_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(2, , pcp, oval, nval) |
546 | #define raw_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(4, , pcp, oval, nval) |
547 | #define raw_cpu_try_cmpxchg_1(pcp, ovalp, nval) percpu_try_cmpxchg_op(1, , pcp, ovalp, nval) |
548 | #define raw_cpu_try_cmpxchg_2(pcp, ovalp, nval) percpu_try_cmpxchg_op(2, , pcp, ovalp, nval) |
549 | #define raw_cpu_try_cmpxchg_4(pcp, ovalp, nval) percpu_try_cmpxchg_op(4, , pcp, ovalp, nval) |
550 | |
551 | #define this_cpu_add_return_1(pcp, val) percpu_add_return_op(1, volatile, pcp, val) |
552 | #define this_cpu_add_return_2(pcp, val) percpu_add_return_op(2, volatile, pcp, val) |
553 | #define this_cpu_add_return_4(pcp, val) percpu_add_return_op(4, volatile, pcp, val) |
554 | #define this_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(1, volatile, pcp, oval, nval) |
555 | #define this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(2, volatile, pcp, oval, nval) |
556 | #define this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(4, volatile, pcp, oval, nval) |
557 | #define this_cpu_try_cmpxchg_1(pcp, ovalp, nval) percpu_try_cmpxchg_op(1, volatile, pcp, ovalp, nval) |
558 | #define this_cpu_try_cmpxchg_2(pcp, ovalp, nval) percpu_try_cmpxchg_op(2, volatile, pcp, ovalp, nval) |
559 | #define this_cpu_try_cmpxchg_4(pcp, ovalp, nval) percpu_try_cmpxchg_op(4, volatile, pcp, ovalp, nval) |
560 | |
561 | /* |
 * Per-CPU atomic 64-bit operations are only available on 64-bit kernels;
 * 32-bit kernels must fall back to the generic operations.
564 | */ |
565 | #ifdef CONFIG_X86_64 |
566 | #define raw_cpu_add_8(pcp, val) percpu_add_op(8, , (pcp), val) |
567 | #define raw_cpu_and_8(pcp, val) percpu_to_op(8, , "and", (pcp), val) |
568 | #define raw_cpu_or_8(pcp, val) percpu_to_op(8, , "or", (pcp), val) |
569 | #define raw_cpu_add_return_8(pcp, val) percpu_add_return_op(8, , pcp, val) |
570 | #define raw_cpu_xchg_8(pcp, nval) raw_percpu_xchg_op(pcp, nval) |
571 | #define raw_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(8, , pcp, oval, nval) |
572 | #define raw_cpu_try_cmpxchg_8(pcp, ovalp, nval) percpu_try_cmpxchg_op(8, , pcp, ovalp, nval) |
573 | |
574 | #define this_cpu_add_8(pcp, val) percpu_add_op(8, volatile, (pcp), val) |
575 | #define this_cpu_and_8(pcp, val) percpu_to_op(8, volatile, "and", (pcp), val) |
576 | #define this_cpu_or_8(pcp, val) percpu_to_op(8, volatile, "or", (pcp), val) |
577 | #define this_cpu_add_return_8(pcp, val) percpu_add_return_op(8, volatile, pcp, val) |
578 | #define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(8, volatile, pcp, nval) |
579 | #define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(8, volatile, pcp, oval, nval) |
580 | #define this_cpu_try_cmpxchg_8(pcp, ovalp, nval) percpu_try_cmpxchg_op(8, volatile, pcp, ovalp, nval) |
581 | #endif |
582 | |
583 | static __always_inline bool x86_this_cpu_constant_test_bit(unsigned int nr, |
584 | const unsigned long __percpu *addr) |
585 | { |
586 | unsigned long __percpu *a = |
587 | (unsigned long __percpu *)addr + nr / BITS_PER_LONG; |
588 | |
589 | #ifdef CONFIG_X86_64 |
590 | return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_8(*a)) != 0; |
591 | #else |
592 | return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_4(*a)) != 0; |
593 | #endif |
594 | } |
595 | |
596 | static inline bool x86_this_cpu_variable_test_bit(int nr, |
597 | const unsigned long __percpu *addr) |
598 | { |
599 | bool oldbit; |
600 | |
601 | asm volatile("btl " __percpu_arg(2)",%1" |
602 | CC_SET(c) |
603 | : CC_OUT(c) (oldbit) |
604 | : "m" (*__my_cpu_ptr((unsigned long __percpu *)(addr))), "Ir" (nr)); |
605 | |
606 | return oldbit; |
607 | } |
608 | |
609 | #define x86_this_cpu_test_bit(nr, addr) \ |
610 | (__builtin_constant_p((nr)) \ |
611 | ? x86_this_cpu_constant_test_bit((nr), (addr)) \ |
612 | : x86_this_cpu_variable_test_bit((nr), (addr))) |
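
/*
 * Usage sketch (illustrative, hypothetical bitmap): nr is dispatched to the
 * constant or variable helper at compile time, e.g.
 *
 *	DEFINE_PER_CPU(unsigned long, foo_flags[BITS_TO_LONGS(NR_FOO_BITS)]);
 *	...
 *	if (x86_this_cpu_test_bit(FOO_PENDING, foo_flags))
 *		...
 */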
613 | |
614 | |
615 | #include <asm-generic/percpu.h> |
616 | |
/* We can use this directly for the local CPU (faster). */
618 | DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off); |
619 | |
620 | #endif /* !__ASSEMBLY__ */ |
621 | |
622 | #ifdef CONFIG_SMP |
623 | |
624 | /* |
625 | * Define the "EARLY_PER_CPU" macros. These are used for some per_cpu |
 * variables that are initialized and accessed before the per_cpu areas
 * are allocated.
628 | */ |
629 | |
630 | #define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \ |
631 | DEFINE_PER_CPU(_type, _name) = _initvalue; \ |
632 | __typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \ |
633 | { [0 ... NR_CPUS-1] = _initvalue }; \ |
634 | __typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map |
635 | |
636 | #define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue) \ |
637 | DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue; \ |
638 | __typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \ |
639 | { [0 ... NR_CPUS-1] = _initvalue }; \ |
640 | __typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map |
641 | |
642 | #define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \ |
643 | EXPORT_PER_CPU_SYMBOL(_name) |
644 | |
645 | #define DECLARE_EARLY_PER_CPU(_type, _name) \ |
646 | DECLARE_PER_CPU(_type, _name); \ |
647 | extern __typeof__(_type) *_name##_early_ptr; \ |
648 | extern __typeof__(_type) _name##_early_map[] |
649 | |
650 | #define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \ |
651 | DECLARE_PER_CPU_READ_MOSTLY(_type, _name); \ |
652 | extern __typeof__(_type) *_name##_early_ptr; \ |
653 | extern __typeof__(_type) _name##_early_map[] |
654 | |
655 | #define early_per_cpu_ptr(_name) (_name##_early_ptr) |
656 | #define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx]) |
657 | #define early_per_cpu(_name, _cpu) \ |
658 | *(early_per_cpu_ptr(_name) ? \ |
659 | &early_per_cpu_ptr(_name)[_cpu] : \ |
660 | &per_cpu(_name, _cpu)) |
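
/*
 * Usage sketch (illustrative, hypothetical variable): an early per-CPU value
 * is defined once and read the same way before and after the per-CPU areas
 * exist:
 *
 *	DEFINE_EARLY_PER_CPU(u16, foo_id, BAD_FOO_ID);
 *	...
 *	id = early_per_cpu(foo_id, cpu);
 *
 * Once the real per-CPU areas are set up, the setup code is expected to
 * clear foo_id_early_ptr so that early_per_cpu() falls through to
 * per_cpu().
 */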
661 | |
662 | #else /* !CONFIG_SMP */ |
663 | #define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \ |
664 | DEFINE_PER_CPU(_type, _name) = _initvalue |
665 | |
666 | #define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue) \ |
667 | DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue |
668 | |
669 | #define EXPORT_EARLY_PER_CPU_SYMBOL(_name) \ |
670 | EXPORT_PER_CPU_SYMBOL(_name) |
671 | |
672 | #define DECLARE_EARLY_PER_CPU(_type, _name) \ |
673 | DECLARE_PER_CPU(_type, _name) |
674 | |
675 | #define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name) \ |
676 | DECLARE_PER_CPU_READ_MOSTLY(_type, _name) |
677 | |
678 | #define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu) |
679 | #define early_per_cpu_ptr(_name) NULL |
680 | /* no early_per_cpu_map() */ |
681 | |
682 | #endif /* !CONFIG_SMP */ |
683 | |
684 | #endif /* _ASM_X86_PERCPU_H */ |
685 | |