/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/percpu-defs.h - basic definitions for percpu areas
 *
 * DO NOT INCLUDE DIRECTLY OUTSIDE PERCPU IMPLEMENTATION PROPER.
 *
 * This file is separate from linux/percpu.h to avoid cyclic inclusion
 * dependency from arch header files.  Only to be included from
 * asm/percpu.h.
 *
 * This file includes macros necessary to declare percpu sections and
 * variables, and definitions of percpu accessors and operations.  It
 * should provide enough percpu features to arch header files even when
 * they can only include asm/percpu.h to avoid cyclic inclusion dependency.
 */

#ifndef _LINUX_PERCPU_DEFS_H
#define _LINUX_PERCPU_DEFS_H

#ifdef CONFIG_SMP

#ifdef MODULE
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION ""
#else
#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#endif
#define PER_CPU_FIRST_SECTION "..first"

#else

#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
#define PER_CPU_FIRST_SECTION ""

#endif

/*
 * Base implementations of per-CPU variable declarations and definitions, where
 * the section in which the variable is to be placed is provided by the
 * 'sec' argument.  This may be used to affect the parameters governing the
 * variable's storage.
 *
 * NOTE! The sections for the DECLARE and for the DEFINE must match, lest
 * linkage errors occur due to the compiler generating the wrong code to
 * access that section.
 */
#define __PCPU_ATTRS(sec)						\
	__percpu __attribute__((section(PER_CPU_BASE_SECTION sec)))	\
	PER_CPU_ATTRIBUTES

#define __PCPU_DUMMY_ATTRS						\
	__section(".discard") __attribute__((unused))

/*
 * s390 and alpha modules require percpu variables to be defined as
 * weak to force the compiler to generate GOT based external
 * references for them.  This is necessary because percpu sections
 * will be located outside of the usually addressable area.
 *
 * This definition imposes the following two extra restrictions on
 * defining percpu variables:
 *
 * 1. The symbol must be globally unique, even for static ones.
 * 2. Static percpu variables cannot be defined inside a function.
 *
 * Archs which need weak percpu definitions should define
 * ARCH_NEEDS_WEAK_PER_CPU in asm/percpu.h when necessary.
 *
 * To ensure that the generic code observes the above two
 * restrictions, if CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set, the weak
 * definition is used for all cases.
 */
#if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
/*
 * __pcpu_scope_* dummy variable is used to enforce scope.  It
 * receives the static modifier when it's used in front of
 * DEFINE_PER_CPU() and will trigger build failure if
 * DECLARE_PER_CPU() is used for the same variable.
 *
 * __pcpu_unique_* dummy variable is used to enforce symbol uniqueness
 * such that hidden weak symbol collision, which will cause unrelated
 * variables to share the same address, can be detected during build.
 */
#define DECLARE_PER_CPU_SECTION(type, name, sec)			\
	extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name;		\
	extern __PCPU_ATTRS(sec) __typeof__(type) name

#define DEFINE_PER_CPU_SECTION(type, name, sec)				\
	__PCPU_DUMMY_ATTRS char __pcpu_scope_##name;			\
	extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name;		\
	__PCPU_DUMMY_ATTRS char __pcpu_unique_##name;			\
	extern __PCPU_ATTRS(sec) __typeof__(type) name;			\
	__PCPU_ATTRS(sec) __weak __typeof__(type) name
#else
/*
 * Normal declaration and definition macros.
 */
#define DECLARE_PER_CPU_SECTION(type, name, sec)			\
	extern __PCPU_ATTRS(sec) __typeof__(type) name

#define DEFINE_PER_CPU_SECTION(type, name, sec)				\
	__PCPU_ATTRS(sec) __typeof__(type) name
#endif

/*
 * Variant on the per-CPU variable declaration/definition theme used for
 * ordinary per-CPU variables.
 */
#define DECLARE_PER_CPU(type, name)					\
	DECLARE_PER_CPU_SECTION(type, name, "")

#define DEFINE_PER_CPU(type, name)					\
	DEFINE_PER_CPU_SECTION(type, name, "")
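
/*
 * Usage sketch (illustrative, not part of this header; "foo_count" and
 * the file names are made up): a percpu variable is typically defined
 * once in a .c file and, when needed elsewhere, declared in a header
 * with a matching section:
 *
 *	// foo.h
 *	DECLARE_PER_CPU(unsigned long, foo_count);
 *
 *	// foo.c
 *	DEFINE_PER_CPU(unsigned long, foo_count);
 */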

/*
 * Declaration/definition used for per-CPU variables that must come first in
 * the set of variables.
 */
#define DECLARE_PER_CPU_FIRST(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

#define DEFINE_PER_CPU_FIRST(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

/*
 * Declaration/definition used for per-CPU variables that must be cacheline
 * aligned under SMP conditions so that, whilst a particular instance of the
 * data corresponds to a particular CPU, inefficiencies due to direct access by
 * other CPUs are reduced by preventing the data from unnecessarily spanning
 * cachelines.
 *
 * An example of this would be statistical data, where each CPU's set of data
 * is updated by that CPU alone, but the data from across all CPUs is collated
 * by a CPU processing a read from a proc file.
 */
#define DECLARE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp
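
/*
 * Sketch of the statistics pattern described above (illustrative;
 * "foo_stats" and note_rx() are made-up names): each CPU updates only
 * its own instance via this_cpu operations, while a reader sums the
 * instances of all CPUs:
 *
 *	struct foo_stats { unsigned long rx; unsigned long tx; };
 *	static DEFINE_PER_CPU_SHARED_ALIGNED(struct foo_stats, foo_stats);
 *
 *	static void note_rx(void)
 *	{
 *		this_cpu_inc(foo_stats.rx);
 *	}
 */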

#define DECLARE_PER_CPU_ALIGNED(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
	____cacheline_aligned

#define DEFINE_PER_CPU_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
	____cacheline_aligned

/*
 * Declaration/definition used for per-CPU variables that must be page aligned.
 */
#define DECLARE_PER_CPU_PAGE_ALIGNED(type, name)			\
	DECLARE_PER_CPU_SECTION(type, name, "..page_aligned")		\
	__aligned(PAGE_SIZE)

#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, "..page_aligned")		\
	__aligned(PAGE_SIZE)

/*
 * Declaration/definition used for per-CPU variables that must be read mostly.
 */
#define DECLARE_PER_CPU_READ_MOSTLY(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, "..read_mostly")

#define DEFINE_PER_CPU_READ_MOSTLY(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, "..read_mostly")

/*
 * Declaration/definition used for per-CPU variables that should be accessed
 * as decrypted when memory encryption is enabled in the guest.
 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
#define DECLARE_PER_CPU_DECRYPTED(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, "..decrypted")

#define DEFINE_PER_CPU_DECRYPTED(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, "..decrypted")
#else
#define DEFINE_PER_CPU_DECRYPTED(type, name)	DEFINE_PER_CPU(type, name)
#endif

/*
 * Intermodule exports for per-CPU variables.  sparse forgets about
 * address spaces across EXPORT_SYMBOL(), so turn EXPORT_SYMBOL() into
 * a no-op when __CHECKER__ is defined.
 */
#ifndef __CHECKER__
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var)
#else
#define EXPORT_PER_CPU_SYMBOL(var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var)
#endif
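
/*
 * Example (illustrative; "foo_count" is a made-up name): a percpu
 * variable is exported with EXPORT_PER_CPU_SYMBOL() rather than plain
 * EXPORT_SYMBOL() so that sparse-checked builds stay clean:
 *
 *	DEFINE_PER_CPU(unsigned long, foo_count);
 *	EXPORT_PER_CPU_SYMBOL(foo_count);
 */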

/*
 * Accessors and operations.
 */
#ifndef __ASSEMBLY__

/*
 * __verify_pcpu_ptr() verifies that @ptr is a percpu pointer without
 * evaluating @ptr, and is invoked once by all accessors and operations
 * before a percpu area is accessed.  This is performed in the generic
 * part of percpu and arch overrides don't need to worry about it;
 * however, if an arch wants to implement an arch-specific percpu
 * accessor or operation, it may use __verify_pcpu_ptr() to verify the
 * parameters.
 *
 * + 0 is required in order to convert the pointer type from a
 * potential array type to a pointer to a single item of the array.
 */
#define __verify_pcpu_ptr(ptr)						\
do {									\
	const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;	\
	(void)__vpp_verify;						\
} while (0)
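
/*
 * Illustrative sketch of what the check catches (names are made up):
 * assigning @ptr to a 'const void __percpu *' makes sparse flag any
 * argument that is not in the __percpu address space, without ever
 * evaluating @ptr:
 *
 *	static DEFINE_PER_CPU(int, foo_val);
 *	static int bar;
 *
 *	this_cpu_read(foo_val);	// ok, &foo_val is __percpu
 *	this_cpu_read(bar);	// sparse: incorrect address space
 */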

#ifdef CONFIG_SMP

/*
 * Add an offset to a pointer but keep the pointer as-is.  Use RELOC_HIDE()
 * to prevent the compiler from making incorrect assumptions about the
 * pointer value.  The weird cast keeps both GCC and sparse happy.
 */
#define SHIFT_PERCPU_PTR(__p, __offset)					\
	RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset))

#define per_cpu_ptr(ptr, cpu)						\
({									\
	__verify_pcpu_ptr(ptr);						\
	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)));			\
})

#define raw_cpu_ptr(ptr)						\
({									\
	__verify_pcpu_ptr(ptr);						\
	arch_raw_cpu_ptr(ptr);						\
})

#ifdef CONFIG_DEBUG_PREEMPT
#define this_cpu_ptr(ptr)						\
({									\
	__verify_pcpu_ptr(ptr);						\
	SHIFT_PERCPU_PTR(ptr, my_cpu_offset);				\
})
#else
#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
#endif

#else	/* CONFIG_SMP */

#define VERIFY_PERCPU_PTR(__p)						\
({									\
	__verify_pcpu_ptr(__p);						\
	(typeof(*(__p)) __kernel __force *)(__p);			\
})

#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
#define raw_cpu_ptr(ptr)	per_cpu_ptr(ptr, 0)
#define this_cpu_ptr(ptr)	raw_cpu_ptr(ptr)

#endif	/* CONFIG_SMP */

#define per_cpu(var, cpu)	(*per_cpu_ptr(&(var), cpu))
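
/*
 * Example (illustrative; "foo_count" and foo_count_total() are made-up
 * names): per_cpu() is how a reader walks the instances of all CPUs,
 * e.g. to collate statistics; for_each_possible_cpu() comes from
 * linux/cpumask.h:
 *
 *	static DEFINE_PER_CPU(unsigned long, foo_count);
 *
 *	static unsigned long foo_count_total(void)
 *	{
 *		unsigned long sum = 0;
 *		int cpu;
 *
 *		for_each_possible_cpu(cpu)
 *			sum += per_cpu(foo_count, cpu);
 *		return sum;
 *	}
 */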

/*
 * Must be an lvalue.  Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var)						\
(*({									\
	preempt_disable();						\
	this_cpu_ptr(&var);						\
}))

/*
 * The weird & is necessary because sparse considers (void)(var) to be
 * a direct dereference of percpu variable (var).
 */
#define put_cpu_var(var)						\
do {									\
	(void)&(var);							\
	preempt_enable();						\
} while (0)
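
/*
 * Example (illustrative; the struct and function are made up):
 * get_cpu_var()/put_cpu_var() bracket a preemption-safe sequence of
 * accesses to the local CPU's instance:
 *
 *	struct foo_state { int depth; u64 stamp; };
 *	static DEFINE_PER_CPU(struct foo_state, foo_state);
 *
 *	void foo_enter(u64 now)
 *	{
 *		struct foo_state *st = &get_cpu_var(foo_state);
 *
 *		st->depth++;
 *		st->stamp = now;
 *		put_cpu_var(foo_state);
 *	}
 */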

#define get_cpu_ptr(var)						\
({									\
	preempt_disable();						\
	this_cpu_ptr(var);						\
})

#define put_cpu_ptr(var)						\
do {									\
	(void)(var);							\
	preempt_enable();						\
} while (0)
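
/*
 * Example (illustrative; names are made up): get_cpu_ptr() and
 * put_cpu_ptr() are the pointer flavors, typically used with percpu
 * memory obtained dynamically from alloc_percpu():
 *
 *	struct foo_pcpu { int depth; };
 *	static struct foo_pcpu __percpu *foo;	// from alloc_percpu()
 *
 *	void foo_touch(void)
 *	{
 *		struct foo_pcpu *p = get_cpu_ptr(foo);
 *
 *		p->depth++;
 *		put_cpu_ptr(foo);
 *	}
 */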

/*
 * Dispatch macros that branch to one of a set of size-specific
 * functions, selected by the scalar size of the object handled.
 */

extern void __bad_size_call_parameter(void);

#ifdef CONFIG_DEBUG_PREEMPT
extern void __this_cpu_preempt_check(const char *op);
#else
static __always_inline void __this_cpu_preempt_check(const char *op) { }
#endif

#define __pcpu_size_call_return(stem, variable)				\
({									\
	typeof(variable) pscr_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch (sizeof(variable)) {					\
	case 1: pscr_ret__ = stem##1(variable); break;			\
	case 2: pscr_ret__ = stem##2(variable); break;			\
	case 4: pscr_ret__ = stem##4(variable); break;			\
	case 8: pscr_ret__ = stem##8(variable); break;			\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr_ret__;							\
})

#define __pcpu_size_call_return2(stem, variable, ...)			\
({									\
	typeof(variable) pscr2_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch (sizeof(variable)) {					\
	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr2_ret__;							\
})

#define __pcpu_size_call_return2bool(stem, variable, ...)		\
({									\
	bool pscr2_ret__;						\
	__verify_pcpu_ptr(&(variable));					\
	switch (sizeof(variable)) {					\
	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr2_ret__;							\
})

#define __pcpu_size_call(stem, variable, ...)				\
do {									\
	__verify_pcpu_ptr(&(variable));					\
	switch (sizeof(variable)) {					\
	case 1: stem##1(variable, __VA_ARGS__); break;			\
	case 2: stem##2(variable, __VA_ARGS__); break;			\
	case 4: stem##4(variable, __VA_ARGS__); break;			\
	case 8: stem##8(variable, __VA_ARGS__); break;			\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
} while (0)
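
/*
 * Illustrative expansion (the variable name is made up): for a 4-byte
 * variable,
 *
 *	this_cpu_add(foo_u32, 5);
 *
 * dispatches to this_cpu_add_4(foo_u32, 5).  An arch may provide that
 * macro itself; otherwise the generic fallback (see
 * asm-generic/percpu.h) is used.
 */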

/*
 * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com>
 *
 * Optimized manipulation for memory allocated through the per cpu
 * allocator or for addresses of per cpu variables.
 *
 * These operations guarantee exclusivity of access against other
 * operations on the *same* processor.  The assumption is that per cpu
 * data is only accessed by a single processor instance (the current
 * one).
 *
 * The arch code can provide optimized implementations by defining
 * macros for certain scalar sizes, e.g. this_cpu_add_2() to provide
 * per cpu atomic operations for 2 byte sized RMW actions.  If the arch
 * code does not provide operations for a scalar size, the fallback in
 * the generic code will be used.
 *
 * cmpxchg_double replaces two adjacent scalars at once.  The first two
 * parameters are per cpu variables which have to be of the same size.
 * A truth value is returned to indicate success or failure (since a
 * double register result is difficult to handle).  There is very
 * limited hardware support for these operations, so only certain
 * sizes may work.
 */

/*
 * Operations for contexts where we do not want to do any checks for
 * preemption.  Unless strictly necessary, always use [__]this_cpu_*()
 * instead.
 *
 * If there is no other protection through preempt disable and/or
 * disabling interrupts, then one of these RMW operations can show
 * unexpected behavior because the execution thread was rescheduled on
 * another processor or an interrupt occurred and the same percpu
 * variable was modified from the interrupt context.
 */
#define raw_cpu_read(pcp)		__pcpu_size_call_return(raw_cpu_read_, pcp)
#define raw_cpu_write(pcp, val)		__pcpu_size_call(raw_cpu_write_, pcp, val)
#define raw_cpu_add(pcp, val)		__pcpu_size_call(raw_cpu_add_, pcp, val)
#define raw_cpu_and(pcp, val)		__pcpu_size_call(raw_cpu_and_, pcp, val)
#define raw_cpu_or(pcp, val)		__pcpu_size_call(raw_cpu_or_, pcp, val)
#define raw_cpu_add_return(pcp, val)	__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)
#define raw_cpu_xchg(pcp, nval)		__pcpu_size_call_return2(raw_cpu_xchg_, pcp, nval)
#define raw_cpu_cmpxchg(pcp, oval, nval) \
	__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
#define raw_cpu_try_cmpxchg(pcp, ovalp, nval) \
	__pcpu_size_call_return2bool(raw_cpu_try_cmpxchg_, pcp, ovalp, nval)
#define raw_cpu_sub(pcp, val)		raw_cpu_add(pcp, -(val))
#define raw_cpu_inc(pcp)		raw_cpu_add(pcp, 1)
#define raw_cpu_dec(pcp)		raw_cpu_sub(pcp, 1)
#define raw_cpu_sub_return(pcp, val)	raw_cpu_add_return(pcp, -(typeof(pcp))(val))
#define raw_cpu_inc_return(pcp)		raw_cpu_add_return(pcp, 1)
#define raw_cpu_dec_return(pcp)		raw_cpu_add_return(pcp, -1)
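
/*
 * Example (illustrative; names are made up): raw_cpu_*() is
 * appropriate only when the caller already prevents rescheduling and
 * concurrent modification by other means, e.g. in a function that is
 * documented to run with interrupts disabled:
 *
 *	static DEFINE_PER_CPU(unsigned long, foo_events);
 *
 *	static void note_foo_event(void)	// IRQs already disabled
 *	{
 *		raw_cpu_inc(foo_events);
 *	}
 */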

/*
 * Operations for contexts that are safe from preemption/interrupts.  These
 * operations verify that preemption is disabled.
 */
#define __this_cpu_read(pcp)						\
({									\
	__this_cpu_preempt_check("read");				\
	raw_cpu_read(pcp);						\
})

#define __this_cpu_write(pcp, val)					\
({									\
	__this_cpu_preempt_check("write");				\
	raw_cpu_write(pcp, val);					\
})

#define __this_cpu_add(pcp, val)					\
({									\
	__this_cpu_preempt_check("add");				\
	raw_cpu_add(pcp, val);						\
})

#define __this_cpu_and(pcp, val)					\
({									\
	__this_cpu_preempt_check("and");				\
	raw_cpu_and(pcp, val);						\
})

#define __this_cpu_or(pcp, val)						\
({									\
	__this_cpu_preempt_check("or");					\
	raw_cpu_or(pcp, val);						\
})

#define __this_cpu_add_return(pcp, val)					\
({									\
	__this_cpu_preempt_check("add_return");				\
	raw_cpu_add_return(pcp, val);					\
})

#define __this_cpu_xchg(pcp, nval)					\
({									\
	__this_cpu_preempt_check("xchg");				\
	raw_cpu_xchg(pcp, nval);					\
})

#define __this_cpu_cmpxchg(pcp, oval, nval)				\
({									\
	__this_cpu_preempt_check("cmpxchg");				\
	raw_cpu_cmpxchg(pcp, oval, nval);				\
})

#define __this_cpu_sub(pcp, val)	__this_cpu_add(pcp, -(typeof(pcp))(val))
#define __this_cpu_inc(pcp)		__this_cpu_add(pcp, 1)
#define __this_cpu_dec(pcp)		__this_cpu_sub(pcp, 1)
#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)
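
/*
 * Example (illustrative; "foo_depth" is a made-up name): with
 * CONFIG_DEBUG_PREEMPT, __this_cpu_*() verifies at runtime that the
 * caller has indeed disabled preemption:
 *
 *	static DEFINE_PER_CPU(int, foo_depth);
 *
 *	preempt_disable();
 *	__this_cpu_inc(foo_depth);
 *	// ... work that must stay on this CPU ...
 *	__this_cpu_dec(foo_depth);
 *	preempt_enable();
 */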

/*
 * Operations with implied preemption/interrupt protection.  These
 * operations can be used without worrying about preemption or interrupts.
 */
#define this_cpu_read(pcp)		__pcpu_size_call_return(this_cpu_read_, pcp)
#define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, pcp, val)
#define this_cpu_add(pcp, val)		__pcpu_size_call(this_cpu_add_, pcp, val)
#define this_cpu_and(pcp, val)		__pcpu_size_call(this_cpu_and_, pcp, val)
#define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, pcp, val)
#define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
#define this_cpu_xchg(pcp, nval)	__pcpu_size_call_return2(this_cpu_xchg_, pcp, nval)
#define this_cpu_cmpxchg(pcp, oval, nval) \
	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
#define this_cpu_try_cmpxchg(pcp, ovalp, nval) \
	__pcpu_size_call_return2bool(this_cpu_try_cmpxchg_, pcp, ovalp, nval)
#define this_cpu_sub(pcp, val)		this_cpu_add(pcp, -(typeof(pcp))(val))
#define this_cpu_inc(pcp)		this_cpu_add(pcp, 1)
#define this_cpu_dec(pcp)		this_cpu_sub(pcp, 1)
#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(typeof(pcp))(val))
#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
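
/*
 * Example (illustrative; names are made up): this_cpu_*() needs no
 * caller-side protection; each operation is performed safely with
 * respect to preemption and interrupts on the local CPU, so it can be
 * used from any context:
 *
 *	static DEFINE_PER_CPU(unsigned long, foo_events);
 *
 *	void note_foo_event(void)
 *	{
 *		this_cpu_inc(foo_events);
 *	}
 */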

#endif /* __ASSEMBLY__ */
#endif /* _LINUX_PERCPU_DEFS_H */