/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			long ______r;					\
			static struct ftrace_likely_data		\
				__aligned(4)				\
				__section("_ftrace_annotated_branch")	\
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})
/*
 * We use __builtin_constant_p(x) to ignore cases where the return
 * value is always the same. This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif
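
/*
 * Illustrative usage sketch (do_work() and the error path are
 * hypothetical): annotate branches whose outcome is heavily biased,
 * such as rare error paths in hot code, so the compiler lays out the
 * common case for straight-line fall-through:
 *
 *	ret = do_work();
 *	if (unlikely(ret < 0))
 *		return ret;
 */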

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )

#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))

#define __trace_if_value(cond) ({			\
	static struct ftrace_branch_data		\
		__aligned(4)				\
		__section("_ftrace_branch")		\
		__if_trace = {				\
			.func = __func__,		\
			.file = __FILE__,		\
			.line = __LINE__,		\
		};					\
	(cond) ?					\
		(__if_trace.miss_hit[1]++,1) :		\
		(__if_trace.miss_hit[0]++,0);		\
})

#endif /* CONFIG_PROFILE_ALL_BRANCHES */
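
/*
 * Illustrative sketch of the if() override above (with
 * CONFIG_PROFILE_ALL_BRANCHES=y): an ordinary conditional such as
 *
 *	if (x > 0)
 *		foo();
 *
 * preprocesses into roughly
 *
 *	if (__trace_if_var(!!(x > 0)))
 *		foo();
 *
 * so every non-constant condition bumps the miss_hit[] slot matching
 * its outcome, while compile-time-constant conditions skip the
 * bookkeeping entirely.
 */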

#else
# define likely(x)		__builtin_expect(!!(x), 1)
# define unlikely(x)		__builtin_expect(!!(x), 0)
# define likely_notrace(x)	likely(x)
# define unlikely_notrace(x)	unlikely(x)
#endif

/* Optimization barrier */
#ifndef barrier
/* The "volatile" is due to gcc bugs */
# define barrier() __asm__ __volatile__("": : :"memory")
#endif
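
/*
 * Illustrative sketch (hypothetical global "flag"): the "memory"
 * clobber forces the compiler to re-read memory after the barrier
 * instead of caching values in registers across it:
 *
 *	while (!flag)
 *		barrier();
 *
 * Note this constrains the compiler only; no CPU fence is emitted.
 */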

#ifndef barrier_data
/*
 * This version is used, for example, to prevent dead-store elimination
 * on @ptr, a case where gcc and llvm behave differently with a plain
 * barrier(): while gcc gets along with a normal barrier(), llvm needs
 * an explicit input variable to be assumed clobbered. The issue is as
 * follows: while the inline asm might access any memory it wants, the
 * compiler could have fit all of @ptr into registers instead, and
 * since @ptr never escaped from there, it proved that the inline asm
 * wasn't touching any of it. This version works well with both
 * compilers, i.e. we're telling the compiler that the inline asm
 * absolutely may see the contents of @ptr.
 * See also: https://llvm.org/bugs/show_bug.cgi?id=15495
 */
# define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
#endif
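
/*
 * Illustrative sketch (hypothetical "key" buffer): without the barrier
 * the compiler may drop the memset() of a dying local as a dead store;
 * barrier_data() makes it assume the cleared bytes are still read:
 *
 *	memset(key, 0, sizeof(key));
 *	barrier_data(key);
 */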

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_OBJTOOL
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define __stringify_label(n) #n

#define __annotate_unreachable(c) ({					\
	asm volatile(__stringify_label(c) ":\n\t"			\
		     ".pushsection .discard.unreachable\n\t"		\
		     ".long " __stringify_label(c) "b - .\n\t"		\
		     ".popsection\n\t" : : "i" (c));			\
})
#define annotate_unreachable() __annotate_unreachable(__COUNTER__)

/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(".rodata..c_jump_table")

#else /* !CONFIG_OBJTOOL */
#define annotate_unreachable()
#define __annotate_jump_table
#endif /* CONFIG_OBJTOOL */

#ifndef unreachable
# define unreachable() do {		\
	annotate_unreachable();		\
	__builtin_unreachable();	\
} while (0)
#endif
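
/*
 * Illustrative usage sketch (the states and handlers are hypothetical):
 * tell the compiler, and objtool, that control cannot flow past this
 * point. Actually reaching unreachable() at runtime is undefined
 * behaviour.
 *
 *	switch (state) {
 *	case STATE_A: return handle_a();
 *	case STATE_B: return handle_b();
 *	}
 *	unreachable();
 */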

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in
 * the linker script. For example, an architecture could KEEP() its entire
 * boot/exception vector code rather than annotating each function and data
 * symbol individually.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__attribute__((__section__("___kentry+" #sym)))		\
	= (unsigned long)&sym;
#endif
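
/*
 * Illustrative usage sketch (hypothetical symbol): keep a handler
 * alive across linker garbage collection when its only references are
 * built outside the compiler's view, e.g. from a vector table:
 *
 *	KENTRY(early_trap_handler)
 */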

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif

#define absolute_pointer(val)	RELOC_HIDE((void *)(val), 0)
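
/*
 * Illustrative sketch (address and buffer are hypothetical): make a
 * fixed address usable as a pointer without the compiler tracking its
 * provenance, which otherwise triggers bogus array-bounds warnings:
 *
 *	memcpy(buf, absolute_pointer(0x000f0000), len);
 */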

#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var)						\
	__asm__ ("" : "=r" (var) : "0" (var))
#endif
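
/*
 * Illustrative sketch: launder a value through the empty asm so the
 * optimizer forgets everything it knew about it, e.g. to stop a known
 * constant from being folded away:
 *
 *	int zero = 0;
 *	OPTIMIZER_HIDE_VAR(zero);
 *	// here the compiler must treat "zero" as an unknown value
 */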

#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)

/**
 * data_race - mark an expression as containing intentional data races
 *
 * This data_race() macro is useful for situations in which data races
 * should be forgiven. One example is diagnostic code that accesses
 * shared variables but is not a part of the core synchronization design.
 *
 * This macro *does not* affect normal code generation, but is a hint
 * to tooling that data races here are to be ignored.
 */
#define data_race(expr)							\
({									\
	__unqual_scalar_typeof(({ expr; })) __v = ({			\
		__kcsan_disable_current();				\
		expr;							\
	});								\
	__kcsan_enable_current();					\
	__v;								\
})
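
/*
 * Illustrative usage sketch (the stats structure is hypothetical):
 * read a value that is concurrently updated without locking, telling
 * KCSAN the race is intentional and benign here:
 *
 *	pr_info("drops: %lu\n", data_race(stats->drops));
 */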

#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define ___ADDRESSABLE(sym, __attrs)						\
	static void * __used __attrs						\
	__UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)&sym;
#define __ADDRESSABLE(sym)						\
	___ADDRESSABLE(sym, __section(".discard.addressable"))
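
/*
 * Illustrative usage sketch (my_trampoline is hypothetical): keep a
 * symbol emitted when its only references come from inline asm, which
 * the compiler cannot see:
 *
 *	static void my_trampoline(void) { ... }
 *	__ADDRESSABLE(my_trampoline)
 */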

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off: the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}
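
/*
 * Illustrative sketch: offset_to_ptr() is the read side of 32-bit
 * "relative pointer" tables, where each entry stores (target - &entry)
 * to stay small and relocation-free. With a hypothetical table:
 *
 *	extern const int table[];
 *	void *target = offset_to_ptr(&table[i]);  // (char *)&table[i] + table[i]
 */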

#endif /* __ASSEMBLY__ */

/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
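
/*
 * Illustrative sketch of the typical consumer: the kernel's
 * ARRAY_SIZE() uses this to turn "array decayed to pointer" bugs into
 * build errors instead of silently wrong sizes:
 *
 *	#define ARRAY_SIZE(arr) \
 *		(sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
 */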

/*
 * This returns a constant expression while determining if an argument is
 * a constant expression, most importantly without evaluating the argument.
 * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de>
 */
#define __is_constexpr(x) \
	(sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
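
/*
 * How this works: (x) appears only inside sizeof, so it is never
 * evaluated; the multiply by 0l forces the value to 0. If (x) is an
 * integer constant expression, "(void *)((long)(x) * 0l)" is a null
 * pointer constant, the ternary "8 ? ... : (int *)8" has type int *,
 * and the sizeof comparison holds. Otherwise that operand is a plain
 * void *, the ternary has type void *, and sizeof(void) (a GNU
 * extension, == 1) differs from sizeof(int). For example:
 *
 *	__is_constexpr(42)	-> 1
 *	__is_constexpr(i)	-> 0	(for a variable i)
 */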

/*
 * Whether 'type' is a signed type or an unsigned type. Supports scalar types,
 * bool and also pointer types.
 */
#define is_signed_type(type) (((type)(-1)) < (__force type)1)
#define is_unsigned_type(type) (!is_signed_type(type))
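
/*
 * Illustrative results (all compile-time constants):
 *
 *	is_signed_type(int)	-> true
 *	is_signed_type(size_t)	-> false
 *	is_signed_type(char *)	-> false (a pointer compares as unsigned)
 */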

/*
 * This is needed in functions which generate the stack canary, see
 * arch/x86/kernel/smpboot.c::start_secondary() for an example.
 */
#define prevent_tail_call_optimization()	mb()

#include <asm/rwonce.h>

#endif /* __LINUX_COMPILER_H */