1 | /* Definitions of target machine for GCC for IA-32. |
2 | Copyright (C) 1988-2023 Free Software Foundation, Inc. |
3 | |
4 | This file is part of GCC. |
5 | |
6 | GCC is free software; you can redistribute it and/or modify |
7 | it under the terms of the GNU General Public License as published by |
8 | the Free Software Foundation; either version 3, or (at your option) |
9 | any later version. |
10 | |
11 | GCC is distributed in the hope that it will be useful, |
12 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | GNU General Public License for more details. |
15 | |
16 | Under Section 7 of GPL version 3, you are granted additional |
17 | permissions described in the GCC Runtime Library Exception, version |
18 | 3.1, as published by the Free Software Foundation. |
19 | |
20 | You should have received a copy of the GNU General Public License and |
21 | a copy of the GCC Runtime Library Exception along with this program; |
22 | see the files COPYING3 and COPYING.RUNTIME respectively. If not, see |
23 | <http://www.gnu.org/licenses/>. */ |
24 | |
25 | /* The purpose of this file is to define the characteristics of the i386, |
26 | independent of assembler syntax or operating system. |
27 | |
28 | Three other files build on this one to describe a specific assembler syntax: |
29 | bsd386.h, att386.h, and sun386.h. |
30 | |
31 | The actual tm.h file for a particular system should include |
32 | this file, and then the file for the appropriate assembler syntax. |
33 | |
34 | Many macros that specify assembler syntax are omitted entirely from |
35 | this file because they really belong in the files for particular |
36 | assemblers. These include RP, IP, LPREFIX, PUT_OP_SIZE, USE_STAR, |
37 | ADDR_BEG, ADDR_END, PRINT_IREG, PRINT_SCALE, PRINT_B_I_S, and many |
38 | that start with ASM_ or end in ASM_OP. */ |
39 | |
/* Redefines for option macros.  */

#define TARGET_CMPXCHG16B	TARGET_CX16
#define TARGET_CMPXCHG16B_P(x)	TARGET_CX16_P(x)

/* ABI selection: LP64 (true 64-bit), X32 (ILP32 on 64-bit ISA),
   and 16-bit code generation.  */
#define TARGET_LP64	TARGET_ABI_64
#define TARGET_LP64_P(x)	TARGET_ABI_64_P(x)
#define TARGET_X32	TARGET_ABI_X32
#define TARGET_X32_P(x)	TARGET_ABI_X32_P(x)
#define TARGET_16BIT	TARGET_CODE16
#define TARGET_16BIT_P(x)	TARGET_CODE16_P(x)

/* In 64-bit mode MMX vectors can be implemented on SSE registers.  */
#define TARGET_MMX_WITH_SSE	(TARGET_64BIT && TARGET_SSE2)

/* APX sub-feature tests against the ix86_apx_features bitmask.  */
#define TARGET_APX_EGPR (ix86_apx_features & apx_egpr)
#define TARGET_APX_PUSH2POP2 (ix86_apx_features & apx_push2pop2)
#define TARGET_APX_NDD (ix86_apx_features & apx_ndd)

#include "config/vxworks-dummy.h"

#include "config/i386/i386-opts.h"

/* Maximum number of (size, algorithm) entries in a stringop strategy.  */
#define MAX_STRINGOP_ALGS 4
63 | |
/* Specify what algorithm to use for stringops on known size.
   When size is unknown, the UNKNOWN_SIZE alg is used.  When size is
   known at compile time or estimated via feedback, the SIZE array
   is walked in order until MAX is greater than the estimate (or -1
   means infinity).  Corresponding ALG is used then.
   When NOALIGN is true the code guaranteeing the alignment of the memory
   block is skipped.

   For example initializer:
    {{256, loop}, {-1, rep_prefix_4_byte}}
   will use loop for blocks smaller or equal to 256 bytes, rep prefix will
   be used otherwise.  */
struct stringop_algs
{
  const enum stringop_alg unknown_size;  /* Algorithm when size is unknown.  */
  const struct stringop_strategy {
    /* Several older compilers delete the default constructor because of the
       const entries (see PR100246).  Manually specifying a CTOR works around
       this issue.  Since this header is used by code compiled with the C
       compiler we must guard the addition.  */
#ifdef __cplusplus
    constexpr
    stringop_strategy (int _max = -1, enum stringop_alg _alg = libcall,
		       int _noalign = false)
      : max (_max), alg (_alg), noalign (_noalign) {}
#endif
    const int max;		/* Largest block size this entry covers;
				   -1 means no upper bound.  */
    const enum stringop_alg alg; /* Algorithm for blocks up to MAX bytes.  */
    int noalign;		/* Nonzero to skip the alignment prologue.  */
  } size [MAX_STRINGOP_ALGS];
};
95 | |
/* Analog of COSTS_N_INSNS when optimizing for size.
   NOTE(review): the factor of 2 presumably keeps byte counts on a scale
   comparable to COSTS_N_INSNS units — confirm before reusing elsewhere.  */
#ifndef COSTS_N_BYTES
#define COSTS_N_BYTES(N) ((N) * 2)
#endif
100 | |
/* Define the specific costs for a given cpu.  NB: hard_register is used
   by TARGET_REGISTER_MOVE_COST and TARGET_MEMORY_MOVE_COST to compute
   hard register move costs by register allocator.  Relative costs of
   pseudo register load and store versus pseudo register moves in RTL
   expressions for TARGET_RTX_COSTS can be different from relative
   costs of hard registers to get the most efficient operations with
   pseudo registers.  */

struct processor_costs {
  /* Costs used by register allocator.  integer->integer register move
     cost is 2.  */
  struct
  {
    const int movzbl_load;	/* cost of loading using movzbl */
    const int int_load[3];	/* cost of loading integer registers
				   in QImode, HImode and SImode relative
				   to reg-reg move (2).  */
    const int int_store[3];	/* cost of storing integer register
				   in QImode, HImode and SImode */
    const int fp_move;		/* cost of reg,reg fld/fst */
    const int fp_load[3];	/* cost of loading FP register
				   in SFmode, DFmode and XFmode */
    const int fp_store[3];	/* cost of storing FP register
				   in SFmode, DFmode and XFmode */
    const int mmx_move;		/* cost of moving MMX register.  */
    const int mmx_load[2];	/* cost of loading MMX register
				   in SImode and DImode */
    const int mmx_store[2];	/* cost of storing MMX register
				   in SImode and DImode */
    const int xmm_move;		/* cost of moving XMM register.  */
    const int ymm_move;		/* cost of moving YMM register.  */
    const int zmm_move;		/* cost of moving ZMM register.  */
    const int sse_load[5];	/* cost of loading SSE register
				   in 32bit, 64bit, 128bit, 256bit and 512bit */
    const int sse_store[5];	/* cost of storing SSE register
				   in 32bit, 64bit, 128bit, 256bit and 512bit.  */
    const int sse_to_integer;	/* cost of moving SSE register to integer.  */
    const int integer_to_sse;	/* cost of moving integer register to SSE.  */
    const int mask_to_integer;	/* cost of moving mask register to integer.  */
    const int integer_to_mask;	/* cost of moving integer register to mask.  */
    const int mask_load[3];	/* cost of loading mask registers
				   in QImode, HImode and SImode.  */
    const int mask_store[3];	/* cost of storing mask register
				   in QImode, HImode and SImode.  */
    const int mask_move;	/* cost of moving mask register.  */
  } hard_register;

  const int add;		/* cost of an add instruction */
  const int lea;		/* cost of a lea instruction */
  const int shift_var;		/* variable shift costs */
  const int shift_const;	/* constant shift costs */
  const int mult_init[5];	/* cost of starting a multiply
				   in QImode, HImode, SImode, DImode, TImode */
  const int mult_bit;		/* cost of multiply per each bit set */
  const int divide[5];		/* cost of a divide/mod
				   in QImode, HImode, SImode, DImode, TImode */
  int movsx;			/* The cost of movsx operation.  */
  int movzx;			/* The cost of movzx operation.  */
  const int large_insn;		/* insns larger than this cost more */
  const int move_ratio;		/* The threshold of number of scalar
				   memory-to-memory move insns.  */
  const int clear_ratio;	/* The threshold of number of scalar
				   memory clearing insns.  */
  const int int_load[3];	/* cost of loading integer registers
				   in QImode, HImode and SImode relative
				   to reg-reg move (2).  */
  const int int_store[3];	/* cost of storing integer register
				   in QImode, HImode and SImode */
  const int sse_load[5];	/* cost of loading SSE register
				   in 32bit, 64bit, 128bit, 256bit and 512bit */
  const int sse_store[5];	/* cost of storing SSE register
				   in 32bit, 64bit, 128bit, 256bit and 512bit */
  const int sse_unaligned_load[5];/* cost of unaligned load.  */
  const int sse_unaligned_store[5];/* cost of unaligned store.  */
  const int xmm_move, ymm_move, /* cost of moving XMM and YMM register.  */
	    zmm_move;
  const int sse_to_integer;	/* cost of moving SSE register to integer.  */
  const int gather_static, gather_per_elt; /* Cost of gather load is computed
				   as static + per_item * nelts.  */
  const int scatter_static, scatter_per_elt; /* Cost of scatter store is
				   computed as static + per_item * nelts.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int prefetch_block;	/* bytes moved to cache for prefetch.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
				   operations.  */
  const int branch_cost;	/* Default value for BRANCH_COST.  */
  const int fadd;		/* cost of FADD and FSUB instructions.  */
  const int fmul;		/* cost of FMUL instruction.  */
  const int fdiv;		/* cost of FDIV instruction.  */
  const int fabs;		/* cost of FABS instruction.  */
  const int fchs;		/* cost of FCHS instruction.  */
  const int fsqrt;		/* cost of FSQRT instruction.  */
  const int sse_op;		/* cost of cheap SSE instruction.  */
  const int addss;		/* cost of ADDSS/SD SUBSS/SD instructions.  */
  const int mulss;		/* cost of MULSS instructions.  */
  const int mulsd;		/* cost of MULSD instructions.  */
  const int fmass;		/* cost of FMASS instructions.  */
  const int fmasd;		/* cost of FMASD instructions.  */
  const int divss;		/* cost of DIVSS instructions.  */
  const int divsd;		/* cost of DIVSD instructions.  */
  const int sqrtss;		/* cost of SQRTSS instructions.  */
  const int sqrtsd;		/* cost of SQRTSD instructions.  */
  const int reassoc_int, reassoc_fp, reassoc_vec_int, reassoc_vec_fp;
				/* Specify reassociation width for integer,
				   fp, vector integer and vector fp
				   operations.  Generally should correspond
				   to number of instructions executed in
				   parallel.  See also
				   ix86_reassociation_width.  */
  /* Specify what algorithm
     to use for stringops on unknown size.  */
  struct stringop_algs *memcpy, *memset;
  const int cond_taken_branch_cost;    /* Cost of taken branch for vectorizer
					  cost model.  */
  const int cond_not_taken_branch_cost;/* Cost of not taken branch for
					  vectorizer cost model.  */

  /* The "0:0:8" label alignment specified for some processors generates
     secondary 8-byte alignment only for those label/jump/loop targets
     which have primary alignment.  */
  const char *const align_loop;		/* Loop alignment.  */
  const char *const align_jump;		/* Jump alignment.  */
  const char *const align_label;	/* Label alignment.  */
  const char *const align_func;		/* Function alignment.  */

  const unsigned small_unroll_ninsns;	/* Insn count limit for small loop
					   to be unrolled.  */
  const unsigned small_unroll_factor;   /* Unroll factor for small loop to
					   be unrolled.  */
};
232 | |
extern const struct processor_costs *ix86_cost;
extern const struct processor_costs ix86_size_cost;

/* Cost table in effect for the insn currently being expanded: the size
   cost table when this insn is optimized for size, otherwise the tuned
   CPU's speed cost table.  */
#define ix86_cur_cost() \
  (optimize_insn_for_size_p () ? &ix86_size_cost: ix86_cost)
238 | |
/* Macros used in the machine description to test the flags.  */

/* configure can arrange to change it.  */

#ifndef TARGET_CPU_DEFAULT
#define TARGET_CPU_DEFAULT PROCESSOR_GENERIC
#endif

/* Default FP math unit: SSE when it is guaranteed available (64-bit),
   x87 otherwise.  */
#ifndef TARGET_FPMATH_DEFAULT
#define TARGET_FPMATH_DEFAULT \
  (TARGET_64BIT && TARGET_SSE ? FPMATH_SSE : FPMATH_387)
#endif

#ifndef TARGET_FPMATH_DEFAULT_P
#define TARGET_FPMATH_DEFAULT_P(x) \
  (TARGET_64BIT_P(x) && TARGET_SSE_P(x) ? FPMATH_SSE : FPMATH_387)
#endif

/* If the i387 is disabled or -miamcu is used, then do not return
   values in it.  */
#define TARGET_FLOAT_RETURNS_IN_80387 \
  (TARGET_FLOAT_RETURNS && TARGET_80387 && !TARGET_IAMCU)
#define TARGET_FLOAT_RETURNS_IN_80387_P(x) \
  (TARGET_FLOAT_RETURNS_P(x) && TARGET_80387_P(x) && !TARGET_IAMCU_P(x))
263 | |
/* 64bit Sledgehammer mode.  For libgcc2 we make sure this is a
   compile-time constant.  */
#ifdef IN_LIBGCC2
/* In libgcc2 the bitness is fixed by the compiler building it.  */
#undef TARGET_64BIT
#ifdef __x86_64__
#define TARGET_64BIT 1
#else
#define TARGET_64BIT 0
#endif
#else
/* Outside libgcc2: a non-biarch compiler has its bitness fixed at
   configure time; a biarch compiler keeps the runtime definition.  */
#ifndef TARGET_BI_ARCH
#undef TARGET_64BIT
#undef TARGET_64BIT_P
#if TARGET_64BIT_DEFAULT
#define TARGET_64BIT 1
#define TARGET_64BIT_P(x) 1
#else
#define TARGET_64BIT 0
#define TARGET_64BIT_P(x) 0
#endif
#endif
#endif
286 | |
/* x86 branches can reach any address, so no branch shortening is
   required.  */
#define HAS_LONG_COND_BRANCH 1
#define HAS_LONG_UNCOND_BRANCH 1

/* Nonzero iff the current tuning target is CPU.  */
#define TARGET_CPU_P(CPU) (ix86_tune == PROCESSOR_ ## CPU)

/* Feature tests against the various tunings.  Indices are generated
   from x86-tune.def, one per DEF_TUNE entry.  */
enum ix86_tune_indices {
#undef DEF_TUNE
#define DEF_TUNE(tune, name, selector) tune,
#include "x86-tune.def"
#undef DEF_TUNE
  X86_TUNE_LAST
};
300 | |
extern unsigned char ix86_tune_features[X86_TUNE_LAST];

/* One convenience macro per tuning flag, each indexing the
   ix86_tune_features array initialized from x86-tune.def.  */
#define TARGET_USE_LEAVE	ix86_tune_features[X86_TUNE_USE_LEAVE]
#define TARGET_PUSH_MEMORY	ix86_tune_features[X86_TUNE_PUSH_MEMORY]
#define TARGET_ZERO_EXTEND_WITH_AND \
	ix86_tune_features[X86_TUNE_ZERO_EXTEND_WITH_AND]
#define TARGET_UNROLL_STRLEN	ix86_tune_features[X86_TUNE_UNROLL_STRLEN]
#define TARGET_BRANCH_PREDICTION_HINTS \
	ix86_tune_features[X86_TUNE_BRANCH_PREDICTION_HINTS]
#define TARGET_DOUBLE_WITH_ADD	ix86_tune_features[X86_TUNE_DOUBLE_WITH_ADD]
#define TARGET_USE_SAHF		ix86_tune_features[X86_TUNE_USE_SAHF]
#define TARGET_MOVX		ix86_tune_features[X86_TUNE_MOVX]
#define TARGET_PARTIAL_REG_STALL ix86_tune_features[X86_TUNE_PARTIAL_REG_STALL]
#define TARGET_PARTIAL_MEMORY_READ_STALL \
	ix86_tune_features[X86_TUNE_PARTIAL_MEMORY_READ_STALL]
#define TARGET_PARTIAL_FLAG_REG_STALL \
	ix86_tune_features[X86_TUNE_PARTIAL_FLAG_REG_STALL]
#define TARGET_LCP_STALL \
	ix86_tune_features[X86_TUNE_LCP_STALL]
#define TARGET_USE_HIMODE_FIOP	ix86_tune_features[X86_TUNE_USE_HIMODE_FIOP]
#define TARGET_USE_SIMODE_FIOP	ix86_tune_features[X86_TUNE_USE_SIMODE_FIOP]
#define TARGET_USE_MOV0		ix86_tune_features[X86_TUNE_USE_MOV0]
#define TARGET_USE_CLTD		ix86_tune_features[X86_TUNE_USE_CLTD]
#define TARGET_USE_XCHGB	ix86_tune_features[X86_TUNE_USE_XCHGB]
#define TARGET_SPLIT_LONG_MOVES	ix86_tune_features[X86_TUNE_SPLIT_LONG_MOVES]
#define TARGET_READ_MODIFY_WRITE ix86_tune_features[X86_TUNE_READ_MODIFY_WRITE]
#define TARGET_READ_MODIFY	ix86_tune_features[X86_TUNE_READ_MODIFY]
#define TARGET_PROMOTE_QImode	ix86_tune_features[X86_TUNE_PROMOTE_QIMODE]
#define TARGET_FAST_PREFIX	ix86_tune_features[X86_TUNE_FAST_PREFIX]
#define TARGET_SINGLE_STRINGOP	ix86_tune_features[X86_TUNE_SINGLE_STRINGOP]
#define TARGET_PREFER_KNOWN_REP_MOVSB_STOSB \
	ix86_tune_features[X86_TUNE_PREFER_KNOWN_REP_MOVSB_STOSB]
#define TARGET_MISALIGNED_MOVE_STRING_PRO_EPILOGUES \
	ix86_tune_features[X86_TUNE_MISALIGNED_MOVE_STRING_PRO_EPILOGUES]
#define TARGET_QIMODE_MATH	ix86_tune_features[X86_TUNE_QIMODE_MATH]
#define TARGET_HIMODE_MATH	ix86_tune_features[X86_TUNE_HIMODE_MATH]
#define TARGET_PROMOTE_QI_REGS	ix86_tune_features[X86_TUNE_PROMOTE_QI_REGS]
#define TARGET_PROMOTE_HI_REGS	ix86_tune_features[X86_TUNE_PROMOTE_HI_REGS]
#define TARGET_SINGLE_POP	ix86_tune_features[X86_TUNE_SINGLE_POP]
#define TARGET_DOUBLE_POP	ix86_tune_features[X86_TUNE_DOUBLE_POP]
#define TARGET_SINGLE_PUSH	ix86_tune_features[X86_TUNE_SINGLE_PUSH]
#define TARGET_DOUBLE_PUSH	ix86_tune_features[X86_TUNE_DOUBLE_PUSH]
#define TARGET_INTEGER_DFMODE_MOVES \
	ix86_tune_features[X86_TUNE_INTEGER_DFMODE_MOVES]
#define TARGET_PARTIAL_REG_DEPENDENCY \
	ix86_tune_features[X86_TUNE_PARTIAL_REG_DEPENDENCY]
#define TARGET_SSE_PARTIAL_REG_DEPENDENCY \
	ix86_tune_features[X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY]
#define TARGET_SSE_PARTIAL_REG_FP_CONVERTS_DEPENDENCY \
	ix86_tune_features[X86_TUNE_SSE_PARTIAL_REG_FP_CONVERTS_DEPENDENCY]
#define TARGET_SSE_PARTIAL_REG_CONVERTS_DEPENDENCY \
	ix86_tune_features[X86_TUNE_SSE_PARTIAL_REG_CONVERTS_DEPENDENCY]
#define TARGET_SSE_UNALIGNED_LOAD_OPTIMAL \
	ix86_tune_features[X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL]
#define TARGET_SSE_UNALIGNED_STORE_OPTIMAL \
	ix86_tune_features[X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL]
#define TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL \
	ix86_tune_features[X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL]
#define TARGET_SSE_SPLIT_REGS	ix86_tune_features[X86_TUNE_SSE_SPLIT_REGS]
#define TARGET_SSE_TYPELESS_STORES \
	ix86_tune_features[X86_TUNE_SSE_TYPELESS_STORES]
#define TARGET_SSE_LOAD0_BY_PXOR ix86_tune_features[X86_TUNE_SSE_LOAD0_BY_PXOR]
#define TARGET_MEMORY_MISMATCH_STALL \
	ix86_tune_features[X86_TUNE_MEMORY_MISMATCH_STALL]
#define TARGET_PROLOGUE_USING_MOVE \
	ix86_tune_features[X86_TUNE_PROLOGUE_USING_MOVE]
#define TARGET_EPILOGUE_USING_MOVE \
	ix86_tune_features[X86_TUNE_EPILOGUE_USING_MOVE]
#define TARGET_SHIFT1		ix86_tune_features[X86_TUNE_SHIFT1]
#define TARGET_USE_FFREEP	ix86_tune_features[X86_TUNE_USE_FFREEP]
#define TARGET_INTER_UNIT_MOVES_TO_VEC \
	ix86_tune_features[X86_TUNE_INTER_UNIT_MOVES_TO_VEC]
#define TARGET_INTER_UNIT_MOVES_FROM_VEC \
	ix86_tune_features[X86_TUNE_INTER_UNIT_MOVES_FROM_VEC]
#define TARGET_INTER_UNIT_CONVERSIONS \
	ix86_tune_features[X86_TUNE_INTER_UNIT_CONVERSIONS]
#define TARGET_FOUR_JUMP_LIMIT	ix86_tune_features[X86_TUNE_FOUR_JUMP_LIMIT]
#define TARGET_SCHEDULE		ix86_tune_features[X86_TUNE_SCHEDULE]
#define TARGET_USE_BT		ix86_tune_features[X86_TUNE_USE_BT]
#define TARGET_USE_INCDEC	ix86_tune_features[X86_TUNE_USE_INCDEC]
#define TARGET_PAD_RETURNS	ix86_tune_features[X86_TUNE_PAD_RETURNS]
#define TARGET_PAD_SHORT_FUNCTION \
	ix86_tune_features[X86_TUNE_PAD_SHORT_FUNCTION]
#define TARGET_EXT_80387_CONSTANTS \
	ix86_tune_features[X86_TUNE_EXT_80387_CONSTANTS]
#define TARGET_AVOID_VECTOR_DECODE \
	ix86_tune_features[X86_TUNE_AVOID_VECTOR_DECODE]
#define TARGET_TUNE_PROMOTE_HIMODE_IMUL \
	ix86_tune_features[X86_TUNE_PROMOTE_HIMODE_IMUL]
#define TARGET_SLOW_IMUL_IMM32_MEM \
	ix86_tune_features[X86_TUNE_SLOW_IMUL_IMM32_MEM]
#define TARGET_SLOW_IMUL_IMM8	ix86_tune_features[X86_TUNE_SLOW_IMUL_IMM8]
#define TARGET_MOVE_M1_VIA_OR	ix86_tune_features[X86_TUNE_MOVE_M1_VIA_OR]
#define TARGET_NOT_UNPAIRABLE	ix86_tune_features[X86_TUNE_NOT_UNPAIRABLE]
#define TARGET_NOT_VECTORMODE	ix86_tune_features[X86_TUNE_NOT_VECTORMODE]
#define TARGET_USE_VECTOR_FP_CONVERTS \
	ix86_tune_features[X86_TUNE_USE_VECTOR_FP_CONVERTS]
#define TARGET_USE_VECTOR_CONVERTS \
	ix86_tune_features[X86_TUNE_USE_VECTOR_CONVERTS]
#define TARGET_SLOW_PSHUFB \
	ix86_tune_features[X86_TUNE_SLOW_PSHUFB]
#define TARGET_AVOID_4BYTE_PREFIXES \
	ix86_tune_features[X86_TUNE_AVOID_4BYTE_PREFIXES]
#define TARGET_USE_GATHER_2PARTS \
	ix86_tune_features[X86_TUNE_USE_GATHER_2PARTS]
#define TARGET_USE_SCATTER_2PARTS \
	ix86_tune_features[X86_TUNE_USE_SCATTER_2PARTS]
#define TARGET_USE_GATHER_4PARTS \
	ix86_tune_features[X86_TUNE_USE_GATHER_4PARTS]
#define TARGET_USE_SCATTER_4PARTS \
	ix86_tune_features[X86_TUNE_USE_SCATTER_4PARTS]
#define TARGET_USE_GATHER_8PARTS \
	ix86_tune_features[X86_TUNE_USE_GATHER_8PARTS]
#define TARGET_USE_SCATTER_8PARTS \
	ix86_tune_features[X86_TUNE_USE_SCATTER_8PARTS]
#define TARGET_FUSE_CMP_AND_BRANCH_32 \
	ix86_tune_features[X86_TUNE_FUSE_CMP_AND_BRANCH_32]
#define TARGET_FUSE_CMP_AND_BRANCH_64 \
	ix86_tune_features[X86_TUNE_FUSE_CMP_AND_BRANCH_64]
#define TARGET_FUSE_CMP_AND_BRANCH \
	(TARGET_64BIT ? TARGET_FUSE_CMP_AND_BRANCH_64 \
	 : TARGET_FUSE_CMP_AND_BRANCH_32)
#define TARGET_FUSE_CMP_AND_BRANCH_SOFLAGS \
	ix86_tune_features[X86_TUNE_FUSE_CMP_AND_BRANCH_SOFLAGS]
#define TARGET_FUSE_ALU_AND_BRANCH \
	ix86_tune_features[X86_TUNE_FUSE_ALU_AND_BRANCH]
#define TARGET_OPT_AGU		ix86_tune_features[X86_TUNE_OPT_AGU]
#define TARGET_AVOID_LEA_FOR_ADDR \
	ix86_tune_features[X86_TUNE_AVOID_LEA_FOR_ADDR]
#define TARGET_SOFTWARE_PREFETCHING_BENEFICIAL \
	ix86_tune_features[X86_TUNE_SOFTWARE_PREFETCHING_BENEFICIAL]
#define TARGET_AVX256_SPLIT_REGS \
	ix86_tune_features[X86_TUNE_AVX256_SPLIT_REGS]
#define TARGET_AVX512_SPLIT_REGS \
	ix86_tune_features[X86_TUNE_AVX512_SPLIT_REGS]
#define TARGET_GENERAL_REGS_SSE_SPILL \
	ix86_tune_features[X86_TUNE_GENERAL_REGS_SSE_SPILL]
#define TARGET_AVOID_MEM_OPND_FOR_CMOVE \
	ix86_tune_features[X86_TUNE_AVOID_MEM_OPND_FOR_CMOVE]
#define TARGET_SPLIT_MEM_OPND_FOR_FP_CONVERTS \
	ix86_tune_features[X86_TUNE_SPLIT_MEM_OPND_FOR_FP_CONVERTS]
#define TARGET_ADJUST_UNROLL \
	ix86_tune_features[X86_TUNE_ADJUST_UNROLL]
#define TARGET_AVOID_FALSE_DEP_FOR_BMI \
	ix86_tune_features[X86_TUNE_AVOID_FALSE_DEP_FOR_BMI]
#define TARGET_ONE_IF_CONV_INSN \
	ix86_tune_features[X86_TUNE_ONE_IF_CONV_INSN]
#define TARGET_AVOID_MFENCE	ix86_tune_features[X86_TUNE_AVOID_MFENCE]
#define TARGET_EMIT_VZEROUPPER \
	ix86_tune_features[X86_TUNE_EMIT_VZEROUPPER]
#define TARGET_EXPAND_ABS \
	ix86_tune_features[X86_TUNE_EXPAND_ABS]
#define TARGET_V2DF_REDUCTION_PREFER_HADDPD \
	ix86_tune_features[X86_TUNE_V2DF_REDUCTION_PREFER_HADDPD]
#define TARGET_DEST_FALSE_DEP_FOR_GLC \
	ix86_tune_features[X86_TUNE_DEST_FALSE_DEP_FOR_GLC]
#define TARGET_SLOW_STC		ix86_tune_features[X86_TUNE_SLOW_STC]
#define TARGET_USE_RCR		ix86_tune_features[X86_TUNE_USE_RCR]
459 | |
/* Feature tests against the various architecture variations.  */
enum ix86_arch_indices {
  X86_ARCH_CMOV,
  X86_ARCH_CMPXCHG,
  X86_ARCH_CMPXCHG8B,
  X86_ARCH_XADD,
  X86_ARCH_BSWAP,

  X86_ARCH_LAST
};

extern unsigned char ix86_arch_features[X86_ARCH_LAST];

#define TARGET_CMOV		ix86_arch_features[X86_ARCH_CMOV]
#define TARGET_CMPXCHG		ix86_arch_features[X86_ARCH_CMPXCHG]
#define TARGET_CMPXCHG8B	ix86_arch_features[X86_ARCH_CMPXCHG8B]
#define TARGET_XADD		ix86_arch_features[X86_ARCH_XADD]
#define TARGET_BSWAP		ix86_arch_features[X86_ARCH_BSWAP]

/* For sane SSE instruction set generation we need fcomi instruction.
   It is safe to enable all CMOVE instructions.  Also, RDRAND intrinsic
   expands to a sequence that includes conditional move.  */
#define TARGET_CMOVE		(TARGET_CMOV || TARGET_SSE || TARGET_RDRND)

/* fisttp requires both SSE3 and the x87 unit.  */
#define TARGET_FISTTP		(TARGET_SSE3 && TARGET_80387)

extern unsigned char ix86_prefetch_sse;
#define TARGET_PREFETCH_SSE	ix86_prefetch_sse

#define ASSEMBLER_DIALECT	(ix86_asm_dialect)

/* FP math unit selection from -mfpmath (SSE, 387 or both).  */
#define TARGET_SSE_MATH		((ix86_fpmath & FPMATH_SSE) != 0)
#define TARGET_MIX_SSE_I387 \
  ((ix86_fpmath & (FPMATH_SSE | FPMATH_387)) == (FPMATH_SSE | FPMATH_387))

/* Whether hard registers exist that can hold SFmode/DFmode/XFmode.  */
#define TARGET_HARD_SF_REGS	(TARGET_80387 || TARGET_MMX || TARGET_SSE)
#define TARGET_HARD_DF_REGS	(TARGET_80387 || TARGET_SSE)
#define TARGET_HARD_XF_REGS	(TARGET_80387)

#define TARGET_GNU_TLS		(ix86_tls_dialect == TLS_DIALECT_GNU)
#define TARGET_GNU2_TLS		(ix86_tls_dialect == TLS_DIALECT_GNU2)
#define TARGET_ANY_GNU_TLS	(TARGET_GNU_TLS || TARGET_GNU2_TLS)
#define TARGET_SUN_TLS		0

#ifndef TARGET_64BIT_DEFAULT
#define TARGET_64BIT_DEFAULT 0
#endif
#ifndef TARGET_TLS_DIRECT_SEG_REFS_DEFAULT
#define TARGET_TLS_DIRECT_SEG_REFS_DEFAULT 0
#endif

/* Stack protector guard location selected by -mstack-protector-guard.  */
#define TARGET_SSP_GLOBAL_GUARD (ix86_stack_protector_guard == SSP_GLOBAL)
#define TARGET_SSP_TLS_GUARD    (ix86_stack_protector_guard == SSP_TLS)
513 | |
/* Fence to use after loop using storent.  */

extern GTY(()) tree x86_mfence;
#define FENCE_FOLLOWING_MOVNT x86_mfence

/* Once GDB has been enhanced to deal with functions without frame
   pointers, we can change this to allow for elimination of
   the frame pointer in leaf functions.  */
#define TARGET_DEFAULT 0

/* Extra bits to force.  */
#define TARGET_SUBTARGET_DEFAULT 0
#define TARGET_SUBTARGET_ISA_DEFAULT 0

/* Extra bits to force on w/ 32-bit mode.  */
#define TARGET_SUBTARGET32_DEFAULT 0
#define TARGET_SUBTARGET32_ISA_DEFAULT 0

/* Extra bits to force on w/ 64-bit mode.  */
#define TARGET_SUBTARGET64_DEFAULT 0
/* Enable MMX, SSE and SSE2 by default.  */
#define TARGET_SUBTARGET64_ISA_DEFAULT \
  (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2)

/* Replace MACH-O ifdefs by in-line tests, where possible.
   (a) Macros defined in config/i386/darwin.h.  */
#define TARGET_MACHO 0
#define TARGET_MACHO_SYMBOL_STUBS 0
#define MACHOPIC_ATT_STUB 0
/* (b) Macros defined in config/darwin.h.  */
#define MACHO_DYNAMIC_NO_PIC_P 0
#define MACHOPIC_INDIRECT 0
#define MACHOPIC_PURE 0

/* For the RDOS.  */
#define TARGET_RDOS 0

/* For the Windows 64-bit ABI.  */
#define TARGET_64BIT_MS_ABI (TARGET_64BIT && ix86_cfun_abi () == MS_ABI)

/* For the Windows 32-bit ABI.  */
#define TARGET_32BIT_MS_ABI (!TARGET_64BIT && ix86_cfun_abi () == MS_ABI)

/* This is re-defined by cygming.h.  */
#define TARGET_SEH 0

/* The default abi used by target.  */
#define DEFAULT_ABI SYSV_ABI

/* The default TLS segment register used by target.  */
#define DEFAULT_TLS_SEG_REG \
  (TARGET_64BIT ? ADDR_SPACE_SEG_FS : ADDR_SPACE_SEG_GS)

/* Subtargets may reset this to 1 in order to enable 96-bit long double
   with the rounding mode forced to 53 bits.  */
#define TARGET_96_ROUND_53_LONG_DOUBLE 0

#ifndef SUBTARGET_DRIVER_SELF_SPECS
# define SUBTARGET_DRIVER_SELF_SPECS ""
#endif

#define DRIVER_SELF_SPECS SUBTARGET_DRIVER_SELF_SPECS
576 | |
/* -march=native handling only makes sense with compiler running on
   an x86 or x86_64 chip.  If changing this condition, also change
   the condition in driver-i386.cc.  */
#if defined(__i386__) || defined(__x86_64__)
/* In driver-i386.cc.  */
extern const char *host_detect_local_cpu (int argc, const char **argv);
/* Register the spec function invoked as %:local_cpu_detect(...) in the
   CC1_CPU_SPEC strings below.  The original text lost the macro name
   after "#define", which is a hard preprocessor error.  */
#define EXTRA_SPEC_FUNCTIONS \
  { "local_cpu_detect", host_detect_local_cpu },
#define HAVE_LOCAL_CPU_DETECT
#endif
587 | |
/* Driver spec fragments matching the options that select 64-bit
   (OPT_ARCH64) or 32-bit (OPT_ARCH32) code generation, relative to the
   configured default.  */
#if TARGET_64BIT_DEFAULT
#define OPT_ARCH64 "!m32"
#define OPT_ARCH32 "m32"
#else
#define OPT_ARCH64 "m64|mx32"
#define OPT_ARCH32 "m64|mx32:;"
#endif

/* Support for configure-time defaults of some command line options.
   The order here is important so that -march doesn't squash the
   tune or cpu values.  */
#define OPTION_DEFAULT_SPECS \
  {"tune", "%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}" }, \
  {"tune_32", "%{" OPT_ARCH32 ":%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}}" }, \
  {"tune_64", "%{" OPT_ARCH64 ":%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}}" }, \
  {"cpu", "%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}" }, \
  {"cpu_32", "%{" OPT_ARCH32 ":%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}}" }, \
  {"cpu_64", "%{" OPT_ARCH64 ":%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}}" }, \
  {"arch", "%{!march=*:-march=%(VALUE)}"}, \
  {"arch_32", "%{" OPT_ARCH32 ":%{!march=*:-march=%(VALUE)}}"}, \
  {"arch_64", "%{" OPT_ARCH64 ":%{!march=*:-march=%(VALUE)}}"},

/* Specs for the compiler proper.  */

#ifndef CC1_CPU_SPEC
#define CC1_CPU_SPEC_1 ""

#ifndef HAVE_LOCAL_CPU_DETECT
#define CC1_CPU_SPEC CC1_CPU_SPEC_1
#else
/* With local CPU detection, rewrite -march=native / -mtune=native into
   concrete options via the local_cpu_detect spec function.  */
#define ARCH_ARG "%{" OPT_ARCH64 ":64;:32}"
#define CC1_CPU_SPEC CC1_CPU_SPEC_1 \
"%{march=native:%>march=native %:local_cpu_detect(arch " ARCH_ARG ") \
  %{!mtune=*:%>mtune=native %:local_cpu_detect(tune " ARCH_ARG ")}} \
%{mtune=native:%>mtune=native %:local_cpu_detect(tune " ARCH_ARG ")}"
#endif
#endif
625 | |
/* Target CPU builtins; implemented in i386-c.cc.  */
#define TARGET_CPU_CPP_BUILTINS() ix86_target_macros ()

/* Target pragmas; implemented in i386-c.cc.  */
#define REGISTER_TARGET_PRAGMAS() ix86_register_pragmas ()

#ifndef CC1_SPEC
#define CC1_SPEC "%(cc1_cpu) "
#endif
635 | |
636 | /* This macro defines names of additional specifications to put in the |
637 | specs that can be used in various specifications like CC1_SPEC. Its |
638 | definition is an initializer with a subgrouping for each command option. |
639 | |
640 | Each subgrouping contains a string constant, that defines the |
641 | specification name, and a string constant that used by the GCC driver |
642 | program. |
643 | |
644 | Do not define this macro if it does not need to do anything. */ |
645 | |
/* Default to no subtarget-specific extra specs.  The original text lost
   the macro names after both "#define" directives below (hard
   preprocessor errors); restore SUBTARGET_EXTRA_SPECS and EXTRA_SPECS.  */
#ifndef SUBTARGET_EXTRA_SPECS
#define SUBTARGET_EXTRA_SPECS
#endif

/* Named spec strings available to CC1_SPEC and friends; see the comment
   above for the subgrouping format.  */
#define EXTRA_SPECS \
  { "cc1_cpu", CC1_CPU_SPEC }, \
  SUBTARGET_EXTRA_SPECS
653 | |
654 | |
655 | /* Whether to allow x87 floating-point arithmetic on MODE (one of |
656 | SFmode, DFmode and XFmode) in the current excess precision |
657 | configuration. */ |
658 | #define X87_ENABLE_ARITH(MODE) \ |
659 | (ix86_unsafe_math_optimizations \ |
660 | || ix86_excess_precision == EXCESS_PRECISION_FAST \ |
661 | || (MODE) == XFmode) |
662 | |
663 | /* Likewise, whether to allow direct conversions from integer mode |
664 | IMODE (HImode, SImode or DImode) to MODE. */ |
665 | #define X87_ENABLE_FLOAT(MODE, IMODE) \ |
666 | (ix86_unsafe_math_optimizations \ |
667 | || ix86_excess_precision == EXCESS_PRECISION_FAST \ |
668 | || (MODE) == XFmode \ |
669 | || ((MODE) == DFmode && (IMODE) == SImode) \ |
670 | || (IMODE) == HImode) |
671 | |
/* target machine storage layout */

#define SHORT_TYPE_SIZE 16
#define INT_TYPE_SIZE 32
/* long and pointers are 32-bit under x32 even though the word size is 64.  */
#define LONG_TYPE_SIZE (TARGET_X32 ? 32 : BITS_PER_WORD)
#define POINTER_SIZE (TARGET_X32 ? 32 : BITS_PER_WORD)
#define LONG_LONG_TYPE_SIZE 64
#define FLOAT_TYPE_SIZE 32
#define DOUBLE_TYPE_SIZE 64
/* 80-bit x87 extended by default; -mlong-double-64/-mlong-double-128
   override.  */
#define LONG_DOUBLE_TYPE_SIZE \
  (TARGET_LONG_DOUBLE_64 ? 64 : (TARGET_LONG_DOUBLE_128 ? 128 : 80))

#define WIDEST_HARDWARE_FP_SIZE 80

#if defined (TARGET_BI_ARCH) || TARGET_64BIT_DEFAULT
#define MAX_BITS_PER_WORD 64
#else
#define MAX_BITS_PER_WORD 32
#endif

/* Define this if most significant bit is lowest numbered
   in instructions that operate on numbered bit-fields.  */
/* That is not true on the 80386.  */
#define BITS_BIG_ENDIAN 0

/* Define this if most significant byte of a word is the lowest numbered.  */
/* That is not true on the 80386.  */
#define BYTES_BIG_ENDIAN 0

/* Define this if most significant word of a multiword number is the lowest
   numbered.  */
/* Not true for 80386 */
#define WORDS_BIG_ENDIAN 0

/* Width of a word, in units (bytes).  */
#define UNITS_PER_WORD (TARGET_64BIT ? 8 : 4)

/* libgcc is built per-multilib, so it must not see this 32-bit minimum.  */
#ifndef IN_LIBGCC2
#define MIN_UNITS_PER_WORD 4
#endif
712 | |
/* Allocation boundary (in *bits*) for storing arguments in argument list.  */
#define PARM_BOUNDARY BITS_PER_WORD

/* Boundary (in *bits*) on which stack pointer should be aligned.  */
#define STACK_BOUNDARY (TARGET_64BIT_MS_ABI ? 128 : BITS_PER_WORD)

/* Stack boundary of the main function guaranteed by OS.  */
#define MAIN_STACK_BOUNDARY (TARGET_64BIT ? 128 : 32)

/* Minimum stack boundary.  */
#define MIN_STACK_BOUNDARY BITS_PER_WORD

/* Boundary (in *bits*) on which the stack pointer prefers to be
   aligned; the compiler cannot rely on having this alignment.  */
#define PREFERRED_STACK_BOUNDARY ix86_preferred_stack_boundary

/* It should be MIN_STACK_BOUNDARY.  But we set it to 128 bits for
   both 32bit and 64bit, to support codes that need 128 bit stack
   alignment for SSE instructions, but can't realign the stack.  */
#define PREFERRED_STACK_BOUNDARY_DEFAULT \
  (TARGET_IAMCU ? MIN_STACK_BOUNDARY : 128)

/* 1 if -mstackrealign should be turned on by default.  It will
   generate an alternate prologue and epilogue that realigns the
   runtime stack if necessary.  This supports mixing codes that keep a
   4-byte aligned stack, as specified by i386 psABI, with codes that
   need a 16-byte aligned stack, as required by SSE instructions.  */
#define STACK_REALIGN_DEFAULT 0

/* Boundary (in *bits*) on which the incoming stack is aligned.  */
#define INCOMING_STACK_BOUNDARY ix86_incoming_stack_boundary

/* According to Windows x64 software convention, the maximum stack allocatable
   in the prologue is 4G - 8 bytes.  Furthermore, there is a limited set of
   instructions allowed to adjust the stack pointer in the epilog, forcing the
   use of frame pointer for frames larger than 2 GB.  This theoretical limit
   is reduced by 256, an over-estimated upper bound for the stack use by the
   prologue.
   We define only one threshold for both the prolog and the epilog.  When the
   frame size is larger than this threshold, we allocate the area to save SSE
   regs, then save them, and then allocate the remaining.  There is no SEH
   unwind info for this later allocation.  */
#define SEH_MAX_FRAME_SIZE ((2U << 30) - 256)

/* Target OS keeps a vector-aligned (128-bit, 16-byte) stack.  This is
   mandatory for the 64-bit ABI, and may or may not be true for other
   operating systems.  */
#define TARGET_KEEPS_VECTOR_ALIGNED_STACK TARGET_64BIT

/* Minimum allocation boundary for the code of a function.  */
#define FUNCTION_BOUNDARY 8
764 | |
/* We will AND with this value to test if a custom function descriptor needs
   a static chain.  The function boundary must be adjusted so that the bit
   this represents is no longer part of the address.  0 disables the custom
   function descriptors.  */
#define X86_CUSTOM_FUNCTION_TEST 1

/* C++ stores the virtual bit in the lowest bit of function pointers.  */
#define TARGET_PTRMEMFUNC_VBIT_LOCATION ptrmemfunc_vbit_in_pfn

/* Minimum size in bits of the largest boundary to which any
   and all fundamental data types supported by the hardware
   might need to be aligned.  No data type wants to be aligned
   rounder than this.

   Pentium+ prefers DFmode values to be aligned to 64 bit boundary
   and Pentium Pro XFmode values at 128 bit boundaries.

   When increasing the maximum, also update
   TARGET_ABSOLUTE_BIGGEST_ALIGNMENT.  */

#define BIGGEST_ALIGNMENT \
  (TARGET_IAMCU ? 32 : ((TARGET_AVX512F && TARGET_EVEX512) \
			? 512 : (TARGET_AVX ? 256 : 128)))

/* Maximum stack alignment.  */
#define MAX_STACK_ALIGNMENT MAX_OFILE_ALIGNMENT

/* Alignment value for attribute ((aligned)).  It is a constant since
   it is the part of the ABI.  We shouldn't change it with -mavx.  */
#define ATTRIBUTE_ALIGNED_VALUE (TARGET_IAMCU ? 32 : 128)

/* Decide whether a variable of mode MODE should be 128 bit aligned.  */
#define ALIGN_MODE_128(MODE) \
 ((MODE) == XFmode || SSE_REG_MODE_P (MODE))
799 | |
/* The published ABIs say that doubles should be aligned on word
   boundaries, so lower the alignment for structure fields unless
   -malign-double is set.  */

/* ??? Blah -- this macro is used directly by libobjc.  Since it
   supports no vector modes, cut out the complexity and fall back
   on BIGGEST_FIELD_ALIGNMENT.  */
#ifdef IN_TARGET_LIBS
#ifdef __x86_64__
#define BIGGEST_FIELD_ALIGNMENT 128
#else
#define BIGGEST_FIELD_ALIGNMENT 32
#endif
#else
/* Normal compiler path: field alignment policy lives in i386.cc.  */
#define ADJUST_FIELD_ALIGN(FIELD, TYPE, COMPUTED) \
  x86_field_alignment ((TYPE), (COMPUTED))
#endif

/* If defined, a C expression to compute the alignment for a static
   variable.  TYPE is the data type, and ALIGN is the alignment that
   the object would ordinarily have.  The value of this macro is used
   instead of that alignment to align the object.

   If this macro is not defined, then ALIGN is used.

   One use of this macro is to increase alignment of medium-size
   data to make it all fit in fewer cache lines.  Another is to
   cause character arrays to be word-aligned so that `strcpy' calls
   that copy constants to character arrays can be done inline.  */

#define DATA_ALIGNMENT(TYPE, ALIGN) \
  ix86_data_alignment ((TYPE), (ALIGN), true)

/* Similar to DATA_ALIGNMENT, but for the cases where the ABI mandates
   some alignment increase, instead of optimization only purposes.  E.g.
   AMD x86-64 psABI says that variables with array type larger than 15 bytes
   must be aligned to 16 byte boundaries.

   If this macro is not defined, then ALIGN is used.  */

#define DATA_ABI_ALIGNMENT(TYPE, ALIGN) \
  ix86_data_alignment ((TYPE), (ALIGN), false)
842 | |
/* If defined, a C expression to compute the alignment for a local
   variable.  TYPE is the data type, and ALIGN is the alignment that
   the object would ordinarily have.  The value of this macro is used
   instead of that alignment to align the object.

   If this macro is not defined, then ALIGN is used.

   One use of this macro is to increase alignment of medium-size
   data to make it all fit in fewer cache lines.  */

#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
  ix86_local_alignment ((TYPE), VOIDmode, (ALIGN))

/* If defined, a C expression to compute the alignment for stack slot.
   TYPE is the data type, MODE is the widest mode available, and ALIGN
   is the alignment that the slot would ordinarily have.  The value of
   this macro is used instead of that alignment to align the slot.

   If this macro is not defined, then ALIGN is used when TYPE is NULL,
   Otherwise, LOCAL_ALIGNMENT will be used.

   One use of this macro is to set alignment of stack slot to the
   maximum alignment of all possible modes which the slot may have.  */

#define STACK_SLOT_ALIGNMENT(TYPE, MODE, ALIGN) \
  ix86_local_alignment ((TYPE), (MODE), (ALIGN))

/* If defined, a C expression to compute the alignment for a local
   variable DECL.

   If this macro is not defined, then
   LOCAL_ALIGNMENT (TREE_TYPE (DECL), DECL_ALIGN (DECL)) will be used.

   One use of this macro is to increase alignment of medium-size
   data to make it all fit in fewer cache lines.  */

#define LOCAL_DECL_ALIGNMENT(DECL) \
  ix86_local_alignment ((DECL), VOIDmode, DECL_ALIGN (DECL))

/* If defined, a C expression to compute the minimum required alignment
   for dynamic stack realignment purposes for EXP (a TYPE or DECL),
   MODE, assuming normal alignment ALIGN.

   If this macro is not defined, then (ALIGN) will be used.  */

#define MINIMUM_ALIGNMENT(EXP, MODE, ALIGN) \
  ix86_minimum_alignment ((EXP), (MODE), (ALIGN))


/* Set this nonzero if move instructions will actually fail to work
   when given unaligned data.  x86 tolerates unaligned accesses.  */
#define STRICT_ALIGNMENT 0

/* If bit field type is int, don't let it cross an int,
   and give entire struct the alignment of an int.  */
/* Required on the 386 since it doesn't have bit-field insns.  */
#define PCC_BITFIELD_TYPE_MATTERS 1
900 | |
/* Standard register usage.  */

/* This processor has special stack-like registers.  See reg-stack.cc
   for details.  */

#define STACK_REGS

/* MODE lives on the x87 stack: it is an x87 float mode and SSE math
   either doesn't cover it or x87/SSE mixing is enabled.  */
#define IS_STACK_MODE(MODE) \
  (X87_FLOAT_MODE_P (MODE) \
   && (!(SSE_FLOAT_MODE_P (MODE) && TARGET_SSE_MATH) \
       || TARGET_MIX_SSE_I387))

/* Number of actual hardware registers.
   The hardware registers are assigned numbers for the compiler
   from 0 to just below FIRST_PSEUDO_REGISTER.
   All registers that the compiler knows about must be given numbers,
   even those that are not normally considered general registers.

   In the 80386 we give the 8 general purpose registers the numbers 0-7.
   We number the floating point registers 8-15.
   Note that registers 0-7 can be accessed as a short or int,
   while only 0-3 may be used with byte `mov' instructions.

   Reg 16 does not correspond to any hardware register, but instead
   appears in the RTL as an argument pointer prior to reload, and is
   eliminated during reloading in favor of either the stack or frame
   pointer.  */

#define FIRST_PSEUDO_REGISTER FIRST_PSEUDO_REG

/* Number of hardware registers that go into the DWARF-2 unwind info.
   If not defined, equals FIRST_PSEUDO_REGISTER.  */

#define DWARF_FRAME_REGISTERS 17
935 | |
/* 1 for registers that have pervasive standard uses
   and are not available for the register allocator.
   On the 80386, the stack pointer is such, as is the arg pointer.

   REX registers are disabled for 32bit targets in
   TARGET_CONDITIONAL_REGISTER_USAGE.  */

/* Only %esp plus the four fake regs (arg, flags, fpsr, frame) are
   unconditionally fixed; everything else is conditional.  */
#define FIXED_REGISTERS \
/*ax,dx,cx,bx,si,di,bp,sp,st,st1,st2,st3,st4,st5,st6,st7*/ \
{ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, \
/*arg,flags,fpsr,frame*/ \
 1, 1, 1, 1, \
/*xmm0,xmm1,xmm2,xmm3,xmm4,xmm5,xmm6,xmm7*/ \
 0, 0, 0, 0, 0, 0, 0, 0, \
/* mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7*/ \
 0, 0, 0, 0, 0, 0, 0, 0, \
/* r8, r9, r10, r11, r12, r13, r14, r15*/ \
 0, 0, 0, 0, 0, 0, 0, 0, \
/*xmm8,xmm9,xmm10,xmm11,xmm12,xmm13,xmm14,xmm15*/ \
 0, 0, 0, 0, 0, 0, 0, 0, \
/*xmm16,xmm17,xmm18,xmm19,xmm20,xmm21,xmm22,xmm23*/ \
 0, 0, 0, 0, 0, 0, 0, 0, \
/*xmm24,xmm25,xmm26,xmm27,xmm28,xmm29,xmm30,xmm31*/ \
 0, 0, 0, 0, 0, 0, 0, 0, \
/* k0, k1, k2, k3, k4, k5, k6, k7*/ \
 0, 0, 0, 0, 0, 0, 0, 0, \
/* r16, r17, r18, r19, r20, r21, r22, r23*/ \
 0, 0, 0, 0, 0, 0, 0, 0, \
/* r24, r25, r26, r27, r28, r29, r30, r31*/ \
 0, 0, 0, 0, 0, 0, 0, 0} \

966 | |
/* 1 for registers not available across function calls.
   These must include the FIXED_REGISTERS and also any
   registers that can be used without being saved.
   The latter must include the registers where values are returned
   and the register where structure-value addresses are passed.
   Aside from that, you can include as many other registers as you like.

   Value is set to 1 if the register is call used unconditionally.
   Bit one is set if the register is call used on TARGET_32BIT ABI.
   Bit two is set if the register is call used on TARGET_64BIT ABI.
   Bit three is set if the register is call used on TARGET_64BIT_MS_ABI.

   Proper values are computed in TARGET_CONDITIONAL_REGISTER_USAGE.  */

/* Select the ABI-specific bit defined above for the current target.  */
#define CALL_USED_REGISTERS_MASK(IS_64BIT_MS_ABI) \
  ((IS_64BIT_MS_ABI) ? (1 << 3) : TARGET_64BIT ? (1 << 2) : (1 << 1))

/* E.g. 6 = bits 1|2: call-used on the 32-bit and 64-bit SysV ABIs but
   call-saved on the 64-bit MS ABI (xmm6-xmm15).  */
#define CALL_USED_REGISTERS \
/*ax,dx,cx,bx,si,di,bp,sp,st,st1,st2,st3,st4,st5,st6,st7*/ \
{ 1, 1, 1, 0, 4, 4, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
/*arg,flags,fpsr,frame*/ \
 1, 1, 1, 1, \
/*xmm0,xmm1,xmm2,xmm3,xmm4,xmm5,xmm6,xmm7*/ \
 1, 1, 1, 1, 1, 1, 6, 6, \
/* mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7*/ \
 1, 1, 1, 1, 1, 1, 1, 1, \
/* r8, r9, r10, r11, r12, r13, r14, r15*/ \
 1, 1, 1, 1, 2, 2, 2, 2, \
/*xmm8,xmm9,xmm10,xmm11,xmm12,xmm13,xmm14,xmm15*/ \
 6, 6, 6, 6, 6, 6, 6, 6, \
/*xmm16,xmm17,xmm18,xmm19,xmm20,xmm21,xmm22,xmm23*/ \
 1, 1, 1, 1, 1, 1, 1, 1, \
/*xmm24,xmm25,xmm26,xmm27,xmm28,xmm29,xmm30,xmm31*/ \
 1, 1, 1, 1, 1, 1, 1, 1, \
/* k0, k1, k2, k3, k4, k5, k6, k7*/ \
 1, 1, 1, 1, 1, 1, 1, 1, \
/* r16, r17, r18, r19, r20, r21, r22, r23*/ \
 1, 1, 1, 1, 1, 1, 1, 1, \
/* r24, r25, r26, r27, r28, r29, r30, r31*/ \
 1, 1, 1, 1, 1, 1, 1, 1} \

1007 | |
/* Order in which to allocate registers.  Each register must be
   listed once, even those in FIXED_REGISTERS.  List frame pointer
   late and fixed registers last.  Note that, in general, we prefer
   registers listed in CALL_USED_REGISTERS, keeping the others
   available for storage of persistent values.

   The ADJUST_REG_ALLOC_ORDER actually overwrite the order,
   so this is just empty initializer for array.  */

#define REG_ALLOC_ORDER \
{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, \
 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, \
 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, \
 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, \
 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, \
 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91}

/* ADJUST_REG_ALLOC_ORDER is a macro which permits reg_alloc_order
   to be rearranged based on a particular function.  When using sse math,
   we want to allocate SSE before x87 registers and vice versa.  */

#define ADJUST_REG_ALLOC_ORDER x86_order_regs_for_local_alloc ()

/* Per-insn base/index register class selection (APX extended GPRs
   aren't valid in every addressing context).  */
#define INSN_BASE_REG_CLASS(INSN) \
  ix86_insn_base_reg_class (INSN)

#define REGNO_OK_FOR_INSN_BASE_P(NUM, INSN) \
  ix86_regno_ok_for_insn_base_p (NUM, INSN)

#define INSN_INDEX_REG_CLASS(INSN) \
  ix86_insn_index_reg_class (INSN)

#define OVERRIDE_ABI_FORMAT(FNDECL) ix86_call_abi_override (FNDECL)

/* XFmode occupies 12 bytes (3 SImode regs) but 16 with
   -m128bit-long-double padding; see HARD_REGNO_NREGS handling.  */
#define HARD_REGNO_NREGS_HAS_PADDING(REGNO, MODE) \
  (TARGET_128BIT_LONG_DOUBLE && !TARGET_64BIT \
   && GENERAL_REGNO_P (REGNO) \
   && ((MODE) == XFmode || (MODE) == XCmode))

#define HARD_REGNO_NREGS_WITH_PADDING(REGNO, MODE) ((MODE) == XFmode ? 4 : 8)

#define REGMODE_NATURAL_SIZE(MODE) ix86_regmode_natural_size (MODE)
1050 | |
/* Mode predicates used to decide which modes each register file and
   ISA level can hold.  */

#define VALID_AVX256_REG_MODE(MODE) \
  ((MODE) == V32QImode || (MODE) == V16HImode || (MODE) == V8SImode \
   || (MODE) == V4DImode || (MODE) == V2TImode || (MODE) == V8SFmode \
   || (MODE) == V4DFmode || (MODE) == V16HFmode || (MODE) == V16BFmode)

#define VALID_AVX256_REG_OR_OI_MODE(MODE) \
  (VALID_AVX256_REG_MODE (MODE) || (MODE) == OImode)

#define VALID_AVX512F_SCALAR_MODE(MODE) \
  ((MODE) == DImode || (MODE) == DFmode \
   || (MODE) == SImode || (MODE) == SFmode \
   || (MODE) == HImode || (MODE) == HFmode || (MODE) == BFmode)

#define VALID_AVX512F_REG_MODE(MODE) \
  ((MODE) == V8DImode || (MODE) == V8DFmode || (MODE) == V64QImode \
   || (MODE) == V16SImode || (MODE) == V16SFmode || (MODE) == V32HImode \
   || (MODE) == V4TImode || (MODE) == V32HFmode || (MODE) == V32BFmode)

#define VALID_AVX512F_REG_OR_XI_MODE(MODE) \
  (VALID_AVX512F_REG_MODE (MODE) || (MODE) == XImode)

#define VALID_AVX512VL_128_REG_MODE(MODE) \
  ((MODE) == V2DImode || (MODE) == V2DFmode || (MODE) == V16QImode \
   || (MODE) == V4SImode || (MODE) == V4SFmode || (MODE) == V8HImode \
   || (MODE) == TFmode || (MODE) == V1TImode || (MODE) == V8HFmode \
   || (MODE) == V8BFmode || (MODE) == TImode)

#define VALID_AVX512FP16_REG_MODE(MODE) \
  ((MODE) == V8HFmode || (MODE) == V16HFmode || (MODE) == V32HFmode)

#define VALID_SSE2_TYPE_MODE(MODE) \
  ((MODE) == HFmode || (MODE) == BFmode \
   || (MODE) == HCmode || (MODE) == BCmode)

#define VALID_SSE2_REG_MODE(MODE) \
  ((MODE) == V16QImode || (MODE) == V8HImode || (MODE) == V2DFmode \
   || (MODE) == V8HFmode || (MODE) == V4HFmode || (MODE) == V2HFmode \
   || (MODE) == V8BFmode || (MODE) == V4BFmode || (MODE) == V2BFmode \
   || (MODE) == V4QImode || (MODE) == V2HImode || (MODE) == V1SImode \
   || (MODE) == V2DImode || (MODE) == V2QImode \
   || (MODE) == DFmode || (MODE) == DImode \
   || (MODE) == HFmode || (MODE) == BFmode)

#define VALID_SSE_REG_MODE(MODE) \
  ((MODE) == V1TImode || (MODE) == TImode \
   || (MODE) == V4SFmode || (MODE) == V4SImode \
   || (MODE) == SFmode || (MODE) == SImode \
   || (MODE) == TFmode || (MODE) == TDmode)

#define VALID_MMX_REG_MODE_3DNOW(MODE) \
  ((MODE) == V2SFmode || (MODE) == SFmode)

/* To match ia32 psABI, V4HFmode should be added here.  */
#define VALID_MMX_REG_MODE(MODE) \
  ((MODE) == V1DImode || (MODE) == DImode \
   || (MODE) == V2SImode || (MODE) == SImode \
   || (MODE) == V4HImode || (MODE) == V8QImode \
   || (MODE) == V4HFmode || (MODE) == V4BFmode)

#define VALID_MASK_REG_MODE(MODE) ((MODE) == HImode || (MODE) == QImode)

/* Wider mask modes (SImode/DImode) require AVX512BW.  */
#define VALID_MASK_AVX512BW_MODE(MODE) ((MODE) == SImode || (MODE) == DImode)

#define VALID_FP_MODE_P(MODE) \
  ((MODE) == SFmode || (MODE) == DFmode || (MODE) == XFmode \
   || (MODE) == SCmode || (MODE) == DCmode || (MODE) == XCmode)

#define VALID_INT_MODE_P(MODE) \
  ((MODE) == QImode || (MODE) == HImode \
   || (MODE) == SImode || (MODE) == DImode \
   || (MODE) == CQImode || (MODE) == CHImode \
   || (MODE) == CSImode || (MODE) == CDImode \
   || (MODE) == SDmode || (MODE) == DDmode \
   || (MODE) == HFmode || (MODE) == HCmode || (MODE) == BFmode \
   || (MODE) == V2HImode || (MODE) == V2HFmode || (MODE) == V2BFmode \
   || (MODE) == V1SImode || (MODE) == V4QImode || (MODE) == V2QImode \
   || (TARGET_64BIT \
       && ((MODE) == TImode || (MODE) == CTImode \
	   || (MODE) == TFmode || (MODE) == TCmode \
	   || (MODE) == V8QImode || (MODE) == V4HImode \
	   || (MODE) == V2SImode || (MODE) == TDmode)))

/* Return true for modes passed in SSE registers.  */
#define SSE_REG_MODE_P(MODE) \
  ((MODE) == V1TImode || (MODE) == TImode || (MODE) == V16QImode \
   || (MODE) == TFmode || (MODE) == V8HImode || (MODE) == V2DFmode \
   || (MODE) == V2DImode || (MODE) == V4SFmode || (MODE) == V4SImode \
   || (MODE) == V32QImode || (MODE) == V16HImode || (MODE) == V8SImode \
   || (MODE) == V4DImode || (MODE) == V8SFmode || (MODE) == V4DFmode \
   || (MODE) == V2TImode || (MODE) == V8DImode || (MODE) == V64QImode \
   || (MODE) == V16SImode || (MODE) == V32HImode || (MODE) == V8DFmode \
   || (MODE) == V16SFmode \
   || (MODE) == V32HFmode || (MODE) == V16HFmode || (MODE) == V8HFmode \
   || (MODE) == V32BFmode || (MODE) == V16BFmode || (MODE) == V8BFmode)

#define X87_FLOAT_MODE_P(MODE) \
  (TARGET_80387 && ((MODE) == SFmode || (MODE) == DFmode || (MODE) == XFmode))

#define SSE_FLOAT_MODE_P(MODE) \
  ((TARGET_SSE && (MODE) == SFmode) || (TARGET_SSE2 && (MODE) == DFmode))

#define SSE_FLOAT_MODE_SSEMATH_OR_HF_P(MODE) \
  ((SSE_FLOAT_MODE_P (MODE) && TARGET_SSE_MATH) \
   || (TARGET_AVX512FP16 && (MODE) == HFmode))

#define FMA4_VEC_FLOAT_MODE_P(MODE) \
  (TARGET_FMA4 && ((MODE) == V4SFmode || (MODE) == V2DFmode \
		  || (MODE) == V8SFmode || (MODE) == V4DFmode))

/* Scalar modes that embedded broadcast (AVX512) can splat.  */
#define VALID_BCST_MODE_P(MODE) \
  ((MODE) == SFmode || (MODE) == DFmode \
   || (MODE) == SImode || (MODE) == DImode \
   || (MODE) == HFmode)
1164 | |
/* It is possible to write patterns to move flags; but until someone
   does it, avoid CCmode copies entirely.  */
#define AVOID_CCMODE_COPIES

/* Specify the modes required to caller save a given hard regno.
   We do this on i386 to prevent flags from being saved at all.

   Kill any attempts to combine saving of modes.  */

#define HARD_REGNO_CALLER_SAVE_MODE(REGNO, NREGS, MODE) \
  (CC_REGNO_P (REGNO) ? VOIDmode \
   : (MODE) == VOIDmode && (NREGS) != 1 ? VOIDmode \
   : (MODE) == VOIDmode ? choose_hard_reg_mode ((REGNO), (NREGS), NULL) \
   : (MODE) == HImode && !((GENERAL_REGNO_P (REGNO) \
			    && TARGET_PARTIAL_REG_STALL) \
			   || MASK_REGNO_P (REGNO)) ? SImode \
   : (MODE) == QImode && !(ANY_QI_REGNO_P (REGNO) \
			   || MASK_REGNO_P (REGNO)) ? SImode \
   : (MODE))
1184 | |
/* Specify the registers used for certain standard purposes.
   The values of these macros are register numbers.  */

/* on the 386 the pc register is %eip, and is not usable as a general
   register.  The ordinary mov instructions won't work */
/* #define PC_REGNUM  */

/* Base register for access to arguments of the function.  */
#define ARG_POINTER_REGNUM ARGP_REG

/* Register to use for pushing function arguments.  */
#define STACK_POINTER_REGNUM SP_REG

/* Base register for access to local variables of the function.  */
#define FRAME_POINTER_REGNUM FRAME_REG
#define HARD_FRAME_POINTER_REGNUM BP_REG

/* Boundaries of the various register files, in terms of the register
   enumerators defined elsewhere (AX_REG etc.).  */
#define FIRST_INT_REG AX_REG
#define LAST_INT_REG SP_REG

#define FIRST_INDEX_REG AX_REG
#define LAST_INDEX_REG BP_REG

/* Only %eax..%ebx have byte-addressable subregisters in 32-bit mode.  */
#define FIRST_QI_REG AX_REG
#define LAST_QI_REG BX_REG

/* First & last stack-like regs */
#define FIRST_STACK_REG ST0_REG
#define LAST_STACK_REG ST7_REG

#define FIRST_SSE_REG XMM0_REG
#define LAST_SSE_REG XMM7_REG

#define FIRST_MMX_REG MM0_REG
#define LAST_MMX_REG MM7_REG

#define FIRST_REX_INT_REG R8_REG
#define LAST_REX_INT_REG R15_REG

#define FIRST_REX_SSE_REG XMM8_REG
#define LAST_REX_SSE_REG XMM15_REG

#define FIRST_EXT_REX_SSE_REG XMM16_REG
#define LAST_EXT_REX_SSE_REG XMM31_REG

#define FIRST_MASK_REG MASK0_REG
#define LAST_MASK_REG MASK7_REG

#define FIRST_REX2_INT_REG R16_REG
#define LAST_REX2_INT_REG R31_REG

/* Override this in other tm.h files to cope with various OS lossage
   requiring a frame pointer.  */
#ifndef SUBTARGET_FRAME_POINTER_REQUIRED
#define SUBTARGET_FRAME_POINTER_REQUIRED 0
#endif

/* Define the shadow offset for asan.  Other OS's can override in the
   respective tm.h files.  */
#ifndef SUBTARGET_SHADOW_OFFSET
#define SUBTARGET_SHADOW_OFFSET \
  (TARGET_LP64 ? HOST_WIDE_INT_C (0x7fff8000) : HOST_WIDE_INT_1 << 29)
#endif

/* Make sure we can access arbitrary call frames.  */
#define SETUP_FRAME_ADDRESSES()  ix86_setup_frame_addresses ()

/* Register to hold the addressing base for position independent
   code access to data items.  We don't use PIC pointer for 64bit
   mode.  Define the regnum to dummy value to prevent gcc from
   pessimizing code dealing with EBX.

   To avoid clobbering a call-saved register unnecessarily, we renumber
   the pic register when possible.  The change is visible after the
   prologue has been emitted.  */

#define REAL_PIC_OFFSET_TABLE_REGNUM  (TARGET_64BIT ? R15_REG : BX_REG)

#define PIC_OFFSET_TABLE_REGNUM \
  (ix86_use_pseudo_pic_reg () \
   ? (pic_offset_table_rtx \
      ? INVALID_REGNUM \
      : REAL_PIC_OFFSET_TABLE_REGNUM) \
   : INVALID_REGNUM)

#define GOT_SYMBOL_NAME "_GLOBAL_OFFSET_TABLE_"

/* This is overridden by <cygwin.h>.  */
#define MS_AGGREGATE_RETURN 0

#define KEEP_AGGREGATE_RETURN_POINTER 0
1276 | |
/* Define the classes of registers for register constraints in the
   machine description.  Also define ranges of constants.

   One of the classes must always be named ALL_REGS and include all hard regs.
   If there is more than one class, another class must be named NO_REGS
   and contain no registers.

   The name GENERAL_REGS must be the name of a class (or an alias for
   another name such as ALL_REGS).  This is the class of registers
   that is allowed by "g" or "r" in a register constraint.
   Also, registers outside this class are allocated only when
   instructions express preferences for them.

   The classes must be numbered in nondecreasing order; that is,
   a larger-numbered class must never be contained completely
   in a smaller-numbered class.  This is why CLOBBERED_REGS class
   is listed early, even though in 64-bit mode it contains more
   registers than just %eax, %ecx, %edx.

   For any two classes, it is very desirable that there be another
   class that represents their union.

   The flags and fpsr registers are in no class.  */

enum reg_class
{
  NO_REGS,
  AREG, DREG, CREG, BREG, SIREG, DIREG,
  AD_REGS,			/* %eax/%edx for DImode */
  CLOBBERED_REGS,		/* call-clobbered integer registers */
  Q_REGS,			/* %eax %ebx %ecx %edx */
  NON_Q_REGS,			/* %esi %edi %ebp %esp */
  TLS_GOTBASE_REGS,		/* %ebx %ecx %edx %esi %edi %ebp */
  LEGACY_GENERAL_REGS,		/* %eax %ebx %ecx %edx %esi %edi %ebp %esp */
  LEGACY_INDEX_REGS,		/* %eax %ebx %ecx %edx %esi %edi %ebp */
  GENERAL_REGS,			/* %eax %ebx %ecx %edx %esi %edi %ebp %esp
				   %r8 %r9 %r10 %r11 %r12 %r13 %r14 %r15
				   %r16 %r17 %r18 %r19 %r20 %r21 %r22 %r23
				   %r24 %r25 %r26 %r27 %r28 %r29 %r30 %r31 */
  INDEX_REGS,			/* %eax %ebx %ecx %edx %esi %edi %ebp
				   %r8 %r9 %r10 %r11 %r12 %r13 %r14 %r15
				   %r16 %r17 %r18 %r19 %r20 %r21 %r22 %r23
				   %r24 %r25 %r26 %r27 %r28 %r29 %r30 %r31 */
  GENERAL_GPR16,		/* %eax %ebx %ecx %edx %esi %edi %ebp %esp
				   %r8 %r9 %r10 %r11 %r12 %r13 %r14 %r15 */
  INDEX_GPR16,			/* %eax %ebx %ecx %edx %esi %edi %ebp
				   %r8 %r9 %r10 %r11 %r12 %r13 %r14 %r15 */
  FP_TOP_REG, FP_SECOND_REG,	/* %st(0) %st(1) */
  FLOAT_REGS,
  SSE_FIRST_REG,
  NO_REX_SSE_REGS,
  SSE_REGS,
  ALL_SSE_REGS,
  MMX_REGS,
  FLOAT_SSE_REGS,
  FLOAT_INT_REGS,
  INT_SSE_REGS,
  FLOAT_INT_SSE_REGS,
  MASK_REGS,
  ALL_MASK_REGS,
  INT_MASK_REGS,
  ALL_REGS,
  LIM_REG_CLASSES		/* sentinel: number of classes */
};
1341 | |
#define N_REG_CLASSES ((int) LIM_REG_CLASSES)

/* Subset predicates: true when CLASS is fully contained in the named
   register file.  */
#define INTEGER_CLASS_P(CLASS) \
  reg_class_subset_p ((CLASS), GENERAL_REGS)
#define FLOAT_CLASS_P(CLASS) \
  reg_class_subset_p ((CLASS), FLOAT_REGS)
#define SSE_CLASS_P(CLASS) \
  reg_class_subset_p ((CLASS), ALL_SSE_REGS)
#define INT_SSE_CLASS_P(CLASS) \
  reg_class_subset_p ((CLASS), INT_SSE_REGS)
#define MMX_CLASS_P(CLASS) \
  ((CLASS) == MMX_REGS)
#define MASK_CLASS_P(CLASS) \
  reg_class_subset_p ((CLASS), ALL_MASK_REGS)
/* Intersection predicates: true when CLASS merely overlaps the file.  */
#define MAYBE_INTEGER_CLASS_P(CLASS) \
  reg_classes_intersect_p ((CLASS), GENERAL_REGS)
#define MAYBE_FLOAT_CLASS_P(CLASS) \
  reg_classes_intersect_p ((CLASS), FLOAT_REGS)
#define MAYBE_SSE_CLASS_P(CLASS) \
  reg_classes_intersect_p ((CLASS), ALL_SSE_REGS)
#define MAYBE_MMX_CLASS_P(CLASS) \
  reg_classes_intersect_p ((CLASS), MMX_REGS)
#define MAYBE_MASK_CLASS_P(CLASS) \
  reg_classes_intersect_p ((CLASS), ALL_MASK_REGS)

#define Q_CLASS_P(CLASS) \
  reg_class_subset_p ((CLASS), Q_REGS)

#define MAYBE_NON_Q_CLASS_P(CLASS) \
  reg_classes_intersect_p ((CLASS), NON_Q_REGS)
1372 | |
/* Give names of register classes as strings for dump file.
   Must stay in one-to-one order with enum reg_class above.  */

#define REG_CLASS_NAMES \
{  "NO_REGS", \
   "AREG", "DREG", "CREG", "BREG", \
   "SIREG", "DIREG", \
   "AD_REGS", \
   "CLOBBERED_REGS", \
   "Q_REGS", "NON_Q_REGS", \
   "TLS_GOTBASE_REGS", \
   "LEGACY_GENERAL_REGS", \
   "LEGACY_INDEX_REGS", \
   "GENERAL_REGS", \
   "INDEX_REGS", \
   "GENERAL_GPR16", \
   "INDEX_GPR16", \
   "FP_TOP_REG", "FP_SECOND_REG", \
   "FLOAT_REGS", \
   "SSE_FIRST_REG", \
   "NO_REX_SSE_REGS", \
   "SSE_REGS", \
   "ALL_SSE_REGS", \
   "MMX_REGS", \
   "FLOAT_SSE_REGS", \
   "FLOAT_INT_REGS", \
   "INT_SSE_REGS", \
   "FLOAT_INT_SSE_REGS", \
   "MASK_REGS", \
   "ALL_MASK_REGS", \
   "INT_MASK_REGS", \
   "ALL_REGS" }
1404 | |
/* Define which registers fit in which classes.  This is an initializer
   for a vector of HARD_REG_SET of length N_REG_CLASSES.  Each row is
   one HARD_REG_SET written as three 32-bit words of hard-register bits,
   low word first; rows appear in the same order as REG_CLASS_NAMES
   above.

   Note that CLOBBERED_REGS are calculated by
   TARGET_CONDITIONAL_REGISTER_USAGE.  */

#define REG_CLASS_CONTENTS \
{ { 0x0, 0x0, 0x0 },       /* NO_REGS */ \
  { 0x01, 0x0, 0x0 },      /* AREG */ \
  { 0x02, 0x0, 0x0 },      /* DREG */ \
  { 0x04, 0x0, 0x0 },      /* CREG */ \
  { 0x08, 0x0, 0x0 },      /* BREG */ \
  { 0x10, 0x0, 0x0 },      /* SIREG */ \
  { 0x20, 0x0, 0x0 },      /* DIREG */ \
  { 0x03, 0x0, 0x0 },      /* AD_REGS */ \
  { 0x07, 0x0, 0x0 },      /* CLOBBERED_REGS */ \
  { 0x0f, 0x0, 0x0 },      /* Q_REGS */ \
  { 0x900f0, 0x0, 0x0 },   /* NON_Q_REGS */ \
  { 0x7e, 0xff0, 0x0 },    /* TLS_GOTBASE_REGS */ \
  { 0x900ff, 0x0, 0x0 },   /* LEGACY_GENERAL_REGS */ \
  { 0x7f, 0x0, 0x0 },      /* LEGACY_INDEX_REGS */ \
  { 0x900ff, 0xff0, 0xffff000 },  /* GENERAL_REGS */ \
  { 0x7f, 0xff0, 0xffff000 },     /* INDEX_REGS */ \
  { 0x900ff, 0xff0, 0x0 },        /* GENERAL_GPR16 */ \
  { 0x7f, 0xff0, 0x0 },           /* INDEX_GPR16 */ \
  { 0x100, 0x0, 0x0 },     /* FP_TOP_REG */ \
  { 0x200, 0x0, 0x0 },     /* FP_SECOND_REG */ \
  { 0xff00, 0x0, 0x0 },    /* FLOAT_REGS */ \
  { 0x100000, 0x0, 0x0 },  /* SSE_FIRST_REG */ \
  { 0xff00000, 0x0, 0x0 }, /* NO_REX_SSE_REGS */ \
  { 0xff00000, 0xff000, 0x0 },    /* SSE_REGS */ \
  { 0xff00000, 0xfffff000, 0xf }, /* ALL_SSE_REGS */ \
  { 0xf0000000, 0xf, 0x0 },       /* MMX_REGS */ \
  { 0xff0ff00, 0xfffff000, 0xf }, /* FLOAT_SSE_REGS */ \
  { 0x9ffff, 0xff0, 0xffff000 },  /* FLOAT_INT_REGS */ \
  { 0xff900ff, 0xfffffff0, 0xffff00f },   /* INT_SSE_REGS */ \
  { 0xff9ffff, 0xfffffff0, 0xffff00f },   /* FLOAT_INT_SSE_REGS */ \
  { 0x0, 0x0, 0xfe0 },     /* MASK_REGS */ \
  { 0x0, 0x0, 0xff0 },     /* ALL_MASK_REGS */ \
  { 0x900ff, 0xff0, 0xffffff0 },  /* INT_MASK_REGS */ \
  { 0xffffffff, 0xffffffff, 0xfffffff }   /* ALL_REGS */ \
}
1447 | |
/* The same information, inverted:
   Return the class number of the smallest class containing
   reg number REGNO.  This could be a conditional expression
   or could index an array.  Here it indexes the regclass_map
   table defined in the backend.  */

#define REGNO_REG_CLASS(REGNO) (regclass_map[(REGNO)])

/* When this hook returns true for MODE, the compiler allows
   registers explicitly used in the rtl to be used as spill registers
   but prevents the compiler from extending the lifetime of these
   registers.  Enabled for every mode on this target.  */
#define TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P hook_bool_mode_true
1460 | |
/* Predicates on hard register numbers (the ..._REGNO_P forms) and the
   corresponding checks on REG rtxes (the ..._REG_P forms, which first
   verify REG_P).  */

#define QI_REG_P(X) (REG_P (X) && QI_REGNO_P (REGNO (X)))
#define QI_REGNO_P(N) IN_RANGE ((N), FIRST_QI_REG, LAST_QI_REG)

#define LEGACY_INT_REG_P(X) (REG_P (X) && LEGACY_INT_REGNO_P (REGNO (X)))
#define LEGACY_INT_REGNO_P(N) IN_RANGE ((N), FIRST_INT_REG, LAST_INT_REG)

#define LEGACY_INDEX_REG_P(X) (REG_P (X) && LEGACY_INDEX_REGNO_P (REGNO (X)))
#define LEGACY_INDEX_REGNO_P(N) \
  IN_RANGE ((N), FIRST_INDEX_REG, LAST_INDEX_REG)

#define REX_INT_REG_P(X) (REG_P (X) && REX_INT_REGNO_P (REGNO (X)))
#define REX_INT_REGNO_P(N) \
  IN_RANGE ((N), FIRST_REX_INT_REG, LAST_REX_INT_REG)

#define REX2_INT_REG_P(X) (REG_P (X) && REX2_INT_REGNO_P (REGNO (X)))
#define REX2_INT_REGNO_P(N) \
  IN_RANGE ((N), FIRST_REX2_INT_REG, LAST_REX2_INT_REG)

/* A general register is any of the legacy, REX (r8-r15) or REX2
   (r16-r31) integer register ranges.  */
#define GENERAL_REG_P(X) (REG_P (X) && GENERAL_REGNO_P (REGNO (X)))
#define GENERAL_REGNO_P(N) \
  (LEGACY_INT_REGNO_P (N) || REX_INT_REGNO_P (N) || REX2_INT_REGNO_P (N))

#define INDEX_REG_P(X) (REG_P (X) && INDEX_REGNO_P (REGNO (X)))
#define INDEX_REGNO_P(N) \
  (LEGACY_INDEX_REGNO_P (N) || REX_INT_REGNO_P (N) || REX2_INT_REGNO_P (N))

/* General registers reachable without a REX2 prefix (legacy + r8-r15).  */
#define GENERAL_GPR16_REGNO_P(N) \
  (LEGACY_INT_REGNO_P (N) || REX_INT_REGNO_P (N))

/* In 64-bit mode every general register has a QImode low part; in
   32-bit mode only a/b/c/d do.  */
#define ANY_QI_REG_P(X) (REG_P (X) && ANY_QI_REGNO_P (REGNO (X)))
#define ANY_QI_REGNO_P(N) \
  (TARGET_64BIT ? GENERAL_REGNO_P (N) : QI_REGNO_P (N))

#define STACK_REG_P(X) (REG_P (X) && STACK_REGNO_P (REGNO (X)))
#define STACK_REGNO_P(N) IN_RANGE ((N), FIRST_STACK_REG, LAST_STACK_REG)

#define SSE_REG_P(X) (REG_P (X) && SSE_REGNO_P (REGNO (X)))
#define SSE_REGNO_P(N) \
  (LEGACY_SSE_REGNO_P (N) \
   || REX_SSE_REGNO_P (N) \
   || EXT_REX_SSE_REGNO_P (N))

#define LEGACY_SSE_REGNO_P(N) \
  IN_RANGE ((N), FIRST_SSE_REG, LAST_SSE_REG)

#define REX_SSE_REGNO_P(N) \
  IN_RANGE ((N), FIRST_REX_SSE_REG, LAST_REX_SSE_REG)

#define EXT_REX_SSE_REG_P(X) (REG_P (X) && EXT_REX_SSE_REGNO_P (REGNO (X)))

#define EXT_REX_SSE_REGNO_P(N) \
  IN_RANGE ((N), FIRST_EXT_REX_SSE_REG, LAST_EXT_REX_SSE_REG)

#define ANY_FP_REG_P(X) (REG_P (X) && ANY_FP_REGNO_P (REGNO (X)))
#define ANY_FP_REGNO_P(N) (STACK_REGNO_P (N) || SSE_REGNO_P (N))

#define MASK_REG_P(X) (REG_P (X) && MASK_REGNO_P (REGNO (X)))
#define MASK_REGNO_P(N) IN_RANGE ((N), FIRST_MASK_REG, LAST_MASK_REG)
/* Even-numbered mask register; only meaningful when N is already known
   to be a mask register number (no range check is performed here).  */
#define MASK_PAIR_REGNO_P(N) ((((N) - FIRST_MASK_REG) & 1) == 0)

#define MMX_REG_P(X) (REG_P (X) && MMX_REGNO_P (REGNO (X)))
#define MMX_REGNO_P(N) IN_RANGE ((N), FIRST_MMX_REG, LAST_MMX_REG)

#define CC_REG_P(X) (REG_P (X) && CC_REGNO_P (REGNO (X)))
#define CC_REGNO_P(X) ((X) == FLAGS_REG)

/* SSE registers whose number is a multiple of 4 (xmm0/4/8/...).  */
#define MOD4_SSE_REG_P(X) (REG_P (X) && MOD4_SSE_REGNO_P (REGNO (X)))
#define MOD4_SSE_REGNO_P(N) ((N) == XMM0_REG \
			     || (N) == XMM4_REG \
			     || (N) == XMM8_REG \
			     || (N) == XMM12_REG \
			     || (N) == XMM16_REG \
			     || (N) == XMM20_REG \
			     || (N) == XMM24_REG \
			     || (N) == XMM28_REG)

/* First floating point reg */
#define FIRST_FLOAT_REG FIRST_STACK_REG
#define STACK_TOP_P(X) (REG_P (X) && REGNO (X) == FIRST_FLOAT_REG)

/* Map a linear SSE index 0..31 onto the hard register number, spanning
   the three discontiguous register-number ranges (xmm0-7, xmm8-15,
   xmm16-31).  */
#define GET_SSE_REGNO(N)			\
  ((N) < 8 ? FIRST_SSE_REG + (N)		\
   : (N) < 16 ? FIRST_REX_SSE_REG + (N) - 8	\
   : FIRST_EXT_REX_SSE_REG + (N) - 16)
1545 | |
/* The class value for index registers, and the one for base regs.  */

#define INDEX_REG_CLASS INDEX_REGS
#define BASE_REG_CLASS GENERAL_REGS

/* Stack layout; function entry, exit and calling.  */

/* Define this if pushing a word on the stack
   makes the stack pointer a smaller address.  */
#define STACK_GROWS_DOWNWARD 1

/* Define this to nonzero if the nominal address of the stack frame
   is at the high-address end of the local variables;
   that is, each additional local variable allocated
   goes at a more negative offset in the frame.  */
#define FRAME_GROWS_DOWNWARD 1

/* Push rounding is mode-dependent; defer to the backend helper.  */
#define PUSH_ROUNDING(BYTES) ix86_push_rounding (BYTES)

/* If defined, the maximum amount of space required for outgoing arguments
   will be computed and placed into the variable `crtl->outgoing_args_size'.
   No space will be pushed onto the stack for each call; instead, the
   function prologue should increase the stack frame size by this amount.

   In 32bit mode enabling argument accumulation results in about 5% code size
   growth because move instructions are less compact than push.  In 64bit
   mode the difference is less drastic but visible.

   FIXME: Unlike earlier implementations, the size of unwind info seems to
   actually grow with accumulation.  Is that because accumulated args
   unwind info became unnecessarily bloated?

   With the 64-bit MS ABI, we can generate correct code with or without
   accumulated args, but because of OUTGOING_REG_PARM_STACK_SPACE the code
   generated without accumulated args is terrible.

   If stack probes are required, the space used for large function
   arguments on the stack must also be probed, so enable
   -maccumulate-outgoing-args so this happens in the prologue.

   We must use argument accumulation in interrupt function if stack
   may be realigned to avoid DRAP.  */

#define ACCUMULATE_OUTGOING_ARGS \
  ((TARGET_ACCUMULATE_OUTGOING_ARGS \
    && optimize_function_for_speed_p (cfun)) \
   || (cfun->machine->func_type != TYPE_NORMAL \
       && crtl->stack_realign_needed) \
   || TARGET_STACK_PROBE \
   || TARGET_64BIT_MS_ABI \
   || (TARGET_MACHO && crtl->profile))

/* We want the stack and args grow in opposite directions, even if
   targetm.calls.push_argument returns false.  */
#define PUSH_ARGS_REVERSED 1
1601 | |
/* Offset of first parameter from the argument pointer register value.  */
#define FIRST_PARM_OFFSET(FNDECL) 0

/* Define this macro if functions should assume that stack space has been
   allocated for arguments even when their values are passed in registers.

   The value of this macro is the size, in bytes, of the area reserved for
   arguments passed in registers for the function represented by FNDECL.

   This space can be allocated by the caller, or be a part of the
   machine-dependent stack frame: `OUTGOING_REG_PARM_STACK_SPACE' says
   which.  */
#define REG_PARM_STACK_SPACE(FNDECL) ix86_reg_parm_stack_space (FNDECL)

/* Nonzero only for the 64-bit MS ABI, where the caller allocates the
   home area for register parameters.  */
#define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) \
  (TARGET_64BIT && ix86_function_type_abi (FNTYPE) == MS_ABI)

/* Define how to find the value returned by a library function
   assuming the value has mode MODE.  */

#define LIBCALL_VALUE(MODE) ix86_libcall_value (MODE)

/* Define the size of the result block used for communication between
   untyped_call and untyped_return.  The block contains a DImode value
   followed by the block used by fnsave and frstor.  */

#define APPLY_RESULT_SIZE (8+108)

/* 1 if N is a possible register number for function argument passing.  */
#define FUNCTION_ARG_REGNO_P(N) ix86_function_arg_regno_p (N)
1632 | |
/* Define a data type for recording info about an argument list
   during the scan of that argument list.  This data type should
   hold all necessary information about the function itself
   and about the args processed so far, enough to enable macros
   such as FUNCTION_ARG to determine where the next arg should go.  */

typedef struct ix86_args {
  int words;			/* # words passed so far */
  int nregs;			/* # registers available for passing */
  int regno;			/* next available register number */
  int fastcall;			/* fastcall or thiscall calling convention
				   is used */
  int sse_words;		/* # sse words passed so far */
  int sse_nregs;		/* # sse registers available for passing */
  int warn_avx512f;		/* True when we want to warn
				   about AVX512F ABI.  */
  int warn_avx;			/* True when we want to warn about AVX ABI.  */
  int warn_sse;			/* True when we want to warn about SSE ABI.  */
  int warn_mmx;			/* True when we want to warn about MMX ABI.  */
  int warn_empty;		/* True when we want to warn about empty classes
				   passing ABI change.  */
  int sse_regno;		/* next available sse register number */
  int mmx_words;		/* # mmx words passed so far */
  int mmx_nregs;		/* # mmx registers available for passing */
  int mmx_regno;		/* next available mmx register number */
  int maybe_vaarg;		/* true for calls to possibly variadic
				   functions.  */
  int caller;			/* true if it is caller.  */
  int float_in_sse;		/* Set to 1 or 2 for 32bit targets if
				   SFmode/DFmode arguments should be passed
				   in SSE registers.  Otherwise 0.  */
  int stdarg;			/* Set to 1 if function is stdarg.  */
  enum calling_abi call_abi;	/* Set to SYSV_ABI for sysv abi.  Otherwise
				   MS_ABI for ms abi.  */
  tree decl;			/* Callee decl.  */
} CUMULATIVE_ARGS;
1668 | |
/* Initialize a variable CUM of type CUMULATIVE_ARGS
   for a call to a function whose data type is FNTYPE.
   For a library call, FNTYPE is 0.  N_NAMED_ARGS is -1 for libcalls,
   so the last argument tells init_cumulative_args whether named-arg
   information is available.  */

#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
  init_cumulative_args (&(CUM), (FNTYPE), (LIBNAME), (FNDECL), \
			(N_NAMED_ARGS) != -1)

/* Output assembler code to FILE to increment profiler label # LABELNO
   for profiling a function entry.  */

#define FUNCTION_PROFILER(FILE, LABELNO) \
  x86_function_profiler ((FILE), (LABELNO))

#define MCOUNT_NAME "_mcount"

/* Name of the counter entry point used with -mfentry, called before
   the prologue.  */
#define MCOUNT_NAME_BEFORE_PROLOGUE "__fentry__"

#define PROFILE_COUNT_REGISTER "edx"

/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
   the stack pointer does not matter.  The value is tested only in
   functions that have frame pointers.
   No definition is equivalent to always zero.  */
/* Note on the 386 it might be more efficient not to define this since
   we have to restore it ourselves from the frame pointer, in order to
   use pop */

#define EXIT_IGNORE_STACK 1

/* Define this macro as a C expression that is nonzero for registers
   used by the epilogue or the `return' pattern.  */

#define EPILOGUE_USES(REGNO) ix86_epilogue_uses (REGNO)

/* Output assembler code for a block containing the constant parts
   of a trampoline, leaving space for the variable parts.  */

/* On the 386, the trampoline contains two instructions:
     mov #STATIC,ecx
     jmp FUNCTION
   The trampoline is generated entirely at runtime.  The operand of JMP
   is the address of FUNCTION relative to the instruction following the
   JMP (which is 5 bytes long).  */

/* Length in units of the trampoline for entering a nested function.  */

#define TRAMPOLINE_SIZE (TARGET_64BIT ? 28 : 14)
1717 | |
/* Definitions for register eliminations.

   This is an array of structures.  Each structure initializes one pair
   of eliminable registers.  The "from" register number is given first,
   followed by "to".  Eliminations of the same "from" register are listed
   in order of preference.

   There are two registers that can always be eliminated on the i386.
   The frame pointer and the arg pointer can be replaced by either the
   hard frame pointer or to the stack pointer, depending upon the
   circumstances.  The hard frame pointer is not used before reload and
   so it is not eligible for elimination.

   NOTE(review): the stray '\' after the closing braces splices the
   following blank line into the macro; harmless, but kept as-is.  */

#define ELIMINABLE_REGS \
{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
 { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
 { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
 { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}} \

/* Define the offset between two registers, one to be eliminated, and the other
   its replacement, at the start of a routine.  */

#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
  ((OFFSET) = ix86_initial_elimination_offset ((FROM), (TO)))
1742 | |
/* Addressing modes, and classification of registers for them.  */

/* Macros to check register numbers against specific register classes.  */

/* These assume that REGNO is a hard or pseudo reg number.
   They give nonzero only if REGNO is a hard reg of the suitable class
   or a pseudo reg currently allocated to a suitable hard reg.
   Since they use reg_renumber, they are safe only once reg_renumber
   has been allocated, which happens in reginfo.cc during register
   allocation.  */

#define REGNO_OK_FOR_INDEX_P(REGNO) \
  (INDEX_REGNO_P (REGNO) \
   || INDEX_REGNO_P (reg_renumber[(REGNO)]))

#define REGNO_OK_FOR_BASE_P(REGNO) \
  (GENERAL_REGNO_P (REGNO) \
   || (REGNO) == ARG_POINTER_REGNUM \
   || (REGNO) == FRAME_POINTER_REGNUM \
   || GENERAL_REGNO_P (reg_renumber[(REGNO)]))

/* Non strict versions, pseudos are ok.  */
#define REGNO_OK_FOR_INDEX_NONSTRICT_P(REGNO) \
  (INDEX_REGNO_P (REGNO) \
   || !HARD_REGISTER_NUM_P (REGNO))

#define REGNO_OK_FOR_BASE_NONSTRICT_P(REGNO) \
  (GENERAL_REGNO_P (REGNO) \
   || (REGNO) == ARG_POINTER_REGNUM \
   || (REGNO) == FRAME_POINTER_REGNUM \
   || !HARD_REGISTER_NUM_P (REGNO))

/* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
   that is a valid memory address for an instruction.
   The MODE argument is the machine mode for the MEM expression
   that wants to use this address.

   The other macros defined here are used only in TARGET_LEGITIMATE_ADDRESS_P,
   except for CONSTANT_ADDRESS_P which is usually machine-independent.

   See legitimize_pic_address in i386.cc for details as to what
   constitutes a legitimate address when -fpic is used.  */

/* An x86 address can use at most one base and one index register.  */
#define MAX_REGS_PER_ADDRESS 2

#define CONSTANT_ADDRESS_P(X) constant_address_p (X)

/* If defined, a C expression to determine the base term of address X.
   This macro is used in only one place: `find_base_term' in alias.cc.

   It is always safe for this macro to not be defined.  It exists so
   that alias analysis can understand machine-dependent addresses.

   The typical use of this macro is to handle addresses containing
   a label_ref or symbol_ref within an UNSPEC.  */

#define FIND_BASE_TERM(X) ix86_find_base_term (X)

/* Nonzero if the constant value X is a legitimate general operand
   when generating PIC code.  It is given that flag_pic is on and
   that X satisfies CONSTANT_P or is a CONST_DOUBLE.  */

#define LEGITIMATE_PIC_OPERAND_P(X) legitimate_pic_operand_p (X)

/* Strip a single unary operator (e.g. NEG, NOT) off X, if present.  */
#define STRIP_UNARY(X) (UNARY_P (X) ? XEXP (X, 0) : X)

/* Nonzero if X mentions a symbol or label, directly or inside a CONST.  */
#define SYMBOLIC_CONST(X) \
  (GET_CODE (X) == SYMBOL_REF \
   || GET_CODE (X) == LABEL_REF \
   || (GET_CODE (X) == CONST && symbolic_reference_mentioned_p (X)))
1813 | |
/* Max number of args passed in registers.  If this is more than 3, we will
   have problems with ebx (register #4), since it is a caller save register and
   is also used as the pic register in ELF.  So for now, don't allow more than
   3 registers to be passed in registers.  */

/* Abi specific values for REGPARM_MAX and SSE_REGPARM_MAX */
#define X86_64_REGPARM_MAX 6
#define X86_64_MS_REGPARM_MAX 4

#define X86_32_REGPARM_MAX 3

/* Select the per-ABI limit for integer register parameters.  */
#define REGPARM_MAX \
  (TARGET_64BIT \
   ? (TARGET_64BIT_MS_ABI \
      ? X86_64_MS_REGPARM_MAX \
      : X86_64_REGPARM_MAX) \
   : X86_32_REGPARM_MAX)

#define X86_64_SSE_REGPARM_MAX 8
#define X86_64_MS_SSE_REGPARM_MAX 4

/* 32-bit SSE register parameters require SSE; Darwin allows one more.  */
#define X86_32_SSE_REGPARM_MAX (TARGET_SSE ? (TARGET_MACHO ? 4 : 3) : 0)

#define SSE_REGPARM_MAX \
  (TARGET_64BIT \
   ? (TARGET_64BIT_MS_ABI \
      ? X86_64_MS_SSE_REGPARM_MAX \
      : X86_64_SSE_REGPARM_MAX) \
   : X86_32_SSE_REGPARM_MAX)

#define X86_32_MMX_REGPARM_MAX (TARGET_MMX ? (TARGET_MACHO ? 0 : 3) : 0)

/* MMX registers are never used for parameter passing in 64-bit mode.  */
#define MMX_REGPARM_MAX (TARGET_64BIT ? 0 : X86_32_MMX_REGPARM_MAX)
1847 | |
/* Specify the machine mode that this machine uses
   for the index in the tablejump instruction.  */
#define CASE_VECTOR_MODE \
  (!TARGET_LP64 || (flag_pic && ix86_cmodel != CM_LARGE_PIC) ? SImode : DImode)

/* Define this as 1 if `char' should by default be signed; else as 0.  */
#define DEFAULT_SIGNED_CHAR 1

/* The constant maximum number of bytes that a single instruction can
   move quickly between memory and registers or between two memory
   locations.  64 corresponds to a full ZMM register.  */
#define MAX_MOVE_MAX 64

/* Max number of bytes we can move from memory to memory in one
   reasonably fast instruction, as opposed to MOVE_MAX_PIECES which
   is the number of bytes at a time which we can move efficiently.
   MOVE_MAX_PIECES defaults to MOVE_MAX.
   Selects 64 (ZMM), 32 (YMM), 16 (XMM) or UNITS_PER_WORD depending on
   the enabled ISA and the -mmove-max/-mstore-max settings.  */

#define MOVE_MAX \
  ((TARGET_AVX512F && TARGET_EVEX512 \
    && (ix86_move_max == PVW_AVX512 \
	|| ix86_store_max == PVW_AVX512)) \
   ? 64 \
   : ((TARGET_AVX \
       && (ix86_move_max >= PVW_AVX256 \
	   || ix86_store_max >= PVW_AVX256)) \
      ? 32 \
      : ((TARGET_SSE2 \
	  && TARGET_SSE_UNALIGNED_LOAD_OPTIMAL \
	  && TARGET_SSE_UNALIGNED_STORE_OPTIMAL) \
	 ? 16 : UNITS_PER_WORD)))

/* STORE_MAX_PIECES is the number of bytes at a time that we can store
   efficiently.  Allow 16/32/64 bytes only if inter-unit move is enabled
   since vec_duplicate enabled by inter-unit move is used to implement
   store_by_pieces of 16/32/64 bytes.  */
#define STORE_MAX_PIECES \
  (TARGET_INTER_UNIT_MOVES_TO_VEC \
   ? ((TARGET_AVX512F && TARGET_EVEX512 && ix86_store_max == PVW_AVX512) \
      ? 64 \
      : ((TARGET_AVX \
	  && ix86_store_max >= PVW_AVX256) \
	 ? 32 \
	 : ((TARGET_SSE2 \
	     && TARGET_SSE_UNALIGNED_STORE_OPTIMAL) \
	    ? 16 : UNITS_PER_WORD))) \
   : UNITS_PER_WORD)
1895 | |
/* If a memory-to-memory move would take MOVE_RATIO or more simple
   move-instruction pairs, we will do a cpymem or libcall instead.
   Increasing the value will always make code faster, but eventually
   incurs high cost in increased code size.

   If you don't define this, a reasonable default is used.  */

#define MOVE_RATIO(speed) ((speed) ? ix86_cost->move_ratio : 3)

/* If a clear memory operation would take CLEAR_RATIO or more simple
   move-instruction sequences, we will do a clrmem or libcall instead.  */

#define CLEAR_RATIO(speed) ((speed) ? ix86_cost->clear_ratio : 2)

/* Define if shifts truncate the shift count which implies one can
   omit a sign-extension or zero-extension of a shift count.

   On i386, shifts do truncate the count.  But bit test instructions
   take the modulo of the bit offset operand.  */

/* #define SHIFT_COUNT_TRUNCATED */

/* A macro to update M and UNSIGNEDP when an object whose type is
   TYPE and which has the specified mode and signedness is to be
   stored in a register.  This macro is only called when TYPE is a
   scalar type.

   On i386 it is sometimes useful to promote HImode and QImode
   quantities to SImode.  The choice depends on target type.  */

#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
do {							\
  if (((MODE) == HImode && TARGET_PROMOTE_HI_REGS)	\
      || ((MODE) == QImode && TARGET_PROMOTE_QI_REGS))	\
    (MODE) = SImode;					\
} while (0)

/* Specify the machine mode that pointers have.
   After generation of rtl, the compiler makes no further distinction
   between pointers and any other objects of this machine mode.  */
#define Pmode (ix86_pmode == PMODE_DI ? DImode : SImode)

/* Supply a definition of STACK_SAVEAREA_MODE for emit_stack_save.
   NONLOCAL needs space to save both shadow stack and stack pointers.

   FIXME: We only need to save and restore stack pointer in ptr_mode.
   But expand_builtin_setjmp_setup and expand_builtin_longjmp use Pmode
   to save and restore stack pointer.  See
   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84150
 */
#define STACK_SAVEAREA_MODE(LEVEL) \
  ((LEVEL) == SAVE_NONLOCAL ? (TARGET_64BIT ? TImode : DImode) : Pmode)

/* Specify the machine_mode of the size increment
   operand of an 'allocate_stack' named pattern.  */
#define STACK_SIZE_MODE Pmode

/* A C expression whose value is zero if pointers that need to be extended
   from being `POINTER_SIZE' bits wide to `Pmode' are sign-extended and
   greater than zero if they are zero-extended and less than zero if the
   ptr_extend instruction should be used.  */

#define POINTERS_EXTEND_UNSIGNED 1

/* A function address in a call instruction
   is a byte address (for indexing purposes)
   so give the MEM rtx a byte's mode.  */
#define FUNCTION_MODE QImode
1964 | |
1965 | |
/* A C expression for the cost of a branch instruction.  A value of 1
   is the default; other values are interpreted relative to that.
   When not optimizing for speed the cost is a flat 2; predictable
   branches are treated as free.  */

#define BRANCH_COST(speed_p, predictable_p) \
  (!(speed_p) ? 2 : (predictable_p) ? 0 : ix86_branch_cost)

/* An integer expression for the size in bits of the largest integer machine
   mode that should actually be used.  We allow pairs of registers.  */
#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (TARGET_64BIT ? TImode : DImode)

/* Define this macro as a C expression which is nonzero if accessing
   less than a word of memory (i.e. a `char' or a `short') is no
   faster than accessing a word of memory, i.e., if such access
   require more than one instruction or if there is no difference in
   cost between byte and (aligned) word loads.

   When this macro is not defined, the compiler will access a field by
   finding the smallest containing object; when it is defined, a
   fullword load will be used if alignment permits.  Unless bytes
   accesses are faster than word accesses, using word accesses is
   preferable since it may eliminate subsequent memory access if
   subsequent accesses occur to other fields in the same word of the
   structure, but to different bytes.  */

#define SLOW_BYTE_ACCESS 0

/* Define this macro if it is as good or better to call a constant
   function address than to call an address kept in a register.

   Desirable on the 386 because a CALL with a constant address is
   faster than one with a register address.  */

#define NO_FUNCTION_CSE 1

/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
   return the mode to be used for the comparison.

   For floating-point equality comparisons, CCFPEQmode should be used.
   VOIDmode should be used in all other cases.

   For integer comparisons against zero, reduce to CCNOmode or CCZmode if
   possible, to allow for more combinations.  */

#define SELECT_CC_MODE(OP, X, Y) ix86_cc_mode ((OP), (X), (Y))

/* Return nonzero if MODE implies a floating point inequality can be
   reversed.  */

#define REVERSIBLE_CC_MODE(MODE) 1

/* A C expression whose value is reversed condition code of the CODE for
   comparison done in CC_MODE mode.  */
#define REVERSE_CONDITION(CODE, MODE) ix86_reverse_condition ((CODE), (MODE))
2019 | |
2020 | |
/* Control the assembler format that we output, to the extent
   this does not vary between assemblers.  */

/* How to refer to registers in assembler output.
   This sequence is indexed by compiler's hard-register-number (see above).  */

/* In order to refer to the first 8 regs as 32-bit regs, prefix an "e".
   For non floating point regs, the following are the HImode names.

   For float regs, the stack top is sometimes referred to as "%st(0)"
   instead of just "%st".  TARGET_PRINT_OPERAND handles this with the
   "y" code.  */

#define HI_REGISTER_NAMES						\
{"ax","dx","cx","bx","si","di","bp","sp",				\
 "st","st(1)","st(2)","st(3)","st(4)","st(5)","st(6)","st(7)",		\
 "argp", "flags", "fpsr", "frame",					\
 "xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7",		\
 "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",		\
 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",			\
 "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",	\
 "xmm16", "xmm17", "xmm18", "xmm19",					\
 "xmm20", "xmm21", "xmm22", "xmm23",					\
 "xmm24", "xmm25", "xmm26", "xmm27",					\
 "xmm28", "xmm29", "xmm30", "xmm31",					\
 "k0", "k1", "k2", "k3", "k4", "k5", "k6", "k7",			\
 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",		\
 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31" }

#define REGISTER_NAMES HI_REGISTER_NAMES

/* QImode names for the low byte of each of the first 8 GPRs.  */
#define QI_REGISTER_NAMES \
{"al", "dl", "cl", "bl", "sil", "dil", "bpl", "spl"}

/* QImode names for the high byte of a/d/c/b (no REX equivalents).  */
#define QI_HIGH_REGISTER_NAMES \
{"ah", "dh", "ch", "bh"}

/* Table of additional register names to use in user input.
   Aliases (32/64-bit, byte, and ymm/zmm views) all map back to the
   same hard register numbers.  */

#define ADDITIONAL_REGISTER_NAMES \
{ \
  { "eax", AX_REG }, { "edx", DX_REG }, { "ecx", CX_REG }, { "ebx", BX_REG }, \
  { "esi", SI_REG }, { "edi", DI_REG }, { "ebp", BP_REG }, { "esp", SP_REG }, \
  { "rax", AX_REG }, { "rdx", DX_REG }, { "rcx", CX_REG }, { "rbx", BX_REG }, \
  { "rsi", SI_REG }, { "rdi", DI_REG }, { "rbp", BP_REG }, { "rsp", SP_REG }, \
  { "al", AX_REG }, { "dl", DX_REG }, { "cl", CX_REG }, { "bl", BX_REG }, \
  { "sil", SI_REG }, { "dil", DI_REG }, { "bpl", BP_REG }, { "spl", SP_REG }, \
  { "ah", AX_REG }, { "dh", DX_REG }, { "ch", CX_REG }, { "bh", BX_REG }, \
  { "ymm0", XMM0_REG }, { "ymm1", XMM1_REG }, { "ymm2", XMM2_REG }, { "ymm3", XMM3_REG }, \
  { "ymm4", XMM4_REG }, { "ymm5", XMM5_REG }, { "ymm6", XMM6_REG }, { "ymm7", XMM7_REG }, \
  { "ymm8", XMM8_REG }, { "ymm9", XMM9_REG }, { "ymm10", XMM10_REG }, { "ymm11", XMM11_REG }, \
  { "ymm12", XMM12_REG }, { "ymm13", XMM13_REG }, { "ymm14", XMM14_REG }, { "ymm15", XMM15_REG }, \
  { "ymm16", XMM16_REG }, { "ymm17", XMM17_REG }, { "ymm18", XMM18_REG }, { "ymm19", XMM19_REG }, \
  { "ymm20", XMM20_REG }, { "ymm21", XMM21_REG }, { "ymm22", XMM22_REG }, { "ymm23", XMM23_REG }, \
  { "ymm24", XMM24_REG }, { "ymm25", XMM25_REG }, { "ymm26", XMM26_REG }, { "ymm27", XMM27_REG }, \
  { "ymm28", XMM28_REG }, { "ymm29", XMM29_REG }, { "ymm30", XMM30_REG }, { "ymm31", XMM31_REG }, \
  { "zmm0", XMM0_REG }, { "zmm1", XMM1_REG }, { "zmm2", XMM2_REG }, { "zmm3", XMM3_REG }, \
  { "zmm4", XMM4_REG }, { "zmm5", XMM5_REG }, { "zmm6", XMM6_REG }, { "zmm7", XMM7_REG }, \
  { "zmm8", XMM8_REG }, { "zmm9", XMM9_REG }, { "zmm10", XMM10_REG }, { "zmm11", XMM11_REG }, \
  { "zmm12", XMM12_REG }, { "zmm13", XMM13_REG }, { "zmm14", XMM14_REG }, { "zmm15", XMM15_REG }, \
  { "zmm16", XMM16_REG }, { "zmm17", XMM17_REG }, { "zmm18", XMM18_REG }, { "zmm19", XMM19_REG }, \
  { "zmm20", XMM20_REG }, { "zmm21", XMM21_REG }, { "zmm22", XMM22_REG }, { "zmm23", XMM23_REG }, \
  { "zmm24", XMM24_REG }, { "zmm25", XMM25_REG }, { "zmm26", XMM26_REG }, { "zmm27", XMM27_REG }, \
  { "zmm28", XMM28_REG }, { "zmm29", XMM29_REG }, { "zmm30", XMM30_REG }, { "zmm31", XMM31_REG } \
}
2086 | |
/* How to renumber registers for gdb: map a GCC hard register number to
   the debug-format register number, selecting the 64-bit or 32-bit
   mapping table at runtime.  */

#define DEBUGGER_REGNO(N) \
  (TARGET_64BIT ? debugger64_register_map[(N)] : debugger_register_map[(N)])

/* The mapping tables themselves are defined in the target .cc files.  */
extern int const debugger_register_map[FIRST_PSEUDO_REGISTER];
extern int const debugger64_register_map[FIRST_PSEUDO_REGISTER];
extern int const svr4_debugger_register_map[FIRST_PSEUDO_REGISTER];
2095 | |
/* Before the prologue, RA is at 0(%esp).  */
#define INCOMING_RETURN_ADDR_RTX \
  gen_rtx_MEM (Pmode, stack_pointer_rtx)

/* After the prologue, RA is at -4(AP) in the current frame.  For
   COUNT > 0 (an outer frame), the RA is one word above the given
   frame address.  */
#define RETURN_ADDR_RTX(COUNT, FRAME)					\
  ((COUNT) == 0								\
   ? gen_rtx_MEM (Pmode, plus_constant (Pmode, arg_pointer_rtx,		\
					-UNITS_PER_WORD))		\
   : gen_rtx_MEM (Pmode, plus_constant (Pmode, (FRAME), UNITS_PER_WORD)))

/* PC is dbx register 8; let's use that column for RA.  */
#define DWARF_FRAME_RETURN_COLUMN 	(TARGET_64BIT ? 16 : 8)

/* Before the prologue, there are return address and error code for
   exception handler on the top of the frame.  */
#define INCOMING_FRAME_SP_OFFSET \
  (cfun->machine->func_type == TYPE_EXCEPTION \
   ? 2 * UNITS_PER_WORD : UNITS_PER_WORD)

/* The value of INCOMING_FRAME_SP_OFFSET the assembler assumes in
   .cfi_startproc.  */
#define DEFAULT_INCOMING_FRAME_SP_OFFSET UNITS_PER_WORD

/* Describe how we implement __builtin_eh_return.  The EH data registers
   are the hard regnos up to and including DX_REG; the stack adjustment
   is passed in CX.  */
#define EH_RETURN_DATA_REGNO(N)	((N) <= DX_REG ? (N) : INVALID_REGNUM)
#define EH_RETURN_STACKADJ_RTX	gen_rtx_REG (Pmode, CX_REG)
2123 | |
2124 | |
/* Select a format to encode pointers in exception handling data.  CODE
   is 0 for data, 1 for code labels, 2 for function pointers.  GLOBAL is
   true if the symbol may be affected by dynamic relocations.
   The actual decision is deferred to a function in the .cc file.

   ??? All x86 object file formats are capable of representing this.
   After all, the relocation needed is the same as for the call insn.
   Whether or not a particular assembler allows us to enter such, I
   guess we'll have to see.  */
#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL)			\
  asm_preferred_eh_data_format ((CODE), (GLOBAL))
2135 | |
/* These are a couple of extensions to the formats accepted
   by asm_fprintf:
     %z prints out opcode suffix for word-mode instruction
     %r prints out word-mode name for reg_names[arg]
   Note this expands as case labels inside asm_fprintf's switch, so the
   bodies end with break (or fall out of the enclosing braces).  */
#define ASM_FPRINTF_EXTENSIONS(FILE, ARGS, P)		\
  case 'z':						\
    fputc (TARGET_64BIT ? 'q' : 'l', (FILE));		\
    break;						\
							\
  case 'r':						\
    {							\
      unsigned int regno = va_arg ((ARGS), int);	\
      /* Legacy integer regs need the e/r size prefix	\
	 prepended to their bare name.  */		\
      if (LEGACY_INT_REGNO_P (regno))			\
	fputc (TARGET_64BIT ? 'r' : 'e', (FILE));	\
      fputs (reg_names[regno], (FILE));			\
      break;						\
    }

/* This is how to output an insn to push a register on the stack.  */

#define ASM_OUTPUT_REG_PUSH(FILE, REGNO)  \
  asm_fprintf ((FILE), "\tpush%z\t%%%r\n", (REGNO))

/* This is how to output an insn to pop a register from the stack.  */

#define ASM_OUTPUT_REG_POP(FILE, REGNO)  \
  asm_fprintf ((FILE), "\tpop%z\t%%%r\n", (REGNO))
2163 | |
/* This is how to output an element of a case-vector that is absolute.  */

#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE)  \
  ix86_output_addr_vec_elt ((FILE), (VALUE))

/* This is how to output an element of a case-vector that is relative.  */

#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
  ix86_output_addr_diff_elt ((FILE), (VALUE), (REL))

/* When we see %v, we will print the 'v' prefix if TARGET_AVX is true;
   otherwise we skip both characters of the "%v" escape.  */

#define ASM_OUTPUT_AVX_PREFIX(STREAM, PTR)	\
{						\
  if ((PTR)[0] == '%' && (PTR)[1] == 'v')	\
    (PTR) += TARGET_AVX ? 1 : 2;		\
}
2181 | |
/* A C statement or statements which output an assembler instruction
   opcode to the stdio stream STREAM.  The macro-operand PTR is a
   variable of type `char *' which points to the opcode name in
   its "internal" form--the form that is written in the machine
   description.  For x86 the only rewriting needed is the %v AVX
   prefix handling above.  */

#define ASM_OUTPUT_OPCODE(STREAM, PTR) \
  ASM_OUTPUT_AVX_PREFIX ((STREAM), (PTR))

/* A C statement to output to the stdio stream FILE an assembler
   command to pad the location counter to a multiple of 1<<LOG
   bytes if it is within MAX_SKIP bytes.  A MAX_SKIP of 0, or one
   that covers the whole alignment, emits a plain .p2align.  */

#ifdef HAVE_GAS_MAX_SKIP_P2ALIGN
# define ASM_OUTPUT_MAX_SKIP_ALIGN(FILE,LOG,MAX_SKIP)			\
  do {									\
    if ((LOG) != 0) {							\
      if ((MAX_SKIP) == 0 || (MAX_SKIP) >= (1 << (LOG)) - 1)		\
	fprintf ((FILE), "\t.p2align %d\n", (LOG));			\
      else								\
	fprintf ((FILE), "\t.p2align %d,,%d\n", (LOG), (MAX_SKIP));	\
    }									\
  } while (0)
#endif
2206 | |
/* Write the extra assembler code needed to declare a function
   properly.  Delegated to the target hook implementation.  */

#undef ASM_OUTPUT_FUNCTION_LABEL
#define ASM_OUTPUT_FUNCTION_LABEL(FILE, NAME, DECL) \
  ix86_asm_output_function_label ((FILE), (NAME), (DECL))
2213 | |
/* A C statement (sans semicolon) to output a reference to SYMBOL_REF SYM.
   If not defined, assemble_name will be used to output the name of the
   symbol.  This macro may be used to modify the way a symbol is referenced
   depending on information encoded by TARGET_ENCODE_SECTION_INFO.

   Note: the body refers to its SYM parameter rather than assuming the
   caller's rtx variable happens to be spelled `x'.  */

#ifndef ASM_OUTPUT_SYMBOL_REF
#define ASM_OUTPUT_SYMBOL_REF(FILE, SYM) \
  do {							\
    const char *name				        \
      = assemble_name_resolve (XSTR ((SYM), 0));	\
    /* In -masm=att wrap identifiers that start with $	\
       into parens.  */					\
    if (ASSEMBLER_DIALECT == ASM_ATT			\
	&& name[0] == '$'				\
	&& user_label_prefix[0] == '\0')		\
      {							\
	fputc ('(', (FILE));				\
	assemble_name_raw ((FILE), name);		\
	fputc (')', (FILE));				\
      }							\
    else						\
      assemble_name_raw ((FILE), name);			\
  } while (0)
#endif
2238 | |
/* Under some conditions we need jump tables in the text section,
   because the assembler cannot handle label differences between
   sections.  This is the case for 32-bit PIC without GOTOFF-in-data
   assembler support.  */

#define JUMP_TABLES_IN_TEXT_SECTION \
  (flag_pic && !(TARGET_64BIT || HAVE_AS_GOTOFF_IN_DATA))

/* Switch to init or fini section via SECTION_OP, emit a call to FUNC,
   and switch back.  For x86 we do this only to save a few bytes that
   would otherwise be unused in the text section.  The CRT_MKSTR pair
   stringizes __USER_LABEL_PREFIX__ after macro expansion.  */
#define CRT_MKSTR2(VAL) #VAL
#define CRT_MKSTR(x) CRT_MKSTR2(x)

#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC)		\
   asm (SECTION_OP "\n\t"					\
	"call " CRT_MKSTR(__USER_LABEL_PREFIX__) #FUNC "\n"	\
	TEXT_SECTION_ASM_OP);

/* Default threshold for putting data in large sections
   with x86-64 medium memory model */
#define DEFAULT_LARGE_SECTION_THRESHOLD 65536
2260 | |
/* Which processor to tune code generation for.  These must be in sync
   with processor_cost_table in i386-options.cc; do not reorder or
   insert entries without updating that table.  */

enum processor_type
{
  PROCESSOR_GENERIC = 0,
  PROCESSOR_I386,			/* 80386 */
  PROCESSOR_I486,			/* 80486DX, 80486SX, 80486DX[24] */
  PROCESSOR_PENTIUM,
  PROCESSOR_LAKEMONT,
  PROCESSOR_PENTIUMPRO,
  PROCESSOR_PENTIUM4,
  PROCESSOR_NOCONA,
  PROCESSOR_CORE2,
  PROCESSOR_NEHALEM,
  PROCESSOR_SANDYBRIDGE,
  PROCESSOR_HASWELL,
  PROCESSOR_BONNELL,
  PROCESSOR_SILVERMONT,
  PROCESSOR_GOLDMONT,
  PROCESSOR_GOLDMONT_PLUS,
  PROCESSOR_TREMONT,
  PROCESSOR_SIERRAFOREST,
  PROCESSOR_GRANDRIDGE,
  PROCESSOR_CLEARWATERFOREST,
  PROCESSOR_KNL,
  PROCESSOR_KNM,
  PROCESSOR_SKYLAKE,
  PROCESSOR_SKYLAKE_AVX512,
  PROCESSOR_CANNONLAKE,
  PROCESSOR_ICELAKE_CLIENT,
  PROCESSOR_ICELAKE_SERVER,
  PROCESSOR_CASCADELAKE,
  PROCESSOR_TIGERLAKE,
  PROCESSOR_COOPERLAKE,
  PROCESSOR_SAPPHIRERAPIDS,
  PROCESSOR_ALDERLAKE,
  PROCESSOR_ROCKETLAKE,
  PROCESSOR_GRANITERAPIDS,
  PROCESSOR_GRANITERAPIDS_D,
  PROCESSOR_ARROWLAKE,
  PROCESSOR_ARROWLAKE_S,
  PROCESSOR_PANTHERLAKE,
  PROCESSOR_INTEL,
  PROCESSOR_LUJIAZUI,
  PROCESSOR_YONGFENG,
  PROCESSOR_GEODE,
  PROCESSOR_K6,
  PROCESSOR_ATHLON,
  PROCESSOR_K8,
  PROCESSOR_AMDFAM10,
  PROCESSOR_BDVER1,
  PROCESSOR_BDVER2,
  PROCESSOR_BDVER3,
  PROCESSOR_BDVER4,
  PROCESSOR_BTVER1,
  PROCESSOR_BTVER2,
  PROCESSOR_ZNVER1,
  PROCESSOR_ZNVER2,
  PROCESSOR_ZNVER3,
  PROCESSOR_ZNVER4,
  PROCESSOR_max
};
2324 | |
#if !defined(IN_LIBGCC2) && !defined(IN_TARGET_LIBS) && !defined(IN_RTS)
extern const char *const processor_names[];

#include "wide-int-bitmask.h"

/* One bit position per ISA feature, generated from i386-isa.def.
   END_PTA is the total number of flags.  */
enum pta_flag
{
#define DEF_PTA(NAME) _ ## NAME,
#include "i386-isa.def"
#undef DEF_PTA
  END_PTA
};

/* wide_int_bitmask can handle only 128 flags.  */
STATIC_ASSERT (END_PTA <= 128);
2340 | |
/* Build a wide_int_bitmask with only bit N set.  Bits 0..63 go in the
   constructor's second argument, bits 64..127 in the first.  N is
   parenthesized so the macro stays safe for arbitrary expression
   arguments (macro-hygiene fix; expansion-equivalent for existing
   call sites).  */
#define WIDE_INT_BITMASK_FROM_NTH(N) \
  ((N) < 64 ? wide_int_bitmask (0, 1ULL << (N)) \
	    : wide_int_bitmask (1ULL << ((N) - 64), 0))
2343 | |
/* For each feature FOO in i386-isa.def, define a constexpr bitmask
   PTA_FOO with the corresponding single bit set.  */
#define DEF_PTA(NAME) constexpr wide_int_bitmask PTA_ ## NAME \
  = WIDE_INT_BITMASK_FROM_NTH ((pta_flag) _ ## NAME);
#include "i386-isa.def"
#undef DEF_PTA
2348 | |
/* Feature sets for the x86-64 micro-architecture levels.  Each level
   builds on the previous one; V2 additionally drops NO_SAHF.  */
constexpr wide_int_bitmask PTA_X86_64_BASELINE = PTA_64BIT | PTA_MMX | PTA_SSE
  | PTA_SSE2 | PTA_NO_SAHF | PTA_FXSR;
constexpr wide_int_bitmask PTA_X86_64_V2 = (PTA_X86_64_BASELINE
					    & (~PTA_NO_SAHF))
  | PTA_CX16 | PTA_POPCNT | PTA_SSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_SSSE3;
constexpr wide_int_bitmask PTA_X86_64_V3 = PTA_X86_64_V2
  | PTA_AVX | PTA_AVX2 | PTA_BMI | PTA_BMI2 | PTA_F16C | PTA_FMA | PTA_LZCNT
  | PTA_MOVBE | PTA_XSAVE;
constexpr wide_int_bitmask PTA_X86_64_V4 = PTA_X86_64_V3
  | PTA_AVX512F | PTA_AVX512BW | PTA_AVX512CD | PTA_AVX512DQ | PTA_AVX512VL;

/* Per-processor feature sets, mostly defined incrementally on top of
   the previous generation.  ROCKETLAKE notably removes SGX.  */
constexpr wide_int_bitmask PTA_CORE2 = PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2
  | PTA_SSE3 | PTA_SSSE3 | PTA_CX16 | PTA_FXSR;
constexpr wide_int_bitmask PTA_NEHALEM = PTA_CORE2 | PTA_SSE4_1 | PTA_SSE4_2
  | PTA_POPCNT;
constexpr wide_int_bitmask PTA_WESTMERE = PTA_NEHALEM | PTA_PCLMUL;
constexpr wide_int_bitmask PTA_SANDYBRIDGE = PTA_WESTMERE | PTA_AVX | PTA_XSAVE
  | PTA_XSAVEOPT;
constexpr wide_int_bitmask PTA_IVYBRIDGE = PTA_SANDYBRIDGE | PTA_FSGSBASE
  | PTA_RDRND | PTA_F16C;
constexpr wide_int_bitmask PTA_HASWELL = PTA_IVYBRIDGE | PTA_AVX2 | PTA_BMI
  | PTA_BMI2 | PTA_LZCNT | PTA_FMA | PTA_MOVBE | PTA_HLE;
constexpr wide_int_bitmask PTA_BROADWELL = PTA_HASWELL | PTA_ADX | PTA_RDSEED
  | PTA_PRFCHW;
constexpr wide_int_bitmask PTA_SKYLAKE = PTA_BROADWELL | PTA_AES
  | PTA_CLFLUSHOPT | PTA_XSAVEC | PTA_XSAVES | PTA_SGX;
constexpr wide_int_bitmask PTA_SKYLAKE_AVX512 = PTA_SKYLAKE | PTA_AVX512F
  | PTA_AVX512CD | PTA_AVX512VL | PTA_AVX512BW | PTA_AVX512DQ | PTA_PKU
  | PTA_CLWB;
constexpr wide_int_bitmask PTA_CASCADELAKE = PTA_SKYLAKE_AVX512
  | PTA_AVX512VNNI;
constexpr wide_int_bitmask PTA_COOPERLAKE = PTA_CASCADELAKE | PTA_AVX512BF16;
constexpr wide_int_bitmask PTA_CANNONLAKE = PTA_SKYLAKE | PTA_AVX512F
  | PTA_AVX512CD | PTA_AVX512VL | PTA_AVX512BW | PTA_AVX512DQ | PTA_PKU
  | PTA_AVX512VBMI | PTA_AVX512IFMA | PTA_SHA;
constexpr wide_int_bitmask PTA_ICELAKE_CLIENT = PTA_CANNONLAKE | PTA_AVX512VNNI
  | PTA_GFNI | PTA_VAES | PTA_AVX512VBMI2 | PTA_VPCLMULQDQ | PTA_AVX512BITALG
  | PTA_RDPID | PTA_AVX512VPOPCNTDQ;
constexpr wide_int_bitmask PTA_ROCKETLAKE = PTA_ICELAKE_CLIENT & ~PTA_SGX;
constexpr wide_int_bitmask PTA_ICELAKE_SERVER = PTA_ICELAKE_CLIENT
  | PTA_PCONFIG | PTA_WBNOINVD | PTA_CLWB;
constexpr wide_int_bitmask PTA_TIGERLAKE = PTA_ICELAKE_CLIENT | PTA_MOVDIRI
  | PTA_MOVDIR64B | PTA_CLWB | PTA_AVX512VP2INTERSECT | PTA_KL | PTA_WIDEKL;
constexpr wide_int_bitmask PTA_SAPPHIRERAPIDS = PTA_ICELAKE_SERVER | PTA_MOVDIRI
  | PTA_MOVDIR64B | PTA_ENQCMD | PTA_CLDEMOTE | PTA_PTWRITE | PTA_WAITPKG
  | PTA_SERIALIZE | PTA_TSXLDTRK | PTA_AMX_TILE | PTA_AMX_INT8 | PTA_AMX_BF16
  | PTA_UINTR | PTA_AVXVNNI | PTA_AVX512FP16 | PTA_AVX512BF16;
constexpr wide_int_bitmask PTA_KNL = PTA_BROADWELL | PTA_AVX512PF
  | PTA_AVX512ER | PTA_AVX512F | PTA_AVX512CD | PTA_PREFETCHWT1;
constexpr wide_int_bitmask PTA_BONNELL = PTA_CORE2 | PTA_MOVBE;
constexpr wide_int_bitmask PTA_SILVERMONT = PTA_WESTMERE | PTA_MOVBE
  | PTA_RDRND | PTA_PRFCHW;
constexpr wide_int_bitmask PTA_GOLDMONT = PTA_SILVERMONT | PTA_AES | PTA_SHA
  | PTA_XSAVE | PTA_RDSEED | PTA_XSAVEC | PTA_XSAVES | PTA_CLFLUSHOPT
  | PTA_XSAVEOPT | PTA_FSGSBASE;
constexpr wide_int_bitmask PTA_GOLDMONT_PLUS = PTA_GOLDMONT | PTA_RDPID
  | PTA_SGX | PTA_PTWRITE;
constexpr wide_int_bitmask PTA_TREMONT = PTA_GOLDMONT_PLUS | PTA_CLWB
  | PTA_GFNI | PTA_MOVDIRI | PTA_MOVDIR64B | PTA_CLDEMOTE | PTA_WAITPKG;
constexpr wide_int_bitmask PTA_ALDERLAKE = PTA_TREMONT | PTA_ADX | PTA_AVX
  | PTA_AVX2 | PTA_BMI | PTA_BMI2 | PTA_F16C | PTA_FMA | PTA_LZCNT
  | PTA_PCONFIG | PTA_PKU | PTA_VAES | PTA_VPCLMULQDQ | PTA_SERIALIZE
  | PTA_HRESET | PTA_KL | PTA_WIDEKL | PTA_AVXVNNI;
constexpr wide_int_bitmask PTA_SIERRAFOREST = PTA_ALDERLAKE | PTA_AVXIFMA
  | PTA_AVXVNNIINT8 | PTA_AVXNECONVERT | PTA_CMPCCXADD | PTA_ENQCMD | PTA_UINTR;
constexpr wide_int_bitmask PTA_GRANITERAPIDS = PTA_SAPPHIRERAPIDS | PTA_AMX_FP16
  | PTA_PREFETCHI;
constexpr wide_int_bitmask PTA_GRANITERAPIDS_D = PTA_GRANITERAPIDS
  | PTA_AMX_COMPLEX;
constexpr wide_int_bitmask PTA_GRANDRIDGE = PTA_SIERRAFOREST | PTA_RAOINT;
constexpr wide_int_bitmask PTA_ARROWLAKE = PTA_ALDERLAKE | PTA_AVXIFMA
  | PTA_AVXVNNIINT8 | PTA_AVXNECONVERT | PTA_CMPCCXADD | PTA_UINTR;
constexpr wide_int_bitmask PTA_ARROWLAKE_S = PTA_ARROWLAKE | PTA_AVXVNNIINT16
  | PTA_SHA512 | PTA_SM3 | PTA_SM4;
constexpr wide_int_bitmask PTA_CLEARWATERFOREST = PTA_SIERRAFOREST
  | PTA_AVXVNNIINT16 | PTA_SHA512 | PTA_SM3 | PTA_SM4 | PTA_USER_MSR
  | PTA_PREFETCHI;
constexpr wide_int_bitmask PTA_PANTHERLAKE = PTA_ARROWLAKE_S | PTA_PREFETCHI;
constexpr wide_int_bitmask PTA_KNM = PTA_KNL | PTA_AVX5124VNNIW
  | PTA_AVX5124FMAPS | PTA_AVX512VPOPCNTDQ;
constexpr wide_int_bitmask PTA_ZNVER1 = PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2
  | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM | PTA_SSSE3 | PTA_SSE4_1
  | PTA_SSE4_2 | PTA_AES | PTA_PCLMUL | PTA_AVX | PTA_AVX2 | PTA_BMI | PTA_BMI2
  | PTA_F16C | PTA_FMA | PTA_PRFCHW | PTA_FXSR | PTA_XSAVE | PTA_XSAVEOPT
  | PTA_FSGSBASE | PTA_RDRND | PTA_MOVBE | PTA_MWAITX | PTA_ADX | PTA_RDSEED
  | PTA_CLZERO | PTA_CLFLUSHOPT | PTA_XSAVEC | PTA_XSAVES | PTA_SHA | PTA_LZCNT
  | PTA_POPCNT;
constexpr wide_int_bitmask PTA_ZNVER2 = PTA_ZNVER1 | PTA_CLWB | PTA_RDPID
  | PTA_WBNOINVD;
constexpr wide_int_bitmask PTA_ZNVER3 = PTA_ZNVER2 | PTA_VAES | PTA_VPCLMULQDQ
  | PTA_PKU;
constexpr wide_int_bitmask PTA_ZNVER4 = PTA_ZNVER3 | PTA_AVX512F | PTA_AVX512DQ
  | PTA_AVX512IFMA | PTA_AVX512CD | PTA_AVX512BW | PTA_AVX512VL
  | PTA_AVX512BF16 | PTA_AVX512VBMI | PTA_AVX512VBMI2 | PTA_GFNI
  | PTA_AVX512VNNI | PTA_AVX512BITALG | PTA_AVX512VPOPCNTDQ;

constexpr wide_int_bitmask PTA_LUJIAZUI = PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2
  | PTA_SSE3 | PTA_CX16 | PTA_ABM | PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_AES
  | PTA_PCLMUL | PTA_BMI | PTA_BMI2 | PTA_PRFCHW | PTA_FXSR | PTA_XSAVE | PTA_XSAVEOPT
  | PTA_FSGSBASE | PTA_RDRND | PTA_MOVBE | PTA_ADX | PTA_RDSEED | PTA_POPCNT;

constexpr wide_int_bitmask PTA_YONGFENG = PTA_LUJIAZUI | PTA_AVX | PTA_AVX2 | PTA_F16C
  | PTA_FMA | PTA_SHA | PTA_LZCNT;
2452 | |
#ifndef GENERATOR_FILE

#include "insn-attr-common.h"

#include "common/config/i386/i386-cpuinfo.h"

/* One entry of the -march=/-mtune= alias table.  */
class pta
{
public:
  const char *const name;		/* processor name or nickname.  */
  const enum processor_type processor;
  const enum attr_cpu schedule;		/* scheduling attribute value.  */
  const wide_int_bitmask flags;		/* PTA_* ISA feature set.  */
  const int model;			/* NOTE(review): presumably the
					   cpuinfo model id — confirm against
					   i386-cpuinfo.h users.  */
  const enum feature_priority priority;
};

extern const pta processor_alias_table[];
extern unsigned int const pta_size;
extern unsigned int const num_arch_names;
#endif
2474 | |
#endif

/* Processor selected by -mtune= and -march= respectively.  */
extern enum processor_type ix86_tune;
extern enum processor_type ix86_arch;

/* Size of the RED_ZONE area.  */
#define RED_ZONE_SIZE 128
/* Reserved area of the red zone for temporaries.  */
#define RED_ZONE_RESERVE 8

extern unsigned int ix86_preferred_stack_boundary;
extern unsigned int ix86_incoming_stack_boundary;

/* Smallest class containing REGNO.  */
extern enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER];
2490 | |
/* Strategies for implementing x87 floating-point compares.  */
enum ix86_fpcmp_strategy {
  IX86_FPCMP_SAHF,
  IX86_FPCMP_COMI,
  IX86_FPCMP_ARITH
};
2496 | |
/* To properly truncate FP values into integers, we need to set i387 control
   word.  We can't emit proper mode switching code before reload, as spills
   generated by reload may truncate values incorrectly, but we still can avoid
   redundant computation of new control word by the mode switching pass.
   The fldcw instructions are still emitted redundantly, but this is probably
   not going to be noticeable problem, as most CPUs do have fast path for
   the sequence.

   The machinery is to emit simple truncation instructions and split them
   before reload to instructions having USEs of two memory locations that
   are filled by this code to old and new control word.

   Post-reload pass may be later used to eliminate the redundant fldcw if
   needed.  */

/* Stack slots allocated per-function for the purposes above (and a few
   other scratch uses).  */
enum ix86_stack_slot
{
  SLOT_TEMP = 0,
  SLOT_CW_STORED,
  SLOT_CW_ROUNDEVEN,
  SLOT_CW_TRUNC,
  SLOT_CW_FLOOR,
  SLOT_CW_CEIL,
  SLOT_STV_TEMP,
  SLOT_FLOATxFDI_387,
  MAX_386_STACK_LOCALS
};
2524 | |
/* Entities tracked by the mode-switching pass: the direction flag,
   the AVX upper-128 state, and the four i387 rounding-control words.  */
enum ix86_entity
{
  X86_DIRFLAG = 0,
  AVX_U128,
  I387_ROUNDEVEN,
  I387_TRUNC,
  I387_FLOOR,
  I387_CEIL,
  MAX_386_ENTITIES
};

/* Modes for the X86_DIRFLAG entity.  */
enum x86_dirflag_state
{
  X86_DIRFLAG_RESET,
  X86_DIRFLAG_ANY
};

/* Modes for the AVX_U128 entity.  */
enum avx_u128_state
{
  AVX_U128_CLEAN,
  AVX_U128_DIRTY,
  AVX_U128_ANY
};
2548 | |
/* Define this macro if the port needs extra instructions inserted
   for mode switching in an optimizing compilation.  */

#define OPTIMIZE_MODE_SWITCHING(ENTITY) \
  ix86_optimize_mode_switching[(ENTITY)]

/* If you define `OPTIMIZE_MODE_SWITCHING', you have to define this as
   initializer for an array of integers.  Each initializer element N
   refers to an entity that needs mode switching, and specifies the
   number of different modes that might need to be set for this
   entity.  The position of the initializer in the initializer -
   starting counting at zero - determines the integer that is used to
   refer to the mode-switched entity in question.
   Order must match enum ix86_entity above.  */

#define NUM_MODES_FOR_MODE_SWITCHING \
  { X86_DIRFLAG_ANY, AVX_U128_ANY, \
    I387_CW_ANY, I387_CW_ANY, I387_CW_ANY, I387_CW_ANY }
2566 | |
2567 | |
2568 | /* Avoid renaming of stack registers, as doing so in combination with |
2569 | scheduling just increases amount of live registers at time and in |
2570 | the turn amount of fxch instructions needed. |
2571 | |
2572 | ??? Maybe Pentium chips benefits from renaming, someone can try.... |
2573 | |
2574 | Don't rename evex to non-evex sse registers. */ |
2575 | |
2576 | #define HARD_REGNO_RENAME_OK(SRC, TARGET) \ |
2577 | (!STACK_REGNO_P (SRC) \ |
2578 | && EXT_REX_SSE_REGNO_P (SRC) == EXT_REX_SSE_REGNO_P (TARGET)) |
2579 | |
2580 | |
2581 | #define FASTCALL_PREFIX '@' |
2582 | |
#ifndef USED_FOR_TARGET
/* Structure describing stack frame layout.
   Stack grows downward:

   [arguments]
					<- ARG_POINTER
   saved pc

   saved static chain			if ix86_static_chain_on_stack

   saved frame pointer			if frame_pointer_needed
					<- HARD_FRAME_POINTER
   [saved regs]
					<- reg_save_offset
   [padding0]
					<- stack_realign_offset
   [saved SSE regs]
	OR
   [stub-saved registers for ms x64 --> sysv clobbers
					<- Start of out-of-line, stub-saved/restored regs
					   (see libgcc/config/i386/(sav|res)ms64*.S)
     [XMM6-15]
     [RSI]
     [RDI]
     [?RBX]				only if RBX is clobbered
     [?RBP]				only if RBP and RBX are clobbered
     [?R12]				only if R12 and all previous regs are clobbered
     [?R13]				only if R13 and all previous regs are clobbered
     [?R14]				only if R14 and all previous regs are clobbered
     [?R15]				only if R15 and all previous regs are clobbered
					<- end of stub-saved/restored regs
   [padding1]
   ]
					<- sse_reg_save_offset
   [padding2]
		       |		<- FRAME_POINTER
   [va_arg registers]  |
		       |
   [frame]	       |
		       |
   [padding2]	       | = to_allocate
					<- STACK_POINTER
  */
struct GTY(()) ix86_frame
{
  /* Counts of registers the prologue must save (see diagram above).  */
  int nsseregs;
  int nregs;
  int va_arg_size;
  int red_zone_size;
  int outgoing_arguments_size;

  /* The offsets relative to ARG_POINTER.  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT hard_frame_pointer_offset;
  HOST_WIDE_INT stack_pointer_offset;
  HOST_WIDE_INT hfp_save_offset;
  HOST_WIDE_INT reg_save_offset;
  HOST_WIDE_INT stack_realign_allocate;
  HOST_WIDE_INT stack_realign_offset;
  HOST_WIDE_INT sse_reg_save_offset;

  /* When save_regs_using_mov is set, emit prologue using
     move instead of push instructions.  */
  bool save_regs_using_mov;

  /* Assume without checking that:
       EXPENSIVE_P = expensive_function_p (EXPENSIVE_COUNT).  */
  bool expensive_p;
  int expensive_count;
};
2653 | |
/* Machine specific frame tracking during prologue/epilogue generation.  All
   values are positive, but since the x86 stack grows downward, are subtracted
   from the CFA to produce a valid address.  */

struct GTY(()) machine_frame_state
{
  /* This pair tracks the currently active CFA as reg+offset.  When reg
     is drap_reg, we don't bother trying to record here the real CFA when
     it might really be a DW_CFA_def_cfa_expression.  */
  rtx cfa_reg;
  HOST_WIDE_INT cfa_offset;

  /* The current offset (canonically from the CFA) of ESP and EBP.
     When stack frame re-alignment is active, these may not be relative
     to the CFA.  However, in all cases they are relative to the offsets
     of the saved registers stored in ix86_frame.  */
  HOST_WIDE_INT sp_offset;
  HOST_WIDE_INT fp_offset;

  /* The size of the red-zone that may be assumed for the purposes of
     eliding register restore notes in the epilogue.  This may be zero
     if no red-zone is in effect, or may be reduced from the real
     red-zone value by a maximum runtime stack re-alignment value.  */
  int red_zone_offset;

  /* Indicate whether each of ESP, EBP or DRAP currently holds a valid
     value within the frame.  If false then the offset above should be
     ignored.  Note that DRAP, if valid, *always* points to the CFA and
     thus has an offset of zero.  */
  BOOL_BITFIELD sp_valid : 1;
  BOOL_BITFIELD fp_valid : 1;
  BOOL_BITFIELD drap_valid : 1;

  /* Indicate whether the local stack frame has been re-aligned.  When
     set, the SP/FP offsets above are relative to the aligned frame
     and not the CFA.  */
  BOOL_BITFIELD realigned : 1;

  /* Indicates whether the stack pointer has been re-aligned.  When set,
     SP/FP continue to be relative to the CFA, but the stack pointer
     should only be used for offsets > sp_realigned_offset, while
     the frame pointer should be used for offsets <= sp_realigned_fp_last.
     The flags realigned and sp_realigned are mutually exclusive.  */
  BOOL_BITFIELD sp_realigned : 1;

  /* If sp_realigned is set, this is the last valid offset from the CFA
     that can be used for access with the frame pointer.  */
  HOST_WIDE_INT sp_realigned_fp_last;

  /* If sp_realigned is set, this is the offset from the CFA that the stack
     pointer was realigned, and may or may not be equal to sp_realigned_fp_last.
     Access via the stack pointer is only valid for offsets that are greater than
     this value.  */
  HOST_WIDE_INT sp_realigned_offset;
};
2709 | |
/* Private to winnt.cc.  */
struct seh_frame_state;

/* Classification of the current function, derived from its attributes.  */
enum function_type
{
  TYPE_UNKNOWN = 0,
  TYPE_NORMAL,
  /* The current function is an interrupt service routine with a
     pointer argument as specified by the "interrupt" attribute.  */
  TYPE_INTERRUPT,
  /* The current function is an interrupt service routine with a
     pointer argument and an integer argument as specified by the
     "interrupt" attribute.  */
  TYPE_EXCEPTION
};

/* Kind of insn queued for emission at the function entrance.  */
enum queued_insn_type
{
  TYPE_NONE = 0,
  TYPE_ENDBR,
  TYPE_PATCHABLE_AREA
};
2732 | |
/* Per-function machine-specific state, attached to cfun->machine.  */
struct GTY(()) machine_function {
  struct stack_local_entry *stack_locals;
  int varargs_gpr_size;
  int varargs_fpr_size;
  int optimize_mode_switching[MAX_386_ENTITIES];

  /* Cached initial frame layout for the current function.  */
  struct ix86_frame frame;

  /* For -fsplit-stack support: A stack local which holds a pointer to
     the stack arguments for a function with a variable number of
     arguments.  This is set at the start of the function and is used
     to initialize the overflow_arg_area field of the va_list
     structure.  */
  rtx split_stack_varargs_pointer;

  /* This value is used for amd64 targets and specifies the current abi
     to be used.  MS_ABI means ms abi.  Otherwise SYSV_ABI means sysv abi.  */
  ENUM_BITFIELD(calling_abi) call_abi : 8;

  /* Nonzero if the function accesses a previous frame.  */
  BOOL_BITFIELD accesses_prev_frame : 1;

  /* Set by ix86_compute_frame_layout and used by prologue/epilogue
     expander to determine the style used.  */
  BOOL_BITFIELD use_fast_prologue_epilogue : 1;

  /* Nonzero if the current function calls pc thunk and
     must not use the red zone.  */
  BOOL_BITFIELD pc_thunk_call_expanded : 1;

  /* If true, the current function needs the default PIC register, not
     an alternate register (on x86) and must not use the red zone (on
     x86_64), even if it's a leaf function.  We don't want the
     function to be regarded as non-leaf because TLS calls need not
     affect register allocation.  This flag is set when a TLS call
     instruction is expanded within a function, and never reset, even
     if all such instructions are optimized away.  Use the
     ix86_current_function_calls_tls_descriptor macro for a better
     approximation.  */
  BOOL_BITFIELD tls_descriptor_call_expanded_p : 1;

  /* If true, the current function has a static chain placed on the
     stack below the return address.  */
  BOOL_BITFIELD static_chain_on_stack : 1;

  /* If true, it is safe to not save/restore DRAP register.  */
  BOOL_BITFIELD no_drap_save_restore : 1;

  /* Function type.  */
  ENUM_BITFIELD(function_type) func_type : 2;

  /* How to generate indirect branch.  */
  ENUM_BITFIELD(indirect_branch) indirect_branch_type : 3;

  /* If true, the current function has local indirect jumps, like
     "indirect_jump" or "tablejump".  */
  BOOL_BITFIELD has_local_indirect_jump : 1;

  /* How to generate function return.  */
  ENUM_BITFIELD(indirect_branch) function_return_type : 3;

  /* If true, the current function is a function specified with
     the "interrupt" or "no_caller_saved_registers" attribute.  */
  BOOL_BITFIELD no_caller_saved_registers : 1;

  /* If true, there is register available for argument passing.  This
     is used only in ix86_function_ok_for_sibcall by 32-bit to determine
     if there is scratch register available for indirect sibcall.  In
     64-bit, rax, r10 and r11 are scratch registers which aren't used to
     pass arguments and can be used for indirect sibcall.  */
  BOOL_BITFIELD arg_reg_available : 1;

  /* If true, we're out-of-lining reg save/restore for regs clobbered
     by 64-bit ms_abi functions calling a sysv_abi function.  */
  BOOL_BITFIELD call_ms2sysv : 1;

  /* If true, the incoming 16-byte aligned stack has an offset (of 8) and
     needs padding prior to out-of-line stub save/restore area.  */
  BOOL_BITFIELD call_ms2sysv_pad_in : 1;

  /* This is the number of extra registers saved by stub (valid range is
     0-6).  Each additional register is only saved/restored by the stubs
     if all successive ones are.  (Will always be zero when using a hard
     frame pointer.)  */
  unsigned int :3;

  /* Nonzero if the function places outgoing arguments on stack.  */
  BOOL_BITFIELD outgoing_args_on_stack : 1;

  /* If true, ENDBR or patchable area is queued at function entrance.  */
  ENUM_BITFIELD(queued_insn_type) insn_queued_at_entrance : 2;

  /* If true, the function label has been emitted.  */
  BOOL_BITFIELD function_label_emitted : 1;

  /* True if the function needs a stack frame.  */
  BOOL_BITFIELD stack_frame_required : 1;

  /* True if we should act silently, rather than raise an error for
     invalid calls.  */
  BOOL_BITFIELD silent_p : 1;

  /* True if red zone is used.  */
  BOOL_BITFIELD red_zone_used : 1;

  /* The largest alignment, in bytes, of stack slot actually used.  */
  unsigned int max_used_stack_alignment;

  /* During prologue/epilogue generation, the current frame state.
     Otherwise, the frame state at the end of the prologue.  */
  struct machine_frame_state fs;

  /* During SEH output, this is non-null.  */
  struct seh_frame_state * GTY((skip("" ))) seh;
};
2849 | |
/* Type nodes for the SYSV and MS variants of va_list (the x86-64 ABI
   has two calling conventions, each with its own va_list layout).  */
extern GTY(()) tree sysv_va_list_type_node;
extern GTY(()) tree ms_va_list_type_node;
#endif
2853 | |
/* Convenience accessors for fields of the current function's
   machine_function record (struct machine_function above).  */
#define ix86_stack_locals (cfun->machine->stack_locals)
#define ix86_varargs_gpr_size (cfun->machine->varargs_gpr_size)
#define ix86_varargs_fpr_size (cfun->machine->varargs_fpr_size)
#define ix86_optimize_mode_switching (cfun->machine->optimize_mode_switching)
#define ix86_pc_thunk_call_expanded (cfun->machine->pc_thunk_call_expanded)
#define ix86_tls_descriptor_calls_expanded_in_cfun \
  (cfun->machine->tls_descriptor_call_expanded_p)
/* Since tls_descriptor_call_expanded is not cleared, even if all TLS
   calls are optimized away, we try to detect cases in which it was
   optimized away.  Since such instructions contain a (use (reg REG_SP)),
   we can verify whether there's any such instruction live by testing
   that REG_SP is live.  */
#define ix86_current_function_calls_tls_descriptor \
  (ix86_tls_descriptor_calls_expanded_in_cfun && df_regs_ever_live_p (SP_REG))
#define ix86_static_chain_on_stack (cfun->machine->static_chain_on_stack)
#define ix86_red_zone_used (cfun->machine->red_zone_used)
2870 | |
/* Control behavior of x86_file_start.  */
#define X86_FILE_START_VERSION_DIRECTIVE false
#define X86_FILE_START_FLTUSED false

/* Flag to mark data that is in the large address area.  */
#define SYMBOL_FLAG_FAR_ADDR (SYMBOL_FLAG_MACH_DEP << 0)
#define SYMBOL_REF_FAR_ADDR_P(X) \
	((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_FAR_ADDR) != 0)

/* Flags to mark dllimport/dllexport.  Used by PE ports, but handy to
   have defined always, to avoid ifdefing.  */
#define SYMBOL_FLAG_DLLIMPORT (SYMBOL_FLAG_MACH_DEP << 1)
#define SYMBOL_REF_DLLIMPORT_P(X) \
	((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_DLLIMPORT) != 0)

#define SYMBOL_FLAG_DLLEXPORT (SYMBOL_FLAG_MACH_DEP << 2)
#define SYMBOL_REF_DLLEXPORT_P(X) \
	((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_DLLEXPORT) != 0)

/* NOTE(review): machine-dependent symbol-flag bit 3 is skipped here;
   presumably allocated elsewhere — confirm before reusing it.  */
#define SYMBOL_FLAG_STUBVAR (SYMBOL_FLAG_MACH_DEP << 4)
#define SYMBOL_REF_STUBVAR_P(X) \
	((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_STUBVAR) != 0)
2893 | |
/* Debug entry points for the dispatch-window scheduling code.  */
extern void debug_ready_dispatch (void);
extern void debug_dispatch_window (int);

/* The value at zero is only defined for the TZCNT (TARGET_BMI) and
   LZCNT (TARGET_LZCNT) instructions, not the BSR/BSF insns in the
   original isa.  Returning 2 tells the middle-end the value (the
   mode's bit size) is defined for all inputs including zero; 0 means
   the behavior at zero is undefined.  */
#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
  ((VALUE) = GET_MODE_BITSIZE (MODE), TARGET_BMI ? 2 : 0)
#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
  ((VALUE) = GET_MODE_BITSIZE (MODE), TARGET_LZCNT ? 2 : 0)
2903 | |
2904 | |
/* Flags returned by ix86_get_callcvt ().  Bits 0-3 identify the base
   calling convention; REGPARM/SSEREGPARM are modifiers or'ed on top.  */
#define IX86_CALLCVT_CDECL 0x1
#define IX86_CALLCVT_STDCALL 0x2
#define IX86_CALLCVT_FASTCALL 0x4
#define IX86_CALLCVT_THISCALL 0x8
#define IX86_CALLCVT_REGPARM 0x10
#define IX86_CALLCVT_SSEREGPARM 0x20

/* Extract the base calling convention, discarding the REGPARM and
   SSEREGPARM modifier bits.  */
#define IX86_BASE_CALLCVT(FLAGS) \
	((FLAGS) & (IX86_CALLCVT_CDECL | IX86_CALLCVT_STDCALL \
		    | IX86_CALLCVT_FASTCALL | IX86_CALLCVT_THISCALL))

/* Bits in recip_mask selecting which operations may be replaced by
   reciprocal approximations (cf. the -mrecip= option).  */
#define RECIP_MASK_NONE 0x00
#define RECIP_MASK_DIV 0x01
#define RECIP_MASK_SQRT 0x02
#define RECIP_MASK_VEC_DIV 0x04
#define RECIP_MASK_VEC_SQRT 0x08
#define RECIP_MASK_ALL (RECIP_MASK_DIV | RECIP_MASK_SQRT \
			| RECIP_MASK_VEC_DIV | RECIP_MASK_VEC_SQRT)
/* By default only the vector forms are enabled.  */
#define RECIP_MASK_DEFAULT (RECIP_MASK_VEC_DIV | RECIP_MASK_VEC_SQRT)

/* Tests of the individual recip_mask bits.  */
#define TARGET_RECIP_DIV ((recip_mask & RECIP_MASK_DIV) != 0)
#define TARGET_RECIP_SQRT ((recip_mask & RECIP_MASK_SQRT) != 0)
#define TARGET_RECIP_VEC_DIV ((recip_mask & RECIP_MASK_VEC_DIV) != 0)
#define TARGET_RECIP_VEC_SQRT ((recip_mask & RECIP_MASK_VEC_SQRT) != 0)
2930 | |
/* Use 128-bit AVX instructions in the auto-vectorizer.  */
#define TARGET_PREFER_AVX128 (prefer_vector_width_type == PVW_AVX128)
/* Use 256-bit AVX instructions in the auto-vectorizer.  Note this is
   also true when 128 bits are preferred, i.e. it really means "prefer
   at most 256 bits" (rules out 512-bit vectors).  */
#define TARGET_PREFER_AVX256 (TARGET_PREFER_AVX128 \
			      || prefer_vector_width_type == PVW_AVX256)

/* True if indirect branches must go through a register: either
   ix86_indirect_branch_register is set globally, or the current
   function uses an indirect-branch conversion other than "keep".  */
#define TARGET_INDIRECT_BRANCH_REGISTER \
  (ix86_indirect_branch_type \
   || cfun->machine->indirect_branch_type != indirect_branch_keep)

/* HLE (hardware lock elision) acquire/release flags, encoded in bits
   above the standard __atomic memory-model values.
   NOTE(review): verify these bit positions stay clear of MEMMODEL_MASK.  */
#define IX86_HLE_ACQUIRE (1 << 16)
#define IX86_HLE_RELEASE (1 << 17)

/* For switching between functions with different target attributes.  */
#define SWITCHABLE_TARGET 1

/* This backend can represent all constants as CONST_WIDE_INT;
   CONST_DOUBLE is used for floating point only.  */
#define TARGET_SUPPORTS_WIDE_INT 1
2948 | |
#if !defined(GENERATOR_FILE) && !defined(IN_LIBGCC2)
/* Scheduling model currently in effect (attr_cpu comes from the
   generated insn-attr.h, hence the GENERATOR_FILE guard).  */
extern enum attr_cpu ix86_schedule;

/* Number of registers call-clobbered under the 64-bit ms_abi but
   call-saved under sysv_abi; used by the ms2sysv out-of-line stub
   machinery (see call_ms2sysv above).  NOTE(review): inferred from the
   name — confirm against the clobbered-register table in i386.cc.  */
#define NUM_X86_64_MS_CLOBBERED_REGS 12
#endif

/* __builtin_eh_return can't handle stack realignment, so disable MMX/SSE
   in 32-bit libgcc functions that call it.  */
#ifndef __x86_64__
#define LIBGCC2_UNWIND_ATTRIBUTE __attribute__((target ("no-mmx,no-sse")))
#endif
2960 | |
2961 | /* |
2962 | Local variables: |
2963 | version-control: t |
2964 | End: |
2965 | */ |
2966 | |