| 1 | /* Definitions of target machine for GCC for IA-32. |
| 2 | Copyright (C) 1988-2025 Free Software Foundation, Inc. |
| 3 | |
| 4 | This file is part of GCC. |
| 5 | |
| 6 | GCC is free software; you can redistribute it and/or modify |
| 7 | it under the terms of the GNU General Public License as published by |
| 8 | the Free Software Foundation; either version 3, or (at your option) |
| 9 | any later version. |
| 10 | |
| 11 | GCC is distributed in the hope that it will be useful, |
| 12 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 14 | GNU General Public License for more details. |
| 15 | |
| 16 | Under Section 7 of GPL version 3, you are granted additional |
| 17 | permissions described in the GCC Runtime Library Exception, version |
| 18 | 3.1, as published by the Free Software Foundation. |
| 19 | |
| 20 | You should have received a copy of the GNU General Public License and |
| 21 | a copy of the GCC Runtime Library Exception along with this program; |
| 22 | see the files COPYING3 and COPYING.RUNTIME respectively. If not, see |
| 23 | <http://www.gnu.org/licenses/>. */ |
| 24 | |
| 25 | /* The purpose of this file is to define the characteristics of the i386, |
| 26 | independent of assembler syntax or operating system. |
| 27 | |
| 28 | Three other files build on this one to describe a specific assembler syntax: |
| 29 | bsd386.h, att386.h, and sun386.h. |
| 30 | |
| 31 | The actual tm.h file for a particular system should include |
| 32 | this file, and then the file for the appropriate assembler syntax. |
| 33 | |
| 34 | Many macros that specify assembler syntax are omitted entirely from |
| 35 | this file because they really belong in the files for particular |
| 36 | assemblers. These include RP, IP, LPREFIX, PUT_OP_SIZE, USE_STAR, |
| 37 | ADDR_BEG, ADDR_END, PRINT_IREG, PRINT_SCALE, PRINT_B_I_S, and many |
| 38 | that start with ASM_ or end in ASM_OP. */ |
| 39 | |
| 40 | /* Redefines for option macros. */ |
| 41 | |
/* The backend historically uses the TARGET_CMPXCHG16B name; map it (and
   its _P variant, which tests an explicit ISA-flags word X rather than
   the global options) onto the option-machinery TARGET_CX16 names.  */
#define TARGET_CMPXCHG16B TARGET_CX16
#define TARGET_CMPXCHG16B_P(x) TARGET_CX16_P(x)

/* ABI selection aliases: LP64 is the plain 64-bit ABI, X32 the ILP32
   variant of it, and 16BIT the .code16gcc mode.  */
#define TARGET_LP64 TARGET_ABI_64
#define TARGET_LP64_P(x) TARGET_ABI_64_P(x)
#define TARGET_X32 TARGET_ABI_X32
#define TARGET_X32_P(x) TARGET_ABI_X32_P(x)
#define TARGET_16BIT TARGET_CODE16
#define TARGET_16BIT_P(x) TARGET_CODE16_P(x)

/* MMX-sized operations can be carried out in SSE registers when the
   64-bit ISA with SSE2 is available.  */
#define TARGET_MMX_WITH_SSE (TARGET_64BIT && TARGET_SSE2)

/* Tests for the individual APX sub-features; ix86_apx_features is a
   bitmask of the enabled apx_* bits.  */
#define TARGET_APX_EGPR (ix86_apx_features & apx_egpr)
#define TARGET_APX_PUSH2POP2 (ix86_apx_features & apx_push2pop2)
#define TARGET_APX_NDD (ix86_apx_features & apx_ndd)
#define TARGET_APX_PPX (ix86_apx_features & apx_ppx)
#define TARGET_APX_NF (ix86_apx_features & apx_nf)
#define TARGET_APX_CCMP (ix86_apx_features & apx_ccmp)
#define TARGET_APX_ZU (ix86_apx_features & apx_zu)
| 61 | |
| 62 | #include "config/vxworks-dummy.h" |
| 63 | |
| 64 | #include "config/i386/i386-opts.h" |
| 65 | |
/* Maximum number of per-size strategy entries in a stringop_algs table.  */
#define MAX_STRINGOP_ALGS 4

/* Specify what algorithm to use for stringops on known size.
   When size is unknown, the UNKNOWN_SIZE alg is used.  When size is
   known at compile time or estimated via feedback, the SIZE array
   is walked in order until MAX is greater than the estimate (or -1
   means infinity).  Corresponding ALG is used then.
   When NOALIGN is true the code guaranteeing the alignment of the memory
   block is skipped.

   For example initializer:
    {{256, loop}, {-1, rep_prefix_4_byte}}
   will use loop for blocks smaller or equal to 256 bytes, rep prefix will
   be used otherwise.  */
struct stringop_algs
{
  /* Algorithm used when the block size is not known at compile time.  */
  const enum stringop_alg unknown_size;
  const struct stringop_strategy {
    /* Several older compilers delete the default constructor because of the
       const entries (see PR100246).  Manually specifying a CTOR works around
       this issue.  Since this header is used by code compiled with the C
       compiler we must guard the addition.  */
#ifdef __cplusplus
    constexpr
    stringop_strategy (int _max = -1, enum stringop_alg _alg = libcall,
		       int _noalign = false)
      : max (_max), alg (_alg), noalign (_noalign) {}
#endif
    const int max;		/* Largest block size this entry covers;
				   -1 means no upper bound.  */
    const enum stringop_alg alg; /* Algorithm to use for such blocks.  */
    int noalign;		/* Nonzero to skip the alignment prologue.  */
  } size [MAX_STRINGOP_ALGS];
};
| 99 | |
| 100 | /* Analog of COSTS_N_INSNS when optimizing for size. */ |
| 101 | #ifndef COSTS_N_BYTES |
| 102 | #define COSTS_N_BYTES(N) ((N) * 2) |
| 103 | #endif |
| 104 | |
| 105 | /* Define the specific costs for a given cpu. NB: hard_register is used |
| 106 | by TARGET_REGISTER_MOVE_COST and TARGET_MEMORY_MOVE_COST to compute |
| 107 | hard register move costs by register allocator. Relative costs of |
| 108 | pseudo register load and store versus pseudo register moves in RTL |
| 109 | expressions for TARGET_RTX_COSTS can be different from relative |
| 110 | costs of hard registers to get the most efficient operations with |
| 111 | pseudo registers. */ |
| 112 | |
struct processor_costs {
  /* Costs used by register allocator.  integer->integer register move
     cost is 2.  */
  struct
  {
    const int movzbl_load;	/* cost of loading using movzbl */
    const int int_load[3];	/* cost of loading integer registers
				   in QImode, HImode and SImode relative
				   to reg-reg move (2).  */
    const int int_store[3];	/* cost of storing integer register
				   in QImode, HImode and SImode */
    const int fp_move;		/* cost of reg,reg fld/fst */
    const int fp_load[3];	/* cost of loading FP register
				   in SFmode, DFmode and XFmode */
    const int fp_store[3];	/* cost of storing FP register
				   in SFmode, DFmode and XFmode */
    const int mmx_move;		/* cost of moving MMX register.  */
    const int mmx_load[2];	/* cost of loading MMX register
				   in SImode and DImode */
    const int mmx_store[2];	/* cost of storing MMX register
				   in SImode and DImode */
    const int xmm_move;		/* cost of moving XMM register.  */
    const int ymm_move;		/* cost of moving YMM register.  */
    const int zmm_move;		/* cost of moving ZMM register.  */
    const int sse_load[5];	/* cost of loading SSE register
				   in 32bit, 64bit, 128bit, 256bit and 512bit */
    const int sse_store[5];	/* cost of storing SSE register
				   in 32bit, 64bit, 128bit, 256bit and 512bit */
    const int sse_to_integer;	/* cost of moving SSE register to integer.  */
    const int integer_to_sse;	/* cost of moving integer register to SSE.  */
    const int mask_to_integer;	/* cost of moving mask register to integer.  */
    const int integer_to_mask;	/* cost of moving integer register to mask.  */
    const int mask_load[3];	/* cost of loading mask registers
				   in QImode, HImode and SImode.  */
    const int mask_store[3];	/* cost of storing mask register
				   in QImode, HImode and SImode.  */
    const int mask_move;	/* cost of moving mask register.  */
  } hard_register;

  /* Costs below are used for RTX cost estimates on pseudo registers and
     may deliberately differ from the hard_register costs above.  */
  const int add;		/* cost of an add instruction */
  const int lea;		/* cost of a lea instruction */
  const int shift_var;		/* variable shift costs */
  const int shift_const;	/* constant shift costs */
  const int mult_init[5];	/* cost of starting a multiply
				   in QImode, HImode, SImode, DImode, TImode */
  const int mult_bit;		/* cost of multiply per each bit set */
  const int divide[5];		/* cost of a divide/mod
				   in QImode, HImode, SImode, DImode, TImode */
  int movsx;			/* The cost of movsx operation.  */
  int movzx;			/* The cost of movzx operation.  */
  const int large_insn;		/* insns larger than this cost more */
  const int move_ratio;		/* The threshold of number of scalar
				   memory-to-memory move insns.  */
  const int clear_ratio;	/* The threshold of number of scalar
				   memory clearing insns.  */
  const int int_load[3];	/* cost of loading integer registers
				   in QImode, HImode and SImode relative
				   to reg-reg move (2).  */
  const int int_store[3];	/* cost of storing integer register
				   in QImode, HImode and SImode */
  const int sse_load[5];	/* cost of loading SSE register
				   in 32bit, 64bit, 128bit, 256bit and 512bit */
  const int sse_store[5];	/* cost of storing SSE register
				   in 32bit, 64bit, 128bit, 256bit and 512bit */
  const int sse_unaligned_load[5];/* cost of unaligned load.  */
  const int sse_unaligned_store[5];/* cost of unaligned store.  */
  /* Cost of moving XMM, YMM and ZMM registers.  */
  const int xmm_move, ymm_move,
	    zmm_move;
  const int sse_to_integer;	/* cost of moving SSE register to integer.  */
  const int integer_to_sse;	/* cost of moving integer register to SSE.  */
  const int gather_static, gather_per_elt; /* Cost of gather load is computed
				   as static + per_item * nelts.  */
  const int scatter_static, scatter_per_elt; /* Cost of scatter store is
				   computed as static + per_item * nelts.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int prefetch_block;	/* bytes moved to cache for prefetch.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
				   operations.  */
  const int branch_cost;	/* Default value for BRANCH_COST.  */
  const int fadd;		/* cost of FADD and FSUB instructions.  */
  const int fmul;		/* cost of FMUL instruction.  */
  const int fdiv;		/* cost of FDIV instruction.  */
  const int fabs;		/* cost of FABS instruction.  */
  const int fchs;		/* cost of FCHS instruction.  */
  const int fsqrt;		/* cost of FSQRT instruction.  */
  const int sse_op;		/* cost of cheap SSE instruction.  */
  const int addss;		/* cost of ADDSS/SD SUBSS/SD instructions.  */
  const int mulss;		/* cost of MULSS instructions.  */
  const int mulsd;		/* cost of MULSD instructions.  */
  const int fmass;		/* cost of FMASS instructions.  */
  const int fmasd;		/* cost of FMASD instructions.  */
  const int divss;		/* cost of DIVSS instructions.  */
  const int divsd;		/* cost of DIVSD instructions.  */
  const int sqrtss;		/* cost of SQRTSS instructions.  */
  const int sqrtsd;		/* cost of SQRTSD instructions.  */
  const int cvtss2sd;		/* cost of SSE FP conversions,
				   such as CVTSS2SD.  */
  const int vcvtps2pd256;	/* cost of 256bit packed FP conversions,
				   such as VCVTPD2PS with larger reg in ymm.  */
  const int vcvtps2pd512;	/* cost of 512bit packed FP conversions,
				   such as VCVTPD2PS with larger reg in zmm.  */
  const int cvtsi2ss;		/* cost of CVTSI2SS instruction.  */
  const int cvtss2si;		/* cost of CVT(T)SS2SI instruction.  */
  const int cvtpi2ps;		/* cost of CVTPI2PS instruction.  */
  const int cvtps2pi;		/* cost of CVT(T)PS2PI instruction.  */
  const int reassoc_int, reassoc_fp, reassoc_vec_int, reassoc_vec_fp;
				/* Specify reassociation width for integer,
				   fp, vector integer and vector fp
				   operations.  Generally should correspond
				   to number of instructions executed in
				   parallel.  See also
				   ix86_reassociation_width.  */
  /* Strategy tables for the memcpy and memset stringops; the
     unknown_size member of stringop_algs covers unknown block sizes.  */
  struct stringop_algs *memcpy, *memset;
  const int cond_taken_branch_cost;    /* Cost of taken branch for vectorizer
					  cost model.  */
  const int cond_not_taken_branch_cost;/* Cost of not taken branch for
					  vectorizer cost model.  */

  /* The "0:0:8" label alignment specified for some processors generates
     secondary 8-byte alignment only for those label/jump/loop targets
     which have primary alignment.  */
  const char *const align_loop;		/* Loop alignment.  */
  const char *const align_jump;		/* Jump alignment.  */
  const char *const align_label;	/* Label alignment.  */
  const char *const align_func;		/* Function alignment.  */

  const unsigned small_unroll_ninsns;	/* Insn count limit for small loop
					   to be unrolled.  */
  const unsigned small_unroll_factor;	/* Unroll factor for small loop to
					   be unrolled.  */
  const int br_mispredict_scale;	/* Branch mispredict scale for ifcvt
					   threshold.  */
};
| 249 | |
/* Cost table for the processor currently being tuned for, and the table
   used when optimizing for size.  */
extern const struct processor_costs *ix86_cost;
extern const struct processor_costs ix86_size_cost;

/* Pick the cost table for the current insn: the size table when this
   insn is being optimized for size, the tuning table otherwise.  */
#define ix86_cur_cost() \
  (optimize_insn_for_size_p () ? &ix86_size_cost : ix86_cost)
| 255 | |
| 256 | /* Macros used in the machine description to test the flags. */ |
| 257 | |
| 258 | /* configure can arrange to change it. */ |
| 259 | |
| 260 | #ifndef TARGET_CPU_DEFAULT |
| 261 | #define TARGET_CPU_DEFAULT PROCESSOR_GENERIC |
| 262 | #endif |
| 263 | |
| 264 | #ifndef TARGET_FPMATH_DEFAULT |
| 265 | #define TARGET_FPMATH_DEFAULT \ |
| 266 | (TARGET_64BIT && TARGET_SSE ? FPMATH_SSE : FPMATH_387) |
| 267 | #endif |
| 268 | |
| 269 | #ifndef TARGET_FPMATH_DEFAULT_P |
| 270 | #define TARGET_FPMATH_DEFAULT_P(x) \ |
| 271 | (TARGET_64BIT_P(x) && TARGET_SSE_P(x) ? FPMATH_SSE : FPMATH_387) |
| 272 | #endif |
| 273 | |
/* If the i387 is disabled or -miamcu is used, then do not return
   values in it.  */
| 276 | #define TARGET_FLOAT_RETURNS_IN_80387 \ |
| 277 | (TARGET_FLOAT_RETURNS && TARGET_80387 && !TARGET_IAMCU) |
| 278 | #define TARGET_FLOAT_RETURNS_IN_80387_P(x) \ |
| 279 | (TARGET_FLOAT_RETURNS_P(x) && TARGET_80387_P(x) && !TARGET_IAMCU_P(x)) |
| 280 | |
| 281 | /* 64bit Sledgehammer mode. For libgcc2 we make sure this is a |
| 282 | compile-time constant. */ |
| 283 | #ifdef IN_LIBGCC2 |
| 284 | #undef TARGET_64BIT |
| 285 | #ifdef __x86_64__ |
| 286 | #define TARGET_64BIT 1 |
| 287 | #else |
| 288 | #define TARGET_64BIT 0 |
| 289 | #endif |
| 290 | #else |
| 291 | #ifndef TARGET_BI_ARCH |
| 292 | #undef TARGET_64BIT |
| 293 | #undef TARGET_64BIT_P |
| 294 | #if TARGET_64BIT_DEFAULT |
| 295 | #define TARGET_64BIT 1 |
| 296 | #define TARGET_64BIT_P(x) 1 |
| 297 | #else |
| 298 | #define TARGET_64BIT 0 |
| 299 | #define TARGET_64BIT_P(x) 0 |
| 300 | #endif |
| 301 | #endif |
| 302 | #endif |
| 303 | |
| 304 | #define HAS_LONG_COND_BRANCH 1 |
| 305 | #define HAS_LONG_UNCOND_BRANCH 1 |
| 306 | |
| 307 | #define TARGET_CPU_P(CPU) (ix86_tune == PROCESSOR_ ## CPU) |
| 308 | |
/* Feature tests against the various tunings.  The enumerators are
   generated from x86-tune.def: each DEF_TUNE entry contributes its
   first argument as an index into ix86_tune_features.  */
enum ix86_tune_indices {
#undef DEF_TUNE
#define DEF_TUNE(tune, name, selector) tune,
#include "x86-tune.def"
#undef DEF_TUNE
  X86_TUNE_LAST
};
| 317 | |
| 318 | extern unsigned char ix86_tune_features[X86_TUNE_LAST]; |
| 319 | |
| 320 | #define TARGET_USE_LEAVE ix86_tune_features[X86_TUNE_USE_LEAVE] |
| 321 | #define TARGET_PUSH_MEMORY ix86_tune_features[X86_TUNE_PUSH_MEMORY] |
| 322 | #define TARGET_ZERO_EXTEND_WITH_AND \ |
| 323 | ix86_tune_features[X86_TUNE_ZERO_EXTEND_WITH_AND] |
| 324 | #define TARGET_UNROLL_STRLEN ix86_tune_features[X86_TUNE_UNROLL_STRLEN] |
| 325 | #define TARGET_BRANCH_PREDICTION_HINTS_NOT_TAKEN \ |
| 326 | ix86_tune_features[X86_TUNE_BRANCH_PREDICTION_HINTS_NOT_TAKEN] |
| 327 | #define TARGET_BRANCH_PREDICTION_HINTS_TAKEN \ |
| 328 | ix86_tune_features[X86_TUNE_BRANCH_PREDICTION_HINTS_TAKEN] |
| 329 | #define TARGET_DOUBLE_WITH_ADD ix86_tune_features[X86_TUNE_DOUBLE_WITH_ADD] |
| 330 | #define TARGET_USE_SAHF ix86_tune_features[X86_TUNE_USE_SAHF] |
| 331 | #define TARGET_MOVX ix86_tune_features[X86_TUNE_MOVX] |
| 332 | #define TARGET_PARTIAL_REG_STALL ix86_tune_features[X86_TUNE_PARTIAL_REG_STALL] |
| 333 | #define TARGET_PARTIAL_MEMORY_READ_STALL \ |
| 334 | ix86_tune_features[X86_TUNE_PARTIAL_MEMORY_READ_STALL] |
| 335 | #define TARGET_PARTIAL_FLAG_REG_STALL \ |
| 336 | ix86_tune_features[X86_TUNE_PARTIAL_FLAG_REG_STALL] |
| 337 | #define TARGET_LCP_STALL \ |
| 338 | ix86_tune_features[X86_TUNE_LCP_STALL] |
| 339 | #define TARGET_USE_HIMODE_FIOP ix86_tune_features[X86_TUNE_USE_HIMODE_FIOP] |
| 340 | #define TARGET_USE_SIMODE_FIOP ix86_tune_features[X86_TUNE_USE_SIMODE_FIOP] |
| 341 | #define TARGET_USE_MOV0 ix86_tune_features[X86_TUNE_USE_MOV0] |
| 342 | #define TARGET_USE_CLTD ix86_tune_features[X86_TUNE_USE_CLTD] |
| 343 | #define TARGET_USE_XCHGB ix86_tune_features[X86_TUNE_USE_XCHGB] |
| 344 | #define TARGET_SPLIT_LONG_MOVES ix86_tune_features[X86_TUNE_SPLIT_LONG_MOVES] |
| 345 | #define TARGET_READ_MODIFY_WRITE ix86_tune_features[X86_TUNE_READ_MODIFY_WRITE] |
| 346 | #define TARGET_READ_MODIFY ix86_tune_features[X86_TUNE_READ_MODIFY] |
| 347 | #define TARGET_PROMOTE_QImode ix86_tune_features[X86_TUNE_PROMOTE_QIMODE] |
| 348 | #define TARGET_FAST_PREFIX ix86_tune_features[X86_TUNE_FAST_PREFIX] |
| 349 | #define TARGET_SINGLE_STRINGOP ix86_tune_features[X86_TUNE_SINGLE_STRINGOP] |
| 350 | #define TARGET_PREFER_KNOWN_REP_MOVSB_STOSB \ |
| 351 | ix86_tune_features[X86_TUNE_PREFER_KNOWN_REP_MOVSB_STOSB] |
| 352 | #define TARGET_MISALIGNED_MOVE_STRING_PRO_EPILOGUES \ |
| 353 | ix86_tune_features[X86_TUNE_MISALIGNED_MOVE_STRING_PRO_EPILOGUES] |
| 354 | #define TARGET_QIMODE_MATH ix86_tune_features[X86_TUNE_QIMODE_MATH] |
| 355 | #define TARGET_HIMODE_MATH ix86_tune_features[X86_TUNE_HIMODE_MATH] |
| 356 | #define TARGET_PROMOTE_QI_REGS ix86_tune_features[X86_TUNE_PROMOTE_QI_REGS] |
| 357 | #define TARGET_PROMOTE_HI_REGS ix86_tune_features[X86_TUNE_PROMOTE_HI_REGS] |
| 358 | #define TARGET_SINGLE_POP ix86_tune_features[X86_TUNE_SINGLE_POP] |
| 359 | #define TARGET_DOUBLE_POP ix86_tune_features[X86_TUNE_DOUBLE_POP] |
| 360 | #define TARGET_SINGLE_PUSH ix86_tune_features[X86_TUNE_SINGLE_PUSH] |
| 361 | #define TARGET_DOUBLE_PUSH ix86_tune_features[X86_TUNE_DOUBLE_PUSH] |
| 362 | #define TARGET_INTEGER_DFMODE_MOVES \ |
| 363 | ix86_tune_features[X86_TUNE_INTEGER_DFMODE_MOVES] |
| 364 | #define TARGET_PARTIAL_REG_DEPENDENCY \ |
| 365 | ix86_tune_features[X86_TUNE_PARTIAL_REG_DEPENDENCY] |
| 366 | #define TARGET_SSE_PARTIAL_REG_DEPENDENCY \ |
| 367 | ix86_tune_features[X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY] |
| 368 | #define TARGET_SSE_PARTIAL_REG_FP_CONVERTS_DEPENDENCY \ |
| 369 | ix86_tune_features[X86_TUNE_SSE_PARTIAL_REG_FP_CONVERTS_DEPENDENCY] |
| 370 | #define TARGET_SSE_PARTIAL_REG_CONVERTS_DEPENDENCY \ |
| 371 | ix86_tune_features[X86_TUNE_SSE_PARTIAL_REG_CONVERTS_DEPENDENCY] |
| 372 | #define TARGET_SSE_UNALIGNED_LOAD_OPTIMAL \ |
| 373 | ix86_tune_features[X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL] |
| 374 | #define TARGET_SSE_UNALIGNED_STORE_OPTIMAL \ |
| 375 | ix86_tune_features[X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL] |
| 376 | #define TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL \ |
| 377 | ix86_tune_features[X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL] |
| 378 | #define TARGET_SSE_SPLIT_REGS ix86_tune_features[X86_TUNE_SSE_SPLIT_REGS] |
| 379 | #define TARGET_SSE_TYPELESS_STORES \ |
| 380 | ix86_tune_features[X86_TUNE_SSE_TYPELESS_STORES] |
| 381 | #define TARGET_SSE_LOAD0_BY_PXOR ix86_tune_features[X86_TUNE_SSE_LOAD0_BY_PXOR] |
| 382 | #define TARGET_MEMORY_MISMATCH_STALL \ |
| 383 | ix86_tune_features[X86_TUNE_MEMORY_MISMATCH_STALL] |
| 384 | #define TARGET_PROLOGUE_USING_MOVE \ |
| 385 | ix86_tune_features[X86_TUNE_PROLOGUE_USING_MOVE] |
| 386 | #define TARGET_EPILOGUE_USING_MOVE \ |
| 387 | ix86_tune_features[X86_TUNE_EPILOGUE_USING_MOVE] |
| 388 | #define TARGET_SHIFT1 ix86_tune_features[X86_TUNE_SHIFT1] |
| 389 | #define TARGET_USE_FFREEP ix86_tune_features[X86_TUNE_USE_FFREEP] |
| 390 | #define TARGET_INTER_UNIT_MOVES_TO_VEC \ |
| 391 | ix86_tune_features[X86_TUNE_INTER_UNIT_MOVES_TO_VEC] |
| 392 | #define TARGET_INTER_UNIT_MOVES_FROM_VEC \ |
| 393 | ix86_tune_features[X86_TUNE_INTER_UNIT_MOVES_FROM_VEC] |
| 394 | #define TARGET_INTER_UNIT_CONVERSIONS \ |
| 395 | ix86_tune_features[X86_TUNE_INTER_UNIT_CONVERSIONS] |
| 396 | #define TARGET_FOUR_JUMP_LIMIT ix86_tune_features[X86_TUNE_FOUR_JUMP_LIMIT] |
| 397 | #define TARGET_SCHEDULE ix86_tune_features[X86_TUNE_SCHEDULE] |
| 398 | #define TARGET_USE_BT ix86_tune_features[X86_TUNE_USE_BT] |
| 399 | #define TARGET_USE_INCDEC ix86_tune_features[X86_TUNE_USE_INCDEC] |
| 400 | #define TARGET_PAD_RETURNS ix86_tune_features[X86_TUNE_PAD_RETURNS] |
| 401 | #define TARGET_PAD_SHORT_FUNCTION \ |
| 402 | ix86_tune_features[X86_TUNE_PAD_SHORT_FUNCTION] |
| 403 | #define TARGET_EXT_80387_CONSTANTS \ |
| 404 | ix86_tune_features[X86_TUNE_EXT_80387_CONSTANTS] |
| 405 | #define TARGET_AVOID_VECTOR_DECODE \ |
| 406 | ix86_tune_features[X86_TUNE_AVOID_VECTOR_DECODE] |
| 407 | #define TARGET_TUNE_PROMOTE_HIMODE_IMUL \ |
| 408 | ix86_tune_features[X86_TUNE_PROMOTE_HIMODE_IMUL] |
| 409 | #define TARGET_SLOW_IMUL_IMM32_MEM \ |
| 410 | ix86_tune_features[X86_TUNE_SLOW_IMUL_IMM32_MEM] |
| 411 | #define TARGET_SLOW_IMUL_IMM8 ix86_tune_features[X86_TUNE_SLOW_IMUL_IMM8] |
| 412 | #define TARGET_MOVE_M1_VIA_OR ix86_tune_features[X86_TUNE_MOVE_M1_VIA_OR] |
| 413 | #define TARGET_NOT_UNPAIRABLE ix86_tune_features[X86_TUNE_NOT_UNPAIRABLE] |
| 414 | #define TARGET_NOT_VECTORMODE ix86_tune_features[X86_TUNE_NOT_VECTORMODE] |
| 415 | #define TARGET_USE_VECTOR_FP_CONVERTS \ |
| 416 | ix86_tune_features[X86_TUNE_USE_VECTOR_FP_CONVERTS] |
| 417 | #define TARGET_USE_VECTOR_CONVERTS \ |
| 418 | ix86_tune_features[X86_TUNE_USE_VECTOR_CONVERTS] |
| 419 | #define TARGET_SLOW_PSHUFB \ |
| 420 | ix86_tune_features[X86_TUNE_SLOW_PSHUFB] |
| 421 | #define TARGET_AVOID_4BYTE_PREFIXES \ |
| 422 | ix86_tune_features[X86_TUNE_AVOID_4BYTE_PREFIXES] |
| 423 | #define TARGET_USE_GATHER_2PARTS \ |
| 424 | ix86_tune_features[X86_TUNE_USE_GATHER_2PARTS] |
| 425 | #define TARGET_USE_SCATTER_2PARTS \ |
| 426 | ix86_tune_features[X86_TUNE_USE_SCATTER_2PARTS] |
| 427 | #define TARGET_USE_GATHER_4PARTS \ |
| 428 | ix86_tune_features[X86_TUNE_USE_GATHER_4PARTS] |
| 429 | #define TARGET_USE_SCATTER_4PARTS \ |
| 430 | ix86_tune_features[X86_TUNE_USE_SCATTER_4PARTS] |
| 431 | #define TARGET_USE_GATHER_8PARTS \ |
| 432 | ix86_tune_features[X86_TUNE_USE_GATHER_8PARTS] |
| 433 | #define TARGET_USE_SCATTER_8PARTS \ |
| 434 | ix86_tune_features[X86_TUNE_USE_SCATTER_8PARTS] |
| 435 | #define TARGET_FUSE_CMP_AND_BRANCH_32 \ |
| 436 | ix86_tune_features[X86_TUNE_FUSE_CMP_AND_BRANCH_32] |
| 437 | #define TARGET_FUSE_CMP_AND_BRANCH_64 \ |
| 438 | ix86_tune_features[X86_TUNE_FUSE_CMP_AND_BRANCH_64] |
| 439 | #define TARGET_FUSE_CMP_AND_BRANCH \ |
| 440 | (TARGET_64BIT ? TARGET_FUSE_CMP_AND_BRANCH_64 \ |
| 441 | : TARGET_FUSE_CMP_AND_BRANCH_32) |
| 442 | #define TARGET_FUSE_CMP_AND_BRANCH_SOFLAGS \ |
| 443 | ix86_tune_features[X86_TUNE_FUSE_CMP_AND_BRANCH_SOFLAGS] |
| 444 | #define TARGET_FUSE_ALU_AND_BRANCH \ |
| 445 | ix86_tune_features[X86_TUNE_FUSE_ALU_AND_BRANCH] |
| 446 | #define TARGET_FUSE_ALU_AND_BRANCH_MEM \ |
| 447 | ix86_tune_features[X86_TUNE_FUSE_ALU_AND_BRANCH_MEM] |
| 448 | #define TARGET_FUSE_ALU_AND_BRANCH_MEM_IMM \ |
| 449 | ix86_tune_features[X86_TUNE_FUSE_ALU_AND_BRANCH_MEM_IMM] |
| 450 | #define TARGET_FUSE_ALU_AND_BRANCH_RIP_RELATIVE\ |
| 451 | ix86_tune_features[X86_TUNE_FUSE_ALU_AND_BRANCH_RIP_RELATIVE] |
| 452 | #define TARGET_FUSE_MOV_AND_ALU \ |
| 453 | ix86_tune_features[X86_TUNE_FUSE_MOV_AND_ALU] |
| 454 | #define TARGET_OPT_AGU ix86_tune_features[X86_TUNE_OPT_AGU] |
| 455 | #define TARGET_AVOID_LEA_FOR_ADDR \ |
| 456 | ix86_tune_features[X86_TUNE_AVOID_LEA_FOR_ADDR] |
| 457 | #define TARGET_SOFTWARE_PREFETCHING_BENEFICIAL \ |
| 458 | ix86_tune_features[X86_TUNE_SOFTWARE_PREFETCHING_BENEFICIAL] |
| 459 | #define TARGET_AVX256_SPLIT_REGS \ |
| 460 | ix86_tune_features[X86_TUNE_AVX256_SPLIT_REGS] |
| 461 | #define TARGET_AVX256_AVOID_VEC_PERM \ |
| 462 | ix86_tune_features[X86_TUNE_AVX256_AVOID_VEC_PERM] |
| 463 | #define TARGET_AVX512_SPLIT_REGS \ |
| 464 | ix86_tune_features[X86_TUNE_AVX512_SPLIT_REGS] |
| 465 | #define TARGET_GENERAL_REGS_SSE_SPILL \ |
| 466 | ix86_tune_features[X86_TUNE_GENERAL_REGS_SSE_SPILL] |
| 467 | #define TARGET_AVOID_MEM_OPND_FOR_CMOVE \ |
| 468 | ix86_tune_features[X86_TUNE_AVOID_MEM_OPND_FOR_CMOVE] |
| 469 | #define TARGET_SPLIT_MEM_OPND_FOR_FP_CONVERTS \ |
| 470 | ix86_tune_features[X86_TUNE_SPLIT_MEM_OPND_FOR_FP_CONVERTS] |
| 471 | #define TARGET_ADJUST_UNROLL \ |
| 472 | ix86_tune_features[X86_TUNE_ADJUST_UNROLL] |
| 473 | #define TARGET_AVOID_FALSE_DEP_FOR_BMI \ |
| 474 | ix86_tune_features[X86_TUNE_AVOID_FALSE_DEP_FOR_BMI] |
| 475 | #define TARGET_AVOID_FALSE_DEP_FOR_TZCNT \ |
| 476 | ix86_tune_features[X86_TUNE_AVOID_FALSE_DEP_FOR_TZCNT] |
| 477 | #define TARGET_AVOID_FALSE_DEP_FOR_BLS \ |
| 478 | ix86_tune_features[X86_TUNE_AVOID_FALSE_DEP_FOR_BLS] |
| 479 | #define TARGET_ONE_IF_CONV_INSN \ |
| 480 | ix86_tune_features[X86_TUNE_ONE_IF_CONV_INSN] |
| 481 | #define TARGET_AVOID_MFENCE ix86_tune_features[X86_TUNE_AVOID_MFENCE] |
| 482 | #define TARGET_EXPAND_ABS \ |
| 483 | ix86_tune_features[X86_TUNE_EXPAND_ABS] |
| 484 | #define TARGET_V2DF_REDUCTION_PREFER_HADDPD \ |
| 485 | ix86_tune_features[X86_TUNE_V2DF_REDUCTION_PREFER_HADDPD] |
| 486 | #define TARGET_DEST_FALSE_DEP_FOR_GLC \ |
| 487 | ix86_tune_features[X86_TUNE_DEST_FALSE_DEP_FOR_GLC] |
| 488 | #define TARGET_SLOW_STC ix86_tune_features[X86_TUNE_SLOW_STC] |
| 489 | #define TARGET_USE_RCR ix86_tune_features[X86_TUNE_USE_RCR] |
| 490 | #define TARGET_SSE_MOVCC_USE_BLENDV \ |
| 491 | ix86_tune_features[X86_TUNE_SSE_MOVCC_USE_BLENDV] |
| 492 | #define TARGET_ALIGN_TIGHT_LOOPS \ |
| 493 | ix86_tune_features[X86_TUNE_ALIGN_TIGHT_LOOPS] |
| 494 | #define TARGET_SSE_REDUCTION_PREFER_PSHUF \ |
| 495 | ix86_tune_features[X86_TUNE_SSE_REDUCTION_PREFER_PSHUF] |
| 496 | |
| 497 | |
| 498 | /* Feature tests against the various architecture variations. */ |
/* Indices into ix86_arch_features; each has a corresponding TARGET_*
   test macro below (TARGET_CMOV, TARGET_CMPXCHG, ...).  */
enum ix86_arch_indices {
  X86_ARCH_CMOV,
  X86_ARCH_CMPXCHG,
  X86_ARCH_CMPXCHG8B,
  X86_ARCH_XADD,
  X86_ARCH_BSWAP,

  X86_ARCH_LAST
};
| 508 | |
| 509 | extern unsigned char ix86_arch_features[X86_ARCH_LAST]; |
| 510 | |
| 511 | #define TARGET_CMOV ix86_arch_features[X86_ARCH_CMOV] |
| 512 | #define TARGET_CMPXCHG ix86_arch_features[X86_ARCH_CMPXCHG] |
| 513 | #define TARGET_CMPXCHG8B ix86_arch_features[X86_ARCH_CMPXCHG8B] |
| 514 | #define TARGET_XADD ix86_arch_features[X86_ARCH_XADD] |
| 515 | #define TARGET_BSWAP ix86_arch_features[X86_ARCH_BSWAP] |
| 516 | |
| 517 | /* For sane SSE instruction set generation we need fcomi instruction. |
| 518 | It is safe to enable all CMOVE instructions. Also, RDRAND intrinsic |
| 519 | expands to a sequence that includes conditional move. */ |
| 520 | #define TARGET_CMOVE (TARGET_CMOV || TARGET_SSE || TARGET_RDRND) |
| 521 | |
| 522 | #define TARGET_FISTTP (TARGET_SSE3 && TARGET_80387) |
| 523 | |
| 524 | extern unsigned char ix86_prefetch_sse; |
| 525 | #define TARGET_PREFETCH_SSE ix86_prefetch_sse |
| 526 | |
| 527 | #define ASSEMBLER_DIALECT (ix86_asm_dialect) |
| 528 | |
| 529 | #define TARGET_SSE_MATH ((ix86_fpmath & FPMATH_SSE) != 0) |
| 530 | #define TARGET_MIX_SSE_I387 \ |
| 531 | ((ix86_fpmath & (FPMATH_SSE | FPMATH_387)) == (FPMATH_SSE | FPMATH_387)) |
| 532 | |
| 533 | #define TARGET_HARD_SF_REGS (TARGET_80387 || TARGET_MMX || TARGET_SSE) |
| 534 | #define TARGET_HARD_DF_REGS (TARGET_80387 || TARGET_SSE) |
| 535 | #define TARGET_HARD_XF_REGS (TARGET_80387) |
| 536 | |
| 537 | #define TARGET_GNU_TLS (ix86_tls_dialect == TLS_DIALECT_GNU) |
| 538 | #define TARGET_GNU2_TLS (ix86_tls_dialect == TLS_DIALECT_GNU2) |
| 539 | #define TARGET_ANY_GNU_TLS (TARGET_GNU_TLS || TARGET_GNU2_TLS) |
| 540 | #define TARGET_SUN_TLS 0 |
| 541 | #define TARGET_WIN32_TLS 0 |
| 542 | |
| 543 | #ifndef TARGET_64BIT_DEFAULT |
| 544 | #define TARGET_64BIT_DEFAULT 0 |
| 545 | #endif |
| 546 | #ifndef TARGET_TLS_DIRECT_SEG_REFS_DEFAULT |
| 547 | #define TARGET_TLS_DIRECT_SEG_REFS_DEFAULT 0 |
| 548 | #endif |
| 549 | |
| 550 | #define TARGET_SSP_GLOBAL_GUARD (ix86_stack_protector_guard == SSP_GLOBAL) |
| 551 | #define TARGET_SSP_TLS_GUARD (ix86_stack_protector_guard == SSP_TLS) |
| 552 | |
| 553 | /* Fence to use after loop using storent. */ |
| 554 | |
| 555 | extern GTY(()) tree x86_mfence; |
| 556 | #define FENCE_FOLLOWING_MOVNT x86_mfence |
| 557 | |
| 558 | /* Once GDB has been enhanced to deal with functions without frame |
| 559 | pointers, we can change this to allow for elimination of |
| 560 | the frame pointer in leaf functions. */ |
| 561 | #define TARGET_DEFAULT 0 |
| 562 | |
| 563 | /* Extra bits to force. */ |
| 564 | #define TARGET_SUBTARGET_DEFAULT 0 |
| 565 | #define TARGET_SUBTARGET_ISA_DEFAULT 0 |
| 566 | |
| 567 | /* Extra bits to force on w/ 32-bit mode. */ |
| 568 | #define TARGET_SUBTARGET32_DEFAULT 0 |
| 569 | #define TARGET_SUBTARGET32_ISA_DEFAULT 0 |
| 570 | |
| 571 | /* Extra bits to force on w/ 64-bit mode. */ |
| 572 | #define TARGET_SUBTARGET64_DEFAULT 0 |
| 573 | /* Enable MMX, SSE and SSE2 by default. */ |
| 574 | #define TARGET_SUBTARGET64_ISA_DEFAULT \ |
| 575 | (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2) |
| 576 | |
/* Replace MACH-O ifdefs with in-line tests, where possible.
   (a) Macros defined in config/i386/darwin.h */
| 579 | #define TARGET_MACHO 0 |
| 580 | #define TARGET_MACHO_SYMBOL_STUBS 0 |
| 581 | #define MACHOPIC_ATT_STUB 0 |
| 582 | /* (b) Macros defined in config/darwin.h */ |
| 583 | #define MACHO_DYNAMIC_NO_PIC_P 0 |
| 584 | #define MACHOPIC_INDIRECT 0 |
| 585 | #define MACHOPIC_PURE 0 |
| 586 | |
| 587 | /* For the RDOS */ |
| 588 | #define TARGET_RDOS 0 |
| 589 | |
| 590 | /* For the Windows 64-bit ABI. */ |
| 591 | #define TARGET_64BIT_MS_ABI (TARGET_64BIT && ix86_cfun_abi () == MS_ABI) |
| 592 | |
| 593 | /* For the Windows 32-bit ABI. */ |
| 594 | #define TARGET_32BIT_MS_ABI (!TARGET_64BIT && ix86_cfun_abi () == MS_ABI) |
| 595 | |
| 596 | /* This is re-defined by cygming.h. */ |
| 597 | #define TARGET_SEH 0 |
| 598 | |
| 599 | /* The default abi used by target. */ |
| 600 | #define DEFAULT_ABI SYSV_ABI |
| 601 | |
| 602 | /* The default TLS segment register used by target. */ |
| 603 | #define DEFAULT_TLS_SEG_REG \ |
| 604 | (TARGET_64BIT ? ADDR_SPACE_SEG_FS : ADDR_SPACE_SEG_GS) |
| 605 | |
| 606 | /* Subtargets may reset this to 1 in order to enable 96-bit long double |
| 607 | with the rounding mode forced to 53 bits. */ |
| 608 | #define TARGET_96_ROUND_53_LONG_DOUBLE 0 |
| 609 | |
| 610 | #ifndef SUBTARGET_DRIVER_SELF_SPECS |
| 611 | # define SUBTARGET_DRIVER_SELF_SPECS "" |
| 612 | #endif |
| 613 | |
| 614 | #define DRIVER_SELF_SPECS SUBTARGET_DRIVER_SELF_SPECS |
| 615 | |
/* -march=native handling only makes sense with compiler running on
   an x86 or x86_64 chip.  If changing this condition, also change
   the condition in driver-i386.cc.  */
#if defined(__i386__) || defined(__x86_64__)
/* In driver-i386.cc.  */
extern const char *host_detect_local_cpu (int argc, const char **argv);
/* Register the "local_cpu_detect" spec function used by the
   CC1_CPU_SPEC %:local_cpu_detect(...) calls below.  The macro name
   was missing, leaving an invalid `#define' with no identifier.  */
#define EXTRA_SPEC_FUNCTIONS \
  { "local_cpu_detect", host_detect_local_cpu },
#define HAVE_LOCAL_CPU_DETECT
#endif
| 626 | |
| 627 | #if TARGET_64BIT_DEFAULT |
| 628 | #define OPT_ARCH64 "!m32" |
| 629 | #define OPT_ARCH32 "m32" |
| 630 | #else |
| 631 | #define OPT_ARCH64 "m64|mx32" |
| 632 | #define OPT_ARCH32 "m64|mx32:;" |
| 633 | #endif |
| 634 | |
| 635 | /* Support for configure-time defaults of some command line options. |
| 636 | The order here is important so that -march doesn't squash the |
| 637 | tune or cpu values. */ |
| 638 | #define OPTION_DEFAULT_SPECS \ |
| 639 | {"tune", "%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}" }, \ |
| 640 | {"tune_32", "%{" OPT_ARCH32 ":%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}}" }, \ |
| 641 | {"tune_64", "%{" OPT_ARCH64 ":%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}}" }, \ |
| 642 | {"cpu", "%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}" }, \ |
| 643 | {"cpu_32", "%{" OPT_ARCH32 ":%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}}" }, \ |
| 644 | {"cpu_64", "%{" OPT_ARCH64 ":%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}}" }, \ |
| 645 | {"arch", "%{!march=*:-march=%(VALUE)}"}, \ |
| 646 | {"arch_32", "%{" OPT_ARCH32 ":%{!march=*:-march=%(VALUE)}}"}, \ |
| 647 | {"arch_64", "%{" OPT_ARCH64 ":%{!march=*:-march=%(VALUE)}}"}, |
| 648 | |
| 649 | /* Specs for the compiler proper */ |
| 650 | |
| 651 | #ifndef CC1_CPU_SPEC |
| 652 | #define CC1_CPU_SPEC_1 "" |
| 653 | |
| 654 | #ifndef HAVE_LOCAL_CPU_DETECT |
| 655 | #define CC1_CPU_SPEC CC1_CPU_SPEC_1 |
| 656 | #else |
| 657 | #define ARCH_ARG "%{" OPT_ARCH64 ":64;:32}" |
| 658 | #define CC1_CPU_SPEC CC1_CPU_SPEC_1 \ |
| 659 | "%{march=native:%>march=native %:local_cpu_detect(arch " ARCH_ARG ") \ |
| 660 | %{!mtune=*:%>mtune=native %:local_cpu_detect(tune " ARCH_ARG ")}} \ |
| 661 | %{mtune=native:%>mtune=native %:local_cpu_detect(tune " ARCH_ARG ")}" |
| 662 | #endif |
| 663 | #endif |
| 664 | |
| 665 | /* Target CPU builtins. */ |
| 666 | #define TARGET_CPU_CPP_BUILTINS() ix86_target_macros () |
| 667 | |
| 668 | /* Target Pragmas. */ |
| 669 | #define REGISTER_TARGET_PRAGMAS() ix86_register_pragmas () |
| 670 | |
| 671 | #ifndef CC1_SPEC |
| 672 | #define CC1_SPEC "%(cc1_cpu) " |
| 673 | #endif |
| 674 | |
| 675 | /* This macro defines names of additional specifications to put in the |
| 676 | specs that can be used in various specifications like CC1_SPEC. Its |
| 677 | definition is an initializer with a subgrouping for each command option. |
| 678 | |
| 679 | Each subgrouping contains a string constant, that defines the |
| 680 | specification name, and a string constant that used by the GCC driver |
| 681 | program. |
| 682 | |
| 683 | Do not define this macro if it does not need to do anything. */ |
| 684 | |
#ifndef SUBTARGET_EXTRA_SPECS
/* By default a subtarget contributes no extra spec entries.  */
#define SUBTARGET_EXTRA_SPECS
#endif

/* Named sub-specs usable from CC1_SPEC and friends: always provide
   "cc1_cpu", then append whatever the subtarget defines.  The original
   lines left both macro names off their "#define" directives, which is
   a preprocessor error and also made the #ifndef guard above useless.  */
#define EXTRA_SPECS \
  { "cc1_cpu", CC1_CPU_SPEC }, \
  SUBTARGET_EXTRA_SPECS
| 692 | |
| 693 | |
/* Whether to allow x87 floating-point arithmetic on MODE (one of
   SFmode, DFmode and XFmode) in the current excess precision
   configuration.  XFmode is always allowed since it is the x87's
   native width; the narrower modes are allowed only when the user
   has accepted excess precision (-funsafe-math-optimizations or
   -fexcess-precision=fast).  */
#define X87_ENABLE_ARITH(MODE) \
  (ix86_unsafe_math_optimizations \
   || ix86_excess_precision == EXCESS_PRECISION_FAST \
   || (MODE) == XFmode)

/* Likewise, whether to allow direct conversions from integer mode
   IMODE (HImode, SImode or DImode) to MODE.  The unconditionally
   allowed cases (anything to XFmode, SImode to DFmode, HImode to
   anything) are presumably those where the destination can represent
   every IMODE value exactly, so no rounding occurs — confirm against
   the i386 excess-precision handling before relying on this.  */
#define X87_ENABLE_FLOAT(MODE, IMODE) \
  (ix86_unsafe_math_optimizations \
   || ix86_excess_precision == EXCESS_PRECISION_FAST \
   || (MODE) == XFmode \
   || ((MODE) == DFmode && (IMODE) == SImode) \
   || (IMODE) == HImode)
| 710 | |
| 711 | /* target machine storage layout */ |
| 712 | |
| 713 | #define SHORT_TYPE_SIZE 16 |
| 714 | #define INT_TYPE_SIZE 32 |
| 715 | #define LONG_TYPE_SIZE (TARGET_X32 ? 32 : BITS_PER_WORD) |
| 716 | #define POINTER_SIZE (TARGET_X32 ? 32 : BITS_PER_WORD) |
| 717 | #define LONG_LONG_TYPE_SIZE 64 |
| 718 | |
| 719 | #define WIDEST_HARDWARE_FP_SIZE 80 |
| 720 | |
| 721 | #if defined (TARGET_BI_ARCH) || TARGET_64BIT_DEFAULT |
| 722 | #define MAX_BITS_PER_WORD 64 |
| 723 | #else |
| 724 | #define MAX_BITS_PER_WORD 32 |
| 725 | #endif |
| 726 | |
| 727 | /* Define this if most significant byte of a word is the lowest numbered. */ |
| 728 | /* That is true on the 80386. */ |
| 729 | |
| 730 | #define BITS_BIG_ENDIAN 0 |
| 731 | |
| 732 | /* Define this if most significant byte of a word is the lowest numbered. */ |
| 733 | /* That is not true on the 80386. */ |
| 734 | #define BYTES_BIG_ENDIAN 0 |
| 735 | |
| 736 | /* Define this if most significant word of a multiword number is the lowest |
| 737 | numbered. */ |
| 738 | /* Not true for 80386 */ |
| 739 | #define WORDS_BIG_ENDIAN 0 |
| 740 | |
| 741 | /* Width of a word, in units (bytes). */ |
| 742 | #define UNITS_PER_WORD (TARGET_64BIT ? 8 : 4) |
| 743 | |
| 744 | #ifndef IN_LIBGCC2 |
| 745 | #define MIN_UNITS_PER_WORD 4 |
| 746 | #endif |
| 747 | |
| 748 | /* Allocation boundary (in *bits*) for storing arguments in argument list. */ |
| 749 | #define PARM_BOUNDARY BITS_PER_WORD |
| 750 | |
| 751 | /* Boundary (in *bits*) on which stack pointer should be aligned. */ |
| 752 | #define STACK_BOUNDARY (TARGET_64BIT_MS_ABI ? 128 : BITS_PER_WORD) |
| 753 | |
| 754 | /* Stack boundary of the main function guaranteed by OS. */ |
| 755 | #define MAIN_STACK_BOUNDARY (TARGET_64BIT ? 128 : 32) |
| 756 | |
| 757 | /* Minimum stack boundary. */ |
| 758 | #define MIN_STACK_BOUNDARY BITS_PER_WORD |
| 759 | |
| 760 | /* Boundary (in *bits*) on which the stack pointer prefers to be |
| 761 | aligned; the compiler cannot rely on having this alignment. */ |
| 762 | #define PREFERRED_STACK_BOUNDARY ix86_preferred_stack_boundary |
| 763 | |
| 764 | /* It should be MIN_STACK_BOUNDARY. But we set it to 128 bits for |
| 765 | both 32bit and 64bit, to support codes that need 128 bit stack |
| 766 | alignment for SSE instructions, but can't realign the stack. */ |
| 767 | #define PREFERRED_STACK_BOUNDARY_DEFAULT \ |
| 768 | (TARGET_IAMCU ? MIN_STACK_BOUNDARY : 128) |
| 769 | |
| 770 | /* 1 if -mstackrealign should be turned on by default. It will |
| 771 | generate an alternate prologue and epilogue that realigns the |
   runtime stack if necessary.  This supports mixing codes that keep a
| 773 | 4-byte aligned stack, as specified by i386 psABI, with codes that |
| 774 | need a 16-byte aligned stack, as required by SSE instructions. */ |
| 775 | #define STACK_REALIGN_DEFAULT 0 |
| 776 | |
| 777 | /* Boundary (in *bits*) on which the incoming stack is aligned. */ |
| 778 | #define INCOMING_STACK_BOUNDARY ix86_incoming_stack_boundary |
| 779 | |
| 780 | /* According to Windows x64 software convention, the maximum stack allocatable |
| 781 | in the prologue is 4G - 8 bytes. Furthermore, there is a limited set of |
| 782 | instructions allowed to adjust the stack pointer in the epilog, forcing the |
   use of frame pointer for frames larger than 2 GB.  This theoretical limit
| 784 | is reduced by 256, an over-estimated upper bound for the stack use by the |
| 785 | prologue. |
| 786 | We define only one threshold for both the prolog and the epilog. When the |
| 787 | frame size is larger than this threshold, we allocate the area to save SSE |
| 788 | regs, then save them, and then allocate the remaining. There is no SEH |
| 789 | unwind info for this later allocation. */ |
| 790 | #define SEH_MAX_FRAME_SIZE ((2U << 30) - 256) |
| 791 | |
| 792 | /* Target OS keeps a vector-aligned (128-bit, 16-byte) stack. This is |
| 793 | mandatory for the 64-bit ABI, and may or may not be true for other |
| 794 | operating systems. */ |
| 795 | #define TARGET_KEEPS_VECTOR_ALIGNED_STACK TARGET_64BIT |
| 796 | |
| 797 | /* Minimum allocation boundary for the code of a function. */ |
| 798 | #define FUNCTION_BOUNDARY 8 |
| 799 | |
/* We will AND with this value to test if a custom function descriptor needs
   a static chain.  The function boundary must be adjusted so that the bit
   this represents is no longer part of the address.  0 disables the custom
   function descriptors.  */
| 804 | #define X86_CUSTOM_FUNCTION_TEST 1 |
| 805 | |
| 806 | /* C++ stores the virtual bit in the lowest bit of function pointers. */ |
| 807 | #define TARGET_PTRMEMFUNC_VBIT_LOCATION ptrmemfunc_vbit_in_pfn |
| 808 | |
| 809 | /* Minimum size in bits of the largest boundary to which any |
| 810 | and all fundamental data types supported by the hardware |
| 811 | might need to be aligned. No data type wants to be aligned |
| 812 | rounder than this. |
| 813 | |
| 814 | Pentium+ prefers DFmode values to be aligned to 64 bit boundary |
| 815 | and Pentium Pro XFmode values at 128 bit boundaries. |
| 816 | |
| 817 | When increasing the maximum, also update |
| 818 | TARGET_ABSOLUTE_BIGGEST_ALIGNMENT. */ |
| 819 | |
| 820 | #define BIGGEST_ALIGNMENT \ |
| 821 | (TARGET_IAMCU ? 32 : (TARGET_AVX512F \ |
| 822 | ? 512 : (TARGET_AVX ? 256 : 128))) |
| 823 | |
| 824 | /* Maximum stack alignment. */ |
| 825 | #define MAX_STACK_ALIGNMENT MAX_OFILE_ALIGNMENT |
| 826 | |
| 827 | /* Alignment value for attribute ((aligned)). It is a constant since |
| 828 | it is the part of the ABI. We shouldn't change it with -mavx. */ |
| 829 | #define ATTRIBUTE_ALIGNED_VALUE (TARGET_IAMCU ? 32 : 128) |
| 830 | |
| 831 | /* Decide whether a variable of mode MODE should be 128 bit aligned. */ |
| 832 | #define ALIGN_MODE_128(MODE) \ |
| 833 | ((MODE) == XFmode || SSE_REG_MODE_P (MODE)) |
| 834 | |
| 835 | /* The published ABIs say that doubles should be aligned on word |
| 836 | boundaries, so lower the alignment for structure fields unless |
| 837 | -malign-double is set. */ |
| 838 | |
| 839 | /* ??? Blah -- this macro is used directly by libobjc. Since it |
| 840 | supports no vector modes, cut out the complexity and fall back |
| 841 | on BIGGEST_FIELD_ALIGNMENT. */ |
| 842 | #ifdef IN_TARGET_LIBS |
| 843 | #ifdef __x86_64__ |
| 844 | #define BIGGEST_FIELD_ALIGNMENT 128 |
| 845 | #else |
| 846 | #define BIGGEST_FIELD_ALIGNMENT 32 |
| 847 | #endif |
| 848 | #else |
| 849 | #define ADJUST_FIELD_ALIGN(FIELD, TYPE, COMPUTED) \ |
| 850 | x86_field_alignment ((TYPE), (COMPUTED)) |
| 851 | #endif |
| 852 | |
| 853 | /* If defined, a C expression to compute the alignment for a static |
| 854 | variable. TYPE is the data type, and ALIGN is the alignment that |
| 855 | the object would ordinarily have. The value of this macro is used |
| 856 | instead of that alignment to align the object. |
| 857 | |
| 858 | If this macro is not defined, then ALIGN is used. |
| 859 | |
| 860 | One use of this macro is to increase alignment of medium-size |
| 861 | data to make it all fit in fewer cache lines. Another is to |
| 862 | cause character arrays to be word-aligned so that `strcpy' calls |
| 863 | that copy constants to character arrays can be done inline. */ |
| 864 | |
| 865 | #define DATA_ALIGNMENT(TYPE, ALIGN) \ |
| 866 | ix86_data_alignment ((TYPE), (ALIGN), true) |
| 867 | |
| 868 | /* Similar to DATA_ALIGNMENT, but for the cases where the ABI mandates |
| 869 | some alignment increase, instead of optimization only purposes. E.g. |
| 870 | AMD x86-64 psABI says that variables with array type larger than 15 bytes |
| 871 | must be aligned to 16 byte boundaries. |
| 872 | |
| 873 | If this macro is not defined, then ALIGN is used. */ |
| 874 | |
| 875 | #define DATA_ABI_ALIGNMENT(TYPE, ALIGN) \ |
| 876 | ix86_data_alignment ((TYPE), (ALIGN), false) |
| 877 | |
| 878 | /* If defined, a C expression to compute the alignment for a local |
| 879 | variable. TYPE is the data type, and ALIGN is the alignment that |
| 880 | the object would ordinarily have. The value of this macro is used |
| 881 | instead of that alignment to align the object. |
| 882 | |
| 883 | If this macro is not defined, then ALIGN is used. |
| 884 | |
| 885 | One use of this macro is to increase alignment of medium-size |
| 886 | data to make it all fit in fewer cache lines. */ |
| 887 | |
| 888 | #define LOCAL_ALIGNMENT(TYPE, ALIGN) \ |
| 889 | ix86_local_alignment ((TYPE), VOIDmode, (ALIGN)) |
| 890 | |
| 891 | /* If defined, a C expression to compute the alignment for stack slot. |
| 892 | TYPE is the data type, MODE is the widest mode available, and ALIGN |
| 893 | is the alignment that the slot would ordinarily have. The value of |
| 894 | this macro is used instead of that alignment to align the slot. |
| 895 | |
   If this macro is not defined, then ALIGN is used when TYPE is NULL.
   Otherwise, LOCAL_ALIGNMENT will be used.
| 898 | |
| 899 | One use of this macro is to set alignment of stack slot to the |
| 900 | maximum alignment of all possible modes which the slot may have. */ |
| 901 | |
| 902 | #define STACK_SLOT_ALIGNMENT(TYPE, MODE, ALIGN) \ |
| 903 | ix86_local_alignment ((TYPE), (MODE), (ALIGN)) |
| 904 | |
| 905 | /* If defined, a C expression to compute the alignment for a local |
| 906 | variable DECL. |
| 907 | |
| 908 | If this macro is not defined, then |
| 909 | LOCAL_ALIGNMENT (TREE_TYPE (DECL), DECL_ALIGN (DECL)) will be used. |
| 910 | |
| 911 | One use of this macro is to increase alignment of medium-size |
| 912 | data to make it all fit in fewer cache lines. */ |
| 913 | |
| 914 | #define LOCAL_DECL_ALIGNMENT(DECL) \ |
| 915 | ix86_local_alignment ((DECL), VOIDmode, DECL_ALIGN (DECL)) |
| 916 | |
| 917 | /* If defined, a C expression to compute the minimum required alignment |
| 918 | for dynamic stack realignment purposes for EXP (a TYPE or DECL), |
| 919 | MODE, assuming normal alignment ALIGN. |
| 920 | |
| 921 | If this macro is not defined, then (ALIGN) will be used. */ |
| 922 | |
| 923 | #define MINIMUM_ALIGNMENT(EXP, MODE, ALIGN) \ |
| 924 | ix86_minimum_alignment ((EXP), (MODE), (ALIGN)) |
| 925 | |
| 926 | |
| 927 | /* Set this nonzero if move instructions will actually fail to work |
| 928 | when given unaligned data. */ |
| 929 | #define STRICT_ALIGNMENT 0 |
| 930 | |
| 931 | /* If bit field type is int, don't let it cross an int, |
| 932 | and give entire struct the alignment of an int. */ |
| 933 | /* Required on the 386 since it doesn't have bit-field insns. */ |
| 934 | #define PCC_BITFIELD_TYPE_MATTERS 1 |
| 935 | |
| 936 | #define VECTOR_STORE_FLAG_VALUE(MODE) \ |
| 937 | (GET_MODE_CLASS (MODE) == MODE_VECTOR_INT ? constm1_rtx : NULL_RTX) |
| 938 | |
| 939 | /* Standard register usage. */ |
| 940 | |
| 941 | /* This processor has special stack-like registers. See reg-stack.cc |
| 942 | for details. */ |
| 943 | |
| 944 | #define STACK_REGS |
| 945 | |
/* True if values of MODE live in x87 stack registers: MODE is an x87
   float mode that has not been fully taken over by SSE math (SFmode
   and DFmode go to SSE when TARGET_SSE_MATH, unless x87/SSE mixing
   via TARGET_MIX_SSE_I387 keeps the x87 in play).  */
#define IS_STACK_MODE(MODE) \
  (X87_FLOAT_MODE_P (MODE) \
   && (!(SSE_FLOAT_MODE_P (MODE) && TARGET_SSE_MATH) \
       || TARGET_MIX_SSE_I387))
| 950 | |
| 951 | /* Number of actual hardware registers. |
| 952 | The hardware registers are assigned numbers for the compiler |
| 953 | from 0 to just below FIRST_PSEUDO_REGISTER. |
| 954 | All registers that the compiler knows about must be given numbers, |
| 955 | even those that are not normally considered general registers. |
| 956 | |
| 957 | In the 80386 we give the 8 general purpose registers the numbers 0-7. |
| 958 | We number the floating point registers 8-15. |
| 959 | Note that registers 0-7 can be accessed as a short or int, |
| 960 | while only 0-3 may be used with byte `mov' instructions. |
| 961 | |
| 962 | Reg 16 does not correspond to any hardware register, but instead |
| 963 | appears in the RTL as an argument pointer prior to reload, and is |
| 964 | eliminated during reloading in favor of either the stack or frame |
| 965 | pointer. */ |
| 966 | |
| 967 | #define FIRST_PSEUDO_REGISTER FIRST_PSEUDO_REG |
| 968 | |
| 969 | /* Number of hardware registers that go into the DWARF-2 unwind info. |
| 970 | If not defined, equals FIRST_PSEUDO_REGISTER. */ |
| 971 | |
| 972 | #define DWARF_FRAME_REGISTERS 17 |
| 973 | |
| 974 | /* 1 for registers that have pervasive standard uses |
| 975 | and are not available for the register allocator. |
| 976 | On the 80386, the stack pointer is such, as is the arg pointer. |
| 977 | |
| 978 | REX registers are disabled for 32bit targets in |
| 979 | TARGET_CONDITIONAL_REGISTER_USAGE. */ |
| 980 | |
| 981 | #define FIXED_REGISTERS \ |
| 982 | /*ax,dx,cx,bx,si,di,bp,sp,st,st1,st2,st3,st4,st5,st6,st7*/ \ |
| 983 | { 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, \ |
| 984 | /*arg,flags,fpsr,frame*/ \ |
| 985 | 1, 1, 1, 1, \ |
| 986 | /*xmm0,xmm1,xmm2,xmm3,xmm4,xmm5,xmm6,xmm7*/ \ |
| 987 | 0, 0, 0, 0, 0, 0, 0, 0, \ |
| 988 | /* mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7*/ \ |
| 989 | 0, 0, 0, 0, 0, 0, 0, 0, \ |
| 990 | /* r8, r9, r10, r11, r12, r13, r14, r15*/ \ |
| 991 | 0, 0, 0, 0, 0, 0, 0, 0, \ |
| 992 | /*xmm8,xmm9,xmm10,xmm11,xmm12,xmm13,xmm14,xmm15*/ \ |
| 993 | 0, 0, 0, 0, 0, 0, 0, 0, \ |
| 994 | /*xmm16,xmm17,xmm18,xmm19,xmm20,xmm21,xmm22,xmm23*/ \ |
| 995 | 0, 0, 0, 0, 0, 0, 0, 0, \ |
| 996 | /*xmm24,xmm25,xmm26,xmm27,xmm28,xmm29,xmm30,xmm31*/ \ |
| 997 | 0, 0, 0, 0, 0, 0, 0, 0, \ |
| 998 | /* k0, k1, k2, k3, k4, k5, k6, k7*/ \ |
| 999 | 0, 0, 0, 0, 0, 0, 0, 0, \ |
| 1000 | /* r16, r17, r18, r19, r20, r21, r22, r23*/ \ |
| 1001 | 0, 0, 0, 0, 0, 0, 0, 0, \ |
| 1002 | /* r24, r25, r26, r27, r28, r29, r30, r31*/ \ |
| 1003 | 0, 0, 0, 0, 0, 0, 0, 0} \ |
| 1004 | |
| 1005 | /* 1 for registers not available across function calls. |
| 1006 | These must include the FIXED_REGISTERS and also any |
| 1007 | registers that can be used without being saved. |
| 1008 | The latter must include the registers where values are returned |
| 1009 | and the register where structure-value addresses are passed. |
| 1010 | Aside from that, you can include as many other registers as you like. |
| 1011 | |
| 1012 | Value is set to 1 if the register is call used unconditionally. |
| 1013 | Bit one is set if the register is call used on TARGET_32BIT ABI. |
| 1014 | Bit two is set if the register is call used on TARGET_64BIT ABI. |
| 1015 | Bit three is set if the register is call used on TARGET_64BIT_MS_ABI. |
| 1016 | |
| 1017 | Proper values are computed in TARGET_CONDITIONAL_REGISTER_USAGE. */ |
| 1018 | |
/* Select the call-used bit (see the bit legend in the comment above)
   for the current ABI: bit 3 for the 64-bit MS ABI, bit 2 for the
   64-bit SysV ABI, bit 1 for the 32-bit ABI.  The result is ANDed
   against the CALL_USED_REGISTERS entries below.  */
#define CALL_USED_REGISTERS_MASK(IS_64BIT_MS_ABI) \
  ((IS_64BIT_MS_ABI) ? (1 << 3) : TARGET_64BIT ? (1 << 2) : (1 << 1))
| 1021 | |
| 1022 | #define CALL_USED_REGISTERS \ |
| 1023 | /*ax,dx,cx,bx,si,di,bp,sp,st,st1,st2,st3,st4,st5,st6,st7*/ \ |
| 1024 | { 1, 1, 1, 0, 4, 4, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, \ |
| 1025 | /*arg,flags,fpsr,frame*/ \ |
| 1026 | 1, 1, 1, 1, \ |
| 1027 | /*xmm0,xmm1,xmm2,xmm3,xmm4,xmm5,xmm6,xmm7*/ \ |
| 1028 | 1, 1, 1, 1, 1, 1, 6, 6, \ |
| 1029 | /* mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7*/ \ |
| 1030 | 1, 1, 1, 1, 1, 1, 1, 1, \ |
| 1031 | /* r8, r9, r10, r11, r12, r13, r14, r15*/ \ |
| 1032 | 1, 1, 1, 1, 2, 2, 2, 2, \ |
| 1033 | /*xmm8,xmm9,xmm10,xmm11,xmm12,xmm13,xmm14,xmm15*/ \ |
| 1034 | 6, 6, 6, 6, 6, 6, 6, 6, \ |
| 1035 | /*xmm16,xmm17,xmm18,xmm19,xmm20,xmm21,xmm22,xmm23*/ \ |
| 1036 | 1, 1, 1, 1, 1, 1, 1, 1, \ |
| 1037 | /*xmm24,xmm25,xmm26,xmm27,xmm28,xmm29,xmm30,xmm31*/ \ |
| 1038 | 1, 1, 1, 1, 1, 1, 1, 1, \ |
| 1039 | /* k0, k1, k2, k3, k4, k5, k6, k7*/ \ |
| 1040 | 1, 1, 1, 1, 1, 1, 1, 1, \ |
| 1041 | /* r16, r17, r18, r19, r20, r21, r22, r23*/ \ |
| 1042 | 1, 1, 1, 1, 1, 1, 1, 1, \ |
| 1043 | /* r24, r25, r26, r27, r28, r29, r30, r31*/ \ |
| 1044 | 1, 1, 1, 1, 1, 1, 1, 1} \ |
| 1045 | |
| 1046 | /* Order in which to allocate registers. Each register must be |
| 1047 | listed once, even those in FIXED_REGISTERS. List frame pointer |
| 1048 | late and fixed registers last. Note that, in general, we prefer |
| 1049 | registers listed in CALL_USED_REGISTERS, keeping the others |
| 1050 | available for storage of persistent values. |
| 1051 | |
   ADJUST_REG_ALLOC_ORDER actually overwrites the order, so this is
   just an empty initializer for the array.  */
| 1054 | |
| 1055 | #define REG_ALLOC_ORDER \ |
| 1056 | { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, \ |
| 1057 | 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, \ |
| 1058 | 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, \ |
| 1059 | 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, \ |
| 1060 | 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, \ |
| 1061 | 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91} |
| 1062 | |
| 1063 | /* ADJUST_REG_ALLOC_ORDER is a macro which permits reg_alloc_order |
| 1064 | to be rearranged based on a particular function. When using sse math, |
| 1065 | we want to allocate SSE before x87 registers and vice versa. */ |
| 1066 | |
| 1067 | #define ADJUST_REG_ALLOC_ORDER x86_order_regs_for_local_alloc () |
| 1068 | |
| 1069 | #define INSN_BASE_REG_CLASS(INSN) \ |
| 1070 | ix86_insn_base_reg_class (INSN) |
| 1071 | |
| 1072 | #define REGNO_OK_FOR_INSN_BASE_P(NUM, INSN) \ |
| 1073 | ix86_regno_ok_for_insn_base_p (NUM, INSN) |
| 1074 | |
| 1075 | #define INSN_INDEX_REG_CLASS(INSN) \ |
| 1076 | ix86_insn_index_reg_class (INSN) |
| 1077 | |
| 1078 | #define OVERRIDE_ABI_FORMAT(FNDECL) ix86_call_abi_override (FNDECL) |
| 1079 | |
| 1080 | #define HARD_REGNO_NREGS_HAS_PADDING(REGNO, MODE) \ |
| 1081 | (TARGET_128BIT_LONG_DOUBLE && !TARGET_64BIT \ |
| 1082 | && GENERAL_REGNO_P (REGNO) \ |
| 1083 | && ((MODE) == XFmode || (MODE) == XCmode)) |
| 1084 | |
| 1085 | #define HARD_REGNO_NREGS_WITH_PADDING(REGNO, MODE) ((MODE) == XFmode ? 4 : 8) |
| 1086 | |
| 1087 | #define REGMODE_NATURAL_SIZE(MODE) ix86_regmode_natural_size (MODE) |
| 1088 | |
| 1089 | #define VALID_AVX256_REG_MODE(MODE) \ |
| 1090 | ((MODE) == V32QImode || (MODE) == V16HImode || (MODE) == V8SImode \ |
| 1091 | || (MODE) == V4DImode || (MODE) == V2TImode || (MODE) == V8SFmode \ |
| 1092 | || (MODE) == V4DFmode || (MODE) == V16HFmode || (MODE) == V16BFmode) |
| 1093 | |
| 1094 | #define VALID_AVX256_REG_OR_OI_MODE(MODE) \ |
| 1095 | (VALID_AVX256_REG_MODE (MODE) || (MODE) == OImode) |
| 1096 | |
| 1097 | #define VALID_AVX512F_SCALAR_MODE(MODE) \ |
| 1098 | ((MODE) == DImode || (MODE) == DFmode \ |
| 1099 | || (MODE) == SImode || (MODE) == SFmode \ |
| 1100 | || (MODE) == HImode || (MODE) == HFmode || (MODE) == BFmode) |
| 1101 | |
| 1102 | #define VALID_AVX512F_REG_MODE(MODE) \ |
| 1103 | ((MODE) == V8DImode || (MODE) == V8DFmode || (MODE) == V64QImode \ |
| 1104 | || (MODE) == V16SImode || (MODE) == V16SFmode || (MODE) == V32HImode \ |
| 1105 | || (MODE) == V4TImode || (MODE) == V32HFmode || (MODE) == V32BFmode) |
| 1106 | |
| 1107 | #define VALID_AVX512F_REG_OR_XI_MODE(MODE) \ |
| 1108 | (VALID_AVX512F_REG_MODE (MODE) || (MODE) == XImode) |
| 1109 | |
| 1110 | #define VALID_AVX512VL_128_REG_MODE(MODE) \ |
| 1111 | ((MODE) == V2DImode || (MODE) == V2DFmode || (MODE) == V16QImode \ |
| 1112 | || (MODE) == V4SImode || (MODE) == V4SFmode || (MODE) == V8HImode \ |
| 1113 | || (MODE) == TFmode || (MODE) == V1TImode || (MODE) == V8HFmode \ |
| 1114 | || (MODE) == V8BFmode || (MODE) == TImode) |
| 1115 | |
| 1116 | #define VALID_AVX512FP16_REG_MODE(MODE) \ |
| 1117 | ((MODE) == V8HFmode || (MODE) == V16HFmode || (MODE) == V32HFmode) |
| 1118 | |
| 1119 | #define VALID_SSE2_TYPE_MODE(MODE) \ |
| 1120 | ((MODE) == HFmode || (MODE) == BFmode \ |
| 1121 | || (MODE) == HCmode || (MODE) == BCmode) |
| 1122 | |
| 1123 | #define VALID_SSE2_REG_MODE(MODE) \ |
| 1124 | ((MODE) == V16QImode || (MODE) == V8HImode || (MODE) == V2DFmode \ |
| 1125 | || (MODE) == V8HFmode || (MODE) == V4HFmode || (MODE) == V2HFmode \ |
| 1126 | || (MODE) == V8BFmode || (MODE) == V4BFmode || (MODE) == V2BFmode \ |
| 1127 | || (MODE) == V4QImode || (MODE) == V2HImode || (MODE) == V1SImode \ |
| 1128 | || (MODE) == V2DImode || (MODE) == V2QImode || (MODE) == HImode \ |
| 1129 | || (MODE) == DFmode || (MODE) == DImode \ |
| 1130 | || (MODE) == HFmode || (MODE) == BFmode) |
| 1131 | |
| 1132 | #define VALID_SSE_REG_MODE(MODE) \ |
| 1133 | ((MODE) == V1TImode || (MODE) == TImode \ |
| 1134 | || (MODE) == V4SFmode || (MODE) == V4SImode \ |
| 1135 | || (MODE) == SFmode || (MODE) == SImode \ |
| 1136 | || (MODE) == TFmode || (MODE) == TDmode) |
| 1137 | |
| 1138 | #define VALID_MMX_REG_MODE_3DNOW(MODE) \ |
| 1139 | ((MODE) == V2SFmode || (MODE) == SFmode) |
| 1140 | |
| 1141 | /* To match ia32 psABI, V4HFmode should be added here. */ |
| 1142 | #define VALID_MMX_REG_MODE(MODE) \ |
| 1143 | ((MODE) == V1DImode || (MODE) == DImode \ |
| 1144 | || (MODE) == V2SImode || (MODE) == SImode \ |
| 1145 | || (MODE) == V4HImode || (MODE) == V8QImode \ |
| 1146 | || (MODE) == V4HFmode || (MODE) == V4BFmode) |
| 1147 | |
| 1148 | #define VALID_MASK_REG_MODE(MODE) ((MODE) == HImode || (MODE) == QImode) |
| 1149 | |
| 1150 | #define VALID_MASK_AVX512BW_MODE(MODE) ((MODE) == SImode || (MODE) == DImode) |
| 1151 | |
| 1152 | #define VALID_FP_MODE_P(MODE) \ |
| 1153 | ((MODE) == SFmode || (MODE) == DFmode || (MODE) == XFmode \ |
| 1154 | || (MODE) == SCmode || (MODE) == DCmode || (MODE) == XCmode) |
| 1155 | |
| 1156 | #define VALID_INT_MODE_P(MODE) \ |
| 1157 | ((MODE) == QImode || (MODE) == HImode \ |
| 1158 | || (MODE) == SImode || (MODE) == DImode \ |
| 1159 | || (MODE) == CQImode || (MODE) == CHImode \ |
| 1160 | || (MODE) == CSImode || (MODE) == CDImode \ |
| 1161 | || (MODE) == SDmode || (MODE) == DDmode \ |
| 1162 | || (MODE) == HFmode || (MODE) == HCmode || (MODE) == BFmode \ |
| 1163 | || (MODE) == V2HImode || (MODE) == V2HFmode || (MODE) == V2BFmode \ |
| 1164 | || (MODE) == V1SImode || (MODE) == V4QImode || (MODE) == V2QImode \ |
| 1165 | || (TARGET_64BIT \ |
| 1166 | && ((MODE) == TImode || (MODE) == CTImode \ |
| 1167 | || (MODE) == TFmode || (MODE) == TCmode \ |
| 1168 | || (MODE) == V8QImode || (MODE) == V4HImode \ |
| 1169 | || (MODE) == V2SImode || (MODE) == TDmode))) |
| 1170 | |
| 1171 | /* Return true for modes passed in SSE registers. */ |
| 1172 | #define SSE_REG_MODE_P(MODE) \ |
| 1173 | ((MODE) == V1TImode || (MODE) == TImode || (MODE) == V16QImode \ |
| 1174 | || (MODE) == TFmode || (MODE) == V8HImode || (MODE) == V2DFmode \ |
| 1175 | || (MODE) == V2DImode || (MODE) == V4SFmode || (MODE) == V4SImode \ |
| 1176 | || (MODE) == V32QImode || (MODE) == V16HImode || (MODE) == V8SImode \ |
| 1177 | || (MODE) == V4DImode || (MODE) == V8SFmode || (MODE) == V4DFmode \ |
| 1178 | || (MODE) == V2TImode || (MODE) == V8DImode || (MODE) == V64QImode \ |
| 1179 | || (MODE) == V16SImode || (MODE) == V32HImode || (MODE) == V8DFmode \ |
| 1180 | || (MODE) == V16SFmode \ |
| 1181 | || (MODE) == V32HFmode || (MODE) == V16HFmode || (MODE) == V8HFmode \ |
| 1182 | || (MODE) == V32BFmode || (MODE) == V16BFmode || (MODE) == V8BFmode) |
| 1183 | |
| 1184 | #define X87_FLOAT_MODE_P(MODE) \ |
| 1185 | (TARGET_80387 && ((MODE) == SFmode || (MODE) == DFmode || (MODE) == XFmode)) |
| 1186 | |
| 1187 | #define SSE_FLOAT_MODE_P(MODE) \ |
| 1188 | ((TARGET_SSE && (MODE) == SFmode) || (TARGET_SSE2 && (MODE) == DFmode)) |
| 1189 | |
| 1190 | #define SSE_FLOAT_MODE_SSEMATH_OR_HFBF_P(MODE) \ |
| 1191 | ((SSE_FLOAT_MODE_P (MODE) && TARGET_SSE_MATH) \ |
| 1192 | || (TARGET_AVX512FP16 && (MODE) == HFmode) \ |
| 1193 | || (TARGET_AVX10_2 && (MODE) == BFmode)) |
| 1194 | |
| 1195 | #define FMA4_VEC_FLOAT_MODE_P(MODE) \ |
| 1196 | (TARGET_FMA4 && ((MODE) == V4SFmode || (MODE) == V2DFmode \ |
| 1197 | || (MODE) == V8SFmode || (MODE) == V4DFmode)) |
| 1198 | |
| 1199 | #define VALID_BCST_MODE_P(MODE) \ |
| 1200 | ((MODE) == SFmode || (MODE) == DFmode \ |
| 1201 | || (MODE) == SImode || (MODE) == DImode \ |
| 1202 | || (MODE) == HFmode) |
| 1203 | |
/* It is possible to write patterns to move flags; but until someone
   does it, avoid copies of CCmode registers.  */
| 1206 | #define AVOID_CCMODE_COPIES |
| 1207 | |
| 1208 | /* Specify the modes required to caller save a given hard regno. |
| 1209 | We do this on i386 to prevent flags from being saved at all. |
| 1210 | |
| 1211 | Kill any attempts to combine saving of modes. */ |
| 1212 | |
/* Arm by arm:
   - the flags register is never caller-saved (VOIDmode);
   - a VOIDmode query spanning several registers has no single answer
     (VOIDmode kills the attempt to combine saves, per the comment
     above);
   - a VOIDmode query for one register picks a suitable hard-reg mode;
   - HImode is widened to SImode unless the register is a mask
     register or a general register on a TARGET_PARTIAL_REG_STALL
     target;
   - QImode is widened to SImode unless the register has a QImode
     form (ANY_QI_REGNO_P) or is a mask register.  */
#define HARD_REGNO_CALLER_SAVE_MODE(REGNO, NREGS, MODE)			\
  (CC_REGNO_P (REGNO) ? VOIDmode					\
   : (MODE) == VOIDmode && (NREGS) != 1 ? VOIDmode			\
   : (MODE) == VOIDmode ? choose_hard_reg_mode ((REGNO), (NREGS), NULL) \
   : (MODE) == HImode && !((GENERAL_REGNO_P (REGNO)			\
			    && TARGET_PARTIAL_REG_STALL)		\
			   || MASK_REGNO_P (REGNO)) ? SImode		\
   : (MODE) == QImode && !(ANY_QI_REGNO_P (REGNO)			\
			   || MASK_REGNO_P (REGNO)) ? SImode		\
   : (MODE))
| 1223 | |
| 1224 | /* Specify the registers used for certain standard purposes. |
| 1225 | The values of these macros are register numbers. */ |
| 1226 | |
| 1227 | /* on the 386 the pc register is %eip, and is not usable as a general |
| 1228 | register. The ordinary mov instructions won't work */ |
| 1229 | /* #define PC_REGNUM */ |
| 1230 | |
| 1231 | /* Base register for access to arguments of the function. */ |
| 1232 | #define ARG_POINTER_REGNUM ARGP_REG |
| 1233 | |
| 1234 | /* Register to use for pushing function arguments. */ |
| 1235 | #define STACK_POINTER_REGNUM SP_REG |
| 1236 | |
| 1237 | /* Base register for access to local variables of the function. */ |
| 1238 | #define FRAME_POINTER_REGNUM FRAME_REG |
| 1239 | #define HARD_FRAME_POINTER_REGNUM BP_REG |
| 1240 | |
| 1241 | #define FIRST_INT_REG AX_REG |
| 1242 | #define LAST_INT_REG SP_REG |
| 1243 | |
| 1244 | #define FIRST_INDEX_REG AX_REG |
| 1245 | #define LAST_INDEX_REG BP_REG |
| 1246 | |
| 1247 | #define FIRST_QI_REG AX_REG |
| 1248 | #define LAST_QI_REG BX_REG |
| 1249 | |
| 1250 | /* First & last stack-like regs */ |
| 1251 | #define FIRST_STACK_REG ST0_REG |
| 1252 | #define LAST_STACK_REG ST7_REG |
| 1253 | |
| 1254 | #define FIRST_SSE_REG XMM0_REG |
| 1255 | #define LAST_SSE_REG XMM7_REG |
| 1256 | |
| 1257 | #define FIRST_MMX_REG MM0_REG |
| 1258 | #define LAST_MMX_REG MM7_REG |
| 1259 | |
| 1260 | #define FIRST_REX_INT_REG R8_REG |
| 1261 | #define LAST_REX_INT_REG R15_REG |
| 1262 | |
| 1263 | #define FIRST_REX_SSE_REG XMM8_REG |
| 1264 | #define LAST_REX_SSE_REG XMM15_REG |
| 1265 | |
| 1266 | #define FIRST_EXT_REX_SSE_REG XMM16_REG |
| 1267 | #define LAST_EXT_REX_SSE_REG XMM31_REG |
| 1268 | |
| 1269 | #define FIRST_MASK_REG MASK0_REG |
| 1270 | #define LAST_MASK_REG MASK7_REG |
| 1271 | |
| 1272 | #define FIRST_REX2_INT_REG R16_REG |
| 1273 | #define LAST_REX2_INT_REG R31_REG |
| 1274 | |
| 1275 | /* Override this in other tm.h files to cope with various OS lossage |
| 1276 | requiring a frame pointer. */ |
| 1277 | #ifndef SUBTARGET_FRAME_POINTER_REQUIRED |
| 1278 | #define SUBTARGET_FRAME_POINTER_REQUIRED 0 |
| 1279 | #endif |
| 1280 | |
| 1281 | /* Define the shadow offset for asan. Other OS's can override in the |
| 1282 | respective tm.h files. */ |
| 1283 | #ifndef SUBTARGET_SHADOW_OFFSET |
| 1284 | #define SUBTARGET_SHADOW_OFFSET \ |
| 1285 | (TARGET_LP64 ? HOST_WIDE_INT_C (0x7fff8000) : HOST_WIDE_INT_1 << 29) |
| 1286 | #endif |
| 1287 | |
| 1288 | /* Make sure we can access arbitrary call frames. */ |
| 1289 | #define SETUP_FRAME_ADDRESSES() ix86_setup_frame_addresses () |
| 1290 | |
| 1291 | /* Register to hold the addressing base for position independent |
| 1292 | code access to data items. We don't use PIC pointer for 64bit |
| 1293 | mode. Define the regnum to dummy value to prevent gcc from |
| 1294 | pessimizing code dealing with EBX. |
| 1295 | |
| 1296 | To avoid clobbering a call-saved register unnecessarily, we renumber |
| 1297 | the pic register when possible. The change is visible after the |
| 1298 | prologue has been emitted. */ |
| 1299 | |
| 1300 | #define REAL_PIC_OFFSET_TABLE_REGNUM (TARGET_64BIT ? R15_REG : BX_REG) |
| 1301 | |
| 1302 | #define PIC_OFFSET_TABLE_REGNUM \ |
| 1303 | (ix86_use_pseudo_pic_reg () \ |
| 1304 | ? (pic_offset_table_rtx \ |
| 1305 | ? INVALID_REGNUM \ |
| 1306 | : REAL_PIC_OFFSET_TABLE_REGNUM) \ |
| 1307 | : INVALID_REGNUM) |
| 1308 | |
| 1309 | #define GOT_SYMBOL_NAME "_GLOBAL_OFFSET_TABLE_" |
| 1310 | |
| 1311 | /* This is overridden by <cygwin.h>. */ |
| 1312 | #define MS_AGGREGATE_RETURN 0 |
| 1313 | |
| 1314 | #define KEEP_AGGREGATE_RETURN_POINTER 0 |
| 1315 | |
| 1316 | /* Define the classes of registers for register constraints in the |
| 1317 | machine description. Also define ranges of constants. |
| 1318 | |
| 1319 | One of the classes must always be named ALL_REGS and include all hard regs. |
| 1320 | If there is more than one class, another class must be named NO_REGS |
| 1321 | and contain no registers. |
| 1322 | |
| 1323 | The name GENERAL_REGS must be the name of a class (or an alias for |
| 1324 | another name such as ALL_REGS). This is the class of registers |
| 1325 | that is allowed by "g" or "r" in a register constraint. |
| 1326 | Also, registers outside this class are allocated only when |
| 1327 | instructions express preferences for them. |
| 1328 | |
| 1329 | The classes must be numbered in nondecreasing order; that is, |
| 1330 | a larger-numbered class must never be contained completely |
| 1331 | in a smaller-numbered class. This is why CLOBBERED_REGS class |
| 1332 | is listed early, even though in 64-bit mode it contains more |
| 1333 | registers than just %eax, %ecx, %edx. |
| 1334 | |
| 1335 | For any two classes, it is very desirable that there be another |
| 1336 | class that represents their union. |
| 1337 | |
| 1338 | The flags and fpsr registers are in no class. */ |
| 1339 | |
/* NOTE: keep this enumeration in sync with REG_CLASS_NAMES and
   REG_CLASS_CONTENTS below; all three are indexed identically.  */
enum reg_class
{
  NO_REGS,
  AREG, DREG, CREG, BREG, SIREG, DIREG,
  AD_REGS,			/* %eax/%edx for DImode */
  CLOBBERED_REGS,		/* call-clobbered integer registers */
  Q_REGS,			/* %eax %ebx %ecx %edx */
  NON_Q_REGS,			/* %esi %edi %ebp %esp */
  TLS_GOTBASE_REGS,		/* %ebx %ecx %edx %esi %edi %ebp */
  LEGACY_GENERAL_REGS,		/* %eax %ebx %ecx %edx %esi %edi %ebp %esp */
  LEGACY_INDEX_REGS,		/* %eax %ebx %ecx %edx %esi %edi %ebp */
  GENERAL_REGS,			/* %eax %ebx %ecx %edx %esi %edi %ebp %esp
				   %r8 %r9 %r10 %r11 %r12 %r13 %r14 %r15
				   %r16 %r17 %r18 %r19 %r20 %r21 %r22 %r23
				   %r24 %r25 %r26 %r27 %r28 %r29 %r30 %r31 */
  INDEX_REGS,			/* %eax %ebx %ecx %edx %esi %edi %ebp
				   %r8 %r9 %r10 %r11 %r12 %r13 %r14 %r15
				   %r16 %r17 %r18 %r19 %r20 %r21 %r22 %r23
				   %r24 %r25 %r26 %r27 %r28 %r29 %r30 %r31 */
  GENERAL_GPR16,		/* %eax %ebx %ecx %edx %esi %edi %ebp %esp
				   %r8 %r9 %r10 %r11 %r12 %r13 %r14 %r15 */
  INDEX_GPR16,			/* %eax %ebx %ecx %edx %esi %edi %ebp
				   %r8 %r9 %r10 %r11 %r12 %r13 %r14 %r15 */
  FP_TOP_REG, FP_SECOND_REG,	/* %st(0) %st(1) */
  FLOAT_REGS,
  SSE_FIRST_REG,
  NO_REX_SSE_REGS,
  SSE_REGS,
  ALL_SSE_REGS,
  MMX_REGS,
  FLOAT_SSE_REGS,
  FLOAT_INT_REGS,
  INT_SSE_REGS,
  FLOAT_INT_SSE_REGS,
  MASK_REGS,
  ALL_MASK_REGS,
  INT_MASK_REGS,
  ALL_REGS,
  LIM_REG_CLASSES		/* Not a class; count of classes.  */
};
| 1380 | |
#define N_REG_CLASSES ((int) LIM_REG_CLASSES)

/* Subset tests: true iff every register in CLASS is of the given
   kind.  */
#define INTEGER_CLASS_P(CLASS) \
  reg_class_subset_p ((CLASS), GENERAL_REGS)
#define FLOAT_CLASS_P(CLASS) \
  reg_class_subset_p ((CLASS), FLOAT_REGS)
#define SSE_CLASS_P(CLASS) \
  reg_class_subset_p ((CLASS), ALL_SSE_REGS)
#define INT_SSE_CLASS_P(CLASS) \
  reg_class_subset_p ((CLASS), INT_SSE_REGS)
/* Exact match, not subset: only MMX_REGS itself qualifies.  */
#define MMX_CLASS_P(CLASS) \
  ((CLASS) == MMX_REGS)
#define MASK_CLASS_P(CLASS) \
  reg_class_subset_p ((CLASS), ALL_MASK_REGS)
/* Intersection tests: true iff CLASS contains at least one register
   of the given kind.  */
#define MAYBE_INTEGER_CLASS_P(CLASS) \
  reg_classes_intersect_p ((CLASS), GENERAL_REGS)
#define MAYBE_FLOAT_CLASS_P(CLASS) \
  reg_classes_intersect_p ((CLASS), FLOAT_REGS)
#define MAYBE_SSE_CLASS_P(CLASS) \
  reg_classes_intersect_p ((CLASS), ALL_SSE_REGS)
#define MAYBE_MMX_CLASS_P(CLASS) \
  reg_classes_intersect_p ((CLASS), MMX_REGS)
#define MAYBE_MASK_CLASS_P(CLASS) \
  reg_classes_intersect_p ((CLASS), ALL_MASK_REGS)

#define Q_CLASS_P(CLASS) \
  reg_class_subset_p ((CLASS), Q_REGS)

#define MAYBE_NON_Q_CLASS_P(CLASS) \
  reg_classes_intersect_p ((CLASS), NON_Q_REGS)
| 1411 | |
/* Give names of register classes as strings for dump file.
   Must match enum reg_class above, entry for entry.  */

#define REG_CLASS_NAMES \
{  "NO_REGS",				\
   "AREG", "DREG", "CREG", "BREG",	\
   "SIREG", "DIREG",			\
   "AD_REGS",				\
   "CLOBBERED_REGS",			\
   "Q_REGS", "NON_Q_REGS",		\
   "TLS_GOTBASE_REGS",			\
   "LEGACY_GENERAL_REGS",		\
   "LEGACY_INDEX_REGS",			\
   "GENERAL_REGS",			\
   "INDEX_REGS",			\
   "GENERAL_GPR16",			\
   "INDEX_GPR16",			\
   "FP_TOP_REG", "FP_SECOND_REG",	\
   "FLOAT_REGS",			\
   "SSE_FIRST_REG",			\
   "NO_REX_SSE_REGS",			\
   "SSE_REGS",				\
   "ALL_SSE_REGS",			\
   "MMX_REGS",				\
   "FLOAT_SSE_REGS",			\
   "FLOAT_INT_REGS",			\
   "INT_SSE_REGS",			\
   "FLOAT_INT_SSE_REGS",		\
   "MASK_REGS",				\
   "ALL_MASK_REGS",			\
   "INT_MASK_REGS",			\
   "ALL_REGS" }
| 1443 | |
/* Define which registers fit in which classes.  This is an initializer
   for a vector of HARD_REG_SET of length N_REG_CLASSES.

   Each row is one HARD_REG_SET spelled as three 32-bit words: bit
   (N % 32) of word (N / 32) corresponds to hard register N, so the
   three words together cover hard registers 0..95.  Must match
   enum reg_class above, entry for entry.

   Note that CLOBBERED_REGS are calculated by
   TARGET_CONDITIONAL_REGISTER_USAGE.  */

#define REG_CLASS_CONTENTS \
{      { 0x0,        0x0,   0x0 },	/* NO_REGS */			\
      { 0x01,        0x0,   0x0 },	/* AREG */			\
      { 0x02,        0x0,   0x0 },	/* DREG */			\
      { 0x04,        0x0,   0x0 },	/* CREG */			\
      { 0x08,        0x0,   0x0 },	/* BREG */			\
      { 0x10,        0x0,   0x0 },	/* SIREG */			\
      { 0x20,        0x0,   0x0 },	/* DIREG */			\
      { 0x03,        0x0,   0x0 },	/* AD_REGS */			\
      { 0x07,        0x0,   0x0 },	/* CLOBBERED_REGS */		\
      { 0x0f,        0x0,   0x0 },	/* Q_REGS */			\
   { 0x900f0,        0x0,   0x0 },	/* NON_Q_REGS */		\
      { 0x7e,      0xff0,   0x0 },	/* TLS_GOTBASE_REGS */		\
   { 0x900ff,        0x0,   0x0 },	/* LEGACY_GENERAL_REGS */	\
      { 0x7f,        0x0,   0x0 },	/* LEGACY_INDEX_REGS */		\
   { 0x900ff,      0xff0, 0xffff000 },	/* GENERAL_REGS */		\
      { 0x7f,      0xff0, 0xffff000 },	/* INDEX_REGS */		\
   { 0x900ff,      0xff0,   0x0 },	/* GENERAL_GPR16 */		\
      { 0x7f,      0xff0,   0x0 },	/* INDEX_GPR16 */		\
     { 0x100,        0x0,   0x0 },	/* FP_TOP_REG */		\
     { 0x200,        0x0,   0x0 },	/* FP_SECOND_REG */		\
    { 0xff00,        0x0,   0x0 },	/* FLOAT_REGS */		\
  { 0x100000,        0x0,   0x0 },	/* SSE_FIRST_REG */		\
 { 0xff00000,        0x0,   0x0 },	/* NO_REX_SSE_REGS */		\
 { 0xff00000,    0xff000,   0x0 },	/* SSE_REGS */			\
 { 0xff00000, 0xfffff000,   0xf },	/* ALL_SSE_REGS */		\
{ 0xf0000000,        0xf,   0x0 },	/* MMX_REGS */			\
 { 0xff0ff00, 0xfffff000,   0xf },	/* FLOAT_SSE_REGS */		\
   { 0x9ffff,      0xff0, 0xffff000 },	/* FLOAT_INT_REGS */		\
 { 0xff900ff, 0xfffffff0, 0xffff00f },	/* INT_SSE_REGS */		\
 { 0xff9ffff, 0xfffffff0, 0xffff00f },	/* FLOAT_INT_SSE_REGS */	\
       { 0x0,        0x0, 0xfe0 },	/* MASK_REGS */			\
       { 0x0,        0x0, 0xff0 },	/* ALL_MASK_REGS */		\
   { 0x900ff,      0xff0, 0xffffff0 },	/* INT_MASK_REGS */		\
{ 0xffffffff, 0xffffffff, 0xfffffff }	/* ALL_REGS */			\
}
| 1486 | |
| 1487 | /* The same information, inverted: |
| 1488 | Return the class number of the smallest class containing |
| 1489 | reg number REGNO. This could be a conditional expression |
| 1490 | or could index an array. */ |
| 1491 | |
#define REGNO_REG_CLASS(REGNO) (regclass_map[(REGNO)])

/* The four legacy byte-addressable registers (%eax %ebx %ecx %edx).  */
#define QI_REG_P(X) (REG_P (X) && QI_REGNO_P (REGNO (X)))
#define QI_REGNO_P(N) IN_RANGE ((N), FIRST_QI_REG, LAST_QI_REG)

/* Legacy (non-REX) integer registers.  */
#define LEGACY_INT_REG_P(X) (REG_P (X) && LEGACY_INT_REGNO_P (REGNO (X)))
#define LEGACY_INT_REGNO_P(N) IN_RANGE ((N), FIRST_INT_REG, LAST_INT_REG)

#define LEGACY_INDEX_REG_P(X) (REG_P (X) && LEGACY_INDEX_REGNO_P (REGNO (X)))
#define LEGACY_INDEX_REGNO_P(N) \
  IN_RANGE ((N), FIRST_INDEX_REG, LAST_INDEX_REG)

/* REX-prefixed integer registers %r8-%r15.  */
#define REX_INT_REG_P(X) (REG_P (X) && REX_INT_REGNO_P (REGNO (X)))
#define REX_INT_REGNO_P(N) \
  IN_RANGE ((N), FIRST_REX_INT_REG, LAST_REX_INT_REG)

/* REX2-prefixed integer registers %r16-%r31.  */
#define REX2_INT_REG_P(X) (REG_P (X) && REX2_INT_REGNO_P (REGNO (X)))
#define REX2_INT_REGNO_P(N) \
  IN_RANGE ((N), FIRST_REX2_INT_REG, LAST_REX2_INT_REG)

/* Any general-purpose register: legacy, REX or REX2.  */
#define GENERAL_REG_P(X) (REG_P (X) && GENERAL_REGNO_P (REGNO (X)))
#define GENERAL_REGNO_P(N) \
  (LEGACY_INT_REGNO_P (N) || REX_INT_REGNO_P (N) || REX2_INT_REGNO_P (N))

#define INDEX_REG_P(X) (REG_P (X) && INDEX_REGNO_P (REGNO (X)))
#define INDEX_REGNO_P(N) \
  (LEGACY_INDEX_REGNO_P (N) || REX_INT_REGNO_P (N) || REX2_INT_REGNO_P (N))

/* General register encodable without a REX2 prefix.  */
#define GENERAL_GPR16_REGNO_P(N) \
  (LEGACY_INT_REGNO_P (N) || REX_INT_REGNO_P (N))

/* In 64-bit mode every general register is byte-addressable; in
   32-bit mode only the four QI registers are.  */
#define ANY_QI_REG_P(X) (REG_P (X) && ANY_QI_REGNO_P (REGNO (X)))
#define ANY_QI_REGNO_P(N) \
  (TARGET_64BIT ? GENERAL_REGNO_P (N) : QI_REGNO_P (N))

/* x87 floating-point stack registers.  */
#define STACK_REG_P(X) (REG_P (X) && STACK_REGNO_P (REGNO (X)))
#define STACK_REGNO_P(N) IN_RANGE ((N), FIRST_STACK_REG, LAST_STACK_REG)

/* Any SSE register: legacy %xmm0-7, REX %xmm8-15 or EVEX %xmm16-31.  */
#define SSE_REG_P(X) (REG_P (X) && SSE_REGNO_P (REGNO (X)))
#define SSE_REGNO_P(N) \
  (LEGACY_SSE_REGNO_P (N) \
   || REX_SSE_REGNO_P (N) \
   || EXT_REX_SSE_REGNO_P (N))

#define LEGACY_SSE_REGNO_P(N) \
  IN_RANGE ((N), FIRST_SSE_REG, LAST_SSE_REG)

#define REX_SSE_REGNO_P(N) \
  IN_RANGE ((N), FIRST_REX_SSE_REG, LAST_REX_SSE_REG)

#define EXT_REX_SSE_REG_P(X) (REG_P (X) && EXT_REX_SSE_REGNO_P (REGNO (X)))

#define EXT_REX_SSE_REGNO_P(N) \
  IN_RANGE ((N), FIRST_EXT_REX_SSE_REG, LAST_EXT_REX_SSE_REG)

/* Any floating-point capable register: x87 stack or SSE.  */
#define ANY_FP_REG_P(X) (REG_P (X) && ANY_FP_REGNO_P (REGNO (X)))
#define ANY_FP_REGNO_P(N) (STACK_REGNO_P (N) || SSE_REGNO_P (N))

#define MASK_REG_P(X) (REG_P (X) && MASK_REGNO_P (REGNO (X)))
#define MASK_REGNO_P(N) IN_RANGE ((N), FIRST_MASK_REG, LAST_MASK_REG)
/* True when N is at an even offset from FIRST_MASK_REG, i.e. usable
   as the first register of a mask register pair.  */
#define MASK_PAIR_REGNO_P(N) ((((N) - FIRST_MASK_REG) & 1) == 0)

#define MMX_REG_P(X) (REG_P (X) && MMX_REGNO_P (REGNO (X)))
#define MMX_REGNO_P(N) IN_RANGE ((N), FIRST_MMX_REG, LAST_MMX_REG)
| 1556 | |
/* True if X is (or hard register number N names) the flags register.
   Parameter renamed X -> N in CC_REGNO_P for consistency with the
   other *_REGNO_P macros above, which all take a register number N.  */
#define CC_REG_P(X) (REG_P (X) && CC_REGNO_P (REGNO (X)))
#define CC_REGNO_P(N) ((N) == FLAGS_REG)
| 1559 | |
/* SSE registers whose hardware number is a multiple of four
   (%xmm0, %xmm4, ..., %xmm28).  */
#define MOD4_SSE_REG_P(X) (REG_P (X) && MOD4_SSE_REGNO_P (REGNO (X)))
#define MOD4_SSE_REGNO_P(N) ((N) == XMM0_REG \
			     || (N) == XMM4_REG \
			     || (N) == XMM8_REG \
			     || (N) == XMM12_REG \
			     || (N) == XMM16_REG \
			     || (N) == XMM20_REG \
			     || (N) == XMM24_REG \
			     || (N) == XMM28_REG)

/* First floating point reg */
#define FIRST_FLOAT_REG FIRST_STACK_REG
/* True if X is %st(0), the top of the x87 register stack.  */
#define STACK_TOP_P(X) (REG_P (X) && REGNO (X) == FIRST_FLOAT_REG)

/* Map a logical SSE register index 0..31 to the hard register number,
   spanning the three non-contiguous SSE register ranges.  */
#define GET_SSE_REGNO(N)			\
  ((N) < 8 ? FIRST_SSE_REG + (N)		\
   : (N) < 16 ? FIRST_REX_SSE_REG + (N) - 8	\
   : FIRST_EXT_REX_SSE_REG + (N) - 16)

/* The class value for index registers, and the one for base regs.  */

#define INDEX_REG_CLASS INDEX_REGS
#define BASE_REG_CLASS GENERAL_REGS
| 1583 | |
/* Stack layout; function entry, exit and calling.  */

/* Define this if pushing a word on the stack
   makes the stack pointer a smaller address.  */
#define STACK_GROWS_DOWNWARD 1

/* Define this to nonzero if the nominal address of the stack frame
   is at the high-address end of the local variables;
   that is, each additional local variable allocated
   goes at a more negative offset in the frame.  */
#define FRAME_GROWS_DOWNWARD 1

/* Number of bytes actually pushed for a push of BYTES bytes;
   the target hook handles any rounding.  */
#define PUSH_ROUNDING(BYTES) ix86_push_rounding (BYTES)
| 1597 | |
| 1598 | /* If defined, the maximum amount of space required for outgoing arguments |
| 1599 | will be computed and placed into the variable `crtl->outgoing_args_size'. |
| 1600 | No space will be pushed onto the stack for each call; instead, the |
| 1601 | function prologue should increase the stack frame size by this amount. |
| 1602 | |
| 1603 | In 32bit mode enabling argument accumulation results in about 5% code size |
| 1604 | growth because move instructions are less compact than push. In 64bit |
| 1605 | mode the difference is less drastic but visible. |
| 1606 | |
| 1607 | FIXME: Unlike earlier implementations, the size of unwind info seems to |
| 1608 | actually grow with accumulation. Is that because accumulated args |
   unwind info became unnecessarily bloated?
| 1610 | |
| 1611 | With the 64-bit MS ABI, we can generate correct code with or without |
| 1612 | accumulated args, but because of OUTGOING_REG_PARM_STACK_SPACE the code |
| 1613 | generated without accumulated args is terrible. |
| 1614 | |
| 1615 | If stack probes are required, the space used for large function |
| 1616 | arguments on the stack must also be probed, so enable |
| 1617 | -maccumulate-outgoing-args so this happens in the prologue. |
| 1618 | |
| 1619 | We must use argument accumulation in interrupt function if stack |
| 1620 | may be realigned to avoid DRAP. */ |
| 1621 | |
/* Each disjunct below corresponds to one of the cases discussed in
   the comment above: explicit option (when optimizing for speed),
   realigned non-normal (e.g. interrupt) functions, stack probing,
   the 64-bit MS ABI, and profiled Mach-O code.  */
#define ACCUMULATE_OUTGOING_ARGS \
  ((TARGET_ACCUMULATE_OUTGOING_ARGS \
    && optimize_function_for_speed_p (cfun)) \
   || (cfun->machine->func_type != TYPE_NORMAL \
       && crtl->stack_realign_needed) \
   || TARGET_STACK_PROBE \
   || TARGET_64BIT_MS_ABI \
   || (TARGET_MACHO && crtl->profile))

/* We want the stack and args grow in opposite directions, even if
   targetm.calls.push_argument returns false.  */
#define PUSH_ARGS_REVERSED 1
| 1634 | |
/* Offset of first parameter from the argument pointer register value.  */
#define FIRST_PARM_OFFSET(FNDECL) 0

/* Define this macro if functions should assume that stack space has been
   allocated for arguments even when their values are passed in registers.

   The value of this macro is the size, in bytes, of the area reserved for
   arguments passed in registers for the function represented by FNDECL.

   This space can be allocated by the caller, or be a part of the
   machine-dependent stack frame: `OUTGOING_REG_PARM_STACK_SPACE' says
   which.  */
#define REG_PARM_STACK_SPACE(FNDECL) ix86_reg_parm_stack_space (FNDECL)

/* Nonzero (caller allocates the register-parameter area) only for the
   64-bit MS ABI.  */
#define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) \
  (TARGET_64BIT && ix86_function_type_abi (FNTYPE) == MS_ABI)

/* Define how to find the value returned by a library function
   assuming the value has mode MODE.  */

#define LIBCALL_VALUE(MODE) ix86_libcall_value (MODE)

/* Define the size of the result block used for communication between
   untyped_call and untyped_return.  The block contains a DImode value
   followed by the block used by fnsave and frstor.  */

#define APPLY_RESULT_SIZE (8+108)

/* 1 if N is a possible register number for function argument passing.  */
#define FUNCTION_ARG_REGNO_P(N) ix86_function_arg_regno_p (N)
| 1665 | |
| 1666 | /* Define a data type for recording info about an argument list |
| 1667 | during the scan of that argument list. This data type should |
| 1668 | hold all necessary information about the function itself |
| 1669 | and about the args processed so far, enough to enable macros |
| 1670 | such as FUNCTION_ARG to determine where the next arg should go. */ |
| 1671 | |
typedef struct ix86_args {
  int words;			/* # words passed so far */
  int nregs;			/* # registers available for passing */
  int regno;			/* next available register number */
  int fastcall;			/* fastcall or thiscall calling convention
				   is used */
  int sse_words;		/* # sse words passed so far */
  int sse_nregs;		/* # sse registers available for passing */
  int warn_avx512f;		/* True when we want to warn
				   about AVX512F ABI.  */
  int warn_avx;			/* True when we want to warn about AVX ABI.  */
  int warn_sse;			/* True when we want to warn about SSE ABI.  */
  int warn_mmx;			/* True when we want to warn about MMX ABI.  */
  int warn_empty;		/* True when we want to warn about empty classes
				   passing ABI change.  */
  int sse_regno;		/* next available sse register number */
  int mmx_words;		/* # mmx words passed so far */
  int mmx_nregs;		/* # mmx registers available for passing */
  int mmx_regno;		/* next available mmx register number */
  int maybe_vaarg;		/* true for calls to possibly variadic
				   functions.  */
  int caller;			/* true if it is caller.  */
  int float_in_sse;		/* Set to 1 or 2 for 32bit targets if
				   SFmode/DFmode arguments should be passed
				   in SSE registers.  Otherwise 0.  */
  int stdarg;			/* Set to 1 if function is stdarg.  */
  enum calling_abi call_abi;	/* Set to SYSV_ABI for sysv abi. Otherwise
				   MS_ABI for ms abi.  */
  tree decl;			/* Callee decl.  */
} CUMULATIVE_ARGS;
| 1701 | |
| 1702 | /* Initialize a variable CUM of type CUMULATIVE_ARGS |
| 1703 | for a call to a function whose data type is FNTYPE. |
| 1704 | For a library call, FNTYPE is 0. */ |
| 1705 | |
/* N_NAMED_ARGS is -1 for a libcall; any other value means FNTYPE is a
   real function type, which init_cumulative_args receives as a flag.  */
#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
  init_cumulative_args (&(CUM), (FNTYPE), (LIBNAME), (FNDECL), \
			(N_NAMED_ARGS) != -1)

/* Output assembler code to FILE to increment profiler label # LABELNO
   for profiling a function entry.  */

#define FUNCTION_PROFILER(FILE, LABELNO) \
  x86_function_profiler ((FILE), (LABELNO))

/* Name of the ordinary profiling hook, called after the prologue.  */
#define MCOUNT_NAME "_mcount"

/* Profiling hook used with -mfentry, called before the prologue.  */
#define MCOUNT_NAME_BEFORE_PROLOGUE "__fentry__"

/* Register used to pass the profile count to the profiling hook.  */
#define PROFILE_COUNT_REGISTER "edx"
| 1721 | |
| 1722 | /* EXIT_IGNORE_STACK should be nonzero if, when returning from a function, |
| 1723 | the stack pointer does not matter. The value is tested only in |
| 1724 | functions that have frame pointers. |
| 1725 | No definition is equivalent to always zero. */ |
| 1726 | /* Note on the 386 it might be more efficient not to define this since |
| 1727 | we have to restore it ourselves from the frame pointer, in order to |
| 1728 | use pop */ |
| 1729 | |
#define EXIT_IGNORE_STACK 1

/* Define this macro as a C expression that is nonzero for registers
   used by the epilogue or the `return' pattern.  */

#define EPILOGUE_USES(REGNO) ix86_epilogue_uses (REGNO)

/* Output assembler code for a block containing the constant parts
   of a trampoline, leaving space for the variable parts.  */

/* On the 386, the trampoline contains two instructions:
     mov #STATIC,ecx
     jmp FUNCTION
   The trampoline is generated entirely at runtime.  The operand of JMP
   is the address of FUNCTION relative to the instruction following the
   JMP (which is 5 bytes long).  */

/* Length in units of the trampoline for entering a nested function.
   The 64-bit trampoline is larger (28 vs. 14 bytes); its exact layout
   is emitted at runtime by the trampoline-init code in i386.cc.  */

#define TRAMPOLINE_SIZE (TARGET_64BIT ? 28 : 14)
| 1750 | |
| 1751 | /* Definitions for register eliminations. |
| 1752 | |
| 1753 | This is an array of structures. Each structure initializes one pair |
| 1754 | of eliminable registers. The "from" register number is given first, |
| 1755 | followed by "to". Eliminations of the same "from" register are listed |
| 1756 | in order of preference. |
| 1757 | |
| 1758 | There are two registers that can always be eliminated on the i386. |
| 1759 | The frame pointer and the arg pointer can be replaced by either the |
| 1760 | hard frame pointer or to the stack pointer, depending upon the |
| 1761 | circumstances. The hard frame pointer is not used before reload and |
| 1762 | so it is not eligible for elimination. */ |
| 1763 | |
/* Note: no line-continuation backslash after the closing "}}" — the
   macro must end here rather than silently absorbing whatever line
   happens to follow it.  */
#define ELIMINABLE_REGS					\
{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM},		\
 { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM},	\
 { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM},		\
 { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}}

/* Define the offset between two registers, one to be eliminated, and the other
   its replacement, at the start of a routine.  */

#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
  ((OFFSET) = ix86_initial_elimination_offset ((FROM), (TO)))
| 1775 | |
| 1776 | /* Addressing modes, and classification of registers for them. */ |
| 1777 | |
| 1778 | /* Macros to check register numbers against specific register classes. */ |
| 1779 | |
| 1780 | /* These assume that REGNO is a hard or pseudo reg number. |
| 1781 | They give nonzero only if REGNO is a hard reg of the suitable class |
| 1782 | or a pseudo reg currently allocated to a suitable hard reg. |
| 1783 | Since they use reg_renumber, they are safe only once reg_renumber |
| 1784 | has been allocated, which happens in reginfo.cc during register |
| 1785 | allocation. */ |
| 1786 | |
/* Strict variants: a pseudo qualifies only via its assigned hard reg
   in reg_renumber (see comment above).  */
#define REGNO_OK_FOR_INDEX_P(REGNO) \
  (INDEX_REGNO_P (REGNO) \
   || INDEX_REGNO_P (reg_renumber[(REGNO)]))

/* A base may also be the (eliminable) arg or soft frame pointer.  */
#define REGNO_OK_FOR_BASE_P(REGNO) \
  (GENERAL_REGNO_P (REGNO) \
   || (REGNO) == ARG_POINTER_REGNUM \
   || (REGNO) == FRAME_POINTER_REGNUM \
   || GENERAL_REGNO_P (reg_renumber[(REGNO)]))

/* Non strict versions, pseudos are ok.  */
#define REGNO_OK_FOR_INDEX_NONSTRICT_P(REGNO) \
  (INDEX_REGNO_P (REGNO) \
   || !HARD_REGISTER_NUM_P (REGNO))

#define REGNO_OK_FOR_BASE_NONSTRICT_P(REGNO) \
  (GENERAL_REGNO_P (REGNO) \
   || (REGNO) == ARG_POINTER_REGNUM \
   || (REGNO) == FRAME_POINTER_REGNUM \
   || !HARD_REGISTER_NUM_P (REGNO))
| 1807 | |
| 1808 | /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression |
| 1809 | that is a valid memory address for an instruction. |
| 1810 | The MODE argument is the machine mode for the MEM expression |
| 1811 | that wants to use this address. |
| 1812 | |
| 1813 | The other macros defined here are used only in TARGET_LEGITIMATE_ADDRESS_P, |
| 1814 | except for CONSTANT_ADDRESS_P which is usually machine-independent. |
| 1815 | |
| 1816 | See legitimize_pic_address in i386.cc for details as to what |
| 1817 | constitutes a legitimate address when -fpic is used. */ |
| 1818 | |
/* An i386 address is at most base + index (scale and displacement are
   not registers).  */
#define MAX_REGS_PER_ADDRESS 2

#define CONSTANT_ADDRESS_P(X) constant_address_p (X)

/* If defined, a C expression to determine the base term of address X.
   This macro is used in only one place: `find_base_term' in alias.cc.

   It is always safe for this macro to not be defined.  It exists so
   that alias analysis can understand machine-dependent addresses.

   The typical use of this macro is to handle addresses containing
   a label_ref or symbol_ref within an UNSPEC.  */

#define FIND_BASE_TERM(X) ix86_find_base_term (X)
| 1833 | |
| 1834 | /* Nonzero if the constant value X is a legitimate general operand |
| 1835 | when generating PIC code. It is given that flag_pic is on and |
| 1836 | that X satisfies CONSTANT_P or is a CONST_DOUBLE. */ |
| 1837 | |
#define LEGITIMATE_PIC_OPERAND_P(X) legitimate_pic_operand_p (X)

/* If X is a unary RTX, yield its operand; otherwise yield X itself.
   The bare else-arm is parenthesized so the macro expands safely even
   when X is an expression built from low-precedence operators.  */
#define STRIP_UNARY(X) (UNARY_P (X) ? XEXP (X, 0) : (X))

#define SYMBOLIC_CONST(X) \
  (GET_CODE (X) == SYMBOL_REF \
   || GET_CODE (X) == LABEL_REF \
   || (GET_CODE (X) == CONST && symbolic_reference_mentioned_p (X)))
| 1846 | |
| 1847 | /* Max number of args passed in registers. If this is more than 3, we will |
| 1848 | have problems with ebx (register #4), since it is a caller save register and |
| 1849 | is also used as the pic register in ELF. So for now, don't allow more than |
| 1850 | 3 registers to be passed in registers. */ |
| 1851 | |
/* Abi specific values for REGPARM_MAX and SSE_REGPARM_MAX.  */
#define X86_64_REGPARM_MAX 6
#define X86_64_MS_REGPARM_MAX 4

#define X86_32_REGPARM_MAX 3

/* Max # of integer registers used for argument passing under the
   current ABI.  */
#define REGPARM_MAX \
  (TARGET_64BIT \
   ? (TARGET_64BIT_MS_ABI \
      ? X86_64_MS_REGPARM_MAX \
      : X86_64_REGPARM_MAX) \
   : X86_32_REGPARM_MAX)

#define X86_64_SSE_REGPARM_MAX 8
#define X86_64_MS_SSE_REGPARM_MAX 4

/* Zero when SSE is unavailable; Darwin allows one more than other
   32-bit targets.  */
#define X86_32_SSE_REGPARM_MAX (TARGET_SSE ? (TARGET_MACHO ? 4 : 3) : 0)

/* Max # of SSE registers used for argument passing under the
   current ABI.  */
#define SSE_REGPARM_MAX \
  (TARGET_64BIT \
   ? (TARGET_64BIT_MS_ABI \
      ? X86_64_MS_SSE_REGPARM_MAX \
      : X86_64_SSE_REGPARM_MAX) \
   : X86_32_SSE_REGPARM_MAX)

#define X86_32_MMX_REGPARM_MAX (TARGET_MMX ? (TARGET_MACHO ? 0 : 3) : 0)

/* MMX registers are never used for argument passing in 64-bit mode.  */
#define MMX_REGPARM_MAX (TARGET_64BIT ? 0 : X86_32_MMX_REGPARM_MAX)
| 1880 | |
/* Specify the machine mode that this machine uses
   for the index in the tablejump instruction.
   SImode entries suffice except for LP64 code that is either non-PIC
   or uses the large PIC code model.  */
#define CASE_VECTOR_MODE \
  (!TARGET_LP64 || (flag_pic && ix86_cmodel != CM_LARGE_PIC) ? SImode : DImode)

/* Define this as 1 if `char' should by default be signed; else as 0.  */
#define DEFAULT_SIGNED_CHAR 1

/* The constant maximum number of bytes that a single instruction can
   move quickly between memory and registers or between two memory
   locations.  64 corresponds to a full ZMM (AVX512) register.  */
#define MAX_MOVE_MAX 64
| 1893 | |
/* Max number of bytes we can move from memory to memory in one
   reasonably fast instruction, as opposed to MOVE_MAX_PIECES which
   is the number of bytes at a time which we can move efficiently.
   MOVE_MAX_PIECES defaults to MOVE_MAX.

   Picks the widest vector width enabled by -mmove-max/-mstore-max:
   64 (ZMM), 32 (YMM), 16 (XMM, when unaligned accesses are cheap),
   otherwise a word.  */

#define MOVE_MAX \
  ((TARGET_AVX512F \
    && (ix86_move_max == PVW_AVX512 \
	|| ix86_store_max == PVW_AVX512)) \
   ? 64 \
   : ((TARGET_AVX \
       && (ix86_move_max >= PVW_AVX256 \
	   || ix86_store_max >= PVW_AVX256)) \
      ? 32 \
      : ((TARGET_SSE2 \
	  && TARGET_SSE_UNALIGNED_LOAD_OPTIMAL \
	  && TARGET_SSE_UNALIGNED_STORE_OPTIMAL) \
	 ? 16 : UNITS_PER_WORD)))

/* STORE_MAX_PIECES is the number of bytes at a time that we can store
   efficiently.  Allow 16/32/64 bytes only if inter-unit move is enabled
   since vec_duplicate enabled by inter-unit move is used to implement
   store_by_pieces of 16/32/64 bytes.  Same width ladder as MOVE_MAX,
   but keyed on -mstore-max alone.  */
#define STORE_MAX_PIECES \
  (TARGET_INTER_UNIT_MOVES_TO_VEC \
   ? ((TARGET_AVX512F && ix86_store_max == PVW_AVX512) \
      ? 64 \
      : ((TARGET_AVX \
	  && ix86_store_max >= PVW_AVX256) \
	 ? 32 \
	 : ((TARGET_SSE2 \
	     && TARGET_SSE_UNALIGNED_STORE_OPTIMAL) \
	    ? 16 : UNITS_PER_WORD))) \
   : UNITS_PER_WORD)
| 1928 | |
/* If a memory-to-memory move would take MOVE_RATIO or more simple
   move-instruction pairs, we will do a cpymem or libcall instead.
   Increasing the value will always make code faster, but eventually
   incurs high cost in increased code size.

   If you don't define this, a reasonable default is used.  */

/* Tuned per-CPU when optimizing for speed; a small fixed value for
   size.  */
#define MOVE_RATIO(speed) ((speed) ? ix86_cost->move_ratio : 3)

/* If a clear memory operation would take CLEAR_RATIO or more simple
   move-instruction sequences, we will do a clrmem or libcall instead.  */

#define CLEAR_RATIO(speed) ((speed) ? ix86_cost->clear_ratio : 2)
| 1942 | |
| 1943 | /* Define if shifts truncate the shift count which implies one can |
| 1944 | omit a sign-extension or zero-extension of a shift count. |
| 1945 | |
| 1946 | On i386, shifts do truncate the count. But bit test instructions |
| 1947 | take the modulo of the bit offset operand. */ |
| 1948 | |
| 1949 | /* #define SHIFT_COUNT_TRUNCATED */ |
| 1950 | |
/* A macro to update M and UNSIGNEDP when an object whose type is
   TYPE and which has the specified mode and signedness is to be
   stored in a register.  This macro is only called when TYPE is a
   scalar type.

   On i386 it is sometimes useful to promote HImode and QImode
   quantities to SImode.  The choice depends on target type.
   UNSIGNEDP and TYPE are deliberately left untouched here.  */

#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
do {							\
  if (((MODE) == HImode && TARGET_PROMOTE_HI_REGS)	\
      || ((MODE) == QImode && TARGET_PROMOTE_QI_REGS))	\
    (MODE) = SImode;					\
} while (0)
| 1965 | |
/* Specify the machine mode that pointers have.
   After generation of rtl, the compiler makes no further distinction
   between pointers and any other objects of this machine mode.  */
#define Pmode (ix86_pmode == PMODE_DI ? DImode : SImode)

/* Supply a definition of STACK_SAVEAREA_MODE for emit_stack_save.
   NONLOCAL needs space to save both shadow stack and stack pointers,
   hence double-width (TImode/DImode) for that level only.

   FIXME: We only need to save and restore stack pointer in ptr_mode.
   But expand_builtin_setjmp_setup and expand_builtin_longjmp use Pmode
   to save and restore stack pointer.  See
   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84150
*/
#define STACK_SAVEAREA_MODE(LEVEL) \
  ((LEVEL) == SAVE_NONLOCAL ? (TARGET_64BIT ? TImode : DImode) : Pmode)

/* Specify the machine_mode of the size increment
   operand of an 'allocate_stack' named pattern.  */
#define STACK_SIZE_MODE Pmode

/* A C expression whose value is zero if pointers that need to be extended
   from being `POINTER_SIZE' bits wide to `Pmode' are sign-extended and
   greater than zero if they are zero-extended and less than zero if the
   ptr_extend instruction should be used.  */

#define POINTERS_EXTEND_UNSIGNED 1

/* A function address in a call instruction
   is a byte address (for indexing purposes)
   so give the MEM rtx a byte's mode.  */
#define FUNCTION_MODE QImode
| 1997 | |
| 1998 | |
/* A C expression for the cost of a branch instruction.  A value of 1
   is the default; other values are interpreted relative to that.
   When optimizing for size, cost 2; otherwise, predictable branches
   are free and others use the per-CPU tuning value.  */

#define BRANCH_COST(speed_p, predictable_p) \
  (!(speed_p) ? 2 : (predictable_p) ? 0 : ix86_branch_cost)

/* An integer expression for the size in bits of the largest integer machine
   mode that should actually be used.  We allow pairs of registers.  */
#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (TARGET_64BIT ? TImode : DImode)

/* Define this macro as a C expression which is nonzero if accessing
   less than a word of memory (i.e. a `char' or a `short') is no
   faster than accessing a word of memory, i.e., if such access
   require more than one instruction or if there is no difference in
   cost between byte and (aligned) word loads.

   When this macro is not defined, the compiler will access a field by
   finding the smallest containing object; when it is defined, a
   fullword load will be used if alignment permits.  Unless bytes
   accesses are faster than word accesses, using word accesses is
   preferable since it may eliminate subsequent memory access if
   subsequent accesses occur to other fields in the same word of the
   structure, but to different bytes.  */

#define SLOW_BYTE_ACCESS 0

/* Define this macro if it is as good or better to call a constant
   function address than to call an address kept in a register.

   Desirable on the 386 because a CALL with a constant address is
   faster than one with a register address.  */

#define NO_FUNCTION_CSE 1
| 2032 | |
| 2033 | /* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE, |
| 2034 | return the mode to be used for the comparison. |
| 2035 | |
| 2036 | For floating-point equality comparisons, CCFPEQmode should be used. |
| 2037 | VOIDmode should be used in all other cases. |
| 2038 | |
| 2039 | For integer comparisons against zero, reduce to CCNOmode or CCZmode if |
| 2040 | possible, to allow for more combinations. */ |
| 2041 | |
| 2042 | #define SELECT_CC_MODE(OP, X, Y) ix86_cc_mode ((OP), (X), (Y)) |
| 2043 | |
| 2044 | /* Return nonzero if MODE implies a floating point inequality can be |
| 2045 | reversed. */ |
| 2046 | |
| 2047 | #define REVERSIBLE_CC_MODE(MODE) 1 |
| 2048 | |
| 2049 | /* A C expression whose value is reversed condition code of the CODE for |
| 2050 | comparison done in CC_MODE mode. */ |
| 2051 | #define REVERSE_CONDITION(CODE, MODE) ix86_reverse_condition ((CODE), (MODE)) |
| 2052 | |
| 2053 | |
/* Control the assembler format that we output, to the extent
   this does not vary between assemblers.  */

/* How to refer to registers in assembler output.
   This sequence is indexed by compiler's hard-register-number (see above),
   so the order of the entries must match the hard register numbering and
   must not be changed independently of it.  */

/* In order to refer to the first 8 regs as 32-bit regs, prefix an "e".
   For non floating point regs, the following are the HImode names.

   For float regs, the stack top is sometimes referred to as "%st(0)"
   instead of just "%st".  TARGET_PRINT_OPERAND handles this with the
   "y" code.

   "argp", "flags", "fpsr" and "frame" are internal pseudo registers,
   not real assembler register names.  */

#define HI_REGISTER_NAMES \
{"ax","dx","cx","bx","si","di","bp","sp", \
 "st","st(1)","st(2)","st(3)","st(4)","st(5)","st(6)","st(7)", \
 "argp", "flags", "fpsr", "frame", \
 "xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7", \
 "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7", \
 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", \
 "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15", \
 "xmm16", "xmm17", "xmm18", "xmm19", \
 "xmm20", "xmm21", "xmm22", "xmm23", \
 "xmm24", "xmm25", "xmm26", "xmm27", \
 "xmm28", "xmm29", "xmm30", "xmm31", \
 "k0", "k1", "k2", "k3", "k4", "k5", "k6", "k7", \
 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", \
 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31" }

#define REGISTER_NAMES HI_REGISTER_NAMES

/* QImode (low byte) names for the first 8 integer registers.  */
#define QI_REGISTER_NAMES \
{"al", "dl", "cl", "bl", "sil", "dil", "bpl", "spl"}

/* High-byte (bits 8-15) names; only the first four integer registers
   have them.  */
#define QI_HIGH_REGISTER_NAMES \
{"ah", "dh", "ch", "bh"}
| 2090 | |
/* Table of additional register names to use in user input.  Every alias
   maps to the same hard register number as the primary name above:
   the 32-bit (e*) and 64-bit (r*) integer forms, the partial byte
   registers, and the ymm/zmm views of the xmm registers.  */

#define ADDITIONAL_REGISTER_NAMES \
{ \
  { "eax", AX_REG }, { "edx", DX_REG }, { "ecx", CX_REG }, { "ebx", BX_REG }, \
  { "esi", SI_REG }, { "edi", DI_REG }, { "ebp", BP_REG }, { "esp", SP_REG }, \
  { "rax", AX_REG }, { "rdx", DX_REG }, { "rcx", CX_REG }, { "rbx", BX_REG }, \
  { "rsi", SI_REG }, { "rdi", DI_REG }, { "rbp", BP_REG }, { "rsp", SP_REG }, \
  { "al", AX_REG }, { "dl", DX_REG }, { "cl", CX_REG }, { "bl", BX_REG }, \
  { "sil", SI_REG }, { "dil", DI_REG }, { "bpl", BP_REG }, { "spl", SP_REG }, \
  { "ah", AX_REG }, { "dh", DX_REG }, { "ch", CX_REG }, { "bh", BX_REG }, \
  { "ymm0", XMM0_REG }, { "ymm1", XMM1_REG }, { "ymm2", XMM2_REG }, { "ymm3", XMM3_REG }, \
  { "ymm4", XMM4_REG }, { "ymm5", XMM5_REG }, { "ymm6", XMM6_REG }, { "ymm7", XMM7_REG }, \
  { "ymm8", XMM8_REG }, { "ymm9", XMM9_REG }, { "ymm10", XMM10_REG }, { "ymm11", XMM11_REG }, \
  { "ymm12", XMM12_REG }, { "ymm13", XMM13_REG }, { "ymm14", XMM14_REG }, { "ymm15", XMM15_REG }, \
  { "ymm16", XMM16_REG }, { "ymm17", XMM17_REG }, { "ymm18", XMM18_REG }, { "ymm19", XMM19_REG }, \
  { "ymm20", XMM20_REG }, { "ymm21", XMM21_REG }, { "ymm22", XMM22_REG }, { "ymm23", XMM23_REG }, \
  { "ymm24", XMM24_REG }, { "ymm25", XMM25_REG }, { "ymm26", XMM26_REG }, { "ymm27", XMM27_REG }, \
  { "ymm28", XMM28_REG }, { "ymm29", XMM29_REG }, { "ymm30", XMM30_REG }, { "ymm31", XMM31_REG }, \
  { "zmm0", XMM0_REG }, { "zmm1", XMM1_REG }, { "zmm2", XMM2_REG }, { "zmm3", XMM3_REG }, \
  { "zmm4", XMM4_REG }, { "zmm5", XMM5_REG }, { "zmm6", XMM6_REG }, { "zmm7", XMM7_REG }, \
  { "zmm8", XMM8_REG }, { "zmm9", XMM9_REG }, { "zmm10", XMM10_REG }, { "zmm11", XMM11_REG }, \
  { "zmm12", XMM12_REG }, { "zmm13", XMM13_REG }, { "zmm14", XMM14_REG }, { "zmm15", XMM15_REG }, \
  { "zmm16", XMM16_REG }, { "zmm17", XMM17_REG }, { "zmm18", XMM18_REG }, { "zmm19", XMM19_REG }, \
  { "zmm20", XMM20_REG }, { "zmm21", XMM21_REG }, { "zmm22", XMM22_REG }, { "zmm23", XMM23_REG }, \
  { "zmm24", XMM24_REG }, { "zmm25", XMM25_REG }, { "zmm26", XMM26_REG }, { "zmm27", XMM27_REG }, \
  { "zmm28", XMM28_REG }, { "zmm29", XMM29_REG }, { "zmm30", XMM30_REG }, { "zmm31", XMM31_REG } \
}
| 2119 | |
/* How to renumber registers for gdb.  The 64-bit psABI uses a
   different debug register numbering than the 32-bit map.  */

#define DEBUGGER_REGNO(N) \
  (TARGET_64BIT ? debugger64_register_map[(N)] : debugger_register_map[(N)])

extern unsigned int const debugger_register_map[FIRST_PSEUDO_REGISTER];
extern unsigned int const debugger64_register_map[FIRST_PSEUDO_REGISTER];
extern unsigned int const svr4_debugger_register_map[FIRST_PSEUDO_REGISTER];

/* Before the prologue, RA is at 0(%esp).  */
#define INCOMING_RETURN_ADDR_RTX \
  gen_rtx_MEM (Pmode, stack_pointer_rtx)

/* After the prologue, RA is at -4(AP) in the current frame.
   For nonzero COUNT, FRAME is the frame pointer of an enclosing frame
   and the return address sits one word above it.  */
#define RETURN_ADDR_RTX(COUNT, FRAME) \
  ((COUNT) == 0 \
   ? gen_rtx_MEM (Pmode, plus_constant (Pmode, arg_pointer_rtx, \
					-UNITS_PER_WORD)) \
   : gen_rtx_MEM (Pmode, plus_constant (Pmode, (FRAME), UNITS_PER_WORD)))

/* PC is dbx register 8; let's use that column for RA.  */
#define DWARF_FRAME_RETURN_COLUMN (TARGET_64BIT ? 16 : 8)

/* Before the prologue, there are return address and error code for
   exception handler on the top of the frame, hence two words of
   SP offset for TYPE_EXCEPTION functions instead of one.  */
#define INCOMING_FRAME_SP_OFFSET \
  (cfun->machine->func_type == TYPE_EXCEPTION \
   ? 2 * UNITS_PER_WORD : UNITS_PER_WORD)

/* The value of INCOMING_FRAME_SP_OFFSET the assembler assumes in
   .cfi_startproc.  */
#define DEFAULT_INCOMING_FRAME_SP_OFFSET UNITS_PER_WORD

/* Describe how we implement __builtin_eh_return.  The EH data
   registers are the hard registers numbered up to DX_REG (the "ax" and
   "dx" slots of the register table above); the stack adjustment lives
   in CX_REG.  */
#define EH_RETURN_DATA_REGNO(N)	((N) <= DX_REG ? (N) : INVALID_REGNUM)
#define EH_RETURN_STACKADJ_RTX	gen_rtx_REG (Pmode, CX_REG)
| 2156 | |
| 2157 | |
/* Select a format to encode pointers in exception handling data.  CODE
   is 0 for data, 1 for code labels, 2 for function pointers.  GLOBAL is
   true if the symbol may be affected by dynamic relocations.

   ??? All x86 object file formats are capable of representing this.
   After all, the relocation needed is the same as for the call insn.
   Whether or not a particular assembler allows us to enter such, I
   guess we'll have to see.  */
#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \
  asm_preferred_eh_data_format ((CODE), (GLOBAL))

/* These are a couple of extensions to the formats accepted
   by asm_fprintf:
   %z prints out opcode suffix for word-mode instruction
   %r prints out word-mode name for reg_names[arg]
   The "r"/"e" prefix for %r is emitted only for the legacy eight
   integer registers; all other register names are printed as-is.  */
#define ASM_FPRINTF_EXTENSIONS(FILE, ARGS, P) \
  case 'z': \
    fputc (TARGET_64BIT ? 'q' : 'l', (FILE)); \
    break; \
	  \
  case 'r': \
    { \
      unsigned int regno = va_arg ((ARGS), int); \
      if (LEGACY_INT_REGNO_P (regno)) \
	fputc (TARGET_64BIT ? 'r' : 'e', (FILE)); \
      fputs (reg_names[regno], (FILE)); \
      break; \
    }
| 2186 | |
/* This is how to output an insn to push a register on the stack.
   %z and %r are the asm_fprintf extensions defined above.  */

#define ASM_OUTPUT_REG_PUSH(FILE, REGNO) \
  asm_fprintf ((FILE), "\tpush%z\t%%%r\n", (REGNO))

/* This is how to output an insn to pop a register from the stack.  */

#define ASM_OUTPUT_REG_POP(FILE, REGNO) \
  asm_fprintf ((FILE), "\tpop%z\t%%%r\n", (REGNO))

/* This is how to output an element of a case-vector that is absolute.  */

#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
  ix86_output_addr_vec_elt ((FILE), (VALUE))

/* This is how to output an element of a case-vector that is relative.
   BODY is unused on x86.  */

#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
  ix86_output_addr_diff_elt ((FILE), (VALUE), (REL))
| 2206 | |
/* When we see %v, we will print the 'v' prefix if TARGET_AVX is true:
   advance past just the '%' (keeping the 'v') for AVX, or past both
   characters otherwise.  */

#define ASM_OUTPUT_AVX_PREFIX(STREAM, PTR) \
{ \
  if ((PTR)[0] == '%' && (PTR)[1] == 'v') \
    (PTR) += TARGET_AVX ? 1 : 2; \
}

/* A C statement or statements which output an assembler instruction
   opcode to the stdio stream STREAM.  The macro-operand PTR is a
   variable of type `char *' which points to the opcode name in
   its "internal" form--the form that is written in the machine
   description.  */

#define ASM_OUTPUT_OPCODE(STREAM, PTR) \
  ASM_OUTPUT_AVX_PREFIX ((STREAM), (PTR))
| 2223 | |
| 2224 | /* A C statement to output to the stdio stream FILE an assembler |
| 2225 | command to pad the location counter to a multiple of 1<<LOG |
| 2226 | bytes if it is within MAX_SKIP bytes. */ |
| 2227 | |
| 2228 | #ifdef HAVE_GAS_MAX_SKIP_P2ALIGN |
# define ASM_OUTPUT_MAX_SKIP_ALIGN(FILE,LOG,MAX_SKIP) \
  do { \
    /* LOG == 0 means no alignment is requested; emit nothing.  */ \
    if ((LOG) == 0) \
      break; \
    /* Only bound the padding when the limit is a real restriction, \
       i.e. nonzero and smaller than the full alignment gap.  */ \
    if ((MAX_SKIP) != 0 && (MAX_SKIP) < (1 << (LOG)) - 1) \
      fprintf ((FILE), "\t.p2align %d,,%d\n", (LOG), (MAX_SKIP)); \
    else \
      fprintf ((FILE), "\t.p2align %d\n", (LOG)); \
  } while (0)
| 2238 | #endif |
| 2239 | |
/* Write the extra assembler code needed to declare a function
   properly.  The #undef replaces any generic definition supplied
   earlier in the tm.h include chain.  */

#undef ASM_OUTPUT_FUNCTION_LABEL
#define ASM_OUTPUT_FUNCTION_LABEL(FILE, NAME, DECL) \
  ix86_asm_output_function_label ((FILE), (NAME), (DECL))
| 2246 | |
/* A C statement (sans semicolon) to output a reference to SYMBOL_REF SYM.
   If not defined, assemble_name will be used to output the name of the
   symbol.  This macro may be used to modify the way a symbol is referenced
   depending on information encoded by TARGET_ENCODE_SECTION_INFO.  */

#ifndef ASM_OUTPUT_SYMBOL_REF
#define ASM_OUTPUT_SYMBOL_REF(FILE, SYM) \
  do {							\
    const char *name				\
      = assemble_name_resolve (XSTR (SYM, 0));	\
    /* In -masm=att wrap identifiers that start with $	\
       into parens (in AT&T syntax a leading '$' would	\
       otherwise read as an immediate operand); only	\
       needed when no user label prefix disambiguates.  */ \
    if (ASSEMBLER_DIALECT == ASM_ATT		\
	&& name[0] == '$'			\
	&& user_label_prefix[0] == '\0')	\
      {						\
	fputc ('(', (FILE));			\
	assemble_name_raw ((FILE), name);	\
	fputc (')', (FILE));			\
      }						\
    else					\
      assemble_name_raw ((FILE), name);		\
  } while (0)
#endif
| 2271 | |
/* In Intel syntax, we have to quote user-defined labels that would
   match (unprefixed) registers or operators.  */

#undef ASM_OUTPUT_LABELREF
#define ASM_OUTPUT_LABELREF(STREAM, NAME) \
  ix86_asm_output_labelref ((STREAM), user_label_prefix, (NAME))

/* Under some conditions we need jump tables in the text section,
   because the assembler cannot handle label differences between
   sections.  Only the 32-bit PIC case without GOTOFF-in-data support
   is affected.  */

#define JUMP_TABLES_IN_TEXT_SECTION \
  (flag_pic && !(TARGET_64BIT || HAVE_AS_GOTOFF_IN_DATA))
| 2285 | |
/* Switch to init or fini section via SECTION_OP, emit a call to FUNC,
   and switch back.  For x86 we do this only to save a few bytes that
   would otherwise be unused in the text section.  */

/* Two-level stringize so that a macro argument (here
   __USER_LABEL_PREFIX__) is expanded before being quoted.  */
#define CRT_MKSTR2(VAL) #VAL
#define CRT_MKSTR(x) CRT_MKSTR2(x)

#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC)		\
   asm (SECTION_OP "\n\t"					\
	"call " CRT_MKSTR(__USER_LABEL_PREFIX__) #FUNC "\n"	\
	TEXT_SECTION_ASM_OP);

/* Default threshold for putting data in large sections
   with x86-64 medium memory model */
#define DEFAULT_LARGE_SECTION_THRESHOLD 65536
| 2300 | |
/* Which processor to tune code generation for.  These must be in sync
   with processor_cost_table in i386-options.cc.  */

/* Alias set used for GOT references (see ix86_GOT_alias_set);
   unrelated to the tuning enum below.  */
#define GOT_ALIAS_SET ix86_GOT_alias_set ()

enum processor_type
{
  PROCESSOR_GENERIC = 0,
  PROCESSOR_I386,			/* 80386 */
  PROCESSOR_I486,			/* 80486DX, 80486SX, 80486DX[24] */
  PROCESSOR_PENTIUM,
  PROCESSOR_LAKEMONT,
  PROCESSOR_PENTIUMPRO,
  PROCESSOR_PENTIUM4,
  PROCESSOR_NOCONA,
  PROCESSOR_CORE2,
  PROCESSOR_NEHALEM,
  PROCESSOR_SANDYBRIDGE,
  PROCESSOR_HASWELL,
  PROCESSOR_BONNELL,
  PROCESSOR_SILVERMONT,
  PROCESSOR_GOLDMONT,
  PROCESSOR_GOLDMONT_PLUS,
  PROCESSOR_TREMONT,
  PROCESSOR_SIERRAFOREST,
  PROCESSOR_GRANDRIDGE,
  PROCESSOR_CLEARWATERFOREST,
  PROCESSOR_SKYLAKE,
  PROCESSOR_SKYLAKE_AVX512,
  PROCESSOR_CANNONLAKE,
  PROCESSOR_ICELAKE_CLIENT,
  PROCESSOR_ICELAKE_SERVER,
  PROCESSOR_CASCADELAKE,
  PROCESSOR_TIGERLAKE,
  PROCESSOR_COOPERLAKE,
  PROCESSOR_SAPPHIRERAPIDS,
  PROCESSOR_ALDERLAKE,
  PROCESSOR_ROCKETLAKE,
  PROCESSOR_GRANITERAPIDS,
  PROCESSOR_GRANITERAPIDS_D,
  PROCESSOR_ARROWLAKE,
  PROCESSOR_ARROWLAKE_S,
  PROCESSOR_PANTHERLAKE,
  PROCESSOR_DIAMONDRAPIDS,
  PROCESSOR_INTEL,
  PROCESSOR_LUJIAZUI,
  PROCESSOR_YONGFENG,
  PROCESSOR_SHIJIDADAO,
  PROCESSOR_GEODE,
  PROCESSOR_K6,
  PROCESSOR_ATHLON,
  PROCESSOR_K8,
  PROCESSOR_AMDFAM10,
  PROCESSOR_BDVER1,
  PROCESSOR_BDVER2,
  PROCESSOR_BDVER3,
  PROCESSOR_BDVER4,
  PROCESSOR_BTVER1,
  PROCESSOR_BTVER2,
  PROCESSOR_ZNVER1,
  PROCESSOR_ZNVER2,
  PROCESSOR_ZNVER3,
  PROCESSOR_ZNVER4,
  PROCESSOR_ZNVER5,
  PROCESSOR_max				/* Number of entries; keep last.  */
};
| 2367 | |
#if !defined(IN_LIBGCC2) && !defined(IN_TARGET_LIBS) && !defined(IN_RTS)
extern const char *const processor_names[];

#include "wide-int-bitmask.h"

/* One enumerator (bit position) per ISA feature, generated from
   i386-isa.def.  The _NAME values are only used to compute the
   PTA_NAME single-bit masks defined below.  */
enum pta_flag
{
#define DEF_PTA(NAME) _ ## NAME,
#include "i386-isa.def"
#undef DEF_PTA
  END_PTA
};

/* wide_int_bitmask can handle only 128 flags.  */
STATIC_ASSERT (END_PTA <= 128);
| 2383 | |
/* Build a wide_int_bitmask with only bit N set: bits 0..63 live in the
   low word, bits 64..127 in the high word.  N is evaluated more than
   once.  The argument is fully parenthesized so that an expression
   containing operators of lower precedence than '<' / '<<' / '-'
   (e.g. a shift or arithmetic expression) still expands correctly.  */
#define WIDE_INT_BITMASK_FROM_NTH(N) ((N) < 64 ? wide_int_bitmask (0, 1ULL << (N)) \
				      : wide_int_bitmask (1ULL << ((N) - 64), 0))
| 2386 | |
/* Define a PTA_<NAME> single-bit mask for every ISA feature listed in
   i386-isa.def.  */
#define DEF_PTA(NAME) constexpr wide_int_bitmask PTA_ ## NAME \
  = WIDE_INT_BITMASK_FROM_NTH ((pta_flag) _ ## NAME);
#include "i386-isa.def"
#undef DEF_PTA

/* Feature masks of the x86-64 micro-architecture levels.  Note that V2
   clears NO_SAHF from the baseline (SAHF is available from V2 on).  */
constexpr wide_int_bitmask PTA_X86_64_BASELINE = PTA_64BIT | PTA_MMX | PTA_SSE
  | PTA_SSE2 | PTA_NO_SAHF | PTA_FXSR;
constexpr wide_int_bitmask PTA_X86_64_V2 = (PTA_X86_64_BASELINE
					    & (~PTA_NO_SAHF))
  | PTA_CX16 | PTA_POPCNT | PTA_SSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_SSSE3;
constexpr wide_int_bitmask PTA_X86_64_V3 = PTA_X86_64_V2
  | PTA_AVX | PTA_AVX2 | PTA_BMI | PTA_BMI2 | PTA_F16C | PTA_FMA | PTA_LZCNT
  | PTA_MOVBE | PTA_XSAVE;
constexpr wide_int_bitmask PTA_X86_64_V4 = PTA_X86_64_V3
  | PTA_AVX512F | PTA_AVX512BW | PTA_AVX512CD | PTA_AVX512DQ | PTA_AVX512VL;
| 2402 | |
/* Per-CPU ISA feature masks for the Intel processors; each later CPU
   inherits an earlier mask and adds (or occasionally removes) feature
   bits.  */
constexpr wide_int_bitmask PTA_CORE2 = PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2
  | PTA_SSE3 | PTA_SSSE3 | PTA_CX16 | PTA_FXSR;
constexpr wide_int_bitmask PTA_NEHALEM = PTA_CORE2 | PTA_SSE4_1 | PTA_SSE4_2
  | PTA_POPCNT;
constexpr wide_int_bitmask PTA_WESTMERE = PTA_NEHALEM | PTA_PCLMUL;
constexpr wide_int_bitmask PTA_SANDYBRIDGE = PTA_WESTMERE | PTA_AVX | PTA_XSAVE
  | PTA_XSAVEOPT;
constexpr wide_int_bitmask PTA_IVYBRIDGE = PTA_SANDYBRIDGE | PTA_FSGSBASE
  | PTA_RDRND | PTA_F16C;
constexpr wide_int_bitmask PTA_HASWELL = PTA_IVYBRIDGE | PTA_AVX2 | PTA_BMI
  | PTA_BMI2 | PTA_LZCNT | PTA_FMA | PTA_MOVBE | PTA_HLE;
constexpr wide_int_bitmask PTA_BROADWELL = PTA_HASWELL | PTA_ADX | PTA_RDSEED
  | PTA_PRFCHW;
constexpr wide_int_bitmask PTA_SKYLAKE = PTA_BROADWELL | PTA_AES
  | PTA_CLFLUSHOPT | PTA_XSAVEC | PTA_XSAVES | PTA_SGX;
constexpr wide_int_bitmask PTA_SKYLAKE_AVX512 = PTA_SKYLAKE | PTA_AVX512F
  | PTA_AVX512CD | PTA_AVX512VL | PTA_AVX512BW | PTA_AVX512DQ | PTA_PKU
  | PTA_CLWB;
constexpr wide_int_bitmask PTA_CASCADELAKE = PTA_SKYLAKE_AVX512
  | PTA_AVX512VNNI;
constexpr wide_int_bitmask PTA_COOPERLAKE = PTA_CASCADELAKE | PTA_AVX512BF16;
constexpr wide_int_bitmask PTA_CANNONLAKE = PTA_SKYLAKE | PTA_AVX512F
  | PTA_AVX512CD | PTA_AVX512VL | PTA_AVX512BW | PTA_AVX512DQ | PTA_PKU
  | PTA_AVX512VBMI | PTA_AVX512IFMA | PTA_SHA;
constexpr wide_int_bitmask PTA_ICELAKE_CLIENT = PTA_CANNONLAKE | PTA_AVX512VNNI
  | PTA_GFNI | PTA_VAES | PTA_AVX512VBMI2 | PTA_VPCLMULQDQ | PTA_AVX512BITALG
  | PTA_RDPID | PTA_AVX512VPOPCNTDQ;
/* Rocket Lake is Ice Lake client minus SGX.  */
constexpr wide_int_bitmask PTA_ROCKETLAKE = PTA_ICELAKE_CLIENT & ~PTA_SGX;
constexpr wide_int_bitmask PTA_ICELAKE_SERVER = PTA_ICELAKE_CLIENT
  | PTA_PCONFIG | PTA_WBNOINVD | PTA_CLWB;
constexpr wide_int_bitmask PTA_TIGERLAKE = PTA_ICELAKE_CLIENT | PTA_MOVDIRI
  | PTA_MOVDIR64B | PTA_CLWB | PTA_AVX512VP2INTERSECT | PTA_KL | PTA_WIDEKL;
constexpr wide_int_bitmask PTA_SAPPHIRERAPIDS = PTA_ICELAKE_SERVER | PTA_MOVDIRI
  | PTA_MOVDIR64B | PTA_ENQCMD | PTA_CLDEMOTE | PTA_PTWRITE | PTA_WAITPKG
  | PTA_SERIALIZE | PTA_TSXLDTRK | PTA_AMX_TILE | PTA_AMX_INT8 | PTA_AMX_BF16
  | PTA_UINTR | PTA_AVXVNNI | PTA_AVX512FP16 | PTA_AVX512BF16;
constexpr wide_int_bitmask PTA_BONNELL = PTA_CORE2 | PTA_MOVBE;
constexpr wide_int_bitmask PTA_SILVERMONT = PTA_WESTMERE | PTA_MOVBE
  | PTA_RDRND | PTA_PRFCHW;
constexpr wide_int_bitmask PTA_GOLDMONT = PTA_SILVERMONT | PTA_AES | PTA_SHA
  | PTA_XSAVE | PTA_RDSEED | PTA_XSAVEC | PTA_XSAVES | PTA_CLFLUSHOPT
  | PTA_XSAVEOPT | PTA_FSGSBASE;
constexpr wide_int_bitmask PTA_GOLDMONT_PLUS = PTA_GOLDMONT | PTA_RDPID
  | PTA_SGX | PTA_PTWRITE;
constexpr wide_int_bitmask PTA_TREMONT = PTA_GOLDMONT_PLUS | PTA_CLWB
  | PTA_GFNI | PTA_MOVDIRI | PTA_MOVDIR64B | PTA_CLDEMOTE | PTA_WAITPKG;
constexpr wide_int_bitmask PTA_ALDERLAKE = PTA_TREMONT | PTA_ADX | PTA_AVX
  | PTA_AVX2 | PTA_BMI | PTA_BMI2 | PTA_F16C | PTA_FMA | PTA_LZCNT
  | PTA_PCONFIG | PTA_PKU | PTA_VAES | PTA_VPCLMULQDQ | PTA_SERIALIZE
  | PTA_HRESET | PTA_KL | PTA_WIDEKL | PTA_AVXVNNI;
constexpr wide_int_bitmask PTA_SIERRAFOREST = PTA_ALDERLAKE | PTA_AVXIFMA
  | PTA_AVXVNNIINT8 | PTA_AVXNECONVERT | PTA_CMPCCXADD | PTA_ENQCMD | PTA_UINTR;
constexpr wide_int_bitmask PTA_GRANITERAPIDS = PTA_SAPPHIRERAPIDS | PTA_AMX_FP16
  | PTA_PREFETCHI | PTA_AVX10_1;
constexpr wide_int_bitmask PTA_GRANITERAPIDS_D = PTA_GRANITERAPIDS
  | PTA_AMX_COMPLEX;
constexpr wide_int_bitmask PTA_GRANDRIDGE = PTA_SIERRAFOREST;
constexpr wide_int_bitmask PTA_ARROWLAKE = PTA_ALDERLAKE | PTA_AVXIFMA
  | PTA_AVXVNNIINT8 | PTA_AVXNECONVERT | PTA_CMPCCXADD | PTA_UINTR;
constexpr wide_int_bitmask PTA_ARROWLAKE_S = PTA_ARROWLAKE | PTA_AVXVNNIINT16
  | PTA_SHA512 | PTA_SM3 | PTA_SM4;
constexpr wide_int_bitmask PTA_CLEARWATERFOREST = PTA_SIERRAFOREST
  | PTA_AVXVNNIINT16 | PTA_SHA512 | PTA_SM3 | PTA_SM4 | PTA_USER_MSR
  | PTA_PREFETCHI;
constexpr wide_int_bitmask PTA_PANTHERLAKE = PTA_ARROWLAKE_S | PTA_PREFETCHI;
constexpr wide_int_bitmask PTA_DIAMONDRAPIDS = PTA_GRANITERAPIDS_D
  | PTA_AVXIFMA | PTA_AVXNECONVERT | PTA_AVXVNNIINT16 | PTA_AVXVNNIINT8
  | PTA_CMPCCXADD | PTA_SHA512 | PTA_SM3 | PTA_SM4 | PTA_AVX10_2
  | PTA_APX_F | PTA_AMX_AVX512 | PTA_AMX_FP8 | PTA_AMX_TF32 | PTA_AMX_TRANSPOSE
  | PTA_MOVRS | PTA_AMX_MOVRS | PTA_USER_MSR;
| 2473 | |
/* Per-CPU ISA feature masks for the AMD (bdver/znver/btver) and
   Zhaoxin (lujiazui/yongfeng) processors.  */
constexpr wide_int_bitmask PTA_BDVER1 = PTA_64BIT | PTA_MMX | PTA_SSE
  | PTA_SSE2 | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM | PTA_SSSE3
  | PTA_SSE4_1 | PTA_SSE4_2 | PTA_AES | PTA_PCLMUL | PTA_AVX | PTA_FMA4
  | PTA_XOP | PTA_LWP | PTA_PRFCHW | PTA_FXSR | PTA_XSAVE;
constexpr wide_int_bitmask PTA_BDVER2 = PTA_BDVER1 | PTA_BMI | PTA_TBM
  | PTA_F16C | PTA_FMA;
constexpr wide_int_bitmask PTA_BDVER3 = PTA_BDVER2 | PTA_XSAVEOPT
  | PTA_FSGSBASE;
constexpr wide_int_bitmask PTA_BDVER4 = PTA_BDVER3 | PTA_AVX2 | PTA_BMI2
  | PTA_RDRND | PTA_MOVBE | PTA_MWAITX;

constexpr wide_int_bitmask PTA_ZNVER1 = PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2
  | PTA_SSE3 | PTA_SSE4A | PTA_CX16 | PTA_ABM | PTA_SSSE3 | PTA_SSE4_1
  | PTA_SSE4_2 | PTA_AES | PTA_PCLMUL | PTA_AVX | PTA_AVX2 | PTA_BMI | PTA_BMI2
  | PTA_F16C | PTA_FMA | PTA_PRFCHW | PTA_FXSR | PTA_XSAVE | PTA_XSAVEOPT
  | PTA_FSGSBASE | PTA_RDRND | PTA_MOVBE | PTA_MWAITX | PTA_ADX | PTA_RDSEED
  | PTA_CLZERO | PTA_CLFLUSHOPT | PTA_XSAVEC | PTA_XSAVES | PTA_SHA | PTA_LZCNT
  | PTA_POPCNT;
constexpr wide_int_bitmask PTA_ZNVER2 = PTA_ZNVER1 | PTA_CLWB | PTA_RDPID
  | PTA_WBNOINVD;
constexpr wide_int_bitmask PTA_ZNVER3 = PTA_ZNVER2 | PTA_VAES | PTA_VPCLMULQDQ
  | PTA_PKU;
constexpr wide_int_bitmask PTA_ZNVER4 = PTA_ZNVER3 | PTA_AVX512F | PTA_AVX512DQ
  | PTA_AVX512IFMA | PTA_AVX512CD | PTA_AVX512BW | PTA_AVX512VL
  | PTA_AVX512BF16 | PTA_AVX512VBMI | PTA_AVX512VBMI2 | PTA_GFNI
  | PTA_AVX512VNNI | PTA_AVX512BITALG | PTA_AVX512VPOPCNTDQ;
constexpr wide_int_bitmask PTA_ZNVER5 = PTA_ZNVER4 | PTA_AVXVNNI
  | PTA_MOVDIRI | PTA_MOVDIR64B | PTA_AVX512VP2INTERSECT | PTA_PREFETCHI;

constexpr wide_int_bitmask PTA_BTVER1 = PTA_64BIT | PTA_MMX | PTA_SSE
  | PTA_SSE2 | PTA_SSE3 | PTA_SSSE3 | PTA_SSE4A | PTA_ABM | PTA_CX16
  | PTA_PRFCHW | PTA_FXSR | PTA_XSAVE;
constexpr wide_int_bitmask PTA_BTVER2 = PTA_BTVER1 | PTA_SSE4_1 | PTA_SSE4_2
  | PTA_AES | PTA_PCLMUL | PTA_AVX | PTA_BMI | PTA_F16C | PTA_MOVBE
  | PTA_XSAVEOPT;

constexpr wide_int_bitmask PTA_LUJIAZUI = PTA_64BIT | PTA_MMX | PTA_SSE
  | PTA_SSE2 | PTA_SSE3 | PTA_CX16 | PTA_ABM | PTA_SSSE3 | PTA_SSE4_1
  | PTA_SSE4_2 | PTA_AES | PTA_PCLMUL | PTA_BMI | PTA_BMI2 | PTA_PRFCHW
  | PTA_FXSR | PTA_XSAVE | PTA_XSAVEOPT | PTA_FSGSBASE | PTA_RDRND | PTA_MOVBE
  | PTA_ADX | PTA_RDSEED | PTA_POPCNT;
constexpr wide_int_bitmask PTA_YONGFENG = PTA_LUJIAZUI | PTA_AVX | PTA_AVX2
  | PTA_F16C | PTA_FMA | PTA_SHA | PTA_LZCNT;
| 2517 | |
#ifndef GENERATOR_FILE

#include "insn-attr-common.h"

#include "common/config/i386/i386-cpuinfo.h"

/* One row of the -march=/-mtune= processor alias table.  */
class pta
{
public:
  const char *const name;		/* processor name or nickname.  */
  const enum processor_type processor;	/* tuning enum value.  */
  const enum attr_cpu schedule;		/* scheduling model attribute.  */
  const wide_int_bitmask flags;		/* PTA_* ISA feature mask.  */
  const int model;			/* NOTE(review): presumably the
					   i386-cpuinfo model id — confirm.  */
  const enum feature_priority priority;
};

extern const pta processor_alias_table[];
extern unsigned int const pta_size;
extern unsigned int const num_arch_names;
#endif /* !GENERATOR_FILE */

#endif /* !IN_LIBGCC2 && !IN_TARGET_LIBS && !IN_RTS */
| 2541 | |
/* Processor selected for tuning (-mtune=) and for the target ISA
   (-march=), respectively.  */
extern enum processor_type ix86_tune;
extern enum processor_type ix86_arch;

/* Size of the RED_ZONE area.  */
#define RED_ZONE_SIZE 128
/* Reserved area of the red zone for temporaries.  */
#define RED_ZONE_RESERVE 8

/* Stack boundaries; NOTE(review): presumably expressed in bits, like
   PREFERRED_STACK_BOUNDARY — confirm against i386-options.cc.  */
extern unsigned int ix86_preferred_stack_boundary;
extern unsigned int ix86_incoming_stack_boundary;

/* Smallest class containing REGNO.  */
extern enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER];
| 2555 | |
/* Strategies for lowering x87 floating-point compares.  */
enum ix86_fpcmp_strategy {
  IX86_FPCMP_SAHF,	/* compare via fnstsw + sahf.  */
  IX86_FPCMP_COMI,	/* compare via [f][u]comi, setting EFLAGS directly.  */
  IX86_FPCMP_ARITH	/* compare via arithmetic on the status word.  */
};
| 2561 | |
/* To properly truncate FP values into integers, we need to set i387 control
   word.  We can't emit proper mode switching code before reload, as spills
   generated by reload may truncate values incorrectly, but we still can avoid
   redundant computation of new control word by the mode switching pass.
   The fldcw instructions are still emitted redundantly, but this is probably
   not going to be noticeable problem, as most CPUs do have fast path for
   the sequence.

   The machinery is to emit simple truncation instructions and split them
   before reload to instructions having USEs of two memory locations that
   are filled by this code to old and new control word.

   Post-reload pass may be later used to eliminate the redundant fldcw if
   needed.  */

/* Stack slots for saved i387 control words and other temporaries.  */
enum ix86_stack_slot
{
  SLOT_CW_STORED = 0,		/* original control word, saved.  */
  SLOT_CW_ROUNDEVEN,		/* control word for round-to-even.  */
  SLOT_CW_TRUNC,		/* control word for truncation.  */
  SLOT_CW_FLOOR,		/* control word for rounding down.  */
  SLOT_CW_CEIL,			/* control word for rounding up.  */
  SLOT_STV_TEMP,		/* temporary used by the STV pass.  */
  SLOT_FLOATxFDI_387,		/* temporary for x87 DImode float conversion.  */
  MAX_386_STACK_LOCALS		/* number of slots; keep last.  */
};
| 2588 | |
/* Entities tracked by the mode-switching pass: the direction flag, the
   upper halves of the AVX registers, and one entity per i387 rounding
   control word.  */
enum ix86_entity
{
  X86_DIRFLAG = 0,
  AVX_U128,
  I387_ROUNDEVEN,
  I387_TRUNC,
  I387_FLOOR,
  I387_CEIL,
  MAX_386_ENTITIES
};

/* Modes of the X86_DIRFLAG entity: DF known to be clear vs. unknown.  */
enum x86_dirflag_state
{
  X86_DIRFLAG_RESET,
  X86_DIRFLAG_ANY
};

/* Modes of the AVX_U128 entity: upper 128 bits known clean, possibly
   dirty, or unknown.  */
enum avx_u128_state
{
  AVX_U128_CLEAN,
  AVX_U128_DIRTY,
  AVX_U128_ANY
};
| 2612 | |
/* Define this macro if the port needs extra instructions inserted
   for mode switching in an optimizing compilation.  */

#define OPTIMIZE_MODE_SWITCHING(ENTITY) \
  ix86_optimize_mode_switching[(ENTITY)]

/* If you define `OPTIMIZE_MODE_SWITCHING', you have to define this as
   initializer for an array of integers.  Each initializer element N
   refers to an entity that needs mode switching, and specifies the
   number of different modes that might need to be set for this
   entity.  The position of the initializer in the initializer -
   starting counting at zero - determines the integer that is used to
   refer to the mode-switched entity in question.
   The entries must line up with enum ix86_entity above: X86_DIRFLAG,
   AVX_U128, then the four I387_* control-word entities.  */

#define NUM_MODES_FOR_MODE_SWITCHING \
  { X86_DIRFLAG_ANY, AVX_U128_ANY, \
    I387_CW_ANY, I387_CW_ANY, I387_CW_ANY, I387_CW_ANY }
| 2630 | |
| 2631 | |
/* Avoid renaming of stack registers, as doing so in combination with
   scheduling just increases amount of live registers at time and in
   the turn amount of fxch instructions needed.

   ??? Maybe Pentium chips benefits from renaming, someone can try....

   Don't rename evex to non-evex sse registers (the equality check
   keeps both registers on the same side of the xmm16+ boundary).  */

#define HARD_REGNO_RENAME_OK(SRC, TARGET)				\
  (!STACK_REGNO_P (SRC)							\
   && EXT_REX_SSE_REGNO_P (SRC) == EXT_REX_SSE_REGNO_P (TARGET))


/* Character prepended to fastcall-decorated symbol names.  */
#define FASTCALL_PREFIX '@'
| 2646 | |
| 2647 | #ifndef USED_FOR_TARGET |
| 2648 | /* Structure describing stack frame layout. |
| 2649 | Stack grows downward: |
| 2650 | |
| 2651 | [arguments] |
| 2652 | <- ARG_POINTER |
| 2653 | saved pc |
| 2654 | |
| 2655 | saved static chain if ix86_static_chain_on_stack |
| 2656 | |
| 2657 | saved frame pointer if frame_pointer_needed |
| 2658 | <- HARD_FRAME_POINTER |
| 2659 | [saved regs] |
| 2660 | <- reg_save_offset |
| 2661 | [padding0] |
| 2662 | <- stack_realign_offset |
| 2663 | [saved SSE regs] |
| 2664 | OR |
| 2665 | [stub-saved registers for ms x64 --> sysv clobbers |
| 2666 | <- Start of out-of-line, stub-saved/restored regs |
| 2667 | (see libgcc/config/i386/(sav|res)ms64*.S) |
| 2668 | [XMM6-15] |
| 2669 | [RSI] |
| 2670 | [RDI] |
| 2671 | [?RBX] only if RBX is clobbered |
| 2672 | [?RBP] only if RBP and RBX are clobbered |
| 2673 | [?R12] only if R12 and all previous regs are clobbered |
| 2674 | [?R13] only if R13 and all previous regs are clobbered |
| 2675 | [?R14] only if R14 and all previous regs are clobbered |
| 2676 | [?R15] only if R15 and all previous regs are clobbered |
| 2677 | <- end of stub-saved/restored regs |
| 2678 | [padding1] |
| 2679 | ] |
| 2680 | <- sse_reg_save_offset |
| 2681 | [padding2] |
| 2682 | | <- FRAME_POINTER |
| 2683 | [va_arg registers] | |
| 2684 | | |
| 2685 | [frame] | |
| 2686 | | |
| 2687 | [padding2] | = to_allocate |
| 2688 | <- STACK_POINTER |
| 2689 | */ |
struct GTY(()) ix86_frame
{
  /* Number of SSE registers that need saving ([saved SSE regs] in the
     frame layout above).  */
  int nsseregs;
  /* Number of general registers that need saving ([saved regs]).  */
  int nregs;
  /* Size of the [va_arg registers] save area.  */
  int va_arg_size;
  /* Size in bytes of the red zone, or 0 if none is in effect.  */
  int red_zone_size;
  /* Size of the area for outgoing (stack-passed) call arguments.  */
  int outgoing_arguments_size;

  /* The offsets relative to ARG_POINTER.  */
  HOST_WIDE_INT frame_pointer_offset;
  HOST_WIDE_INT hard_frame_pointer_offset;
  HOST_WIDE_INT stack_pointer_offset;
  /* NOTE(review): hfp_save_offset is presumably where the hard frame
     pointer itself is saved, which may differ from reg_save_offset when
     realigning -- confirm in ix86_compute_frame_layout.  */
  HOST_WIDE_INT hfp_save_offset;
  HOST_WIDE_INT reg_save_offset;
  /* NOTE(review): extra allocation performed for stack realignment --
     confirm in ix86_compute_frame_layout.  */
  HOST_WIDE_INT stack_realign_allocate;
  HOST_WIDE_INT stack_realign_offset;
  HOST_WIDE_INT sse_reg_save_offset;

  /* When save_regs_using_mov is set, emit prologue using
     move instead of push instructions.  */
  bool save_regs_using_mov;

  /* Assume without checking that:
     EXPENSIVE_P = expensive_function_p (EXPENSIVE_COUNT).  */
  bool expensive_p;
  int expensive_count;
};
| 2717 | |
/* Machine specific frame tracking during prologue/epilogue generation.  All
   values are positive, but since the x86 stack grows downward, are subtracted
   from the CFA to produce a valid address.  */

struct GTY(()) machine_frame_state
{
  /* This pair tracks the currently active CFA as reg+offset.  When reg
     is drap_reg, we don't bother trying to record here the real CFA when
     it might really be a DW_CFA_def_cfa_expression.  */
  rtx cfa_reg;
  HOST_WIDE_INT cfa_offset;

  /* The current offset (canonically from the CFA) of ESP and EBP.
     When stack frame re-alignment is active, these may not be relative
     to the CFA.  However, in all cases they are relative to the offsets
     of the saved registers stored in ix86_frame.  */
  HOST_WIDE_INT sp_offset;
  HOST_WIDE_INT fp_offset;

  /* The size of the red-zone that may be assumed for the purposes of
     eliding register restore notes in the epilogue.  This may be zero
     if no red-zone is in effect, or may be reduced from the real
     red-zone value by a maximum runtime stack re-alignment value.  */
  int red_zone_offset;

  /* Indicate whether each of ESP, EBP or DRAP currently holds a valid
     value within the frame.  If false then the offset above should be
     ignored.  Note that DRAP, if valid, *always* points to the CFA and
     thus has an offset of zero.  */
  BOOL_BITFIELD sp_valid : 1;
  BOOL_BITFIELD fp_valid : 1;
  BOOL_BITFIELD drap_valid : 1;

  /* Indicate whether the local stack frame has been re-aligned.  When
     set, the SP/FP offsets above are relative to the aligned frame
     and not the CFA.  */
  BOOL_BITFIELD realigned : 1;

  /* Indicates whether the stack pointer has been re-aligned.  When set,
     SP/FP continue to be relative to the CFA, but the stack pointer
     should only be used for offsets > sp_realigned_offset, while
     the frame pointer should be used for offsets <= sp_realigned_fp_last.
     The flags realigned and sp_realigned are mutually exclusive.  */
  BOOL_BITFIELD sp_realigned : 1;

  /* When APX_PPX is used in the prologue, force the epilogue to emit
     POPP instead of move and leave.  */
  BOOL_BITFIELD apx_ppx_used : 1;

  /* If sp_realigned is set, this is the last valid offset from the CFA
     that can be used for access with the frame pointer.  */
  HOST_WIDE_INT sp_realigned_fp_last;

  /* If sp_realigned is set, this is the offset from the CFA that the stack
     pointer was realigned, and may or may not be equal to sp_realigned_fp_last.
     Access via the stack pointer is only valid for offsets that are greater than
     this value.  */
  HOST_WIDE_INT sp_realigned_offset;
};
| 2777 | |
/* SEH unwind state; opaque here, private to winnt.cc.  */
struct seh_frame_state;

/* Classification of the current function, driven by the "interrupt"
   attribute (see func_type in machine_function below).  */
enum function_type
{
  /* Not yet determined.  */
  TYPE_UNKNOWN = 0,
  /* An ordinary function.  */
  TYPE_NORMAL,
  /* The current function is an interrupt service routine with a
     pointer argument as specified by the "interrupt" attribute.  */
  TYPE_INTERRUPT,
  /* The current function is an interrupt service routine with a
     pointer argument and an integer argument as specified by the
     "interrupt" attribute.  */
  TYPE_EXCEPTION
};
| 2793 | |
/* How the current function's set of call-saved registers deviates from
   the ABI default (see call_saved_registers in machine_function).  */
enum call_saved_registers_type
{
  /* Use the ABI's usual set of call-saved registers.  */
  TYPE_DEFAULT_CALL_SAVED_REGISTERS = 0,
  /* The current function is a function specified with the "interrupt"
     or "no_caller_saved_registers" attribute.  */
  TYPE_NO_CALLER_SAVED_REGISTERS,
  /* The current function is a function specified with the
     "no_callee_saved_registers" attribute.  */
  TYPE_NO_CALLEE_SAVED_REGISTERS,
  /* The current function is a function specified with the "noreturn"
     attribute.  */
  TYPE_NO_CALLEE_SAVED_REGISTERS_EXCEPT_BP,
};
| 2807 | |
/* Kind of instruction queued for emission at function entrance (see
   insn_queued_at_entrance in machine_function below).  */
enum queued_insn_type
{
  /* Nothing queued.  */
  TYPE_NONE = 0,
  /* An ENDBR instruction.  */
  TYPE_ENDBR,
  /* A patchable area.  */
  TYPE_PATCHABLE_AREA
};
| 2814 | |
| 2815 | struct GTY(()) machine_function { |
| 2816 | struct stack_local_entry *stack_locals; |
| 2817 | int varargs_gpr_size; |
| 2818 | int varargs_fpr_size; |
| 2819 | int optimize_mode_switching[MAX_386_ENTITIES]; |
| 2820 | |
| 2821 | /* Cached initial frame layout for the current function. */ |
| 2822 | struct ix86_frame frame; |
| 2823 | |
| 2824 | /* For -fsplit-stack support: A stack local which holds a pointer to |
| 2825 | the stack arguments for a function with a variable number of |
| 2826 | arguments. This is set at the start of the function and is used |
| 2827 | to initialize the overflow_arg_area field of the va_list |
| 2828 | structure. */ |
| 2829 | rtx split_stack_varargs_pointer; |
| 2830 | |
| 2831 | /* This value is used for amd64 targets and specifies the current abi |
| 2832 | to be used. MS_ABI means ms abi. Otherwise SYSV_ABI means sysv abi. */ |
| 2833 | ENUM_BITFIELD(calling_abi) call_abi : 8; |
| 2834 | |
| 2835 | /* Nonzero if the function accesses a previous frame. */ |
| 2836 | BOOL_BITFIELD accesses_prev_frame : 1; |
| 2837 | |
| 2838 | /* Set by ix86_compute_frame_layout and used by prologue/epilogue |
| 2839 | expander to determine the style used. */ |
| 2840 | BOOL_BITFIELD use_fast_prologue_epilogue : 1; |
| 2841 | |
| 2842 | /* Nonzero if the current function calls pc thunk and |
| 2843 | must not use the red zone. */ |
| 2844 | BOOL_BITFIELD pc_thunk_call_expanded : 1; |
| 2845 | |
| 2846 | /* If true, the current function needs the default PIC register, not |
| 2847 | an alternate register (on x86) and must not use the red zone (on |
| 2848 | x86_64), even if it's a leaf function. We don't want the |
| 2849 | function to be regarded as non-leaf because TLS calls need not |
| 2850 | affect register allocation. This flag is set when a TLS call |
| 2851 | instruction is expanded within a function, and never reset, even |
| 2852 | if all such instructions are optimized away. Use the |
| 2853 | ix86_current_function_calls_tls_descriptor macro for a better |
| 2854 | approximation. */ |
| 2855 | BOOL_BITFIELD tls_descriptor_call_expanded_p : 1; |
| 2856 | |
| 2857 | /* If true, the current function has a STATIC_CHAIN is placed on the |
| 2858 | stack below the return address. */ |
| 2859 | BOOL_BITFIELD static_chain_on_stack : 1; |
| 2860 | |
| 2861 | /* If true, it is safe to not save/restore DRAP register. */ |
| 2862 | BOOL_BITFIELD no_drap_save_restore : 1; |
| 2863 | |
| 2864 | /* Function type. */ |
| 2865 | ENUM_BITFIELD(function_type) func_type : 2; |
| 2866 | |
| 2867 | /* How to generate indirec branch. */ |
| 2868 | ENUM_BITFIELD(indirect_branch) indirect_branch_type : 3; |
| 2869 | |
| 2870 | /* If true, the current function has local indirect jumps, like |
| 2871 | "indirect_jump" or "tablejump". */ |
| 2872 | BOOL_BITFIELD has_local_indirect_jump : 1; |
| 2873 | |
| 2874 | /* How to generate function return. */ |
| 2875 | ENUM_BITFIELD(indirect_branch) function_return_type : 3; |
| 2876 | |
| 2877 | /* Call saved registers type. */ |
| 2878 | ENUM_BITFIELD(call_saved_registers_type) call_saved_registers : 2; |
| 2879 | |
| 2880 | /* If true, there is register available for argument passing. This |
| 2881 | is used only in ix86_function_ok_for_sibcall by 32-bit to determine |
| 2882 | if there is scratch register available for indirect sibcall. In |
| 2883 | 64-bit, rax, r10 and r11 are scratch registers which aren't used to |
| 2884 | pass arguments and can be used for indirect sibcall. */ |
| 2885 | BOOL_BITFIELD arg_reg_available : 1; |
| 2886 | |
| 2887 | /* If true, we're out-of-lining reg save/restore for regs clobbered |
| 2888 | by 64-bit ms_abi functions calling a sysv_abi function. */ |
| 2889 | BOOL_BITFIELD call_ms2sysv : 1; |
| 2890 | |
| 2891 | /* If true, the incoming 16-byte aligned stack has an offset (of 8) and |
| 2892 | needs padding prior to out-of-line stub save/restore area. */ |
| 2893 | BOOL_BITFIELD call_ms2sysv_pad_in : 1; |
| 2894 | |
| 2895 | /* This is the number of extra registers saved by stub (valid range is |
| 2896 | 0-6). Each additional register is only saved/restored by the stubs |
| 2897 | if all successive ones are. (Will always be zero when using a hard |
| 2898 | frame pointer.) */ |
| 2899 | unsigned int :3; |
| 2900 | |
| 2901 | /* Nonzero if the function places outgoing arguments on stack. */ |
| 2902 | BOOL_BITFIELD outgoing_args_on_stack : 1; |
| 2903 | |
| 2904 | /* If true, ENDBR or patchable area is queued at function entrance. */ |
| 2905 | ENUM_BITFIELD(queued_insn_type) insn_queued_at_entrance : 2; |
| 2906 | |
| 2907 | /* If true, the function label has been emitted. */ |
| 2908 | BOOL_BITFIELD function_label_emitted : 1; |
| 2909 | |
| 2910 | /* True if the function needs a stack frame. */ |
| 2911 | BOOL_BITFIELD stack_frame_required : 1; |
| 2912 | |
| 2913 | /* True if we should act silently, rather than raise an error for |
| 2914 | invalid calls. */ |
| 2915 | BOOL_BITFIELD silent_p : 1; |
| 2916 | |
| 2917 | /* True if red zone is used. */ |
| 2918 | BOOL_BITFIELD red_zone_used : 1; |
| 2919 | |
| 2920 | /* True if inline asm with redzone clobber has been seen. */ |
| 2921 | BOOL_BITFIELD asm_redzone_clobber_seen : 1; |
| 2922 | |
| 2923 | /* The largest alignment, in bytes, of stack slot actually used. */ |
| 2924 | unsigned int max_used_stack_alignment; |
| 2925 | |
| 2926 | /* During prologue/epilogue generation, the current frame state. |
| 2927 | Otherwise, the frame state at the end of the prologue. */ |
| 2928 | struct machine_frame_state fs; |
| 2929 | |
| 2930 | /* During SEH output, this is non-null. */ |
| 2931 | struct seh_frame_state * GTY((skip("" ))) seh; |
| 2932 | }; |
| 2933 | |
/* The distinct va_list type nodes for the SysV and Microsoft 64-bit
   ABIs -- NOTE(review): presumably built during va_list setup in
   i386.cc; confirm.  */
extern GTY(()) tree sysv_va_list_type_node;
extern GTY(()) tree ms_va_list_type_node;
| 2936 | #endif |
| 2937 | |
/* Convenience accessors for fields of the current function's
   machine_function (see struct machine_function above).  */
#define ix86_stack_locals (cfun->machine->stack_locals)
#define ix86_varargs_gpr_size (cfun->machine->varargs_gpr_size)
#define ix86_varargs_fpr_size (cfun->machine->varargs_fpr_size)
#define ix86_optimize_mode_switching (cfun->machine->optimize_mode_switching)
#define ix86_pc_thunk_call_expanded (cfun->machine->pc_thunk_call_expanded)
#define ix86_tls_descriptor_calls_expanded_in_cfun \
(cfun->machine->tls_descriptor_call_expanded_p)
/* Since tls_descriptor_call_expanded is not cleared, even if all TLS
   calls are optimized away, we try to detect cases in which it was
   optimized away.  Since such instructions contain a (use (reg REG_SP)),
   we can verify whether there's any such instruction live by testing
   that REG_SP is live.  */
#define ix86_current_function_calls_tls_descriptor \
(ix86_tls_descriptor_calls_expanded_in_cfun && df_regs_ever_live_p (SP_REG))
#define ix86_static_chain_on_stack (cfun->machine->static_chain_on_stack)
#define ix86_red_zone_used (cfun->machine->red_zone_used)
| 2954 | |
/* Control behavior of x86_file_start.  */
#define X86_FILE_START_VERSION_DIRECTIVE false
#define X86_FILE_START_FLTUSED false

/* Flag to mark data that is in the large address area.  */
#define SYMBOL_FLAG_FAR_ADDR (SYMBOL_FLAG_MACH_DEP << 0)
#define SYMBOL_REF_FAR_ADDR_P(X) \
((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_FAR_ADDR) != 0)

/* Flags to mark dllimport/dllexport.  Used by PE ports, but handy to
   have defined always, to avoid ifdefing.  */
#define SYMBOL_FLAG_DLLIMPORT (SYMBOL_FLAG_MACH_DEP << 1)
#define SYMBOL_REF_DLLIMPORT_P(X) \
((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_DLLIMPORT) != 0)

#define SYMBOL_FLAG_DLLEXPORT (SYMBOL_FLAG_MACH_DEP << 2)
#define SYMBOL_REF_DLLEXPORT_P(X) \
((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_DLLEXPORT) != 0)

/* NOTE(review): (SYMBOL_FLAG_MACH_DEP << 3) is skipped here --
   presumably reserved or claimed by an OS-specific port; confirm
   before reusing bit 3.  */
#define SYMBOL_FLAG_STUBVAR (SYMBOL_FLAG_MACH_DEP << 4)
#define SYMBOL_REF_STUBVAR_P(X) \
((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_STUBVAR) != 0)
| 2977 | |
/* Debugging entry points, implemented in the i386 back end.  */
extern void debug_ready_dispatch (void);
extern void debug_dispatch_window (int);

/* The value at zero is only defined for the LZCNT and TZCNT
   instructions, not the BSR/BSF insns in the original isa; hence the
   guards on TARGET_LZCNT and TARGET_BMI (which provides TZCNT).  When
   defined, the value at zero is the operand's bit size; the result 2
   reports it as defined (see the GCC internals documentation of these
   macros for the return-value convention).  */
#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
((VALUE) = GET_MODE_BITSIZE (MODE), TARGET_BMI ? 2 : 0)
#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
((VALUE) = GET_MODE_BITSIZE (MODE), TARGET_LZCNT ? 2 : 0)
| 2987 | |
| 2988 | |
/* Flags returned by ix86_get_callcvt ().  The first four are mutually
   exclusive base calling conventions; REGPARM/SSEREGPARM are modifier
   bits that may be combined with them.  */
#define IX86_CALLCVT_CDECL 0x1
#define IX86_CALLCVT_STDCALL 0x2
#define IX86_CALLCVT_FASTCALL 0x4
#define IX86_CALLCVT_THISCALL 0x8
#define IX86_CALLCVT_REGPARM 0x10
#define IX86_CALLCVT_SSEREGPARM 0x20

/* Extract the base calling convention, discarding modifier bits.  */
#define IX86_BASE_CALLCVT(FLAGS) \
((FLAGS) & (IX86_CALLCVT_CDECL | IX86_CALLCVT_STDCALL \
| IX86_CALLCVT_FASTCALL | IX86_CALLCVT_THISCALL))
| 3000 | |
/* Bits of recip_mask, selecting which operations may be computed with
   reciprocal approximations instead of exact division/square root.  */
#define RECIP_MASK_NONE 0x00
#define RECIP_MASK_DIV 0x01
#define RECIP_MASK_SQRT 0x02
#define RECIP_MASK_VEC_DIV 0x04
#define RECIP_MASK_VEC_SQRT 0x08
#define RECIP_MASK_ALL (RECIP_MASK_DIV | RECIP_MASK_SQRT \
| RECIP_MASK_VEC_DIV | RECIP_MASK_VEC_SQRT)
#define RECIP_MASK_DEFAULT (RECIP_MASK_VEC_DIV | RECIP_MASK_VEC_SQRT)

/* Tests of the individual recip_mask bits.  */
#define TARGET_RECIP_DIV ((recip_mask & RECIP_MASK_DIV) != 0)
#define TARGET_RECIP_SQRT ((recip_mask & RECIP_MASK_SQRT) != 0)
#define TARGET_RECIP_VEC_DIV ((recip_mask & RECIP_MASK_VEC_DIV) != 0)
#define TARGET_RECIP_VEC_SQRT ((recip_mask & RECIP_MASK_VEC_SQRT) != 0)
| 3014 | |
/* True when the auto-vectorizer's preferred vector width is 128 bits.  */
#define TARGET_PREFER_AVX128 (prefer_vector_width_type == PVW_AVX128)
/* True when the preferred vector width is at most 256 bits, i.e. the
   auto-vectorizer should avoid 512-bit vectors (note this is also true
   when TARGET_PREFER_AVX128 is).  */
#define TARGET_PREFER_AVX256 (TARGET_PREFER_AVX128 \
|| prefer_vector_width_type == PVW_AVX256)

/* True if indirect branches must go through a register, either because
   -mindirect-branch-register was given or because a non-default
   indirect-branch conversion is active for this function.  */
#define TARGET_INDIRECT_BRANCH_REGISTER \
(ix86_indirect_branch_register \
|| cfun->machine->indirect_branch_type != indirect_branch_keep)
| 3024 | |
/* HLE (hardware lock elision) acquire/release hint bits -- NOTE(review):
   presumably OR'ed into the memory-model argument of atomic builtins,
   above the memmodel bits proper; confirm against the memmodel
   definition.  */
#define IX86_HLE_ACQUIRE (1 << 16)
#define IX86_HLE_RELEASE (1 << 17)

/* For switching between functions with different target attributes.  */
#define SWITCHABLE_TARGET 1

/* Integer constants wider than HOST_WIDE_INT are represented with
   CONST_WIDE_INT rather than CONST_DOUBLE (see GCC internals,
   TARGET_SUPPORTS_WIDE_INT).  */
#define TARGET_SUPPORTS_WIDE_INT 1

#if !defined(GENERATOR_FILE) && !defined(IN_LIBGCC2)
/* Scheduling model selected for the current (sub)target.  */
extern enum attr_cpu ix86_schedule;

/* Number of registers the out-of-line ms->sysv stubs save/restore:
   XMM6-15, RSI and RDI (see the stub-save list in the ix86_frame
   layout comment above).  */
#define NUM_X86_64_MS_CLOBBERED_REGS 12
#endif

/* __builtin_eh_return can't handle stack realignment, so disable MMX/SSE
   in 32-bit libgcc functions that call it.  */
#ifndef __x86_64__
#define LIBGCC2_UNWIND_ATTRIBUTE __attribute__((target ("no-mmx,no-sse")))
#endif
| 3044 | |
| 3045 | /* |
| 3046 | Local variables: |
| 3047 | version-control: t |
| 3048 | End: |
| 3049 | */ |
| 3050 | |