1 | /* Default target hook functions. |
2 | Copyright (C) 2003-2023 Free Software Foundation, Inc. |
3 | |
4 | This file is part of GCC. |
5 | |
6 | GCC is free software; you can redistribute it and/or modify it under |
7 | the terms of the GNU General Public License as published by the Free |
8 | Software Foundation; either version 3, or (at your option) any later |
9 | version. |
10 | |
11 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or |
13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
14 | for more details. |
15 | |
16 | You should have received a copy of the GNU General Public License |
17 | along with GCC; see the file COPYING3. If not see |
18 | <http://www.gnu.org/licenses/>. */ |
19 | |
20 | /* The migration of target macros to target hooks works as follows: |
21 | |
22 | 1. Create a target hook that uses the existing target macros to |
23 | implement the same functionality. |
24 | |
25 | 2. Convert all the MI files to use the hook instead of the macro. |
26 | |
27 | 3. Repeat for a majority of the remaining target macros. This will |
28 | take some time. |
29 | |
30 | 4. Tell target maintainers to start migrating. |
31 | |
32 | 5. Eventually convert the backends to override the hook instead of |
33 | defining the macros. This will take some time too. |
34 | |
35 | 6. TBD when, poison the macros. Unmigrated targets will break at |
36 | this point. |
37 | |
38 | Note that we expect steps 1-3 to be done by the people that |
39 | understand what the MI does with each macro, and step 5 to be done |
40 | by the target maintainers for their respective targets. |
41 | |
42 | Note that steps 1 and 2 don't have to be done together, but no |
43 | target can override the new hook until step 2 is complete for it. |
44 | |
45 | Once the macros are poisoned, we will revert to the old migration |
46 | rules - migrate the macro, callers, and targets all at once. This |
47 | comment can thus be removed at that point. */ |
48 | |
49 | #include "config.h" |
50 | #include "system.h" |
51 | #include "coretypes.h" |
52 | #include "target.h" |
53 | #include "function.h" |
54 | #include "rtl.h" |
55 | #include "tree.h" |
56 | #include "tree-ssa-alias.h" |
57 | #include "gimple-expr.h" |
58 | #include "memmodel.h" |
59 | #include "backend.h" |
60 | #include "emit-rtl.h" |
61 | #include "df.h" |
62 | #include "tm_p.h" |
63 | #include "stringpool.h" |
64 | #include "tree-vrp.h" |
65 | #include "tree-ssanames.h" |
66 | #include "profile-count.h" |
67 | #include "optabs.h" |
68 | #include "regs.h" |
69 | #include "recog.h" |
70 | #include "diagnostic-core.h" |
71 | #include "fold-const.h" |
72 | #include "stor-layout.h" |
73 | #include "varasm.h" |
74 | #include "flags.h" |
75 | #include "explow.h" |
76 | #include "expmed.h" |
77 | #include "calls.h" |
78 | #include "expr.h" |
79 | #include "output.h" |
80 | #include "common/common-target.h" |
81 | #include "reload.h" |
82 | #include "intl.h" |
83 | #include "opts.h" |
84 | #include "gimplify.h" |
85 | #include "predict.h" |
86 | #include "real.h" |
87 | #include "langhooks.h" |
88 | #include "sbitmap.h" |
89 | #include "function-abi.h" |
90 | #include "attribs.h" |
91 | #include "asan.h" |
92 | #include "emit-rtl.h" |
93 | #include "gimple.h" |
94 | #include "cfgloop.h" |
95 | #include "tree-vectorizer.h" |
96 | #include "options.h" |
97 | #include "case-cfn-macros.h" |
98 | |
/* The default implementation of TARGET_LEGITIMATE_ADDRESS_P.  Defer to
   the old GO_IF_LEGITIMATE_ADDRESS target macro when it is defined;
   targets without that macro must override this hook.  */

bool
default_legitimate_address_p (machine_mode mode ATTRIBUTE_UNUSED,
			      rtx addr ATTRIBUTE_UNUSED,
			      bool strict ATTRIBUTE_UNUSED,
			      code_helper ATTRIBUTE_UNUSED)
{
#ifdef GO_IF_LEGITIMATE_ADDRESS
  /* Defer to the old implementation using a goto.  */
  if (strict)
    return strict_memory_address_p (mode, addr);
  else
    return memory_address_p (mode, addr);
#else
  gcc_unreachable ();
#endif
}
115 | |
/* Default hook for noting an external libcall FUN: forward to the
   ASM_OUTPUT_EXTERNAL_LIBCALL macro if the target defines it, otherwise
   do nothing.  */

void
default_external_libcall (rtx fun ATTRIBUTE_UNUSED)
{
#ifdef ASM_OUTPUT_EXTERNAL_LIBCALL
  ASM_OUTPUT_EXTERNAL_LIBCALL (asm_out_file, fun);
#endif
}
123 | |
124 | int |
125 | default_unspec_may_trap_p (const_rtx x, unsigned flags) |
126 | { |
127 | int i; |
128 | |
129 | /* Any floating arithmetic may trap. */ |
130 | if ((SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)) |
131 | return 1; |
132 | |
133 | for (i = 0; i < XVECLEN (x, 0); ++i) |
134 | { |
135 | if (may_trap_p_1 (XVECEXP (x, 0, i), flags)) |
136 | return 1; |
137 | } |
138 | |
139 | return 0; |
140 | } |
141 | |
/* The default implementation of TARGET_PROMOTE_FUNCTION_MODE: only
   promote (via promote_mode) when a TYPE is known and FOR_RETURN == 2;
   otherwise return MODE unchanged.  *PUNSIGNEDP may be updated by
   promote_mode in the promoting case.  */

machine_mode
default_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
			       machine_mode mode,
			       int *punsignedp ATTRIBUTE_UNUSED,
			       const_tree funtype ATTRIBUTE_UNUSED,
			       int for_return ATTRIBUTE_UNUSED)
{
  if (type != NULL_TREE && for_return == 2)
    return promote_mode (type, mode, punsignedp);
  return mode;
}
153 | |
/* Variant of TARGET_PROMOTE_FUNCTION_MODE that promotes unconditionally
   via promote_mode, regardless of FOR_RETURN.  */

machine_mode
default_promote_function_mode_always_promote (const_tree type,
					      machine_mode mode,
					      int *punsignedp,
					      const_tree funtype ATTRIBUTE_UNUSED,
					      int for_return ATTRIBUTE_UNUSED)
{
  return promote_mode (type, mode, punsignedp);
}
163 | |
164 | machine_mode |
165 | default_cc_modes_compatible (machine_mode m1, machine_mode m2) |
166 | { |
167 | if (m1 == m2) |
168 | return m1; |
169 | return VOIDmode; |
170 | } |
171 | |
/* The default implementation of TARGET_RETURN_IN_MEMORY: values whose
   mode is BLKmode are returned in memory; everything else in registers.  */

bool
default_return_in_memory (const_tree type,
			  const_tree fntype ATTRIBUTE_UNUSED)
{
  return (TYPE_MODE (type) == BLKmode);
}
178 | |
/* The default implementation of TARGET_LEGITIMIZE_ADDRESS: perform no
   transformation and return X unchanged.  */

rtx
default_legitimize_address (rtx x, rtx orig_x ATTRIBUTE_UNUSED,
			    machine_mode mode ATTRIBUTE_UNUSED)
{
  return x;
}

/* The default implementation of TARGET_LEGITIMIZE_ADDRESS_DISPLACEMENT:
   decline to split any displacement.  */

bool
default_legitimize_address_displacement (rtx *, rtx *, poly_int64,
					 machine_mode)
{
  return false;
}
192 | |
193 | bool |
194 | default_const_not_ok_for_debug_p (rtx x) |
195 | { |
196 | if (GET_CODE (x) == UNSPEC) |
197 | return true; |
198 | return false; |
199 | } |
200 | |
/* The default implementation of TARGET_EXPAND_BUILTIN_SAVEREGS: report
   an error (the target does not support __builtin_saveregs) and return
   a harmless constant zero.  */

rtx
default_expand_builtin_saveregs (void)
{
  error ("%<__builtin_saveregs%> not supported by this target");
  return const0_rtx;
}

/* The default implementation of TARGET_SETUP_INCOMING_VARARGS:
   do nothing.  Targets compare against this function's address (see
   default_pretend_outgoing_varargs_named) to tell whether the hook
   has been overridden.  */

void
default_setup_incoming_varargs (cumulative_args_t,
				const function_arg_info &, int *, int)
{
}
213 | |
/* The default implementation of TARGET_BUILTIN_SETJMP_FRAME_VALUE:
   use the virtual frame base, virtual_stack_vars_rtx.  */

rtx
default_builtin_setjmp_frame_value (void)
{
  return virtual_stack_vars_rtx;
}
221 | |
/* Generic hook that takes a CUMULATIVE_ARGS pointer and returns false.  */

bool
hook_bool_CUMULATIVE_ARGS_false (cumulative_args_t ca ATTRIBUTE_UNUSED)
{
  return false;
}

/* Default for TARGET_PRETEND_OUTGOING_VARARGS_NAMED: report true
   exactly when the target has overridden the setup_incoming_varargs
   hook (i.e. it is not the do-nothing default above).  */

bool
default_pretend_outgoing_varargs_named (cumulative_args_t ca ATTRIBUTE_UNUSED)
{
  return (targetm.calls.setup_incoming_varargs
	  != default_setup_incoming_varargs);
}
236 | |
/* Default EH return filter mode: whatever the target's unwind-word
   mode hook reports.  */

scalar_int_mode
default_eh_return_filter_mode (void)
{
  return targetm.unwind_word_mode ();
}

/* Default mode for libgcc comparison results: the word mode.  */

scalar_int_mode
default_libgcc_cmp_return_mode (void)
{
  return word_mode;
}

/* Default mode for libgcc shift counts: the word mode.  */

scalar_int_mode
default_libgcc_shift_count_mode (void)
{
  return word_mode;
}

/* Default unwind-word mode: the word mode.  */

scalar_int_mode
default_unwind_word_mode (void)
{
  return word_mode;
}
260 | |
/* The default implementation of TARGET_SHIFT_TRUNCATION_MASK: if the
   target truncates shift counts (SHIFT_COUNT_TRUNCATED), the mask is
   the element bitsize minus one; otherwise 0 (no truncation).  */

unsigned HOST_WIDE_INT
default_shift_truncation_mask (machine_mode mode)
{
  return SHIFT_COUNT_TRUNCATED ? GET_MODE_UNIT_BITSIZE (mode) - 1 : 0;
}

/* The default implementation of TARGET_MIN_DIVISIONS_FOR_RECIP_MUL:
   require one more division (3 vs 2) before using a reciprocal when
   the target has a hardware divide insn for MODE.  */

unsigned int
default_min_divisions_for_recip_mul (machine_mode mode ATTRIBUTE_UNUSED)
{
  return have_insn_for (DIV, mode) ? 3 : 2;
}
276 | |
/* The default implementation of TARGET_MODE_REP_EXTENDED: no known
   extension of representations (UNKNOWN).  */

int
default_mode_rep_extended (scalar_int_mode, scalar_int_mode)
{
  return UNKNOWN;
}

/* Generic hook that takes a CUMULATIVE_ARGS pointer and returns true.  */

bool
hook_bool_CUMULATIVE_ARGS_true (cumulative_args_t a ATTRIBUTE_UNUSED)
{
  return true;
}
292 | |
/* Return machine mode for non-standard constant literal suffix SUFFIX,
   or VOIDmode if non-standard suffixes are unsupported.  */
machine_mode
default_mode_for_suffix (char suffix ATTRIBUTE_UNUSED)
{
  return VOIDmode;
}

/* Default type of the C++ one-time-construction guard variable.
   The generic C++ ABI specifies this is a 64-bit value, hence
   long long.  */
tree
default_cxx_guard_type (void)
{
  return long_long_integer_type_node;
}
307 | |
308 | /* Returns the size of the cookie to use when allocating an array |
309 | whose elements have the indicated TYPE. Assumes that it is already |
310 | known that a cookie is needed. */ |
311 | |
312 | tree |
313 | default_cxx_get_cookie_size (tree type) |
314 | { |
315 | tree cookie_size; |
316 | |
317 | /* We need to allocate an additional max (sizeof (size_t), alignof |
318 | (true_type)) bytes. */ |
319 | tree sizetype_size; |
320 | tree type_align; |
321 | |
322 | sizetype_size = size_in_bytes (sizetype); |
323 | type_align = size_int (TYPE_ALIGN_UNIT (type)); |
324 | if (tree_int_cst_lt (t1: type_align, t2: sizetype_size)) |
325 | cookie_size = sizetype_size; |
326 | else |
327 | cookie_size = type_align; |
328 | |
329 | return cookie_size; |
330 | } |
331 | |
/* Return true if a parameter must be passed by reference.  This version
   of the TARGET_PASS_BY_REFERENCE hook uses just MUST_PASS_IN_STACK.  */

bool
hook_pass_by_reference_must_pass_in_stack (cumulative_args_t,
					   const function_arg_info &arg)
{
  return targetm.calls.must_pass_in_stack (arg);
}

/* Return true if a parameter follows callee copies conventions.  This
   version of the hook is true for all named arguments.  */

bool
hook_callee_copies_named (cumulative_args_t, const function_arg_info &arg)
{
  return arg.named;
}
350 | |
/* Emit to STREAM the assembler syntax for insn operand X, deferring to
   the old PRINT_OPERAND macro.  Targets without the macro must override
   the hook.  */

void
default_print_operand (FILE *stream ATTRIBUTE_UNUSED, rtx x ATTRIBUTE_UNUSED,
		       int code ATTRIBUTE_UNUSED)
{
#ifdef PRINT_OPERAND
  PRINT_OPERAND (stream, x, code);
#else
  gcc_unreachable ();
#endif
}

/* Emit to STREAM the assembler syntax for an insn operand whose memory
   address is X, deferring to the old PRINT_OPERAND_ADDRESS macro.  */

void
default_print_operand_address (FILE *stream ATTRIBUTE_UNUSED,
			       machine_mode /*mode*/,
			       rtx x ATTRIBUTE_UNUSED)
{
#ifdef PRINT_OPERAND_ADDRESS
  PRINT_OPERAND_ADDRESS (stream, x);
#else
  gcc_unreachable ();
#endif
}

/* Return true if CODE is a valid punctuation character for the
   `print_operand' hook, deferring to the old macro; false if the
   target defines no such macro.  */

bool
default_print_operand_punct_valid_p (unsigned char code ATTRIBUTE_UNUSED)
{
#ifdef PRINT_OPERAND_PUNCT_VALID_P
  return PRINT_OPERAND_PUNCT_VALID_P (code);
#else
  return false;
#endif
}
391 | |
/* The default implementation of TARGET_MANGLE_ASSEMBLER_NAME.  Strip
   any target-specific encoding from NAME; then, unless NAME began with
   '*' (which suppresses prefixing), prepend user_label_prefix.  Returns
   the result as an identifier node.  */
tree
default_mangle_assembler_name (const char *name ATTRIBUTE_UNUSED)
{
  const char *skipped = name + (*name == '*' ? 1 : 0);
  const char *stripped = targetm.strip_name_encoding (skipped);
  if (*name != '*' && user_label_prefix[0])
    stripped = ACONCAT ((user_label_prefix, stripped, NULL));
  return get_identifier (stripped);
}

/* The default implementation of TARGET_TRANSLATE_MODE_ATTRIBUTE:
   leave MODE unchanged.  */

machine_mode
default_translate_mode_attribute (machine_mode mode)
{
  return mode;
}
410 | |
411 | /* True if MODE is valid for the target. By "valid", we mean able to |
412 | be manipulated in non-trivial ways. In particular, this means all |
413 | the arithmetic is supported. |
414 | |
415 | By default we guess this means that any C type is supported. If |
416 | we can't map the mode back to a type that would be available in C, |
417 | then reject it. Special case, here, is the double-word arithmetic |
418 | supported by optabs.cc. */ |
419 | |
420 | bool |
421 | default_scalar_mode_supported_p (scalar_mode mode) |
422 | { |
423 | int precision = GET_MODE_PRECISION (mode); |
424 | |
425 | switch (GET_MODE_CLASS (mode)) |
426 | { |
427 | case MODE_PARTIAL_INT: |
428 | case MODE_INT: |
429 | if (precision == CHAR_TYPE_SIZE) |
430 | return true; |
431 | if (precision == SHORT_TYPE_SIZE) |
432 | return true; |
433 | if (precision == INT_TYPE_SIZE) |
434 | return true; |
435 | if (precision == LONG_TYPE_SIZE) |
436 | return true; |
437 | if (precision == LONG_LONG_TYPE_SIZE) |
438 | return true; |
439 | if (precision == 2 * BITS_PER_WORD) |
440 | return true; |
441 | return false; |
442 | |
443 | case MODE_FLOAT: |
444 | if (precision == FLOAT_TYPE_SIZE) |
445 | return true; |
446 | if (precision == DOUBLE_TYPE_SIZE) |
447 | return true; |
448 | if (precision == LONG_DOUBLE_TYPE_SIZE) |
449 | return true; |
450 | return false; |
451 | |
452 | case MODE_DECIMAL_FLOAT: |
453 | case MODE_FRACT: |
454 | case MODE_UFRACT: |
455 | case MODE_ACCUM: |
456 | case MODE_UACCUM: |
457 | return false; |
458 | |
459 | default: |
460 | gcc_unreachable (); |
461 | } |
462 | } |
463 | |
/* Return true if libgcc supports floating-point mode MODE (known to
   be supported as a scalar mode).  The default accepts exactly the
   float modes the target defines (SF/DF/XF/TF).  */

bool
default_libgcc_floating_mode_supported_p (scalar_float_mode mode)
{
  switch (mode)
    {
#ifdef HAVE_SFmode
    case E_SFmode:
#endif
#ifdef HAVE_DFmode
    case E_DFmode:
#endif
#ifdef HAVE_XFmode
    case E_XFmode:
#endif
#ifdef HAVE_TFmode
    case E_TFmode:
#endif
      return true;

    default:
      return false;
    }
}
490 | |
491 | /* Return the machine mode to use for the type _FloatN, if EXTENDED is |
492 | false, or _FloatNx, if EXTENDED is true, or VOIDmode if not |
493 | supported. */ |
494 | opt_scalar_float_mode |
495 | default_floatn_mode (int n, bool extended) |
496 | { |
497 | if (extended) |
498 | { |
499 | opt_scalar_float_mode cand1, cand2; |
500 | scalar_float_mode mode; |
501 | switch (n) |
502 | { |
503 | case 32: |
504 | #ifdef HAVE_DFmode |
505 | cand1 = DFmode; |
506 | #endif |
507 | break; |
508 | |
509 | case 64: |
510 | #ifdef HAVE_XFmode |
511 | cand1 = XFmode; |
512 | #endif |
513 | #ifdef HAVE_TFmode |
514 | cand2 = TFmode; |
515 | #endif |
516 | break; |
517 | |
518 | case 128: |
519 | break; |
520 | |
521 | default: |
522 | /* Those are the only valid _FloatNx types. */ |
523 | gcc_unreachable (); |
524 | } |
525 | if (cand1.exists (mode: &mode) |
526 | && REAL_MODE_FORMAT (mode)->ieee_bits > n |
527 | && targetm.scalar_mode_supported_p (mode) |
528 | && targetm.libgcc_floating_mode_supported_p (mode)) |
529 | return cand1; |
530 | if (cand2.exists (mode: &mode) |
531 | && REAL_MODE_FORMAT (mode)->ieee_bits > n |
532 | && targetm.scalar_mode_supported_p (mode) |
533 | && targetm.libgcc_floating_mode_supported_p (mode)) |
534 | return cand2; |
535 | } |
536 | else |
537 | { |
538 | opt_scalar_float_mode cand; |
539 | scalar_float_mode mode; |
540 | switch (n) |
541 | { |
542 | case 16: |
543 | /* Always enable _Float16 if we have basic support for the mode. |
544 | Targets can control the range and precision of operations on |
545 | the _Float16 type using TARGET_C_EXCESS_PRECISION. */ |
546 | #ifdef HAVE_HFmode |
547 | cand = HFmode; |
548 | #endif |
549 | break; |
550 | |
551 | case 32: |
552 | #ifdef HAVE_SFmode |
553 | cand = SFmode; |
554 | #endif |
555 | break; |
556 | |
557 | case 64: |
558 | #ifdef HAVE_DFmode |
559 | cand = DFmode; |
560 | #endif |
561 | break; |
562 | |
563 | case 128: |
564 | #ifdef HAVE_TFmode |
565 | cand = TFmode; |
566 | #endif |
567 | break; |
568 | |
569 | default: |
570 | break; |
571 | } |
572 | if (cand.exists (mode: &mode) |
573 | && REAL_MODE_FORMAT (mode)->ieee_bits == n |
574 | && targetm.scalar_mode_supported_p (mode) |
575 | && targetm.libgcc_floating_mode_supported_p (mode)) |
576 | return cand; |
577 | } |
578 | return opt_scalar_float_mode (); |
579 | } |
580 | |
/* Define this to return true if the _Floatn and _Floatnx built-in functions
   should implicitly enable the built-in function without the __builtin_ prefix
   in addition to the normal built-in function with the __builtin_ prefix.  The
   default is to only enable built-in functions without the __builtin_ prefix
   for the GNU C language.  The argument FUNC is the enum built_in_function
   id of the function to be enabled.  */
587 | |
bool
default_floatn_builtin_p (int func ATTRIBUTE_UNUSED)
{
  /* Cache the language check: it cannot change within a run.  */
  static bool first_time_p = true;
  static bool c_or_objective_c;

  if (first_time_p)
    {
      first_time_p = false;
      c_or_objective_c = lang_GNU_C () || lang_GNU_OBJC ();
    }

  return c_or_objective_c;
}
602 | |
/* Make some target macros useable by target-independent code.  */
bool
targhook_words_big_endian (void)
{
  return !!WORDS_BIG_ENDIAN;
}

/* As above, for FLOAT_WORDS_BIG_ENDIAN.  */
bool
targhook_float_words_big_endian (void)
{
  return !!FLOAT_WORDS_BIG_ENDIAN;
}
615 | |
/* True if the target supports floating-point exceptions and rounding
   modes.  The default uses the availability of a DFmode add insn as a
   proxy for real FP hardware.  */

bool
default_float_exceptions_rounding_supported_p (void)
{
#ifdef HAVE_adddf3
  return HAVE_adddf3;
#else
  return false;
#endif
}
628 | |
/* True if the target supports decimal floating point, per the
   configure-time ENABLE_DECIMAL_FLOAT setting.  */

bool
default_decimal_float_supported_p (void)
{
  return ENABLE_DECIMAL_FLOAT;
}

/* True if the target supports fixed-point arithmetic, per the
   configure-time ENABLE_FIXED_POINT setting.  */

bool
default_fixed_point_supported_p (void)
{
  return ENABLE_FIXED_POINT;
}

/* True if the target supports GNU indirect functions (ifuncs).  */

bool
default_has_ifunc_p (void)
{
  return HAVE_GNU_INDIRECT_FUNCTION;
}
652 | |
/* Return true if we predict the loop LOOP will be transformed to a
   low-overhead loop, otherwise return false.

   By default, false is returned, as this hook's applicability should be
   verified for each target.  Target maintainers should re-define the hook
   if the target can take advantage of it.  */

bool
default_predict_doloop_p (class loop *loop ATTRIBUTE_UNUSED)
{
  return false;
}

/* Default preferred doloop iteration-counter mode: just use the input
   MODE itself.  */

machine_mode
default_preferred_doloop_mode (machine_mode mode)
{
  return mode;
}
673 | |
674 | /* NULL if INSN insn is valid within a low-overhead loop, otherwise returns |
675 | an error message. |
676 | |
677 | This function checks whether a given INSN is valid within a low-overhead |
678 | loop. If INSN is invalid it returns the reason for that, otherwise it |
679 | returns NULL. A called function may clobber any special registers required |
680 | for low-overhead looping. Additionally, some targets (eg, PPC) use the count |
681 | register for branch on table instructions. We reject the doloop pattern in |
682 | these cases. */ |
683 | |
684 | const char * |
685 | default_invalid_within_doloop (const rtx_insn *insn) |
686 | { |
687 | if (CALL_P (insn)) |
688 | return "Function call in loop." ; |
689 | |
690 | if (tablejump_p (insn, NULL, NULL) || computed_jump_p (insn)) |
691 | return "Computed branch in the loop." ; |
692 | |
693 | return NULL; |
694 | } |
695 | |
/* Mapping of builtin functions to vectorized variants: by default
   there are none.  */

tree
default_builtin_vectorized_function (unsigned int, tree, tree)
{
  return NULL_TREE;
}

/* Mapping of target builtin functions to vectorized variants: by
   default there are none.  */

tree
default_builtin_md_vectorized_function (tree, tree, tree)
{
  return NULL_TREE;
}
711 | |
712 | /* Default vectorizer cost model values. */ |
713 | |
714 | int |
715 | default_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost, |
716 | tree vectype, |
717 | int misalign ATTRIBUTE_UNUSED) |
718 | { |
719 | switch (type_of_cost) |
720 | { |
721 | case scalar_stmt: |
722 | case scalar_load: |
723 | case scalar_store: |
724 | case vector_stmt: |
725 | case vector_load: |
726 | case vector_store: |
727 | case vec_to_scalar: |
728 | case scalar_to_vec: |
729 | case cond_branch_not_taken: |
730 | case vec_perm: |
731 | case vec_promote_demote: |
732 | return 1; |
733 | |
734 | case unaligned_load: |
735 | case unaligned_store: |
736 | return 2; |
737 | |
738 | case cond_branch_taken: |
739 | return 3; |
740 | |
741 | case vec_construct: |
742 | return estimated_poly_value (x: TYPE_VECTOR_SUBPARTS (node: vectype)) - 1; |
743 | |
744 | default: |
745 | gcc_unreachable (); |
746 | } |
747 | } |
748 | |
/* Reciprocal: by default no builtin has a reciprocal variant.  */

tree
default_builtin_reciprocal (tree)
{
  return NULL_TREE;
}

/* Default TARGET_EMIT_SUPPORT_TINFOS hook: emit nothing extra.  */

void
default_emit_support_tinfos (emit_support_tinfos_callback)
{
}
761 | |
/* Generic hook: (cumulative_args_t, function_arg_info) -> false.  */
bool
hook_bool_CUMULATIVE_ARGS_arg_info_false (cumulative_args_t,
					  const function_arg_info &)
{
  return false;
}

/* Generic hook: (cumulative_args_t, function_arg_info) -> true.  */
bool
hook_bool_CUMULATIVE_ARGS_arg_info_true (cumulative_args_t,
					 const function_arg_info &)
{
  return true;
}

/* Generic hook: (cumulative_args_t, function_arg_info) -> 0.  */
int
hook_int_CUMULATIVE_ARGS_arg_info_0 (cumulative_args_t,
				     const function_arg_info &)
{
  return 0;
}

/* Generic hook: (cumulative_args_t, tree) -> nothing.  */
void
hook_void_CUMULATIVE_ARGS_tree (cumulative_args_t ca ATTRIBUTE_UNUSED,
				tree ATTRIBUTE_UNUSED)
{
}
788 | |
/* Default implementation of TARGET_PUSH_ARGUMENT: push insns may be
   used when the target defines PUSH_ROUNDING and outgoing arguments
   are not accumulated in the frame.  */

bool
default_push_argument (unsigned int)
{
#ifdef PUSH_ROUNDING
  return !ACCUMULATE_OUTGOING_ARGS;
#else
  return false;
#endif
}
800 | |
/* Default TARGET_FUNCTION_ARG_ADVANCE: there is no usable default;
   every target must override this hook.  */

void
default_function_arg_advance (cumulative_args_t, const function_arg_info &)
{
  gcc_unreachable ();
}

/* Default implementation of TARGET_FUNCTION_ARG_OFFSET: no extra
   offset.  */

HOST_WIDE_INT
default_function_arg_offset (machine_mode, const_tree)
{
  return 0;
}
814 | |
/* Default implementation of TARGET_FUNCTION_ARG_PADDING: usually pad
   upward, but pad short args downward on big-endian machines.
   MODE/TYPE describe the argument; TYPE may be null for libcalls.  */

pad_direction
default_function_arg_padding (machine_mode mode, const_tree type)
{
  if (!BYTES_BIG_ENDIAN)
    return PAD_UPWARD;

  unsigned HOST_WIDE_INT size;
  if (mode == BLKmode)
    {
      /* Variable-sized aggregates pad upward too.  */
      if (!type || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
	return PAD_UPWARD;
      size = int_size_in_bytes (type);
    }
  else
    /* Targets with variable-sized modes must override this hook
       and handle variable-sized modes explicitly.  */
    size = GET_MODE_SIZE (mode).to_constant ();

  /* Big endian: args shorter than a parameter slot pad downward.  */
  if (size < (PARM_BOUNDARY / BITS_PER_UNIT))
    return PAD_DOWNWARD;

  return PAD_UPWARD;
}
841 | |
/* Default TARGET_FUNCTION_ARG: no usable default; targets must
   override.  */

rtx
default_function_arg (cumulative_args_t, const function_arg_info &)
{
  gcc_unreachable ();
}

/* Default TARGET_FUNCTION_INCOMING_ARG: no usable default; targets
   must override.  */

rtx
default_function_incoming_arg (cumulative_args_t, const function_arg_info &)
{
  gcc_unreachable ();
}

/* Default argument alignment: the PARM_BOUNDARY macro.  */

unsigned int
default_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED,
			       const_tree type ATTRIBUTE_UNUSED)
{
  return PARM_BOUNDARY;
}

/* Default argument size-rounding boundary: the PARM_BOUNDARY macro.  */

unsigned int
default_function_arg_round_boundary (machine_mode mode ATTRIBUTE_UNUSED,
				     const_tree type ATTRIBUTE_UNUSED)
{
  return PARM_BOUNDARY;
}
867 | |
/* Generic hook taking a bitmap of registers and doing nothing.  */

void
hook_void_bitmap (bitmap regs ATTRIBUTE_UNUSED)
{
}

/* Generic hook: never reject an argument to an unprototyped function
   (return NULL, i.e. no error message).  */

const char *
hook_invalid_arg_for_unprototyped_fn (
	const_tree typelist ATTRIBUTE_UNUSED,
	const_tree funcdecl ATTRIBUTE_UNUSED,
	const_tree val ATTRIBUTE_UNUSED)
{
  return NULL;
}
881 | |
/* Initialize the stack protection decls.  */

/* Stack protection related decls living in libgcc.  Cached so the
   VAR_DECL is built only once per translation unit.  */
static GTY(()) tree stack_chk_guard_decl;

/* Default TARGET_STACK_PROTECT_GUARD: lazily build (and cache) an
   external, volatile pointer decl for libgcc's __stack_chk_guard.  */

tree
default_stack_protect_guard (void)
{
  tree t = stack_chk_guard_decl;

  if (t == NULL)
    {
      rtx x;

      t = build_decl (UNKNOWN_LOCATION,
		      VAR_DECL, get_identifier ("__stack_chk_guard"),
		      ptr_type_node);
      TREE_STATIC (t) = 1;
      TREE_PUBLIC (t) = 1;
      DECL_EXTERNAL (t) = 1;
      TREE_USED (t) = 1;
      /* Volatile so reads of the guard are never optimized away.  */
      TREE_THIS_VOLATILE (t) = 1;
      DECL_ARTIFICIAL (t) = 1;
      DECL_IGNORED_P (t) = 1;

      /* Do not share RTL as the declaration is visible outside of
	 current function.  */
      x = DECL_RTL (t);
      RTX_FLAG (x, used) = 1;

      stack_chk_guard_decl = t;
    }

  return t;
}
917 | |
/* Cached FUNCTION_DECL for the stack-check failure routine (also used
   by default_hidden_stack_protect_fail below).  */
static GTY(()) tree stack_chk_fail_decl;

/* Default TARGET_STACK_PROTECT_FAIL: build a call to libgcc's external
   __stack_chk_fail, creating and caching its decl on first use.  */

tree
default_external_stack_protect_fail (void)
{
  tree t = stack_chk_fail_decl;

  if (t == NULL_TREE)
    {
      t = build_function_type_list (void_type_node, NULL_TREE);
      t = build_decl (UNKNOWN_LOCATION,
		      FUNCTION_DECL, get_identifier ("__stack_chk_fail"), t);
      TREE_STATIC (t) = 1;
      TREE_PUBLIC (t) = 1;
      DECL_EXTERNAL (t) = 1;
      TREE_USED (t) = 1;
      TREE_THIS_VOLATILE (t) = 1;
      TREE_NOTHROW (t) = 1;
      DECL_ARTIFICIAL (t) = 1;
      DECL_IGNORED_P (t) = 1;
      DECL_VISIBILITY (t) = VISIBILITY_DEFAULT;
      DECL_VISIBILITY_SPECIFIED (t) = 1;

      stack_chk_fail_decl = t;
    }

  return build_call_expr (t, 0);
}
946 | |
/* Hidden-visibility variant of the stack-protect-fail hook: when the
   assembler supports hidden visibility and we are generating PIC, call
   the module-local __stack_chk_fail_local instead of __stack_chk_fail.
   NOTE(review): this shares the stack_chk_fail_decl cache with
   default_external_stack_protect_fail, so whichever variant is built
   first is reused for both -- confirm the two are never both needed
   with different symbols in one translation unit.  */

tree
default_hidden_stack_protect_fail (void)
{
#ifndef HAVE_GAS_HIDDEN
  return default_external_stack_protect_fail ();
#else
  tree t = stack_chk_fail_decl;

  if (!flag_pic)
    return default_external_stack_protect_fail ();

  if (t == NULL_TREE)
    {
      t = build_function_type_list (void_type_node, NULL_TREE);
      t = build_decl (UNKNOWN_LOCATION, FUNCTION_DECL,
		      get_identifier ("__stack_chk_fail_local"), t);
      TREE_STATIC (t) = 1;
      TREE_PUBLIC (t) = 1;
      DECL_EXTERNAL (t) = 1;
      TREE_USED (t) = 1;
      TREE_THIS_VOLATILE (t) = 1;
      TREE_NOTHROW (t) = 1;
      DECL_ARTIFICIAL (t) = 1;
      DECL_IGNORED_P (t) = 1;
      DECL_VISIBILITY_SPECIFIED (t) = 1;
      DECL_VISIBILITY (t) = VISIBILITY_HIDDEN;

      stack_chk_fail_decl = t;
    }

  return build_call_expr (t, 0);
#endif
}
980 | |
/* Generic hook: an rtx is "commutative" iff its code is marked
   commutative (COMMUTATIVE_P); OUTER_CODE is ignored.  */

bool
hook_bool_const_rtx_commutative_p (const_rtx x,
				   int outer_code ATTRIBUTE_UNUSED)
{
  return COMMUTATIVE_P (x);
}
987 | |
/* Default TARGET_FUNCTION_VALUE: defer to the old FUNCTION_VALUE
   macro, dropping FN_DECL_OR_TYPE when it is a type (the old interface
   only accepted decls).  Targets without the macro must override.  */

rtx
default_function_value (const_tree ret_type ATTRIBUTE_UNUSED,
			const_tree fn_decl_or_type,
			bool outgoing ATTRIBUTE_UNUSED)
{
  /* The old interface doesn't handle receiving the function type.  */
  if (fn_decl_or_type
      && !DECL_P (fn_decl_or_type))
    fn_decl_or_type = NULL;

#ifdef FUNCTION_VALUE
  return FUNCTION_VALUE (ret_type, fn_decl_or_type);
#else
  gcc_unreachable ();
#endif
}
1004 | |
/* Default TARGET_LIBCALL_VALUE: defer to the old LIBCALL_VALUE macro.
   Targets without the macro must override.  */

rtx
default_libcall_value (machine_mode mode ATTRIBUTE_UNUSED,
		       const_rtx fun ATTRIBUTE_UNUSED)
{
#ifdef LIBCALL_VALUE
  return LIBCALL_VALUE (MACRO_MODE (mode));
#else
  gcc_unreachable ();
#endif
}

/* The default hook for TARGET_FUNCTION_VALUE_REGNO_P: defer to the old
   FUNCTION_VALUE_REGNO_P macro.  Targets without the macro must
   override.  */

bool
default_function_value_regno_p (const unsigned int regno ATTRIBUTE_UNUSED)
{
#ifdef FUNCTION_VALUE_REGNO_P
  return FUNCTION_VALUE_REGNO_P (regno);
#else
  gcc_unreachable ();
#endif
}
1027 | |
1028 | /* Choose the mode and rtx to use to zero REGNO, storing tem in PMODE and |
1029 | PREGNO_RTX and returning TRUE if successful, otherwise returning FALSE. If |
1030 | the natural mode for REGNO doesn't work, attempt to group it with subsequent |
1031 | adjacent registers set in TOZERO. */ |
1032 | |
1033 | static inline bool |
1034 | zcur_select_mode_rtx (unsigned int regno, machine_mode *pmode, |
1035 | rtx *pregno_rtx, HARD_REG_SET tozero) |
1036 | { |
1037 | rtx regno_rtx = regno_reg_rtx[regno]; |
1038 | machine_mode mode = GET_MODE (regno_rtx); |
1039 | |
1040 | /* If the natural mode doesn't work, try some wider mode. */ |
1041 | if (!targetm.hard_regno_mode_ok (regno, mode)) |
1042 | { |
1043 | bool found = false; |
1044 | for (int nregs = 2; |
1045 | !found && nregs <= hard_regno_max_nregs |
1046 | && regno + nregs <= FIRST_PSEUDO_REGISTER |
1047 | && TEST_HARD_REG_BIT (set: tozero, |
1048 | bit: regno + nregs - 1); |
1049 | nregs++) |
1050 | { |
1051 | mode = choose_hard_reg_mode (regno, nregs, 0); |
1052 | if (mode == E_VOIDmode) |
1053 | continue; |
1054 | gcc_checking_assert (targetm.hard_regno_mode_ok (regno, mode)); |
1055 | regno_rtx = gen_rtx_REG (mode, regno); |
1056 | found = true; |
1057 | } |
1058 | if (!found) |
1059 | return false; |
1060 | } |
1061 | |
1062 | *pmode = mode; |
1063 | *pregno_rtx = regno_rtx; |
1064 | return true; |
1065 | } |
1066 | |
1067 | /* The default hook for TARGET_ZERO_CALL_USED_REGS. */ |
1068 | |
HARD_REG_SET
default_zero_call_used_regs (HARD_REG_SET need_zeroed_hardregs)
{
  gcc_assert (!hard_reg_set_empty_p (need_zeroed_hardregs));

  /* Registers we could not zero in the current pass.  */
  HARD_REG_SET failed;
  CLEAR_HARD_REG_SET (set&: failed);
  /* Whether the most recent pass managed to zero at least one register;
     gates further retry rounds below.  */
  bool progress = false;

  /* First, try to zero each register in need_zeroed_hardregs by
     loading a zero into it, taking note of any failures in
     FAILED.  */
  for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (TEST_HARD_REG_BIT (set: need_zeroed_hardregs, bit: regno))
      {
	rtx_insn *last_insn = get_last_insn ();
	rtx regno_rtx;
	machine_mode mode;

	if (!zcur_select_mode_rtx (regno, pmode: &mode, pregno_rtx: &regno_rtx,
				   tozero: need_zeroed_hardregs))
	  {
	    SET_HARD_REG_BIT (set&: failed, bit: regno);
	    continue;
	  }

	rtx zero = CONST0_RTX (mode);
	rtx_insn *insn = emit_move_insn (regno_rtx, zero);
	if (!valid_insn_p (insn))
	  {
	    /* The move was not recognized; roll back any insns it
	       emitted and remember the register for the retry loop.  */
	    SET_HARD_REG_BIT (set&: failed, bit: regno);
	    delete_insns_since (last_insn);
	  }
	else
	  {
	    progress = true;
	    /* MODE may have covered several consecutive registers;
	       skip past all of them.  */
	    regno += hard_regno_nregs (regno, mode) - 1;
	  }
      }

  /* Now retry with copies from zeroed registers, as long as we've
     made some PROGRESS, and registers remain to be zeroed in
     FAILED.  */
  while (progress && !hard_reg_set_empty_p (x: failed))
    {
      HARD_REG_SET retrying = failed;

      CLEAR_HARD_REG_SET (set&: failed);
      progress = false;

      for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
	if (TEST_HARD_REG_BIT (set: retrying, bit: regno))
	  {
	    rtx regno_rtx;
	    machine_mode mode;

	    /* This might select registers we've already zeroed.  If grouping
	       with them is what it takes to get regno zeroed, so be it.  */
	    if (!zcur_select_mode_rtx (regno, pmode: &mode, pregno_rtx: &regno_rtx,
				       tozero: need_zeroed_hardregs))
	      {
		SET_HARD_REG_BIT (set&: failed, bit: regno);
		continue;
	      }

	    bool success = false;
	    /* Look for a source.  */
	    for (unsigned int src = 0; src < FIRST_PSEUDO_REGISTER; src++)
	      {
		/* If SRC hasn't been zeroed (yet?), skip it.  */
		if (! TEST_HARD_REG_BIT (set: need_zeroed_hardregs, bit: src))
		  continue;
		if (TEST_HARD_REG_BIT (set: retrying, bit: src))
		  continue;

		/* Check that SRC can hold MODE, and that any other
		   registers needed to hold MODE in SRC have also been
		   zeroed.  */
		if (!targetm.hard_regno_mode_ok (src, mode))
		  continue;
		unsigned n = targetm.hard_regno_nregs (src, mode);
		bool ok = true;
		for (unsigned i = 1; ok && i < n; i++)
		  ok = (TEST_HARD_REG_BIT (set: need_zeroed_hardregs, bit: src + i)
			&& !TEST_HARD_REG_BIT (set: retrying, bit: src + i));
		if (!ok)
		  continue;

		/* SRC is usable, try to copy from it.  */
		rtx_insn *last_insn = get_last_insn ();
		rtx src_rtx = gen_rtx_REG (mode, src);
		rtx_insn *insn = emit_move_insn (regno_rtx, src_rtx);
		if (!valid_insn_p (insn))
		  /* It didn't work, remove any inserts.  We'll look
		     for another SRC.  */
		  delete_insns_since (last_insn);
		else
		  {
		    /* We're done for REGNO.  */
		    success = true;
		    break;
		  }
	      }

	    /* If nothing worked for REGNO this round, mark it to be
	       retried if we get another round.  */
	    if (!success)
	      SET_HARD_REG_BIT (set&: failed, bit: regno);
	    else
	      {
		/* Take note so as to enable another round if needed.  */
		progress = true;
		regno += hard_regno_nregs (regno, mode) - 1;
	      }
	  }
    }

  /* If any register remained, report it.  */
  if (!progress)
    {
      /* Issue the diagnostic at most once per compilation.  */
      static bool issued_error;
      if (!issued_error)
	{
	  /* Map the -fzero-call-used-regs argument back to its option
	     name for the diagnostic.  */
	  const char *name = NULL;
	  for (unsigned int i = 0; zero_call_used_regs_opts[i].name != NULL;
	       ++i)
	    if (flag_zero_call_used_regs == zero_call_used_regs_opts[i].flag)
	      {
		name = zero_call_used_regs_opts[i].name;
		break;
	      }

	  if (!name)
	    name = "" ;

	  issued_error = true;
	  sorry ("argument %qs is not supported for %qs on this target" ,
		 name, "-fzero-call-used-regs" );
	}
    }

  /* Report the whole requested set as handled; failures were diagnosed
     above rather than removed from the set.  */
  return need_zeroed_hardregs;
}
1212 | |
1213 | rtx |
1214 | default_internal_arg_pointer (void) |
1215 | { |
1216 | /* If the reg that the virtual arg pointer will be translated into is |
1217 | not a fixed reg or is the stack pointer, make a copy of the virtual |
1218 | arg pointer, and address parms via the copy. The frame pointer is |
1219 | considered fixed even though it is not marked as such. */ |
1220 | if ((ARG_POINTER_REGNUM == STACK_POINTER_REGNUM |
1221 | || ! (fixed_regs[ARG_POINTER_REGNUM] |
1222 | || ARG_POINTER_REGNUM == FRAME_POINTER_REGNUM))) |
1223 | return copy_to_reg (virtual_incoming_args_rtx); |
1224 | else |
1225 | return virtual_incoming_args_rtx; |
1226 | } |
1227 | |
rtx
default_static_chain (const_tree ARG_UNUSED (fndecl_or_type), bool incoming_p)
{
  /* On targets that define a separate incoming static-chain register,
     use it when INCOMING_P (i.e. on the callee side).  */
  if (incoming_p)
    {
#ifdef STATIC_CHAIN_INCOMING_REGNUM
      return gen_rtx_REG (Pmode, STATIC_CHAIN_INCOMING_REGNUM);
#endif
    }

#ifdef STATIC_CHAIN_REGNUM
  return gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
#endif

  /* Neither macro is defined: the target has no static chain at all.  */
  {
    /* Complain only once per compilation.  */
    static bool issued_error;
    if (!issued_error)
      {
	issued_error = true;
	sorry ("nested functions not supported on this target" );
      }

    /* It really doesn't matter what we return here, so long as it
       doesn't cause the rest of the compiler to crash.  */
    return gen_rtx_MEM (Pmode, stack_pointer_rtx);
  }
}
1255 | |
void
default_trampoline_init (rtx ARG_UNUSED (m_tramp), tree ARG_UNUSED (t_func),
			 rtx ARG_UNUSED (r_chain))
{
  /* Without a target-provided hook there is no way to materialize a
     trampoline; report the missing support to the user.  */
  sorry ("nested function trampolines not supported on this target" );
}
1262 | |
poly_int64
default_return_pops_args (tree, tree, poly_int64)
{
  /* Returning functions pop zero bytes of arguments by default.  */
  return 0;
}
1268 | |
reg_class_t
default_ira_change_pseudo_allocno_class (int regno ATTRIBUTE_UNUSED,
					 reg_class_t cl,
					 reg_class_t best_cl ATTRIBUTE_UNUSED)
{
  /* Keep the allocno class CL that IRA already chose.  */
  return cl;
}
1276 | |
extern bool
default_lra_p (void)
{
  /* The default for the lra_p hook: answer true unconditionally.  */
  return true;
}
1282 | |
int
default_register_priority (int hard_regno ATTRIBUTE_UNUSED)
{
  /* All hard registers get the same (neutral) priority.  */
  return 0;
}
1288 | |
extern bool
default_register_usage_leveling_p (void)
{
  /* Register usage leveling is disabled by default.  */
  return false;
}
1294 | |
extern bool
default_different_addr_displacement_p (void)
{
  /* By default, do not report different displacement handling.  */
  return false;
}
1300 | |
/* The default implementation of TARGET_SECONDARY_RELOAD.  Decide, via
   the legacy SECONDARY_{INPUT,OUTPUT}_RELOAD_CLASS macros and the
   reload_in/reload_out optabs, whether copying X into (IN_P true) or
   out of a register of class RELOAD_CLASS_I in RELOAD_MODE needs a
   secondary reload register, returning its class (NO_REGS if none) and
   recording any special reload insn in SRI.  */
reg_class_t
default_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x ATTRIBUTE_UNUSED,
			  reg_class_t reload_class_i ATTRIBUTE_UNUSED,
			  machine_mode reload_mode ATTRIBUTE_UNUSED,
			  secondary_reload_info *sri)
{
  enum reg_class rclass = NO_REGS;
  enum reg_class reload_class = (enum reg_class) reload_class_i;

  /* If a previous round already picked a tertiary reload insn, just
     hand it back.  */
  if (sri->prev_sri && sri->prev_sri->t_icode != CODE_FOR_nothing)
    {
      sri->icode = sri->prev_sri->t_icode;
      return NO_REGS;
    }
#ifdef SECONDARY_INPUT_RELOAD_CLASS
  if (in_p)
    rclass = SECONDARY_INPUT_RELOAD_CLASS (reload_class,
					   MACRO_MODE (reload_mode), x);
#endif
#ifdef SECONDARY_OUTPUT_RELOAD_CLASS
  if (! in_p)
    rclass = SECONDARY_OUTPUT_RELOAD_CLASS (reload_class,
					    MACRO_MODE (reload_mode), x);
#endif
  if (rclass != NO_REGS)
    {
      /* A secondary reload class is required; see whether the target
	 has a dedicated reload_in/reload_out pattern that can do the
	 copy instead.  */
      enum insn_code icode
	= direct_optab_handler (op: in_p ? reload_in_optab : reload_out_optab,
				mode: reload_mode);

      if (icode != CODE_FOR_nothing
	  && !insn_operand_matches (icode, opno: in_p, operand: x))
	icode = CODE_FOR_nothing;
      else if (icode != CODE_FOR_nothing)
	{
	  /* Derive the register classes the pattern's operands demand
	     from their constraint strings.  */
	  const char *insn_constraint, *scratch_constraint;
	  enum reg_class insn_class, scratch_class;

	  gcc_assert (insn_data[(int) icode].n_operands == 3);
	  insn_constraint = insn_data[(int) icode].operand[!in_p].constraint;
	  if (!*insn_constraint)
	    insn_class = ALL_REGS;
	  else
	    {
	      if (in_p)
		{
		  gcc_assert (*insn_constraint == '=');
		  insn_constraint++;
		}
	      insn_class = (reg_class_for_constraint
			    (c: lookup_constraint (p: insn_constraint)));
	      gcc_assert (insn_class != NO_REGS);
	    }

	  scratch_constraint = insn_data[(int) icode].operand[2].constraint;
	  /* The scratch register's constraint must start with "=&",
	     except for an input reload, where only "=" is necessary,
	     and where it might be beneficial to re-use registers from
	     the input.  */
	  gcc_assert (scratch_constraint[0] == '='
		      && (in_p || scratch_constraint[1] == '&'));
	  scratch_constraint++;
	  if (*scratch_constraint == '&')
	    scratch_constraint++;
	  scratch_class = (reg_class_for_constraint
			   (c: lookup_constraint (p: scratch_constraint)));

	  if (reg_class_subset_p (reload_class, insn_class))
	    {
	      /* The pattern itself can reach RELOAD_CLASS; only its
		 scratch register is needed as the secondary reload.  */
	      gcc_assert (scratch_class == rclass);
	      rclass = NO_REGS;
	    }
	  else
	    rclass = insn_class;

	}
      /* ICODE goes in the icode slot when no register class remains,
	 otherwise in the tertiary slot for the next round.  */
      if (rclass == NO_REGS)
	sri->icode = icode;
      else
	sri->t_icode = icode;
    }
  return rclass;
}
1384 | |
1385 | /* The default implementation of TARGET_SECONDARY_MEMORY_NEEDED_MODE. */ |
1386 | |
1387 | machine_mode |
1388 | default_secondary_memory_needed_mode (machine_mode mode) |
1389 | { |
1390 | if (!targetm.lra_p () |
1391 | && known_lt (GET_MODE_BITSIZE (mode), BITS_PER_WORD) |
1392 | && INTEGRAL_MODE_P (mode)) |
1393 | return mode_for_size (BITS_PER_WORD, GET_MODE_CLASS (mode), 0).require (); |
1394 | return mode; |
1395 | } |
1396 | |
1397 | /* By default, if flag_pic is true, then neither local nor global relocs |
1398 | should be placed in readonly memory. */ |
1399 | |
1400 | int |
1401 | default_reloc_rw_mask (void) |
1402 | { |
1403 | return flag_pic ? 3 : 0; |
1404 | } |
1405 | |
1406 | /* By default, address diff vectors are generated |
1407 | for jump tables when flag_pic is true. */ |
1408 | |
bool
default_generate_pic_addr_diff_vec (void)
{
  /* Follow -fpic/-fPIC directly.  */
  return flag_pic;
}
1414 | |
1415 | /* Record an element in the table of global constructors. SYMBOL is |
1416 | a SYMBOL_REF of the function to be called; PRIORITY is a number |
1417 | between 0 and MAX_INIT_PRIORITY. */ |
1418 | |
void
default_asm_out_constructor (rtx symbol ATTRIBUTE_UNUSED,
			     int priority ATTRIBUTE_UNUSED)
{
  /* No target-independent way to record the constructor; give up.  */
  sorry ("global constructors not supported on this target" );
}
1425 | |
1426 | /* Likewise for global destructors. */ |
1427 | |
void
default_asm_out_destructor (rtx symbol ATTRIBUTE_UNUSED,
			    int priority ATTRIBUTE_UNUSED)
{
  /* No target-independent way to record the destructor; give up.  */
  sorry ("global destructors not supported on this target" );
}
1434 | |
/* By default, do no modification. */
tree default_mangle_decl_assembler_name (tree decl ATTRIBUTE_UNUSED,
					 tree id)
{
  /* Return the assembler-name identifier ID unchanged.  */
  return id;
}
1441 | |
1442 | /* The default implementation of TARGET_STATIC_RTX_ALIGNMENT. */ |
1443 | |
HOST_WIDE_INT
default_static_rtx_alignment (machine_mode mode)
{
  /* Constants in static storage get MODE's natural alignment.  */
  return GET_MODE_ALIGNMENT (mode);
}
1449 | |
1450 | /* The default implementation of TARGET_CONSTANT_ALIGNMENT. */ |
1451 | |
HOST_WIDE_INT
default_constant_alignment (const_tree, HOST_WIDE_INT align)
{
  /* Keep the alignment the caller already computed.  */
  return align;
}
1457 | |
1458 | /* An implementation of TARGET_CONSTANT_ALIGNMENT that aligns strings |
1459 | to at least BITS_PER_WORD but otherwise makes no changes. */ |
1460 | |
1461 | HOST_WIDE_INT |
1462 | constant_alignment_word_strings (const_tree exp, HOST_WIDE_INT align) |
1463 | { |
1464 | if (TREE_CODE (exp) == STRING_CST) |
1465 | return MAX (align, BITS_PER_WORD); |
1466 | return align; |
1467 | } |
1468 | |
1469 | /* Default to natural alignment for vector types, bounded by |
1470 | MAX_OFILE_ALIGNMENT. */ |
1471 | |
1472 | HOST_WIDE_INT |
1473 | default_vector_alignment (const_tree type) |
1474 | { |
1475 | unsigned HOST_WIDE_INT align = MAX_OFILE_ALIGNMENT; |
1476 | tree size = TYPE_SIZE (type); |
1477 | if (tree_fits_uhwi_p (size)) |
1478 | align = tree_to_uhwi (size); |
1479 | if (align >= MAX_OFILE_ALIGNMENT) |
1480 | return MAX_OFILE_ALIGNMENT; |
1481 | return MAX (align, GET_MODE_ALIGNMENT (TYPE_MODE (type))); |
1482 | } |
1483 | |
1484 | /* The default implementation of |
1485 | TARGET_VECTORIZE_PREFERRED_VECTOR_ALIGNMENT. */ |
1486 | |
poly_uint64
default_preferred_vector_alignment (const_tree type)
{
  /* Prefer TYPE's own alignment; no extra over-alignment.  */
  return TYPE_ALIGN (type);
}
1492 | |
1493 | /* The default implementation of |
1494 | TARGET_VECTORIZE_PREFERRED_DIV_AS_SHIFTS_OVER_MULT. */ |
1495 | |
bool
default_preferred_div_as_shifts_over_mult (const_tree type)
{
  /* Prefer shifts whenever a highpart multiply is unavailable for
     TYPE's mode — presumably because the mult-based division expansion
     depends on it.  */
  return !can_mult_highpart_p (TYPE_MODE (type), TYPE_UNSIGNED (type));
}
1501 | |
1502 | /* By default assume vectors of element TYPE require a multiple of the natural |
1503 | alignment of TYPE. TYPE is naturally aligned if IS_PACKED is false. */ |
bool
default_builtin_vector_alignment_reachable (const_tree /*type*/, bool is_packed)
{
  /* Natural alignment is reachable exactly when the access is not
     packed.  */
  return ! is_packed;
}
1509 | |
/* By default, assume that a target supports any factor of misalignment
   memory access if it supports the movmisalign pattern.
   IS_PACKED is true if the memory access is defined in a packed struct.  */
1513 | bool |
1514 | default_builtin_support_vector_misalignment (machine_mode mode, |
1515 | const_tree type |
1516 | ATTRIBUTE_UNUSED, |
1517 | int misalignment |
1518 | ATTRIBUTE_UNUSED, |
1519 | bool is_packed |
1520 | ATTRIBUTE_UNUSED) |
1521 | { |
1522 | if (optab_handler (op: movmisalign_optab, mode) != CODE_FOR_nothing) |
1523 | return true; |
1524 | return false; |
1525 | } |
1526 | |
1527 | /* By default, only attempt to parallelize bitwise operations, and |
1528 | possibly adds/subtracts using bit-twiddling. */ |
1529 | |
machine_mode
default_preferred_simd_mode (scalar_mode)
{
  /* word_mode: vectorize only via integer operations on a word.  */
  return word_mode;
}
1535 | |
1536 | /* By default do not split reductions further. */ |
1537 | |
machine_mode
default_split_reduction (machine_mode mode)
{
  /* Keep MODE: no further splitting of reductions.  */
  return mode;
}
1543 | |
1544 | /* By default only the preferred vector mode is tried. */ |
1545 | |
unsigned int
default_autovectorize_vector_modes (vector_modes *, bool)
{
  /* Add no extra modes; callers then try only the preferred mode.  */
  return 0;
}
1551 | |
1552 | /* The default implementation of TARGET_VECTORIZE_RELATED_MODE. */ |
1553 | |
opt_machine_mode
default_vectorize_related_mode (machine_mode vector_mode,
				scalar_mode element_mode,
				poly_uint64 nunits)
{
  machine_mode result_mode;
  /* When NUNITS is zero, derive it so that the result keeps
     VECTOR_MODE's total size; then accept the candidate only if it is
     a vector mode the target supports.  */
  if ((maybe_ne (a: nunits, b: 0U)
       || multiple_p (a: GET_MODE_SIZE (mode: vector_mode),
		      b: GET_MODE_SIZE (mode: element_mode), multiple: &nunits))
      && mode_for_vector (element_mode, nunits).exists (mode: &result_mode)
      && VECTOR_MODE_P (result_mode)
      && targetm.vector_mode_supported_p (result_mode))
    return result_mode;

  /* No suitable supported vector mode exists.  */
  return opt_machine_mode ();
}
1570 | |
1571 | /* By default a vector of integers is used as a mask. */ |
1572 | |
opt_machine_mode
default_get_mask_mode (machine_mode mode)
{
  /* Use the corresponding integer vector mode as the mask mode.  */
  return related_int_vector_mode (mode);
}
1578 | |
1579 | /* By default consider masked stores to be expensive. */ |
1580 | |
bool
default_empty_mask_is_expensive (unsigned ifn)
{
  /* Only masked stores are treated as expensive by default.  */
  return ifn == IFN_MASK_STORE;
}
1586 | |
1587 | /* By default, the cost model accumulates three separate costs (prologue, |
1588 | loop body, and epilogue) for a vectorized loop or block. So allocate an |
1589 | array of three unsigned ints, set it to zero, and return its address. */ |
1590 | |
vector_costs *
default_vectorize_create_costs (vec_info *vinfo, bool costing_for_scalar)
{
  /* Heap-allocate the generic cost structure; the caller owns it.  */
  return new vector_costs (vinfo, costing_for_scalar);
}
1596 | |
1597 | /* Determine whether or not a pointer mode is valid. Assume defaults |
1598 | of ptr_mode or Pmode - can be overridden. */ |
bool
default_valid_pointer_mode (scalar_int_mode mode)
{
  /* Only ptr_mode and Pmode are valid pointer modes by default.  */
  return (mode == ptr_mode || mode == Pmode);
}
1604 | |
1605 | /* Determine whether the memory reference specified by REF may alias |
1606 | the C libraries errno location. */ |
bool
default_ref_may_alias_errno (ao_ref *ref)
{
  tree base = ao_ref_base (ref);
  /* The default implementation assumes the errno location is
     a declaration of type int or is always accessed via a
     pointer to int.  We assume that accesses to errno are
     not deliberately obfuscated (even in conforming ways).  */
  if (TYPE_UNSIGNED (TREE_TYPE (base))
      || TYPE_MODE (TREE_TYPE (base)) != TYPE_MODE (integer_type_node))
    return false;
  /* The default implementation assumes an errno location declaration
     is never defined in the current compilation unit and may not be
     aliased by a local variable.  */
  if (DECL_P (base)
      && DECL_EXTERNAL (base)
      && !TREE_STATIC (base))
    return true;
  else if (TREE_CODE (base) == MEM_REF
	   && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
    {
      /* An indirect access: it may reach errno unless points-to
	 analysis proved the pointer targets neither "anything" nor
	 non-local storage.  */
      struct ptr_info_def *pi = SSA_NAME_PTR_INFO (TREE_OPERAND (base, 0));
      return !pi || pi->pt.anything || pi->pt.nonlocal;
    }
  return false;
}
1633 | |
1634 | /* Return the mode for a pointer to a given ADDRSPACE, |
1635 | defaulting to ptr_mode for all address spaces. */ |
1636 | |
scalar_int_mode
default_addr_space_pointer_mode (addr_space_t addrspace ATTRIBUTE_UNUSED)
{
  /* Every address space uses the generic pointer mode.  */
  return ptr_mode;
}
1642 | |
1643 | /* Return the mode for an address in a given ADDRSPACE, |
1644 | defaulting to Pmode for all address spaces. */ |
1645 | |
scalar_int_mode
default_addr_space_address_mode (addr_space_t addrspace ATTRIBUTE_UNUSED)
{
  /* Every address space uses the generic address mode.  */
  return Pmode;
}
1651 | |
1652 | /* Named address space version of valid_pointer_mode. |
1653 | To match the above, the same modes apply to all address spaces. */ |
1654 | |
bool
default_addr_space_valid_pointer_mode (scalar_int_mode mode,
				       addr_space_t as ATTRIBUTE_UNUSED)
{
  /* Delegate to the generic valid_pointer_mode hook; AS is ignored.  */
  return targetm.valid_pointer_mode (mode);
}
1661 | |
1662 | /* Some places still assume that all pointer or address modes are the |
1663 | standard Pmode and ptr_mode. These optimizations become invalid if |
1664 | the target actually supports multiple different modes. For now, |
1665 | we disable such optimizations on such targets, using this function. */ |
1666 | |
1667 | bool |
1668 | target_default_pointer_address_modes_p (void) |
1669 | { |
1670 | if (targetm.addr_space.address_mode != default_addr_space_address_mode) |
1671 | return false; |
1672 | if (targetm.addr_space.pointer_mode != default_addr_space_pointer_mode) |
1673 | return false; |
1674 | |
1675 | return true; |
1676 | } |
1677 | |
1678 | /* Named address space version of legitimate_address_p. |
1679 | By default, all address spaces have the same form. */ |
1680 | |
bool
default_addr_space_legitimate_address_p (machine_mode mode, rtx mem,
					 bool strict,
					 addr_space_t as ATTRIBUTE_UNUSED,
					 code_helper code)
{
  /* Delegate to the generic legitimate_address_p hook; AS is ignored.  */
  return targetm.legitimate_address_p (mode, mem, strict, code);
}
1689 | |
1690 | /* Named address space version of LEGITIMIZE_ADDRESS. |
1691 | By default, all address spaces have the same form. */ |
1692 | |
rtx
default_addr_space_legitimize_address (rtx x, rtx oldx, machine_mode mode,
				       addr_space_t as ATTRIBUTE_UNUSED)
{
  /* Delegate to the generic legitimize_address hook; AS is ignored.  */
  return targetm.legitimize_address (x, oldx, mode);
}
1699 | |
1700 | /* The default hook for determining if one named address space is a subset of |
1701 | another and to return which address space to use as the common address |
1702 | space. */ |
1703 | |
bool
default_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
{
  /* Only an address space is a subset of itself by default.  */
  return (subset == superset);
}
1709 | |
1710 | /* The default hook for determining if 0 within a named address |
1711 | space is a valid address. */ |
1712 | |
bool
default_addr_space_zero_address_valid (addr_space_t as ATTRIBUTE_UNUSED)
{
  /* Address zero is not a valid object address by default.  */
  return false;
}
1718 | |
1719 | /* The default hook for debugging the address space is to return the |
1720 | address space number to indicate DW_AT_address_class. */ |
int
default_addr_space_debug (addr_space_t as)
{
  /* Use the raw address-space number as the DW_AT_address_class.  */
  return as;
}
1726 | |
1727 | /* The default hook implementation for TARGET_ADDR_SPACE_DIAGNOSE_USAGE. |
1728 | Don't complain about any address space. */ |
1729 | |
void
default_addr_space_diagnose_usage (addr_space_t, location_t)
{
  /* Intentionally empty: no address-space usage is diagnosed.  */
}
1734 | |
1735 | |
1736 | /* The default hook for TARGET_ADDR_SPACE_CONVERT. This hook should never be |
1737 | called for targets with only a generic address space. */ |
1738 | |
rtx
default_addr_space_convert (rtx op ATTRIBUTE_UNUSED,
			    tree from_type ATTRIBUTE_UNUSED,
			    tree to_type ATTRIBUTE_UNUSED)
{
  /* Reaching here means a target with multiple address spaces failed
     to provide its own conversion hook.  */
  gcc_unreachable ();
}
1746 | |
/* The default implementation of TARGET_HARD_REGNO_NREGS.  */
1748 | |
unsigned int
default_hard_regno_nregs (unsigned int, machine_mode mode)
{
  /* One hard register per word of MODE, rounded up.  Targets with
     variable-sized modes must provide their own definition
     of this hook (to_constant would abort on them).  */
  return CEIL (GET_MODE_SIZE (mode).to_constant (), UNITS_PER_WORD);
}
1756 | |
bool
default_hard_regno_scratch_ok (unsigned int regno ATTRIBUTE_UNUSED)
{
  /* Any hard register may be used as a scratch by default.  */
  return true;
}
1762 | |
1763 | /* The default implementation of TARGET_MODE_DEPENDENT_ADDRESS_P. */ |
1764 | |
bool
default_mode_dependent_address_p (const_rtx addr ATTRIBUTE_UNUSED,
				  addr_space_t addrspace ATTRIBUTE_UNUSED)
{
  /* No address is mode-dependent by default.  */
  return false;
}
1771 | |
1772 | extern bool default_new_address_profitable_p (rtx, rtx); |
1773 | |
1774 | |
1775 | /* The default implementation of TARGET_NEW_ADDRESS_PROFITABLE_P. */ |
1776 | |
bool
default_new_address_profitable_p (rtx memref ATTRIBUTE_UNUSED,
				  rtx_insn *insn ATTRIBUTE_UNUSED,
				  rtx new_addr ATTRIBUTE_UNUSED)
{
  /* Assume any legitimate replacement address is worth using.  */
  return true;
}
1784 | |
bool
default_target_option_valid_attribute_p (tree ARG_UNUSED (fndecl),
					 tree ARG_UNUSED (name),
					 tree ARG_UNUSED (args),
					 int ARG_UNUSED (flags))
{
  /* Generic code cannot honor the "target" attribute: warn and report
     the attribute as invalid.  */
  warning (OPT_Wattributes,
	   "target attribute is not supported on this machine" );

  return false;
}
1796 | |
bool
default_target_option_pragma_parse (tree ARG_UNUSED (args),
				    tree ARG_UNUSED (pop_target))
{
  /* If args is NULL the caller is handle_pragma_pop_options ().  In that case,
     emit no warning because "#pragma GCC pop_options" is valid on targets that
     do not have the "target" pragma.  */
  if (args)
    warning (OPT_Wpragmas,
	     "%<#pragma GCC target%> is not supported for this machine" );

  /* Report the pragma as unhandled.  */
  return false;
}
1810 | |
1811 | bool |
1812 | default_target_can_inline_p (tree caller, tree callee) |
1813 | { |
1814 | tree callee_opts = DECL_FUNCTION_SPECIFIC_TARGET (callee); |
1815 | tree caller_opts = DECL_FUNCTION_SPECIFIC_TARGET (caller); |
1816 | if (! callee_opts) |
1817 | callee_opts = target_option_default_node; |
1818 | if (! caller_opts) |
1819 | caller_opts = target_option_default_node; |
1820 | |
1821 | /* If both caller and callee have attributes, assume that if the |
1822 | pointer is different, the two functions have different target |
1823 | options since build_target_option_node uses a hash table for the |
1824 | options. */ |
1825 | return callee_opts == caller_opts; |
1826 | } |
1827 | |
1828 | /* By default, return false to not need to collect any target information |
1829 | for inlining. Target maintainer should re-define the hook if the |
1830 | target want to take advantage of it. */ |
1831 | |
bool
default_need_ipa_fn_target_info (const_tree, unsigned int &)
{
  /* No target information is collected for inlining by default.  */
  return false;
}
1837 | |
bool
default_update_ipa_fn_target_info (unsigned int &, const gimple *)
{
  /* Nothing to update; stop scanning by returning false.  */
  return false;
}
1843 | |
1844 | /* If the machine does not have a case insn that compares the bounds, |
1845 | this means extra overhead for dispatch tables, which raises the |
1846 | threshold for using them. */ |
1847 | |
1848 | unsigned int |
1849 | default_case_values_threshold (void) |
1850 | { |
1851 | return (targetm.have_casesi () ? 4 : 5); |
1852 | } |
1853 | |
bool
default_have_conditional_execution (void)
{
  /* Defer to the HAVE_conditional_execution macro.  */
  return HAVE_conditional_execution;
}
1859 | |
1860 | /* By default we assume that c99 functions are present at the runtime, |
1861 | but sincos is not. */ |
1862 | bool |
1863 | default_libc_has_function (enum function_class fn_class, |
1864 | tree type ATTRIBUTE_UNUSED) |
1865 | { |
1866 | if (fn_class == function_c94 |
1867 | || fn_class == function_c99_misc |
1868 | || fn_class == function_c99_math_complex) |
1869 | return true; |
1870 | |
1871 | return false; |
1872 | } |
1873 | |
/* By default assume that libc does not have a fast implementation.  */
1875 | |
bool
default_libc_has_fast_function (int fcode ATTRIBUTE_UNUSED)
{
  /* No function is assumed to have a fast libc implementation.  */
  return false;
}
1881 | |
bool
gnu_libc_has_function (enum function_class fn_class ATTRIBUTE_UNUSED,
		       tree type ATTRIBUTE_UNUSED)
{
  /* glibc is assumed to provide every function class.  */
  return true;
}
1888 | |
bool
no_c99_libc_has_function (enum function_class fn_class ATTRIBUTE_UNUSED,
			  tree type ATTRIBUTE_UNUSED)
{
  /* For runtimes without C99 support: assume nothing is available.  */
  return false;
}
1895 | |
1896 | /* Assume some c99 functions are present at the runtime including sincos. */ |
1897 | bool |
1898 | bsd_libc_has_function (enum function_class fn_class, |
1899 | tree type ATTRIBUTE_UNUSED) |
1900 | { |
1901 | if (fn_class == function_c94 |
1902 | || fn_class == function_c99_misc |
1903 | || fn_class == function_sincos) |
1904 | return true; |
1905 | |
1906 | return false; |
1907 | } |
1908 | |
unsigned
default_libm_function_max_error (unsigned, machine_mode, bool)
{
  /* Unknown accuracy: report the largest possible ulp error so callers
     assume nothing about libm precision.  */
  return ~0U;
}
1914 | |
unsigned
glibc_linux_libm_function_max_error (unsigned cfn, machine_mode mode,
				     bool boundary_p)
{
  /* Let's use
     https://www.gnu.org/software/libc/manual/2.22/html_node/Errors-in-Math-Functions.html
     https://www.gnu.org/software/libc/manual/html_node/Errors-in-Math-Functions.html
     with usual values recorded here and significant outliers handled in
     target CPU specific overriders.  The tables only record default
     rounding to nearest, for -frounding-math let's add some extra ulps.
     For boundary_p values (say finite results outside of [-1.,1.] for
     sin/cos, or [-0.,+Inf] for sqrt etc. let's use custom random testers.  */
  /* Extra ulps charged when non-default rounding is in effect.  */
  int rnd = flag_rounding_math ? 4 : 0;
  /* Classify MODE by its floating-point format: single, double,
     extended, or quad precision.  */
  bool sf = (REAL_MODE_FORMAT (mode) == &ieee_single_format
	     || REAL_MODE_FORMAT (mode) == &mips_single_format
	     || REAL_MODE_FORMAT (mode) == &motorola_single_format);
  bool df = (REAL_MODE_FORMAT (mode) == &ieee_double_format
	     || REAL_MODE_FORMAT (mode) == &mips_double_format
	     || REAL_MODE_FORMAT (mode) == &motorola_double_format);
  bool xf = (REAL_MODE_FORMAT (mode) == &ieee_extended_intel_96_format
	     || REAL_MODE_FORMAT (mode) == &ieee_extended_intel_128_format
	     || REAL_MODE_FORMAT (mode) == &ieee_extended_motorola_format);
  bool tf = (REAL_MODE_FORMAT (mode) == &ieee_quad_format
	     || REAL_MODE_FORMAT (mode) == &mips_quad_format);

  switch (cfn)
    {
    CASE_CFN_SQRT:
    CASE_CFN_SQRT_FN:
      if (boundary_p)
	/* https://gcc.gnu.org/pipermail/gcc-patches/2023-April/616595.html */
	return 0;
      if (sf || df || xf || tf)
	return 0 + rnd;
      break;
    CASE_CFN_COS:
    CASE_CFN_COS_FN:
      /* cos is generally errors like sin, but far more arches have 2ulps
	 for double.  */
      if (!boundary_p && df)
	return 2 + rnd;
      gcc_fallthrough ();
    CASE_CFN_SIN:
    CASE_CFN_SIN_FN:
      if (boundary_p)
	/* According to
	   https://sourceware.org/pipermail/gcc-patches/2023-April/616315.html
	   seems default rounding sin/cos stay strictly in [-1.,1.] range,
	   with rounding to infinity it can be 1ulp larger/smaller.  */
	return flag_rounding_math ? 1 : 0;
      if (sf || df)
	return 1 + rnd;
      if (xf || tf)
	return 2 + rnd;
      break;
    default:
      break;
    }

  /* Functions or formats not covered above fall back to "unknown".  */
  return default_libm_function_max_error (cfn, mode, boundary_p);
}
1976 | |
tree
default_builtin_tm_load_store (tree ARG_UNUSED (type))
{
  /* No transactional-memory load/store builtin by default.  */
  return NULL_TREE;
}
1982 | |
1983 | /* Compute cost of moving registers to/from memory. */ |
1984 | |
int
default_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
			  reg_class_t rclass ATTRIBUTE_UNUSED,
			  bool in ATTRIBUTE_UNUSED)
{
#ifndef MEMORY_MOVE_COST
  /* Traditional default: a base cost of 4 plus whatever a secondary
     reload through memory would add.  */
  return (4 + memory_move_secondary_cost (mode, (enum reg_class) rclass, in));
#else
  /* The target supplied the legacy macro; defer to it.  */
  return MEMORY_MOVE_COST (MACRO_MODE (mode), (enum reg_class) rclass, in);
#endif
}
1996 | |
1997 | /* Compute cost of moving data from a register of class FROM to one of |
1998 | TO, using MODE. */ |
1999 | |
int
default_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
			    reg_class_t from ATTRIBUTE_UNUSED,
			    reg_class_t to ATTRIBUTE_UNUSED)
{
#ifndef REGISTER_MOVE_COST
  /* Traditional default cost of a register-register move.  */
  return 2;
#else
  /* The target supplied the legacy macro; defer to it.  */
  return REGISTER_MOVE_COST (MACRO_MODE (mode),
			     (enum reg_class) from, (enum reg_class) to);
#endif
}
2012 | |
2013 | /* The default implementation of TARGET_SLOW_UNALIGNED_ACCESS. */ |
2014 | |
bool
default_slow_unaligned_access (machine_mode, unsigned int)
{
  /* Unaligned accesses are slow exactly when the target requires
     strict alignment.  */
  return STRICT_ALIGNMENT;
}
2020 | |
2021 | /* The default implementation of TARGET_ESTIMATED_POLY_VALUE. */ |
2022 | |
HOST_WIDE_INT
default_estimated_poly_value (poly_int64 x, poly_value_estimate_kind)
{
  /* Estimate with the constant coefficient only, ignoring X's
     indeterminate components and the estimate kind.  */
  return x.coeffs[0];
}
2028 | |
2029 | /* For hooks which use the MOVE_RATIO macro, this gives the legacy default |
2030 | behavior. SPEED_P is true if we are compiling for speed. */ |
2031 | |
unsigned int
get_move_ratio (bool speed_p ATTRIBUTE_UNUSED)
{
  unsigned int move_ratio;
#ifdef MOVE_RATIO
  /* The target supplied the legacy macro; defer to it.  */
  move_ratio = (unsigned int) MOVE_RATIO (speed_p);
#else
#if defined (HAVE_cpymemqi) || defined (HAVE_cpymemhi) || defined (HAVE_cpymemsi) || defined (HAVE_cpymemdi) || defined (HAVE_cpymemti)
  /* The target has cpymem patterns, so prefer those over piecewise
     moves beyond a couple of instructions.  */
  move_ratio = 2;
#else /* No cpymem patterns, pick a default.  */
  move_ratio = ((speed_p) ? 15 : 3);
#endif
#endif
  return move_ratio;
}
2047 | |
2048 | /* Return TRUE if the move_by_pieces/set_by_pieces infrastructure should be |
2049 | used; return FALSE if the cpymem/setmem optab should be expanded, or |
2050 | a call to memcpy emitted. */ |
2051 | |
2052 | bool |
2053 | default_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size, |
2054 | unsigned int alignment, |
2055 | enum by_pieces_operation op, |
2056 | bool speed_p) |
2057 | { |
2058 | unsigned int max_size = 0; |
2059 | unsigned int ratio = 0; |
2060 | |
2061 | switch (op) |
2062 | { |
2063 | case CLEAR_BY_PIECES: |
2064 | max_size = STORE_MAX_PIECES; |
2065 | ratio = CLEAR_RATIO (speed_p); |
2066 | break; |
2067 | case MOVE_BY_PIECES: |
2068 | max_size = MOVE_MAX_PIECES; |
2069 | ratio = get_move_ratio (speed_p); |
2070 | break; |
2071 | case SET_BY_PIECES: |
2072 | max_size = STORE_MAX_PIECES; |
2073 | ratio = SET_RATIO (speed_p); |
2074 | break; |
2075 | case STORE_BY_PIECES: |
2076 | max_size = STORE_MAX_PIECES; |
2077 | ratio = get_move_ratio (speed_p); |
2078 | break; |
2079 | case COMPARE_BY_PIECES: |
2080 | max_size = COMPARE_MAX_PIECES; |
2081 | /* Pick a likely default, just as in get_move_ratio. */ |
2082 | ratio = speed_p ? 15 : 3; |
2083 | break; |
2084 | } |
2085 | |
2086 | return by_pieces_ninsns (size, alignment, max_size + 1, op) < ratio; |
2087 | } |
2088 | |
/* This hook controls code generation for expanding a memcmp operation by
   pieces.  Return 1 for the normal pattern of compare/jump after each pair
   of loads, or a higher number to reduce the number of branches.
   The mode argument is ignored by the default.  */

int
default_compare_by_pieces_branch_ratio (machine_mode)
{
  return 1;
}
2098 | |
2099 | /* Write PATCH_AREA_SIZE NOPs into the asm outfile FILE around a function |
2100 | entry. If RECORD_P is true and the target supports named sections, |
2101 | the location of the NOPs will be recorded in a special object section |
2102 | called "__patchable_function_entries". This routine may be called |
2103 | twice per function to put NOPs before and after the function |
2104 | entry. */ |
2105 | |
2106 | void |
2107 | default_print_patchable_function_entry (FILE *file, |
2108 | unsigned HOST_WIDE_INT patch_area_size, |
2109 | bool record_p) |
2110 | { |
2111 | const char *nop_templ = 0; |
2112 | int code_num; |
2113 | rtx_insn *my_nop = make_insn_raw (gen_nop ()); |
2114 | |
2115 | /* We use the template alone, relying on the (currently sane) assumption |
2116 | that the NOP template does not have variable operands. */ |
2117 | code_num = recog_memoized (insn: my_nop); |
2118 | nop_templ = get_insn_template (code_num, my_nop); |
2119 | |
2120 | if (record_p && targetm_common.have_named_sections) |
2121 | { |
2122 | char buf[256]; |
2123 | section *previous_section = in_section; |
2124 | const char *asm_op = integer_asm_op (POINTER_SIZE_UNITS, false); |
2125 | |
2126 | gcc_assert (asm_op != NULL); |
2127 | /* If SECTION_LINK_ORDER is supported, this internal label will |
2128 | be filled as the symbol for linked_to section. */ |
2129 | ASM_GENERATE_INTERNAL_LABEL (buf, "LPFE" , current_function_funcdef_no); |
2130 | |
2131 | unsigned int flags = SECTION_WRITE | SECTION_RELRO; |
2132 | if (HAVE_GAS_SECTION_LINK_ORDER) |
2133 | flags |= SECTION_LINK_ORDER; |
2134 | |
2135 | section *sect = get_section ("__patchable_function_entries" , |
2136 | flags, current_function_decl); |
2137 | if (HAVE_COMDAT_GROUP && DECL_COMDAT_GROUP (current_function_decl)) |
2138 | switch_to_comdat_section (sect, current_function_decl); |
2139 | else |
2140 | switch_to_section (sect); |
2141 | assemble_align (POINTER_SIZE); |
2142 | fputs (s: asm_op, stream: file); |
2143 | assemble_name_raw (file, buf); |
2144 | fputc (c: '\n', stream: file); |
2145 | |
2146 | switch_to_section (previous_section); |
2147 | ASM_OUTPUT_LABEL (file, buf); |
2148 | } |
2149 | |
2150 | unsigned i; |
2151 | for (i = 0; i < patch_area_size; ++i) |
2152 | output_asm_insn (nop_templ, NULL); |
2153 | } |
2154 | |
/* Return true if the profiling call should be emitted before the
   function prologue, as signalled by the target defining the
   PROFILE_BEFORE_PROLOGUE macro.  */
bool
default_profile_before_prologue (void)
{
#ifdef PROFILE_BEFORE_PROLOGUE
  return true;
#else
  return false;
#endif
}
2164 | |
/* The default implementation of TARGET_PREFERRED_RELOAD_CLASS.
   Defers to the legacy PREFERRED_RELOAD_CLASS macro when the target
   defines one; otherwise RCLASS is already the preferred class.  */

reg_class_t
default_preferred_reload_class (rtx x ATTRIBUTE_UNUSED,
			        reg_class_t rclass)
{
#ifdef PREFERRED_RELOAD_CLASS
  return (reg_class_t) PREFERRED_RELOAD_CLASS (x, (enum reg_class) rclass);
#else
  return rclass;
#endif
}
2177 | |
/* The default implementation of TARGET_OUTPUT_PREFERRED_RELOAD_CLASS.
   X is ignored; the input class RCLASS is returned unchanged.  */

reg_class_t
default_preferred_output_reload_class (rtx x ATTRIBUTE_UNUSED,
				       reg_class_t rclass)
{
  return rclass;
}
2186 | |
/* The default implementation of TARGET_PREFERRED_RENAME_CLASS.
   NO_REGS means the register-renaming pass has no preferred class
   and will use RCLASS itself.  */
reg_class_t
default_preferred_rename_class (reg_class_t rclass ATTRIBUTE_UNUSED)
{
  return NO_REGS;
}
2193 | |
/* The default implementation of TARGET_CLASS_LIKELY_SPILLED_P.
   A class containing exactly one register is likely spilled.  */

bool
default_class_likely_spilled_p (reg_class_t rclass)
{
  return (reg_class_size[(int) rclass] == 1);
}
2201 | |
/* The default implementation of TARGET_CLASS_MAX_NREGS.
   Return the maximum number of consecutive registers of class RCLASS
   needed to hold a value of mode MODE, using the legacy CLASS_MAX_NREGS
   macro when defined and otherwise dividing the mode size by the word
   size, rounding up.  */

unsigned char
default_class_max_nregs (reg_class_t rclass ATTRIBUTE_UNUSED,
			 machine_mode mode ATTRIBUTE_UNUSED)
{
#ifdef CLASS_MAX_NREGS
  return (unsigned char) CLASS_MAX_NREGS ((enum reg_class) rclass,
					  MACRO_MODE (mode));
#else
  /* Targets with variable-sized modes must provide their own definition
     of this hook.  */
  unsigned int size = GET_MODE_SIZE (mode).to_constant ();
  return (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
#endif
}
2218 | |
/* Determine the debugging unwind mechanism for the target.
   Returns UI_DWARF2 when the target forces DWARF2 frame info or when
   DWARF2 debug info is available and enabled; UI_NONE otherwise.  */

enum unwind_info_type
default_debug_unwind_info (void)
{
  /* If the target wants to force the use of dwarf2 unwind info, let it.  */
  /* ??? Change all users to the hook, then poison this.  */
#ifdef DWARF2_FRAME_INFO
  if (DWARF2_FRAME_INFO)
    return UI_DWARF2;
#endif

  /* Otherwise, only turn it on if dwarf2 debugging is enabled.  */
#ifdef DWARF2_DEBUGGING_INFO
  if (dwarf_debuginfo_p ())
    return UI_DWARF2;
#endif

  return UI_NONE;
}
2239 | |
/* Targets that set NUM_POLY_INT_COEFFS to something greater than 1
   must define this hook.  The default should never be reached, since
   single-coefficient targets have no polynomial indeterminates.  */

unsigned int
default_dwarf_poly_indeterminate_value (unsigned int, unsigned int *, int *)
{
  gcc_unreachable ();
}
2248 | |
2249 | /* Determine the correct mode for a Dwarf frame register that represents |
2250 | register REGNO. */ |
2251 | |
2252 | machine_mode |
2253 | default_dwarf_frame_reg_mode (int regno) |
2254 | { |
2255 | machine_mode save_mode = reg_raw_mode[regno]; |
2256 | |
2257 | if (targetm.hard_regno_call_part_clobbered (eh_edge_abi.id (), |
2258 | regno, save_mode)) |
2259 | save_mode = choose_hard_reg_mode (regno, 1, &eh_edge_abi); |
2260 | return save_mode; |
2261 | } |
2262 | |
/* To be used by targets where reg_raw_mode doesn't return the right
   mode for registers used in apply_builtin_return and apply_builtin_arg.
   The default simply returns reg_raw_mode[REGNO], asserting that it is
   a fixed-size mode.  */

fixed_size_mode
default_get_reg_raw_mode (int regno)
{
  /* Targets must override this hook if the underlying register is
     variable-sized.  */
  return as_a <fixed_size_mode> (reg_raw_mode[regno]);
}
2273 | |
/* Return true if a leaf function should stay leaf even with profiling
   enabled.  By default profiling code may disturb leaf-ness.  */

bool
default_keep_leaf_when_profiled ()
{
  return false;
}
2282 | |
2283 | /* Return true if the state of option OPTION should be stored in PCH files |
2284 | and checked by default_pch_valid_p. Store the option's current state |
2285 | in STATE if so. */ |
2286 | |
2287 | static inline bool |
2288 | option_affects_pch_p (int option, struct cl_option_state *state) |
2289 | { |
2290 | if ((cl_options[option].flags & CL_TARGET) == 0) |
2291 | return false; |
2292 | if ((cl_options[option].flags & CL_PCH_IGNORE) != 0) |
2293 | return false; |
2294 | if (option_flag_var (opt_index: option, opts: &global_options) == &target_flags) |
2295 | if (targetm.check_pch_target_flags) |
2296 | return false; |
2297 | return get_option_state (&global_options, option, state); |
2298 | } |
2299 | |
2300 | /* Default version of get_pch_validity. |
2301 | By default, every flag difference is fatal; that will be mostly right for |
2302 | most targets, but completely right for very few. */ |
2303 | |
2304 | void * |
2305 | default_get_pch_validity (size_t *sz) |
2306 | { |
2307 | struct cl_option_state state; |
2308 | size_t i; |
2309 | char *result, *r; |
2310 | |
2311 | *sz = 2; |
2312 | if (targetm.check_pch_target_flags) |
2313 | *sz += sizeof (target_flags); |
2314 | for (i = 0; i < cl_options_count; i++) |
2315 | if (option_affects_pch_p (option: i, state: &state)) |
2316 | *sz += state.size; |
2317 | |
2318 | result = r = XNEWVEC (char, *sz); |
2319 | r[0] = flag_pic; |
2320 | r[1] = flag_pie; |
2321 | r += 2; |
2322 | if (targetm.check_pch_target_flags) |
2323 | { |
2324 | memcpy (dest: r, src: &target_flags, n: sizeof (target_flags)); |
2325 | r += sizeof (target_flags); |
2326 | } |
2327 | |
2328 | for (i = 0; i < cl_options_count; i++) |
2329 | if (option_affects_pch_p (option: i, state: &state)) |
2330 | { |
2331 | memcpy (dest: r, src: state.data, n: state.size); |
2332 | r += state.size; |
2333 | } |
2334 | |
2335 | return result; |
2336 | } |
2337 | |
/* Return a message which says that a PCH file was created with a different
   setting of OPTION.  The string is heap-allocated by xasprintf; callers
   treat it as a diagnostic message.  */

static const char *
pch_option_mismatch (const char *option)
{
  return xasprintf (_("created and used with differing settings of '%s'" ),
		    option);
}
2347 | |
2348 | /* Default version of pch_valid_p. */ |
2349 | |
2350 | const char * |
2351 | default_pch_valid_p (const void *data_p, size_t len ATTRIBUTE_UNUSED) |
2352 | { |
2353 | struct cl_option_state state; |
2354 | const char *data = (const char *)data_p; |
2355 | size_t i; |
2356 | |
2357 | /* -fpic and -fpie also usually make a PCH invalid. */ |
2358 | if (data[0] != flag_pic) |
2359 | return _("created and used with different settings of %<-fpic%>" ); |
2360 | if (data[1] != flag_pie) |
2361 | return _("created and used with different settings of %<-fpie%>" ); |
2362 | data += 2; |
2363 | |
2364 | /* Check target_flags. */ |
2365 | if (targetm.check_pch_target_flags) |
2366 | { |
2367 | int tf; |
2368 | const char *r; |
2369 | |
2370 | memcpy (dest: &tf, src: data, n: sizeof (target_flags)); |
2371 | data += sizeof (target_flags); |
2372 | r = targetm.check_pch_target_flags (tf); |
2373 | if (r != NULL) |
2374 | return r; |
2375 | } |
2376 | |
2377 | for (i = 0; i < cl_options_count; i++) |
2378 | if (option_affects_pch_p (option: i, state: &state)) |
2379 | { |
2380 | if (memcmp (s1: data, s2: state.data, n: state.size) != 0) |
2381 | return pch_option_mismatch (option: cl_options[i].opt_text); |
2382 | data += state.size; |
2383 | } |
2384 | |
2385 | return NULL; |
2386 | } |
2387 | |
2388 | /* Default version of cstore_mode. */ |
2389 | |
2390 | scalar_int_mode |
2391 | default_cstore_mode (enum insn_code icode) |
2392 | { |
2393 | return as_a <scalar_int_mode> (m: insn_data[(int) icode].operand[0].mode); |
2394 | } |
2395 | |
/* Default version of member_type_forces_blk.  No member type forces the
   enclosing structure into BLKmode by default.  */

bool
default_member_type_forces_blk (const_tree, machine_mode)
{
  return false;
}
2403 | |
/* Default version of canonicalize_comparison.  No target-specific
   canonicalization is performed; the operands are left untouched.  */

void
default_canonicalize_comparison (int *, rtx *, rtx *, bool)
{
}
2410 | |
/* Default implementation of TARGET_ATOMIC_ASSIGN_EXPAND_FENV.
   Does nothing; targets with floating-point environment state that must
   be saved/restored around atomic compound assignment override this.  */

void
default_atomic_assign_expand_fenv (tree *, tree *, tree *)
{
}
2417 | |
2418 | #ifndef PAD_VARARGS_DOWN |
2419 | #define PAD_VARARGS_DOWN BYTES_BIG_ENDIAN |
2420 | #endif |
2421 | |
/* Build an indirect-ref expression over the given TREE, which represents a
   piece of a va_arg() expansion.  The dereference inherits ADDR's source
   location.  */
tree
build_va_arg_indirect_ref (tree addr)
{
  addr = build_simple_mem_ref_loc (EXPR_LOCATION (addr), addr);
  return addr;
}
2430 | |
2431 | /* The "standard" implementation of va_arg: read the value from the |
2432 | current (padded) address and increment by the (padded) size. */ |
2433 | |
2434 | tree |
2435 | std_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p, |
2436 | gimple_seq *post_p) |
2437 | { |
2438 | tree addr, t, type_size, rounded_size, valist_tmp; |
2439 | unsigned HOST_WIDE_INT align, boundary; |
2440 | bool indirect; |
2441 | |
2442 | /* All of the alignment and movement below is for args-grow-up machines. |
2443 | As of 2004, there are only 3 ARGS_GROW_DOWNWARD targets, and they all |
2444 | implement their own specialized gimplify_va_arg_expr routines. */ |
2445 | if (ARGS_GROW_DOWNWARD) |
2446 | gcc_unreachable (); |
2447 | |
2448 | indirect = pass_va_arg_by_reference (type); |
2449 | if (indirect) |
2450 | type = build_pointer_type (type); |
2451 | |
2452 | if (targetm.calls.split_complex_arg |
2453 | && TREE_CODE (type) == COMPLEX_TYPE |
2454 | && targetm.calls.split_complex_arg (type)) |
2455 | { |
2456 | tree real_part, imag_part; |
2457 | |
2458 | real_part = std_gimplify_va_arg_expr (valist, |
2459 | TREE_TYPE (type), pre_p, NULL); |
2460 | real_part = get_initialized_tmp_var (real_part, pre_p); |
2461 | |
2462 | imag_part = std_gimplify_va_arg_expr (valist: unshare_expr (valist), |
2463 | TREE_TYPE (type), pre_p, NULL); |
2464 | imag_part = get_initialized_tmp_var (imag_part, pre_p); |
2465 | |
2466 | return build2 (COMPLEX_EXPR, type, real_part, imag_part); |
2467 | } |
2468 | |
2469 | align = PARM_BOUNDARY / BITS_PER_UNIT; |
2470 | boundary = targetm.calls.function_arg_boundary (TYPE_MODE (type), type); |
2471 | |
2472 | /* When we align parameter on stack for caller, if the parameter |
2473 | alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will be |
2474 | aligned at MAX_SUPPORTED_STACK_ALIGNMENT. We will match callee |
2475 | here with caller. */ |
2476 | if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT) |
2477 | boundary = MAX_SUPPORTED_STACK_ALIGNMENT; |
2478 | |
2479 | boundary /= BITS_PER_UNIT; |
2480 | |
2481 | /* Hoist the valist value into a temporary for the moment. */ |
2482 | valist_tmp = get_initialized_tmp_var (valist, pre_p); |
2483 | |
2484 | /* va_list pointer is aligned to PARM_BOUNDARY. If argument actually |
2485 | requires greater alignment, we must perform dynamic alignment. */ |
2486 | if (boundary > align |
2487 | && !TYPE_EMPTY_P (type) |
2488 | && !integer_zerop (TYPE_SIZE (type))) |
2489 | { |
2490 | t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp, |
2491 | fold_build_pointer_plus_hwi (valist_tmp, boundary - 1)); |
2492 | gimplify_and_add (t, pre_p); |
2493 | |
2494 | t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp, |
2495 | fold_build2 (BIT_AND_EXPR, TREE_TYPE (valist), |
2496 | valist_tmp, |
2497 | build_int_cst (TREE_TYPE (valist), -boundary))); |
2498 | gimplify_and_add (t, pre_p); |
2499 | } |
2500 | else |
2501 | boundary = align; |
2502 | |
2503 | /* If the actual alignment is less than the alignment of the type, |
2504 | adjust the type accordingly so that we don't assume strict alignment |
2505 | when dereferencing the pointer. */ |
2506 | boundary *= BITS_PER_UNIT; |
2507 | if (boundary < TYPE_ALIGN (type)) |
2508 | { |
2509 | type = build_variant_type_copy (type); |
2510 | SET_TYPE_ALIGN (type, boundary); |
2511 | } |
2512 | |
2513 | /* Compute the rounded size of the type. */ |
2514 | type_size = arg_size_in_bytes (type); |
2515 | rounded_size = round_up (type_size, align); |
2516 | |
2517 | /* Reduce rounded_size so it's sharable with the postqueue. */ |
2518 | gimplify_expr (&rounded_size, pre_p, post_p, is_gimple_val, fb_rvalue); |
2519 | |
2520 | /* Get AP. */ |
2521 | addr = valist_tmp; |
2522 | if (PAD_VARARGS_DOWN && !integer_zerop (rounded_size)) |
2523 | { |
2524 | /* Small args are padded downward. */ |
2525 | t = fold_build2_loc (input_location, GT_EXPR, sizetype, |
2526 | rounded_size, size_int (align)); |
2527 | t = fold_build3 (COND_EXPR, sizetype, t, size_zero_node, |
2528 | size_binop (MINUS_EXPR, rounded_size, type_size)); |
2529 | addr = fold_build_pointer_plus (addr, t); |
2530 | } |
2531 | |
2532 | /* Compute new value for AP. */ |
2533 | t = fold_build_pointer_plus (valist_tmp, rounded_size); |
2534 | t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t); |
2535 | gimplify_and_add (t, pre_p); |
2536 | |
2537 | addr = fold_convert (build_pointer_type (type), addr); |
2538 | |
2539 | if (indirect) |
2540 | addr = build_va_arg_indirect_ref (addr); |
2541 | |
2542 | return build_va_arg_indirect_ref (addr); |
2543 | } |
2544 | |
/* An implementation of TARGET_CAN_USE_DOLOOP_P for targets that do
   not support nested low-overhead loops.  LOOP_DEPTH of 1 means the
   innermost loop.  */

bool
can_use_doloop_if_innermost (const widest_int &, const widest_int &,
			     unsigned int loop_depth, bool)
{
  return loop_depth == 1;
}
2554 | |
/* Default implementation of TARGET_OPTAB_SUPPORTED_P.
   Every optab that has an implementation is considered supported
   regardless of the optimization type.  */

bool
default_optab_supported_p (int, machine_mode, machine_mode, optimization_type)
{
  return true;
}
2562 | |
2563 | /* Default implementation of TARGET_MAX_NOCE_IFCVT_SEQ_COST. */ |
2564 | |
2565 | unsigned int |
2566 | default_max_noce_ifcvt_seq_cost (edge e) |
2567 | { |
2568 | bool predictable_p = predictable_edge_p (e); |
2569 | |
2570 | if (predictable_p) |
2571 | { |
2572 | if (OPTION_SET_P (param_max_rtl_if_conversion_predictable_cost)) |
2573 | return param_max_rtl_if_conversion_predictable_cost; |
2574 | } |
2575 | else |
2576 | { |
2577 | if (OPTION_SET_P (param_max_rtl_if_conversion_unpredictable_cost)) |
2578 | return param_max_rtl_if_conversion_unpredictable_cost; |
2579 | } |
2580 | |
2581 | return BRANCH_COST (true, predictable_p) * COSTS_N_INSNS (3); |
2582 | } |
2583 | |
/* Default implementation of TARGET_MIN_ARITHMETIC_PRECISION.
   Word-register-operation targets do arithmetic in at least word
   precision; all others can go down to a single byte.  */

unsigned int
default_min_arithmetic_precision (void)
{
  return WORD_REGISTER_OPERATIONS ? BITS_PER_WORD : BITS_PER_UNIT;
}
2591 | |
/* Default implementation of TARGET_C_EXCESS_PRECISION.
   Regardless of the requested excess-precision type, promote
   intermediate results to float (FLT_EVAL_METHOD_PROMOTE_TO_FLOAT).  */

enum flt_eval_method
default_excess_precision (enum excess_precision_type ATTRIBUTE_UNUSED)
{
  return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
}
2599 | |
/* Return true if _BitInt(N) is supported and fill details about it into
   *INFO.  By default _BitInt is unsupported; targets opt in by
   overriding this hook.  */
bool
default_bitint_type_info (int, struct bitint_info *)
{
  return false;
}
2607 | |
/* Default implementation for
   TARGET_STACK_CLASH_PROTECTION_ALLOCA_PROBE_RANGE.
   Zero means no unprobed range is allowed for alloca allocations.  */
HOST_WIDE_INT
default_stack_clash_protection_alloca_probe_range (void)
{
  return 0;
}
2615 | |
/* The default implementation of TARGET_EARLY_REMAT_MODES.
   Leaves the mode bitmap empty: no modes are candidates for early
   rematerialization by default.  */

void
default_select_early_remat_modes (sbitmap)
{
}
2622 | |
/* The default implementation of TARGET_PREFERRED_ELSE_VALUE.
   Use an all-zeros constant of TYPE as the else value for
   conditional operations.  */

tree
default_preferred_else_value (unsigned, tree type, unsigned, tree *)
{
  return build_zero_cst (type);
}
2630 | |
/* Default implementation of TARGET_HAVE_SPECULATION_SAFE_VALUE.
   ACTIVE is true when an actual mitigation is required; in that case the
   answer depends on whether the speculation_barrier pattern is enabled.
   When inactive, defining the pattern at all is sufficient.  */
bool
default_have_speculation_safe_value (bool active ATTRIBUTE_UNUSED)
{
#ifdef HAVE_speculation_barrier
  return active ? HAVE_speculation_barrier : true;
#else
  return false;
#endif
}
/* Alternative implementation of TARGET_HAVE_SPECULATION_SAFE_VALUE
   that can be used on targets that never have speculative execution.
   Such targets "support" the builtin only in the trivial, inactive
   sense.  */
bool
speculation_safe_value_not_needed (bool active)
{
  return !active;
}
2648 | |
/* Default implementation of the speculation-safe-load builtin.  This
   implementation simply copies val to result and generates a
   speculation_barrier insn, if such a pattern is defined.
   FAILVAL is unused by the default.  Returns RESULT.  */
rtx
default_speculation_safe_value (machine_mode mode ATTRIBUTE_UNUSED,
				rtx result, rtx val,
				rtx failval ATTRIBUTE_UNUSED)
{
  emit_move_insn (result, val);

#ifdef HAVE_speculation_barrier
  /* Assume the target knows what it is doing: if it defines a
     speculation barrier, but it is not enabled, then assume that one
     isn't needed.  */
  if (HAVE_speculation_barrier)
    emit_insn (gen_speculation_barrier ());
#endif

  return result;
}
2669 | |
2670 | /* How many bits to shift in order to access the tag bits. |
2671 | The default is to store the tag in the top 8 bits of a 64 bit pointer, hence |
2672 | shifting 56 bits will leave just the tag. */ |
2673 | #define HWASAN_SHIFT (GET_MODE_PRECISION (Pmode) - 8) |
2674 | #define HWASAN_SHIFT_RTX GEN_INT (HWASAN_SHIFT) |
2675 | |
/* Default implementation of TARGET_MEMTAG_CAN_TAG_ADDRESSES.
   Targets must opt in to pointer tagging (e.g. for HWASAN).  */
bool
default_memtag_can_tag_addresses ()
{
  return false;
}
2681 | |
/* Default implementation of TARGET_MEMTAG_TAG_SIZE: tags are 8 bits,
   matching the HWASAN_SHIFT layout above.  */
uint8_t
default_memtag_tag_size ()
{
  return 8;
}
2687 | |
/* Default implementation of TARGET_MEMTAG_GRANULE_SIZE: one tag covers
   a 16-byte granule of memory.  */
uint8_t
default_memtag_granule_size ()
{
  return 16;
}
2693 | |
2694 | /* The default implementation of TARGET_MEMTAG_INSERT_RANDOM_TAG. */ |
2695 | rtx |
2696 | default_memtag_insert_random_tag (rtx untagged, rtx target) |
2697 | { |
2698 | gcc_assert (param_hwasan_instrument_stack); |
2699 | if (param_hwasan_random_frame_tag) |
2700 | { |
2701 | rtx fn = init_one_libfunc ("__hwasan_generate_tag" ); |
2702 | rtx new_tag = emit_library_call_value (fun: fn, NULL_RTX, fn_type: LCT_NORMAL, QImode); |
2703 | return targetm.memtag.set_tag (untagged, new_tag, target); |
2704 | } |
2705 | else |
2706 | { |
2707 | /* NOTE: The kernel API does not have __hwasan_generate_tag exposed. |
2708 | In the future we may add the option emit random tags with inline |
2709 | instrumentation instead of function calls. This would be the same |
2710 | between the kernel and userland. */ |
2711 | return untagged; |
2712 | } |
2713 | } |
2714 | |
/* The default implementation of TARGET_MEMTAG_ADD_TAG.
   Return BASE + OFFSET with TAG_OFFSET added to the pointer's tag bits,
   folding the tag adjustment into the constant offset.  */
rtx
default_memtag_add_tag (rtx base, poly_int64 offset, uint8_t tag_offset)
{
  /* Need to look into what the most efficient code sequence is.
     This is a code sequence that would be emitted *many* times, so we
     want it as small as possible.

     There are two places where tag overflow is a question:
       - Tagging the shadow stack.
	  (both tagging and untagging).
       - Tagging addressable pointers.

     We need to ensure both behaviors are the same (i.e. that the tag that
     ends up in a pointer after "overflowing" the tag bits with a tag addition
     is the same that ends up in the shadow space).

     The aim is that the behavior of tag addition should follow modulo
     wrapping in both instances.

     The libhwasan code doesn't have any path that increments a pointer's tag,
     which means it has no opinion on what happens when a tag increment
     overflows (and hence we can choose our own behavior).  */

  /* Shift the tag delta up into the tag bits and fold it into OFFSET.  */
  offset += ((uint64_t)tag_offset << HWASAN_SHIFT);
  return plus_constant (Pmode, base, offset);
}
2742 | |
/* The default implementation of TARGET_MEMTAG_SET_TAG.
   OR the QImode TAG, shifted into the tag bits, into UNTAGGED (whose
   tag bits are assumed clear), optionally using TARGET as the
   destination.  Returns the tagged pointer.  */
rtx
default_memtag_set_tag (rtx untagged, rtx tag, rtx target)
{
  gcc_assert (GET_MODE (untagged) == Pmode && GET_MODE (tag) == QImode);
  tag = expand_simple_binop (Pmode, ASHIFT, tag, HWASAN_SHIFT_RTX, NULL_RTX,
			     /* unsignedp = */1, OPTAB_WIDEN);
  rtx ret = expand_simple_binop (Pmode, IOR, untagged, tag, target,
				 /* unsignedp = */1, OPTAB_DIRECT);
  gcc_assert (ret);
  return ret;
}
2755 | |
2756 | /* The default implementation of TARGET_MEMTAG_EXTRACT_TAG. */ |
2757 | rtx |
2758 | (rtx tagged_pointer, rtx target) |
2759 | { |
2760 | rtx tag = expand_simple_binop (Pmode, LSHIFTRT, tagged_pointer, |
2761 | HWASAN_SHIFT_RTX, target, |
2762 | /* unsignedp = */0, |
2763 | OPTAB_DIRECT); |
2764 | rtx ret = gen_lowpart (QImode, tag); |
2765 | gcc_assert (ret); |
2766 | return ret; |
2767 | } |
2768 | |
/* The default implementation of TARGET_MEMTAG_UNTAGGED_POINTER.
   Clear the tag bits of TAGGED_POINTER by masking them off, optionally
   using TARGET as the destination, and return the untagged pointer.  */
rtx
default_memtag_untagged_pointer (rtx tagged_pointer, rtx target)
{
  /* Mask covers everything below the tag bits.  */
  rtx tag_mask = gen_int_mode ((HOST_WIDE_INT_1U << HWASAN_SHIFT) - 1, Pmode);
  rtx untagged_base = expand_simple_binop (Pmode, AND, tagged_pointer,
					   tag_mask, target, true,
					   OPTAB_DIRECT);
  gcc_assert (untagged_base);
  return untagged_base;
}
2780 | |
/* The default implementation of TARGET_GCOV_TYPE_SIZE.
   Use 64-bit counters when the target's long long exceeds 32 bits,
   otherwise 32-bit counters.  */
HOST_WIDE_INT
default_gcov_type_size (void)
{
  return TYPE_PRECISION (long_long_integer_type_node) > 32 ? 64 : 32;
}
2787 | |
2788 | #include "gt-targhooks.h" |
2789 | |