1 | /* Subroutines for manipulating rtx's in semantically interesting ways. |
2 | Copyright (C) 1987-2023 Free Software Foundation, Inc. |
3 | |
4 | This file is part of GCC. |
5 | |
6 | GCC is free software; you can redistribute it and/or modify it under |
7 | the terms of the GNU General Public License as published by the Free |
8 | Software Foundation; either version 3, or (at your option) any later |
9 | version. |
10 | |
11 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or |
13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
14 | for more details. |
15 | |
16 | You should have received a copy of the GNU General Public License |
17 | along with GCC; see the file COPYING3. If not see |
18 | <http://www.gnu.org/licenses/>. */ |
19 | |
20 | |
21 | #include "config.h" |
22 | #include "system.h" |
23 | #include "coretypes.h" |
24 | #include "target.h" |
25 | #include "function.h" |
26 | #include "rtl.h" |
27 | #include "tree.h" |
28 | #include "memmodel.h" |
29 | #include "tm_p.h" |
30 | #include "optabs.h" |
31 | #include "expmed.h" |
32 | #include "profile-count.h" |
33 | #include "emit-rtl.h" |
34 | #include "recog.h" |
35 | #include "diagnostic-core.h" |
36 | #include "stor-layout.h" |
37 | #include "langhooks.h" |
38 | #include "except.h" |
39 | #include "dojump.h" |
40 | #include "explow.h" |
41 | #include "expr.h" |
42 | #include "stringpool.h" |
43 | #include "common/common-target.h" |
44 | #include "output.h" |
45 | |
46 | static rtx break_out_memory_refs (rtx); |
47 | |
48 | |
49 | /* Truncate and perhaps sign-extend C as appropriate for MODE. */ |
50 | |
51 | HOST_WIDE_INT |
52 | trunc_int_for_mode (HOST_WIDE_INT c, machine_mode mode) |
53 | { |
54 | /* Not scalar_int_mode because we also allow pointer bound modes. */ |
  scalar_mode smode = as_a <scalar_mode> (mode);
  int width = GET_MODE_PRECISION (smode);
57 | |
58 | /* You want to truncate to a _what_? */ |
59 | gcc_assert (SCALAR_INT_MODE_P (mode)); |
60 | |
61 | /* Canonicalize BImode to 0 and STORE_FLAG_VALUE. */ |
62 | if (smode == BImode) |
63 | return c & 1 ? STORE_FLAG_VALUE : 0; |
64 | |
65 | /* Sign-extend for the requested mode. */ |
66 | |
67 | if (width < HOST_BITS_PER_WIDE_INT) |
68 | { |
69 | HOST_WIDE_INT sign = 1; |
70 | sign <<= width - 1; |
71 | c &= (sign << 1) - 1; |
72 | c ^= sign; |
73 | c -= sign; |
74 | } |
75 | |
76 | return c; |
77 | } |
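
/* A worked example of the bit manipulation above (illustrative only):
   for MODE == QImode, WIDTH is 8 and SIGN is 0x80, so C == 0x1ff is
   truncated as
     0x1ff & 0xff == 0xff;  0xff ^ 0x80 == 0x7f;  0x7f - 0x80 == -1;
   i.e. the low 8 bits of C reinterpreted as a signed value.  */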
78 | |
79 | /* Likewise for polynomial values, using the sign-extended representation |
80 | for each individual coefficient. */ |
81 | |
82 | poly_int64 |
83 | trunc_int_for_mode (poly_int64 x, machine_mode mode) |
84 | { |
85 | for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i) |
    x.coeffs[i] = trunc_int_for_mode (x.coeffs[i], mode);
87 | return x; |
88 | } |
89 | |
90 | /* Return an rtx for the sum of X and the integer C, given that X has |
   mode MODE.  INPLACE is true if X can be modified in place or false
92 | if it must be treated as immutable. */ |
93 | |
94 | rtx |
95 | plus_constant (machine_mode mode, rtx x, poly_int64 c, bool inplace) |
96 | { |
97 | RTX_CODE code; |
98 | rtx y; |
99 | rtx tem; |
100 | int all_constant = 0; |
101 | |
102 | gcc_assert (GET_MODE (x) == VOIDmode || GET_MODE (x) == mode); |
103 | |
104 | if (known_eq (c, 0)) |
105 | return x; |
106 | |
107 | restart: |
108 | |
109 | code = GET_CODE (x); |
110 | y = x; |
111 | |
112 | switch (code) |
113 | { |
114 | CASE_CONST_SCALAR_INT: |
      return immed_wide_int_const (wi::add (rtx_mode_t (x, mode), c), mode);
116 | case MEM: |
117 | /* If this is a reference to the constant pool, try replacing it with |
118 | a reference to a new constant. If the resulting address isn't |
119 | valid, don't return it because we have no way to validize it. */ |
120 | if (GET_CODE (XEXP (x, 0)) == SYMBOL_REF |
121 | && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0))) |
122 | { |
123 | rtx cst = get_pool_constant (XEXP (x, 0)); |
124 | |
125 | if (GET_CODE (cst) == CONST_VECTOR |
126 | && GET_MODE_INNER (GET_MODE (cst)) == mode) |
127 | { |
128 | cst = gen_lowpart (mode, cst); |
129 | gcc_assert (cst); |
130 | } |
131 | else if (GET_MODE (cst) == VOIDmode |
132 | && get_pool_mode (XEXP (x, 0)) != mode) |
133 | break; |
134 | if (GET_MODE (cst) == VOIDmode || GET_MODE (cst) == mode) |
135 | { |
	      tem = plus_constant (mode, cst, c);
137 | tem = force_const_mem (GET_MODE (x), tem); |
138 | /* Targets may disallow some constants in the constant pool, thus |
139 | force_const_mem may return NULL_RTX. */ |
140 | if (tem && memory_address_p (GET_MODE (tem), XEXP (tem, 0))) |
141 | return tem; |
142 | } |
143 | } |
144 | break; |
145 | |
146 | case CONST: |
147 | /* If adding to something entirely constant, set a flag |
148 | so that we can add a CONST around the result. */ |
149 | if (inplace && shared_const_p (x)) |
150 | inplace = false; |
151 | x = XEXP (x, 0); |
152 | all_constant = 1; |
153 | goto restart; |
154 | |
155 | case SYMBOL_REF: |
156 | case LABEL_REF: |
157 | all_constant = 1; |
158 | break; |
159 | |
160 | case PLUS: |
      /* The interesting case is adding the integer to a sum.  Look
	 for a constant term in the sum and combine it with C.  For an
163 | integer constant term or a constant term that is not an |
164 | explicit integer, we combine or group them together anyway. |
165 | |
166 | We may not immediately return from the recursive call here, lest |
167 | all_constant gets lost. */ |
168 | |
169 | if (CONSTANT_P (XEXP (x, 1))) |
170 | { |
171 | rtx term = plus_constant (mode, XEXP (x, 1), c, inplace); |
172 | if (term == const0_rtx) |
173 | x = XEXP (x, 0); |
174 | else if (inplace) |
175 | XEXP (x, 1) = term; |
176 | else |
177 | x = gen_rtx_PLUS (mode, XEXP (x, 0), term); |
178 | c = 0; |
179 | } |
180 | else if (rtx *const_loc = find_constant_term_loc (&y)) |
181 | { |
182 | if (!inplace) |
183 | { |
184 | /* We need to be careful since X may be shared and we can't |
185 | modify it in place. */ |
186 | x = copy_rtx (x); |
187 | const_loc = find_constant_term_loc (&x); |
188 | } |
	  *const_loc = plus_constant (mode, *const_loc, c, true);
190 | c = 0; |
191 | } |
192 | break; |
193 | |
194 | default: |
195 | if (CONST_POLY_INT_P (x)) |
196 | return immed_wide_int_const (const_poly_int_value (x) + c, mode); |
197 | break; |
198 | } |
199 | |
  if (maybe_ne (c, 0))
201 | x = gen_rtx_PLUS (mode, x, gen_int_mode (c, mode)); |
202 | |
203 | if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF) |
204 | return x; |
205 | else if (all_constant) |
206 | return gen_rtx_CONST (mode, x); |
207 | else |
208 | return x; |
209 | } |
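
/* An illustrative trace of the CONST handling above: adding 8 to
   (const (plus (symbol_ref "x") (const_int 4))) strips the CONST, folds
   the constants in the inner PLUS, and rewraps the result, yielding
   (const (plus (symbol_ref "x") (const_int 12))) rather than stacking a
   second PLUS on the outside.  */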
210 | |
211 | /* If X is a sum, return a new sum like X but lacking any constant terms. |
212 | Add all the removed constant terms into *CONSTPTR. |
213 | X itself is not altered. The result != X if and only if |
214 | it is not isomorphic to X. */ |
215 | |
216 | rtx |
217 | eliminate_constant_term (rtx x, rtx *constptr) |
218 | { |
219 | rtx x0, x1; |
220 | rtx tem; |
221 | |
222 | if (GET_CODE (x) != PLUS) |
223 | return x; |
224 | |
225 | /* First handle constants appearing at this level explicitly. */ |
226 | if (CONST_INT_P (XEXP (x, 1)) |
      && (tem = simplify_binary_operation (PLUS, GET_MODE (x), *constptr,
					   XEXP (x, 1))) != 0
229 | && CONST_INT_P (tem)) |
230 | { |
231 | *constptr = tem; |
232 | return eliminate_constant_term (XEXP (x, 0), constptr); |
233 | } |
234 | |
235 | tem = const0_rtx; |
  x0 = eliminate_constant_term (XEXP (x, 0), &tem);
  x1 = eliminate_constant_term (XEXP (x, 1), &tem);
238 | if ((x1 != XEXP (x, 1) || x0 != XEXP (x, 0)) |
      && (tem = simplify_binary_operation (PLUS, GET_MODE (x),
					   *constptr, tem)) != 0
241 | && CONST_INT_P (tem)) |
242 | { |
243 | *constptr = tem; |
244 | return gen_rtx_PLUS (GET_MODE (x), x0, x1); |
245 | } |
246 | |
247 | return x; |
248 | } |
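
/* Example (illustrative): starting with *CONSTPTR == const0_rtx, an input
   of (plus (plus (reg R) (const_int 4)) (const_int 8)) returns (reg R)
   and leaves *CONSTPTR == (const_int 12).  */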
249 | |
250 | |
251 | /* Return a copy of X in which all memory references |
252 | and all constants that involve symbol refs |
253 | have been replaced with new temporary registers. |
254 | Also emit code to load the memory locations and constants |
255 | into those registers. |
256 | |
257 | If X contains no such constants or memory references, |
258 | X itself (not a copy) is returned. |
259 | |
260 | If a constant is found in the address that is not a legitimate constant |
261 | in an insn, it is left alone in the hope that it might be valid in the |
262 | address. |
263 | |
264 | X may contain no arithmetic except addition, subtraction and multiplication. |
265 | Values returned by expand_expr with 1 for sum_ok fit this constraint. */ |
266 | |
267 | static rtx |
268 | break_out_memory_refs (rtx x) |
269 | { |
270 | if (MEM_P (x) |
271 | || (CONSTANT_P (x) && CONSTANT_ADDRESS_P (x) |
272 | && GET_MODE (x) != VOIDmode)) |
273 | x = force_reg (GET_MODE (x), x); |
274 | else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS |
275 | || GET_CODE (x) == MULT) |
276 | { |
277 | rtx op0 = break_out_memory_refs (XEXP (x, 0)); |
278 | rtx op1 = break_out_memory_refs (XEXP (x, 1)); |
279 | |
280 | if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1)) |
281 | x = simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1); |
282 | } |
283 | |
284 | return x; |
285 | } |
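
/* For instance (sketch), (plus (mem (reg R)) (const_int 4)) becomes
   (plus (reg TMP) (const_int 4)) after a load of the MEM into the fresh
   pseudo TMP is emitted; the CONST_INT stays put because its VOIDmode
   excludes it from the force_reg test above.  */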
286 | |
287 | /* Given X, a memory address in address space AS' pointer mode, convert it to |
288 | an address in the address space's address mode, or vice versa (TO_MODE says |
289 | which way). We take advantage of the fact that pointers are not allowed to |
290 | overflow by commuting arithmetic operations over conversions so that address |
291 | arithmetic insns can be used. IN_CONST is true if this conversion is inside |
292 | a CONST. NO_EMIT is true if no insns should be emitted, and instead |
293 | it should return NULL if it can't be simplified without emitting insns. */ |
294 | |
295 | rtx |
296 | convert_memory_address_addr_space_1 (scalar_int_mode to_mode ATTRIBUTE_UNUSED, |
297 | rtx x, addr_space_t as ATTRIBUTE_UNUSED, |
298 | bool in_const ATTRIBUTE_UNUSED, |
299 | bool no_emit ATTRIBUTE_UNUSED) |
300 | { |
301 | #ifndef POINTERS_EXTEND_UNSIGNED |
302 | gcc_assert (GET_MODE (x) == to_mode || GET_MODE (x) == VOIDmode); |
303 | return x; |
304 | #else /* defined(POINTERS_EXTEND_UNSIGNED) */ |
305 | scalar_int_mode pointer_mode, address_mode, from_mode; |
306 | rtx temp; |
307 | enum rtx_code code; |
308 | |
309 | /* If X already has the right mode, just return it. */ |
310 | if (GET_MODE (x) == to_mode) |
311 | return x; |
312 | |
313 | pointer_mode = targetm.addr_space.pointer_mode (as); |
314 | address_mode = targetm.addr_space.address_mode (as); |
315 | from_mode = to_mode == pointer_mode ? address_mode : pointer_mode; |
316 | |
317 | /* Here we handle some special cases. If none of them apply, fall through |
318 | to the default case. */ |
319 | switch (GET_CODE (x)) |
320 | { |
321 | CASE_CONST_SCALAR_INT: |
      if (GET_MODE_SIZE (to_mode) < GET_MODE_SIZE (from_mode))
323 | code = TRUNCATE; |
324 | else if (POINTERS_EXTEND_UNSIGNED < 0) |
325 | break; |
326 | else if (POINTERS_EXTEND_UNSIGNED > 0) |
327 | code = ZERO_EXTEND; |
328 | else |
329 | code = SIGN_EXTEND; |
      temp = simplify_unary_operation (code, to_mode, x, from_mode);
331 | if (temp) |
332 | return temp; |
333 | break; |
334 | |
335 | case SUBREG: |
336 | if ((SUBREG_PROMOTED_VAR_P (x) || REG_POINTER (SUBREG_REG (x))) |
337 | && GET_MODE (SUBREG_REG (x)) == to_mode) |
338 | return SUBREG_REG (x); |
339 | break; |
340 | |
341 | case LABEL_REF: |
342 | temp = gen_rtx_LABEL_REF (to_mode, label_ref_label (x)); |
343 | LABEL_REF_NONLOCAL_P (temp) = LABEL_REF_NONLOCAL_P (x); |
344 | return temp; |
345 | |
346 | case SYMBOL_REF: |
347 | temp = shallow_copy_rtx (x); |
      PUT_MODE (temp, to_mode);
349 | return temp; |
350 | |
351 | case CONST: |
352 | { |
353 | auto *last = no_emit ? nullptr : get_last_insn (); |
	temp = convert_memory_address_addr_space_1 (to_mode, XEXP (x, 0), as,
						    true, no_emit);
356 | if (temp && (no_emit || last == get_last_insn ())) |
357 | return gen_rtx_CONST (to_mode, temp); |
358 | return temp; |
359 | } |
360 | |
361 | case PLUS: |
362 | case MULT: |
363 | /* For addition we can safely permute the conversion and addition |
364 | operation if one operand is a constant and converting the constant |
365 | does not change it or if one operand is a constant and we are |
366 | using a ptr_extend instruction (POINTERS_EXTEND_UNSIGNED < 0). |
367 | We can always safely permute them if we are making the address |
368 | narrower. Inside a CONST RTL, this is safe for both pointers |
369 | zero or sign extended as pointers cannot wrap. */ |
      if (GET_MODE_SIZE (to_mode) < GET_MODE_SIZE (from_mode)
371 | || (GET_CODE (x) == PLUS |
372 | && CONST_INT_P (XEXP (x, 1)) |
373 | && ((in_const && POINTERS_EXTEND_UNSIGNED != 0) |
374 | || XEXP (x, 1) == convert_memory_address_addr_space_1 |
375 | (to_mode, XEXP (x, 1), as, in_const, |
376 | no_emit) |
377 | || POINTERS_EXTEND_UNSIGNED < 0))) |
378 | { |
379 | temp = convert_memory_address_addr_space_1 (to_mode, XEXP (x, 0), |
380 | as, in_const, no_emit); |
381 | return (temp ? gen_rtx_fmt_ee (GET_CODE (x), to_mode, |
382 | temp, XEXP (x, 1)) |
383 | : temp); |
384 | } |
385 | break; |
386 | |
387 | case UNSPEC: |
388 | /* Assume that all UNSPECs in a constant address can be converted |
389 | operand-by-operand. We could add a target hook if some targets |
390 | require different behavior. */ |
391 | if (in_const && GET_MODE (x) == from_mode) |
392 | { |
393 | unsigned int n = XVECLEN (x, 0); |
394 | rtvec v = gen_rtvec (n); |
395 | for (unsigned int i = 0; i < n; ++i) |
396 | { |
397 | rtx op = XVECEXP (x, 0, i); |
398 | if (GET_MODE (op) == from_mode) |
	      op = convert_memory_address_addr_space_1 (to_mode, op, as,
							in_const, no_emit);
401 | RTVEC_ELT (v, i) = op; |
402 | } |
403 | return gen_rtx_UNSPEC (to_mode, v, XINT (x, 1)); |
404 | } |
405 | break; |
406 | |
407 | default: |
408 | break; |
409 | } |
410 | |
411 | if (no_emit) |
412 | return NULL_RTX; |
413 | |
  return convert_modes (to_mode, from_mode,
			x, POINTERS_EXTEND_UNSIGNED);
416 | #endif /* defined(POINTERS_EXTEND_UNSIGNED) */ |
417 | } |
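
/* An illustrative case for the PLUS handling above: on a target where
   ptr_mode is SImode, Pmode is DImode and POINTERS_EXTEND_UNSIGNED == 1,
   converting (plus:SI P (const_int 4)) to DImode inside a CONST pushes
   the conversion down onto the pointer term, giving
   (plus:DI P' (const_int 4)) where P' is P converted to DImode -- safe
   only because pointer arithmetic is assumed not to overflow.  */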
418 | |
419 | /* Given X, a memory address in address space AS' pointer mode, convert it to |
420 | an address in the address space's address mode, or vice versa (TO_MODE says |
421 | which way). We take advantage of the fact that pointers are not allowed to |
422 | overflow by commuting arithmetic operations over conversions so that address |
423 | arithmetic insns can be used. */ |
424 | |
425 | rtx |
426 | convert_memory_address_addr_space (scalar_int_mode to_mode, rtx x, |
427 | addr_space_t as) |
428 | { |
  return convert_memory_address_addr_space_1 (to_mode, x, as, false, false);
430 | } |
431 | |
432 | |
433 | /* Return something equivalent to X but valid as a memory address for something |
434 | of mode MODE in the named address space AS. When X is not itself valid, |
435 | this works by copying X or subexpressions of it into registers. */ |
436 | |
437 | rtx |
438 | memory_address_addr_space (machine_mode mode, rtx x, addr_space_t as) |
439 | { |
440 | rtx oldx = x; |
441 | scalar_int_mode address_mode = targetm.addr_space.address_mode (as); |
442 | |
  x = convert_memory_address_addr_space (address_mode, x, as);
444 | |
445 | /* By passing constant addresses through registers |
446 | we get a chance to cse them. */ |
447 | if (! cse_not_expected && CONSTANT_P (x) && CONSTANT_ADDRESS_P (x)) |
448 | x = force_reg (address_mode, x); |
449 | |
450 | /* We get better cse by rejecting indirect addressing at this stage. |
451 | Let the combiner create indirect addresses where appropriate. |
452 | For now, generate the code so that the subexpressions useful to share |
453 | are visible. But not if cse won't be done! */ |
454 | else |
455 | { |
456 | if (! cse_not_expected && !REG_P (x)) |
457 | x = break_out_memory_refs (x); |
458 | |
459 | /* At this point, any valid address is accepted. */ |
460 | if (memory_address_addr_space_p (mode, x, as)) |
461 | goto done; |
462 | |
463 | /* If it was valid before but breaking out memory refs invalidated it, |
464 | use it the old way. */ |
465 | if (memory_address_addr_space_p (mode, oldx, as)) |
466 | { |
467 | x = oldx; |
468 | goto done; |
469 | } |
470 | |
471 | /* Perform machine-dependent transformations on X |
472 | in certain cases. This is not necessary since the code |
473 | below can handle all possible cases, but machine-dependent |
474 | transformations can make better code. */ |
475 | { |
476 | rtx orig_x = x; |
477 | x = targetm.addr_space.legitimize_address (x, oldx, mode, as); |
478 | if (orig_x != x && memory_address_addr_space_p (mode, x, as)) |
479 | goto done; |
480 | } |
481 | |
482 | /* PLUS and MULT can appear in special ways |
483 | as the result of attempts to make an address usable for indexing. |
484 | Usually they are dealt with by calling force_operand, below. |
485 | But a sum containing constant terms is special |
486 | if removing them makes the sum a valid address: |
487 | then we generate that address in a register |
488 | and index off of it. We do this because it often makes |
489 | shorter code, and because the addresses thus generated |
490 | in registers often become common subexpressions. */ |
491 | if (GET_CODE (x) == PLUS) |
492 | { |
493 | rtx constant_term = const0_rtx; |
	  rtx y = eliminate_constant_term (x, &constant_term);
495 | if (constant_term == const0_rtx |
496 | || ! memory_address_addr_space_p (mode, y, as)) |
497 | x = force_operand (x, NULL_RTX); |
498 | else |
499 | { |
500 | y = gen_rtx_PLUS (GET_MODE (x), copy_to_reg (y), constant_term); |
501 | if (! memory_address_addr_space_p (mode, y, as)) |
502 | x = force_operand (x, NULL_RTX); |
503 | else |
504 | x = y; |
505 | } |
506 | } |
507 | |
508 | else if (GET_CODE (x) == MULT || GET_CODE (x) == MINUS) |
509 | x = force_operand (x, NULL_RTX); |
510 | |
511 | /* If we have a register that's an invalid address, |
512 | it must be a hard reg of the wrong class. Copy it to a pseudo. */ |
513 | else if (REG_P (x)) |
514 | x = copy_to_reg (x); |
515 | |
516 | /* Last resort: copy the value to a register, since |
517 | the register is a valid address. */ |
518 | else |
519 | x = force_reg (address_mode, x); |
520 | } |
521 | |
522 | done: |
523 | |
524 | gcc_assert (memory_address_addr_space_p (mode, x, as)); |
525 | /* If we didn't change the address, we are done. Otherwise, mark |
526 | a reg as a pointer if we have REG or REG + CONST_INT. */ |
527 | if (oldx == x) |
528 | return x; |
529 | else if (REG_P (x)) |
530 | mark_reg_pointer (x, BITS_PER_UNIT); |
531 | else if (GET_CODE (x) == PLUS |
532 | && REG_P (XEXP (x, 0)) |
533 | && CONST_INT_P (XEXP (x, 1))) |
534 | mark_reg_pointer (XEXP (x, 0), BITS_PER_UNIT); |
535 | |
536 | /* OLDX may have been the address on a temporary. Update the address |
537 | to indicate that X is now used. */ |
538 | update_temp_slot_address (oldx, x); |
539 | |
540 | return x; |
541 | } |
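
/* A sketch of the PLUS case above: if X is
   (plus (plus (reg R1) (reg R2)) (const_int 4095)) and the displacement
   makes the whole sum invalid, the inner sum is copied to a fresh
   register and the address becomes (plus (reg TMP) (const_int 4095)),
   assuming the target accepts that base-plus-offset form.  */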
542 | |
543 | /* Convert a mem ref into one with a valid memory address. |
544 | Pass through anything else unchanged. */ |
545 | |
546 | rtx |
547 | validize_mem (rtx ref) |
548 | { |
549 | if (!MEM_P (ref)) |
550 | return ref; |
551 | ref = use_anchored_address (ref); |
552 | if (memory_address_addr_space_p (GET_MODE (ref), XEXP (ref, 0), |
553 | MEM_ADDR_SPACE (ref))) |
554 | return ref; |
555 | |
556 | /* Don't alter REF itself, since that is probably a stack slot. */ |
557 | return replace_equiv_address (ref, XEXP (ref, 0)); |
558 | } |
559 | |
560 | /* If X is a memory reference to a member of an object block, try rewriting |
561 | it to use an anchor instead. Return the new memory reference on success |
562 | and the old one on failure. */ |
563 | |
564 | rtx |
565 | use_anchored_address (rtx x) |
566 | { |
567 | rtx base; |
568 | HOST_WIDE_INT offset; |
569 | machine_mode mode; |
570 | |
571 | if (!flag_section_anchors) |
572 | return x; |
573 | |
574 | if (!MEM_P (x)) |
575 | return x; |
576 | |
577 | /* Split the address into a base and offset. */ |
578 | base = XEXP (x, 0); |
579 | offset = 0; |
580 | if (GET_CODE (base) == CONST |
581 | && GET_CODE (XEXP (base, 0)) == PLUS |
582 | && CONST_INT_P (XEXP (XEXP (base, 0), 1))) |
583 | { |
584 | offset += INTVAL (XEXP (XEXP (base, 0), 1)); |
585 | base = XEXP (XEXP (base, 0), 0); |
586 | } |
587 | |
588 | /* Check whether BASE is suitable for anchors. */ |
589 | if (GET_CODE (base) != SYMBOL_REF |
590 | || !SYMBOL_REF_HAS_BLOCK_INFO_P (base) |
591 | || SYMBOL_REF_ANCHOR_P (base) |
592 | || SYMBOL_REF_BLOCK (base) == NULL |
593 | || !targetm.use_anchors_for_symbol_p (base)) |
594 | return x; |
595 | |
596 | /* Decide where BASE is going to be. */ |
597 | place_block_symbol (base); |
598 | |
599 | /* Get the anchor we need to use. */ |
600 | offset += SYMBOL_REF_BLOCK_OFFSET (base); |
601 | base = get_section_anchor (SYMBOL_REF_BLOCK (base), offset, |
602 | SYMBOL_REF_TLS_MODEL (base)); |
603 | |
604 | /* Work out the offset from the anchor. */ |
605 | offset -= SYMBOL_REF_BLOCK_OFFSET (base); |
606 | |
607 | /* If we're going to run a CSE pass, force the anchor into a register. |
608 | We will then be able to reuse registers for several accesses, if the |
609 | target costs say that that's worthwhile. */ |
610 | mode = GET_MODE (base); |
611 | if (!cse_not_expected) |
612 | base = force_reg (mode, base); |
613 | |
  return replace_equiv_address (x, plus_constant (mode, base, offset));
615 | } |
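
/* Illustrative example: a reference to (symbol_ref "b") placed 12 bytes
   after the anchor chosen for its object block is rewritten as
   (plus (symbol_ref <anchor>) (const_int 12)), with the anchor first
   forced into a register when a CSE pass is still expected.  */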
616 | |
617 | /* Copy the value or contents of X to a new temp reg and return that reg. */ |
618 | |
619 | rtx |
620 | copy_to_reg (rtx x) |
621 | { |
622 | rtx temp = gen_reg_rtx (GET_MODE (x)); |
623 | |
624 | /* If not an operand, must be an address with PLUS and MULT so |
625 | do the computation. */ |
626 | if (! general_operand (x, VOIDmode)) |
627 | x = force_operand (x, temp); |
628 | |
629 | if (x != temp) |
630 | emit_move_insn (temp, x); |
631 | |
632 | return temp; |
633 | } |
634 | |
635 | /* Like copy_to_reg but always give the new register mode Pmode |
636 | in case X is a constant. */ |
637 | |
638 | rtx |
639 | copy_addr_to_reg (rtx x) |
640 | { |
641 | return copy_to_mode_reg (Pmode, x); |
642 | } |
643 | |
644 | /* Like copy_to_reg but always give the new register mode MODE |
645 | in case X is a constant. */ |
646 | |
647 | rtx |
648 | copy_to_mode_reg (machine_mode mode, rtx x) |
649 | { |
650 | rtx temp = gen_reg_rtx (mode); |
651 | |
652 | /* If not an operand, must be an address with PLUS and MULT so |
653 | do the computation. */ |
654 | if (! general_operand (x, VOIDmode)) |
655 | x = force_operand (x, temp); |
656 | |
657 | gcc_assert (GET_MODE (x) == mode || GET_MODE (x) == VOIDmode); |
658 | if (x != temp) |
659 | emit_move_insn (temp, x); |
660 | return temp; |
661 | } |
662 | |
663 | /* Load X into a register if it is not already one. |
664 | Use mode MODE for the register. |
665 | X should be valid for mode MODE, but it may be a constant which |
666 | is valid for all integer modes; that's why caller must specify MODE. |
667 | |
668 | The caller must not alter the value in the register we return, |
669 | since we mark it as a "constant" register. */ |
670 | |
671 | rtx |
672 | force_reg (machine_mode mode, rtx x) |
673 | { |
674 | rtx temp, set; |
675 | rtx_insn *insn; |
676 | |
677 | if (REG_P (x)) |
678 | return x; |
679 | |
680 | if (general_operand (x, mode)) |
681 | { |
682 | temp = gen_reg_rtx (mode); |
683 | insn = emit_move_insn (temp, x); |
684 | } |
685 | else |
686 | { |
687 | temp = force_operand (x, NULL_RTX); |
688 | if (REG_P (temp)) |
689 | insn = get_last_insn (); |
690 | else |
691 | { |
692 | rtx temp2 = gen_reg_rtx (mode); |
693 | insn = emit_move_insn (temp2, temp); |
694 | temp = temp2; |
695 | } |
696 | } |
697 | |
698 | /* Let optimizers know that TEMP's value never changes |
699 | and that X can be substituted for it. Don't get confused |
700 | if INSN set something else (such as a SUBREG of TEMP). */ |
701 | if (CONSTANT_P (x) |
702 | && (set = single_set (insn)) != 0 |
703 | && SET_DEST (set) == temp |
704 | && ! rtx_equal_p (x, SET_SRC (set))) |
705 | set_unique_reg_note (insn, REG_EQUAL, x); |
706 | |
707 | /* Let optimizers know that TEMP is a pointer, and if so, the |
708 | known alignment of that pointer. */ |
709 | { |
710 | unsigned align = 0; |
711 | if (GET_CODE (x) == SYMBOL_REF) |
712 | { |
713 | align = BITS_PER_UNIT; |
714 | if (SYMBOL_REF_DECL (x) && DECL_P (SYMBOL_REF_DECL (x))) |
715 | align = DECL_ALIGN (SYMBOL_REF_DECL (x)); |
716 | } |
717 | else if (GET_CODE (x) == LABEL_REF) |
718 | align = BITS_PER_UNIT; |
719 | else if (GET_CODE (x) == CONST |
720 | && GET_CODE (XEXP (x, 0)) == PLUS |
721 | && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF |
722 | && CONST_INT_P (XEXP (XEXP (x, 0), 1))) |
723 | { |
724 | rtx s = XEXP (XEXP (x, 0), 0); |
725 | rtx c = XEXP (XEXP (x, 0), 1); |
726 | unsigned sa, ca; |
727 | |
728 | sa = BITS_PER_UNIT; |
729 | if (SYMBOL_REF_DECL (s) && DECL_P (SYMBOL_REF_DECL (s))) |
730 | sa = DECL_ALIGN (SYMBOL_REF_DECL (s)); |
731 | |
732 | if (INTVAL (c) == 0) |
733 | align = sa; |
734 | else |
735 | { |
736 | ca = ctz_hwi (INTVAL (c)) * BITS_PER_UNIT; |
737 | align = MIN (sa, ca); |
738 | } |
739 | } |
740 | |
741 | if (align || (MEM_P (x) && MEM_POINTER (x))) |
742 | mark_reg_pointer (temp, align); |
743 | } |
744 | |
745 | return temp; |
746 | } |
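
/* Example of the alignment deduction above (sketch): forcing
   (const (plus (symbol_ref "v") (const_int 4))) with "v" declared
   16-byte aligned gives SA == 128 and CA == ctz_hwi (4) * BITS_PER_UNIT
   == 16, so TEMP is marked as a pointer with the conservative alignment
   MIN (128, 16) == 16 bits.  */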
747 | |
748 | /* If X is a memory ref, copy its contents to a new temp reg and return |
749 | that reg. Otherwise, return X. */ |
750 | |
751 | rtx |
752 | force_not_mem (rtx x) |
753 | { |
754 | rtx temp; |
755 | |
756 | if (!MEM_P (x) || GET_MODE (x) == BLKmode) |
757 | return x; |
758 | |
759 | temp = gen_reg_rtx (GET_MODE (x)); |
760 | |
761 | if (MEM_POINTER (x)) |
762 | REG_POINTER (temp) = 1; |
763 | |
764 | emit_move_insn (temp, x); |
765 | return temp; |
766 | } |
767 | |
768 | /* Copy X to TARGET (if it's nonzero and a reg) |
769 | or to a new temp reg and return that reg. |
770 | MODE is the mode to use for X in case it is a constant. */ |
771 | |
772 | rtx |
773 | copy_to_suggested_reg (rtx x, rtx target, machine_mode mode) |
774 | { |
775 | rtx temp; |
776 | |
777 | if (target && REG_P (target)) |
778 | temp = target; |
779 | else |
780 | temp = gen_reg_rtx (mode); |
781 | |
782 | emit_move_insn (temp, x); |
783 | return temp; |
784 | } |
785 | |
786 | /* Return the mode to use to pass or return a scalar of TYPE and MODE. |
787 | PUNSIGNEDP points to the signedness of the type and may be adjusted |
788 | to show what signedness to use on extension operations. |
789 | |
790 | FOR_RETURN is nonzero if the caller is promoting the return value |
791 | of FNDECL, else it is for promoting args. */ |
792 | |
793 | machine_mode |
794 | promote_function_mode (const_tree type, machine_mode mode, int *punsignedp, |
795 | const_tree funtype, int for_return) |
796 | { |
797 | /* Called without a type node for a libcall. */ |
798 | if (type == NULL_TREE) |
799 | { |
800 | if (INTEGRAL_MODE_P (mode)) |
801 | return targetm.calls.promote_function_mode (NULL_TREE, mode, |
802 | punsignedp, funtype, |
803 | for_return); |
804 | else |
805 | return mode; |
806 | } |
807 | |
808 | switch (TREE_CODE (type)) |
809 | { |
810 | case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE: |
811 | case REAL_TYPE: case OFFSET_TYPE: case FIXED_POINT_TYPE: |
812 | case POINTER_TYPE: case REFERENCE_TYPE: |
813 | return targetm.calls.promote_function_mode (type, mode, punsignedp, funtype, |
814 | for_return); |
815 | |
816 | default: |
817 | return mode; |
818 | } |
819 | } |
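
/* For instance, on a typical 64-bit RISC target whose
   TARGET_PROMOTE_FUNCTION_MODE hook widens small integer arguments, a
   QImode argument of signed char type would come back as a wider integer
   mode, with *PUNSIGNEDP saying how the extension is performed.  This is
   illustrative; the hook's behavior is entirely target-specific.  */
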
820 | /* Return the mode to use to store a scalar of TYPE and MODE. |
821 | PUNSIGNEDP points to the signedness of the type and may be adjusted |
822 | to show what signedness to use on extension operations. */ |
823 | |
824 | machine_mode |
825 | promote_mode (const_tree type ATTRIBUTE_UNUSED, machine_mode mode, |
826 | int *punsignedp ATTRIBUTE_UNUSED) |
827 | { |
828 | #ifdef PROMOTE_MODE |
829 | enum tree_code code; |
830 | int unsignedp; |
831 | scalar_mode smode; |
832 | #endif |
833 | |
  /* For libcalls this is invoked without TYPE from the backend's
835 | TARGET_PROMOTE_FUNCTION_MODE hooks. Don't do anything in that |
836 | case. */ |
837 | if (type == NULL_TREE) |
838 | return mode; |
839 | |
840 | /* FIXME: this is the same logic that was there until GCC 4.4, but we |
841 | probably want to test POINTERS_EXTEND_UNSIGNED even if PROMOTE_MODE |
842 | is not defined. The affected targets are M32C, S390, SPARC. */ |
843 | #ifdef PROMOTE_MODE |
844 | code = TREE_CODE (type); |
845 | unsignedp = *punsignedp; |
846 | |
847 | switch (code) |
848 | { |
849 | case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE: |
850 | case REAL_TYPE: case OFFSET_TYPE: case FIXED_POINT_TYPE: |
851 | /* Values of these types always have scalar mode. */ |
      smode = as_a <scalar_mode> (mode);
853 | PROMOTE_MODE (smode, unsignedp, type); |
854 | *punsignedp = unsignedp; |
855 | return smode; |
856 | |
857 | #ifdef POINTERS_EXTEND_UNSIGNED |
858 | case REFERENCE_TYPE: |
859 | case POINTER_TYPE: |
860 | *punsignedp = POINTERS_EXTEND_UNSIGNED; |
861 | return targetm.addr_space.address_mode |
862 | (TYPE_ADDR_SPACE (TREE_TYPE (type))); |
863 | #endif |
864 | |
865 | default: |
866 | return mode; |
867 | } |
868 | #else |
869 | return mode; |
870 | #endif |
871 | } |
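
/* Illustrative example: a target might define something like
     #define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
       if (GET_MODE_CLASS (MODE) == MODE_INT \
	   && GET_MODE_SIZE (MODE) < UNITS_PER_WORD) \
	 (MODE) = word_mode;
   in which case promote_mode widens QImode and HImode scalars to
   word_mode while *PUNSIGNEDP keeps the signedness to extend with.  */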
872 | |
873 | |
874 | /* Use one of promote_mode or promote_function_mode to find the promoted |
875 | mode of DECL. If PUNSIGNEDP is not NULL, store there the unsignedness |
876 | of DECL after promotion. */ |
877 | |
878 | machine_mode |
879 | promote_decl_mode (const_tree decl, int *punsignedp) |
880 | { |
881 | tree type = TREE_TYPE (decl); |
882 | int unsignedp = TYPE_UNSIGNED (type); |
883 | machine_mode mode = DECL_MODE (decl); |
884 | machine_mode pmode; |
885 | |
886 | if (TREE_CODE (decl) == RESULT_DECL && !DECL_BY_REFERENCE (decl)) |
    pmode = promote_function_mode (type, mode, &unsignedp,
				   TREE_TYPE (current_function_decl), 1);
  else if (TREE_CODE (decl) == RESULT_DECL || TREE_CODE (decl) == PARM_DECL)
    pmode = promote_function_mode (type, mode, &unsignedp,
				   TREE_TYPE (current_function_decl), 2);
  else
    pmode = promote_mode (type, mode, &unsignedp);
894 | |
895 | if (punsignedp) |
896 | *punsignedp = unsignedp; |
897 | return pmode; |
898 | } |
899 | |
900 | /* Return the promoted mode for name. If it is a named SSA_NAME, it |
901 | is the same as promote_decl_mode. Otherwise, it is the promoted |
902 | mode of a temp decl of same type as the SSA_NAME, if we had created |
903 | one. */ |
904 | |
905 | machine_mode |
906 | promote_ssa_mode (const_tree name, int *punsignedp) |
907 | { |
908 | gcc_assert (TREE_CODE (name) == SSA_NAME); |
909 | |
910 | /* Partitions holding parms and results must be promoted as expected |
911 | by function.cc. */ |
912 | if (SSA_NAME_VAR (name) |
913 | && (TREE_CODE (SSA_NAME_VAR (name)) == PARM_DECL |
914 | || TREE_CODE (SSA_NAME_VAR (name)) == RESULT_DECL)) |
915 | { |
916 | machine_mode mode = promote_decl_mode (SSA_NAME_VAR (name), punsignedp); |
917 | if (mode != BLKmode) |
918 | return mode; |
919 | } |
920 | |
921 | tree type = TREE_TYPE (name); |
922 | int unsignedp = TYPE_UNSIGNED (type); |
  machine_mode pmode = promote_mode (type, TYPE_MODE (type), &unsignedp);
924 | if (punsignedp) |
925 | *punsignedp = unsignedp; |
926 | |
927 | return pmode; |
928 | } |
929 | |
930 | |
931 | |
932 | /* Controls the behavior of {anti_,}adjust_stack. */ |
933 | static bool suppress_reg_args_size; |
934 | |
935 | /* A helper for adjust_stack and anti_adjust_stack. */ |
936 | |
937 | static void |
938 | adjust_stack_1 (rtx adjust, bool anti_p) |
939 | { |
940 | rtx temp; |
941 | rtx_insn *insn; |
942 | |
943 | /* Hereafter anti_p means subtract_p. */ |
944 | if (!STACK_GROWS_DOWNWARD) |
945 | anti_p = !anti_p; |
946 | |
947 | temp = expand_binop (Pmode, |
948 | anti_p ? sub_optab : add_optab, |
949 | stack_pointer_rtx, adjust, stack_pointer_rtx, 0, |
950 | OPTAB_LIB_WIDEN); |
951 | |
952 | if (temp != stack_pointer_rtx) |
953 | insn = emit_move_insn (stack_pointer_rtx, temp); |
954 | else |
955 | { |
956 | insn = get_last_insn (); |
957 | temp = single_set (insn); |
958 | gcc_assert (temp != NULL && SET_DEST (temp) == stack_pointer_rtx); |
959 | } |
960 | |
961 | if (!suppress_reg_args_size) |
962 | add_args_size_note (insn, stack_pointer_delta); |
963 | } |
964 | |
965 | /* Adjust the stack pointer by ADJUST (an rtx for a number of bytes). |
966 | This pops when ADJUST is positive. ADJUST need not be constant. */ |
967 | |
968 | void |
969 | adjust_stack (rtx adjust) |
970 | { |
971 | if (adjust == const0_rtx) |
972 | return; |
973 | |
  /* We expect all variable-sized adjustments to be a multiple of
     PREFERRED_STACK_BOUNDARY.  */
976 | poly_int64 const_adjust; |
  if (poly_int_rtx_p (adjust, &const_adjust))
978 | stack_pointer_delta -= const_adjust; |
979 | |
  adjust_stack_1 (adjust, false);
981 | } |
982 | |
983 | /* Adjust the stack pointer by minus ADJUST (an rtx for a number of bytes). |
984 | This pushes when ADJUST is positive. ADJUST need not be constant. */ |
985 | |
986 | void |
987 | anti_adjust_stack (rtx adjust) |
988 | { |
989 | if (adjust == const0_rtx) |
990 | return; |
991 | |
  /* We expect all variable-sized adjustments to be a multiple of
     PREFERRED_STACK_BOUNDARY.  */
994 | poly_int64 const_adjust; |
  if (poly_int_rtx_p (adjust, &const_adjust))
996 | stack_pointer_delta += const_adjust; |
997 | |
  adjust_stack_1 (adjust, true);
999 | } |
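
/* As an example, on a STACK_GROWS_DOWNWARD target,
   anti_adjust_stack (GEN_INT (16)) typically reduces to a single insn of
   the form (set (reg sp) (plus (reg sp) (const_int -16))), after
   stack_pointer_delta has been bumped by 16.  (Illustrative.)  */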
1000 | |
1001 | /* Round the size of a block to be pushed up to the boundary required |
1002 | by this machine. SIZE is the desired size, which need not be constant. */ |
1003 | |
1004 | static rtx |
1005 | round_push (rtx size) |
1006 | { |
1007 | rtx align_rtx, alignm1_rtx; |
1008 | |
1009 | if (!SUPPORTS_STACK_ALIGNMENT |
1010 | || crtl->preferred_stack_boundary == MAX_SUPPORTED_STACK_ALIGNMENT) |
1011 | { |
1012 | int align = crtl->preferred_stack_boundary / BITS_PER_UNIT; |
1013 | |
1014 | if (align == 1) |
1015 | return size; |
1016 | |
1017 | if (CONST_INT_P (size)) |
1018 | { |
1019 | HOST_WIDE_INT new_size = (INTVAL (size) + align - 1) / align * align; |
1020 | |
1021 | if (INTVAL (size) != new_size) |
1022 | size = GEN_INT (new_size); |
1023 | return size; |
1024 | } |
1025 | |
1026 | align_rtx = GEN_INT (align); |
1027 | alignm1_rtx = GEN_INT (align - 1); |
1028 | } |
1029 | else |
1030 | { |
1031 | /* If crtl->preferred_stack_boundary might still grow, use |
1032 | virtual_preferred_stack_boundary_rtx instead. This will be |
1033 | substituted by the right value in vregs pass and optimized |
1034 | during combine. */ |
1035 | align_rtx = virtual_preferred_stack_boundary_rtx; |
      alignm1_rtx = force_operand (plus_constant (Pmode, align_rtx, -1),
				   NULL_RTX);
1038 | } |
1039 | |
1040 | /* CEIL_DIV_EXPR needs to worry about the addition overflowing, |
1041 | but we know it can't. So add ourselves and then do |
1042 | TRUNC_DIV_EXPR. */ |
1043 | size = expand_binop (Pmode, add_optab, size, alignm1_rtx, |
1044 | NULL_RTX, 1, OPTAB_LIB_WIDEN); |
1045 | size = expand_divmod (0, TRUNC_DIV_EXPR, Pmode, size, align_rtx, |
1046 | NULL_RTX, 1); |
1047 | size = expand_mult (Pmode, size, align_rtx, NULL_RTX, 1); |
1048 | |
1049 | return size; |
1050 | } |
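
/* Worked example: with a preferred stack boundary of 64 bits, ALIGN is 8,
   so a constant SIZE of 13 rounds to (13 + 7) / 8 * 8 == 16.  The
   add/divide/multiply expansion above computes the same thing when SIZE
   or the boundary is not known at compile time.  */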
1051 | |
1052 | /* Save the stack pointer for the purpose in SAVE_LEVEL. PSAVE is a pointer |
1053 | to a previously-created save area. If no save area has been allocated, |
1054 | this function will allocate one. If a save area is specified, it |
1055 | must be of the proper mode. */ |
1056 | |
1057 | void |
1058 | emit_stack_save (enum save_level save_level, rtx *psave) |
1059 | { |
1060 | rtx sa = *psave; |
1061 | /* The default is that we use a move insn and save in a Pmode object. */ |
1062 | rtx_insn *(*fcn) (rtx, rtx) = gen_move_insn; |
1063 | machine_mode mode = STACK_SAVEAREA_MODE (save_level); |
1064 | |
1065 | /* See if this machine has anything special to do for this kind of save. */ |
1066 | switch (save_level) |
1067 | { |
1068 | case SAVE_BLOCK: |
1069 | if (targetm.have_save_stack_block ()) |
1070 | fcn = targetm.gen_save_stack_block; |
1071 | break; |
1072 | case SAVE_FUNCTION: |
1073 | if (targetm.have_save_stack_function ()) |
1074 | fcn = targetm.gen_save_stack_function; |
1075 | break; |
1076 | case SAVE_NONLOCAL: |
1077 | if (targetm.have_save_stack_nonlocal ()) |
1078 | fcn = targetm.gen_save_stack_nonlocal; |
1079 | break; |
1080 | default: |
1081 | break; |
1082 | } |
1083 | |
1084 | /* If there is no save area and we have to allocate one, do so. Otherwise |
1085 | verify the save area is the proper mode. */ |
1086 | |
1087 | if (sa == 0) |
1088 | { |
1089 | if (mode != VOIDmode) |
1090 | { |
1091 | if (save_level == SAVE_NONLOCAL) |
1092 | *psave = sa = assign_stack_local (mode, GET_MODE_SIZE (mode), 0); |
1093 | else |
1094 | *psave = sa = gen_reg_rtx (mode); |
1095 | } |
1096 | } |
1097 | |
1098 | do_pending_stack_adjust (); |
1099 | if (sa != 0) |
    sa = validize_mem (sa);
1101 | emit_insn (fcn (sa, stack_pointer_rtx)); |
1102 | } |
1103 | |
1104 | /* Restore the stack pointer for the purpose in SAVE_LEVEL. SA is the save |
1105 | area made by emit_stack_save. If it is zero, we have nothing to do. */ |
1106 | |
1107 | void |
1108 | emit_stack_restore (enum save_level save_level, rtx sa) |
1109 | { |
1110 | /* The default is that we use a move insn. */ |
1111 | rtx_insn *(*fcn) (rtx, rtx) = gen_move_insn; |
1112 | |
1113 | /* If stack_realign_drap, the x86 backend emits a prologue that aligns both |
1114 | STACK_POINTER and HARD_FRAME_POINTER. |
1115 | If stack_realign_fp, the x86 backend emits a prologue that aligns only |
1116 | STACK_POINTER. This renders the HARD_FRAME_POINTER unusable for accessing |
1117 | aligned variables, which is reflected in ix86_can_eliminate. |
1118 | We normally still have the realigned STACK_POINTER that we can use. |
1119 | But if there is a stack restore still present at reload, it can trigger |
1120 | mark_not_eliminable for the STACK_POINTER, leaving no way to eliminate |
1121 | FRAME_POINTER into a hard reg. |
1122 | To prevent this situation, we force need_drap if we emit a stack |
1123 | restore. */ |
1124 | if (SUPPORTS_STACK_ALIGNMENT) |
1125 | crtl->need_drap = true; |
1126 | |
1127 | /* See if this machine has anything special to do for this kind of save. */ |
1128 | switch (save_level) |
1129 | { |
1130 | case SAVE_BLOCK: |
1131 | if (targetm.have_restore_stack_block ()) |
1132 | fcn = targetm.gen_restore_stack_block; |
1133 | break; |
1134 | case SAVE_FUNCTION: |
1135 | if (targetm.have_restore_stack_function ()) |
1136 | fcn = targetm.gen_restore_stack_function; |
1137 | break; |
1138 | case SAVE_NONLOCAL: |
1139 | if (targetm.have_restore_stack_nonlocal ()) |
1140 | fcn = targetm.gen_restore_stack_nonlocal; |
1141 | break; |
1142 | default: |
1143 | break; |
1144 | } |
1145 | |
1146 | if (sa != 0) |
1147 | { |
      sa = validize_mem (sa);
1149 | /* These clobbers prevent the scheduler from moving |
1150 | references to variable arrays below the code |
1151 | that deletes (pops) the arrays. */ |
1152 | emit_clobber (gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode))); |
1153 | emit_clobber (gen_rtx_MEM (BLKmode, stack_pointer_rtx)); |
1154 | } |
1155 | |
1156 | discard_pending_stack_adjust (); |
1157 | |
1158 | emit_insn (fcn (stack_pointer_rtx, sa)); |
1159 | } |
1160 | |
1161 | /* Invoke emit_stack_save on the nonlocal_goto_save_area for the current |
1162 | function. This should be called whenever we allocate or deallocate |
1163 | dynamic stack space. */ |
1164 | |
1165 | void |
1166 | update_nonlocal_goto_save_area (void) |
1167 | { |
1168 | tree t_save; |
1169 | rtx r_save; |
1170 | |
1171 | /* The nonlocal_goto_save_area object is an array of N pointers. The |
1172 | first one is used for the frame pointer save; the rest are sized by |
1173 | STACK_SAVEAREA_MODE. Create a reference to array index 1, the first |
1174 | of the stack save area slots. */ |
1175 | t_save = build4 (ARRAY_REF, |
1176 | TREE_TYPE (TREE_TYPE (cfun->nonlocal_goto_save_area)), |
1177 | cfun->nonlocal_goto_save_area, |
1178 | integer_one_node, NULL_TREE, NULL_TREE); |
  r_save = expand_expr (t_save, NULL_RTX, VOIDmode, EXPAND_WRITE);
1180 | |
  emit_stack_save (SAVE_NONLOCAL, &r_save);
1182 | } |
1183 | |
1184 | /* Record a new stack level for the current function. This should be called |
1185 | whenever we allocate or deallocate dynamic stack space. */ |
1186 | |
1187 | void |
1188 | record_new_stack_level (void) |
1189 | { |
1190 | /* Record the new stack level for nonlocal gotos. */ |
1191 | if (cfun->nonlocal_goto_save_area) |
1192 | update_nonlocal_goto_save_area (); |
1193 | |
1194 | /* Record the new stack level for SJLJ exceptions. */ |
1195 | if (targetm_common.except_unwind_info (&global_options) == UI_SJLJ) |
1196 | update_sjlj_context (); |
1197 | } |
1198 | |
1199 | /* Return an rtx doing runtime alignment to REQUIRED_ALIGN on TARGET. */ |
1200 | |
1201 | rtx |
1202 | align_dynamic_address (rtx target, unsigned required_align) |
1203 | { |
1204 | if (required_align == BITS_PER_UNIT) |
1205 | return target; |
1206 | |
1207 | /* CEIL_DIV_EXPR needs to worry about the addition overflowing, |
1208 | but we know it can't. So add ourselves and then do |
1209 | TRUNC_DIV_EXPR. */ |
1210 | target = expand_binop (Pmode, add_optab, target, |
1211 | gen_int_mode (required_align / BITS_PER_UNIT - 1, |
1212 | Pmode), |
1213 | NULL_RTX, 1, OPTAB_LIB_WIDEN); |
1214 | target = expand_divmod (0, TRUNC_DIV_EXPR, Pmode, target, |
1215 | gen_int_mode (required_align / BITS_PER_UNIT, |
1216 | Pmode), |
1217 | NULL_RTX, 1); |
1218 | target = expand_mult (Pmode, target, |
1219 | gen_int_mode (required_align / BITS_PER_UNIT, |
1220 | Pmode), |
1221 | NULL_RTX, 1); |
1222 | |
1223 | return target; |
1224 | } |
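
/* E.g. for REQUIRED_ALIGN == 128 (16 bytes) this emits the equivalent of
   target = (target + 15) / 16 * 16, rounding the address up to the next
   16-byte boundary.  (Illustrative arithmetic.)  */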
1225 | |
1226 | /* Return an rtx through *PSIZE, representing the size of an area of memory to |
1227 | be dynamically pushed on the stack. |
1228 | |
1229 | *PSIZE is an rtx representing the size of the area. |
1230 | |
1231 | SIZE_ALIGN is the alignment (in bits) that we know SIZE has. This |
1232 | parameter may be zero. If so, a proper value will be extracted |
1233 | from SIZE if it is constant, otherwise BITS_PER_UNIT will be assumed. |
1234 | |
1235 | REQUIRED_ALIGN is the alignment (in bits) required for the region |
1236 | of memory. |
1237 | |
1238 | If PSTACK_USAGE_SIZE is not NULL it points to a value that is increased for |
1239 | the additional size returned. */ |
1240 | void |
1241 | get_dynamic_stack_size (rtx *psize, unsigned size_align, |
1242 | unsigned required_align, |
1243 | HOST_WIDE_INT *pstack_usage_size) |
1244 | { |
1245 | rtx size = *psize; |
1246 | |
1247 | /* Ensure the size is in the proper mode. */ |
1248 | if (GET_MODE (size) != VOIDmode && GET_MODE (size) != Pmode) |
1249 | size = convert_to_mode (Pmode, size, 1); |
1250 | |
1251 | if (CONST_INT_P (size)) |
1252 | { |
1253 | unsigned HOST_WIDE_INT lsb; |
1254 | |
1255 | lsb = INTVAL (size); |
1256 | lsb &= -lsb; |
1257 | |
1258 | /* Watch out for overflow truncating to "unsigned". */ |
1259 | if (lsb > UINT_MAX / BITS_PER_UNIT) |
1260 | size_align = 1u << (HOST_BITS_PER_INT - 1); |
1261 | else |
1262 | size_align = (unsigned)lsb * BITS_PER_UNIT; |
1263 | } |
1264 | else if (size_align < BITS_PER_UNIT) |
1265 | size_align = BITS_PER_UNIT; |
1266 | |
1267 | /* We can't attempt to minimize alignment necessary, because we don't |
1268 | know the final value of preferred_stack_boundary yet while executing |
1269 | this code. */ |
1270 | if (crtl->preferred_stack_boundary < PREFERRED_STACK_BOUNDARY) |
1271 | crtl->preferred_stack_boundary = PREFERRED_STACK_BOUNDARY; |
1272 | |
1273 | /* We will need to ensure that the address we return is aligned to |
1274 | REQUIRED_ALIGN. At this point in the compilation, we don't always |
1275 | know the final value of the STACK_DYNAMIC_OFFSET used in function.cc |
1276 | (it might depend on the size of the outgoing parameter lists, for |
1277 | example), so we must preventively align the value. We leave space |
1278 | in SIZE for the hole that might result from the alignment operation. */ |
1279 | |
1280 | unsigned known_align = REGNO_POINTER_ALIGN (VIRTUAL_STACK_DYNAMIC_REGNUM); |
1281 | if (known_align == 0) |
1282 | known_align = BITS_PER_UNIT; |
1283 | if (required_align > known_align) |
1284 | { |
      unsigned extra = (required_align - known_align) / BITS_PER_UNIT;
      size = plus_constant (Pmode, size, extra);
1287 | size = force_operand (size, NULL_RTX); |
1288 | if (size_align > known_align) |
1289 | size_align = known_align; |
1290 | |
1291 | if (flag_stack_usage_info && pstack_usage_size) |
1292 | *pstack_usage_size += extra; |
1293 | } |
1294 | |
1295 | /* Round the size to a multiple of the required stack alignment. |
1296 | Since the stack is presumed to be rounded before this allocation, |
1297 | this will maintain the required alignment. |
1298 | |
1299 | If the stack grows downward, we could save an insn by subtracting |
1300 | SIZE from the stack pointer and then aligning the stack pointer. |
1301 | The problem with this is that the stack pointer may be unaligned |
1302 | between the execution of the subtraction and alignment insns and |
1303 | some machines do not allow this. Even on those that do, some |
1304 | signal handlers malfunction if a signal should occur between those |
1305 | insns. Since this is an extremely rare event, we have no reliable |
1306 | way of knowing which systems have this problem. So we avoid even |
1307 | momentarily mis-aligning the stack. */ |
1308 | if (size_align % MAX_SUPPORTED_STACK_ALIGNMENT != 0) |
1309 | { |
1310 | size = round_push (size); |
1311 | |
1312 | if (flag_stack_usage_info && pstack_usage_size) |
1313 | { |
1314 | int align = crtl->preferred_stack_boundary / BITS_PER_UNIT; |
1315 | *pstack_usage_size = |
1316 | (*pstack_usage_size + align - 1) / align * align; |
1317 | } |
1318 | } |
1319 | |
1320 | *psize = size; |
1321 | } |
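
/* Worked example (illustrative): for a 100-byte SIZE with
   KNOWN_ALIGN == 8 bits and REQUIRED_ALIGN == 64 bits, 7 bytes of slack
   are added for the later alignment step, and the resulting 107 is then
   rounded up to the stack boundary, e.g. to 112 for a 16-byte
   boundary.  */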
1322 | |
1323 | /* Return the number of bytes to "protect" on the stack for -fstack-check. |
1324 | |
1325 | "protect" in the context of -fstack-check means how many bytes we need |
1326 | to always ensure are available on the stack; as a consequence, this is |
1327 | also how many bytes are first skipped when probing the stack. |
1328 | |
1329 | On some targets we want to reuse the -fstack-check prologue support |
1330 | to give a degree of protection against stack clashing style attacks. |
1331 | |
1332 | In that scenario we do not want to skip bytes before probing as that |
1333 | would render the stack clash protections useless. |
1334 | |
1335 | So we never use STACK_CHECK_PROTECT directly. Instead we indirectly |
1336 | use it through this helper, which allows to provide different values |
1337 | for -fstack-check and -fstack-clash-protection. */ |
1338 | |
1339 | HOST_WIDE_INT |
1340 | get_stack_check_protect (void) |
1341 | { |
1342 | if (flag_stack_clash_protection) |
1343 | return 0; |
1344 | |
1345 | return STACK_CHECK_PROTECT; |
1346 | } |
1347 | |
1348 | /* Return an rtx representing the address of an area of memory dynamically |
1349 | pushed on the stack. |
1350 | |
1351 | Any required stack pointer alignment is preserved. |
1352 | |
1353 | SIZE is an rtx representing the size of the area. |
1354 | |
1355 | SIZE_ALIGN is the alignment (in bits) that we know SIZE has. This |
1356 | parameter may be zero. If so, a proper value will be extracted |
1357 | from SIZE if it is constant, otherwise BITS_PER_UNIT will be assumed. |
1358 | |
1359 | REQUIRED_ALIGN is the alignment (in bits) required for the region |
1360 | of memory. |
1361 | |
1362 | MAX_SIZE is an upper bound for SIZE, if SIZE is not constant, or -1 if |
1363 | no such upper bound is known. |
1364 | |
1365 | If CANNOT_ACCUMULATE is set to TRUE, the caller guarantees that the |
1366 | stack space allocated by the generated code cannot be added with itself |
1367 | in the course of the execution of the function. It is always safe to |
1368 | pass FALSE here and the following criterion is sufficient in order to |
1369 | pass TRUE: every path in the CFG that starts at the allocation point and |
1370 | loops to it executes the associated deallocation code. */ |
1371 | |
1372 | rtx |
1373 | allocate_dynamic_stack_space (rtx size, unsigned size_align, |
1374 | unsigned required_align, |
1375 | HOST_WIDE_INT max_size, |
1376 | bool cannot_accumulate) |
1377 | { |
1378 | HOST_WIDE_INT stack_usage_size = -1; |
1379 | rtx_code_label *final_label; |
1380 | rtx final_target, target; |
1381 | rtx addr = (virtuals_instantiated |
	      ? plus_constant (Pmode, stack_pointer_rtx,
			       get_stack_dynamic_offset ())
1384 | : virtual_stack_dynamic_rtx); |
1385 | |
1386 | /* If we're asking for zero bytes, it doesn't matter what we point |
1387 | to since we can't dereference it. But return a reasonable |
1388 | address anyway. */ |
1389 | if (size == const0_rtx) |
1390 | return addr; |
1391 | |
1392 | /* Otherwise, show we're calling alloca or equivalent. */ |
1393 | cfun->calls_alloca = 1; |
1394 | |
1395 | /* If stack usage info is requested, look into the size we are passed. |
1396 | We need to do so this early to avoid the obfuscation that may be |
1397 | introduced later by the various alignment operations. */ |
1398 | if (flag_stack_usage_info) |
1399 | { |
1400 | if (CONST_INT_P (size)) |
1401 | stack_usage_size = INTVAL (size); |
1402 | else if (REG_P (size)) |
1403 | { |
1404 | /* Look into the last emitted insn and see if we can deduce |
1405 | something for the register. */ |
1406 | rtx_insn *insn; |
1407 | rtx set, note; |
1408 | insn = get_last_insn (); |
1409 | if ((set = single_set (insn)) && rtx_equal_p (SET_DEST (set), size)) |
1410 | { |
1411 | if (CONST_INT_P (SET_SRC (set))) |
1412 | stack_usage_size = INTVAL (SET_SRC (set)); |
1413 | else if ((note = find_reg_equal_equiv_note (insn)) |
1414 | && CONST_INT_P (XEXP (note, 0))) |
1415 | stack_usage_size = INTVAL (XEXP (note, 0)); |
1416 | } |
1417 | } |
1418 | |
1419 | /* If the size is not constant, try the maximum size. */ |
1420 | if (stack_usage_size < 0) |
1421 | stack_usage_size = max_size; |
1422 | |
1423 | /* If the size is still not constant, we can't say anything. */ |
1424 | if (stack_usage_size < 0) |
1425 | { |
1426 | current_function_has_unbounded_dynamic_stack_size = 1; |
1427 | stack_usage_size = 0; |
1428 | } |
1429 | } |
1430 | |
  get_dynamic_stack_size (&size, size_align, required_align,
			  &stack_usage_size);
1432 | |
1433 | target = gen_reg_rtx (Pmode); |
1434 | |
1435 | /* The size is supposed to be fully adjusted at this point so record it |
1436 | if stack usage info is requested. */ |
1437 | if (flag_stack_usage_info) |
1438 | { |
1439 | current_function_dynamic_stack_size += stack_usage_size; |
1440 | |
1441 | /* ??? This is gross but the only safe stance in the absence |
1442 | of stack usage oriented flow analysis. */ |
1443 | if (!cannot_accumulate) |
1444 | current_function_has_unbounded_dynamic_stack_size = 1; |
1445 | } |
1446 | |
1447 | do_pending_stack_adjust (); |
1448 | |
1449 | final_label = NULL; |
1450 | final_target = NULL_RTX; |
1451 | |
1452 | /* If we are splitting the stack, we need to ask the backend whether |
1453 | there is enough room on the current stack. If there isn't, or if |
     the backend doesn't know how to tell us, then we need to call a
1455 | function to allocate memory in some other way. This memory will |
1456 | be released when we release the current stack segment. The |
1457 | effect is that stack allocation becomes less efficient, but at |
1458 | least it doesn't cause a stack overflow. */ |
1459 | if (flag_split_stack) |
1460 | { |
1461 | rtx_code_label *available_label; |
1462 | rtx ask, space, func; |
1463 | |
1464 | available_label = NULL; |
1465 | |
1466 | if (targetm.have_split_stack_space_check ()) |
1467 | { |
1468 | available_label = gen_label_rtx (); |
1469 | |
1470 | /* This instruction will branch to AVAILABLE_LABEL if there |
1471 | are SIZE bytes available on the stack. */ |
1472 | emit_insn (targetm.gen_split_stack_space_check |
1473 | (size, available_label)); |
1474 | } |
1475 | |
1476 | /* The __morestack_allocate_stack_space function will allocate |
1477 | memory using malloc. If the alignment of the memory returned |
1478 | by malloc does not meet REQUIRED_ALIGN, we increase SIZE to |
1479 | make sure we allocate enough space. */ |
1480 | if (MALLOC_ABI_ALIGNMENT >= required_align) |
1481 | ask = size; |
1482 | else |
1483 | ask = expand_binop (Pmode, add_optab, size, |
1484 | gen_int_mode (required_align / BITS_PER_UNIT - 1, |
1485 | Pmode), |
1486 | NULL_RTX, 1, OPTAB_LIB_WIDEN); |
1487 | |
      func = init_one_libfunc ("__morestack_allocate_stack_space");
1489 | |
      space = emit_library_call_value (func, target, LCT_NORMAL, Pmode,
				       ask, Pmode);
1492 | |
1493 | if (available_label == NULL_RTX) |
1494 | return space; |
1495 | |
1496 | final_target = gen_reg_rtx (Pmode); |
1497 | |
1498 | emit_move_insn (final_target, space); |
1499 | |
1500 | final_label = gen_label_rtx (); |
1501 | emit_jump (final_label); |
1502 | |
1503 | emit_label (available_label); |
1504 | } |
1505 | |
  /* We should always be called at the top level, and the stack should be
     properly aligned at this point.  */
1508 | gcc_assert (multiple_p (stack_pointer_delta, |
1509 | PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT)); |
1510 | |
1511 | /* If needed, check that we have the required amount of stack. Take into |
1512 | account what has already been checked. */ |
1513 | if (STACK_CHECK_MOVING_SP) |
1514 | ; |
1515 | else if (flag_stack_check == GENERIC_STACK_CHECK) |
1516 | probe_stack_range (STACK_OLD_CHECK_PROTECT + STACK_CHECK_MAX_FRAME_SIZE, |
1517 | size); |
1518 | else if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK) |
1519 | probe_stack_range (get_stack_check_protect (), size); |
1520 | |
1521 | /* Don't let anti_adjust_stack emit notes. */ |
1522 | suppress_reg_args_size = true; |
1523 | |
1524 | /* Perform the required allocation from the stack. Some systems do |
1525 | this differently than simply incrementing/decrementing from the |
1526 | stack pointer, such as acquiring the space by calling malloc(). */ |
1527 | if (targetm.have_allocate_stack ()) |
1528 | { |
1529 | class expand_operand ops[2]; |
1530 | /* We don't have to check against the predicate for operand 0 since |
1531 | TARGET is known to be a pseudo of the proper mode, which must |
1532 | be valid for the operand. */ |
      create_fixed_operand (&ops[0], target);
      create_convert_operand_to (&ops[1], size, STACK_SIZE_MODE, true);
      expand_insn (targetm.code_for_allocate_stack, 2, ops);
1536 | } |
1537 | else |
1538 | { |
1539 | poly_int64 saved_stack_pointer_delta; |
1540 | |
1541 | if (!STACK_GROWS_DOWNWARD) |
1542 | emit_move_insn (target, force_operand (addr, target)); |
1543 | |
1544 | /* Check stack bounds if necessary. */ |
1545 | if (crtl->limit_stack) |
1546 | { |
1547 | rtx available; |
1548 | rtx_code_label *space_available = gen_label_rtx (); |
1549 | if (STACK_GROWS_DOWNWARD) |
1550 | available = expand_binop (Pmode, sub_optab, |
1551 | stack_pointer_rtx, stack_limit_rtx, |
1552 | NULL_RTX, 1, OPTAB_WIDEN); |
1553 | else |
1554 | available = expand_binop (Pmode, sub_optab, |
1555 | stack_limit_rtx, stack_pointer_rtx, |
1556 | NULL_RTX, 1, OPTAB_WIDEN); |
1557 | |
1558 | emit_cmp_and_jump_insns (available, size, GEU, NULL_RTX, Pmode, 1, |
1559 | space_available); |
1560 | if (targetm.have_trap ()) |
1561 | emit_insn (targetm.gen_trap ()); |
1562 | else |
1563 | error ("stack limits not supported on this target" ); |
1564 | emit_barrier (); |
1565 | emit_label (space_available); |
1566 | } |
1567 | |
1568 | saved_stack_pointer_delta = stack_pointer_delta; |
1569 | |
1570 | /* If stack checking or stack clash protection is requested, |
1571 | then probe the stack while allocating space from it. */ |
1572 | if (flag_stack_check && STACK_CHECK_MOVING_SP) |
1573 | anti_adjust_stack_and_probe (size, false); |
1574 | else if (flag_stack_clash_protection) |
1575 | anti_adjust_stack_and_probe_stack_clash (size); |
1576 | else |
	anti_adjust_stack (size);
1578 | |
1579 | /* Even if size is constant, don't modify stack_pointer_delta. |
1580 | The constant size alloca should preserve |
1581 | crtl->preferred_stack_boundary alignment. */ |
1582 | stack_pointer_delta = saved_stack_pointer_delta; |
1583 | |
1584 | if (STACK_GROWS_DOWNWARD) |
1585 | emit_move_insn (target, force_operand (addr, target)); |
1586 | } |
1587 | |
1588 | suppress_reg_args_size = false; |
1589 | |
1590 | /* Finish up the split stack handling. */ |
1591 | if (final_label != NULL_RTX) |
1592 | { |
1593 | gcc_assert (flag_split_stack); |
1594 | emit_move_insn (final_target, target); |
1595 | emit_label (final_label); |
1596 | target = final_target; |
1597 | } |
1598 | |
1599 | target = align_dynamic_address (target, required_align); |
1600 | |
1601 | /* Now that we've committed to a return value, mark its alignment. */ |
1602 | mark_reg_pointer (target, required_align); |
1603 | |
1604 | /* Record the new stack level. */ |
1605 | record_new_stack_level (); |
1606 | |
1607 | return target; |
1608 | } |
1609 | |
1610 | /* Return an rtx representing the address of an area of memory already |
1611 | statically pushed onto the stack in the virtual stack vars area. (It is |
1612 | assumed that the area is allocated in the function prologue.) |
1613 | |
1614 | Any required stack pointer alignment is preserved. |
1615 | |
1616 | OFFSET is the offset of the area into the virtual stack vars area. |
1617 | |
1618 | REQUIRED_ALIGN is the alignment (in bits) required for the region |
1619 | of memory. |
1620 | |
1621 | BASE is the rtx of the base of this virtual stack vars area. |
1622 | The only time this is not `virtual_stack_vars_rtx` is when tagging pointers |
1623 | on the stack. */ |
1624 | |
1625 | rtx |
1626 | get_dynamic_stack_base (poly_int64 offset, unsigned required_align, rtx base) |
1627 | { |
1628 | rtx target; |
1629 | |
1630 | if (crtl->preferred_stack_boundary < PREFERRED_STACK_BOUNDARY) |
1631 | crtl->preferred_stack_boundary = PREFERRED_STACK_BOUNDARY; |
1632 | |
1633 | target = gen_reg_rtx (Pmode); |
1634 | emit_move_insn (target, base); |
1635 | target = expand_binop (Pmode, add_optab, target, |
1636 | gen_int_mode (offset, Pmode), |
1637 | NULL_RTX, 1, OPTAB_LIB_WIDEN); |
1638 | target = align_dynamic_address (target, required_align); |
1639 | |
1640 | /* Now that we've committed to a return value, mark its alignment. */ |
1641 | mark_reg_pointer (target, required_align); |
1642 | |
1643 | return target; |
1644 | } |
1645 | |
1646 | /* A front end may want to override GCC's stack checking by providing a |
1647 | run-time routine to call to check the stack, so provide a mechanism for |
1648 | calling that routine. */ |
1649 | |
1650 | static GTY(()) rtx stack_check_libfunc; |
1651 | |
1652 | void |
1653 | set_stack_check_libfunc (const char *libfunc_name) |
1654 | { |
1655 | gcc_assert (stack_check_libfunc == NULL_RTX); |
1656 | stack_check_libfunc = gen_rtx_SYMBOL_REF (Pmode, libfunc_name); |
1657 | tree ptype |
1658 | = Pmode == ptr_mode |
1659 | ? ptr_type_node |
1660 | : lang_hooks.types.type_for_mode (Pmode, 1); |
1661 | tree ftype |
1662 | = build_function_type_list (void_type_node, ptype, NULL_TREE); |
1663 | tree decl = build_decl (UNKNOWN_LOCATION, FUNCTION_DECL, |
1664 | get_identifier (libfunc_name), ftype); |
1665 | DECL_EXTERNAL (decl) = 1; |
1666 | SET_SYMBOL_REF_DECL (stack_check_libfunc, decl); |
1667 | } |
1668 | |
1669 | /* Emit one stack probe at ADDRESS, an address within the stack. */ |
1670 | |
1671 | void |
1672 | emit_stack_probe (rtx address) |
1673 | { |
1674 | if (targetm.have_probe_stack_address ()) |
1675 | { |
1676 | class expand_operand ops[1]; |
1677 | insn_code icode = targetm.code_for_probe_stack_address; |
      create_address_operand (ops, address);
      maybe_legitimize_operands (icode, 0, 1, ops);
      expand_insn (icode, 1, ops);
1681 | } |
1682 | else |
1683 | { |
1684 | rtx memref = gen_rtx_MEM (word_mode, address); |
1685 | |
1686 | MEM_VOLATILE_P (memref) = 1; |
      memref = validize_mem (memref);
1688 | |
1689 | /* See if we have an insn to probe the stack. */ |
1690 | if (targetm.have_probe_stack ()) |
1691 | emit_insn (targetm.gen_probe_stack (memref)); |
1692 | else |
1693 | emit_move_insn (memref, const0_rtx); |
1694 | } |
1695 | } |
1696 | |
1697 | /* Probe a range of stack addresses from FIRST to FIRST+SIZE, inclusive. |
   FIRST is a constant and SIZE is a Pmode RTX.  These are offsets from
1699 | the current stack pointer. STACK_GROWS_DOWNWARD says whether to add |
1700 | or subtract them from the stack pointer. */ |
1701 | |
1702 | #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP) |
1703 | |
1704 | #if STACK_GROWS_DOWNWARD |
1705 | #define STACK_GROW_OP MINUS |
1706 | #define STACK_GROW_OPTAB sub_optab |
1707 | #define STACK_GROW_OFF(off) -(off) |
1708 | #else |
1709 | #define STACK_GROW_OP PLUS |
1710 | #define STACK_GROW_OPTAB add_optab |
1711 | #define STACK_GROW_OFF(off) (off) |
1712 | #endif |
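
/* As a numeric illustration (figures are examples only, not from any
   particular target): with STACK_CHECK_PROBE_INTERVAL_EXP == 12,
   PROBE_INTERVAL is 4096, and on a downward-growing stack

     STACK_GROW_OP         is MINUS
     STACK_GROW_OPTAB      is sub_optab
     STACK_GROW_OFF (4096) is -4096

   so growing the stack by one interval emits an access below the
   current stack pointer.  */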
1713 | |
1714 | void |
1715 | probe_stack_range (HOST_WIDE_INT first, rtx size) |
1716 | { |
1717 | /* First ensure SIZE is Pmode. */ |
1718 | if (GET_MODE (size) != VOIDmode && GET_MODE (size) != Pmode) |
1719 | size = convert_to_mode (Pmode, size, 1); |
1720 | |
1721 | /* Next see if we have a function to check the stack. */ |
1722 | if (stack_check_libfunc) |
1723 | { |
1724 | rtx addr = memory_address (Pmode, |
1725 | gen_rtx_fmt_ee (STACK_GROW_OP, Pmode, |
1726 | stack_pointer_rtx, |
1727 | plus_constant (Pmode, |
1728 | size, first))); |
      emit_library_call (stack_check_libfunc, LCT_THROW, VOIDmode,
			 addr, Pmode);
1731 | } |
1732 | |
1733 | /* Next see if we have an insn to check the stack. */ |
1734 | else if (targetm.have_check_stack ()) |
1735 | { |
1736 | class expand_operand ops[1]; |
1737 | rtx addr = memory_address (Pmode, |
1738 | gen_rtx_fmt_ee (STACK_GROW_OP, Pmode, |
1739 | stack_pointer_rtx, |
1740 | plus_constant (Pmode, |
1741 | size, first))); |
1742 | bool success; |
      create_input_operand (&ops[0], addr, Pmode);
      success = maybe_expand_insn (targetm.code_for_check_stack, 1, ops);
1745 | gcc_assert (success); |
1746 | } |
1747 | |
1748 | /* Otherwise we have to generate explicit probes. If we have a constant |
1749 | small number of them to generate, that's the easy case. */ |
1750 | else if (CONST_INT_P (size) && INTVAL (size) < 7 * PROBE_INTERVAL) |
1751 | { |
1752 | HOST_WIDE_INT isize = INTVAL (size), i; |
1753 | rtx addr; |
1754 | |
1755 | /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until |
1756 | it exceeds SIZE. If only one probe is needed, this will not |
1757 | generate any code. Then probe at FIRST + SIZE. */ |
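      /* E.g. (figures illustrative, assuming PROBE_INTERVAL == 4096):
	 for FIRST == 0 and SIZE == 10000, the loop probes at offsets
	 4096 and 8192, and the trailing probe lands at offset 10000.  */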
1758 | for (i = PROBE_INTERVAL; i < isize; i += PROBE_INTERVAL) |
1759 | { |
1760 | addr = memory_address (Pmode, |
1761 | plus_constant (Pmode, stack_pointer_rtx, |
1762 | STACK_GROW_OFF (first + i))); |
	  emit_stack_probe (addr);
1764 | } |
1765 | |
1766 | addr = memory_address (Pmode, |
1767 | plus_constant (Pmode, stack_pointer_rtx, |
1768 | STACK_GROW_OFF (first + isize))); |
      emit_stack_probe (addr);
1770 | } |
1771 | |
1772 | /* In the variable case, do the same as above, but in a loop. Note that we |
1773 | must be extra careful with variables wrapping around because we might be |
1774 | at the very top (or the very bottom) of the address space and we have to |
1775 | be able to handle this case properly; in particular, we use an equality |
1776 | test for the loop condition. */ |
1777 | else |
1778 | { |
1779 | rtx rounded_size, rounded_size_op, test_addr, last_addr, temp; |
1780 | rtx_code_label *loop_lab = gen_label_rtx (); |
1781 | rtx_code_label *end_lab = gen_label_rtx (); |
1782 | |
1783 | /* Step 1: round SIZE to the previous multiple of the interval. */ |
1784 | |
1785 | /* ROUNDED_SIZE = SIZE & -PROBE_INTERVAL */ |
1786 | rounded_size |
	= simplify_gen_binary (AND, Pmode, size,
			       gen_int_mode (-PROBE_INTERVAL, Pmode));
1789 | rounded_size_op = force_operand (rounded_size, NULL_RTX); |
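      /* For instance (figures illustrative): with PROBE_INTERVAL == 4096
	 and SIZE == 12345, ROUNDED_SIZE is 12345 & -4096 == 12288, i.e.
	 three full intervals; the remaining 57 bytes are probed by the
	 residual handling in step 4 below.  */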
1790 | |
1791 | |
1792 | /* Step 2: compute initial and final value of the loop counter. */ |
1793 | |
1794 | /* TEST_ADDR = SP + FIRST. */ |
1795 | test_addr = force_operand (gen_rtx_fmt_ee (STACK_GROW_OP, Pmode, |
1796 | stack_pointer_rtx, |
1797 | gen_int_mode (first, Pmode)), |
1798 | NULL_RTX); |
1799 | |
1800 | /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */ |
1801 | last_addr = force_operand (gen_rtx_fmt_ee (STACK_GROW_OP, Pmode, |
1802 | test_addr, |
1803 | rounded_size_op), NULL_RTX); |
1804 | |
1805 | |
1806 | /* Step 3: the loop |
1807 | |
1808 | while (TEST_ADDR != LAST_ADDR) |
1809 | { |
1810 | TEST_ADDR = TEST_ADDR + PROBE_INTERVAL |
1811 | probe at TEST_ADDR |
1812 | } |
1813 | |
1814 | probes at FIRST + N * PROBE_INTERVAL for values of N from 1 |
1815 | until it is equal to ROUNDED_SIZE. */ |
1816 | |
1817 | emit_label (loop_lab); |
1818 | |
1819 | /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */ |
1820 | emit_cmp_and_jump_insns (test_addr, last_addr, EQ, NULL_RTX, Pmode, 1, |
1821 | end_lab); |
1822 | |
1823 | /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */ |
1824 | temp = expand_binop (Pmode, STACK_GROW_OPTAB, test_addr, |
1825 | gen_int_mode (PROBE_INTERVAL, Pmode), test_addr, |
1826 | 1, OPTAB_WIDEN); |
1827 | |
1828 | /* There is no guarantee that expand_binop constructs its result |
1829 | in TEST_ADDR. So copy into TEST_ADDR if necessary. */ |
1830 | if (temp != test_addr) |
1831 | emit_move_insn (test_addr, temp); |
1832 | |
1833 | /* Probe at TEST_ADDR. */ |
      emit_stack_probe (test_addr);
1835 | |
1836 | emit_jump (loop_lab); |
1837 | |
1838 | emit_label (end_lab); |
1839 | |
1840 | |
1841 | /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time |
1842 | that SIZE is equal to ROUNDED_SIZE. */ |
1843 | |
1844 | /* TEMP = SIZE - ROUNDED_SIZE. */ |
      temp = simplify_gen_binary (MINUS, Pmode, size, rounded_size);
1846 | if (temp != const0_rtx) |
1847 | { |
1848 | rtx addr; |
1849 | |
1850 | if (CONST_INT_P (temp)) |
1851 | { |
	      /* Use [base + disp] addressing mode if supported.  */
1853 | HOST_WIDE_INT offset = INTVAL (temp); |
1854 | addr = memory_address (Pmode, |
1855 | plus_constant (Pmode, last_addr, |
1856 | STACK_GROW_OFF (offset))); |
1857 | } |
1858 | else |
1859 | { |
1860 | /* Manual CSE if the difference is not known at compile-time. */ |
1861 | temp = gen_rtx_MINUS (Pmode, size, rounded_size_op); |
1862 | addr = memory_address (Pmode, |
1863 | gen_rtx_fmt_ee (STACK_GROW_OP, Pmode, |
1864 | last_addr, temp)); |
1865 | } |
1866 | |
	  emit_stack_probe (addr);
1868 | } |
1869 | } |
1870 | |
1871 | /* Make sure nothing is scheduled before we are done. */ |
1872 | emit_insn (gen_blockage ()); |
1873 | } |
1874 | |
1875 | /* Compute parameters for stack clash probing a dynamic stack |
1876 | allocation of SIZE bytes. |
1877 | |
1878 | We compute ROUNDED_SIZE, LAST_ADDR, RESIDUAL and PROBE_INTERVAL. |
1879 | |
1880 | Additionally we conditionally dump the type of probing that will |
1881 | be needed given the values computed. */ |
1882 | |
1883 | void |
1884 | compute_stack_clash_protection_loop_data (rtx *rounded_size, rtx *last_addr, |
1885 | rtx *residual, |
1886 | HOST_WIDE_INT *probe_interval, |
1887 | rtx size) |
1888 | { |
  /* Round SIZE down to a multiple of
     STACK_CLASH_PROTECTION_PROBE_INTERVAL.  */
  *probe_interval
    = 1 << param_stack_clash_protection_probe_interval;
  *rounded_size = simplify_gen_binary (AND, Pmode, size,
				       GEN_INT (-*probe_interval));
1894 | |
1895 | /* Compute the value of the stack pointer for the last iteration. |
1896 | It's just SP + ROUNDED_SIZE. */ |
1897 | rtx rounded_size_op = force_operand (*rounded_size, NULL_RTX); |
1898 | *last_addr = force_operand (gen_rtx_fmt_ee (STACK_GROW_OP, Pmode, |
1899 | stack_pointer_rtx, |
1900 | rounded_size_op), |
1901 | NULL_RTX); |
1902 | |
  /* Compute any residuals not allocated by the loop above.  Residuals
     are just SIZE - ROUNDED_SIZE.  */
  *residual = simplify_gen_binary (MINUS, Pmode, size, *rounded_size);
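
  /* A worked example (figures illustrative): for SIZE == 0x2100 with
     *PROBE_INTERVAL == 0x1000, *ROUNDED_SIZE is 0x2000, *LAST_ADDR is
     the stack pointer grown by 0x2000, and *RESIDUAL is 0x100.  */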
1906 | |
1907 | /* Dump key information to make writing tests easy. */ |
1908 | if (dump_file) |
1909 | { |
1910 | if (*rounded_size == CONST0_RTX (Pmode)) |
	fprintf (dump_file,
		 "Stack clash skipped dynamic allocation and probing loop.\n");
      else if (CONST_INT_P (*rounded_size)
	       && INTVAL (*rounded_size) <= 4 * *probe_interval)
	fprintf (dump_file,
		 "Stack clash dynamic allocation and probing inline.\n");
      else if (CONST_INT_P (*rounded_size))
	fprintf (dump_file,
		 "Stack clash dynamic allocation and probing in "
		 "rotated loop.\n");
      else
	fprintf (dump_file,
		 "Stack clash dynamic allocation and probing in loop.\n");

      if (*residual != CONST0_RTX (Pmode))
	fprintf (dump_file,
		 "Stack clash dynamic allocation and probing residuals.\n");
      else
	fprintf (dump_file,
		 "Stack clash skipped dynamic allocation and "
		 "probing residuals.\n");
1932 | } |
1933 | } |
1934 | |
1935 | /* Emit the start of an allocate/probe loop for stack |
1936 | clash protection. |
1937 | |
1938 | LOOP_LAB and END_LAB are returned for use when we emit the |
1939 | end of the loop. |
1940 | |
   LAST_ADDR is the value for SP which stops the loop.  */
1942 | void |
1943 | emit_stack_clash_protection_probe_loop_start (rtx *loop_lab, |
1944 | rtx *end_lab, |
1945 | rtx last_addr, |
1946 | bool rotated) |
1947 | { |
1948 | /* Essentially we want to emit any setup code, the top of loop |
1949 | label and the comparison at the top of the loop. */ |
1950 | *loop_lab = gen_label_rtx (); |
1951 | *end_lab = gen_label_rtx (); |
1952 | |
1953 | emit_label (*loop_lab); |
1954 | if (!rotated) |
1955 | emit_cmp_and_jump_insns (stack_pointer_rtx, last_addr, EQ, NULL_RTX, |
1956 | Pmode, 1, *end_lab); |
1957 | } |
1958 | |
1959 | /* Emit the end of a stack clash probing loop. |
1960 | |
1961 | This consists of just the jump back to LOOP_LAB and |
1962 | emitting END_LOOP after the loop. */ |
1963 | |
1964 | void |
1965 | emit_stack_clash_protection_probe_loop_end (rtx loop_lab, rtx end_loop, |
1966 | rtx last_addr, bool rotated) |
1967 | { |
1968 | if (rotated) |
1969 | emit_cmp_and_jump_insns (stack_pointer_rtx, last_addr, NE, NULL_RTX, |
1970 | Pmode, 1, loop_lab); |
1971 | else |
1972 | emit_jump (loop_lab); |
1973 | |
  emit_label (end_loop);
}
1977 | |
1978 | /* Adjust the stack pointer by minus SIZE (an rtx for a number of bytes) |
1979 | while probing it. This pushes when SIZE is positive. SIZE need not |
1980 | be constant. |
1981 | |
   This differs subtly from anti_adjust_stack_and_probe, in order to
   prevent stack-clash attacks:
1984 | |
1985 | 1. It must assume no knowledge of the probing state, any allocation |
1986 | must probe. |
1987 | |
1988 | Consider the case of a 1 byte alloca in a loop. If the sum of the |
1989 | allocations is large, then this could be used to jump the guard if |
1990 | probes were not emitted. |
1991 | |
1992 | 2. It never skips probes, whereas anti_adjust_stack_and_probe will |
1993 | skip the probe on the first PROBE_INTERVAL on the assumption it |
1994 | was already done in the prologue and in previous allocations. |
1995 | |
1996 | 3. It only allocates and probes SIZE bytes, it does not need to |
1997 | allocate/probe beyond that because this probing style does not |
1998 | guarantee signal handling capability if the guard is hit. */ |
1999 | |
2000 | void |
2001 | anti_adjust_stack_and_probe_stack_clash (rtx size) |
2002 | { |
2003 | /* First ensure SIZE is Pmode. */ |
2004 | if (GET_MODE (size) != VOIDmode && GET_MODE (size) != Pmode) |
2005 | size = convert_to_mode (Pmode, size, 1); |
2006 | |
2007 | /* We can get here with a constant size on some targets. */ |
2008 | rtx rounded_size, last_addr, residual; |
2009 | HOST_WIDE_INT probe_interval, probe_range; |
2010 | bool target_probe_range_p = false; |
  compute_stack_clash_protection_loop_data (&rounded_size, &last_addr,
					    &residual, &probe_interval, size);
2013 | |
2014 | /* Get the back-end specific probe ranges. */ |
2015 | probe_range = targetm.stack_clash_protection_alloca_probe_range (); |
2016 | target_probe_range_p = probe_range != 0; |
2017 | gcc_assert (probe_range >= 0); |
2018 | |
2019 | /* If no back-end specific range defined, default to the top of the newly |
2020 | allocated range. */ |
2021 | if (probe_range == 0) |
    probe_range = probe_interval - GET_MODE_SIZE (word_mode);
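
  /* E.g. (figures illustrative): with a 4096-byte probe interval and an
     8-byte word, the default probe_range is 4088, i.e. the last word of
     each newly allocated interval.  */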
2023 | |
2024 | if (rounded_size != CONST0_RTX (Pmode)) |
2025 | { |
2026 | if (CONST_INT_P (rounded_size) |
2027 | && INTVAL (rounded_size) <= 4 * probe_interval) |
2028 | { |
2029 | for (HOST_WIDE_INT i = 0; |
2030 | i < INTVAL (rounded_size); |
2031 | i += probe_interval) |
2032 | { |
2033 | anti_adjust_stack (GEN_INT (probe_interval)); |
	      /* The prologue does not probe residuals.  Thus the offset
		 here is chosen to probe just beyond what the prologue
		 had already allocated.  */
	      emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
					       probe_range));
2039 | |
2040 | emit_insn (gen_blockage ()); |
2041 | } |
2042 | } |
2043 | else |
2044 | { |
2045 | rtx loop_lab, end_loop; |
2046 | bool rotate_loop = CONST_INT_P (rounded_size); |
	  emit_stack_clash_protection_probe_loop_start (&loop_lab, &end_loop,
							last_addr, rotate_loop);
2049 | |
2050 | anti_adjust_stack (GEN_INT (probe_interval)); |
2051 | |
	  /* The prologue does not probe residuals.  Thus the offset here
	     is chosen to probe just beyond what the prologue had already
	     allocated.  */
	  emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
					   probe_range));
2057 | |
	  emit_stack_clash_protection_probe_loop_end (loop_lab, end_loop,
						      last_addr, rotate_loop);
2060 | emit_insn (gen_blockage ()); |
2061 | } |
2062 | } |
2063 | |
2064 | if (residual != CONST0_RTX (Pmode)) |
2065 | { |
2066 | rtx label = NULL_RTX; |
2067 | /* RESIDUAL could be zero at runtime and in that case *sp could |
2068 | hold live data. Furthermore, we do not want to probe into the |
2069 | red zone. |
2070 | |
	 If TARGET_PROBE_RANGE_P then the target has promised it's safe to
	 probe at offset 0, so we no longer have to check for RESIDUAL == 0.
	 However we still need to probe at the right offset when
	 RESIDUAL > PROBE_RANGE, in which case we probe at PROBE_RANGE.

	 If !TARGET_PROBE_RANGE_P then just guard the probe at *sp on
	 RESIDUAL != 0 at runtime if RESIDUAL is not a compile-time
	 constant.  */
      anti_adjust_stack (residual);
2080 | |
2081 | if (!CONST_INT_P (residual)) |
2082 | { |
2083 | label = gen_label_rtx (); |
2084 | rtx_code op = target_probe_range_p ? LT : EQ; |
2085 | rtx probe_cmp_value = target_probe_range_p |
2086 | ? gen_rtx_CONST_INT (GET_MODE (residual), probe_range) |
2087 | : CONST0_RTX (GET_MODE (residual)); |
2088 | |
2089 | if (target_probe_range_p) |
2090 | emit_stack_probe (stack_pointer_rtx); |
2091 | |
2092 | emit_cmp_and_jump_insns (residual, probe_cmp_value, |
2093 | op, NULL_RTX, Pmode, 1, label); |
2094 | } |
2095 | |
2096 | rtx x = NULL_RTX; |
2097 | |
2098 | /* If RESIDUAL isn't a constant and TARGET_PROBE_RANGE_P then we probe up |
2099 | by the ABI defined safe value. */ |
2100 | if (!CONST_INT_P (residual) && target_probe_range_p) |
2101 | x = GEN_INT (probe_range); |
      /* If RESIDUAL is a constant but smaller than the ABI-defined safe
	 value, we still want to probe up, but the safest amount is a word.  */
2104 | else if (target_probe_range_p) |
2105 | { |
2106 | if (INTVAL (residual) <= probe_range) |
2107 | x = GEN_INT (GET_MODE_SIZE (word_mode)); |
2108 | else |
2109 | x = GEN_INT (probe_range); |
2110 | } |
2111 | else |
2112 | /* If nothing else, probe at the top of the new allocation. */ |
	x = plus_constant (Pmode, residual, -GET_MODE_SIZE (word_mode));
2114 | |
2115 | emit_stack_probe (gen_rtx_PLUS (Pmode, stack_pointer_rtx, x)); |
2116 | |
2117 | emit_insn (gen_blockage ()); |
2118 | if (!CONST_INT_P (residual)) |
2119 | emit_label (label); |
2120 | } |
2121 | } |
2122 | |
2123 | |
2124 | /* Adjust the stack pointer by minus SIZE (an rtx for a number of bytes) |
2125 | while probing it. This pushes when SIZE is positive. SIZE need not |
2126 | be constant. If ADJUST_BACK is true, adjust back the stack pointer |
2127 | by plus SIZE at the end. */ |
2128 | |
2129 | void |
2130 | anti_adjust_stack_and_probe (rtx size, bool adjust_back) |
2131 | { |
  /* We skip the probe for the first interval + a small dope of 4 words and
     probe that many bytes past the specified size to maintain a protection
     area at the bottom of the stack.  */
2135 | const int dope = 4 * UNITS_PER_WORD; |
2136 | |
2137 | /* First ensure SIZE is Pmode. */ |
2138 | if (GET_MODE (size) != VOIDmode && GET_MODE (size) != Pmode) |
2139 | size = convert_to_mode (Pmode, size, 1); |
2140 | |
2141 | /* If we have a constant small number of probes to generate, that's the |
2142 | easy case. */ |
2143 | if (CONST_INT_P (size) && INTVAL (size) < 7 * PROBE_INTERVAL) |
2144 | { |
2145 | HOST_WIDE_INT isize = INTVAL (size), i; |
2146 | bool first_probe = true; |
2147 | |
2148 | /* Adjust SP and probe at PROBE_INTERVAL + N * PROBE_INTERVAL for |
2149 | values of N from 1 until it exceeds SIZE. If only one probe is |
2150 | needed, this will not generate any code. Then adjust and probe |
2151 | to PROBE_INTERVAL + SIZE. */ |
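      /* Numeric illustration (figures only, assuming PROBE_INTERVAL
	 == 4096 and dope == 32): for SIZE == 10000 this emits
	 adjustments of 8224, 4096 and 1808 bytes, probing after each,
	 for a total of SIZE + PROBE_INTERVAL + dope == 14128 bytes.  */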
2152 | for (i = PROBE_INTERVAL; i < isize; i += PROBE_INTERVAL) |
2153 | { |
2154 | if (first_probe) |
2155 | { |
2156 | anti_adjust_stack (GEN_INT (2 * PROBE_INTERVAL + dope)); |
2157 | first_probe = false; |
2158 | } |
2159 | else |
2160 | anti_adjust_stack (GEN_INT (PROBE_INTERVAL)); |
2161 | emit_stack_probe (stack_pointer_rtx); |
2162 | } |
2163 | |
2164 | if (first_probe) |
	anti_adjust_stack (plus_constant (Pmode, size, PROBE_INTERVAL + dope));
      else
	anti_adjust_stack (plus_constant (Pmode, size, PROBE_INTERVAL - i));
2168 | emit_stack_probe (stack_pointer_rtx); |
2169 | } |
2170 | |
2171 | /* In the variable case, do the same as above, but in a loop. Note that we |
2172 | must be extra careful with variables wrapping around because we might be |
2173 | at the very top (or the very bottom) of the address space and we have to |
2174 | be able to handle this case properly; in particular, we use an equality |
2175 | test for the loop condition. */ |
2176 | else |
2177 | { |
2178 | rtx rounded_size, rounded_size_op, last_addr, temp; |
2179 | rtx_code_label *loop_lab = gen_label_rtx (); |
2180 | rtx_code_label *end_lab = gen_label_rtx (); |
2181 | |
2182 | |
2183 | /* Step 1: round SIZE to the previous multiple of the interval. */ |
2184 | |
2185 | /* ROUNDED_SIZE = SIZE & -PROBE_INTERVAL */ |
2186 | rounded_size |
	= simplify_gen_binary (AND, Pmode, size,
			       gen_int_mode (-PROBE_INTERVAL, Pmode));
2189 | rounded_size_op = force_operand (rounded_size, NULL_RTX); |
2190 | |
2191 | |
2192 | /* Step 2: compute initial and final value of the loop counter. */ |
2193 | |
2194 | /* SP = SP_0 + PROBE_INTERVAL. */ |
2195 | anti_adjust_stack (GEN_INT (PROBE_INTERVAL + dope)); |
2196 | |
2197 | /* LAST_ADDR = SP_0 + PROBE_INTERVAL + ROUNDED_SIZE. */ |
2198 | last_addr = force_operand (gen_rtx_fmt_ee (STACK_GROW_OP, Pmode, |
2199 | stack_pointer_rtx, |
2200 | rounded_size_op), NULL_RTX); |
2201 | |
2202 | |
2203 | /* Step 3: the loop |
2204 | |
2205 | while (SP != LAST_ADDR) |
2206 | { |
2207 | SP = SP + PROBE_INTERVAL |
2208 | probe at SP |
2209 | } |
2210 | |
2211 | adjusts SP and probes at PROBE_INTERVAL + N * PROBE_INTERVAL for |
2212 | values of N from 1 until it is equal to ROUNDED_SIZE. */ |
2213 | |
2214 | emit_label (loop_lab); |
2215 | |
2216 | /* Jump to END_LAB if SP == LAST_ADDR. */ |
2217 | emit_cmp_and_jump_insns (stack_pointer_rtx, last_addr, EQ, NULL_RTX, |
2218 | Pmode, 1, end_lab); |
2219 | |
2220 | /* SP = SP + PROBE_INTERVAL and probe at SP. */ |
2221 | anti_adjust_stack (GEN_INT (PROBE_INTERVAL)); |
2222 | emit_stack_probe (stack_pointer_rtx); |
2223 | |
2224 | emit_jump (loop_lab); |
2225 | |
2226 | emit_label (end_lab); |
2227 | |
2228 | |
2229 | /* Step 4: adjust SP and probe at PROBE_INTERVAL + SIZE if we cannot |
2230 | assert at compile-time that SIZE is equal to ROUNDED_SIZE. */ |
2231 | |
2232 | /* TEMP = SIZE - ROUNDED_SIZE. */ |
      temp = simplify_gen_binary (MINUS, Pmode, size, rounded_size);
2234 | if (temp != const0_rtx) |
2235 | { |
2236 | /* Manual CSE if the difference is not known at compile-time. */ |
2237 | if (GET_CODE (temp) != CONST_INT) |
2238 | temp = gen_rtx_MINUS (Pmode, size, rounded_size_op); |
	  anti_adjust_stack (temp);
2240 | emit_stack_probe (stack_pointer_rtx); |
2241 | } |
2242 | } |
2243 | |
2244 | /* Adjust back and account for the additional first interval. */ |
2245 | if (adjust_back) |
    adjust_stack (plus_constant (Pmode, size, PROBE_INTERVAL + dope));
2247 | else |
2248 | adjust_stack (GEN_INT (PROBE_INTERVAL + dope)); |
2249 | } |
2250 | |
2251 | /* Return an rtx representing the register or memory location |
2252 | in which a scalar value of data type VALTYPE |
2253 | was returned by a function call to function FUNC. |
2254 | FUNC is a FUNCTION_DECL, FNTYPE a FUNCTION_TYPE node if the precise |
2255 | function is known, otherwise 0. |
2256 | OUTGOING is 1 if on a machine with register windows this function |
2257 | should return the register in which the function will put its result |
2258 | and 0 otherwise. */ |
2259 | |
2260 | rtx |
2261 | hard_function_value (const_tree valtype, const_tree func, const_tree fntype, |
2262 | int outgoing ATTRIBUTE_UNUSED) |
2263 | { |
2264 | rtx val; |
2265 | |
2266 | val = targetm.calls.function_value (valtype, func ? func : fntype, outgoing); |
2267 | |
2268 | if (REG_P (val) |
2269 | && GET_MODE (val) == BLKmode) |
2270 | { |
2271 | unsigned HOST_WIDE_INT bytes = arg_int_size_in_bytes (valtype); |
2272 | opt_scalar_int_mode tmpmode; |
2273 | |
2274 | /* int_size_in_bytes can return -1. We don't need a check here |
2275 | since the value of bytes will then be large enough that no |
2276 | mode will match anyway. */ |
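      /* For example (illustrative): if BYTES is 6, the loop below stops
	 at the first MODE_INT mode of size >= 6 bytes, i.e. DImode on a
	 typical target with 1/2/4/8-byte integer modes.  */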
2277 | |
2278 | FOR_EACH_MODE_IN_CLASS (tmpmode, MODE_INT) |
2279 | { |
2280 | /* Have we found a large enough mode? */ |
	  if (GET_MODE_SIZE (tmpmode.require ()) >= bytes)
	    break;
	}

      PUT_MODE (val, tmpmode.require ());
2286 | } |
2287 | return val; |
2288 | } |
2289 | |
2290 | /* Return an rtx representing the register or memory location |
2291 | in which a scalar value of mode MODE was returned by a library call. */ |
2292 | |
2293 | rtx |
2294 | hard_libcall_value (machine_mode mode, rtx fun) |
2295 | { |
2296 | return targetm.calls.libcall_value (mode, fun); |
2297 | } |
2298 | |
2299 | /* Look up the tree code for a given rtx code |
2300 | to provide the arithmetic operation for real_arithmetic. |
2301 | The function returns an int because the caller may not know |
2302 | what `enum tree_code' means. */ |
2303 | |
2304 | int |
2305 | rtx_to_tree_code (enum rtx_code code) |
2306 | { |
2307 | enum tree_code tcode; |
2308 | |
2309 | switch (code) |
2310 | { |
2311 | case PLUS: |
2312 | tcode = PLUS_EXPR; |
2313 | break; |
2314 | case MINUS: |
2315 | tcode = MINUS_EXPR; |
2316 | break; |
2317 | case MULT: |
2318 | tcode = MULT_EXPR; |
2319 | break; |
2320 | case DIV: |
2321 | tcode = RDIV_EXPR; |
2322 | break; |
2323 | case SMIN: |
2324 | tcode = MIN_EXPR; |
2325 | break; |
2326 | case SMAX: |
2327 | tcode = MAX_EXPR; |
2328 | break; |
2329 | default: |
2330 | tcode = LAST_AND_UNUSED_TREE_CODE; |
2331 | break; |
2332 | } |
2333 | return ((int) tcode); |
2334 | } |
2335 | |
2336 | #include "gt-explow.h" |
2337 | |