1 | /* Rtl-level induction variable analysis. |
2 | Copyright (C) 2004-2023 Free Software Foundation, Inc. |
3 | |
4 | This file is part of GCC. |
5 | |
6 | GCC is free software; you can redistribute it and/or modify it |
7 | under the terms of the GNU General Public License as published by the |
8 | Free Software Foundation; either version 3, or (at your option) any |
9 | later version. |
10 | |
11 | GCC is distributed in the hope that it will be useful, but WITHOUT |
12 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
14 | for more details. |
15 | |
16 | You should have received a copy of the GNU General Public License |
17 | along with GCC; see the file COPYING3. If not see |
18 | <http://www.gnu.org/licenses/>. */ |
19 | |
20 | /* This is a simple analysis of induction variables of the loop. The major use |
21 | is for determining the number of iterations of a loop for loop unrolling, |
22 | doloop optimization and branch prediction. The iv information is computed |
23 | on demand. |
24 | |
25 | Induction variables are analyzed by walking the use-def chains. When |
26 | a basic induction variable (biv) is found, it is cached in the bivs |
hash table.  When a register is proved to be a biv, its description
28 | is stored to DF_REF_DATA of the def reference. |
29 | |
30 | The analysis works always with one loop -- you must call |
31 | iv_analysis_loop_init (loop) for it. All the other functions then work with |
32 | this loop. When you need to work with another loop, just call |
33 | iv_analysis_loop_init for it. When you no longer need iv analysis, call |
34 | iv_analysis_done () to clean up the memory. |
35 | |
36 | The available functions are: |
37 | |
38 | iv_analyze (insn, mode, reg, iv): Stores the description of the induction |
39 | variable corresponding to the use of register REG in INSN to IV, given |
40 | that REG has mode MODE. Returns true if REG is an induction variable |
41 | in INSN. false otherwise. If a use of REG is not found in INSN, |
42 | the following insns are scanned (so that we may call this function |
43 | on insns returned by get_condition). |
44 | iv_analyze_result (insn, def, iv): Stores to IV the description of the iv |
45 | corresponding to DEF, which is a register defined in INSN. |
46 | iv_analyze_expr (insn, mode, expr, iv): Stores to IV the description of iv |
47 | corresponding to expression EXPR evaluated at INSN. All registers used by |
48 | EXPR must also be used in INSN. MODE is the mode of EXPR. |
49 | */ |
50 | |
51 | #include "config.h" |
52 | #include "system.h" |
53 | #include "coretypes.h" |
54 | #include "backend.h" |
55 | #include "rtl.h" |
56 | #include "df.h" |
57 | #include "memmodel.h" |
58 | #include "emit-rtl.h" |
59 | #include "diagnostic-core.h" |
60 | #include "cfgloop.h" |
61 | #include "intl.h" |
62 | #include "dumpfile.h" |
63 | #include "rtl-iter.h" |
64 | #include "tree-ssa-loop-niter.h" |
65 | #include "regs.h" |
66 | #include "function-abi.h" |
67 | |
/* Possible return values of iv_get_reaching_def.  */

enum iv_grd_result
{
  /* More than one reaching def, or a reaching def that does not
     dominate the use.  */
  GRD_INVALID,

  /* The use is a trivial invariant of the loop, i.e. it is not changed
     inside the loop.  */
  GRD_INVARIANT,

  /* The use is reached by the initial value and a value from the
     previous iteration (i.e. it may be part of a biv cycle).  */
  GRD_MAYBE_BIV,

  /* The use has a single dominating def.  */
  GRD_SINGLE_DOM
};
87 | |
/* Information about a biv (basic induction variable), as cached in the
   bivs hash table.  */

class biv_entry
{
public:
  unsigned regno;	/* The register of the biv (also the hash key).  */
  class rtx_iv iv;	/* Value of the biv.  */
};
96 | |
/* True iff the analysis machinery (df flags, bivs hash table) has not
   been set up yet; set to false by the first iv_analysis_loop_init and
   presumably reset by iv_analysis_done -- not visible here, confirm.  */
static bool clean_slate = true;

/* Number of slots currently allocated in iv_ref_table.  */
static unsigned int iv_ref_table_size = 0;

/* Table of rtx_ivs indexed by the df_ref uid field.  */
static class rtx_iv ** iv_ref_table;

/* Induction variable stored at the reference.  */
#define DF_REF_IV(REF) iv_ref_table[DF_REF_ID (REF)]
#define DF_REF_IV_SET(REF, IV) iv_ref_table[DF_REF_ID (REF)] = (IV)

/* The current loop.  */

static class loop *current_loop;
111 | |
/* Hashtable helper for the bivs hash table: entries are heap-allocated
   biv_entry objects, hashed by register number and compared against
   REG rtxes.  */

struct biv_entry_hasher : free_ptr_hash <biv_entry>
{
  typedef rtx_def *compare_type;
  static inline hashval_t hash (const biv_entry *);
  static inline bool equal (const biv_entry *, const rtx_def *);
};
120 | |
121 | /* Returns hash value for biv B. */ |
122 | |
123 | inline hashval_t |
124 | biv_entry_hasher::hash (const biv_entry *b) |
125 | { |
126 | return b->regno; |
127 | } |
128 | |
129 | /* Compares biv B and register R. */ |
130 | |
131 | inline bool |
132 | biv_entry_hasher::equal (const biv_entry *b, const rtx_def *r) |
133 | { |
134 | return b->regno == REGNO (r); |
135 | } |
136 | |
/* Bivs of the current loop, keyed by register number.  */

static hash_table<biv_entry_hasher> *bivs;

/* Forward declaration.  */
static bool iv_analyze_op (rtx_insn *, scalar_int_mode, rtx, class rtx_iv *);
142 | |
143 | /* Return the RTX code corresponding to the IV extend code EXTEND. */ |
144 | static inline enum rtx_code |
145 | iv_extend_to_rtx_code (enum iv_extend_code extend) |
146 | { |
147 | switch (extend) |
148 | { |
149 | case IV_SIGN_EXTEND: |
150 | return SIGN_EXTEND; |
151 | case IV_ZERO_EXTEND: |
152 | return ZERO_EXTEND; |
153 | case IV_UNKNOWN_EXTEND: |
154 | return UNKNOWN; |
155 | } |
156 | gcc_unreachable (); |
157 | } |
158 | |
/* Dumps a human-readable description of induction variable IV to FILE,
   in the form "delta + mult * extend (base + step * iteration)" with
   invariant parts elided.  */

extern void dump_iv_info (FILE *, class rtx_iv *);
void
dump_iv_info (FILE *file, class rtx_iv *iv)
{
  /* A NULL base marks an iv that could not be analyzed.  */
  if (!iv->base)
    {
      fprintf (stream: file, format: "not simple" );
      return;
    }

  if (iv->step == const0_rtx
      && !iv->first_special)
    fprintf (stream: file, format: "invariant " );

  print_rtl (file, iv->base);
  if (iv->step != const0_rtx)
    {
      fprintf (stream: file, format: " + " );
      print_rtl (file, iv->step);
      fprintf (stream: file, format: " * iteration" );
    }
  fprintf (stream: file, format: " (in %s)" , GET_MODE_NAME (iv->mode));

  /* Mention the extension only when the iv is computed in a wider mode
     than its own.  */
  if (iv->mode != iv->extend_mode)
    fprintf (stream: file, format: " %s to %s" ,
	     rtx_name[iv_extend_to_rtx_code (extend: iv->extend)],
	     GET_MODE_NAME (iv->extend_mode));

  if (iv->mult != const1_rtx)
    {
      fprintf (stream: file, format: " * " );
      print_rtl (file, iv->mult);
    }
  if (iv->delta != const0_rtx)
    {
      fprintf (stream: file, format: " + " );
      print_rtl (file, iv->delta);
    }
  if (iv->first_special)
    fprintf (stream: file, format: " (first special)" );
}
202 | |
/* Ensures iv_ref_table has at least DF_DEFS_TABLE_SIZE () slots,
   growing it and zero-filling the new slots when necessary.  */

static void
check_iv_ref_table_size (void)
{
  if (iv_ref_table_size < DF_DEFS_TABLE_SIZE ())
    {
      /* Allocate 25% headroom beyond the current need so small growths
	 of the df defs table do not force a resize every time.  */
      unsigned int new_size = DF_DEFS_TABLE_SIZE () + (DF_DEFS_TABLE_SIZE () / 4);
      iv_ref_table = XRESIZEVEC (class rtx_iv *, iv_ref_table, new_size);
      memset (s: &iv_ref_table[iv_ref_table_size], c: 0,
	      n: (new_size - iv_ref_table_size) * sizeof (class rtx_iv *));
      iv_ref_table_size = new_size;
    }
}
215 | |
216 | |
217 | /* Checks whether REG is a well-behaved register. */ |
218 | |
219 | static bool |
220 | simple_reg_p (rtx reg) |
221 | { |
222 | unsigned r; |
223 | |
224 | if (GET_CODE (reg) == SUBREG) |
225 | { |
226 | if (!subreg_lowpart_p (reg)) |
227 | return false; |
228 | reg = SUBREG_REG (reg); |
229 | } |
230 | |
231 | if (!REG_P (reg)) |
232 | return false; |
233 | |
234 | r = REGNO (reg); |
235 | if (HARD_REGISTER_NUM_P (r)) |
236 | return false; |
237 | |
238 | if (GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT) |
239 | return false; |
240 | |
241 | return true; |
242 | } |
243 | |
244 | /* Clears the information about ivs stored in df. */ |
245 | |
246 | static void |
247 | clear_iv_info (void) |
248 | { |
249 | unsigned i, n_defs = DF_DEFS_TABLE_SIZE (); |
250 | class rtx_iv *iv; |
251 | |
252 | check_iv_ref_table_size (); |
253 | for (i = 0; i < n_defs; i++) |
254 | { |
255 | iv = iv_ref_table[i]; |
256 | if (iv) |
257 | { |
258 | free (ptr: iv); |
259 | iv_ref_table[i] = NULL; |
260 | } |
261 | } |
262 | |
263 | bivs->empty (); |
264 | } |
265 | |
266 | |
/* Prepare the data for an induction variable analysis of a LOOP.  */

void
iv_analysis_loop_init (class loop *loop)
{
  current_loop = loop;

  /* Clear the information from the analysis of the previous loop.  */
  if (clean_slate)
    {
      /* First use: set up the df flags and the bivs hash table from
	 scratch.  */
      df_set_flags (DF_EQ_NOTES + DF_DEFER_INSN_RESCAN);
      bivs = new hash_table<biv_entry_hasher> (10);
      clean_slate = false;
    }
  else
    clear_iv_info ();

  /* Get rid of the ud chains before processing the rescans.  Then add
     the problem back.  */
  df_remove_problem (df_chain);
  df_process_deferred_rescans ();
  df_set_flags (DF_RD_PRUNE_DEAD_DEFS);
  df_chain_add_problem (DF_UD_CHAIN);
  df_note_add_problem ();
  df_analyze_loop (loop);
  if (dump_file)
    df_dump_region (dump_file);

  check_iv_ref_table_size ();
}
297 | |
/* Finds the definition of REG that dominates loop latch and stores
   it to DEF.  Returns false if there is not a single definition
   dominating the latch.  If REG has no definition in loop, DEF
   is set to NULL and true is returned.  */

static bool
latch_dominating_def (rtx reg, df_ref *def)
{
  df_ref single_rd = NULL, adef;
  unsigned regno = REGNO (reg);
  class df_rd_bb_info *bb_info = DF_RD_BB_INFO (current_loop->latch);

  for (adef = DF_REG_DEF_CHAIN (regno); adef; adef = DF_REF_NEXT_REG (adef))
    {
      /* Only consider defs inside the analyzed region that reach the
	 exit of the latch block.  */
      if (!bitmap_bit_p (df->blocks_to_analyze, DF_REF_BBNO (adef))
	  || !bitmap_bit_p (&bb_info->out, DF_REF_ID (adef)))
	continue;

      /* More than one reaching definition.  */
      if (single_rd)
	return false;

      /* A def in a block that may execute several times per iteration
	 cannot dominate the latch.  */
      if (!just_once_each_iteration_p (current_loop, DF_REF_BB (adef)))
	return false;

      single_rd = adef;
    }

  *def = single_rd;
  return true;
}
329 | |
/* Gets the definition of REG reaching its use in INSN and stores it to
   DEF.  Returns one of the iv_grd_result codes describing what kind of
   reaching definition was found.  */

static enum iv_grd_result
iv_get_reaching_def (rtx_insn *insn, rtx reg, df_ref *def)
{
  df_ref use, adef;
  basic_block def_bb, use_bb;
  rtx_insn *def_insn;
  bool dom_p;

  *def = NULL;
  if (!simple_reg_p (reg))
    return GRD_INVALID;
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);
  gcc_assert (REG_P (reg));

  use = df_find_use (insn, reg);
  gcc_assert (use != NULL);

  /* No reaching def recorded means the use is invariant in the
     analyzed region.  */
  if (!DF_REF_CHAIN (use))
    return GRD_INVARIANT;

  /* More than one reaching def.  */
  if (DF_REF_CHAIN (use)->next)
    return GRD_INVALID;

  adef = DF_REF_CHAIN (use)->ref;

  /* We do not handle setting only part of the register.  */
  if (DF_REF_FLAGS (adef) & DF_REF_READ_WRITE)
    return GRD_INVALID;

  def_insn = DF_REF_INSN (adef);
  def_bb = DF_REF_BB (adef);
  use_bb = BLOCK_FOR_INSN (insn);

  /* Within a single block compare insn luids; across blocks use the
     dominator tree.  */
  if (use_bb == def_bb)
    dom_p = (DF_INSN_LUID (def_insn) < DF_INSN_LUID (insn));
  else
    dom_p = dominated_by_p (CDI_DOMINATORS, use_bb, def_bb);

  if (dom_p)
    {
      *def = adef;
      return GRD_SINGLE_DOM;
    }

  /* The definition does not dominate the use.  This is still OK if
     this may be a use of a biv, i.e. if the def_bb dominates loop
     latch.  */
  if (just_once_each_iteration_p (current_loop, def_bb))
    return GRD_MAYBE_BIV;

  return GRD_INVALID;
}
386 | |
387 | /* Sets IV to invariant CST in MODE. Always returns true (just for |
388 | consistency with other iv manipulation functions that may fail). */ |
389 | |
390 | static bool |
391 | iv_constant (class rtx_iv *iv, scalar_int_mode mode, rtx cst) |
392 | { |
393 | iv->mode = mode; |
394 | iv->base = cst; |
395 | iv->step = const0_rtx; |
396 | iv->first_special = false; |
397 | iv->extend = IV_UNKNOWN_EXTEND; |
398 | iv->extend_mode = iv->mode; |
399 | iv->delta = const0_rtx; |
400 | iv->mult = const1_rtx; |
401 | |
402 | return true; |
403 | } |
404 | |
/* Evaluates application of a subreg to MODE on IV.  Returns false if
   the result cannot be represented.  */

static bool
iv_subreg (class rtx_iv *iv, scalar_int_mode mode)
{
  /* If iv is invariant, just calculate the new value.  */
  if (iv->step == const0_rtx
      && !iv->first_special)
    {
      rtx val = get_iv_value (iv, const0_rtx);
      val = lowpart_subreg (outermode: mode, op: val,
			    innermode: iv->extend == IV_UNKNOWN_EXTEND
			    ? iv->mode : iv->extend_mode);

      iv->base = val;
      iv->extend = IV_UNKNOWN_EXTEND;
      iv->mode = iv->extend_mode = mode;
      iv->delta = const0_rtx;
      iv->mult = const1_rtx;
      return true;
    }

  if (iv->extend_mode == mode)
    return true;

  /* Only narrowing subregs can be handled.  */
  if (GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (mode: iv->mode))
    return false;

  iv->extend = IV_UNKNOWN_EXTEND;
  iv->mode = mode;

  /* The extension is discarded by the subreg, so fold delta and mult
     into base and step.  */
  iv->base = simplify_gen_binary (code: PLUS, mode: iv->extend_mode, op0: iv->delta,
				  op1: simplify_gen_binary (code: MULT, mode: iv->extend_mode,
							 op0: iv->base, op1: iv->mult));
  iv->step = simplify_gen_binary (code: MULT, mode: iv->extend_mode, op0: iv->step, op1: iv->mult);
  iv->mult = const1_rtx;
  iv->delta = const0_rtx;
  iv->first_special = false;

  return true;
}
446 | |
/* Evaluates application of EXTEND to MODE on IV.  Returns false if the
   result cannot be represented.  */

static bool
iv_extend (class rtx_iv *iv, enum iv_extend_code extend, scalar_int_mode mode)
{
  /* If iv is invariant, just calculate the new value.  */
  if (iv->step == const0_rtx
      && !iv->first_special)
    {
      rtx val = get_iv_value (iv, const0_rtx);
      /* If a different extension was already applied, take the low part
	 first so the new extension starts from iv->mode.  */
      if (iv->extend_mode != iv->mode
	  && iv->extend != IV_UNKNOWN_EXTEND
	  && iv->extend != extend)
	val = lowpart_subreg (outermode: iv->mode, op: val, innermode: iv->extend_mode);
      val = simplify_gen_unary (code: iv_extend_to_rtx_code (extend), mode,
				op: val,
				op_mode: iv->extend == extend
				? iv->extend_mode : iv->mode);
      iv->base = val;
      iv->extend = IV_UNKNOWN_EXTEND;
      iv->mode = iv->extend_mode = mode;
      iv->delta = const0_rtx;
      iv->mult = const1_rtx;
      return true;
    }

  if (mode != iv->extend_mode)
    return false;

  /* Only one kind of extension can be tracked per iv.  */
  if (iv->extend != IV_UNKNOWN_EXTEND
      && iv->extend != extend)
    return false;

  iv->extend = extend;

  return true;
}
484 | |
485 | /* Evaluates negation of IV. */ |
486 | |
487 | static bool |
488 | iv_neg (class rtx_iv *iv) |
489 | { |
490 | if (iv->extend == IV_UNKNOWN_EXTEND) |
491 | { |
492 | iv->base = simplify_gen_unary (code: NEG, mode: iv->extend_mode, |
493 | op: iv->base, op_mode: iv->extend_mode); |
494 | iv->step = simplify_gen_unary (code: NEG, mode: iv->extend_mode, |
495 | op: iv->step, op_mode: iv->extend_mode); |
496 | } |
497 | else |
498 | { |
499 | iv->delta = simplify_gen_unary (code: NEG, mode: iv->extend_mode, |
500 | op: iv->delta, op_mode: iv->extend_mode); |
501 | iv->mult = simplify_gen_unary (code: NEG, mode: iv->extend_mode, |
502 | op: iv->mult, op_mode: iv->extend_mode); |
503 | } |
504 | |
505 | return true; |
506 | } |
507 | |
/* Evaluates addition or subtraction (according to OP) of IV1 to IV0,
   storing the result in IV0.  Returns false if the combination cannot
   be represented.  */

static bool
iv_add (class rtx_iv *iv0, class rtx_iv *iv1, enum rtx_code op)
{
  scalar_int_mode mode;
  rtx arg;

  /* Extend the constant to extend_mode of the other operand if necessary.  */
  if (iv0->extend == IV_UNKNOWN_EXTEND
      && iv0->mode == iv0->extend_mode
      && iv0->step == const0_rtx
      && GET_MODE_SIZE (mode: iv0->extend_mode) < GET_MODE_SIZE (mode: iv1->extend_mode))
    {
      iv0->extend_mode = iv1->extend_mode;
      iv0->base = simplify_gen_unary (code: ZERO_EXTEND, mode: iv0->extend_mode,
				      op: iv0->base, op_mode: iv0->mode);
    }
  if (iv1->extend == IV_UNKNOWN_EXTEND
      && iv1->mode == iv1->extend_mode
      && iv1->step == const0_rtx
      && GET_MODE_SIZE (mode: iv1->extend_mode) < GET_MODE_SIZE (mode: iv0->extend_mode))
    {
      iv1->extend_mode = iv0->extend_mode;
      iv1->base = simplify_gen_unary (code: ZERO_EXTEND, mode: iv1->extend_mode,
				      op: iv1->base, op_mode: iv1->mode);
    }

  mode = iv0->extend_mode;
  if (mode != iv1->extend_mode)
    return false;

  /* Neither iv carries an extension: combine bases and steps directly.  */
  if (iv0->extend == IV_UNKNOWN_EXTEND
      && iv1->extend == IV_UNKNOWN_EXTEND)
    {
      if (iv0->mode != iv1->mode)
	return false;

      iv0->base = simplify_gen_binary (code: op, mode, op0: iv0->base, op1: iv1->base);
      iv0->step = simplify_gen_binary (code: op, mode, op0: iv0->step, op1: iv1->step);

      return true;
    }

  /* Handle addition of constant.  */
  if (iv1->extend == IV_UNKNOWN_EXTEND
      && iv1->mode == mode
      && iv1->step == const0_rtx)
    {
      iv0->delta = simplify_gen_binary (code: op, mode, op0: iv0->delta, op1: iv1->base);
      return true;
    }

  /* Invariant on the left: the result is IV1 (negated for MINUS),
     displaced by IV0's base.  */
  if (iv0->extend == IV_UNKNOWN_EXTEND
      && iv0->mode == mode
      && iv0->step == const0_rtx)
    {
      arg = iv0->base;
      *iv0 = *iv1;
      if (op == MINUS
	  && !iv_neg (iv: iv0))
	return false;

      iv0->delta = simplify_gen_binary (code: PLUS, mode, op0: iv0->delta, op1: arg);
      return true;
    }

  return false;
}
577 | |
578 | /* Evaluates multiplication of IV by constant CST. */ |
579 | |
580 | static bool |
581 | iv_mult (class rtx_iv *iv, rtx mby) |
582 | { |
583 | scalar_int_mode mode = iv->extend_mode; |
584 | |
585 | if (GET_MODE (mby) != VOIDmode |
586 | && GET_MODE (mby) != mode) |
587 | return false; |
588 | |
589 | if (iv->extend == IV_UNKNOWN_EXTEND) |
590 | { |
591 | iv->base = simplify_gen_binary (code: MULT, mode, op0: iv->base, op1: mby); |
592 | iv->step = simplify_gen_binary (code: MULT, mode, op0: iv->step, op1: mby); |
593 | } |
594 | else |
595 | { |
596 | iv->delta = simplify_gen_binary (code: MULT, mode, op0: iv->delta, op1: mby); |
597 | iv->mult = simplify_gen_binary (code: MULT, mode, op0: iv->mult, op1: mby); |
598 | } |
599 | |
600 | return true; |
601 | } |
602 | |
603 | /* Evaluates shift of IV by constant CST. */ |
604 | |
605 | static bool |
606 | iv_shift (class rtx_iv *iv, rtx mby) |
607 | { |
608 | scalar_int_mode mode = iv->extend_mode; |
609 | |
610 | if (GET_MODE (mby) != VOIDmode |
611 | && GET_MODE (mby) != mode) |
612 | return false; |
613 | |
614 | if (iv->extend == IV_UNKNOWN_EXTEND) |
615 | { |
616 | iv->base = simplify_gen_binary (code: ASHIFT, mode, op0: iv->base, op1: mby); |
617 | iv->step = simplify_gen_binary (code: ASHIFT, mode, op0: iv->step, op1: mby); |
618 | } |
619 | else |
620 | { |
621 | iv->delta = simplify_gen_binary (code: ASHIFT, mode, op0: iv->delta, op1: mby); |
622 | iv->mult = simplify_gen_binary (code: ASHIFT, mode, op0: iv->mult, op1: mby); |
623 | } |
624 | |
625 | return true; |
626 | } |
627 | |
/* The recursive part of get_biv_step.  Gets the value of the single value
   defined by DEF wrto initial value of REG inside loop, in shape described
   at get_biv_step.  */

static bool
get_biv_step_1 (df_ref def, scalar_int_mode outer_mode, rtx reg,
		rtx *inner_step, scalar_int_mode *inner_mode,
		enum iv_extend_code *extend,
		rtx *outer_step)
{
  rtx set, rhs, op0 = NULL_RTX, op1 = NULL_RTX;
  rtx next, nextr;
  enum rtx_code code, prev_code = UNKNOWN;
  rtx_insn *insn = DF_REF_INSN (def);
  df_ref next_def;
  enum iv_grd_result res;

  set = single_set (insn);
  if (!set)
    return false;

  /* Prefer the REG_EQUAL/REG_EQUIV note over the raw SET_SRC.  */
  rhs = find_reg_equal_equiv_note (insn);
  if (rhs)
    rhs = XEXP (rhs, 0);
  else
    rhs = SET_SRC (set);

  code = GET_CODE (rhs);
  switch (code)
    {
    case SUBREG:
    case REG:
      /* A plain copy; just follow the source.  */
      next = rhs;
      break;

    case PLUS:
    case MINUS:
      op0 = XEXP (rhs, 0);
      op1 = XEXP (rhs, 1);

      /* Canonicalize so the register operand is op0.  */
      if (code == PLUS && CONSTANT_P (op0))
	std::swap (a&: op0, b&: op1);

      if (!simple_reg_p (reg: op0)
	  || !CONSTANT_P (op1))
	return false;

      if (GET_MODE (rhs) != outer_mode)
	{
	  /* ppc64 uses expressions like

	     (set x:SI (plus:SI (subreg:SI y:DI) 1)).

	     this is equivalent to

	     (set x':DI (plus:DI y:DI 1))
	     (set x:SI (subreg:SI (x':DI)).  */
	  if (GET_CODE (op0) != SUBREG)
	    return false;
	  if (GET_MODE (SUBREG_REG (op0)) != outer_mode)
	    return false;
	}

      next = op0;
      break;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      if (GET_MODE (rhs) != outer_mode)
	return false;

      op0 = XEXP (rhs, 0);

      /* rv64 wraps SImode arithmetic inside an extension to DImode.
	 This matches the actual hardware semantics.  So peek inside
	 the extension and see if we have simple arithmetic that we
	 can analyze.  */
      if (GET_CODE (op0) == PLUS)
	{
	  rhs = op0;
	  op0 = XEXP (rhs, 0);
	  op1 = XEXP (rhs, 1);

	  if (CONSTANT_P (op0))
	    std::swap (a&: op0, b&: op1);

	  if (!simple_reg_p (reg: op0) || !CONSTANT_P (op1))
	    return false;

	  /* Remember that the addition was wrapped in an extension.  */
	  prev_code = code;
	  code = PLUS;
	}

      if (!simple_reg_p (reg: op0))
	return false;

      next = op0;
      break;

    default:
      return false;
    }

  if (GET_CODE (next) == SUBREG)
    {
      if (!subreg_lowpart_p (next))
	return false;

      nextr = SUBREG_REG (next);
      if (GET_MODE (nextr) != outer_mode)
	return false;
    }
  else
    nextr = next;

  res = iv_get_reaching_def (insn, reg: nextr, def: &next_def);

  if (res == GRD_INVALID || res == GRD_INVARIANT)
    return false;

  if (res == GRD_MAYBE_BIV)
    {
      /* We have come back around to the start of the biv cycle; it must
	 be REG itself.  */
      if (!rtx_equal_p (nextr, reg))
	return false;

      *inner_step = const0_rtx;
      *extend = IV_UNKNOWN_EXTEND;
      *inner_mode = outer_mode;
      *outer_step = const0_rtx;
    }
  else if (!get_biv_step_1 (def: next_def, outer_mode, reg,
			    inner_step, inner_mode, extend,
			    outer_step))
    return false;

  if (GET_CODE (next) == SUBREG)
    {
      scalar_int_mode amode;
      if (!is_a <scalar_int_mode> (GET_MODE (next), result: &amode)
	  || GET_MODE_SIZE (mode: amode) > GET_MODE_SIZE (mode: *inner_mode))
	return false;

      /* The subreg discards any previous extension; fold the outer step
	 into the inner one.  */
      *inner_mode = amode;
      *inner_step = simplify_gen_binary (code: PLUS, mode: outer_mode,
					 op0: *inner_step, op1: *outer_step);
      *outer_step = const0_rtx;
      *extend = IV_UNKNOWN_EXTEND;
    }

  switch (code)
    {
    case REG:
    case SUBREG:
      break;

    case PLUS:
    case MINUS:
      if (*inner_mode == outer_mode
	  /* See comment in previous switch.  */
	  || GET_MODE (rhs) != outer_mode)
	*inner_step = simplify_gen_binary (code, mode: outer_mode,
					   op0: *inner_step, op1);
      else
	*outer_step = simplify_gen_binary (code, mode: outer_mode,
					   op0: *outer_step, op1);

      if (prev_code == SIGN_EXTEND)
	*extend = IV_SIGN_EXTEND;
      else if (prev_code == ZERO_EXTEND)
	*extend = IV_ZERO_EXTEND;
      break;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      gcc_assert (GET_MODE (op0) == *inner_mode
		  && *extend == IV_UNKNOWN_EXTEND
		  && *outer_step == const0_rtx);

      *extend = (code == SIGN_EXTEND) ? IV_SIGN_EXTEND : IV_ZERO_EXTEND;
      break;

    default:
      return false;
    }

  return true;
}
815 | |
/* Gets the operation on register REG inside loop, in shape

   OUTER_STEP + EXTEND_{OUTER_MODE} (SUBREG_{INNER_MODE} (REG + INNER_STEP))

   If the operation cannot be described in this shape, return false.
   LAST_DEF is the definition of REG that dominates loop latch.  */

static bool
get_biv_step (df_ref last_def, scalar_int_mode outer_mode, rtx reg,
	      rtx *inner_step, scalar_int_mode *inner_mode,
	      enum iv_extend_code *extend, rtx *outer_step)
{
  if (!get_biv_step_1 (def: last_def, outer_mode, reg,
		       inner_step, inner_mode, extend,
		       outer_step))
    return false;

  /* Invariant: an extension is recorded exactly when the inner mode
     differs from the outer one ...  */
  gcc_assert ((*inner_mode == outer_mode) != (*extend != IV_UNKNOWN_EXTEND));
  /* ... and without an extension there can be no outer step.  */
  gcc_assert (*inner_mode != outer_mode || *outer_step == const0_rtx);

  return true;
}
838 | |
839 | /* Records information that DEF is induction variable IV. */ |
840 | |
841 | static void |
842 | record_iv (df_ref def, class rtx_iv *iv) |
843 | { |
844 | class rtx_iv *recorded_iv = XNEW (class rtx_iv); |
845 | |
846 | *recorded_iv = *iv; |
847 | check_iv_ref_table_size (); |
848 | DF_REF_IV_SET (def, recorded_iv); |
849 | } |
850 | |
851 | /* If DEF was already analyzed for bivness, store the description of the biv to |
852 | IV and return true. Otherwise return false. */ |
853 | |
854 | static bool |
855 | analyzed_for_bivness_p (rtx def, class rtx_iv *iv) |
856 | { |
857 | class biv_entry *biv = bivs->find_with_hash (comparable: def, REGNO (def)); |
858 | |
859 | if (!biv) |
860 | return false; |
861 | |
862 | *iv = biv->iv; |
863 | return true; |
864 | } |
865 | |
/* Records the biv description IV for register DEF in the bivs hash
   table.  DEF must not have been recorded before.  */

static void
record_biv (rtx def, class rtx_iv *iv)
{
  class biv_entry *biv = XNEW (class biv_entry);
  biv_entry **slot = bivs->find_slot_with_hash (comparable: def, REGNO (def), insert: INSERT);

  biv->regno = REGNO (def);
  biv->iv = *iv;
  /* Each register is analyzed for bivness at most once.  */
  gcc_assert (!*slot);
  *slot = biv;
}
877 | |
/* Determines whether DEF is a biv and if so, stores its description
   to *IV.  OUTER_MODE is the mode of DEF.  Failures are cached too
   (with a NULL base) so the work is not repeated.  */

static bool
iv_analyze_biv (scalar_int_mode outer_mode, rtx def, class rtx_iv *iv)
{
  rtx inner_step, outer_step;
  scalar_int_mode inner_mode;
  enum iv_extend_code extend;
  df_ref last_def;

  if (dump_file)
    {
      fprintf (stream: dump_file, format: "Analyzing " );
      print_rtl (dump_file, def);
      fprintf (stream: dump_file, format: " for bivness.\n" );
    }

  if (!REG_P (def))
    {
      if (!CONSTANT_P (def))
	return false;

      return iv_constant (iv, mode: outer_mode, cst: def);
    }

  if (!latch_dominating_def (reg: def, def: &last_def))
    {
      if (dump_file)
	fprintf (stream: dump_file, format: " not simple.\n" );
      return false;
    }

  /* No definition inside the loop: DEF is loop-invariant.  */
  if (!last_def)
    return iv_constant (iv, mode: outer_mode, cst: def);

  if (analyzed_for_bivness_p (def, iv))
    {
      if (dump_file)
	fprintf (stream: dump_file, format: " already analysed.\n" );
      return iv->base != NULL_RTX;
    }

  if (!get_biv_step (last_def, outer_mode, reg: def, inner_step: &inner_step, inner_mode: &inner_mode,
		     extend: &extend, outer_step: &outer_step))
    {
      iv->base = NULL_RTX;
      goto end;
    }

  /* Loop transforms base to es (base + inner_step) + outer_step,
     where es means extend of subreg between inner_mode and outer_mode.
     The corresponding induction variable is

     es ((base - outer_step) + i * (inner_step + outer_step)) + outer_step  */

  iv->base = simplify_gen_binary (code: MINUS, mode: outer_mode, op0: def, op1: outer_step);
  iv->step = simplify_gen_binary (code: PLUS, mode: outer_mode, op0: inner_step, op1: outer_step);
  iv->mode = inner_mode;
  iv->extend_mode = outer_mode;
  iv->extend = extend;
  iv->mult = const1_rtx;
  iv->delta = outer_step;
  iv->first_special = inner_mode != outer_mode;

 end:
  if (dump_file)
    {
      fprintf (stream: dump_file, format: " " );
      dump_iv_info (file: dump_file, iv);
      fprintf (stream: dump_file, format: "\n" );
    }

  /* Cache the result, including failures (NULL base).  */
  record_biv (def, iv);
  return iv->base != NULL_RTX;
}
954 | |
/* Analyzes expression RHS used at INSN and stores the result to *IV.
   The mode of the induction variable is MODE.  Returns true iff RHS is
   a simple induction variable (non-NULL base).  */

bool
iv_analyze_expr (rtx_insn *insn, scalar_int_mode mode, rtx rhs,
		 class rtx_iv *iv)
{
  rtx mby = NULL_RTX;
  rtx op0 = NULL_RTX, op1 = NULL_RTX;
  class rtx_iv iv0, iv1;
  enum rtx_code code = GET_CODE (rhs);
  scalar_int_mode omode = mode;

  iv->base = NULL_RTX;
  iv->step = NULL_RTX;

  gcc_assert (GET_MODE (rhs) == mode || GET_MODE (rhs) == VOIDmode);

  /* Leaves of the expression (constants, registers, subregs) are
     handled by iv_analyze_op.  */
  if (CONSTANT_P (rhs)
      || REG_P (rhs)
      || code == SUBREG)
    return iv_analyze_op (insn, mode, rhs, iv);

  switch (code)
    {
    case REG:
      /* NOTE(review): unreachable -- REG_P (rhs) was already handled
	 by the early return above.  */
      op0 = rhs;
      break;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
    case NEG:
      op0 = XEXP (rhs, 0);
      /* We don't know how many bits there are in a sign-extended constant.  */
      if (!is_a <scalar_int_mode> (GET_MODE (op0), result: &omode))
	return false;
      break;

    case PLUS:
    case MINUS:
      op0 = XEXP (rhs, 0);
      op1 = XEXP (rhs, 1);
      break;

    case MULT:
      op0 = XEXP (rhs, 0);
      mby = XEXP (rhs, 1);
      /* Canonicalize so the constant multiplier is in MBY.  */
      if (!CONSTANT_P (mby))
	std::swap (a&: op0, b&: mby);
      if (!CONSTANT_P (mby))
	return false;
      break;

    case ASHIFT:
      op0 = XEXP (rhs, 0);
      mby = XEXP (rhs, 1);
      if (!CONSTANT_P (mby))
	return false;
      break;

    default:
      return false;
    }

  /* Recursively analyze the operands.  */
  if (op0
      && !iv_analyze_expr (insn, mode: omode, rhs: op0, iv: &iv0))
    return false;

  if (op1
      && !iv_analyze_expr (insn, mode: omode, rhs: op1, iv: &iv1))
    return false;

  /* Combine the operand ivs according to the operation.  */
  switch (code)
    {
    case SIGN_EXTEND:
      if (!iv_extend (iv: &iv0, extend: IV_SIGN_EXTEND, mode))
	return false;
      break;

    case ZERO_EXTEND:
      if (!iv_extend (iv: &iv0, extend: IV_ZERO_EXTEND, mode))
	return false;
      break;

    case NEG:
      if (!iv_neg (iv: &iv0))
	return false;
      break;

    case PLUS:
    case MINUS:
      if (!iv_add (iv0: &iv0, iv1: &iv1, op: code))
	return false;
      break;

    case MULT:
      if (!iv_mult (iv: &iv0, mby))
	return false;
      break;

    case ASHIFT:
      if (!iv_shift (iv: &iv0, mby))
	return false;
      break;

    default:
      break;
    }

  *iv = iv0;
  return iv->base != NULL_RTX;
}
1067 | |
1068 | /* Analyzes iv DEF and stores the result to *IV. */ |
1069 | |
static bool
iv_analyze_def (df_ref def, class rtx_iv *iv)
{
  rtx_insn *insn = DF_REF_INSN (def);
  rtx reg = DF_REF_REG (def);
  rtx set, rhs;

  if (dump_file)
    {
      fprintf (stream: dump_file, format: "Analyzing def of " );
      print_rtl (dump_file, reg);
      fprintf (stream: dump_file, format: " in insn " );
      print_rtl_single (dump_file, insn);
    }

  /* Results are cached on the def reference (DF_REF_IV); reuse a
     previous analysis if present.  */
  check_iv_ref_table_size ();
  if (DF_REF_IV (def))
    {
      if (dump_file)
	fprintf (stream: dump_file, format: " already analysed.\n" );
      *iv = *DF_REF_IV (def);
      return iv->base != NULL_RTX;
    }

  /* A null base means "not an induction variable".  */
  iv->base = NULL_RTX;
  iv->step = NULL_RTX;

  /* Only scalar integer registers can be induction variables.  */
  scalar_int_mode mode;
  if (!REG_P (reg) || !is_a <scalar_int_mode> (GET_MODE (reg), result: &mode))
    return false;

  /* The defining insn must be a single set of REG.  */
  set = single_set (insn);
  if (!set)
    return false;

  if (!REG_P (SET_DEST (set)))
    return false;

  gcc_assert (SET_DEST (set) == reg);
  /* Prefer a REG_EQUAL/REG_EQUIV note over the raw SET_SRC; the note
     may expose a simpler equivalent expression.  */
  rhs = find_reg_equal_equiv_note (insn);
  if (rhs)
    rhs = XEXP (rhs, 0);
  else
    rhs = SET_SRC (set);

  /* Analyze the right-hand side and cache the result even on failure
     (a cached null base records "not an iv").  */
  iv_analyze_expr (insn, mode, rhs, iv);
  record_iv (def, iv);

  if (dump_file)
    {
      print_rtl (dump_file, reg);
      fprintf (stream: dump_file, format: " in insn " );
      print_rtl_single (dump_file, insn);
      fprintf (stream: dump_file, format: " is " );
      dump_iv_info (file: dump_file, iv);
      fprintf (stream: dump_file, format: "\n" );
    }

  return iv->base != NULL_RTX;
}
1130 | |
1131 | /* Analyzes operand OP of INSN and stores the result to *IV. MODE is the |
1132 | mode of OP. */ |
1133 | |
static bool
iv_analyze_op (rtx_insn *insn, scalar_int_mode mode, rtx op, class rtx_iv *iv)
{
  df_ref def = NULL;
  enum iv_grd_result res;

  if (dump_file)
    {
      fprintf (stream: dump_file, format: "Analyzing operand " );
      print_rtl (dump_file, op);
      fprintf (stream: dump_file, format: " of insn " );
      print_rtl_single (dump_file, insn);
    }

  if (function_invariant_p (op))
    res = GRD_INVARIANT;
  else if (GET_CODE (op) == SUBREG)
    {
      /* Only lowpart subregs of scalar integer values are handled:
	 analyze the inner register, then restrict the result to MODE.  */
      scalar_int_mode inner_mode;
      if (!subreg_lowpart_p (op)
	  || !is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), result: &inner_mode))
	return false;

      if (!iv_analyze_op (insn, mode: inner_mode, SUBREG_REG (op), iv))
	return false;

      return iv_subreg (iv, mode);
    }
  else
    {
      /* Classify OP by its reaching definition.  */
      res = iv_get_reaching_def (insn, reg: op, def: &def);
      if (res == GRD_INVALID)
	{
	  if (dump_file)
	    fprintf (stream: dump_file, format: " not simple.\n" );
	  return false;
	}
    }

  if (res == GRD_INVARIANT)
    {
      /* A loop invariant is an iv with a zero step.  */
      iv_constant (iv, mode, cst: op);

      if (dump_file)
	{
	  fprintf (stream: dump_file, format: " " );
	  dump_iv_info (file: dump_file, iv);
	  fprintf (stream: dump_file, format: "\n" );
	}
      return true;
    }

  if (res == GRD_MAYBE_BIV)
    return iv_analyze_biv (outer_mode: mode, def: op, iv);

  return iv_analyze_def (def, iv);
}
1191 | |
1192 | /* Analyzes value VAL at INSN and stores the result to *IV. MODE is the |
1193 | mode of VAL. */ |
1194 | |
bool
iv_analyze (rtx_insn *insn, scalar_int_mode mode, rtx val, class rtx_iv *iv)
{
  rtx reg;

  /* We must find the insn in that val is used, so that we get to UD chains.
     Since the function is sometimes called on result of get_condition,
     this does not necessarily have to be directly INSN; scan also the
     following insns.  */
  if (simple_reg_p (reg: val))
    {
      if (GET_CODE (val) == SUBREG)
	reg = SUBREG_REG (val);
      else
	reg = val;

      /* NOTE(review): this assumes a use of REG exists in INSN or some
	 following insn; otherwise the walk would run off the end of the
	 insn chain -- confirm with callers.  */
      while (!df_find_use (insn, reg))
	insn = NEXT_INSN (insn);
    }

  return iv_analyze_op (insn, mode, op: val, iv);
}
1217 | |
1218 | /* Analyzes definition of DEF in INSN and stores the result to IV. */ |
1219 | |
1220 | bool |
1221 | iv_analyze_result (rtx_insn *insn, rtx def, class rtx_iv *iv) |
1222 | { |
1223 | df_ref adef; |
1224 | |
1225 | adef = df_find_def (insn, def); |
1226 | if (!adef) |
1227 | return false; |
1228 | |
1229 | return iv_analyze_def (def: adef, iv); |
1230 | } |
1231 | |
1232 | /* Checks whether definition of register REG in INSN is a basic induction |
1233 | variable. MODE is the mode of REG. |
1234 | |
1235 | IV analysis must have been initialized (via a call to |
1236 | iv_analysis_loop_init) for this function to produce a result. */ |
1237 | |
bool
biv_p (rtx_insn *insn, scalar_int_mode mode, rtx reg)
{
  class rtx_iv iv;
  df_ref def, last_def;

  if (!simple_reg_p (reg))
    return false;

  /* REG is a biv only if this definition is the (single) definition
     dominating the loop latch.  */
  def = df_find_def (insn, reg);
  gcc_assert (def != NULL);
  if (!latch_dominating_def (reg, def: &last_def))
    return false;
  if (last_def != def)
    return false;

  if (!iv_analyze_biv (outer_mode: mode, def: reg, iv: &iv))
    return false;

  /* A zero step would make REG a loop invariant, not a biv.  */
  return iv.step != const0_rtx;
}
1259 | |
1260 | /* Calculates value of IV at ITERATION-th iteration. */ |
1261 | |
rtx
get_iv_value (class rtx_iv *iv, rtx iteration)
{
  rtx val;

  /* We would need to generate some if_then_else patterns, and so far
     it is not needed anywhere.  */
  gcc_assert (!iv->first_special);

  /* Value in the extended mode: base + iteration * step.  */
  if (iv->step != const0_rtx && iteration != const0_rtx)
    val = simplify_gen_binary (code: PLUS, mode: iv->extend_mode, op0: iv->base,
			       op1: simplify_gen_binary (code: MULT, mode: iv->extend_mode,
						    op0: iv->step, op1: iteration));
  else
    val = iv->base;

  if (iv->extend_mode == iv->mode)
    return val;

  /* Narrow the value to the iv's own mode ...  */
  val = lowpart_subreg (outermode: iv->mode, op: val, innermode: iv->extend_mode);

  if (iv->extend == IV_UNKNOWN_EXTEND)
    return val;

  /* ... then re-extend it and apply the outer linear transformation
     delta + mult * extend (val).  */
  val = simplify_gen_unary (code: iv_extend_to_rtx_code (extend: iv->extend),
			    mode: iv->extend_mode, op: val, op_mode: iv->mode);
  val = simplify_gen_binary (code: PLUS, mode: iv->extend_mode, op0: iv->delta,
			     op1: simplify_gen_binary (code: MULT, mode: iv->extend_mode,
						  op0: iv->mult, op1: val));

  return val;
}
1294 | |
1295 | /* Free the data for an induction variable analysis. */ |
1296 | |
1297 | void |
1298 | iv_analysis_done (void) |
1299 | { |
1300 | if (!clean_slate) |
1301 | { |
1302 | clear_iv_info (); |
1303 | clean_slate = true; |
1304 | df_finish_pass (true); |
1305 | delete bivs; |
1306 | bivs = NULL; |
1307 | free (ptr: iv_ref_table); |
1308 | iv_ref_table = NULL; |
1309 | iv_ref_table_size = 0; |
1310 | } |
1311 | } |
1312 | |
1313 | /* Computes inverse to X modulo (1 << MOD). */ |
1314 | |
/* X^(2^(MOD-1) - 1) modulo 2^MOD equals the inverse of odd X, because
   the multiplicative group modulo 2^MOD has exponent 2^(MOD-2).  The
   loop accumulates X^(1 + 2 + 4 + ... + 2^(MOD-2)) by repeated
   squaring.  */

static uint64_t
inverse (uint64_t x, int mod)
{
  /* 2^MOD - 1, computed in two shifts so that MOD == 64 does not
     invoke an out-of-range shift.  */
  uint64_t mask = ((uint64_t) 1 << (mod - 1) << 1) - 1;
  uint64_t rslt = 1;

  for (int i = mod - 1; i > 0; i--)
    {
      rslt = (rslt * x) & mask;
      x = (x * x) & mask;
    }

  return rslt;
}
1331 | |
1332 | /* Checks whether any register in X is in set ALT. */ |
1333 | |
1334 | static bool |
1335 | altered_reg_used (const_rtx x, bitmap alt) |
1336 | { |
1337 | subrtx_iterator::array_type array; |
1338 | FOR_EACH_SUBRTX (iter, array, x, NONCONST) |
1339 | { |
1340 | const_rtx x = *iter; |
1341 | if (REG_P (x) && REGNO_REG_SET_P (alt, REGNO (x))) |
1342 | return true; |
1343 | } |
1344 | return false; |
1345 | } |
1346 | |
/* Note_stores callback: marks in the register set ALT the register
   assigned by EXPR (looking through a subreg), if any.  BY is unused.  */
1349 | static void |
1350 | mark_altered (rtx expr, const_rtx by ATTRIBUTE_UNUSED, void *alt) |
1351 | { |
1352 | if (GET_CODE (expr) == SUBREG) |
1353 | expr = SUBREG_REG (expr); |
1354 | if (!REG_P (expr)) |
1355 | return; |
1356 | |
1357 | SET_REGNO_REG_SET ((bitmap) alt, REGNO (expr)); |
1358 | } |
1359 | |
1360 | /* Checks whether RHS is simple enough to process. */ |
1361 | |
1362 | static bool |
1363 | simple_rhs_p (rtx rhs) |
1364 | { |
1365 | rtx op0, op1; |
1366 | |
1367 | if (function_invariant_p (rhs) |
1368 | || (REG_P (rhs) && !HARD_REGISTER_P (rhs))) |
1369 | return true; |
1370 | |
1371 | switch (GET_CODE (rhs)) |
1372 | { |
1373 | case PLUS: |
1374 | case MINUS: |
1375 | case AND: |
1376 | op0 = XEXP (rhs, 0); |
1377 | op1 = XEXP (rhs, 1); |
1378 | /* Allow reg OP const and reg OP reg. */ |
1379 | if (!(REG_P (op0) && !HARD_REGISTER_P (op0)) |
1380 | && !function_invariant_p (op0)) |
1381 | return false; |
1382 | if (!(REG_P (op1) && !HARD_REGISTER_P (op1)) |
1383 | && !function_invariant_p (op1)) |
1384 | return false; |
1385 | |
1386 | return true; |
1387 | |
1388 | case ASHIFT: |
1389 | case ASHIFTRT: |
1390 | case LSHIFTRT: |
1391 | case MULT: |
1392 | op0 = XEXP (rhs, 0); |
1393 | op1 = XEXP (rhs, 1); |
1394 | /* Allow reg OP const. */ |
1395 | if (!(REG_P (op0) && !HARD_REGISTER_P (op0))) |
1396 | return false; |
1397 | if (!function_invariant_p (op1)) |
1398 | return false; |
1399 | |
1400 | return true; |
1401 | |
1402 | default: |
1403 | return false; |
1404 | } |
1405 | } |
1406 | |
1407 | /* If any registers in *EXPR that have a single definition, try to replace |
1408 | them with the known-equivalent values. */ |
1409 | |
static void
replace_single_def_regs (rtx *expr)
{
  subrtx_var_iterator::array_type array;
 repeat:
  /* Restart the whole walk after every replacement: *EXPR has been
     rebuilt, so the iterator over the old rtx is no longer valid, and
     the substituted value may itself contain replaceable registers.  */
  FOR_EACH_SUBRTX_VAR (iter, array, *expr, NONCONST)
    {
      rtx x = *iter;
      if (REG_P (x))
	if (rtx new_x = df_find_single_def_src (x))
	  {
	    *expr = simplify_replace_rtx (*expr, x, new_x);
	    goto repeat;
	  }
    }
}
1426 | |
1427 | /* A subroutine of simplify_using_initial_values, this function examines INSN |
1428 | to see if it contains a suitable set that we can use to make a replacement. |
1429 | If it is suitable, return true and set DEST and SRC to the lhs and rhs of |
1430 | the set; return false otherwise. */ |
1431 | |
1432 | static bool |
1433 | suitable_set_for_replacement (rtx_insn *insn, rtx *dest, rtx *src) |
1434 | { |
1435 | rtx set = single_set (insn); |
1436 | rtx lhs = NULL_RTX, rhs; |
1437 | |
1438 | if (!set) |
1439 | return false; |
1440 | |
1441 | lhs = SET_DEST (set); |
1442 | if (!REG_P (lhs)) |
1443 | return false; |
1444 | |
1445 | rhs = find_reg_equal_equiv_note (insn); |
1446 | if (rhs) |
1447 | rhs = XEXP (rhs, 0); |
1448 | else |
1449 | rhs = SET_SRC (set); |
1450 | |
1451 | if (!simple_rhs_p (rhs)) |
1452 | return false; |
1453 | |
1454 | *dest = lhs; |
1455 | *src = rhs; |
1456 | return true; |
1457 | } |
1458 | |
1459 | /* Using the data returned by suitable_set_for_replacement, replace DEST |
1460 | with SRC in *EXPR and return the new expression. Also call |
1461 | replace_single_def_regs if the replacement changed something. */ |
1462 | static void |
1463 | replace_in_expr (rtx *expr, rtx dest, rtx src) |
1464 | { |
1465 | rtx old = *expr; |
1466 | *expr = simplify_replace_rtx (*expr, dest, src); |
1467 | if (old == *expr) |
1468 | return; |
1469 | replace_single_def_regs (expr); |
1470 | } |
1471 | |
1472 | /* Checks whether A implies B. */ |
1473 | |
static bool
implies_p (rtx a, rtx b)
{
  rtx op0, op1, opb0, opb1;
  machine_mode mode;

  /* Trivially, A implies A.  */
  if (rtx_equal_p (a, b))
    return true;

  /* If A asserts equality of a register (or lowpart subreg of one)
     with some value, substituting that value into B may prove B.  */
  if (GET_CODE (a) == EQ)
    {
      op0 = XEXP (a, 0);
      op1 = XEXP (a, 1);

      if (REG_P (op0)
	  || (GET_CODE (op0) == SUBREG
	      && REG_P (SUBREG_REG (op0))))
	{
	  rtx r = simplify_replace_rtx (b, op0, op1);
	  if (r == const_true_rtx)
	    return true;
	}

      if (REG_P (op1)
	  || (GET_CODE (op1) == SUBREG
	      && REG_P (SUBREG_REG (op1))))
	{
	  rtx r = simplify_replace_rtx (b, op1, op0);
	  if (r == const_true_rtx)
	    return true;
	}
    }

  /* Anything implies a tautology.  */
  if (b == const_true_rtx)
    return true;

  /* The remaining deductions only apply when both A and B are
     comparisons.  */
  if ((GET_RTX_CLASS (GET_CODE (a)) != RTX_COMM_COMPARE
       && GET_RTX_CLASS (GET_CODE (a)) != RTX_COMPARE)
      || (GET_RTX_CLASS (GET_CODE (b)) != RTX_COMM_COMPARE
	  && GET_RTX_CLASS (GET_CODE (b)) != RTX_COMPARE))
    return false;

  op0 = XEXP (a, 0);
  op1 = XEXP (a, 1);
  opb0 = XEXP (b, 0);
  opb1 = XEXP (b, 1);

  /* Determine a mode common to both comparisons; VOIDmode if they
     do not agree.  */
  mode = GET_MODE (op0);
  if (mode != GET_MODE (opb0))
    mode = VOIDmode;
  else if (mode == VOIDmode)
    {
      mode = GET_MODE (op1);
      if (mode != GET_MODE (opb1))
	mode = VOIDmode;
    }

  /* A < B implies A + 1 <= B.  */
  if ((GET_CODE (a) == GT || GET_CODE (a) == LT)
      && (GET_CODE (b) == GE || GET_CODE (b) == LE))
    {

      /* Canonicalize both comparisons to the "less" orientation.  */
      if (GET_CODE (a) == GT)
	std::swap (a&: op0, b&: op1);

      if (GET_CODE (b) == GE)
	std::swap (a&: opb0, b&: opb1);

      if (SCALAR_INT_MODE_P (mode)
	  && rtx_equal_p (op1, opb1)
	  && simplify_gen_binary (code: MINUS, mode, op0: opb0, op1: op0) == const1_rtx)
	return true;
      return false;
    }

  /* A < B or A > B imply A != B.  TODO: Likewise
     A + n < B implies A != B + n if neither wraps.  */
  if (GET_CODE (b) == NE
      && (GET_CODE (a) == GT || GET_CODE (a) == GTU
	  || GET_CODE (a) == LT || GET_CODE (a) == LTU))
    {
      if (rtx_equal_p (op0, opb0)
	  && rtx_equal_p (op1, opb1))
	return true;
    }

  /* For unsigned comparisons, A != 0 implies A > 0 and A >= 1.  */
  if (GET_CODE (a) == NE
      && op1 == const0_rtx)
    {
      if ((GET_CODE (b) == GTU
	   && opb1 == const0_rtx)
	  || (GET_CODE (b) == GEU
	      && opb1 == const1_rtx))
	return rtx_equal_p (op0, opb0);
    }

  /* A != N is equivalent to A - (N + 1) <u -1.  */
  if (GET_CODE (a) == NE
      && CONST_INT_P (op1)
      && GET_CODE (b) == LTU
      && opb1 == constm1_rtx
      && GET_CODE (opb0) == PLUS
      && CONST_INT_P (XEXP (opb0, 1))
      /* Avoid overflows.  */
      && ((unsigned HOST_WIDE_INT) INTVAL (XEXP (opb0, 1))
	  != ((unsigned HOST_WIDE_INT)1
	      << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
      && INTVAL (XEXP (opb0, 1)) + 1 == -INTVAL (op1))
    return rtx_equal_p (op0, XEXP (opb0, 0));

  /* Likewise, A != N implies A - N > 0.  */
  if (GET_CODE (a) == NE
      && CONST_INT_P (op1))
    {
      if (GET_CODE (b) == GTU
	  && GET_CODE (opb0) == PLUS
	  && opb1 == const0_rtx
	  && CONST_INT_P (XEXP (opb0, 1))
	  /* Avoid overflows.  */
	  && ((unsigned HOST_WIDE_INT) INTVAL (XEXP (opb0, 1))
	      != (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
	  && rtx_equal_p (XEXP (opb0, 0), op0))
	return INTVAL (op1) == -INTVAL (XEXP (opb0, 1));
      if (GET_CODE (b) == GEU
	  && GET_CODE (opb0) == PLUS
	  && opb1 == const1_rtx
	  && CONST_INT_P (XEXP (opb0, 1))
	  /* Avoid overflows.  */
	  && ((unsigned HOST_WIDE_INT) INTVAL (XEXP (opb0, 1))
	      != (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
	  && rtx_equal_p (XEXP (opb0, 0), op0))
	return INTVAL (op1) == -INTVAL (XEXP (opb0, 1));
    }

  /* A >s X, where X is positive, implies A <u Y, if Y is negative.  */
  if ((GET_CODE (a) == GT || GET_CODE (a) == GE)
      && CONST_INT_P (op1)
      && ((GET_CODE (a) == GT && op1 == constm1_rtx)
	  || INTVAL (op1) >= 0)
      && GET_CODE (b) == LTU
      && CONST_INT_P (opb1)
      && rtx_equal_p (op0, opb0))
    return INTVAL (opb1) < 0;

  return false;
}
1621 | |
1622 | /* Canonicalizes COND so that |
1623 | |
1624 | (1) Ensure that operands are ordered according to |
1625 | swap_commutative_operands_p. |
1626 | (2) (LE x const) will be replaced with (LT x <const+1>) and similarly |
1627 | for GE, GEU, and LEU. */ |
1628 | |
rtx
canon_condition (rtx cond)
{
  rtx op0, op1;
  enum rtx_code code;
  machine_mode mode;

  code = GET_CODE (cond);
  op0 = XEXP (cond, 0);
  op1 = XEXP (cond, 1);

  /* (1) Order the operands canonically, swapping the comparison code
     to compensate.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      code = swap_condition (code);
      std::swap (a&: op0, b&: op1);
    }

  mode = GET_MODE (op0);
  if (mode == VOIDmode)
    mode = GET_MODE (op1);
  gcc_assert (mode != VOIDmode);

  /* (2) Turn non-strict comparisons against a constant into strict
     ones by adjusting the constant -- unless the constant is already
     the extreme value of MODE, where the adjustment would wrap.  */
  if (CONST_SCALAR_INT_P (op1) && GET_MODE_CLASS (mode) != MODE_CC)
    {
      rtx_mode_t const_val (op1, mode);

      switch (code)
	{
	case LE:
	  if (wi::ne_p (x: const_val, y: wi::max_value (mode, sgn: SIGNED)))
	    {
	      code = LT;
	      op1 = immed_wide_int_const (wi::add (x: const_val, y: 1), mode);
	    }
	  break;

	case GE:
	  if (wi::ne_p (x: const_val, y: wi::min_value (mode, sgn: SIGNED)))
	    {
	      code = GT;
	      op1 = immed_wide_int_const (wi::sub (x: const_val, y: 1), mode);
	    }
	  break;

	case LEU:
	  if (wi::ne_p (x: const_val, y: -1))
	    {
	      code = LTU;
	      op1 = immed_wide_int_const (wi::add (x: const_val, y: 1), mode);
	    }
	  break;

	case GEU:
	  if (wi::ne_p (x: const_val, y: 0))
	    {
	      code = GTU;
	      op1 = immed_wide_int_const (wi::sub (x: const_val, y: 1), mode);
	    }
	  break;

	default:
	  break;
	}
    }

  /* Rebuild the condition only if something changed; the result is
     always expressed in SImode.  */
  if (op0 != XEXP (cond, 0)
      || op1 != XEXP (cond, 1)
      || code != GET_CODE (cond)
      || GET_MODE (cond) != SImode)
    cond = gen_rtx_fmt_ee (code, SImode, op0, op1);

  return cond;
}
1702 | |
1703 | /* Reverses CONDition; returns NULL if we cannot. */ |
1704 | |
1705 | static rtx |
1706 | reversed_condition (rtx cond) |
1707 | { |
1708 | enum rtx_code reversed; |
1709 | reversed = reversed_comparison_code (cond, NULL); |
1710 | if (reversed == UNKNOWN) |
1711 | return NULL_RTX; |
1712 | else |
1713 | return gen_rtx_fmt_ee (reversed, |
1714 | GET_MODE (cond), XEXP (cond, 0), |
1715 | XEXP (cond, 1)); |
1716 | } |
1717 | |
1718 | /* Tries to use the fact that COND holds to simplify EXPR. ALTERED is the |
1719 | set of altered regs. */ |
1720 | |
void
simplify_using_condition (rtx cond, rtx *expr, regset altered)
{
  rtx rev, reve, exp = *expr;

  /* If some register gets altered later, we do not really speak about its
     value at the time of comparison.  */
  if (altered && altered_reg_used (x: cond, alt: altered))
    return;

  /* An equality of a register with a constant can be substituted
     directly into *EXPR.  */
  if (GET_CODE (cond) == EQ
      && REG_P (XEXP (cond, 0)) && CONSTANT_P (XEXP (cond, 1)))
    {
      *expr = simplify_replace_rtx (*expr, XEXP (cond, 0), XEXP (cond, 1));
      return;
    }

  if (!COMPARISON_P (exp))
    return;

  /* Compute the reversed forms of both conditions, then canonicalize
     everything so that equality tests below are meaningful.  */
  rev = reversed_condition (cond);
  reve = reversed_condition (cond: exp);

  cond = canon_condition (cond);
  exp = canon_condition (cond: exp);
  if (rev)
    rev = canon_condition (cond: rev);
  if (reve)
    reve = canon_condition (cond: reve);

  /* COND holds, so *EXPR identical to COND is true ...  */
  if (rtx_equal_p (exp, cond))
    {
      *expr = const_true_rtx;
      return;
    }

  /* ... and *EXPR identical to the negation of COND is false.  */
  if (rev && rtx_equal_p (exp, rev))
    {
      *expr = const0_rtx;
      return;
    }

  if (implies_p (a: cond, b: exp))
    {
      *expr = const_true_rtx;
      return;
    }

  if (reve && implies_p (a: cond, b: reve))
    {
      *expr = const0_rtx;
      return;
    }

  /* A proof by contradiction.  If *EXPR implies (not cond), *EXPR must
     be false.  */
  if (rev && implies_p (a: exp, b: rev))
    {
      *expr = const0_rtx;
      return;
    }

  /* Similarly, If (not *EXPR) implies (not cond), *EXPR must be true.  */
  if (rev && reve && implies_p (a: reve, b: rev))
    {
      *expr = const_true_rtx;
      return;
    }

  /* We would like to have some other tests here.  TODO.  */

  return;
}
1794 | |
1795 | /* Use relationship between A and *B to eventually eliminate *B. |
1796 | OP is the operation we consider. */ |
1797 | |
1798 | static void |
1799 | eliminate_implied_condition (enum rtx_code op, rtx a, rtx *b) |
1800 | { |
1801 | switch (op) |
1802 | { |
1803 | case AND: |
1804 | /* If A implies *B, we may replace *B by true. */ |
1805 | if (implies_p (a, b: *b)) |
1806 | *b = const_true_rtx; |
1807 | break; |
1808 | |
1809 | case IOR: |
1810 | /* If *B implies A, we may replace *B by false. */ |
1811 | if (implies_p (a: *b, b: a)) |
1812 | *b = const0_rtx; |
1813 | break; |
1814 | |
1815 | default: |
1816 | gcc_unreachable (); |
1817 | } |
1818 | } |
1819 | |
1820 | /* Eliminates the conditions in TAIL that are implied by HEAD. OP is the |
1821 | operation we consider. */ |
1822 | |
1823 | static void |
1824 | eliminate_implied_conditions (enum rtx_code op, rtx *head, rtx tail) |
1825 | { |
1826 | rtx elt; |
1827 | |
1828 | for (elt = tail; elt; elt = XEXP (elt, 1)) |
1829 | eliminate_implied_condition (op, a: *head, b: &XEXP (elt, 0)); |
1830 | for (elt = tail; elt; elt = XEXP (elt, 1)) |
1831 | eliminate_implied_condition (op, XEXP (elt, 0), b: head); |
1832 | } |
1833 | |
1834 | /* Simplifies *EXPR using initial values at the start of the LOOP. If *EXPR |
1835 | is a list, its elements are assumed to be combined using OP. */ |
1836 | |
static void
simplify_using_initial_values (class loop *loop, enum rtx_code op, rtx *expr)
{
  bool expression_valid;
  rtx head, tail, last_valid_expr;
  rtx_expr_list *cond_list;
  rtx_insn *insn;
  rtx neutral, aggr;
  regset altered, this_altered;
  edge e;

  if (!*expr)
    return;

  if (CONSTANT_P (*expr))
    return;

  /* An EXPR_LIST is simplified elementwise: the head first (after
     eliminating conditions implied between list members), then the
     tail recursively, folding away the neutral element of OP and
     short-circuiting on the absorbing one.  */
  if (GET_CODE (*expr) == EXPR_LIST)
    {
      head = XEXP (*expr, 0);
      tail = XEXP (*expr, 1);

      eliminate_implied_conditions (op, head: &head, tail);

      switch (op)
	{
	case AND:
	  neutral = const_true_rtx;
	  aggr = const0_rtx;
	  break;

	case IOR:
	  neutral = const0_rtx;
	  aggr = const_true_rtx;
	  break;

	default:
	  gcc_unreachable ();
	}

      simplify_using_initial_values (loop, op: UNKNOWN, expr: &head);
      if (head == aggr)
	{
	  /* The absorbing element determines the whole list.  */
	  XEXP (*expr, 0) = aggr;
	  XEXP (*expr, 1) = NULL_RTX;
	  return;
	}
      else if (head == neutral)
	{
	  /* The neutral element can simply be dropped.  */
	  *expr = tail;
	  simplify_using_initial_values (loop, op, expr);
	  return;
	}
      simplify_using_initial_values (loop, op, expr: &tail);

      if (tail && XEXP (tail, 0) == aggr)
	{
	  *expr = tail;
	  return;
	}

      XEXP (*expr, 0) = head;
      XEXP (*expr, 1) = tail;
      return;
    }

  gcc_assert (op == UNKNOWN);

  replace_single_def_regs (expr);
  if (CONSTANT_P (*expr))
    return;

  e = loop_preheader_edge (loop);
  if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
    return;

  altered = ALLOC_REG_SET (&reg_obstack);
  this_altered = ALLOC_REG_SET (&reg_obstack);

  /* Walk backwards along the chain of single predecessors leading to
     the loop preheader, using branch conditions known to hold on the
     taken path (accumulated in COND_LIST) and simple assignments to
     simplify *EXPR.  LAST_VALID_EXPR tracks the best simplification
     whose registers have not been overwritten since.  */
  expression_valid = true;
  last_valid_expr = *expr;
  cond_list = NULL;
  while (1)
    {
      insn = BB_END (e->src);
      if (any_condjump_p (insn) && onlyjump_p (insn))
	{
	  rtx cond = get_condition (BB_END (e->src), NULL, false, true);

	  /* On the fallthru edge the branch was not taken, so the
	     reversed condition is what holds.  */
	  if (cond && (e->flags & EDGE_FALLTHRU))
	    cond = reversed_condition (cond);
	  if (cond)
	    {
	      rtx old = *expr;
	      simplify_using_condition (cond, expr, altered);
	      if (old != *expr)
		{
		  rtx note;
		  if (CONSTANT_P (*expr))
		    goto out;
		  /* *EXPR changed; retry all previously seen
		     conditions against the new form.  */
		  for (note = cond_list; note; note = XEXP (note, 1))
		    {
		      simplify_using_condition (XEXP (note, 0), expr, altered);
		      if (CONSTANT_P (*expr))
			goto out;
		    }
		}
	      cond_list = alloc_EXPR_LIST (0, cond, cond_list);
	    }
	}

      FOR_BB_INSNS_REVERSE (e->src, insn)
	{
	  rtx src, dest;
	  rtx old = *expr;

	  if (!INSN_P (insn))
	    continue;

	  CLEAR_REG_SET (this_altered);
	  note_stores (insn, mark_altered, this_altered);
	  if (CALL_P (insn))
	    {
	      /* Kill all registers that might be clobbered by the call.
		 We don't track modes of hard registers, so we need to be
		 conservative and assume that partial kills are full kills.  */
	      function_abi callee_abi = insn_callee_abi (insn);
	      IOR_REG_SET_HRS (this_altered,
			       callee_abi.full_and_partial_reg_clobbers ());
	    }

	  if (suitable_set_for_replacement (insn, dest: &dest, src: &src))
	    {
	      rtx_expr_list **pnote, **pnote_next;

	      replace_in_expr (expr, dest, src);
	      if (CONSTANT_P (*expr))
		goto out;

	      for (pnote = &cond_list; *pnote; pnote = pnote_next)
		{
		  rtx_expr_list *note = *pnote;
		  rtx old_cond = XEXP (note, 0);

		  pnote_next = (rtx_expr_list **)&XEXP (note, 1);
		  replace_in_expr (expr: &XEXP (note, 0), dest, src);

		  /* We can no longer use a condition that has been simplified
		     to a constant, and simplify_using_condition will abort if
		     we try.  */
		  if (CONSTANT_P (XEXP (note, 0)))
		    {
		      *pnote = *pnote_next;
		      pnote_next = pnote;
		      free_EXPR_LIST_node (note);
		    }
		  /* Retry simplifications with this condition if either the
		     expression or the condition changed.  */
		  else if (old_cond != XEXP (note, 0) || old != *expr)
		    simplify_using_condition (XEXP (note, 0), expr, altered);
		}
	    }
	  else
	    {
	      rtx_expr_list **pnote, **pnote_next;

	      /* If we did not use this insn to make a replacement, any overlap
		 between stores in this insn and our expression will cause the
		 expression to become invalid.  */
	      if (altered_reg_used (x: *expr, alt: this_altered))
		goto out;

	      /* Likewise for the conditions.  */
	      for (pnote = &cond_list; *pnote; pnote = pnote_next)
		{
		  rtx_expr_list *note = *pnote;
		  rtx old_cond = XEXP (note, 0);

		  pnote_next = (rtx_expr_list **)&XEXP (note, 1);
		  if (altered_reg_used (x: old_cond, alt: this_altered))
		    {
		      *pnote = *pnote_next;
		      pnote_next = pnote;
		      free_EXPR_LIST_node (note);
		    }
		}
	    }

	  if (CONSTANT_P (*expr))
	    goto out;

	  IOR_REG_SET (altered, this_altered);

	  /* If the expression now contains regs that have been altered, we
	     can't return it to the caller.  However, it is still valid for
	     further simplification, so keep searching to see if we can
	     eventually turn it into a constant.  */
	  if (altered_reg_used (x: *expr, alt: altered))
	    expression_valid = false;
	  if (expression_valid)
	    last_valid_expr = *expr;
	}

      if (!single_pred_p (bb: e->src)
	  || single_pred (bb: e->src) == ENTRY_BLOCK_PTR_FOR_FN (cfun))
	break;
      e = single_pred_edge (bb: e->src);
    }

 out:
  free_EXPR_LIST_list (&cond_list);
  if (!CONSTANT_P (*expr))
    *expr = last_valid_expr;
  FREE_REG_SET (altered);
  FREE_REG_SET (this_altered);
}
2053 | |
2054 | /* Transforms invariant IV into MODE. Adds assumptions based on the fact |
2055 | that IV occurs as left operands of comparison COND and its signedness |
2056 | is SIGNED_P to DESC. */ |
2057 | |
static void
shorten_into_mode (class rtx_iv *iv, scalar_int_mode mode,
		   enum rtx_code cond, bool signed_p, class niter_desc *desc)
{
  rtx mmin, mmax, cond_over, cond_under;

  /* Conditions under which IV->base lies outside the range
     representable in MODE.  */
  get_mode_bounds (mode, signed_p, iv->extend_mode, &mmin, &mmax);
  cond_under = simplify_gen_relational (code: LT, SImode, op_mode: iv->extend_mode,
					op0: iv->base, op1: mmin);
  cond_over = simplify_gen_relational (code: GT, SImode, op_mode: iv->extend_mode,
				       op0: iv->base, op1: mmax);

  /* Depending on the direction of COND, a base outside the range of
     MODE either makes the exit condition hold immediately (recorded as
     a noloop assumption) or prevents it from ever holding (recorded as
     a possible-infinite-loop assumption).  */
  switch (cond)
    {
    case LE:
    case LT:
    case LEU:
    case LTU:
      if (cond_under != const0_rtx)
	desc->infinite =
	  alloc_EXPR_LIST (0, cond_under, desc->infinite);
      if (cond_over != const0_rtx)
	desc->noloop_assumptions =
	  alloc_EXPR_LIST (0, cond_over, desc->noloop_assumptions);
      break;

    case GE:
    case GT:
    case GEU:
    case GTU:
      if (cond_over != const0_rtx)
	desc->infinite =
	  alloc_EXPR_LIST (0, cond_over, desc->infinite);
      if (cond_under != const0_rtx)
	desc->noloop_assumptions =
	  alloc_EXPR_LIST (0, cond_under, desc->noloop_assumptions);
      break;

    case NE:
      /* For inequality, overshooting in either direction may make the
	 loop infinite.  */
      if (cond_over != const0_rtx)
	desc->infinite =
	  alloc_EXPR_LIST (0, cond_over, desc->infinite);
      if (cond_under != const0_rtx)
	desc->infinite =
	  alloc_EXPR_LIST (0, cond_under, desc->infinite);
      break;

    default:
      gcc_unreachable ();
    }

  /* The iv is now treated as MODE-wide, extended per SIGNED_P.  */
  iv->mode = mode;
  iv->extend = signed_p ? IV_SIGN_EXTEND : IV_ZERO_EXTEND;
}
2112 | |
/* Transforms IV0 and IV1 compared by COND so that they are both compared as
   subregs of the same mode if possible (sometimes it is necessary to add
   some assumptions to DESC).  On success fills in DESC->mode and
   DESC->signed_p and returns true; returns false when the ivs cannot be
   brought to a common mode.  */

static bool
canonicalize_iv_subregs (class rtx_iv *iv0, class rtx_iv *iv1,
			 enum rtx_code cond, class niter_desc *desc)
{
  scalar_int_mode comp_mode;
  bool signed_p;

  /* If the ivs behave specially in the first iteration, or are
     added/multiplied after extending, we ignore them.  */
  if (iv0->first_special || iv0->mult != const1_rtx || iv0->delta != const0_rtx)
    return false;
  if (iv1->first_special || iv1->mult != const1_rtx || iv1->delta != const0_rtx)
    return false;

  /* If there is some extend, it must match signedness of the comparison.  */
  switch (cond)
    {
    case LE:
    case LT:
      /* Signed comparison: a zero-extended iv would not compare
	 consistently.  */
      if (iv0->extend == IV_ZERO_EXTEND
	  || iv1->extend == IV_ZERO_EXTEND)
	return false;
      signed_p = true;
      break;

    case LEU:
    case LTU:
      /* Unsigned comparison: a sign-extended iv would not compare
	 consistently.  */
      if (iv0->extend == IV_SIGN_EXTEND
	  || iv1->extend == IV_SIGN_EXTEND)
	return false;
      signed_p = false;
      break;

    case NE:
      /* Equality does not care about signedness, but both ivs must be
	 extended the same way (or not at all).  */
      if (iv0->extend != IV_UNKNOWN_EXTEND
	  && iv1->extend != IV_UNKNOWN_EXTEND
	  && iv0->extend != iv1->extend)
	return false;

      /* Pick the signedness from whichever extend is known; default to
	 unsigned when neither is.  */
      signed_p = false;
      if (iv0->extend != IV_UNKNOWN_EXTEND)
	signed_p = iv0->extend == IV_SIGN_EXTEND;
      if (iv1->extend != IV_UNKNOWN_EXTEND)
	signed_p = iv1->extend == IV_SIGN_EXTEND;
      break;

    default:
      gcc_unreachable ();
    }

  /* Values of both variables should be computed in the same mode.  These
     might indeed be different, if we have comparison like

     (compare (subreg:SI (iv0)) (subreg:SI (iv1)))

     and iv0 and iv1 are both ivs iterating in SI mode, but calculated
     in different modes.  This does not seem impossible to handle, but
     it hardly ever occurs in practice.

     The only exception is the case when one of operands is invariant.
     For example pentium 3 generates comparisons like
     (lt (subreg:HI (reg:SI)) 100).  Here we assign HImode to 100, but we
     definitely do not want this to prevent the optimization.  */
  /* Use the wider of the two extend modes as the common computation
     mode.  */
  comp_mode = iv0->extend_mode;
  if (GET_MODE_BITSIZE (mode: comp_mode) < GET_MODE_BITSIZE (mode: iv1->extend_mode))
    comp_mode = iv1->extend_mode;

  /* Widen an invariant (step == 0, no inner subreg) operand into the
     common mode; a genuine iv computed in a narrower mode cannot be
     widened safely, so give up in that case.  */
  if (iv0->extend_mode != comp_mode)
    {
      if (iv0->mode != iv0->extend_mode
	  || iv0->step != const0_rtx)
	return false;

      iv0->base = simplify_gen_unary (code: signed_p ? SIGN_EXTEND : ZERO_EXTEND,
				      mode: comp_mode, op: iv0->base, op_mode: iv0->mode);
      iv0->extend_mode = comp_mode;
    }

  if (iv1->extend_mode != comp_mode)
    {
      if (iv1->mode != iv1->extend_mode
	  || iv1->step != const0_rtx)
	return false;

      iv1->base = simplify_gen_unary (code: signed_p ? SIGN_EXTEND : ZERO_EXTEND,
				      mode: comp_mode, op: iv1->base, op_mode: iv1->mode);
      iv1->extend_mode = comp_mode;
    }

  /* Check that both ivs belong to a range of a single mode.  If one of the
     operands is an invariant, we may need to shorten it into the common
     mode.  */
  if (iv0->mode == iv0->extend_mode
      && iv0->step == const0_rtx
      && iv0->mode != iv1->mode)
    shorten_into_mode (iv: iv0, mode: iv1->mode, cond, signed_p, desc);

  if (iv1->mode == iv1->extend_mode
      && iv1->step == const0_rtx
      && iv0->mode != iv1->mode)
    /* IV1 is the right operand, so the condition must be swapped for it.  */
    shorten_into_mode (iv: iv1, mode: iv0->mode, cond: swap_condition (cond), signed_p, desc);

  if (iv0->mode != iv1->mode)
    return false;

  desc->mode = iv0->mode;
  desc->signed_p = signed_p;

  return true;
}
2227 | |
2228 | /* Tries to estimate the maximum number of iterations in LOOP, and return the |
2229 | result. This function is called from iv_number_of_iterations with |
2230 | a number of fields in DESC already filled in. OLD_NITER is the original |
2231 | expression for the number of iterations, before we tried to simplify it. */ |
2232 | |
2233 | static uint64_t |
2234 | determine_max_iter (class loop *loop, class niter_desc *desc, rtx old_niter) |
2235 | { |
2236 | rtx niter = desc->niter_expr; |
2237 | rtx mmin, mmax, cmp; |
2238 | uint64_t nmax, inc; |
2239 | uint64_t andmax = 0; |
2240 | |
2241 | /* We used to look for constant operand 0 of AND, |
2242 | but canonicalization should always make this impossible. */ |
2243 | gcc_checking_assert (GET_CODE (niter) != AND |
2244 | || !CONST_INT_P (XEXP (niter, 0))); |
2245 | |
2246 | if (GET_CODE (niter) == AND |
2247 | && CONST_INT_P (XEXP (niter, 1))) |
2248 | { |
2249 | andmax = UINTVAL (XEXP (niter, 1)); |
2250 | niter = XEXP (niter, 0); |
2251 | } |
2252 | |
2253 | get_mode_bounds (desc->mode, desc->signed_p, desc->mode, &mmin, &mmax); |
2254 | nmax = UINTVAL (mmax) - UINTVAL (mmin); |
2255 | |
2256 | if (GET_CODE (niter) == UDIV) |
2257 | { |
2258 | if (!CONST_INT_P (XEXP (niter, 1))) |
2259 | return nmax; |
2260 | inc = INTVAL (XEXP (niter, 1)); |
2261 | niter = XEXP (niter, 0); |
2262 | } |
2263 | else |
2264 | inc = 1; |
2265 | |
2266 | /* We could use a binary search here, but for now improving the upper |
2267 | bound by just one eliminates one important corner case. */ |
2268 | cmp = simplify_gen_relational (code: desc->signed_p ? LT : LTU, VOIDmode, |
2269 | op_mode: desc->mode, op0: old_niter, op1: mmax); |
2270 | simplify_using_initial_values (loop, op: UNKNOWN, expr: &cmp); |
2271 | if (cmp == const_true_rtx) |
2272 | { |
2273 | nmax--; |
2274 | |
2275 | if (dump_file) |
2276 | fprintf (stream: dump_file, format: ";; improved upper bound by one.\n" ); |
2277 | } |
2278 | nmax /= inc; |
2279 | if (andmax) |
2280 | nmax = MIN (nmax, andmax); |
2281 | if (dump_file) |
2282 | fprintf (stream: dump_file, format: ";; Determined upper bound %" PRId64".\n" , |
2283 | nmax); |
2284 | return nmax; |
2285 | } |
2286 | |
/* Computes number of iterations of the CONDITION in INSN in LOOP and stores
   the result into DESC.  Very similar to determine_number_of_iterations
   (basically its rtl version), complicated by things like subregs.  On
   failure DESC->simple_p is set to false; on success DESC describes the
   exit condition, its assumptions and the niter expression.  */

static void
iv_number_of_iterations (class loop *loop, rtx_insn *insn, rtx condition,
			 class niter_desc *desc)
{
  rtx op0, op1, delta, step, bound, may_xform, tmp, tmp0, tmp1;
  class rtx_iv iv0, iv1;
  rtx assumption, may_not_xform;
  enum rtx_code cond;
  machine_mode nonvoid_mode;
  scalar_int_mode comp_mode;
  rtx mmin, mmax, mode_mmin, mode_mmax;
  uint64_t s, size, d, inv, max, up, down;
  int64_t inc, step_val;
  int was_sharp = false;
  rtx old_niter;
  bool step_is_pow2;

  /* The meaning of these assumptions is this:
     if !assumptions
       then the rest of information does not have to be valid
     if noloop_assumptions then the loop does not roll
     if infinite then this exit is never used */

  desc->assumptions = NULL_RTX;
  desc->noloop_assumptions = NULL_RTX;
  desc->infinite = NULL_RTX;
  desc->simple_p = true;

  desc->const_iter = false;
  desc->niter_expr = NULL_RTX;

  cond = GET_CODE (condition);
  gcc_assert (COMPARISON_P (condition));

  nonvoid_mode = GET_MODE (XEXP (condition, 0));
  if (nonvoid_mode == VOIDmode)
    nonvoid_mode = GET_MODE (XEXP (condition, 1));
  /* The constant comparisons should be folded.  */
  gcc_assert (nonvoid_mode != VOIDmode);

  /* We only handle integers or pointers.  */
  scalar_int_mode mode;
  if (!is_a <scalar_int_mode> (m: nonvoid_mode, result: &mode))
    goto fail;

  /* Analyze both operands of the condition as induction variables.  */
  op0 = XEXP (condition, 0);
  if (!iv_analyze (insn, mode, val: op0, iv: &iv0))
    goto fail;

  op1 = XEXP (condition, 1);
  if (!iv_analyze (insn, mode, val: op1, iv: &iv1))
    goto fail;

  /* The arithmetic below is done in host integers; give up on wider
     modes.  */
  if (GET_MODE_BITSIZE (mode: iv0.extend_mode) > HOST_BITS_PER_WIDE_INT
      || GET_MODE_BITSIZE (mode: iv1.extend_mode) > HOST_BITS_PER_WIDE_INT)
    goto fail;

  /* Check condition and normalize it.  */

  switch (cond)
    {
    case GE:
    case GT:
    case GEU:
    case GTU:
      /* Swap the operands so that only LT/LE/NE style conditions remain.  */
      std::swap (a&: iv0, b&: iv1);
      cond = swap_condition (cond);
      break;
    case NE:
    case LE:
    case LEU:
    case LT:
    case LTU:
      break;
    default:
      goto fail;
    }

  /* Handle extends.  This is relatively nontrivial, so we only try in some
     easy cases, when we can canonicalize the ivs (possibly by adding some
     assumptions) to shape subreg (base + i * step).  This function also fills
     in desc->mode and desc->signed_p.  */

  if (!canonicalize_iv_subregs (iv0: &iv0, iv1: &iv1, cond, desc))
    goto fail;

  comp_mode = iv0.extend_mode;
  mode = iv0.mode;
  size = GET_MODE_PRECISION (mode);
  get_mode_bounds (mode, (cond == LE || cond == LT), comp_mode, &mmin, &mmax);
  mode_mmin = lowpart_subreg (outermode: mode, op: mmin, innermode: comp_mode);
  mode_mmax = lowpart_subreg (outermode: mode, op: mmax, innermode: comp_mode);

  if (!CONST_INT_P (iv0.step) || !CONST_INT_P (iv1.step))
    goto fail;

  /* We can take care of the case of two induction variables chasing each other
     if the test is NE.  I have never seen a loop using it, but still it is
     cool.  */
  if (iv0.step != const0_rtx && iv1.step != const0_rtx)
    {
      if (cond != NE)
	goto fail;

      /* Fold the two steps into one; the difference iv0 - iv1 then has
	 step iv0.step - iv1.step.  */
      iv0.step = simplify_gen_binary (code: MINUS, mode: comp_mode, op0: iv0.step, op1: iv1.step);
      iv1.step = const0_rtx;
    }

  iv0.step = lowpart_subreg (outermode: mode, op: iv0.step, innermode: comp_mode);
  iv1.step = lowpart_subreg (outermode: mode, op: iv1.step, innermode: comp_mode);

  /* This is either infinite loop or the one that ends immediately, depending
     on initial values.  Unswitching should remove this kind of conditions.  */
  if (iv0.step == const0_rtx && iv1.step == const0_rtx)
    goto fail;

  /* Determine the direction of the step of the single moving iv, and
     whether its magnitude is a power of two (which allows extra
     reasoning about overflow below).  */
  if (cond != NE)
    {
      if (iv0.step == const0_rtx)
	step_val = -INTVAL (iv1.step);
      else
	step_val = INTVAL (iv0.step);

      /* Ignore loops of while (i-- < 10) type.  */
      if (step_val < 0)
	goto fail;

      step_is_pow2 = !(step_val & (step_val - 1));
    }
  else
    {
      /* We do not care about whether the step is power of two in this
	 case.  */
      step_is_pow2 = false;
      step_val = 0;
    }

  /* Some more condition normalization.  We must record some assumptions
     due to overflows.  */
  switch (cond)
    {
    case LT:
    case LTU:
      /* We want to take care only of non-sharp relationals; this is easy,
	 as in cases the overflow would make the transformation unsafe
	 the loop does not roll.  Seemingly it would make more sense to want
	 to take care of sharp relationals instead, as NE is more similar to
	 them, but the problem is that here the transformation would be more
	 difficult due to possibly infinite loops.  */
      if (iv0.step == const0_rtx)
	{
	  /* Invariant < iv: turning < into <= means adding one to the
	     invariant; record that the loop does not roll when the
	     invariant is already at the mode maximum.  */
	  tmp = lowpart_subreg (outermode: mode, op: iv0.base, innermode: comp_mode);
	  assumption = simplify_gen_relational (code: EQ, SImode, op_mode: mode, op0: tmp,
						op1: mode_mmax);
	  if (assumption == const_true_rtx)
	    goto zero_iter_simplify;
	  iv0.base = simplify_gen_binary (code: PLUS, mode: comp_mode,
					  op0: iv0.base, const1_rtx);
	}
      else
	{
	  /* iv < invariant: turning < into <= means subtracting one from
	     the invariant; guard against the invariant being the mode
	     minimum.  */
	  tmp = lowpart_subreg (outermode: mode, op: iv1.base, innermode: comp_mode);
	  assumption = simplify_gen_relational (code: EQ, SImode, op_mode: mode, op0: tmp,
						op1: mode_mmin);
	  if (assumption == const_true_rtx)
	    goto zero_iter_simplify;
	  iv1.base = simplify_gen_binary (code: PLUS, mode: comp_mode,
					  op0: iv1.base, constm1_rtx);
	}

      if (assumption != const0_rtx)
	desc->noloop_assumptions =
		alloc_EXPR_LIST (0, assumption, desc->noloop_assumptions);
      cond = (cond == LT) ? LE : LEU;

      /* It will be useful to be able to tell the difference once more in
	 LE -> NE reduction.  */
      was_sharp = true;
      break;
    default: ;
    }

  /* Take care of trivially infinite loops.  */
  if (cond != NE)
    {
      if (iv0.step == const0_rtx)
	{
	  /* mode_mmin <= iv1 always holds, so the exit is never taken.  */
	  tmp = lowpart_subreg (outermode: mode, op: iv0.base, innermode: comp_mode);
	  if (rtx_equal_p (tmp, mode_mmin))
	    {
	      desc->infinite =
		      alloc_EXPR_LIST (0, const_true_rtx, NULL_RTX);
	      /* Fill in the remaining fields somehow.  */
	      goto zero_iter_simplify;
	    }
	}
      else
	{
	  /* iv0 <= mode_mmax always holds, so the exit is never taken.  */
	  tmp = lowpart_subreg (outermode: mode, op: iv1.base, innermode: comp_mode);
	  if (rtx_equal_p (tmp, mode_mmax))
	    {
	      desc->infinite =
		      alloc_EXPR_LIST (0, const_true_rtx, NULL_RTX);
	      /* Fill in the remaining fields somehow.  */
	      goto zero_iter_simplify;
	    }
	}
    }

  /* If we can we want to take care of NE conditions instead of size
     comparisons, as they are much more friendly (most importantly
     this takes care of special handling of loops with step 1).  We can
     do it if we first check that upper bound is greater or equal to
     lower bound, their difference is constant c modulo step and that
     there is not an overflow.  */
  if (cond != NE)
    {
      if (iv0.step == const0_rtx)
	step = simplify_gen_unary (code: NEG, mode: comp_mode, op: iv1.step, op_mode: comp_mode);
      else
	step = iv0.step;
      step = lowpart_subreg (outermode: mode, op: step, innermode: comp_mode);
      delta = simplify_gen_binary (code: MINUS, mode: comp_mode, op0: iv1.base, op1: iv0.base);
      delta = lowpart_subreg (outermode: mode, op: delta, innermode: comp_mode);
      delta = simplify_gen_binary (code: UMOD, mode, op0: delta, op1: step);
      may_xform = const0_rtx;
      may_not_xform = const_true_rtx;

      if (CONST_INT_P (delta))
	{
	  if (was_sharp && INTVAL (delta) == INTVAL (step) - 1)
	    {
	      /* A special case.  We have transformed condition of type
		   for (i = 0; i < 4; i += 4)
		 into
		   for (i = 0; i <= 3; i += 4)
		 obviously if the test for overflow during that transformation
		 passed, we cannot overflow here.  Most importantly any
		 loop with sharp end condition and step 1 falls into this
		 category, so handling this case specially is definitely
		 worth the troubles.  */
	      may_xform = const_true_rtx;
	    }
	  else if (iv0.step == const0_rtx)
	    {
	      /* Check that shifting the invariant lower bound down by
		 (step - delta) does not wrap below the mode minimum.  */
	      bound = simplify_gen_binary (code: PLUS, mode: comp_mode, op0: mmin, op1: step);
	      bound = simplify_gen_binary (code: MINUS, mode: comp_mode, op0: bound, op1: delta);
	      bound = lowpart_subreg (outermode: mode, op: bound, innermode: comp_mode);
	      tmp = lowpart_subreg (outermode: mode, op: iv0.base, innermode: comp_mode);
	      may_xform = simplify_gen_relational (code: cond, SImode, op_mode: mode,
						   op0: bound, op1: tmp);
	      may_not_xform = simplify_gen_relational (code: reverse_condition (cond),
						       SImode, op_mode: mode,
						       op0: bound, op1: tmp);
	    }
	  else
	    {
	      /* Check that shifting the invariant upper bound up by
		 (step - delta) does not wrap above the mode maximum.  */
	      bound = simplify_gen_binary (code: MINUS, mode: comp_mode, op0: mmax, op1: step);
	      bound = simplify_gen_binary (code: PLUS, mode: comp_mode, op0: bound, op1: delta);
	      bound = lowpart_subreg (outermode: mode, op: bound, innermode: comp_mode);
	      tmp = lowpart_subreg (outermode: mode, op: iv1.base, innermode: comp_mode);
	      may_xform = simplify_gen_relational (code: cond, SImode, op_mode: mode,
						   op0: tmp, op1: bound);
	      may_not_xform = simplify_gen_relational (code: reverse_condition (cond),
						       SImode, op_mode: mode,
						       op0: tmp, op1: bound);
	    }
	}

      if (may_xform != const0_rtx)
	{
	  /* We perform the transformation always provided that it is not
	     completely senseless.  This is OK, as we would need this assumption
	     to determine the number of iterations anyway.  */
	  if (may_xform != const_true_rtx)
	    {
	      /* If the step is a power of two and the final value we have
		 computed overflows, the cycle is infinite.  Otherwise it
		 is nontrivial to compute the number of iterations.  */
	      if (step_is_pow2)
		desc->infinite = alloc_EXPR_LIST (0, may_not_xform,
						  desc->infinite);
	      else
		desc->assumptions = alloc_EXPR_LIST (0, may_xform,
						     desc->assumptions);
	    }

	  /* We are going to lose some information about upper bound on
	     number of iterations in this step, so record the information
	     here.  */
	  inc = INTVAL (iv0.step) - INTVAL (iv1.step);
	  if (CONST_INT_P (iv1.base))
	    up = INTVAL (iv1.base);
	  else
	    up = INTVAL (mode_mmax) - inc;
	  down = INTVAL (CONST_INT_P (iv0.base)
			 ? iv0.base
			 : mode_mmin);
	  max = (up - down) / inc + 1;
	  if (!desc->infinite
	      && !desc->assumptions)
	    record_niter_bound (loop, max, false, true);

	  /* Align the bases so that the LE condition becomes equivalent
	     to NE: move the invariant operand so that the difference of
	     the bases is a multiple of the step.  */
	  if (iv0.step == const0_rtx)
	    {
	      iv0.base = simplify_gen_binary (code: PLUS, mode: comp_mode, op0: iv0.base, op1: delta);
	      iv0.base = simplify_gen_binary (code: MINUS, mode: comp_mode, op0: iv0.base, op1: step);
	    }
	  else
	    {
	      iv1.base = simplify_gen_binary (code: MINUS, mode: comp_mode, op0: iv1.base, op1: delta);
	      iv1.base = simplify_gen_binary (code: PLUS, mode: comp_mode, op0: iv1.base, op1: step);
	    }

	  tmp0 = lowpart_subreg (outermode: mode, op: iv0.base, innermode: comp_mode);
	  tmp1 = lowpart_subreg (outermode: mode, op: iv1.base, innermode: comp_mode);
	  assumption = simplify_gen_relational (code: reverse_condition (cond),
						SImode, op_mode: mode, op0: tmp0, op1: tmp1);
	  if (assumption == const_true_rtx)
	    goto zero_iter_simplify;
	  else if (assumption != const0_rtx)
	    desc->noloop_assumptions =
		    alloc_EXPR_LIST (0, assumption, desc->noloop_assumptions);
	  cond = NE;
	}
    }

  /* Count the number of iterations.  */
  if (cond == NE)
    {
      /* Everything we do here is just arithmetics modulo size of mode.  This
	 makes us able to do more involved computations of number of iterations
	 than in other cases.  First transform the condition into shape
	 s * i <> c, with s positive.  */
      iv1.base = simplify_gen_binary (code: MINUS, mode: comp_mode, op0: iv1.base, op1: iv0.base);
      iv0.base = const0_rtx;
      iv0.step = simplify_gen_binary (code: MINUS, mode: comp_mode, op0: iv0.step, op1: iv1.step);
      iv1.step = const0_rtx;
      if (INTVAL (iv0.step) < 0)
	{
	  iv0.step = simplify_gen_unary (code: NEG, mode: comp_mode, op: iv0.step, op_mode: comp_mode);
	  iv1.base = simplify_gen_unary (code: NEG, mode: comp_mode, op: iv1.base, op_mode: comp_mode);
	}
      iv0.step = lowpart_subreg (outermode: mode, op: iv0.step, innermode: comp_mode);

      /* Let nsd (s, size of mode) = d.  If d does not divide c, the loop
	 is infinite.  Otherwise, the number of iterations is
	 (inverse(s/d) * (c/d)) mod (size of mode/d).  */
      /* Factor out the power of two d from the step s, shrinking the
	 effective modulus accordingly.  */
      s = INTVAL (iv0.step); d = 1;
      while (s % 2 != 1)
	{
	  s /= 2;
	  d *= 2;
	  size--;
	}
      /* BOUND = 2^size - 1, computed with two shifts to avoid shifting by
	 the full width of the host type.  */
      bound = gen_int_mode (((uint64_t) 1 << (size - 1) << 1) - 1, mode);

      tmp1 = lowpart_subreg (outermode: mode, op: iv1.base, innermode: comp_mode);
      tmp = simplify_gen_binary (code: UMOD, mode, op0: tmp1, op1: gen_int_mode (d, mode));
      assumption = simplify_gen_relational (code: NE, SImode, op_mode: mode, op0: tmp, const0_rtx);
      desc->infinite = alloc_EXPR_LIST (0, assumption, desc->infinite);

      /* niter = inverse(s/d) * (c/d) mod 2^size, since s/d is odd and thus
	 invertible modulo a power of two.  */
      tmp = simplify_gen_binary (code: UDIV, mode, op0: tmp1, op1: gen_int_mode (d, mode));
      inv = inverse (x: s, mod: size);
      tmp = simplify_gen_binary (code: MULT, mode, op0: tmp, op1: gen_int_mode (inv, mode));
      desc->niter_expr = simplify_gen_binary (code: AND, mode, op0: tmp, op1: bound);
    }
  else
    {
      if (iv1.step == const0_rtx)
	/* Condition in shape a + s * i <= b
	   We must know that b + s does not overflow and a <= b + s and then we
	   can compute number of iterations as (b + s - a) / s.  (It might
	   seem that we in fact could be more clever about testing the b + s
	   overflow condition using some information about b - a mod s,
	   but it was already taken into account during LE -> NE transform).  */
	{
	  step = iv0.step;
	  tmp0 = lowpart_subreg (outermode: mode, op: iv0.base, innermode: comp_mode);
	  tmp1 = lowpart_subreg (outermode: mode, op: iv1.base, innermode: comp_mode);

	  bound = simplify_gen_binary (code: MINUS, mode, op0: mode_mmax,
				       op1: lowpart_subreg (outermode: mode, op: step,
						       innermode: comp_mode));
	  if (step_is_pow2)
	    {
	      rtx t0, t1;

	      /* If s is power of 2, we know that the loop is infinite if
		 a % s <= b % s and b + s overflows.  */
	      assumption = simplify_gen_relational (code: reverse_condition (cond),
						    SImode, op_mode: mode,
						    op0: tmp1, op1: bound);

	      t0 = simplify_gen_binary (code: UMOD, mode, op0: copy_rtx (tmp0), op1: step);
	      t1 = simplify_gen_binary (code: UMOD, mode, op0: copy_rtx (tmp1), op1: step);
	      tmp = simplify_gen_relational (code: cond, SImode, op_mode: mode, op0: t0, op1: t1);
	      assumption = simplify_gen_binary (code: AND, SImode, op0: assumption, op1: tmp);
	      desc->infinite =
		      alloc_EXPR_LIST (0, assumption, desc->infinite);
	    }
	  else
	    {
	      /* For other steps the no-overflow condition b <= mmax - s
		 becomes an assumption the result is valid under.  */
	      assumption = simplify_gen_relational (code: cond, SImode, op_mode: mode,
						    op0: tmp1, op1: bound);
	      desc->assumptions =
		      alloc_EXPR_LIST (0, assumption, desc->assumptions);
	    }

	  /* The loop does not roll when a > b + s.  */
	  tmp = simplify_gen_binary (code: PLUS, mode: comp_mode, op0: iv1.base, op1: iv0.step);
	  tmp = lowpart_subreg (outermode: mode, op: tmp, innermode: comp_mode);
	  assumption = simplify_gen_relational (code: reverse_condition (cond),
						SImode, op_mode: mode, op0: tmp0, op1: tmp);

	  delta = simplify_gen_binary (code: PLUS, mode, op0: tmp1, op1: step);
	  delta = simplify_gen_binary (code: MINUS, mode, op0: delta, op1: tmp0);
	}
      else
	{
	  /* Condition in shape a <= b - s * i
	     We must know that a - s does not overflow and a - s <= b and then
	     we can again compute number of iterations as (b - (a - s)) / s.  */
	  step = simplify_gen_unary (code: NEG, mode, op: iv1.step, op_mode: mode);
	  tmp0 = lowpart_subreg (outermode: mode, op: iv0.base, innermode: comp_mode);
	  tmp1 = lowpart_subreg (outermode: mode, op: iv1.base, innermode: comp_mode);

	  bound = simplify_gen_binary (code: PLUS, mode, op0: mode_mmin,
				       op1: lowpart_subreg (outermode: mode, op: step, innermode: comp_mode));
	  if (step_is_pow2)
	    {
	      rtx t0, t1;

	      /* If s is power of 2, we know that the loop is infinite if
		 a % s <= b % s and a - s overflows.  */
	      assumption = simplify_gen_relational (code: reverse_condition (cond),
						    SImode, op_mode: mode,
						    op0: bound, op1: tmp0);

	      t0 = simplify_gen_binary (code: UMOD, mode, op0: copy_rtx (tmp0), op1: step);
	      t1 = simplify_gen_binary (code: UMOD, mode, op0: copy_rtx (tmp1), op1: step);
	      tmp = simplify_gen_relational (code: cond, SImode, op_mode: mode, op0: t0, op1: t1);
	      assumption = simplify_gen_binary (code: AND, SImode, op0: assumption, op1: tmp);
	      desc->infinite =
		      alloc_EXPR_LIST (0, assumption, desc->infinite);
	    }
	  else
	    {
	      /* For other steps the no-overflow condition mmin + s <= a
		 becomes an assumption the result is valid under.  */
	      assumption = simplify_gen_relational (code: cond, SImode, op_mode: mode,
						    op0: bound, op1: tmp0);
	      desc->assumptions =
		      alloc_EXPR_LIST (0, assumption, desc->assumptions);
	    }

	  /* The loop does not roll when a - s > b.  */
	  tmp = simplify_gen_binary (code: PLUS, mode: comp_mode, op0: iv0.base, op1: iv1.step);
	  tmp = lowpart_subreg (outermode: mode, op: tmp, innermode: comp_mode);
	  assumption = simplify_gen_relational (code: reverse_condition (cond),
						SImode, op_mode: mode,
						op0: tmp, op1: tmp1);
	  delta = simplify_gen_binary (code: MINUS, mode, op0: tmp0, op1: step);
	  delta = simplify_gen_binary (code: MINUS, mode, op0: tmp1, op1: delta);
	}
      if (assumption == const_true_rtx)
	goto zero_iter_simplify;
      else if (assumption != const0_rtx)
	desc->noloop_assumptions =
		alloc_EXPR_LIST (0, assumption, desc->noloop_assumptions);
      delta = simplify_gen_binary (code: UDIV, mode, op0: delta, op1: step);
      desc->niter_expr = delta;
    }

  /* Remember the niter expression before the simplifications below; see
     the comment near the end of the function for why we may restore it.  */
  old_niter = desc->niter_expr;

  simplify_using_initial_values (loop, op: AND, expr: &desc->assumptions);
  if (desc->assumptions
      && XEXP (desc->assumptions, 0) == const0_rtx)
    goto fail;
  simplify_using_initial_values (loop, op: IOR, expr: &desc->noloop_assumptions);
  simplify_using_initial_values (loop, op: IOR, expr: &desc->infinite);
  simplify_using_initial_values (loop, op: UNKNOWN, expr: &desc->niter_expr);

  /* Rerun the simplification.  Consider code (created by copying loop headers)

     i = 0;

     if (0 < n)
       {
	 do
	   {
	     i++;
	   } while (i < n);
       }

    The first pass determines that i = 0, the second pass uses it to eliminate
    noloop assumption.  */

  simplify_using_initial_values (loop, op: AND, expr: &desc->assumptions);
  if (desc->assumptions
      && XEXP (desc->assumptions, 0) == const0_rtx)
    goto fail;
  simplify_using_initial_values (loop, op: IOR, expr: &desc->noloop_assumptions);
  simplify_using_initial_values (loop, op: IOR, expr: &desc->infinite);
  simplify_using_initial_values (loop, op: UNKNOWN, expr: &desc->niter_expr);

  if (desc->noloop_assumptions
      && XEXP (desc->noloop_assumptions, 0) == const_true_rtx)
    goto zero_iter;

  if (CONST_INT_P (desc->niter_expr))
    {
      uint64_t val = INTVAL (desc->niter_expr);

      desc->const_iter = true;
      desc->niter = val & GET_MODE_MASK (desc->mode);
      if (!desc->infinite
	  && !desc->assumptions)
	record_niter_bound (loop, desc->niter, false, true);
    }
  else
    {
      max = determine_max_iter (loop, desc, old_niter);
      if (!max)
	goto zero_iter_simplify;
      if (!desc->infinite
	  && !desc->assumptions)
	record_niter_bound (loop, max, false, true);

      /* simplify_using_initial_values does a copy propagation on the registers
	 in the expression for the number of iterations.  This prolongs life
	 ranges of registers and increases register pressure, and usually
	 brings no gain (and if it happens to do, the cse pass will take care
	 of it anyway).  So prevent this behavior, unless it enabled us to
	 derive that the number of iterations is a constant.  */
      desc->niter_expr = old_niter;
    }

  return;

zero_iter_simplify:
  /* Simplify the assumptions.  */
  simplify_using_initial_values (loop, op: AND, expr: &desc->assumptions);
  if (desc->assumptions
      && XEXP (desc->assumptions, 0) == const0_rtx)
    goto fail;
  simplify_using_initial_values (loop, op: IOR, expr: &desc->infinite);

  /* Fallthru.  */
zero_iter:
  /* The loop provably executes zero times through this exit.  */
  desc->const_iter = true;
  desc->niter = 0;
  record_niter_bound (loop, 0, true, true);
  desc->noloop_assumptions = NULL_RTX;
  desc->niter_expr = const0_rtx;
  return;

fail:
  /* The exit is not simple; no niter information is available.  */
  desc->simple_p = false;
  return;
}
2849 | |
2850 | /* Checks whether E is a simple exit from LOOP and stores its description |
2851 | into DESC. */ |
2852 | |
2853 | static void |
2854 | check_simple_exit (class loop *loop, edge e, class niter_desc *desc) |
2855 | { |
2856 | basic_block exit_bb; |
2857 | rtx condition; |
2858 | rtx_insn *at; |
2859 | edge ein; |
2860 | |
2861 | exit_bb = e->src; |
2862 | desc->simple_p = false; |
2863 | |
2864 | /* It must belong directly to the loop. */ |
2865 | if (exit_bb->loop_father != loop) |
2866 | return; |
2867 | |
2868 | /* It must be tested (at least) once during any iteration. */ |
2869 | if (!dominated_by_p (CDI_DOMINATORS, loop->latch, exit_bb)) |
2870 | return; |
2871 | |
2872 | /* It must end in a simple conditional jump. */ |
2873 | if (!any_condjump_p (BB_END (exit_bb)) || !onlyjump_p (BB_END (exit_bb))) |
2874 | return; |
2875 | |
2876 | ein = EDGE_SUCC (exit_bb, 0); |
2877 | if (ein == e) |
2878 | ein = EDGE_SUCC (exit_bb, 1); |
2879 | |
2880 | desc->out_edge = e; |
2881 | desc->in_edge = ein; |
2882 | |
2883 | /* Test whether the condition is suitable. */ |
2884 | if (!(condition = get_condition (BB_END (ein->src), &at, false, false))) |
2885 | return; |
2886 | |
2887 | if (ein->flags & EDGE_FALLTHRU) |
2888 | { |
2889 | condition = reversed_condition (cond: condition); |
2890 | if (!condition) |
2891 | return; |
2892 | } |
2893 | |
2894 | /* Check that we are able to determine number of iterations and fill |
2895 | in information about it. */ |
2896 | iv_number_of_iterations (loop, insn: at, condition, desc); |
2897 | } |
2898 | |
2899 | /* Finds a simple exit of LOOP and stores its description into DESC. */ |
2900 | |
2901 | static void |
2902 | find_simple_exit (class loop *loop, class niter_desc *desc) |
2903 | { |
2904 | unsigned i; |
2905 | basic_block *body; |
2906 | edge e; |
2907 | class niter_desc act; |
2908 | bool any = false; |
2909 | edge_iterator ei; |
2910 | |
2911 | desc->simple_p = false; |
2912 | body = get_loop_body (loop); |
2913 | |
2914 | for (i = 0; i < loop->num_nodes; i++) |
2915 | { |
2916 | FOR_EACH_EDGE (e, ei, body[i]->succs) |
2917 | { |
2918 | if (flow_bb_inside_loop_p (loop, e->dest)) |
2919 | continue; |
2920 | |
2921 | check_simple_exit (loop, e, desc: &act); |
2922 | if (!act.simple_p) |
2923 | continue; |
2924 | |
2925 | if (!any) |
2926 | any = true; |
2927 | else |
2928 | { |
2929 | /* Prefer constant iterations; the less the better. */ |
2930 | if (!act.const_iter |
2931 | || (desc->const_iter && act.niter >= desc->niter)) |
2932 | continue; |
2933 | |
2934 | /* Also if the actual exit may be infinite, while the old one |
2935 | not, prefer the old one. */ |
2936 | if (act.infinite && !desc->infinite) |
2937 | continue; |
2938 | } |
2939 | |
2940 | *desc = act; |
2941 | } |
2942 | } |
2943 | |
2944 | if (dump_file) |
2945 | { |
2946 | if (desc->simple_p) |
2947 | { |
2948 | fprintf (stream: dump_file, format: "Loop %d is simple:\n" , loop->num); |
2949 | fprintf (stream: dump_file, format: " simple exit %d -> %d\n" , |
2950 | desc->out_edge->src->index, |
2951 | desc->out_edge->dest->index); |
2952 | if (desc->assumptions) |
2953 | { |
2954 | fprintf (stream: dump_file, format: " assumptions: " ); |
2955 | print_rtl (dump_file, desc->assumptions); |
2956 | fprintf (stream: dump_file, format: "\n" ); |
2957 | } |
2958 | if (desc->noloop_assumptions) |
2959 | { |
2960 | fprintf (stream: dump_file, format: " does not roll if: " ); |
2961 | print_rtl (dump_file, desc->noloop_assumptions); |
2962 | fprintf (stream: dump_file, format: "\n" ); |
2963 | } |
2964 | if (desc->infinite) |
2965 | { |
2966 | fprintf (stream: dump_file, format: " infinite if: " ); |
2967 | print_rtl (dump_file, desc->infinite); |
2968 | fprintf (stream: dump_file, format: "\n" ); |
2969 | } |
2970 | |
2971 | fprintf (stream: dump_file, format: " number of iterations: " ); |
2972 | print_rtl (dump_file, desc->niter_expr); |
2973 | fprintf (stream: dump_file, format: "\n" ); |
2974 | |
2975 | fprintf (stream: dump_file, format: " upper bound: %li\n" , |
2976 | (long)get_max_loop_iterations_int (loop)); |
2977 | fprintf (stream: dump_file, format: " likely upper bound: %li\n" , |
2978 | (long)get_likely_max_loop_iterations_int (loop)); |
2979 | fprintf (stream: dump_file, format: " realistic bound: %li\n" , |
2980 | (long)get_estimated_loop_iterations_int (loop)); |
2981 | } |
2982 | else |
2983 | fprintf (stream: dump_file, format: "Loop %d is not simple.\n" , loop->num); |
2984 | } |
2985 | |
2986 | /* Fix up the finiteness if possible. We can only do it for single exit, |
2987 | since the loop is finite, but it's possible that we predicate one loop |
2988 | exit to be finite which can not be determined as finite in middle-end as |
2989 | well. It results in incorrect predicate information on the exit condition |
2990 | expression. For example, if says [(int) _1 + -8, + , -8] != 0 finite, |
2991 | it means _1 can exactly divide -8. */ |
2992 | if (desc->infinite && single_exit (loop) && finite_loop_p (loop)) |
2993 | { |
2994 | desc->infinite = NULL_RTX; |
2995 | if (dump_file) |
2996 | fprintf (stream: dump_file, format: " infinite updated to finite.\n" ); |
2997 | } |
2998 | |
2999 | free (ptr: body); |
3000 | } |
3001 | |
3002 | /* Creates a simple loop description of LOOP if it was not computed |
3003 | already. */ |
3004 | |
3005 | class niter_desc * |
3006 | get_simple_loop_desc (class loop *loop) |
3007 | { |
3008 | class niter_desc *desc = simple_loop_desc (loop); |
3009 | |
3010 | if (desc) |
3011 | return desc; |
3012 | |
3013 | /* At least desc->infinite is not always initialized by |
3014 | find_simple_loop_exit. */ |
3015 | desc = ggc_cleared_alloc<niter_desc> (); |
3016 | iv_analysis_loop_init (loop); |
3017 | find_simple_exit (loop, desc); |
3018 | loop->simple_loop_desc = desc; |
3019 | return desc; |
3020 | } |
3021 | |
3022 | /* Releases simple loop description for LOOP. */ |
3023 | |
3024 | void |
3025 | free_simple_loop_desc (class loop *loop) |
3026 | { |
3027 | class niter_desc *desc = simple_loop_desc (loop); |
3028 | |
3029 | if (!desc) |
3030 | return; |
3031 | |
3032 | ggc_free (desc); |
3033 | loop->simple_loop_desc = NULL; |
3034 | } |
3035 | |