/* LRA (local register allocator) driver and LRA utilities.
   Copyright (C) 2010-2025 Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


/* The Local Register Allocator (LRA) is a replacement for the former
   reload pass.  It is focused on simplifying the code that solves the
   reload pass tasks, on making the code easier to maintain, and on
   implementing new promising optimizations.

   The major LRA design solutions are:
     o division into small, manageable, separate sub-tasks
     o reflection of all transformations and decisions in RTL as much
       as possible
     o insn constraints as a primary source of the info (minimizing the
       number of target-dependent macros/hooks)

   In brief, LRA works by iteratively processing insns with the final
   goal of satisfying all insn and address constraints:
     o New reload insns (in brief, reloads) and reload pseudos might be
       generated;
     o Some pseudos might be spilled to assign hard registers to
       new reload pseudos;
     o Recalculating spilled pseudo values (rematerialization);
     o Changing spilled pseudos to stack memory or their equivalences;
     o Allocating stack memory changes address displacements, so a
       new iteration is needed.

   Here is a block diagram of LRA passes:

                                 ------------------------
            ---------------     | Undo inheritance for   |     ---------------
           | Memory-memory |    | spilled pseudos,       |    | New (and old) |
           | move coalesce |<---| splits for pseudos got |<-- |    pseudos    |
            ---------------     | the same hard regs,    |    |  assignment   |
   Start          |             | and optional reloads   |     ---------------
     |            |              ------------------------             ^
     V            |               ----------------                    |
  -----------     V              | Update virtual |                   |
 |  Remove   |----> ------------>|    register    |                   |
 | scratches |    ^              |  displacements |                   |
  -----------     |               ----------------                    |
                  |                      |                            |
                  |                      V           New              |
                  |                 ------------   pseudos    -------------------
                  |                |Constraints:|  or insns  | Inheritance/split |
                  |                |    RTL     |---------->|  transformations  |
                  |   substi-      | transfor-  |           |   in EBB scope    |
                  |   tutions      |  mations   |            -------------------
                  |                 ------------
                  |                      | No change
          ----------------              V
         | Spilled pseudo |     -------------------
         |   to memory    |<---| Rematerialization |
         |  substitution  |     -------------------
          ----------------
                  | No substitutions
                  V
          -------------------------
         | Hard regs substitution, |
         | devirtualization, and   |------> Finish
         | restoring scratches got |
         |         memory          |
          -------------------------

   To speed up the process:
     o We process only insns affected by changes on previous
       iterations;
     o We don't use the DFA infrastructure because it results in a much
       slower compiler than the special IR described below does;
     o We use a special insn representation for quick access to insn
       info which is always *synchronized* with the current RTL;
     o The insn IR is minimized in memory use.  It is divided into
       three parts:
       o one specific for each insn in RTL (only operand locations);
       o one common for all insns in RTL with the same insn code
         (different operand attributes from machine descriptions);
       o one oriented for maintenance of live info (list of pseudos).
     o Pseudo data:
       o all insns where the pseudo is referenced;
       o live info (conflicting hard regs, live ranges, # of
         references etc);
       o data used for assigning (preferred hard regs, costs etc).

   This file contains the LRA driver, LRA utility functions and data,
   and code for dealing with scratches.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "rtl-error.h"
#include "tree.h"
#include "predict.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "expr.h"
#include "cfgrtl.h"
#include "cfgbuild.h"
#include "lra.h"
#include "lra-int.h"
#include "print-rtl.h"
#include "function-abi.h"

/* Dump bitmap SET with TITLE and BB INDEX.  */
void
lra_dump_bitmap_with_title (const char *title, bitmap set, int index)
{
  unsigned int i;
  int count;
  bitmap_iterator bi;
  static const int max_nums_on_line = 10;

  if (bitmap_empty_p (set))
    return;
  fprintf (lra_dump_file, " %s %d:", title, index);
  fprintf (lra_dump_file, "\n");
  count = max_nums_on_line + 1;
  EXECUTE_IF_SET_IN_BITMAP (set, 0, i, bi)
    {
      if (count > max_nums_on_line)
        {
          fprintf (lra_dump_file, "\n ");
          count = 0;
        }
      fprintf (lra_dump_file, " %4u", i);
      count++;
    }
  fprintf (lra_dump_file, "\n");
}

/* Hard registers currently not available for allocation.  It can
   change after some hard registers become not eliminable.  */
HARD_REG_SET lra_no_alloc_regs;

static int get_new_reg_value (void);
static void expand_reg_info (void);
static void invalidate_insn_recog_data (int);
static int get_insn_freq (rtx_insn *);
static void invalidate_insn_data_regno_info (lra_insn_recog_data_t,
                                             rtx_insn *, int);
/* Expand all regno related info needed for LRA.  */
static void
expand_reg_data (int old)
{
  resize_reg_info ();
  expand_reg_info ();
  ira_expand_reg_equiv ();
  for (int i = (int) max_reg_num () - 1; i >= old; i--)
    lra_change_class (i, ALL_REGS, " Set", true);
}

/* Create and return a new reg of ORIGINAL mode.  If ORIGINAL is NULL
   or of VOIDmode, use MD_MODE for the new reg.  Initialize its
   register class to RCLASS.  Print a message about assigning class
   RCLASS containing the new register name TITLE unless TITLE is NULL.
   Use the attributes of ORIGINAL if it is a register.  The created
   register will hold a unique value.  */
rtx
lra_create_new_reg_with_unique_value (machine_mode md_mode, rtx original,
                                      enum reg_class rclass,
                                      HARD_REG_SET *exclude_start_hard_regs,
                                      const char *title)
{
  machine_mode mode;
  rtx new_reg;

  if (original == NULL_RTX || (mode = GET_MODE (original)) == VOIDmode)
    mode = md_mode;
  lra_assert (mode != VOIDmode);
  new_reg = gen_reg_rtx (mode);
  if (original == NULL_RTX || ! REG_P (original))
    {
      if (lra_dump_file != NULL)
        fprintf (lra_dump_file, " Creating newreg=%i", REGNO (new_reg));
    }
  else
    {
      if (ORIGINAL_REGNO (original) >= FIRST_PSEUDO_REGISTER)
        ORIGINAL_REGNO (new_reg) = ORIGINAL_REGNO (original);
      REG_USERVAR_P (new_reg) = REG_USERVAR_P (original);
      REG_POINTER (new_reg) = REG_POINTER (original);
      REG_ATTRS (new_reg) = REG_ATTRS (original);
      if (lra_dump_file != NULL)
        fprintf (lra_dump_file, " Creating newreg=%i from oldreg=%i",
                 REGNO (new_reg), REGNO (original));
    }
  if (lra_dump_file != NULL)
    {
      if (title != NULL)
        fprintf (lra_dump_file, ", assigning class %s to%s%s r%d",
                 reg_class_names[rclass], *title == '\0' ? "" : " ",
                 title, REGNO (new_reg));
      fprintf (lra_dump_file, "\n");
    }
  expand_reg_data (max_reg_num ());
  setup_reg_classes (REGNO (new_reg), rclass, NO_REGS, rclass);
  if (exclude_start_hard_regs != NULL)
    lra_reg_info[REGNO (new_reg)].exclude_start_hard_regs
      = *exclude_start_hard_regs;
  return new_reg;
}

/* Analogous to the previous function but also inherits the value of
   ORIGINAL.  */
rtx
lra_create_new_reg (machine_mode md_mode, rtx original, enum reg_class rclass,
                    HARD_REG_SET *exclude_start_hard_regs, const char *title)
{
  rtx new_reg;

  new_reg
    = lra_create_new_reg_with_unique_value (md_mode, original, rclass,
                                            exclude_start_hard_regs, title);
  if (original != NULL_RTX && REG_P (original))
    lra_assign_reg_val (REGNO (original), REGNO (new_reg));
  return new_reg;
}
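
/* Illustrative sketch, not part of LRA itself: how a caller might
   create a reload pseudo inheriting the value of an original register
   and copy the original value into it.  The surrounding context (an
   insn being reloaded, with an active emit sequence) is assumed, and
   the function name is hypothetical.  */
#if 0
static rtx
example_make_reload_reg (rtx original)
{
  /* Create a fresh pseudo of the same mode as ORIGINAL (md_mode is
     ignored when ORIGINAL has a mode), allocatable in GENERAL_REGS
     and inheriting ORIGINAL's value.  */
  rtx reload_reg = lra_create_new_reg (VOIDmode, original, GENERAL_REGS,
                                       NULL, "reload");
  /* Copy the original value; lra_emit_move also expands the pseudo
     data for any pseudos it creates.  */
  lra_emit_move (reload_reg, original);
  return reload_reg;
}
#endif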

/* Set up a unique value held by register REGNO.  */
void
lra_set_regno_unique_value (int regno)
{
  lra_reg_info[regno].val = get_new_reg_value ();
}

/* Invalidate INSN related info used by LRA.  The info should never be
   used after that.  */
void
lra_invalidate_insn_data (rtx_insn *insn)
{
  lra_invalidate_insn_regno_info (insn);
  invalidate_insn_recog_data (INSN_UID (insn));
}

/* Mark INSN deleted and invalidate the insn related info used by
   LRA.  */
void
lra_set_insn_deleted (rtx_insn *insn)
{
  lra_invalidate_insn_data (insn);
  SET_INSN_DELETED (insn);
}

/* Delete an unneeded INSN and any previous insns whose sole purpose is
   loading data that is dead in INSN.  */
void
lra_delete_dead_insn (rtx_insn *insn)
{
  rtx_insn *prev = prev_real_insn (insn);
  rtx prev_dest;

  /* If the previous insn sets a register that dies in our insn,
     delete it too.  */
  if (prev && GET_CODE (PATTERN (prev)) == SET
      && (prev_dest = SET_DEST (PATTERN (prev)), REG_P (prev_dest))
      && reg_mentioned_p (prev_dest, PATTERN (insn))
      && find_regno_note (insn, REG_DEAD, REGNO (prev_dest))
      && ! side_effects_p (SET_SRC (PATTERN (prev))))
    lra_delete_dead_insn (prev);

  lra_set_insn_deleted (insn);
}

/* Emit insn x = y + z.  Return NULL if we failed to do it.
   Otherwise, return the insn.  We don't use gen_add3_insn as it might
   clobber CC.  */
static rtx_insn *
emit_add3_insn (rtx x, rtx y, rtx z)
{
  rtx_insn *last;

  last = get_last_insn ();

  if (have_addptr3_insn (x, y, z))
    {
      rtx_insn *insn = gen_addptr3_insn (x, y, z);

      /* If the target provides an "addptr" pattern it hopefully does
         so for a reason.  So falling back to the normal add would be
         a bug.  */
      lra_assert (insn != NULL_RTX);
      emit_insn (insn);
      return insn;
    }

  rtx_insn *insn = emit_insn (gen_rtx_SET (x, gen_rtx_PLUS (GET_MODE (y),
                                                            y, z)));
  if (recog_memoized (insn) < 0)
    {
      delete_insns_since (last);
      insn = NULL;
    }
  return insn;
}

/* Emit insn x = x + y.  Return the insn.  We use gen_add2_insn as the
   last resort.  */
static rtx_insn *
emit_add2_insn (rtx x, rtx y)
{
  rtx_insn *insn = emit_add3_insn (x, x, y);
  if (insn == NULL_RTX)
    {
      insn = gen_add2_insn (x, y);
      if (insn != NULL_RTX)
        emit_insn (insn);
    }
  return insn;
}

/* The target checks operands through operand predicates to recognize
   an insn.  We must take special precautions to generate add insns,
   which are frequent results of elimination.

   Emit insns for x = y + z.  X can be used to store intermediate
   values and so should not be in Y or Z when we use X to store an
   intermediate value.  Y + Z should form [base] [+ index [* scale]]
   [+ disp] where base and index are registers, disp and scale are
   constants.  Y should contain base if it is present, Z should
   contain disp if any.  index [* scale] can be part of Y or Z.  */
void
lra_emit_add (rtx x, rtx y, rtx z)
{
  int old;
  rtx_insn *last;
  rtx a1, a2, base, index, disp, scale, index_scale;
  bool ok_p;

  rtx_insn *add3_insn = emit_add3_insn (x, y, z);
  old = max_reg_num ();
  if (add3_insn != NULL)
    ;
  else
    {
      disp = a2 = NULL_RTX;
      if (GET_CODE (y) == PLUS)
        {
          a1 = XEXP (y, 0);
          a2 = XEXP (y, 1);
          disp = z;
        }
      else
        {
          a1 = y;
          if (CONSTANT_P (z))
            disp = z;
          else
            a2 = z;
        }
      index_scale = scale = NULL_RTX;
      if (GET_CODE (a1) == MULT)
        {
          index_scale = a1;
          index = XEXP (a1, 0);
          scale = XEXP (a1, 1);
          base = a2;
        }
      else if (a2 != NULL_RTX && GET_CODE (a2) == MULT)
        {
          index_scale = a2;
          index = XEXP (a2, 0);
          scale = XEXP (a2, 1);
          base = a1;
        }
      else
        {
          base = a1;
          index = a2;
        }
      if ((base != NULL_RTX && ! (REG_P (base) || GET_CODE (base) == SUBREG))
          || (index != NULL_RTX
              && ! (REG_P (index) || GET_CODE (index) == SUBREG))
          || (disp != NULL_RTX && ! CONSTANT_P (disp))
          || (scale != NULL_RTX && ! CONSTANT_P (scale)))
        {
          /* Probably we have no 3-op add.  The last chance is to use a
             2-op add insn.  To succeed, don't move Z to X as an
             address segment always comes in Y.  Otherwise, we might
             fail when adding the address segment to the register.  */
          lra_assert (x != y && x != z);
          emit_move_insn (x, y);
          rtx_insn *insn = emit_add2_insn (x, z);
          lra_assert (insn != NULL_RTX);
        }
      else
        {
          if (index_scale == NULL_RTX)
            index_scale = index;
          if (disp == NULL_RTX)
            {
              /* Generate x = index_scale; x = x + base.  */
              lra_assert (index_scale != NULL_RTX && base != NULL_RTX);
              emit_move_insn (x, index_scale);
              rtx_insn *insn = emit_add2_insn (x, base);
              lra_assert (insn != NULL_RTX);
            }
          else if (scale == NULL_RTX)
            {
              /* Try x = base + disp.  */
              lra_assert (base != NULL_RTX);
              last = get_last_insn ();
              rtx_insn *move_insn =
                emit_move_insn (x, gen_rtx_PLUS (GET_MODE (base), base, disp));
              if (recog_memoized (move_insn) < 0)
                {
                  delete_insns_since (last);
                  /* Generate x = disp; x = x + base.  */
                  emit_move_insn (x, disp);
                  rtx_insn *add2_insn = emit_add2_insn (x, base);
                  lra_assert (add2_insn != NULL_RTX);
                }
              /* Generate x = x + index.  */
              if (index != NULL_RTX)
                {
                  rtx_insn *insn = emit_add2_insn (x, index);
                  lra_assert (insn != NULL_RTX);
                }
            }
          else
            {
              /* Try x = index_scale; x = x + disp; x = x + base.  */
              last = get_last_insn ();
              rtx_insn *move_insn = emit_move_insn (x, index_scale);
              ok_p = false;
              if (recog_memoized (move_insn) >= 0)
                {
                  rtx_insn *insn = emit_add2_insn (x, disp);
                  if (insn != NULL_RTX)
                    {
                      if (base == NULL_RTX)
                        ok_p = true;
                      else
                        {
                          insn = emit_add2_insn (x, base);
                          if (insn != NULL_RTX)
                            ok_p = true;
                        }
                    }
                }
              if (! ok_p)
                {
                  rtx_insn *insn;

                  delete_insns_since (last);
                  /* Generate x = disp; x = x + base; x = x + index_scale.  */
                  emit_move_insn (x, disp);
                  if (base != NULL_RTX)
                    {
                      insn = emit_add2_insn (x, base);
                      lra_assert (insn != NULL_RTX);
                    }
                  insn = emit_add2_insn (x, index_scale);
                  lra_assert (insn != NULL_RTX);
                }
            }
        }
    }
  /* Functions emit_... can create pseudos -- so expand the pseudo
     data.  */
  if (old != max_reg_num ())
    expand_reg_data (old);
}
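
/* Illustrative sketch, not part of LRA itself: a worked example of the
   decomposition above.  For y = (plus (mult index scale) base) and a
   constant z, lra_emit_add may fall back to the sequence
   "x = index*scale; x = x + disp; x = x + base" when no single add or
   move-of-PLUS pattern matches.  All rtxes below are hypothetical and
   an active emit sequence is assumed.  */
#if 0
static void
example_emit_address_sum (void)
{
  rtx x = gen_reg_rtx (Pmode);
  rtx base = gen_reg_rtx (Pmode);
  rtx index = gen_reg_rtx (Pmode);
  /* y = index * 4 + base, z = 16: a typical eliminated address.  */
  rtx y = gen_rtx_PLUS (Pmode, gen_rtx_MULT (Pmode, index, GEN_INT (4)),
                        base);
  lra_emit_add (x, y, GEN_INT (16));
}
#endif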

/* The number of emitted reload insns so far.  */
int lra_curr_reload_num;

static void remove_insn_scratches (rtx_insn *insn);

/* Emit x := y, processing the special cases when y = u + v or y = u +
   v * scale + w through emit_add (Y can be an address which is base +
   index reg * scale + displacement in the general case).  X may be
   used as an intermediate result, therefore it should not be in Y.  */
void
lra_emit_move (rtx x, rtx y)
{
  int old;
  rtx_insn *insn;

  if (GET_CODE (y) != PLUS)
    {
      if (rtx_equal_p (x, y))
        return;
      old = max_reg_num ();

      insn = (GET_CODE (x) != STRICT_LOW_PART
              ? emit_move_insn (x, y) : emit_insn (gen_rtx_SET (x, y)));
      /* The move pattern may require scratch registers, so convert them
         into real registers now.  */
      if (insn != NULL_RTX)
        remove_insn_scratches (insn);
      if (REG_P (x))
        lra_reg_info[ORIGINAL_REGNO (x)].last_reload = ++lra_curr_reload_num;
      /* Function emit_move can create pseudos -- so expand the pseudo
         data.  */
      if (old != max_reg_num ())
        expand_reg_data (old);
      return;
    }
  lra_emit_add (x, XEXP (y, 0), XEXP (y, 1));
}

/* Update insn operands which are duplicates of the operands whose
   numbers are given in the array NOPS (terminated by -1).  The insn is
   represented by its LRA internal representation ID.  */
void
lra_update_dups (lra_insn_recog_data_t id, signed char *nops)
{
  int i, j, nop;
  struct lra_static_insn_data *static_id = id->insn_static_data;

  for (i = 0; i < static_id->n_dups; i++)
    for (j = 0; (nop = nops[j]) >= 0; j++)
      if (static_id->dup_num[i] == nop)
        *id->dup_loc[i] = *id->operand_loc[nop];
}
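
/* Illustrative sketch, not part of LRA itself: refreshing the
   (match_dup ...) copies of operand 0 after that operand has been
   changed in place.  The insn and the reason for the change are
   hypothetical.  */
#if 0
static void
example_update_dups_of_op0 (rtx_insn *insn)
{
  lra_insn_recog_data_t id = lra_get_insn_recog_data (insn);
  /* Operand numbers whose duplicates must be refreshed, terminated
     by -1.  */
  signed char nops[] = { 0, -1 };
  lra_update_dups (id, nops);
}
#endif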

/* Report an asm insn error and modify the asm insn.  */
void
lra_asm_insn_error (rtx_insn *insn)
{
  lra_asm_error_p = true;
  error_for_asm (insn,
                 "%<asm%> operand has impossible constraints"
                 " or there are not enough registers");
  /* Avoid further trouble with this insn.  */
  if (JUMP_P (insn))
    {
      ira_nullify_asm_goto (insn);
      lra_invalidate_insn_data (insn);
    }
  else
    {
      PATTERN (insn) = gen_rtx_USE (VOIDmode, const0_rtx);
      lra_set_insn_deleted (insn);
    }
}



/* This page contains code dealing with info about registers in the
   insns.  */

/* Pools for insn reg info.  */
object_allocator<lra_insn_reg> lra_insn_reg_pool ("insn regs");

/* Create LRA insn related info about a reference to REGNO in INSN
   with TYPE (in/out/inout), biggest reference mode MODE, a flag that
   the reference is through a subreg (SUBREG_P), and a reference to the
   next insn reg info (NEXT).  If REGNO can be early clobbered, the
   alternatives in which it can be early clobbered are given by
   EARLY_CLOBBER_ALTS.  */
static struct lra_insn_reg *
new_insn_reg (rtx_insn *insn, int regno, enum op_type type,
              machine_mode mode, bool subreg_p,
              alternative_mask early_clobber_alts,
              struct lra_insn_reg *next)
{
  lra_insn_reg *ir = lra_insn_reg_pool.allocate ();
  ir->type = type;
  ir->biggest_mode = mode;
  if (NONDEBUG_INSN_P (insn))
    lra_update_biggest_mode (regno, mode);
  ir->subreg_p = subreg_p;
  ir->early_clobber_alts = early_clobber_alts;
  ir->regno = regno;
  ir->next = next;
  return ir;
}

/* Free insn reg info list IR.  */
static void
free_insn_regs (struct lra_insn_reg *ir)
{
  struct lra_insn_reg *next_ir;

  for (; ir != NULL; ir = next_ir)
    {
      next_ir = ir->next;
      lra_insn_reg_pool.remove (ir);
    }
}

/* Finish pool for insn reg info.  */
static void
finish_insn_regs (void)
{
  lra_insn_reg_pool.release ();
}



/* This page contains code dealing with LRA insn info (in other words,
   the LRA internal insn representation).  */

/* Map INSN_CODE -> the static insn data.  This info is valid for the
   whole translation unit.  */
struct lra_static_insn_data *insn_code_data[NUM_INSN_CODES];

/* Debug insns are represented as a special insn with one input
   operand which is an RTL expression in var_location.  */

/* The following data are used as static insn operand data for all
   debug insns.  If structure lra_operand_data is changed, the
   initializer should be changed too.  */
static struct lra_operand_data debug_operand_data =
  {
    NULL, /* alternative  */
    0, /* early_clobber_alts */
    E_VOIDmode, /* We are not interested in the operand mode.  */
    OP_IN,
    0, 0, 0
  };

/* The following data are used as static insn data for all debug
   bind insns.  If structure lra_static_insn_data is changed, the
   initializer should be changed too.  */
static struct lra_static_insn_data debug_bind_static_data =
  {
    &debug_operand_data,
    0,  /* Duplication operands #.  */
    -1, /* Commutative operand #.  */
    1,  /* Operands #.  There is only one operand which is the debug
           RTL expression.  */
    0,  /* Duplications #.  */
    0,  /* Alternatives #.  We are not interested in alternatives
           because we do not process debug insns for reloads.  */
    NULL, /* Hard registers referenced in machine description.  */
    NULL  /* Descriptions of operands in alternatives.  */
  };

/* The following data are used as static insn data for all debug
   marker insns.  If structure lra_static_insn_data is changed, the
   initializer should be changed too.  */
static struct lra_static_insn_data debug_marker_static_data =
  {
    &debug_operand_data,
    0,  /* Duplication operands #.  */
    -1, /* Commutative operand #.  */
    0,  /* Operands #.  There isn't any operand.  */
    0,  /* Duplications #.  */
    0,  /* Alternatives #.  We are not interested in alternatives
           because we do not process debug insns for reloads.  */
    NULL, /* Hard registers referenced in machine description.  */
    NULL  /* Descriptions of operands in alternatives.  */
  };

/* Called once per compiler run to initialize some LRA data related
   to insns.  */
static void
init_insn_code_data_once (void)
{
  memset (insn_code_data, 0, sizeof (insn_code_data));
}

/* Called once per compiler run to finalize some LRA data related to
   insns.  */
static void
finish_insn_code_data_once (void)
{
  for (unsigned int i = 0; i < NUM_INSN_CODES; i++)
    {
      if (insn_code_data[i] != NULL)
        {
          free (insn_code_data[i]);
          insn_code_data[i] = NULL;
        }
    }
}

/* Return static insn data, allocating and setting it up if necessary.
   Although dup_num is static data (it depends only on icode), to set
   it up we need to extract the insn first.  So recog_data should be
   valid for a normal insn (ICODE >= 0) before the call.  */
static struct lra_static_insn_data *
get_static_insn_data (int icode, int nop, int ndup, int nalt)
{
  struct lra_static_insn_data *data;
  size_t n_bytes;

  lra_assert (icode < (int) NUM_INSN_CODES);
  if (icode >= 0 && (data = insn_code_data[icode]) != NULL)
    return data;
  lra_assert (nop >= 0 && ndup >= 0 && nalt >= 0);
  n_bytes = sizeof (struct lra_static_insn_data)
    + sizeof (struct lra_operand_data) * nop
    + sizeof (int) * ndup;
  data = XNEWVAR (struct lra_static_insn_data, n_bytes);
  data->operand_alternative = NULL;
  data->n_operands = nop;
  data->n_dups = ndup;
  data->n_alternatives = nalt;
  data->operand = ((struct lra_operand_data *)
                   ((char *) data + sizeof (struct lra_static_insn_data)));
  data->dup_num = ((int *) ((char *) data->operand
                            + sizeof (struct lra_operand_data) * nop));
  if (icode >= 0)
    {
      int i;

      insn_code_data[icode] = data;
      for (i = 0; i < nop; i++)
        {
          data->operand[i].constraint
            = insn_data[icode].operand[i].constraint;
          data->operand[i].mode = insn_data[icode].operand[i].mode;
          data->operand[i].strict_low = insn_data[icode].operand[i].strict_low;
          data->operand[i].is_operator
            = insn_data[icode].operand[i].is_operator;
          data->operand[i].type
            = (data->operand[i].constraint[0] == '=' ? OP_OUT
               : data->operand[i].constraint[0] == '+' ? OP_INOUT
               : OP_IN);
          data->operand[i].is_address = false;
        }
      for (i = 0; i < ndup; i++)
        data->dup_num[i] = recog_data.dup_num[i];
    }
  return data;
}

/* The current length of the following array.  */
int lra_insn_recog_data_len;

/* Map INSN_UID -> the insn recog data (NULL if unknown).  */
lra_insn_recog_data_t *lra_insn_recog_data;

/* Alloc pool we allocate entries for lra_insn_recog_data from.  */
static object_allocator<class lra_insn_recog_data>
  lra_insn_recog_data_pool ("insn recog data pool");

/* Initialize LRA data about insns.  */
static void
init_insn_recog_data (void)
{
  lra_insn_recog_data_len = 0;
  lra_insn_recog_data = NULL;
}

/* Expand, if necessary, LRA data about insns.  */
static void
check_and_expand_insn_recog_data (int index)
{
  int i, old;

  if (lra_insn_recog_data_len > index)
    return;
  old = lra_insn_recog_data_len;
  lra_insn_recog_data_len = index * 3U / 2;
  if (lra_insn_recog_data_len <= index)
    lra_insn_recog_data_len = index + 1;
  lra_insn_recog_data = XRESIZEVEC (lra_insn_recog_data_t,
                                    lra_insn_recog_data,
                                    lra_insn_recog_data_len);
  for (i = old; i < lra_insn_recog_data_len; i++)
    lra_insn_recog_data[i] = NULL;
}

/* Free LRA recog DATA for an insn.  */
static void
free_insn_recog_data (lra_insn_recog_data_t data)
{
  if (data->operand_loc != NULL)
    free (data->operand_loc);
  if (data->dup_loc != NULL)
    free (data->dup_loc);
  if (data->arg_hard_regs != NULL)
    free (data->arg_hard_regs);
  if (data->icode < 0 && NONDEBUG_INSN_P (data->insn))
    {
      if (data->insn_static_data->operand_alternative != NULL)
        free (const_cast <operand_alternative *>
              (data->insn_static_data->operand_alternative));
      free_insn_regs (data->insn_static_data->hard_regs);
      free (data->insn_static_data);
    }
  free_insn_regs (data->regs);
  data->regs = NULL;
  lra_insn_recog_data_pool.remove (data);
}

/* Pools for copies.  */
static object_allocator<lra_copy> lra_copy_pool ("lra copies");

/* Finish LRA data about all insns.  */
static void
finish_insn_recog_data (void)
{
  int i;
  lra_insn_recog_data_t data;

  for (i = 0; i < lra_insn_recog_data_len; i++)
    if ((data = lra_insn_recog_data[i]) != NULL)
      free_insn_recog_data (data);
  finish_insn_regs ();
  lra_copy_pool.release ();
  lra_insn_reg_pool.release ();
  lra_insn_recog_data_pool.release ();
  free (lra_insn_recog_data);
}

/* Set up info about operands in alternatives of LRA DATA of an
   insn.  */
static void
setup_operand_alternative (lra_insn_recog_data_t data,
                           const operand_alternative *op_alt)
{
  int i, j, nop, nalt;
  int icode = data->icode;
  struct lra_static_insn_data *static_data = data->insn_static_data;

  static_data->commutative = -1;
  nop = static_data->n_operands;
  nalt = static_data->n_alternatives;
  static_data->operand_alternative = op_alt;
  for (i = 0; i < nop; i++)
    {
      static_data->operand[i].early_clobber_alts = 0;
      static_data->operand[i].is_address = false;
      if (static_data->operand[i].constraint[0] == '%')
        {
          /* We currently only support one commutative pair of operands.  */
          if (static_data->commutative < 0)
            static_data->commutative = i;
          else
            lra_assert (icode < 0); /* Asm  */
          /* The last operand should not be marked commutative.  */
          lra_assert (i != nop - 1);
        }
    }
  for (j = 0; j < nalt; j++)
    for (i = 0; i < nop; i++, op_alt++)
      {
        if (op_alt->earlyclobber)
          static_data->operand[i].early_clobber_alts |= (alternative_mask) 1 << j;
        static_data->operand[i].is_address |= op_alt->is_address;
      }
}

/* Recursively process X and collect info about registers, which are
   not the insn operands, in X with TYPE (in/out/inout), a flag that it
   is early clobbered in the insn (EARLY_CLOBBER), and add the info to
   LIST.  X is a part of the insn given by DATA.  Return the resulting
   list.  */
static struct lra_insn_reg *
collect_non_operand_hard_regs (rtx_insn *insn, rtx *x,
                               lra_insn_recog_data_t data,
                               struct lra_insn_reg *list,
                               enum op_type type, bool early_clobber)
{
  int i, j, regno, last;
  bool subreg_p;
  machine_mode mode;
  struct lra_insn_reg *curr;
  rtx op = *x;
  enum rtx_code code = GET_CODE (op);
  const char *fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < data->insn_static_data->n_operands; i++)
    if (! data->insn_static_data->operand[i].is_operator
        && x == data->operand_loc[i])
      /* It is an operand loc.  Stop here.  */
      return list;
  for (i = 0; i < data->insn_static_data->n_dups; i++)
    if (x == data->dup_loc[i])
      /* It is a dup loc.  Stop here.  */
      return list;
  mode = GET_MODE (op);
  subreg_p = false;
  if (code == SUBREG)
    {
      mode = wider_subreg_mode (op);
      if (read_modify_subreg_p (op))
        subreg_p = true;
      op = SUBREG_REG (op);
      code = GET_CODE (op);
    }
  if (REG_P (op))
    {
      if ((regno = REGNO (op)) >= FIRST_PSEUDO_REGISTER)
        return list;
      /* Process all regs even unallocatable ones as we need info
         about all regs for the rematerialization pass.  */
      for (last = end_hard_regno (mode, regno); regno < last; regno++)
        {
          for (curr = list; curr != NULL; curr = curr->next)
            if (curr->regno == regno && curr->subreg_p == subreg_p
                && curr->biggest_mode == mode)
              {
                if (curr->type != type)
                  curr->type = OP_INOUT;
                if (early_clobber)
                  curr->early_clobber_alts = ALL_ALTERNATIVES;
                break;
              }
          if (curr == NULL)
            {
              /* This is a new hard regno or the info cannot be
                 integrated into the found structure.  */
#ifdef STACK_REGS
              early_clobber
                = (early_clobber
                   /* This clobber is to inform popping floating
                      point stack only.  */
                   && ! (FIRST_STACK_REG <= regno
                         && regno <= LAST_STACK_REG));
#endif
              list = new_insn_reg (data->insn, regno, type, mode, subreg_p,
                                   early_clobber ? ALL_ALTERNATIVES : 0, list);
            }
        }
      return list;
    }
  switch (code)
    {
    case SET:
      list = collect_non_operand_hard_regs (insn, &SET_DEST (op), data,
                                            list, OP_OUT, false);
      list = collect_non_operand_hard_regs (insn, &SET_SRC (op), data,
                                            list, OP_IN, false);
      break;
    case CLOBBER:
      /* We treat clobber of non-operand hard registers as early clobber.  */
      list = collect_non_operand_hard_regs (insn, &XEXP (op, 0), data,
                                            list, OP_OUT, true);
      break;
    case PRE_INC: case PRE_DEC: case POST_INC: case POST_DEC:
      list = collect_non_operand_hard_regs (insn, &XEXP (op, 0), data,
                                            list, OP_INOUT, false);
      break;
    case PRE_MODIFY: case POST_MODIFY:
      list = collect_non_operand_hard_regs (insn, &XEXP (op, 0), data,
                                            list, OP_INOUT, false);
      list = collect_non_operand_hard_regs (insn, &XEXP (op, 1), data,
                                            list, OP_IN, false);
      break;
    default:
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
        {
          if (fmt[i] == 'e')
            list = collect_non_operand_hard_regs (insn, &XEXP (op, i), data,
                                                  list, OP_IN, false);
          else if (fmt[i] == 'E')
            for (j = XVECLEN (op, i) - 1; j >= 0; j--)
              list = collect_non_operand_hard_regs (insn, &XVECEXP (op, i, j),
                                                    data, list, OP_IN, false);
        }
    }
  return list;
}

/* Set up and return info about INSN.  Set up the info if it is not set
   up yet.  */
lra_insn_recog_data_t
lra_set_insn_recog_data (rtx_insn *insn)
{
  lra_insn_recog_data_t data;
  int i, n, icode;
  rtx **locs;
  unsigned int uid = INSN_UID (insn);
  struct lra_static_insn_data *insn_static_data;

  check_and_expand_insn_recog_data (uid);
  if (DEBUG_INSN_P (insn))
    icode = -1;
  else
    {
      icode = INSN_CODE (insn);
      if (icode < 0)
        /* It might be a new simple insn which is not recognized yet.  */
        INSN_CODE (insn) = icode = recog_memoized (insn);
    }
  data = lra_insn_recog_data_pool.allocate ();
  lra_insn_recog_data[uid] = data;
  data->insn = insn;
  data->used_insn_alternative = LRA_UNKNOWN_ALT;
  data->asm_reloads_num = 0;
  data->icode = icode;
  data->regs = NULL;
  if (DEBUG_INSN_P (insn))
    {
      data->dup_loc = NULL;
      data->arg_hard_regs = NULL;
      data->preferred_alternatives = ALL_ALTERNATIVES;
      if (DEBUG_BIND_INSN_P (insn))
        {
          data->insn_static_data = &debug_bind_static_data;
          data->operand_loc = XNEWVEC (rtx *, 1);
          data->operand_loc[0] = &INSN_VAR_LOCATION_LOC (insn);
        }
      else if (DEBUG_MARKER_INSN_P (insn))
        {
          data->insn_static_data = &debug_marker_static_data;
          data->operand_loc = NULL;
        }
      return data;
    }
  if (icode < 0)
    {
      int nop, nalt;
      machine_mode operand_mode[MAX_RECOG_OPERANDS];
      const char *constraints[MAX_RECOG_OPERANDS];

      nop = asm_noperands (PATTERN (insn));
      data->operand_loc = data->dup_loc = NULL;
      nalt = 1;
      if (nop < 0)
        {
          /* It is a special insn like USE or CLOBBER.  We should
             recognize any regular insn otherwise LRA can do nothing
             with this insn.  */
          gcc_assert (GET_CODE (PATTERN (insn)) == USE
                      || GET_CODE (PATTERN (insn)) == CLOBBER
                      || GET_CODE (PATTERN (insn)) == ASM_INPUT);
          data->insn_static_data = insn_static_data
            = get_static_insn_data (-1, 0, 0, nalt);
        }
      else
        {
          /* expand_asm_operands makes sure there aren't too many
             operands.  */
          lra_assert (nop <= MAX_RECOG_OPERANDS);
          if (nop != 0)
            data->operand_loc = XNEWVEC (rtx *, nop);
          /* Now get the operand values and constraints out of the
             insn.  */
          decode_asm_operands (PATTERN (insn), NULL,
                               data->operand_loc,
                               constraints, operand_mode, NULL);
          if (nop > 0)
            for (const char *p = constraints[0]; *p; p++)
              nalt += *p == ',';
          data->insn_static_data = insn_static_data
            = get_static_insn_data (-1, nop, 0, nalt);
          for (i = 0; i < nop; i++)
            {
              insn_static_data->operand[i].mode = operand_mode[i];
              insn_static_data->operand[i].constraint = constraints[i];
              insn_static_data->operand[i].strict_low = false;
              insn_static_data->operand[i].is_operator = false;
              insn_static_data->operand[i].is_address = false;
            }
        }
      for (i = 0; i < insn_static_data->n_operands; i++)
        insn_static_data->operand[i].type
          = (insn_static_data->operand[i].constraint[0] == '=' ? OP_OUT
             : insn_static_data->operand[i].constraint[0] == '+' ? OP_INOUT
             : OP_IN);
      data->preferred_alternatives = ALL_ALTERNATIVES;
      if (nop > 0)
        {
          operand_alternative *op_alt = XCNEWVEC (operand_alternative,
                                                  nalt * nop);
          preprocess_constraints (nop, nalt, constraints, op_alt,
                                  data->operand_loc);
          setup_operand_alternative (data, op_alt);
        }
    }
  else
    {
      insn_extract (insn);
      data->insn_static_data = insn_static_data
        = get_static_insn_data (icode, insn_data[icode].n_operands,
                                insn_data[icode].n_dups,
                                insn_data[icode].n_alternatives);
      n = insn_static_data->n_operands;
      if (n == 0)
        locs = NULL;
      else
        {
          locs = XNEWVEC (rtx *, n);
          memcpy (locs, recog_data.operand_loc, n * sizeof (rtx *));
        }
      data->operand_loc = locs;
      n = insn_static_data->n_dups;
      if (n == 0)
        locs = NULL;
      else
        {
          locs = XNEWVEC (rtx *, n);
          memcpy (locs, recog_data.dup_loc, n * sizeof (rtx *));
        }
      data->dup_loc = locs;
      data->preferred_alternatives = get_preferred_alternatives (insn);
      const operand_alternative *op_alt = preprocess_insn_constraints (icode);
      if (!insn_static_data->operand_alternative)
        setup_operand_alternative (data, op_alt);
      else if (op_alt != insn_static_data->operand_alternative)
        insn_static_data->operand_alternative = op_alt;
    }
  if (GET_CODE (PATTERN (insn)) == CLOBBER || GET_CODE (PATTERN (insn)) == USE)
    insn_static_data->hard_regs = NULL;
  else
    insn_static_data->hard_regs
      = collect_non_operand_hard_regs (insn, &PATTERN (insn), data,
                                       NULL, OP_IN, false);
  data->arg_hard_regs = NULL;
  if (CALL_P (insn))
    {
      bool use_p;
      rtx link;
      int n_hard_regs, regno, arg_hard_regs[FIRST_PSEUDO_REGISTER];

      n_hard_regs = 0;
      /* Find implicit hard register usage.  We believe it will not be
         changed whatever transformations are used.  Call insns are an
         example.  */
      for (link = CALL_INSN_FUNCTION_USAGE (insn);
           link != NULL_RTX;
           link = XEXP (link, 1))
        if (((use_p = GET_CODE (XEXP (link, 0)) == USE)
             || GET_CODE (XEXP (link, 0)) == CLOBBER)
            && REG_P (XEXP (XEXP (link, 0), 0)))
          {
            regno = REGNO (XEXP (XEXP (link, 0), 0));
            lra_assert (regno < FIRST_PSEUDO_REGISTER);
            /* It is an argument register.  */
            for (i = REG_NREGS (XEXP (XEXP (link, 0), 0)) - 1; i >= 0; i--)
              arg_hard_regs[n_hard_regs++]
                = regno + i + (use_p ? 0 : FIRST_PSEUDO_REGISTER);
          }

      if (n_hard_regs != 0)
        {
          arg_hard_regs[n_hard_regs++] = -1;
          data->arg_hard_regs = XNEWVEC (int, n_hard_regs);
          memcpy (data->arg_hard_regs, arg_hard_regs,
                  sizeof (int) * n_hard_regs);
        }
    }
  /* Some output operands can be recognized only from the context, not
     from the constraints, which are empty in this case.  A call insn
     may contain a hard register in its set destination with an empty
     constraint, and extract_insn treats it as an input.  */
  for (i = 0; i < insn_static_data->n_operands; i++)
    {
      int j;
      rtx pat, set;
      struct lra_operand_data *operand = &insn_static_data->operand[i];

      /* ??? Should we treat 'X' the same way?  It looks to me that
         'X' means anything and an empty constraint means we do not
         care.  */
      if (operand->type != OP_IN || *operand->constraint != '\0'
          || operand->is_operator)
        continue;
      pat = PATTERN (insn);
      if (GET_CODE (pat) == SET)
        {
          if (data->operand_loc[i] != &SET_DEST (pat))
            continue;
        }
      else if (GET_CODE (pat) == PARALLEL)
        {
          for (j = XVECLEN (pat, 0) - 1; j >= 0; j--)
            {
              set = XVECEXP (PATTERN (insn), 0, j);
              if (GET_CODE (set) == SET
                  && &SET_DEST (set) == data->operand_loc[i])
                break;
            }
          if (j < 0)
            continue;
        }
      else
        continue;
      operand->type = OP_OUT;
    }
  return data;
}
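
/* Illustrative sketch, not part of LRA itself: how a consumer of the
   insn recog data might walk the operands of INSN.  The predicate
   applied to each operand is hypothetical.  */
#if 0
static void
example_walk_operands (rtx_insn *insn)
{
  lra_insn_recog_data_t id = lra_get_insn_recog_data (insn);
  struct lra_static_insn_data *static_id = id->insn_static_data;

  for (int i = 0; i < static_id->n_operands; i++)
    {
      rtx op = *id->operand_loc[i];
      /* static_id->operand[i].type is OP_IN, OP_OUT or OP_INOUT.  */
      if (static_id->operand[i].type != OP_IN && REG_P (op))
        /* ... process output register OP ... */;
    }
}
#endif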

/* Return info about the insn given by UID.  The info should already be
   set up.  */
static lra_insn_recog_data_t
get_insn_recog_data_by_uid (int uid)
{
  lra_insn_recog_data_t data;

  data = lra_insn_recog_data[uid];
  lra_assert (data != NULL);
  return data;
}

/* Invalidate all info about the insn given by its UID.  */
static void
invalidate_insn_recog_data (int uid)
{
  lra_insn_recog_data_t data;

  data = lra_insn_recog_data[uid];
  lra_assert (data != NULL);
  free_insn_recog_data (data);
  lra_insn_recog_data[uid] = NULL;
}

/* Update all the insn info about INSN.  It is usually called when
   something in the insn was changed.  Return the updated info.  */
lra_insn_recog_data_t
lra_update_insn_recog_data (rtx_insn *insn)
{
  lra_insn_recog_data_t data;
  int n;
  unsigned int uid = INSN_UID (insn);
  struct lra_static_insn_data *insn_static_data;
  poly_int64 sp_offset = 0;

  check_and_expand_insn_recog_data (uid);
  if ((data = lra_insn_recog_data[uid]) != NULL
      && data->icode != INSN_CODE (insn))
    {
      sp_offset = data->sp_offset;
      invalidate_insn_data_regno_info (data, insn, get_insn_freq (insn));
      invalidate_insn_recog_data (uid);
      data = NULL;
    }
  if (data == NULL)
    {
      data = lra_get_insn_recog_data (insn);
      /* Initiate or restore SP offset.  */
      data->sp_offset = sp_offset;
      return data;
    }
  insn_static_data = data->insn_static_data;
  data->used_insn_alternative = LRA_UNKNOWN_ALT;
  if (DEBUG_INSN_P (insn))
    return data;
  if (data->icode < 0)
    {
      int nop;
      machine_mode operand_mode[MAX_RECOG_OPERANDS];
      const char *constraints[MAX_RECOG_OPERANDS];

      nop = asm_noperands (PATTERN (insn));
      if (nop >= 0)
        {
          lra_assert (nop == data->insn_static_data->n_operands);
          /* Now get the operand values and constraints out of the
             insn.  */
          decode_asm_operands (PATTERN (insn), NULL,
                               data->operand_loc,
                               constraints, operand_mode, NULL);

          if (flag_checking)
            for (int i = 0; i < nop; i++)
              lra_assert
                (insn_static_data->operand[i].mode == operand_mode[i]
                 && insn_static_data->operand[i].constraint == constraints[i]
                 && ! insn_static_data->operand[i].is_operator);
        }

      if (flag_checking)
        for (int i = 0; i < insn_static_data->n_operands; i++)
          lra_assert
            (insn_static_data->operand[i].type
             == (insn_static_data->operand[i].constraint[0] == '=' ? OP_OUT
                 : insn_static_data->operand[i].constraint[0] == '+' ? OP_INOUT
                 : OP_IN));
    }
  else
    {
      insn_extract (insn);
      n = insn_static_data->n_operands;
      if (n != 0)
        memcpy (data->operand_loc, recog_data.operand_loc, n * sizeof (rtx *));
      n = insn_static_data->n_dups;
      if (n != 0)
        memcpy (data->dup_loc, recog_data.dup_loc, n * sizeof (rtx *));
      lra_assert (check_bool_attrs (insn));
    }
  return data;
}

/* Set up that INSN is using alternative ALT now.  */
void
lra_set_used_insn_alternative (rtx_insn *insn, int alt)
{
  lra_insn_recog_data_t data;

  data = lra_get_insn_recog_data (insn);
  data->used_insn_alternative = alt;
}

/* Set up that the insn with UID is using alternative ALT now.  The
   insn info should already be set up.  */
void
lra_set_used_insn_alternative_by_uid (int uid, int alt)
{
  lra_insn_recog_data_t data;

  check_and_expand_insn_recog_data (uid);
  data = lra_insn_recog_data[uid];
  lra_assert (data != NULL);
  data->used_insn_alternative = alt;
}



/* This page contains code dealing with common register info and
   pseudo copies.  */

/* The size of the following array.  */
static int reg_info_size;
/* Common info about each register.  */
class lra_reg *lra_reg_info;

HARD_REG_SET hard_regs_spilled_into;

/* Last register value.  */
static int last_reg_value;

/* Return a new register value.  */
static int
get_new_reg_value (void)
{
  return ++last_reg_value;
}

/* Vec referring to pseudo copies.  */
static vec<lra_copy_t> copy_vec;

/* Initialize I-th element of lra_reg_info.  */
static inline void
initialize_lra_reg_info_element (int i)
{
  bitmap_initialize (&lra_reg_info[i].insn_bitmap, &reg_obstack);
#ifdef STACK_REGS
  lra_reg_info[i].no_stack_p = false;
#endif
  CLEAR_HARD_REG_SET (lra_reg_info[i].conflict_hard_regs);
  CLEAR_HARD_REG_SET (lra_reg_info[i].exclude_start_hard_regs);
  lra_reg_info[i].preferred_hard_regno1 = -1;
  lra_reg_info[i].preferred_hard_regno2 = -1;
  lra_reg_info[i].preferred_hard_regno_profit1 = 0;
  lra_reg_info[i].preferred_hard_regno_profit2 = 0;
  lra_reg_info[i].biggest_mode = VOIDmode;
  lra_reg_info[i].live_ranges = NULL;
  lra_reg_info[i].nrefs = lra_reg_info[i].freq = 0;
  lra_reg_info[i].last_reload = 0;
  lra_reg_info[i].restore_rtx = NULL_RTX;
  lra_reg_info[i].val = get_new_reg_value ();
  lra_reg_info[i].offset = 0;
  lra_reg_info[i].copies = NULL;
}

/* Initialize common reg info and copies.  */
static void
init_reg_info (void)
{
  int i;

  last_reg_value = 0;
  reg_info_size = max_reg_num () * 3 / 2 + 1;
  lra_reg_info = XNEWVEC (class lra_reg, reg_info_size);
  for (i = 0; i < reg_info_size; i++)
    initialize_lra_reg_info_element (i);
  copy_vec.truncate (0);
  CLEAR_HARD_REG_SET (hard_regs_spilled_into);
}


/* Finish common reg info and copies.  */
static void
finish_reg_info (void)
{
  int i;

  for (i = 0; i < reg_info_size; i++)
    bitmap_clear (&lra_reg_info[i].insn_bitmap);
  free (lra_reg_info);
  reg_info_size = 0;
}

/* Expand common reg info if it is necessary.  */
static void
expand_reg_info (void)
{
  int i, old = reg_info_size;

  if (reg_info_size > max_reg_num ())
    return;
  reg_info_size = max_reg_num () * 3 / 2 + 1;
  lra_reg_info = XRESIZEVEC (class lra_reg, lra_reg_info, reg_info_size);
  for (i = old; i < reg_info_size; i++)
    initialize_lra_reg_info_element (i);
}

/* Free all copies.  */
void
lra_free_copies (void)
{
  lra_copy_t cp;

  while (copy_vec.length () != 0)
    {
      cp = copy_vec.pop ();
      lra_reg_info[cp->regno1].copies = lra_reg_info[cp->regno2].copies = NULL;
      lra_copy_pool.remove (cp);
    }
}

/* Create a copy of two pseudos REGNO1 and REGNO2.  The copy execution
   frequency is FREQ.  */
void
lra_create_copy (int regno1, int regno2, int freq)
{
  bool regno1_dest_p;
  lra_copy_t cp;

  lra_assert (regno1 != regno2);
  regno1_dest_p = true;
  if (regno1 > regno2)
    {
      std::swap (regno1, regno2);
      regno1_dest_p = false;
    }
  cp = lra_copy_pool.allocate ();
  copy_vec.safe_push (cp);
  cp->regno1_dest_p = regno1_dest_p;
  cp->freq = freq;
  cp->regno1 = regno1;
  cp->regno2 = regno2;
  cp->regno1_next = lra_reg_info[regno1].copies;
  lra_reg_info[regno1].copies = cp;
  cp->regno2_next = lra_reg_info[regno2].copies;
  lra_reg_info[regno2].copies = cp;
  if (lra_dump_file != NULL)
    fprintf (lra_dump_file, " Creating copy r%d%sr%d@%d\n",
             regno1, regno1_dest_p ? "<-" : "->", regno2, freq);
}

/* Return the N-th (0, 1, ...) copy.  If there is no such copy, return
   NULL.  */
lra_copy_t
lra_get_copy (int n)
{
  if (n >= (int) copy_vec.length ())
    return NULL;
  return copy_vec[n];
}
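
/* Illustrative sketch, not part of LRA itself: iterating over all
   pseudo copies recorded so far, e.g. to account for their execution
   frequencies.  The accumulation itself is hypothetical.  */
#if 0
static int
example_total_copy_freq (void)
{
  int total = 0;
  lra_copy_t cp;

  /* lra_get_copy returns NULL past the last recorded copy.  */
  for (int i = 0; (cp = lra_get_copy (i)) != NULL; i++)
    total += cp->freq;
  return total;
}
#endif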



/* This page contains code dealing with info about registers in
   insns.  */

/* Process X of INSN recursively and add info (operand type is given
   by TYPE) about registers in X to the insn DATA.  If X can be early
   clobbered, the alternatives in which it can be early clobbered are
   given by EARLY_CLOBBER_ALTS.  */
static void
add_regs_to_insn_regno_info (lra_insn_recog_data_t data, rtx x,
                             rtx_insn *insn, enum op_type type,
                             alternative_mask early_clobber_alts)
{
  int i, j, regno;
  bool subreg_p;
  machine_mode mode;
  const char *fmt;
  enum rtx_code code;
  struct lra_insn_reg *curr;

  code = GET_CODE (x);
  mode = GET_MODE (x);
  subreg_p = false;
  if (GET_CODE (x) == SUBREG)
    {
      mode = wider_subreg_mode (x);
      if (read_modify_subreg_p (x))
        subreg_p = true;
      x = SUBREG_REG (x);
      code = GET_CODE (x);
    }
  if (REG_P (x))
    {
      regno = REGNO (x);
      /* Process all regs even unallocatable ones as we need info about
         all regs for the rematerialization pass.  */
      expand_reg_info ();
      if (bitmap_set_bit (&lra_reg_info[regno].insn_bitmap, INSN_UID (insn)))
        {
          data->regs = new_insn_reg (data->insn, regno, type, mode, subreg_p,
                                     early_clobber_alts, data->regs);
          return;
        }
      else
        {
          for (curr = data->regs; curr != NULL; curr = curr->next)
            if (curr->regno == regno)
              {
                if (curr->subreg_p != subreg_p || curr->biggest_mode != mode)
                  /* The info cannot be integrated into the found
                     structure.  */
                  data->regs = new_insn_reg (data->insn, regno, type, mode,
                                             subreg_p, early_clobber_alts,
                                             data->regs);
                else
                  {
                    if (curr->type != type)
                      curr->type = OP_INOUT;
                    curr->early_clobber_alts |= early_clobber_alts;
                  }
                return;
              }
          gcc_unreachable ();
        }
    }

  switch (code)
    {
    case SET:
      add_regs_to_insn_regno_info (data, SET_DEST (x), insn, OP_OUT, 0);
      add_regs_to_insn_regno_info (data, SET_SRC (x), insn, OP_IN, 0);
      break;
    case CLOBBER:
      /* We treat clobber of non-operand hard registers as early
         clobber.  */
      add_regs_to_insn_regno_info (data, XEXP (x, 0), insn, OP_OUT,
                                   ALL_ALTERNATIVES);
      break;
    case PRE_INC: case PRE_DEC: case POST_INC: case POST_DEC:
      add_regs_to_insn_regno_info (data, XEXP (x, 0), insn, OP_INOUT, 0);
      break;
    case PRE_MODIFY: case POST_MODIFY:
      add_regs_to_insn_regno_info (data, XEXP (x, 0), insn, OP_INOUT, 0);
      add_regs_to_insn_regno_info (data, XEXP (x, 1), insn, OP_IN, 0);
      break;
    default:
      if ((code != PARALLEL && code != EXPR_LIST) || type != OP_OUT)
        /* Some targets place small structures in registers for return
           values of functions, and those registers are wrapped in
           PARALLEL that we may see as the destination of a SET.  Here
           is an example:

           (call_insn 13 12 14 2 (set (parallel:BLK [
                (expr_list:REG_DEP_TRUE (reg:DI 0 ax)
                    (const_int 0 [0]))
                (expr_list:REG_DEP_TRUE (reg:DI 1 dx)
                    (const_int 8 [0x8]))
               ])
             (call (mem:QI (symbol_ref:DI (...  */
        type = OP_IN;
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
        {
          if (fmt[i] == 'e')
            add_regs_to_insn_regno_info (data, XEXP (x, i), insn, type, 0);
          else if (fmt[i] == 'E')
            {
              for (j = XVECLEN (x, i) - 1; j >= 0; j--)
                add_regs_to_insn_regno_info (data, XVECEXP (x, i, j), insn,
                                             type, 0);
            }
        }
    }
}

/* Return the execution frequency of INSN.  */
static int
get_insn_freq (rtx_insn *insn)
{
  basic_block bb = BLOCK_FOR_INSN (insn);

  gcc_checking_assert (bb != NULL);
  return REG_FREQ_FROM_BB (bb);
}

/* Invalidate all reg info of INSN with DATA and execution frequency
   FREQ.  Update common info about the invalidated registers.  */
static void
invalidate_insn_data_regno_info (lra_insn_recog_data_t data, rtx_insn *insn,
                                 int freq)
{
  int uid;
  bool debug_p;
  unsigned int i;
  struct lra_insn_reg *ir, *next_ir;

  uid = INSN_UID (insn);
  debug_p = DEBUG_INSN_P (insn);
  for (ir = data->regs; ir != NULL; ir = next_ir)
    {
      i = ir->regno;
      next_ir = ir->next;
      lra_insn_reg_pool.remove (ir);
      bitmap_clear_bit (&lra_reg_info[i].insn_bitmap, uid);
      if (i >= FIRST_PSEUDO_REGISTER && ! debug_p)
        {
          lra_reg_info[i].nrefs--;
          lra_reg_info[i].freq -= freq;
          lra_assert (lra_reg_info[i].nrefs >= 0 && lra_reg_info[i].freq >= 0);
        }
    }
  data->regs = NULL;
}

/* Invalidate all reg info of INSN.  Update common info about the
   invalidated registers.  */
void
lra_invalidate_insn_regno_info (rtx_insn *insn)
{
  invalidate_insn_data_regno_info (lra_get_insn_recog_data (insn), insn,
                                   get_insn_freq (insn));
}

/* Update common reg info from the reg info of the insn given by its
   DATA and execution frequency FREQ.  */
static void
setup_insn_reg_info (lra_insn_recog_data_t data, int freq)
{
  unsigned int i;
  struct lra_insn_reg *ir;

  for (ir = data->regs; ir != NULL; ir = ir->next)
    if ((i = ir->regno) >= FIRST_PSEUDO_REGISTER)
      {
        lra_reg_info[i].nrefs++;
        lra_reg_info[i].freq += freq;
      }
}

/* Set up insn reg info of INSN.  Update common reg info from reg info
   of INSN.  */
void
lra_update_insn_regno_info (rtx_insn *insn)
{
  int i, freq;
  lra_insn_recog_data_t data;
  struct lra_static_insn_data *static_data;
  enum rtx_code code;
  rtx link;

  if (! INSN_P (insn))
    return;
  data = lra_get_insn_recog_data (insn);
  static_data = data->insn_static_data;
  freq = NONDEBUG_INSN_P (insn) ? get_insn_freq (insn) : 0;
  invalidate_insn_data_regno_info (data, insn, freq);
  for (i = static_data->n_operands - 1; i >= 0; i--)
    add_regs_to_insn_regno_info (data, *data->operand_loc[i], insn,
                                 static_data->operand[i].type,
                                 static_data->operand[i].early_clobber_alts);
  if ((code = GET_CODE (PATTERN (insn))) == CLOBBER || code == USE)
    add_regs_to_insn_regno_info (data, XEXP (PATTERN (insn), 0), insn,
                                 code == USE ? OP_IN : OP_OUT, 0);
  if (CALL_P (insn))
    /* On some targets call insns can refer to pseudos in memory in
       CALL_INSN_FUNCTION_USAGE list.  Process them in order to
       consider their occurrences in calls for different
       transformations (e.g. inheritance) with the given pseudos.  */
    for (link = CALL_INSN_FUNCTION_USAGE (insn);
         link != NULL_RTX;
         link = XEXP (link, 1))
      {
        code = GET_CODE (XEXP (link, 0));
        if ((code == USE || code == CLOBBER)
            && MEM_P (XEXP (XEXP (link, 0), 0)))
          add_regs_to_insn_regno_info (data, XEXP (XEXP (link, 0), 0), insn,
                                       code == USE ? OP_IN : OP_OUT, 0);
      }
  if (NONDEBUG_INSN_P (insn))
    setup_insn_reg_info (data, freq);
}

/* Return reg info of the insn given by its UID.  */
struct lra_insn_reg *
lra_get_insn_regs (int uid)
{
  lra_insn_recog_data_t data;

  data = get_insn_recog_data_by_uid (uid);
  return data->regs;
}



/* Recursive hash function for RTL X.  */
hashval_t
lra_rtx_hash (rtx x)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;
  hashval_t val = 0;

  if (x == 0)
    return val;

  code = GET_CODE (x);
  val += (int) code + 4095;

  /* Some RTL can be compared nonrecursively.  */
  switch (code)
    {
    case REG:
      return val + REGNO (x);

    case LABEL_REF:
      return iterative_hash_object (XEXP (x, 0), val);

    case SYMBOL_REF:
      return iterative_hash_object (XSTR (x, 0), val);

    case SCRATCH:
    case CONST_DOUBLE:
    case CONST_VECTOR:
      return val;

    case CONST_INT:
      return val + UINTVAL (x);

    case SUBREG:
      val += lra_rtx_hash (SUBREG_REG (x));
      for (int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
        val += SUBREG_BYTE (x).coeffs[i];
      return val;

    default:
      break;
    }

  /* Hash the elements.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
        {
        case 'w':
          val += XWINT (x, i);
          break;

        case 'n':
        case 'i':
          val += XINT (x, i);
          break;

        case 'L':
          val += XLOC (x, i);
          break;

        case 'V':
        case 'E':
          val += XVECLEN (x, i);

          for (j = 0; j < XVECLEN (x, i); j++)
            val += lra_rtx_hash (XVECEXP (x, i, j));
          break;

        case 'e':
          val += lra_rtx_hash (XEXP (x, i));
          break;

        case 'S':
        case 's':
          val += htab_hash_string (XSTR (x, i));
          break;

        case 'u':
        case '0':
        case 't':
          break;

          /* It is believed that rtx's at this level will never
             contain anything but integers and other rtx's, except for
             within LABEL_REFs and SYMBOL_REFs.  */
        default:
          abort ();
        }
    }
  return val;
}
1793
1794
1795
1796/* This page contains code dealing with stack of the insns which
1797 should be processed by the next constraint pass. */
1798
1799/* Bitmap used to put an insn on the stack only in one exemplar. */
1800static sbitmap lra_constraint_insn_stack_bitmap;
1801
1802/* The stack itself. */
1803vec<rtx_insn *> lra_constraint_insn_stack;

/* Put INSN on the stack.  If ALWAYS_UPDATE is true, always update the reg
   info for INSN, otherwise only update it if INSN is not already on the
   stack.  */
static inline void
lra_push_insn_1 (rtx_insn *insn, bool always_update)
{
  unsigned int uid = INSN_UID (insn);
  if (always_update)
    lra_update_insn_regno_info (insn);
  if (uid >= SBITMAP_SIZE (lra_constraint_insn_stack_bitmap))
    lra_constraint_insn_stack_bitmap =
      sbitmap_resize (lra_constraint_insn_stack_bitmap, 3 * uid / 2, 0);
  if (bitmap_bit_p (lra_constraint_insn_stack_bitmap, uid))
    return;
  bitmap_set_bit (lra_constraint_insn_stack_bitmap, uid);
  if (! always_update)
    lra_update_insn_regno_info (insn);
  lra_constraint_insn_stack.safe_push (insn);
}

/* Put INSN on the stack.  */
void
lra_push_insn (rtx_insn *insn)
{
  lra_push_insn_1 (insn, false);
}

/* Put INSN on the stack and update its reg info.  */
void
lra_push_insn_and_update_insn_regno_info (rtx_insn *insn)
{
  lra_push_insn_1 (insn, true);
}

/* Put the insn with UID on the stack.  */
void
lra_push_insn_by_uid (unsigned int uid)
{
  lra_push_insn (lra_insn_recog_data[uid]->insn);
}

/* Take the last-inserted insn off the stack and return it.  */
rtx_insn *
lra_pop_insn (void)
{
  rtx_insn *insn = lra_constraint_insn_stack.pop ();
  bitmap_clear_bit (lra_constraint_insn_stack_bitmap, INSN_UID (insn));
  return insn;
}

/* Return the current size of the insn stack.  */
unsigned int
lra_insn_stack_length (void)
{
  return lra_constraint_insn_stack.length ();
}
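
/* Illustrative sketch only (not in the original source): the constraint
   sub-pass drains this stack roughly as follows, possibly pushing newly
   created reload insns back while iterating.  */
#if 0
while (lra_insn_stack_length () != 0)
  {
    rtx_insn *insn = lra_pop_insn ();
    /* ... try to satisfy the constraints of INSN; new reloads are
       pushed with lra_push_insn or
       lra_push_insn_and_update_insn_regno_info ... */
  }
#endif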

/* Push insns from FROM back to TO (excluding TO), in reverse order.  */
static void
push_insns (rtx_insn *from, rtx_insn *to)
{
  rtx_insn *insn;

  if (from == NULL_RTX)
    return;
  for (insn = from; insn != to; insn = PREV_INSN (insn))
    if (INSN_P (insn))
      lra_push_insn (insn);
}

/* Set up and return the sp offset for insns in range [FROM, LAST].  The
   offset is taken from the BB insn before FROM after simulating its
   effects, or zero if there is no such insn.  */
static poly_int64
setup_sp_offset (rtx_insn *from, rtx_insn *last)
{
  rtx_insn *before = prev_nonnote_nondebug_insn_bb (from);
  poly_int64 offset = 0;

  if (before && INSN_P (before))
    offset = lra_update_sp_offset (PATTERN (before),
                                   lra_get_insn_recog_data (before)->sp_offset);

  for (rtx_insn *insn = from; insn != NEXT_INSN (last); insn = NEXT_INSN (insn))
    {
      lra_get_insn_recog_data (insn)->sp_offset = offset;
      offset = lra_update_sp_offset (PATTERN (insn), offset);
    }
  return offset;
}
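
/* Illustrative sketch only (not in the original source): after emitting
   a detached sequence SEQ_FIRST..SEQ_LAST (hypothetical names) before
   an insn, a caller records each new insn's sp offset and compares the
   resulting offset with the insn's recorded one; a mismatch means the
   insn must be re-eliminated with the delta, which is exactly what
   lra_process_new_insns below does.  */
#if 0
poly_int64 off = setup_sp_offset (seq_first, seq_last);
if (maybe_ne (off, lra_get_insn_recog_data (insn)->sp_offset))
  /* Adjust INSN as in lra_process_new_insns.  */;
#endif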

/* Dump all func insns in a slim form.  */
void
lra_dump_insns (FILE *f)
{
  dump_rtl_slim (f, get_insns (), NULL, -1, 0);
}

/* Dump all func insns in a slim form with TITLE when the dump file is
   open and lra_verbose >= 7.  */
void
lra_dump_insns_if_possible (const char *title)
{
  if (lra_dump_file == NULL || lra_verbose < 7)
    return;
  fprintf (lra_dump_file, "%s:", title);
  lra_dump_insns (lra_dump_file);
}

/* Emit insns BEFORE before INSN and insns AFTER after INSN.  Put the
   insns onto the stack.  Print information about emitting the insns,
   using TITLE.  */
void
lra_process_new_insns (rtx_insn *insn, rtx_insn *before, rtx_insn *after,
                       const char *title)
{
  if (before == NULL_RTX && after == NULL_RTX)
    return;
  if (lra_dump_file != NULL)
    {
      dump_insn_slim (lra_dump_file, insn);
      if (before != NULL_RTX)
        {
          fprintf (lra_dump_file, "    %s before:\n", title);
          dump_rtl_slim (lra_dump_file, before, NULL, -1, 0);
        }
    }
  if (before != NULL_RTX)
    {
      if (cfun->can_throw_non_call_exceptions)
        copy_reg_eh_region_note_forward (insn, before, NULL);
      emit_insn_before (before, insn);
      poly_int64 old_sp_offset = lra_get_insn_recog_data (insn)->sp_offset;
      poly_int64 new_sp_offset = setup_sp_offset (before, PREV_INSN (insn));
      if (maybe_ne (old_sp_offset, new_sp_offset))
        {
          if (lra_dump_file != NULL)
            {
              fprintf (lra_dump_file, "    Changing sp offset from ");
              print_dec (old_sp_offset, lra_dump_file);
              fprintf (lra_dump_file, " to ");
              print_dec (new_sp_offset, lra_dump_file);
              fprintf (lra_dump_file, " for insn");
              dump_rtl_slim (lra_dump_file, insn, NULL, -1, 0);
            }
          lra_get_insn_recog_data (insn)->sp_offset = new_sp_offset;
          eliminate_regs_in_insn (insn, false, false,
                                  old_sp_offset - new_sp_offset);
          lra_push_insn (insn);
        }
      push_insns (PREV_INSN (insn), PREV_INSN (before));
    }
  if (after != NULL_RTX)
    {
      if (cfun->can_throw_non_call_exceptions)
        copy_reg_eh_region_note_forward (insn, after, NULL);
      if (! JUMP_P (insn))
        {
          rtx_insn *last;

          if (lra_dump_file != NULL)
            {
              fprintf (lra_dump_file, "    %s after:\n", title);
              dump_rtl_slim (lra_dump_file, after, NULL, -1, 0);
            }
          for (last = after;
               NEXT_INSN (last) != NULL_RTX;
               last = NEXT_INSN (last))
            ;
          emit_insn_after (after, insn);
          push_insns (last, insn);
          setup_sp_offset (after, last);
        }
      else
        {
          /* Put output reload insns on successor BBs: */
          edge_iterator ei;
          edge e;

          FOR_EACH_EDGE (e, ei, BLOCK_FOR_INSN (insn)->succs)
            if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
              {
                /* We already made the edge non-critical in ira.cc::ira.  */
                lra_assert (!EDGE_CRITICAL_P (e));
                rtx_insn *curr, *tmp = BB_HEAD (e->dest);
                if (LABEL_P (tmp))
                  tmp = NEXT_INSN (tmp);
                if (NOTE_INSN_BASIC_BLOCK_P (tmp))
                  tmp = NEXT_INSN (tmp);
                /* Do not put reload insns into a BB that has no actual
                   insns (e.g. the last BB).  */
                if (tmp == NULL)
                  continue;
                start_sequence ();
                for (curr = after; curr != NULL_RTX; curr = NEXT_INSN (curr))
                  emit_insn (copy_insn (PATTERN (curr)));
                rtx_insn *copy = get_insns (), *last = get_last_insn ();
                end_sequence ();
                if (lra_dump_file != NULL)
                  {
                    fprintf (lra_dump_file, "    %s after in bb%d:\n", title,
                             e->dest->index);
                    dump_rtl_slim (lra_dump_file, copy, NULL, -1, 0);
                  }
                /* Use the right emit func for setting up BB_END/BB_HEAD: */
                if (BB_END (e->dest) == PREV_INSN (tmp))
                  emit_insn_after_noloc (copy, PREV_INSN (tmp), e->dest);
                else
                  emit_insn_before_noloc (copy, tmp, e->dest);
                push_insns (last, PREV_INSN (copy));
                setup_sp_offset (copy, last);
                /* We can ignore BB live info here as it and reg notes
                   will be updated before the next assignment
                   sub-pass.  */
              }
        }
    }
  if (lra_dump_file != NULL)
    fprintf (lra_dump_file, "\n");
  if (cfun->can_throw_non_call_exceptions)
    {
      rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
      if (note && !insn_could_throw_p (insn))
        remove_note (insn, note);
    }
}
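
/* Illustrative sketch only (not in the original source): this is
   roughly how a caller in the constraint sub-pass hands a newly
   generated input reload to the function above.  RELOAD_REG,
   ORIGINAL_OPERAND and CURR_INSN are hypothetical placeholders.  */
#if 0
start_sequence ();
emit_move_insn (reload_reg, original_operand);
rtx_insn *before = get_insns ();
end_sequence ();
lra_process_new_insns (curr_insn, before, NULL, "Inserting insn reload");
#endif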


/* Replace all references to register OLD_REGNO in *LOC with pseudo
   register NEW_REG.  Try to simplify a subreg of a constant if SUBREG_P.
   DEBUG_P is true if LOC is within a DEBUG_INSN.  Return true if any
   change was made.  */
bool
lra_substitute_pseudo (rtx *loc, int old_regno, rtx new_reg, bool subreg_p,
                       bool debug_p)
{
  rtx x = *loc;
  bool result = false;
  enum rtx_code code;
  const char *fmt;
  int i, j;

  if (x == NULL_RTX)
    return false;

  code = GET_CODE (x);
  if (code == SUBREG && subreg_p)
    {
      rtx subst, inner = SUBREG_REG (x);
      /* Transform a subreg of a constant while we still have the inner
         mode of the subreg.  The inner expression of the subreg should
         not be an insn operand.  */
      if (REG_P (inner) && (int) REGNO (inner) == old_regno
          && CONSTANT_P (new_reg)
          && (subst = simplify_subreg (GET_MODE (x), new_reg, GET_MODE (inner),
                                       SUBREG_BYTE (x))) != NULL_RTX)
        {
          *loc = subst;
          return true;
        }

    }
  else if (code == REG && (int) REGNO (x) == old_regno)
    {
      machine_mode mode = GET_MODE (x);
      machine_mode inner_mode = GET_MODE (new_reg);

      if (mode != inner_mode
          && ! (CONST_SCALAR_INT_P (new_reg) && SCALAR_INT_MODE_P (mode)))
        {
          poly_uint64 offset = 0;
          if (partial_subreg_p (mode, inner_mode)
              && SCALAR_INT_MODE_P (inner_mode))
            offset = subreg_lowpart_offset (mode, inner_mode);
          if (debug_p)
            new_reg = gen_rtx_raw_SUBREG (mode, new_reg, offset);
          else
            new_reg = gen_rtx_SUBREG (mode, new_reg, offset);
        }
      *loc = new_reg;
      return true;
    }

  /* Scan all the operand sub-expressions.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (debug_p
              && i == 0
              && (code == SUBREG
                  || code == ZERO_EXTEND
                  || code == SIGN_EXTEND
                  || code == FLOAT
                  || code == UNSIGNED_FLOAT))
            {
              rtx y = XEXP (x, 0);
              if (lra_substitute_pseudo (&y, old_regno,
                                         new_reg, subreg_p, debug_p))
                {
                  result = true;
                  if (CONST_SCALAR_INT_P (y))
                    {
                      if (code == SUBREG)
                        y = simplify_subreg (GET_MODE (x), y,
                                             GET_MODE (SUBREG_REG (x)),
                                             SUBREG_BYTE (x));
                      else
                        y = simplify_unary_operation (code, GET_MODE (x), y,
                                                      GET_MODE (XEXP (x, 0)));
                      if (y)
                        *loc = y;
                      else
                        *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
                    }
                  else
                    XEXP (x, 0) = y;
                }
            }
          else if (lra_substitute_pseudo (&XEXP (x, i), old_regno,
                                          new_reg, subreg_p, debug_p))
            result = true;
        }
      else if (fmt[i] == 'E')
        {
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (lra_substitute_pseudo (&XVECEXP (x, i, j), old_regno,
                                       new_reg, subreg_p, debug_p))
              result = true;
        }
    }
  return result;
}

/* Call lra_substitute_pseudo within an insn.  Try to simplify a subreg
   of a constant if SUBREG_P.  This won't update the insn ptr, just the
   contents of the insn.  */
bool
lra_substitute_pseudo_within_insn (rtx_insn *insn, int old_regno,
                                   rtx new_reg, bool subreg_p)
{
  rtx loc = insn;
  return lra_substitute_pseudo (&loc, old_regno, new_reg, subreg_p,
                                DEBUG_INSN_P (insn));
}
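
/* Illustrative sketch only (not in the original source): replace pseudo
   OLD_REGNO with NEW_REG throughout INSN and keep the insn's register
   info in sync, as callers typically do; the variable names here are
   placeholders.  */
#if 0
if (lra_substitute_pseudo_within_insn (insn, old_regno, new_reg, false))
  lra_update_insn_regno_info (insn);
#endif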


/* Return a new register of the same mode as ORIGINAL of class ALL_REGS.
   Used in ira_remove_scratches.  */
static rtx
get_scratch_reg (rtx original)
{
  return lra_create_new_reg (GET_MODE (original), original, ALL_REGS,
                             NULL, NULL);
}

/* Remove all insn scratches in INSN.  */
static void
remove_insn_scratches (rtx_insn *insn)
{
  if (ira_remove_insn_scratches (insn, true, lra_dump_file, get_scratch_reg))
    df_insn_rescan (insn);
}

/* Remove all insn scratches in the current function.  */
static void
remove_scratches (void)
{
  basic_block bb;
  rtx_insn *insn;

  FOR_EACH_BB_FN (bb, cfun)
    FOR_BB_INSNS (bb, insn)
      if (INSN_P (insn))
        remove_insn_scratches (insn);
}

/* Check the RTL for correctness.  If FINAL_P is true, the check is
   done at the end of LRA and is more rigorous.  */
static void
check_rtl (bool final_p)
{
  basic_block bb;
  rtx_insn *insn;

  lra_assert (! final_p || reload_completed);
  FOR_EACH_BB_FN (bb, cfun)
    FOR_BB_INSNS (bb, insn)
      if (NONDEBUG_INSN_P (insn)
          && GET_CODE (PATTERN (insn)) != USE
          && GET_CODE (PATTERN (insn)) != CLOBBER
          && GET_CODE (PATTERN (insn)) != ASM_INPUT)
        {
          if (final_p)
            {
              extract_constrain_insn (insn);
              continue;
            }
          /* LRA code is based on the assumption that all addresses can
             be correctly decomposed.  LRA can generate reloads for
             decomposable addresses.  The decomposition code checks the
             correctness of the addresses.  So we don't need to check
             the addresses here.  Don't call insn_invalid_p here, it can
             change the code at this stage.  */
          if (recog_memoized (insn) < 0 && asm_noperands (PATTERN (insn)) < 0)
            fatal_insn_not_found (insn);
        }
}

/* Determine if the current function has an exception receiver block
   that reaches the exit block via non-exceptional edges.  */
static bool
has_nonexceptional_receiver (void)
{
  edge e;
  edge_iterator ei;
  basic_block *tos, *worklist, bb;

  /* If we're not optimizing, then just err on the safe side.  */
  if (!optimize)
    return true;

  /* First determine which blocks can reach exit via normal paths.  */
  tos = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) + 1);

  FOR_EACH_BB_FN (bb, cfun)
    bb->flags &= ~BB_REACHABLE;

  /* Place the exit block on our worklist.  */
  EXIT_BLOCK_PTR_FOR_FN (cfun)->flags |= BB_REACHABLE;
  *tos++ = EXIT_BLOCK_PTR_FOR_FN (cfun);

  /* Iterate: find everything reachable from what we've already seen.  */
  while (tos != worklist)
    {
      bb = *--tos;

      FOR_EACH_EDGE (e, ei, bb->preds)
        if (e->flags & EDGE_ABNORMAL)
          {
            free (worklist);
            return true;
          }
        else
          {
            basic_block src = e->src;

            if (!(src->flags & BB_REACHABLE))
              {
                src->flags |= BB_REACHABLE;
                *tos++ = src;
              }
          }
    }
  free (worklist);
  /* No exceptional block reached exit unexceptionally.  */
  return false;
}

/* Remove all REG_DEAD and REG_UNUSED notes and regenerate REG_INC.
   We replace pseudos with hard registers without notifying DF, and
   that can make the notes obsolete.  The DF infrastructure does not
   deal with REG_INC notes -- so we regenerate them here.  */
static void
update_inc_notes (void)
{
  rtx *pnote;
  basic_block bb;
  rtx_insn *insn;

  FOR_EACH_BB_FN (bb, cfun)
    FOR_BB_INSNS (bb, insn)
      if (NONDEBUG_INSN_P (insn))
        {
          pnote = &REG_NOTES (insn);
          while (*pnote != 0)
            {
              if (REG_NOTE_KIND (*pnote) == REG_DEAD
                  || REG_NOTE_KIND (*pnote) == REG_UNUSED
                  || REG_NOTE_KIND (*pnote) == REG_INC)
                *pnote = XEXP (*pnote, 1);
              else
                pnote = &XEXP (*pnote, 1);
            }

          if (AUTO_INC_DEC)
            add_auto_inc_notes (insn, PATTERN (insn));
        }
}

/* Set to true while in LRA.  */
bool lra_in_progress = false;

/* Start of pseudo regnos before the LRA.  */
int lra_new_regno_start;

/* Start of reload pseudo regnos before the new spill pass.  */
int lra_constraint_new_regno_start;

/* Avoid spilling pseudos with regno more than the following value if
   it is possible.  */
int lra_bad_spill_regno_start;

/* A pseudo of Pmode.  */
rtx lra_pmode_pseudo;

/* Inheritance pseudo regnos before the new spill pass.  */
bitmap_head lra_inheritance_pseudos;

/* Split regnos before the new spill pass.  */
bitmap_head lra_split_regs;

/* Reload pseudo regnos before the new assignment pass which still can
   be spilled after the assignment pass as memory is also accepted in
   insns for the reload pseudos.  */
bitmap_head lra_optional_reload_pseudos;

/* Pseudo regnos used for subreg reloads before the new assignment
   pass.  Such pseudos still can be spilled after the assignment
   pass.  */
bitmap_head lra_subreg_reload_pseudos;

/* File used for output of LRA debug information.  */
FILE *lra_dump_file;

/* How verbose the debug information should be.  */
int lra_verbose;

/* True if we split a hard reg after the last constraint sub-pass.  */
bool lra_hard_reg_split_p;

/* True if we found an asm error.  */
bool lra_asm_error_p;

/* True if we should try to spill into registers of different classes
   instead of memory.  */
bool lra_reg_spill_p;

/* Set up the value of LRA_REG_SPILL_P.  */
static void
setup_reg_spill_flag (void)
{
  int cl, mode;

  if (targetm.spill_class != NULL)
    for (cl = 0; cl < (int) LIM_REG_CLASSES; cl++)
      for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
        if (targetm.spill_class ((enum reg_class) cl,
                                 (machine_mode) mode) != NO_REGS)
          {
            lra_reg_spill_p = true;
            return;
          }
  lra_reg_spill_p = false;
}
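
/* Illustrative sketch only (not in the original source): a target
   enables register-to-register spilling by providing the spill_class
   hook.  The class name below is hypothetical; a real target would
   return a class whose registers are cheap to use as spill locations
   for RCLASS in MODE, or NO_REGS.  */
#if 0
static reg_class_t
example_spill_class (reg_class_t rclass, machine_mode mode)
{
  if (rclass == GENERAL_REGS && known_le (GET_MODE_SIZE (mode), 8))
    return EXAMPLE_SPILL_REGS;  /* hypothetical register class */
  return NO_REGS;
}
#define TARGET_SPILL_CLASS example_spill_class
#endif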

/* True if the current function is too big to use regular algorithms
   in LRA.  In other words, we should use simpler and faster algorithms
   in LRA.  It also means we should not worry about generating code
   for caller saves.  The value is set up in IRA.  */
bool lra_simple_p;

/* Major LRA entry function.  F is the file that should be used to dump
   LRA debug info with the given verbosity.  */
void
lra (FILE *f, int verbose)
{
  int i;
  bool live_p, inserted_p;

  lra_dump_file = f;
  lra_verbose = verbose;
  lra_asm_error_p = false;
  lra_pmode_pseudo = gen_reg_rtx (Pmode);

  timevar_push (TV_LRA);

  /* Make sure that the last insn is a note.  Some subsequent passes
     need it.  */
  emit_note (NOTE_INSN_DELETED);

  lra_no_alloc_regs = ira_no_alloc_regs;

  init_reg_info ();
  expand_reg_info ();

  init_insn_recog_data ();

  /* Some quick check on RTL generated by previous passes.  */
  if (flag_checking)
    check_rtl (false);

  lra_in_progress = true;

  lra_live_range_iter = lra_coalesce_iter = lra_constraint_iter = 0;
  lra_assignment_iter = lra_assignment_iter_after_spill = 0;
  lra_inheritance_iter = lra_undo_inheritance_iter = 0;
  lra_rematerialization_iter = 0;

  setup_reg_spill_flag ();

  /* Function remove_scratches can create new pseudos for clobbers --
     so set up lra_constraint_new_regno_start before its call to
     permit changing reg classes for pseudos created by this
     simplification.  */
  lra_constraint_new_regno_start = lra_new_regno_start = max_reg_num ();
  lra_bad_spill_regno_start = INT_MAX;
  remove_scratches ();

  /* A function that has a non-local label that can reach the exit
     block via non-exceptional paths must save all call-saved
     registers.  */
  if (cfun->has_nonlocal_label && has_nonexceptional_receiver ())
    crtl->saves_all_registers = 1;

  if (crtl->saves_all_registers)
    for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
      if (!crtl->abi->clobbers_full_reg_p (i)
          && !fixed_regs[i]
          && !LOCAL_REGNO (i))
        df_set_regs_ever_live (i, true);

  /* We don't use DF from now on, and avoid relying on it, because it
     is too expensive when a lot of RTL changes are made.  */
  df_set_flags (DF_NO_INSN_RESCAN);
  lra_constraint_insn_stack.create (get_max_uid ());
  lra_constraint_insn_stack_bitmap = sbitmap_alloc (get_max_uid ());
  bitmap_clear (lra_constraint_insn_stack_bitmap);
  lra_live_ranges_init ();
  lra_constraints_init ();
  lra_curr_reload_num = 0;
  push_insns (get_last_insn (), NULL);
  /* It is needed for the 1st coalescing.  */
  bitmap_initialize (&lra_inheritance_pseudos, &reg_obstack);
  bitmap_initialize (&lra_split_regs, &reg_obstack);
  bitmap_initialize (&lra_optional_reload_pseudos, &reg_obstack);
  bitmap_initialize (&lra_subreg_reload_pseudos, &reg_obstack);
  live_p = false;
  if (maybe_ne (get_frame_size (), 0) && crtl->stack_alignment_needed)
    /* If we have a stack frame, we must align it now.  The stack size
       may be a part of the offset computation for register
       elimination.  */
    assign_stack_local (BLKmode, 0, crtl->stack_alignment_needed);
  lra_init_equiv ();
  for (;;)
    {
      for (;;)
        {
          bool reloads_p = lra_constraints (lra_constraint_iter == 0);
          /* Constraint transformations may make eliminable hard regs
             uneliminable, in which case pseudos that use them should
             be spilled.  It is better to do it before pseudo
             assignments.

             For example, rs6000 can make
             RS6000_PIC_OFFSET_TABLE_REGNUM uneliminable if we started
             to use a constant pool.  */
          lra_eliminate (false, false);
          /* We should try to assign hard registers to scratches even
             if there were no RTL transformations in lra_constraints.
             Also we should check IRA assignments on the first
             iteration as they can be wrong because of early clobber
             operands which are ignored in IRA.  */
          if (! reloads_p && lra_constraint_iter > 1)
            {
              /* Stack is not empty here only when there are changes
                 during the elimination sub-pass.  */
              if (bitmap_empty_p (lra_constraint_insn_stack_bitmap))
                break;
              else
                /* If there are no reloads but there are changes due
                   to elimination, restart the constraint sub-pass
                   first.  */
                continue;
            }
          /* Do inheritance only for regular algorithms.  */
          if (! lra_simple_p)
            lra_inheritance ();
          if (live_p)
            lra_clear_live_ranges ();
          bool fails_p;
          lra_hard_reg_split_p = false;
          int split_fails_num = 0;
          do
            {
              /* We need live ranges for lra_assign -- so build them.
                 But don't remove dead insns or change global live
                 info as we can undo inheritance transformations after
                 inheritance pseudo assigning.  */
              lra_create_live_ranges (true, !lra_simple_p);
              live_p = true;
              /* If we don't spill non-reload and non-inheritance
                 pseudos, there is no point in running memory-memory
                 move coalescing.  If inheritance pseudos were spilled,
                 the memory-memory moves involving them will be removed
                 by the pass undoing inheritance.  */
              if (lra_simple_p || lra_hard_reg_split_p)
                lra_assign (fails_p);
              else
                {
                  bool spill_p = !lra_assign (fails_p);

                  if (lra_undo_inheritance ())
                    live_p = false;
                  if (spill_p && ! fails_p)
                    {
                      if (! live_p)
                        {
                          lra_create_live_ranges (true, true);
                          live_p = true;
                        }
                      if (lra_coalesce ())
                        live_p = false;
                    }
                  if (! live_p)
                    lra_clear_live_ranges ();
                }
              if (fails_p)
                {
                  /* It is a very rare case.  It is the last hope to
                     split a hard regno live range for a reload
                     pseudo.  */
                  if (live_p)
                    lra_clear_live_ranges ();
                  live_p = false;
                  /* See the comment at the LRA_MAX_FAILED_SPLITS
                     definition.  */
                  bool last_failed_split_p
                    = split_fails_num > LRA_MAX_FAILED_SPLITS;
                  if (! lra_split_hard_reg_for (last_failed_split_p))
                    {
                      if (last_failed_split_p)
                        break;
                      split_fails_num++;
                    }
                  lra_hard_reg_split_p = true;
                }
            }
          while (fails_p && !lra_asm_error_p);
          if (! live_p)
            {
              /* We need the correct reg notes for the work of the
                 constraint sub-pass.  */
              lra_create_live_ranges (true, true);
              live_p = true;
            }
        }
      /* Don't clear the optional reloads bitmap until all constraints
         are satisfied as we need to distinguish them from regular
         reloads.  */
      bitmap_clear (&lra_optional_reload_pseudos);
      bitmap_clear (&lra_subreg_reload_pseudos);
      bitmap_clear (&lra_inheritance_pseudos);
      bitmap_clear (&lra_split_regs);
      if (! live_p)
        {
          /* We need full live info for spilling pseudos into
             registers instead of memory.  */
          lra_create_live_ranges (lra_reg_spill_p, true);
          live_p = true;
        }
      /* We should check the necessity for spilling here as the above
         live range pass can remove spilled pseudos.  */
      if (! lra_need_for_spills_p ())
        break;
      /* Now we know what pseudos should be spilled.  Try to
         rematerialize them first.  */
      if (lra_remat ())
        {
          /* We need full live info -- see the comment above.  We also
             might need live info if we have a pseudo assigned to the
             hard frame pointer reg and will need FP for usual
             purposes.  */
          lra_create_live_ranges (lra_reg_spill_p || lra_fp_pseudo_p (),
                                  true);
          live_p = true;
          if (! lra_need_for_spills_p ())
            {
              if (lra_need_for_scratch_reg_p ())
                continue;
              break;
            }
        }
      lra_spill ();
      /* Assignment of stack slots changes elimination offsets for
         some eliminations.  So update the offsets here.  */
      lra_eliminate (false, false);
      lra_constraint_new_regno_start = max_reg_num ();
      if (lra_bad_spill_regno_start == INT_MAX
          && lra_inheritance_iter > LRA_MAX_INHERITANCE_PASSES
          && lra_rematerialization_iter > LRA_MAX_REMATERIALIZATION_PASSES)
        /* After switching off the inheritance and rematerialization
           passes, avoid spilling the reload pseudos that will be
           created, to prevent LRA cycling in some complicated
           cases.  */
        lra_bad_spill_regno_start = lra_constraint_new_regno_start;
      lra_assignment_iter_after_spill = 0;
    }
  ira_restore_scratches (lra_dump_file);
  lra_eliminate (true, false);
  lra_final_code_change ();
  lra_in_progress = false;
  if (live_p)
    lra_clear_live_ranges ();
  lra_live_ranges_finish ();
  lra_constraints_finish ();
  finish_reg_info ();
  sbitmap_free (lra_constraint_insn_stack_bitmap);
  lra_constraint_insn_stack.release ();
  finish_insn_recog_data ();
  regstat_free_n_sets_and_refs ();
  regstat_free_ri ();
  reload_completed = 1;
  update_inc_notes ();

  inserted_p = fixup_abnormal_edges ();

  /* Split basic blocks if we've possibly turned a single trapping insn
     into multiple ones or if the backend otherwise requested to do so.  */
  if (cfun->can_throw_non_call_exceptions
      || cfun->split_basic_blocks_after_reload)
    {
      auto_sbitmap blocks (last_basic_block_for_fn (cfun));
      bitmap_ones (blocks);
      find_many_sub_basic_blocks (blocks);
    }

  if (inserted_p)
    commit_edge_insertions ();

  /* Subsequent passes expect that rtl is unshared, so unshare everything
     here.  */
  unshare_all_rtl_again (get_insns ());

  if (flag_checking)
    check_rtl (true);

  timevar_pop (TV_LRA);
}

/* Called once per compiler run to initialize LRA data once.  */
void
lra_init_once (void)
{
  init_insn_code_data_once ();
}

/* Called once per compiler run to finish the LRA data which are
   initialized once.  */
void
lra_finish_once (void)
{
  finish_insn_code_data_once ();
}
2655
