/* LRA (local register allocator) driver and LRA utilities.
   Copyright (C) 2010-2023 Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


/* The Local Register Allocator (LRA) is a replacement for the former
   reload pass.  It is focused on simplifying the code that solves the
   reload pass's tasks, on making the code easier to maintain, and on
   implementing new promising optimizations.

   The major LRA design solutions are:
     o division into small, manageable, separate sub-tasks
     o reflection of all transformations and decisions in RTL as much
       as possible
     o insn constraints as a primary source of the info (minimizing
       the number of target-dependent macros/hooks)

   In brief, LRA works by iteratively processing insns with the final
   goal of satisfying all insn and address constraints:
     o New reload insns (in brief, reloads) and reload pseudos might
       be generated;
     o Some pseudos might be spilled to assign hard registers to new
       reload pseudos;
     o Spilled pseudo values might be recalculated (rematerialization);
     o Spilled pseudos might be changed to stack memory or their
       equivalences;
     o Allocating stack memory changes address displacements, so a new
       iteration is needed.

   Here is a block diagram of the LRA passes:

                                ------------------------
           ---------------     | Undo inheritance for   |     ---------------
          | Memory-memory |    | spilled pseudos,       |    | New (and old) |
          | move coalesce |<---| splits for pseudos got |<-- |    pseudos    |
           ---------------     | the same hard regs,    |    |  assignment   |
  Start           |            | and optional reloads   |     ---------------
    |             |             ------------------------            ^
    V             |              ----------------                   |
 -----------      V             | Update virtual |                  |
|  Remove   |----> ------------>|    register    |                  |
| scratches |     ^             |  displacements |                  |
 -----------      |              ----------------                   |
                  |                      |                          |
                  |                      V         New              |
                  |                  ------------  pseudos   -------------------
                  |                 |Constraints:| or insns | Inheritance/split |
                  |                 |    RTL     |--------->|  transformations  |
                  |                 | transfor-  |          |    in EBB scope   |
                  | substi-         |  mations   |           -------------------
                  | tutions          ------------
                  |                     | No change
          ----------------              V
         | Spilled pseudo |      -------------------
         |   to memory    |<----| Rematerialization |
         |  substitution  |      -------------------
          ----------------
                  | No substitutions
                  V
      -------------------------
     | Hard regs substitution, |
     |  devirtualization, and  |------> Finish
     | restoring scratches got |
     |        memory           |
      -------------------------

   To speed up the process:
     o We process only insns affected by changes on previous
       iterations;
     o We don't use the DF infrastructure because it results in much
       slower compilation than the special IR described below does;
     o We use a special insn representation for quick access to insn
       info which is always *synchronized* with the current RTL;
     o Insn IR is minimized in memory.  It is divided into three parts:
       o one specific for each insn in RTL (only operand locations);
       o one common for all insns in RTL with the same insn code
	 (different operand attributes from machine descriptions);
       o one oriented for maintenance of live info (list of pseudos).
     o Pseudo data:
       o all insns where the pseudo is referenced;
       o live info (conflicting hard regs, live ranges, # of
	 references etc);
       o data used for assigning (preferred hard regs, costs etc).

   This file contains the LRA driver, LRA utility functions and data,
   and code for dealing with scratches.  */
102
103#include "config.h"
104#include "system.h"
105#include "coretypes.h"
106#include "backend.h"
107#include "target.h"
108#include "rtl.h"
109#include "rtl-error.h"
110#include "tree.h"
111#include "predict.h"
112#include "df.h"
113#include "memmodel.h"
114#include "tm_p.h"
115#include "optabs.h"
116#include "regs.h"
117#include "ira.h"
118#include "recog.h"
119#include "expr.h"
120#include "cfgrtl.h"
121#include "cfgbuild.h"
122#include "lra.h"
123#include "lra-int.h"
124#include "print-rtl.h"
125#include "function-abi.h"
126
/* Dump bitmap SET with TITLE and BB INDEX.  */
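/* A sketch of the output format (with a hypothetical TITLE "live",
   INDEX 3, and a bitmap containing pseudos 65, 66 and 70):

     live 3:

       65   66   70

   with at most about max_nums_on_line numbers per line.  */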
void
lra_dump_bitmap_with_title (const char *title, bitmap set, int index)
{
  unsigned int i;
  int count;
  bitmap_iterator bi;
  static const int max_nums_on_line = 10;

  if (bitmap_empty_p (set))
    return;
  fprintf (lra_dump_file, "  %s %d:", title, index);
  fprintf (lra_dump_file, "\n");
  count = max_nums_on_line + 1;
  EXECUTE_IF_SET_IN_BITMAP (set, 0, i, bi)
    {
      if (count > max_nums_on_line)
	{
	  fprintf (lra_dump_file, "\n    ");
	  count = 0;
	}
      fprintf (lra_dump_file, " %4u", i);
      count++;
    }
  fprintf (lra_dump_file, "\n");
}

/* Hard registers currently not available for allocation.  It can
   change after some hard registers become not eliminable.  */
HARD_REG_SET lra_no_alloc_regs;

static int get_new_reg_value (void);
static void expand_reg_info (void);
static void invalidate_insn_recog_data (int);
static int get_insn_freq (rtx_insn *);
static void invalidate_insn_data_regno_info (lra_insn_recog_data_t,
					     rtx_insn *, int);
/* Expand all regno related info needed for LRA.  */
static void
expand_reg_data (int old)
{
  resize_reg_info ();
  expand_reg_info ();
  ira_expand_reg_equiv ();
  for (int i = (int) max_reg_num () - 1; i >= old; i--)
    lra_change_class (i, ALL_REGS, "      Set", true);
}

/* Create and return a new reg of ORIGINAL mode.  If ORIGINAL is NULL
   or of VOIDmode, use MD_MODE for the new reg.  Initialize its
   register class to RCLASS.  Print a message about assigning class
   RCLASS containing the new register name TITLE unless it is NULL.
   Use the attributes of ORIGINAL if it is a register.  The created
   register will have a unique held value.  */
rtx
lra_create_new_reg_with_unique_value (machine_mode md_mode, rtx original,
				      enum reg_class rclass,
				      HARD_REG_SET *exclude_start_hard_regs,
				      const char *title)
{
  machine_mode mode;
  rtx new_reg;

  if (original == NULL_RTX || (mode = GET_MODE (original)) == VOIDmode)
    mode = md_mode;
  lra_assert (mode != VOIDmode);
  new_reg = gen_reg_rtx (mode);
  if (original == NULL_RTX || ! REG_P (original))
    {
      if (lra_dump_file != NULL)
	fprintf (lra_dump_file, "      Creating newreg=%i", REGNO (new_reg));
    }
  else
    {
      if (ORIGINAL_REGNO (original) >= FIRST_PSEUDO_REGISTER)
	ORIGINAL_REGNO (new_reg) = ORIGINAL_REGNO (original);
      REG_USERVAR_P (new_reg) = REG_USERVAR_P (original);
      REG_POINTER (new_reg) = REG_POINTER (original);
      REG_ATTRS (new_reg) = REG_ATTRS (original);
      if (lra_dump_file != NULL)
	fprintf (lra_dump_file, "      Creating newreg=%i from oldreg=%i",
		 REGNO (new_reg), REGNO (original));
    }
  if (lra_dump_file != NULL)
    {
      if (title != NULL)
	fprintf (lra_dump_file, ", assigning class %s to%s%s r%d",
		 reg_class_names[rclass], *title == '\0' ? "" : " ",
		 title, REGNO (new_reg));
      fprintf (lra_dump_file, "\n");
    }
  expand_reg_data (max_reg_num ());
  setup_reg_classes (REGNO (new_reg), rclass, NO_REGS, rclass);
  if (exclude_start_hard_regs != NULL)
    lra_reg_info[REGNO (new_reg)].exclude_start_hard_regs
      = *exclude_start_hard_regs;
  return new_reg;
}

/* Analogous to the previous function but also inherits the value of
   ORIGINAL.  */
rtx
lra_create_new_reg (machine_mode md_mode, rtx original, enum reg_class rclass,
		    HARD_REG_SET *exclude_start_hard_regs, const char *title)
{
  rtx new_reg;

  new_reg
    = lra_create_new_reg_with_unique_value (md_mode, original, rclass,
					    exclude_start_hard_regs, title);
  if (original != NULL_RTX && REG_P (original))
    lra_assign_reg_val (REGNO (original), REGNO (new_reg));
  return new_reg;
}

/* Set up a unique held value for pseudo REGNO.  */
void
lra_set_regno_unique_value (int regno)
{
  lra_reg_info[regno].val = get_new_reg_value ();
}

/* Invalidate INSN related info used by LRA.  The info should never be
   used after that.  */
void
lra_invalidate_insn_data (rtx_insn *insn)
{
  lra_invalidate_insn_regno_info (insn);
  invalidate_insn_recog_data (INSN_UID (insn));
}

/* Mark INSN deleted and invalidate the insn related info used by
   LRA.  */
void
lra_set_insn_deleted (rtx_insn *insn)
{
  lra_invalidate_insn_data (insn);
  SET_INSN_DELETED (insn);
}

/* Delete an unneeded INSN and any previous insns whose sole purpose is
   loading data that is dead in INSN.  */
void
lra_delete_dead_insn (rtx_insn *insn)
{
  rtx_insn *prev = prev_real_insn (insn);
  rtx prev_dest;

  /* If the previous insn sets a register that dies in our insn,
     delete it too.  */
  if (prev && GET_CODE (PATTERN (prev)) == SET
      && (prev_dest = SET_DEST (PATTERN (prev)), REG_P (prev_dest))
      && reg_mentioned_p (prev_dest, PATTERN (insn))
      && find_regno_note (insn, REG_DEAD, REGNO (prev_dest))
      && ! side_effects_p (SET_SRC (PATTERN (prev))))
    lra_delete_dead_insn (prev);

  lra_set_insn_deleted (insn);
}

/* Emit insn x = y + z.  Return NULL if we failed to do it.
   Otherwise, return the insn.  We don't use gen_add3_insn as it might
   clobber CC.  */
static rtx_insn *
emit_add3_insn (rtx x, rtx y, rtx z)
{
  rtx_insn *last;

  last = get_last_insn ();

  if (have_addptr3_insn (x, y, z))
    {
      rtx_insn *insn = gen_addptr3_insn (x, y, z);

      /* If the target provides an "addptr" pattern it hopefully does
	 so for a reason.  So falling back to the normal add would be
	 a bug.  */
      lra_assert (insn != NULL_RTX);
      emit_insn (insn);
      return insn;
    }

  rtx_insn *insn = emit_insn (gen_rtx_SET (x, gen_rtx_PLUS (GET_MODE (y),
							     y, z)));
  if (recog_memoized (insn) < 0)
    {
      delete_insns_since (last);
      insn = NULL;
    }
  return insn;
}

/* Emit insn x = x + y.  Return the insn.  We use gen_add2_insn as the
   last resort.  */
static rtx_insn *
emit_add2_insn (rtx x, rtx y)
{
  rtx_insn *insn = emit_add3_insn (x, x, y);
  if (insn == NULL_RTX)
    {
      insn = gen_add2_insn (x, y);
      if (insn != NULL_RTX)
	emit_insn (insn);
    }
  return insn;
}

/* The target checks operands through operand predicates to recognize
   an insn.  We should take special precautions to generate add insns
   which are frequent results of elimination.

   Emit insns for x = y + z.  X can be used to store intermediate
   values and should not be in Y and Z when we use X to store an
   intermediate value.  Y + Z should form [base] [+ index[ * scale]] [
   + disp] where base and index are registers, disp and scale are
   constants.  Y should contain base if it is present, Z should
   contain disp if any.  index[*scale] can be part of Y or Z.  */
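/* An illustrative (hypothetical) decomposition: for
     Y = (plus (reg base) (mult (reg index) (const_int 4)))
     Z = (const_int 24)
   the code below finds base = (reg base), index = (reg index),
   scale = 4 and disp = 24, and then tries progressively simpler insn
   sequences to compute X = Y + Z.  */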
void
lra_emit_add (rtx x, rtx y, rtx z)
{
  int old;
  rtx_insn *last;
  rtx a1, a2, base, index, disp, scale, index_scale;
  bool ok_p;

  rtx_insn *add3_insn = emit_add3_insn (x, y, z);
  old = max_reg_num ();
  if (add3_insn != NULL)
    ;
  else
    {
      disp = a2 = NULL_RTX;
      if (GET_CODE (y) == PLUS)
	{
	  a1 = XEXP (y, 0);
	  a2 = XEXP (y, 1);
	  disp = z;
	}
      else
	{
	  a1 = y;
	  if (CONSTANT_P (z))
	    disp = z;
	  else
	    a2 = z;
	}
      index_scale = scale = NULL_RTX;
      if (GET_CODE (a1) == MULT)
	{
	  index_scale = a1;
	  index = XEXP (a1, 0);
	  scale = XEXP (a1, 1);
	  base = a2;
	}
      else if (a2 != NULL_RTX && GET_CODE (a2) == MULT)
	{
	  index_scale = a2;
	  index = XEXP (a2, 0);
	  scale = XEXP (a2, 1);
	  base = a1;
	}
      else
	{
	  base = a1;
	  index = a2;
	}
      if ((base != NULL_RTX && ! (REG_P (base) || GET_CODE (base) == SUBREG))
	  || (index != NULL_RTX
	      && ! (REG_P (index) || GET_CODE (index) == SUBREG))
	  || (disp != NULL_RTX && ! CONSTANT_P (disp))
	  || (scale != NULL_RTX && ! CONSTANT_P (scale)))
	{
	  /* Probably we have no 3-op add.  The last chance is to use a
	     2-op add insn.  To succeed, don't move Z to X as an
	     address segment always comes in Y.  Otherwise, we might
	     fail when adding the address segment to a register.  */
	  lra_assert (x != y && x != z);
	  emit_move_insn (x, y);
	  rtx_insn *insn = emit_add2_insn (x, z);
	  lra_assert (insn != NULL_RTX);
	}
      else
	{
	  if (index_scale == NULL_RTX)
	    index_scale = index;
	  if (disp == NULL_RTX)
	    {
	      /* Generate x = index_scale; x = x + base.  */
	      lra_assert (index_scale != NULL_RTX && base != NULL_RTX);
	      emit_move_insn (x, index_scale);
	      rtx_insn *insn = emit_add2_insn (x, base);
	      lra_assert (insn != NULL_RTX);
	    }
	  else if (scale == NULL_RTX)
	    {
	      /* Try x = base + disp.  */
	      lra_assert (base != NULL_RTX);
	      last = get_last_insn ();
	      rtx_insn *move_insn =
		emit_move_insn (x, gen_rtx_PLUS (GET_MODE (base), base, disp));
	      if (recog_memoized (move_insn) < 0)
		{
		  delete_insns_since (last);
		  /* Generate x = disp; x = x + base.  */
		  emit_move_insn (x, disp);
		  rtx_insn *add2_insn = emit_add2_insn (x, base);
		  lra_assert (add2_insn != NULL_RTX);
		}
	      /* Generate x = x + index.  */
	      if (index != NULL_RTX)
		{
		  rtx_insn *insn = emit_add2_insn (x, index);
		  lra_assert (insn != NULL_RTX);
		}
	    }
	  else
	    {
	      /* Try x = index_scale; x = x + disp; x = x + base.  */
	      last = get_last_insn ();
	      rtx_insn *move_insn = emit_move_insn (x, index_scale);
	      ok_p = false;
	      if (recog_memoized (move_insn) >= 0)
		{
		  rtx_insn *insn = emit_add2_insn (x, disp);
		  if (insn != NULL_RTX)
		    {
		      if (base == NULL_RTX)
			ok_p = true;
		      else
			{
			  insn = emit_add2_insn (x, base);
			  if (insn != NULL_RTX)
			    ok_p = true;
			}
		    }
		}
	      if (! ok_p)
		{
		  rtx_insn *insn;

		  delete_insns_since (last);
		  /* Generate x = disp; x = x + base; x = x + index_scale.  */
		  emit_move_insn (x, disp);
		  if (base != NULL_RTX)
		    {
		      insn = emit_add2_insn (x, base);
		      lra_assert (insn != NULL_RTX);
		    }
		  insn = emit_add2_insn (x, index_scale);
		  lra_assert (insn != NULL_RTX);
		}
	    }
	}
    }
  /* Functions emit_... can create pseudos -- so expand the pseudo
     data.  */
  if (old != max_reg_num ())
    expand_reg_data (old);
}

/* The number of emitted reload insns so far.  */
int lra_curr_reload_num;

static void remove_insn_scratches (rtx_insn *insn);

/* Emit x := y, processing the special case when y = u + v or
   y = u + v * scale + w through emit_add (Y can be an address which
   is base + index reg * scale + displacement in the general case).
   X may be used as an intermediate result, therefore it should not
   be in Y.  */
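/* For illustration (hypothetical operands): a call with
     Y = (plus (reg base) (const_int 8))
   does not emit a plain move; it dispatches to lra_emit_add (X,
   (reg base), (const_int 8)), which picks the cheapest add sequence
   the target recognizes.  */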
void
lra_emit_move (rtx x, rtx y)
{
  int old;
  rtx_insn *insn;

  if (GET_CODE (y) != PLUS)
    {
      if (rtx_equal_p (x, y))
	return;
      old = max_reg_num ();

      insn = (GET_CODE (x) != STRICT_LOW_PART
	      ? emit_move_insn (x, y) : emit_insn (gen_rtx_SET (x, y)));
      /* The move pattern may require scratch registers, so convert them
	 into real registers now.  */
      if (insn != NULL_RTX)
	remove_insn_scratches (insn);
      if (REG_P (x))
	lra_reg_info[ORIGINAL_REGNO (x)].last_reload = ++lra_curr_reload_num;
      /* Function emit_move can create pseudos -- so expand the pseudo
	 data.  */
      if (old != max_reg_num ())
	expand_reg_data (old);
      return;
    }
  lra_emit_add (x, XEXP (y, 0), XEXP (y, 1));
}

/* Update insn operands which are duplicates of operands whose numbers
   are in the array NOPS (with end marker -1).  The insn is
   represented by its LRA internal representation ID.  */
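/* A hypothetical example: for a pattern like
     (set (match_operand 0 "") (plus (match_dup 0) (match_operand 1 "")))
   the (match_dup 0) location has dup_num 0, so after operand 0 is
   replaced, calling this function with NOPS = {0, -1} copies the new
   operand 0 into the duplicate location as well.  */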
void
lra_update_dups (lra_insn_recog_data_t id, signed char *nops)
{
  int i, j, nop;
  struct lra_static_insn_data *static_id = id->insn_static_data;

  for (i = 0; i < static_id->n_dups; i++)
    for (j = 0; (nop = nops[j]) >= 0; j++)
      if (static_id->dup_num[i] == nop)
	*id->dup_loc[i] = *id->operand_loc[nop];
}

/* Report asm insn error and modify the asm insn.  */
void
lra_asm_insn_error (rtx_insn *insn)
{
  lra_asm_error_p = true;
  error_for_asm (insn,
		 "%<asm%> operand has impossible constraints"
		 " or there are not enough registers");
  /* Avoid further trouble with this insn.  */
  if (JUMP_P (insn))
    {
      ira_nullify_asm_goto (insn);
      lra_update_insn_regno_info (insn);
    }
  else
    {
      PATTERN (insn) = gen_rtx_USE (VOIDmode, const0_rtx);
      lra_set_insn_deleted (insn);
    }
}



/* This page contains code dealing with info about registers in the
   insns.  */

/* Pools for insn reg info.  */
object_allocator<lra_insn_reg> lra_insn_reg_pool ("insn regs");

/* Create LRA insn related info about a reference to REGNO in INSN
   with TYPE (in/out/inout), biggest reference mode MODE, a flag that
   it is referenced through a subreg (SUBREG_P), and a reference to
   the next insn reg info (NEXT).  If REGNO can be early clobbered,
   the alternatives in which it can be early clobbered are given by
   EARLY_CLOBBER_ALTS.  */
static struct lra_insn_reg *
new_insn_reg (rtx_insn *insn, int regno, enum op_type type,
	      machine_mode mode, bool subreg_p,
	      alternative_mask early_clobber_alts,
	      struct lra_insn_reg *next)
{
  lra_insn_reg *ir = lra_insn_reg_pool.allocate ();
  ir->type = type;
  ir->biggest_mode = mode;
  if (NONDEBUG_INSN_P (insn)
      && partial_subreg_p (lra_reg_info[regno].biggest_mode, mode))
    lra_reg_info[regno].biggest_mode = mode;
  ir->subreg_p = subreg_p;
  ir->early_clobber_alts = early_clobber_alts;
  ir->regno = regno;
  ir->next = next;
  return ir;
}

/* Free insn reg info list IR.  */
static void
free_insn_regs (struct lra_insn_reg *ir)
{
  struct lra_insn_reg *next_ir;

  for (; ir != NULL; ir = next_ir)
    {
      next_ir = ir->next;
      lra_insn_reg_pool.remove (ir);
    }
}

/* Finish pool for insn reg info.  */
static void
finish_insn_regs (void)
{
  lra_insn_reg_pool.release ();
}



/* This page contains code dealing with LRA insn info (or in other
   words the LRA internal insn representation).  */

/* Map INSN_CODE -> the static insn data.  This info is valid for the
   whole translation unit.  */
struct lra_static_insn_data *insn_code_data[NUM_INSN_CODES];

/* Debug insns are represented as a special insn with one input
   operand which is an RTL expression in var_location.  */

/* The following data are used as static insn operand data for all
   debug insns.  If structure lra_operand_data is changed, the
   initializer should be changed too.  */
static struct lra_operand_data debug_operand_data =
  {
    NULL, /* alternative  */
    0, /* early_clobber_alts */
    E_VOIDmode, /* We are not interested in the operand mode.  */
    OP_IN,
    0, 0, 0
  };

/* The following data are used as static insn data for all debug
   bind insns.  If structure lra_static_insn_data is changed, the
   initializer should be changed too.  */
static struct lra_static_insn_data debug_bind_static_data =
  {
    &debug_operand_data,
    0,	/* Duplication operands #.  */
    -1, /* Commutative operand #.  */
    1,	/* Operands #.  There is only one operand which is the debug
	   RTL expression.  */
    0,	/* Duplications #.  */
    0,	/* Alternatives #.  We are not interested in alternatives
	   because we do not process debug insns for reloads.  */
    NULL, /* Hard registers referenced in machine description.  */
    NULL  /* Descriptions of operands in alternatives.  */
  };

/* The following data are used as static insn data for all debug
   marker insns.  If structure lra_static_insn_data is changed, the
   initializer should be changed too.  */
static struct lra_static_insn_data debug_marker_static_data =
  {
    &debug_operand_data,
    0,	/* Duplication operands #.  */
    -1, /* Commutative operand #.  */
    0,	/* Operands #.  There isn't any operand.  */
    0,	/* Duplications #.  */
    0,	/* Alternatives #.  We are not interested in alternatives
	   because we do not process debug insns for reloads.  */
    NULL, /* Hard registers referenced in machine description.  */
    NULL  /* Descriptions of operands in alternatives.  */
  };

/* Called once per compiler run to initialize some LRA data related
   to insns.  */
static void
init_insn_code_data_once (void)
{
  memset (insn_code_data, 0, sizeof (insn_code_data));
}

/* Called once per compiler run to finalize some LRA data related to
   insns.  */
static void
finish_insn_code_data_once (void)
{
  for (unsigned int i = 0; i < NUM_INSN_CODES; i++)
    {
      if (insn_code_data[i] != NULL)
	{
	  free (insn_code_data[i]);
	  insn_code_data[i] = NULL;
	}
    }
}

/* Return static insn data, allocating and setting it up if necessary.
   Although dup_num is static data (it depends only on icode), to set
   it up we need to extract the insn first.  So recog_data should be
   valid for a normal insn (ICODE >= 0) before the call.  */
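/* A sketch of the allocation performed below: for a normal insn the
   static data, its operand array and its dup_num array live in one
   contiguous block (sizes are illustrative):

     +----------------------+---------------------+---------------+
     | lra_static_insn_data | nop operand entries | ndup ints     |
     +----------------------+---------------------+---------------+
     ^ data                 ^ data->operand       ^ data->dup_num
*/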
static struct lra_static_insn_data *
get_static_insn_data (int icode, int nop, int ndup, int nalt)
{
  struct lra_static_insn_data *data;
  size_t n_bytes;

  lra_assert (icode < (int) NUM_INSN_CODES);
  if (icode >= 0 && (data = insn_code_data[icode]) != NULL)
    return data;
  lra_assert (nop >= 0 && ndup >= 0 && nalt >= 0);
  n_bytes = sizeof (struct lra_static_insn_data)
    + sizeof (struct lra_operand_data) * nop
    + sizeof (int) * ndup;
  data = XNEWVAR (struct lra_static_insn_data, n_bytes);
  data->operand_alternative = NULL;
  data->n_operands = nop;
  data->n_dups = ndup;
  data->n_alternatives = nalt;
  data->operand = ((struct lra_operand_data *)
		   ((char *) data + sizeof (struct lra_static_insn_data)));
  data->dup_num = ((int *) ((char *) data->operand
			    + sizeof (struct lra_operand_data) * nop));
  if (icode >= 0)
    {
      int i;

      insn_code_data[icode] = data;
      for (i = 0; i < nop; i++)
	{
	  data->operand[i].constraint
	    = insn_data[icode].operand[i].constraint;
	  data->operand[i].mode = insn_data[icode].operand[i].mode;
	  data->operand[i].strict_low = insn_data[icode].operand[i].strict_low;
	  data->operand[i].is_operator
	    = insn_data[icode].operand[i].is_operator;
	  data->operand[i].type
	    = (data->operand[i].constraint[0] == '=' ? OP_OUT
	       : data->operand[i].constraint[0] == '+' ? OP_INOUT
	       : OP_IN);
	  data->operand[i].is_address = false;
	}
      for (i = 0; i < ndup; i++)
	data->dup_num[i] = recog_data.dup_num[i];
    }
  return data;
}

/* The current length of the following array.  */
int lra_insn_recog_data_len;

/* Map INSN_UID -> the insn recog data (NULL if unknown).  */
lra_insn_recog_data_t *lra_insn_recog_data;

/* Alloc pool we allocate entries for lra_insn_recog_data from.  */
static object_allocator<class lra_insn_recog_data>
  lra_insn_recog_data_pool ("insn recog data pool");

/* Initialize LRA data about insns.  */
static void
init_insn_recog_data (void)
{
  lra_insn_recog_data_len = 0;
  lra_insn_recog_data = NULL;
}

/* Expand, if necessary, LRA data about insns.  */
static void
check_and_expand_insn_recog_data (int index)
{
  int i, old;

  if (lra_insn_recog_data_len > index)
    return;
  old = lra_insn_recog_data_len;
  lra_insn_recog_data_len = index * 3 / 2 + 1;
  lra_insn_recog_data = XRESIZEVEC (lra_insn_recog_data_t,
				    lra_insn_recog_data,
				    lra_insn_recog_data_len);
  for (i = old; i < lra_insn_recog_data_len; i++)
    lra_insn_recog_data[i] = NULL;
}

/* Finish LRA DATA about insn.  */
static void
free_insn_recog_data (lra_insn_recog_data_t data)
{
  if (data->operand_loc != NULL)
    free (data->operand_loc);
  if (data->dup_loc != NULL)
    free (data->dup_loc);
  if (data->arg_hard_regs != NULL)
    free (data->arg_hard_regs);
  if (data->icode < 0 && NONDEBUG_INSN_P (data->insn))
    {
      if (data->insn_static_data->operand_alternative != NULL)
	free (const_cast <operand_alternative *>
	      (data->insn_static_data->operand_alternative));
      free_insn_regs (data->insn_static_data->hard_regs);
      free (data->insn_static_data);
    }
  free_insn_regs (data->regs);
  data->regs = NULL;
  lra_insn_recog_data_pool.remove (data);
}

/* Pools for copies.  */
static object_allocator<lra_copy> lra_copy_pool ("lra copies");

/* Finish LRA data about all insns.  */
static void
finish_insn_recog_data (void)
{
  int i;
  lra_insn_recog_data_t data;

  for (i = 0; i < lra_insn_recog_data_len; i++)
    if ((data = lra_insn_recog_data[i]) != NULL)
      free_insn_recog_data (data);
  finish_insn_regs ();
  lra_copy_pool.release ();
  lra_insn_reg_pool.release ();
  lra_insn_recog_data_pool.release ();
  free (lra_insn_recog_data);
}
/* Setup info about operands in alternatives of LRA DATA of insn.  */
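/* As a reminder (general md syntax, not specific to LRA): a leading
   '%' in a constraint, e.g. operand 0 in a hypothetical pattern with
   constraints "%r,r" "r,r", marks operands 0 and 1 as commutative,
   so LRA may swap them when matching alternatives.  The loop below
   records such an operand in static_data->commutative.  */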
static void
setup_operand_alternative (lra_insn_recog_data_t data,
			   const operand_alternative *op_alt)
{
  int i, j, nop, nalt;
  int icode = data->icode;
  struct lra_static_insn_data *static_data = data->insn_static_data;

  static_data->commutative = -1;
  nop = static_data->n_operands;
  nalt = static_data->n_alternatives;
  static_data->operand_alternative = op_alt;
  for (i = 0; i < nop; i++)
    {
      static_data->operand[i].early_clobber_alts = 0;
      static_data->operand[i].is_address = false;
      if (static_data->operand[i].constraint[0] == '%')
	{
	  /* We currently only support one commutative pair of operands.  */
	  if (static_data->commutative < 0)
	    static_data->commutative = i;
	  else
	    lra_assert (icode < 0); /* Asm  */
	  /* The last operand should not be marked commutative.  */
	  lra_assert (i != nop - 1);
	}
    }
  for (j = 0; j < nalt; j++)
    for (i = 0; i < nop; i++, op_alt++)
      {
	if (op_alt->earlyclobber)
	  static_data->operand[i].early_clobber_alts |= (alternative_mask) 1 << j;
	static_data->operand[i].is_address |= op_alt->is_address;
      }
}

/* Recursively process X and collect info about registers, which are
   not the insn operands, in X with TYPE (in/out/inout) and a flag
   that it is early clobbered in the insn (EARLY_CLOBBER), and add
   the info to LIST.  X is a part of the insn given by DATA.  Return
   the resulting list.  */
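/* A typical (hypothetical, i386-style) case: a define_insn whose
   pattern is
     (parallel [(set (match_operand 0 "") (match_operand 1 ""))
		(clobber (reg:CC FLAGS_REG))])
   mentions the CC hard register outside any operand; it is collected
   here as an early-clobbered OP_OUT hard register.  */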
static struct lra_insn_reg *
collect_non_operand_hard_regs (rtx_insn *insn, rtx *x,
			       lra_insn_recog_data_t data,
			       struct lra_insn_reg *list,
			       enum op_type type, bool early_clobber)
{
  int i, j, regno, last;
  bool subreg_p;
  machine_mode mode;
  struct lra_insn_reg *curr;
  rtx op = *x;
  enum rtx_code code = GET_CODE (op);
  const char *fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < data->insn_static_data->n_operands; i++)
    if (! data->insn_static_data->operand[i].is_operator
	&& x == data->operand_loc[i])
      /* It is an operand loc.  Stop here.  */
      return list;
  for (i = 0; i < data->insn_static_data->n_dups; i++)
    if (x == data->dup_loc[i])
      /* It is a dup loc.  Stop here.  */
      return list;
  mode = GET_MODE (op);
  subreg_p = false;
  if (code == SUBREG)
    {
      mode = wider_subreg_mode (op);
      if (read_modify_subreg_p (op))
	subreg_p = true;
      op = SUBREG_REG (op);
      code = GET_CODE (op);
    }
  if (REG_P (op))
    {
      if ((regno = REGNO (op)) >= FIRST_PSEUDO_REGISTER)
	return list;
      /* Process all regs even unallocatable ones as we need info
	 about all regs for the rematerialization pass.  */
      for (last = end_hard_regno (mode, regno); regno < last; regno++)
	{
	  for (curr = list; curr != NULL; curr = curr->next)
	    if (curr->regno == regno && curr->subreg_p == subreg_p
		&& curr->biggest_mode == mode)
	      {
		if (curr->type != type)
		  curr->type = OP_INOUT;
		if (early_clobber)
		  curr->early_clobber_alts = ALL_ALTERNATIVES;
		break;
	      }
	  if (curr == NULL)
	    {
	      /* This is a new hard regno or the info cannot be
		 integrated into the found structure.  */
#ifdef STACK_REGS
	      early_clobber
		= (early_clobber
		   /* This clobber is only to inform about popping the
		      floating point stack.  */
		   && ! (FIRST_STACK_REG <= regno
			 && regno <= LAST_STACK_REG));
#endif
	      list = new_insn_reg (data->insn, regno, type, mode, subreg_p,
				   early_clobber ? ALL_ALTERNATIVES : 0, list);
	    }
	}
      return list;
    }
  switch (code)
    {
    case SET:
      list = collect_non_operand_hard_regs (insn, &SET_DEST (op), data,
					    list, OP_OUT, false);
      list = collect_non_operand_hard_regs (insn, &SET_SRC (op), data,
					    list, OP_IN, false);
      break;
    case CLOBBER:
      /* We treat clobber of non-operand hard registers as early clobber.  */
      list = collect_non_operand_hard_regs (insn, &XEXP (op, 0), data,
					    list, OP_OUT, true);
      break;
    case PRE_INC: case PRE_DEC: case POST_INC: case POST_DEC:
      list = collect_non_operand_hard_regs (insn, &XEXP (op, 0), data,
					    list, OP_INOUT, false);
      break;
    case PRE_MODIFY: case POST_MODIFY:
      list = collect_non_operand_hard_regs (insn, &XEXP (op, 0), data,
					    list, OP_INOUT, false);
      list = collect_non_operand_hard_regs (insn, &XEXP (op, 1), data,
					    list, OP_IN, false);
      break;
    default:
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    list = collect_non_operand_hard_regs (insn, &XEXP (op, i), data,
						  list, OP_IN, false);
	  else if (fmt[i] == 'E')
	    for (j = XVECLEN (op, i) - 1; j >= 0; j--)
	      list = collect_non_operand_hard_regs (insn, &XVECEXP (op, i, j),
						    data, list, OP_IN, false);
	}
    }
  return list;
}

/* Set up and return info about INSN.  Set up the info if it is not
   set up yet.  */
lra_insn_recog_data_t
lra_set_insn_recog_data (rtx_insn *insn)
{
  lra_insn_recog_data_t data;
  int i, n, icode;
  rtx **locs;
  unsigned int uid = INSN_UID (insn);
  struct lra_static_insn_data *insn_static_data;

  check_and_expand_insn_recog_data (uid);
  if (DEBUG_INSN_P (insn))
    icode = -1;
  else
    {
      icode = INSN_CODE (insn);
      if (icode < 0)
	/* It might be a new simple insn which is not recognized yet.  */
	INSN_CODE (insn) = icode = recog_memoized (insn);
    }
  data = lra_insn_recog_data_pool.allocate ();
  lra_insn_recog_data[uid] = data;
  data->insn = insn;
  data->used_insn_alternative = LRA_UNKNOWN_ALT;
  data->asm_reloads_num = 0;
  data->icode = icode;
  data->regs = NULL;
  if (DEBUG_INSN_P (insn))
    {
      data->dup_loc = NULL;
      data->arg_hard_regs = NULL;
      data->preferred_alternatives = ALL_ALTERNATIVES;
      if (DEBUG_BIND_INSN_P (insn))
	{
	  data->insn_static_data = &debug_bind_static_data;
	  data->operand_loc = XNEWVEC (rtx *, 1);
	  data->operand_loc[0] = &INSN_VAR_LOCATION_LOC (insn);
	}
      else if (DEBUG_MARKER_INSN_P (insn))
	{
	  data->insn_static_data = &debug_marker_static_data;
	  data->operand_loc = NULL;
	}
      return data;
    }
  if (icode < 0)
    {
      int nop, nalt;
      machine_mode operand_mode[MAX_RECOG_OPERANDS];
      const char *constraints[MAX_RECOG_OPERANDS];

      nop = asm_noperands (PATTERN (insn));
      data->operand_loc = data->dup_loc = NULL;
      nalt = 1;
      if (nop < 0)
	{
	  /* It is a special insn like USE or CLOBBER.  We should be
	     able to recognize any regular insn; otherwise LRA can do
	     nothing with this insn.  */
	  gcc_assert (GET_CODE (PATTERN (insn)) == USE
		      || GET_CODE (PATTERN (insn)) == CLOBBER
		      || GET_CODE (PATTERN (insn)) == ASM_INPUT);
	  data->insn_static_data = insn_static_data
	    = get_static_insn_data (-1, 0, 0, nalt);
	}
      else
	{
	  /* expand_asm_operands makes sure there aren't too many
	     operands.  */
	  lra_assert (nop <= MAX_RECOG_OPERANDS);
	  if (nop != 0)
	    data->operand_loc = XNEWVEC (rtx *, nop);
	  /* Now get the operand values and constraints out of the
	     insn.  */
	  decode_asm_operands (PATTERN (insn), NULL,
			       data->operand_loc,
			       constraints, operand_mode, NULL);
	  if (nop > 0)
	    for (const char *p = constraints[0]; *p; p++)
	      nalt += *p == ',';
	  data->insn_static_data = insn_static_data
	    = get_static_insn_data (-1, nop, 0, nalt);
	  for (i = 0; i < nop; i++)
	    {
	      insn_static_data->operand[i].mode = operand_mode[i];
	      insn_static_data->operand[i].constraint = constraints[i];
	      insn_static_data->operand[i].strict_low = false;
	      insn_static_data->operand[i].is_operator = false;
	      insn_static_data->operand[i].is_address = false;
	    }
	}
      for (i = 0; i < insn_static_data->n_operands; i++)
	insn_static_data->operand[i].type
	  = (insn_static_data->operand[i].constraint[0] == '=' ? OP_OUT
	     : insn_static_data->operand[i].constraint[0] == '+' ? OP_INOUT
	     : OP_IN);
      data->preferred_alternatives = ALL_ALTERNATIVES;
      if (nop > 0)
	{
	  operand_alternative *op_alt = XCNEWVEC (operand_alternative,
						  nalt * nop);
	  preprocess_constraints (nop, nalt, constraints, op_alt,
				  data->operand_loc);
	  setup_operand_alternative (data, op_alt);
	}
    }
  else
    {
      insn_extract (insn);
      data->insn_static_data = insn_static_data
	= get_static_insn_data (icode, insn_data[icode].n_operands,
				insn_data[icode].n_dups,
				insn_data[icode].n_alternatives);
      n = insn_static_data->n_operands;
      if (n == 0)
	locs = NULL;
      else
	{
	  locs = XNEWVEC (rtx *, n);
	  memcpy (locs, recog_data.operand_loc, n * sizeof (rtx *));
	}
      data->operand_loc = locs;
      n = insn_static_data->n_dups;
      if (n == 0)
	locs = NULL;
      else
	{
	  locs = XNEWVEC (rtx *, n);
	  memcpy (locs, recog_data.dup_loc, n * sizeof (rtx *));
	}
      data->dup_loc = locs;
      data->preferred_alternatives = get_preferred_alternatives (insn);
      const operand_alternative *op_alt = preprocess_insn_constraints (icode);
      if (!insn_static_data->operand_alternative)
	setup_operand_alternative (data, op_alt);
      else if (op_alt != insn_static_data->operand_alternative)
	insn_static_data->operand_alternative = op_alt;
    }
  if (GET_CODE (PATTERN (insn)) == CLOBBER || GET_CODE (PATTERN (insn)) == USE)
    insn_static_data->hard_regs = NULL;
  else
    insn_static_data->hard_regs
      = collect_non_operand_hard_regs (insn, &PATTERN (insn), data,
				       NULL, OP_IN, false);
  data->arg_hard_regs = NULL;
  if (CALL_P (insn))
    {
      bool use_p;
      rtx link;
      int n_hard_regs, regno, arg_hard_regs[FIRST_PSEUDO_REGISTER];

      n_hard_regs = 0;
      /* Find implicit hard register usage.  We believe it will not be
	 changed whatever transformations are used.  Call insns are
	 such an example.  */
      for (link = CALL_INSN_FUNCTION_USAGE (insn);
	   link != NULL_RTX;
	   link = XEXP (link, 1))
	if (((use_p = GET_CODE (XEXP (link, 0)) == USE)
	     || GET_CODE (XEXP (link, 0)) == CLOBBER)
	    && REG_P (XEXP (XEXP (link, 0), 0)))
	  {
	    regno = REGNO (XEXP (XEXP (link, 0), 0));
	    lra_assert (regno < FIRST_PSEUDO_REGISTER);
	    /* It is an argument register.  */
	    for (i = REG_NREGS (XEXP (XEXP (link, 0), 0)) - 1; i >= 0; i--)
	      arg_hard_regs[n_hard_regs++]
		= regno + i + (use_p ? 0 : FIRST_PSEUDO_REGISTER);
	  }

      if (n_hard_regs != 0)
	{
	  arg_hard_regs[n_hard_regs++] = -1;
	  data->arg_hard_regs = XNEWVEC (int, n_hard_regs);
	  memcpy (data->arg_hard_regs, arg_hard_regs,
		  sizeof (int) * n_hard_regs);
	}
    }
  /* Some output operands can be recognized only from the context, not
     from the constraints, which are empty in this case.  A call insn
     may contain a hard register in the set destination with an empty
     constraint, and extract_insn treats it as an input.  */
  for (i = 0; i < insn_static_data->n_operands; i++)
    {
      int j;
      rtx pat, set;
      struct lra_operand_data *operand = &insn_static_data->operand[i];

      /* ??? Should we treat 'X' the same way?  It looks to me that
	 'X' means anything and an empty constraint means we do not
	 care.  */
      if (operand->type != OP_IN || *operand->constraint != '\0'
	  || operand->is_operator)
	continue;
      pat = PATTERN (insn);
      if (GET_CODE (pat) == SET)
	{
	  if (data->operand_loc[i] != &SET_DEST (pat))
	    continue;
	}
      else if (GET_CODE (pat) == PARALLEL)
	{
	  for (j = XVECLEN (pat, 0) - 1; j >= 0; j--)
	    {
	      set = XVECEXP (PATTERN (insn), 0, j);
	      if (GET_CODE (set) == SET
		  && &SET_DEST (set) == data->operand_loc[i])
		break;
	    }
	  if (j < 0)
	    continue;
	}
      else
	continue;
      operand->type = OP_OUT;
    }
  return data;
}

/* Return info about insn given by UID.  The info should be already
   set up.  */
static lra_insn_recog_data_t
get_insn_recog_data_by_uid (int uid)
{
  lra_insn_recog_data_t data;

  data = lra_insn_recog_data[uid];
  lra_assert (data != NULL);
  return data;
}

/* Invalidate all info about insn given by its UID.  */
static void
invalidate_insn_recog_data (int uid)
{
  lra_insn_recog_data_t data;

  data = lra_insn_recog_data[uid];
  lra_assert (data != NULL);
  free_insn_recog_data (data);
  lra_insn_recog_data[uid] = NULL;
}

/* Update all the insn info about INSN.  It is usually called when
   something in the insn was changed.  Return the updated info.  */
lra_insn_recog_data_t
lra_update_insn_recog_data (rtx_insn *insn)
{
  lra_insn_recog_data_t data;
  int n;
  unsigned int uid = INSN_UID (insn);
  struct lra_static_insn_data *insn_static_data;
  poly_int64 sp_offset = 0;

  check_and_expand_insn_recog_data (uid);
  if ((data = lra_insn_recog_data[uid]) != NULL
      && data->icode != INSN_CODE (insn))
    {
      sp_offset = data->sp_offset;
      invalidate_insn_data_regno_info (data, insn, get_insn_freq (insn));
      invalidate_insn_recog_data (uid);
      data = NULL;
    }
  if (data == NULL)
    {
      data = lra_get_insn_recog_data (insn);
      /* Initiate or restore SP offset.  */
      data->sp_offset = sp_offset;
      return data;
    }
  insn_static_data = data->insn_static_data;
  data->used_insn_alternative = LRA_UNKNOWN_ALT;
  if (DEBUG_INSN_P (insn))
    return data;
  if (data->icode < 0)
    {
      int nop;
      machine_mode operand_mode[MAX_RECOG_OPERANDS];
      const char *constraints[MAX_RECOG_OPERANDS];

      nop = asm_noperands (PATTERN (insn));
      if (nop >= 0)
	{
	  lra_assert (nop == data->insn_static_data->n_operands);
	  /* Now get the operand values and constraints out of the
	     insn.  */
	  decode_asm_operands (PATTERN (insn), NULL,
			       data->operand_loc,
			       constraints, operand_mode, NULL);

	  if (flag_checking)
	    for (int i = 0; i < nop; i++)
	      lra_assert
		(insn_static_data->operand[i].mode == operand_mode[i]
		 && insn_static_data->operand[i].constraint == constraints[i]
		 && ! insn_static_data->operand[i].is_operator);
	}

      if (flag_checking)
	for (int i = 0; i < insn_static_data->n_operands; i++)
	  lra_assert
	    (insn_static_data->operand[i].type
	     == (insn_static_data->operand[i].constraint[0] == '=' ? OP_OUT
		 : insn_static_data->operand[i].constraint[0] == '+' ? OP_INOUT
		 : OP_IN));
    }
  else
    {
      insn_extract (insn);
      n = insn_static_data->n_operands;
      if (n != 0)
	memcpy (data->operand_loc, recog_data.operand_loc, n * sizeof (rtx *));
      n = insn_static_data->n_dups;
      if (n != 0)
	memcpy (data->dup_loc, recog_data.dup_loc, n * sizeof (rtx *));
      lra_assert (check_bool_attrs (insn));
    }
  return data;
}

/* Set up that INSN is using alternative ALT now.  */
void
lra_set_used_insn_alternative (rtx_insn *insn, int alt)
{
  lra_insn_recog_data_t data;

  data = lra_get_insn_recog_data (insn);
  data->used_insn_alternative = alt;
}

/* Set up that insn with UID is using alternative ALT now.  The insn
   info should be already set up.  */
void
lra_set_used_insn_alternative_by_uid (int uid, int alt)
{
  lra_insn_recog_data_t data;

  check_and_expand_insn_recog_data (uid);
  data = lra_insn_recog_data[uid];
  lra_assert (data != NULL);
  data->used_insn_alternative = alt;
}



/* This page contains code dealing with common register info and
   pseudo copies.  */

/* The size of the following array.  */
static int reg_info_size;
/* Common info about each register.  */
class lra_reg *lra_reg_info;

HARD_REG_SET hard_regs_spilled_into;

/* Last register value.  */
static int last_reg_value;

/* Return new register value.  */
static int
get_new_reg_value (void)
{
  return ++last_reg_value;
}
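
/* A note on how these values are used elsewhere in this file: each
   pseudo gets a fresh val in initialize_lra_reg_info_element, and
   lra_create_new_reg (unlike lra_create_new_reg_with_unique_value)
   copies the value of the original pseudo to the new one via
   lra_assign_reg_val, so pseudos known to hold the same value share
   the same val.  */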

/* Vec referring to pseudo copies.  */
static vec<lra_copy_t> copy_vec;

/* Initialize I-th element of lra_reg_info.  */
static inline void
initialize_lra_reg_info_element (int i)
{
  bitmap_initialize (&lra_reg_info[i].insn_bitmap, &reg_obstack);
#ifdef STACK_REGS
  lra_reg_info[i].no_stack_p = false;
#endif
  CLEAR_HARD_REG_SET (lra_reg_info[i].conflict_hard_regs);
  CLEAR_HARD_REG_SET (lra_reg_info[i].exclude_start_hard_regs);
  lra_reg_info[i].preferred_hard_regno1 = -1;
  lra_reg_info[i].preferred_hard_regno2 = -1;
  lra_reg_info[i].preferred_hard_regno_profit1 = 0;
  lra_reg_info[i].preferred_hard_regno_profit2 = 0;
  lra_reg_info[i].biggest_mode = VOIDmode;
  lra_reg_info[i].live_ranges = NULL;
  lra_reg_info[i].nrefs = lra_reg_info[i].freq = 0;
  lra_reg_info[i].last_reload = 0;
  lra_reg_info[i].restore_rtx = NULL_RTX;
  lra_reg_info[i].val = get_new_reg_value ();
  lra_reg_info[i].offset = 0;
  lra_reg_info[i].copies = NULL;
}

/* Initialize common reg info and copies.  */
static void
init_reg_info (void)
{
  int i;

  last_reg_value = 0;
  reg_info_size = max_reg_num () * 3 / 2 + 1;
  lra_reg_info = XNEWVEC (class lra_reg, reg_info_size);
  for (i = 0; i < reg_info_size; i++)
    initialize_lra_reg_info_element (i);
  copy_vec.truncate (0);
  CLEAR_HARD_REG_SET (hard_regs_spilled_into);
}


/* Finish common reg info and copies.  */
static void
finish_reg_info (void)
{
  int i;

  for (i = 0; i < reg_info_size; i++)
    bitmap_clear (&lra_reg_info[i].insn_bitmap);
  free (lra_reg_info);
  reg_info_size = 0;
}

/* Expand common reg info if it is necessary.  */
static void
expand_reg_info (void)
{
  int i, old = reg_info_size;

  if (reg_info_size > max_reg_num ())
    return;
  reg_info_size = max_reg_num () * 3 / 2 + 1;
  lra_reg_info = XRESIZEVEC (class lra_reg, lra_reg_info, reg_info_size);
  for (i = old; i < reg_info_size; i++)
    initialize_lra_reg_info_element (i);
}

/* Free all copies.  */
void
lra_free_copies (void)
{
  lra_copy_t cp;

  while (copy_vec.length () != 0)
    {
      cp = copy_vec.pop ();
      lra_reg_info[cp->regno1].copies = lra_reg_info[cp->regno2].copies = NULL;
      lra_copy_pool.remove (cp);
    }
}

/* Create a copy of two pseudos REGNO1 and REGNO2.  The copy execution
   frequency is FREQ.  */
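/* An illustrative (hypothetical) call: lra_create_copy (70, 65, 1000)
   for an insn (set (reg 70) (reg 65)) records the copy with
   regno1 = 65, regno2 = 70 and regno1_dest_p = false, since the
   regnos are canonically ordered with regno1 < regno2; the dump
   below would then show "r65->r70@1000".  */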
void
lra_create_copy (int regno1, int regno2, int freq)
{
  bool regno1_dest_p;
  lra_copy_t cp;

  lra_assert (regno1 != regno2);
  regno1_dest_p = true;
  if (regno1 > regno2)
    {
      std::swap (regno1, regno2);
      regno1_dest_p = false;
    }
  cp = lra_copy_pool.allocate ();
  copy_vec.safe_push (cp);
  cp->regno1_dest_p = regno1_dest_p;
  cp->freq = freq;
  cp->regno1 = regno1;
  cp->regno2 = regno2;
  cp->regno1_next = lra_reg_info[regno1].copies;
  lra_reg_info[regno1].copies = cp;
  cp->regno2_next = lra_reg_info[regno2].copies;
  lra_reg_info[regno2].copies = cp;
  if (lra_dump_file != NULL)
    fprintf (lra_dump_file, "      Creating copy r%d%sr%d@%d\n",
	     regno1, regno1_dest_p ? "<-" : "->", regno2, freq);
}

/* Return N-th (0, 1, ...) copy.  If there is no copy, return
   NULL.  */
lra_copy_t
lra_get_copy (int n)
{
  if (n >= (int) copy_vec.length ())
    return NULL;
  return copy_vec[n];
}



/* This page contains code dealing with info about registers in
   insns.  */

/* Process X of INSN recursively and add info (operand type is given
   by TYPE) about registers in X to the insn DATA.  If X can be early
   clobbered, the alternatives in which it can be early clobbered are
   given by EARLY_CLOBBER_ALTS.  */
static void
add_regs_to_insn_regno_info (lra_insn_recog_data_t data, rtx x,
			     rtx_insn *insn, enum op_type type,
			     alternative_mask early_clobber_alts)
{
  int i, j, regno;
  bool subreg_p;
  machine_mode mode;
  const char *fmt;
  enum rtx_code code;
  struct lra_insn_reg *curr;

  code = GET_CODE (x);
  mode = GET_MODE (x);
  subreg_p = false;
  if (GET_CODE (x) == SUBREG)
    {
      mode = wider_subreg_mode (x);
      if (read_modify_subreg_p (x))
	subreg_p = true;
      x = SUBREG_REG (x);
      code = GET_CODE (x);
    }
  if (REG_P (x))
    {
      regno = REGNO (x);
      /* Process all regs even unallocatable ones as we need info about
	 all regs for the rematerialization pass.  */
      expand_reg_info ();
      if (bitmap_set_bit (&lra_reg_info[regno].insn_bitmap, INSN_UID (insn)))
	{
	  data->regs = new_insn_reg (data->insn, regno, type, mode, subreg_p,
				     early_clobber_alts, data->regs);
	  return;
	}
      else
	{
	  for (curr = data->regs; curr != NULL; curr = curr->next)
	    if (curr->regno == regno)
	      {
		if (curr->subreg_p != subreg_p || curr->biggest_mode != mode)
		  /* The info cannot be integrated into the found
		     structure.  */
		  data->regs = new_insn_reg (data->insn, regno, type, mode,
					     subreg_p, early_clobber_alts,
					     data->regs);
		else
		  {
		    if (curr->type != type)
		      curr->type = OP_INOUT;
		    curr->early_clobber_alts |= early_clobber_alts;
		  }
		return;
	      }
	  gcc_unreachable ();
	}
    }

  switch (code)
    {
    case SET:
      add_regs_to_insn_regno_info (data, SET_DEST (x), insn, OP_OUT, 0);
      add_regs_to_insn_regno_info (data, SET_SRC (x), insn, OP_IN, 0);
      break;
    case CLOBBER:
      /* We treat clobber of non-operand hard registers as early
	 clobber.  */
      add_regs_to_insn_regno_info (data, XEXP (x, 0), insn, OP_OUT,
				   ALL_ALTERNATIVES);
      break;
    case PRE_INC: case PRE_DEC: case POST_INC: case POST_DEC:
      add_regs_to_insn_regno_info (data, XEXP (x, 0), insn, OP_INOUT, 0);
      break;
    case PRE_MODIFY: case POST_MODIFY:
      add_regs_to_insn_regno_info (data, XEXP (x, 0), insn, OP_INOUT, 0);
      add_regs_to_insn_regno_info (data, XEXP (x, 1), insn, OP_IN, 0);
      break;
    default:
      if ((code != PARALLEL && code != EXPR_LIST) || type != OP_OUT)
	/* Some targets place small structures in registers for return
	   values of functions, and those registers are wrapped in a
	   PARALLEL that we may see as the destination of a SET.  Here
	   is an example:

	   (call_insn 13 12 14 2 (set (parallel:BLK [
		    (expr_list:REG_DEP_TRUE (reg:DI 0 ax)
			(const_int 0 [0]))
		    (expr_list:REG_DEP_TRUE (reg:DI 1 dx)
			(const_int 8 [0x8]))
		])
		(call (mem:QI (symbol_ref:DI (...  */
	type = OP_IN;
      fmt = GET_RTX_FORMAT (code);
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	{
	  if (fmt[i] == 'e')
	    add_regs_to_insn_regno_info (data, XEXP (x, i), insn, type, 0);
	  else if (fmt[i] == 'E')
	    {
	      for (j = XVECLEN (x, i) - 1; j >= 0; j--)
		add_regs_to_insn_regno_info (data, XVECEXP (x, i, j), insn,
					     type, 0);
	    }
	}
    }
}

/* Return execution frequency of INSN.  */
static int
get_insn_freq (rtx_insn *insn)
{
  basic_block bb = BLOCK_FOR_INSN (insn);

  gcc_checking_assert (bb != NULL);
  return REG_FREQ_FROM_BB (bb);
}

/* Invalidate all reg info of INSN with DATA and execution frequency
   FREQ.  Update common info about the invalidated registers.  */
static void
invalidate_insn_data_regno_info (lra_insn_recog_data_t data, rtx_insn *insn,
				 int freq)
{
  int uid;
  bool debug_p;
  unsigned int i;
  struct lra_insn_reg *ir, *next_ir;

  uid = INSN_UID (insn);
  debug_p = DEBUG_INSN_P (insn);
  for (ir = data->regs; ir != NULL; ir = next_ir)
    {
      i = ir->regno;
      next_ir = ir->next;
      lra_insn_reg_pool.remove (ir);
      bitmap_clear_bit (&lra_reg_info[i].insn_bitmap, uid);
      if (i >= FIRST_PSEUDO_REGISTER && ! debug_p)
	{
	  lra_reg_info[i].nrefs--;
	  lra_reg_info[i].freq -= freq;
	  lra_assert (lra_reg_info[i].nrefs >= 0 && lra_reg_info[i].freq >= 0);
	}
    }
  data->regs = NULL;
}

/* Invalidate all reg info of INSN.  Update common info about the
   invalidated registers.  */
void
lra_invalidate_insn_regno_info (rtx_insn *insn)
{
  invalidate_insn_data_regno_info (lra_get_insn_recog_data (insn), insn,
				   get_insn_freq (insn));
}

/* Update common reg info from reg info of insn given by its DATA and
   execution frequency FREQ.  */
static void
setup_insn_reg_info (lra_insn_recog_data_t data, int freq)
{
  unsigned int i;
  struct lra_insn_reg *ir;

  for (ir = data->regs; ir != NULL; ir = ir->next)
    if ((i = ir->regno) >= FIRST_PSEUDO_REGISTER)
      {
	lra_reg_info[i].nrefs++;
	lra_reg_info[i].freq += freq;
      }
}

/* Set up insn reg info of INSN.  Update common reg info from reg info
   of INSN.  */
void
lra_update_insn_regno_info (rtx_insn *insn)
{
  int i, freq;
  lra_insn_recog_data_t data;
  struct lra_static_insn_data *static_data;
  enum rtx_code code;
  rtx link;

  if (! INSN_P (insn))
    return;
  data = lra_get_insn_recog_data (insn);
  static_data = data->insn_static_data;
  freq = NONDEBUG_INSN_P (insn) ? get_insn_freq (insn) : 0;
  invalidate_insn_data_regno_info (data, insn, freq);
  for (i = static_data->n_operands - 1; i >= 0; i--)
    add_regs_to_insn_regno_info (data, *data->operand_loc[i], insn,
				 static_data->operand[i].type,
				 static_data->operand[i].early_clobber_alts);
  if ((code = GET_CODE (PATTERN (insn))) == CLOBBER || code == USE)
    add_regs_to_insn_regno_info (data, XEXP (PATTERN (insn), 0), insn,
				 code == USE ? OP_IN : OP_OUT, 0);
  if (CALL_P (insn))
    /* On some targets call insns can refer to pseudos in memory in
       CALL_INSN_FUNCTION_USAGE list.  Process them in order to
       consider their occurrences in calls for different
       transformations (e.g. inheritance) with given pseudos.  */
    for (link = CALL_INSN_FUNCTION_USAGE (insn);
	 link != NULL_RTX;
	 link = XEXP (link, 1))
      {
	code = GET_CODE (XEXP (link, 0));
	if ((code == USE || code == CLOBBER)
	    && MEM_P (XEXP (XEXP (link, 0), 0)))
	  add_regs_to_insn_regno_info (data, XEXP (XEXP (link, 0), 0), insn,
				       code == USE ? OP_IN : OP_OUT, 0);
      }
  if (NONDEBUG_INSN_P (insn))
    setup_insn_reg_info (data, freq);
}

/* Return reg info of insn given by its UID.  */
struct lra_insn_reg *
lra_get_insn_regs (int uid)
{
  lra_insn_recog_data_t data;

  data = get_insn_recog_data_by_uid (uid);
  return data->regs;
}



/* Recursive hash function for RTL X.  */
hashval_t
lra_rtx_hash (rtx x)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;
  hashval_t val = 0;

  if (x == 0)
    return val;

  code = GET_CODE (x);
  val += (int) code + 4095;

  /* Some RTL can be compared nonrecursively.  */
  switch (code)
    {
    case REG:
      return val + REGNO (x);

    case LABEL_REF:
      return iterative_hash_object (XEXP (x, 0), val);

    case SYMBOL_REF:
      return iterative_hash_object (XSTR (x, 0), val);

    case SCRATCH:
    case CONST_DOUBLE:
    case CONST_VECTOR:
      return val;

    case CONST_INT:
      return val + UINTVAL (x);

    default:
      break;
    }

  /* Hash the elements.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'w':
	  val += XWINT (x, i);
	  break;

	case 'n':
	case 'i':
	  val += XINT (x, i);
	  break;

	case 'V':
	case 'E':
	  val += XVECLEN (x, i);

	  for (j = 0; j < XVECLEN (x, i); j++)
	    val += lra_rtx_hash (XVECEXP (x, i, j));
	  break;

	case 'e':
	  val += lra_rtx_hash (XEXP (x, i));
	  break;

	case 'S':
	case 's':
	  val += htab_hash_string (XSTR (x, i));
	  break;

	case 'u':
	case '0':
	case 't':
	  break;

	  /* It is believed that rtx's at this level will never
	     contain anything but integers and other rtx's, except for
	     within LABEL_REFs and SYMBOL_REFs.  */
	default:
	  abort ();
	}
    }
  return val;
}



/* This page contains code dealing with the stack of the insns which
   should be processed by the next constraint pass.  */

/* Bitmap used to put an insn on the stack only once.  */
static sbitmap lra_constraint_insn_stack_bitmap;

/* The stack itself.  */
vec<rtx_insn *> lra_constraint_insn_stack;

/* Put INSN on the stack.  If ALWAYS_UPDATE is true, always update the reg
   info for INSN, otherwise only update it if INSN is not already on the
   stack.  */
static inline void
lra_push_insn_1 (rtx_insn *insn, bool always_update)
{
  unsigned int uid = INSN_UID (insn);
  if (always_update)
    lra_update_insn_regno_info (insn);
  if (uid >= SBITMAP_SIZE (lra_constraint_insn_stack_bitmap))
    lra_constraint_insn_stack_bitmap =
      sbitmap_resize (lra_constraint_insn_stack_bitmap, 3 * uid / 2, 0);
  if (bitmap_bit_p (lra_constraint_insn_stack_bitmap, uid))
    return;
  bitmap_set_bit (lra_constraint_insn_stack_bitmap, uid);
  if (! always_update)
    lra_update_insn_regno_info (insn);
  lra_constraint_insn_stack.safe_push (insn);
}

/* Put INSN on the stack. */
void
lra_push_insn (rtx_insn *insn)
{
  lra_push_insn_1 (insn, false);
}

/* Put INSN on the stack and update its reg info. */
void
lra_push_insn_and_update_insn_regno_info (rtx_insn *insn)
{
  lra_push_insn_1 (insn, true);
}

/* Put the insn with UID on the stack. */
void
lra_push_insn_by_uid (unsigned int uid)
{
  lra_push_insn (lra_insn_recog_data[uid]->insn);
}

/* Take the last-inserted insn off the stack and return it. */
rtx_insn *
lra_pop_insn (void)
{
  rtx_insn *insn = lra_constraint_insn_stack.pop ();
  bitmap_clear_bit (lra_constraint_insn_stack_bitmap, INSN_UID (insn));
  return insn;
}

/* Return the current size of the insn stack. */
unsigned int
lra_insn_stack_length (void)
{
  return lra_constraint_insn_stack.length ();
}
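
/* A sketch of the consuming loop (hypothetical; the real consumer lives
   in lra-constraints.cc and does much more per insn):

     while (lra_insn_stack_length () != 0)
       {
         rtx_insn *insn = lra_pop_insn ();
         process_insn (insn);  // process_insn is hypothetical; processing
                               // may push further insns back on the stack
       }

   The bitmap above guarantees each insn is on the stack at most once, so
   the loop terminates once processing stops generating new work. */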

/* Push insns FROM to TO (excluding it) going in reverse order. */
static void
push_insns (rtx_insn *from, rtx_insn *to)
{
  rtx_insn *insn;

  if (from == NULL_RTX)
    return;
  for (insn = from; insn != to; insn = PREV_INSN (insn))
    if (INSN_P (insn))
      lra_push_insn (insn);
}

/* Set up and return the sp offset for insns in the range [FROM, LAST]. The
   offset is taken from the next BB insn after LAST, or is zero if there is
   no such insn. */
static poly_int64
setup_sp_offset (rtx_insn *from, rtx_insn *last)
{
  rtx_insn *before = next_nonnote_nondebug_insn_bb (last);
  poly_int64 offset = (before == NULL_RTX || ! INSN_P (before)
                       ? 0 : lra_get_insn_recog_data (before)->sp_offset);

  for (rtx_insn *insn = from; insn != NEXT_INSN (last); insn = NEXT_INSN (insn))
    {
      lra_get_insn_recog_data (insn)->sp_offset = offset;
      offset = lra_update_sp_offset (PATTERN (insn), offset);
    }
  return offset;
}

/* Emit insns BEFORE before INSN and insns AFTER after INSN. Put the
   insns onto the stack. Print a note about emitting the insns with
   TITLE. */
void
lra_process_new_insns (rtx_insn *insn, rtx_insn *before, rtx_insn *after,
                       const char *title)
{
  if (before == NULL_RTX && after == NULL_RTX)
    return;
  if (lra_dump_file != NULL)
    {
      dump_insn_slim (lra_dump_file, insn);
      if (before != NULL_RTX)
        {
          fprintf (lra_dump_file, " %s before:\n", title);
          dump_rtl_slim (lra_dump_file, before, NULL, -1, 0);
        }
    }
  if (before != NULL_RTX)
    {
      if (cfun->can_throw_non_call_exceptions)
        copy_reg_eh_region_note_forward (insn, before, NULL);
      emit_insn_before (before, insn);
      poly_int64 old_sp_offset = lra_get_insn_recog_data (insn)->sp_offset;
      poly_int64 new_sp_offset = setup_sp_offset (before, PREV_INSN (insn));
      if (maybe_ne (old_sp_offset, new_sp_offset))
        {
          if (lra_dump_file != NULL)
            {
              fprintf (lra_dump_file, " Changing sp offset from ");
              print_dec (old_sp_offset, lra_dump_file);
              fprintf (lra_dump_file, " to ");
              print_dec (new_sp_offset, lra_dump_file);
              fprintf (lra_dump_file, " for insn");
              dump_rtl_slim (lra_dump_file, insn, NULL, -1, 0);
            }
          lra_get_insn_recog_data (insn)->sp_offset = new_sp_offset;
          eliminate_regs_in_insn (insn, false, false,
                                  old_sp_offset - new_sp_offset);
          lra_push_insn (insn);
        }
      push_insns (PREV_INSN (insn), PREV_INSN (before));
    }
  if (after != NULL_RTX)
    {
      if (cfun->can_throw_non_call_exceptions)
        copy_reg_eh_region_note_forward (insn, after, NULL);
      if (! JUMP_P (insn))
        {
          rtx_insn *last;

          if (lra_dump_file != NULL)
            {
              fprintf (lra_dump_file, " %s after:\n", title);
              dump_rtl_slim (lra_dump_file, after, NULL, -1, 0);
            }
          for (last = after;
               NEXT_INSN (last) != NULL_RTX;
               last = NEXT_INSN (last))
            ;
          emit_insn_after (after, insn);
          push_insns (last, insn);
          setup_sp_offset (after, last);
        }
      else
        {
          /* Put output reload insns on successor BBs: */
          edge_iterator ei;
          edge e;

          FOR_EACH_EDGE (e, ei, BLOCK_FOR_INSN (insn)->succs)
            if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
              {
                /* We already made the edge non-critical in ira.cc::ira. */
                lra_assert (!EDGE_CRITICAL_P (e));
                rtx_insn *curr, *tmp = BB_HEAD (e->dest);
                if (LABEL_P (tmp))
                  tmp = NEXT_INSN (tmp);
                if (NOTE_INSN_BASIC_BLOCK_P (tmp))
                  tmp = NEXT_INSN (tmp);
                /* Do not put reload insns if it is the last BB
                   without actual insns. */
                if (tmp == NULL)
                  continue;
                start_sequence ();
                for (curr = after; curr != NULL_RTX; curr = NEXT_INSN (curr))
                  emit_insn (copy_insn (PATTERN (curr)));
                rtx_insn *copy = get_insns (), *last = get_last_insn ();
                end_sequence ();
                if (lra_dump_file != NULL)
                  {
                    fprintf (lra_dump_file, " %s after in bb%d:\n", title,
                             e->dest->index);
                    dump_rtl_slim (lra_dump_file, copy, NULL, -1, 0);
                  }
                /* Use the right emit func for setting up BB_END/BB_HEAD: */
                if (BB_END (e->dest) == PREV_INSN (tmp))
                  emit_insn_after_noloc (copy, PREV_INSN (tmp), e->dest);
                else
                  emit_insn_before_noloc (copy, tmp, e->dest);
                push_insns (last, PREV_INSN (copy));
                setup_sp_offset (copy, last);
                /* We can ignore BB live info here as it and the reg notes
                   will be updated before the next assignment
                   sub-pass. */
              }
        }
    }
  if (lra_dump_file != NULL)
    fprintf (lra_dump_file, "\n");
  if (cfun->can_throw_non_call_exceptions)
    {
      rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
      if (note && !insn_could_throw_p (insn))
        remove_note (insn, note);
    }
}


/* Replace all references to register OLD_REGNO in *LOC with pseudo
   register NEW_REG. Try to simplify a subreg of a constant if SUBREG_P.
   DEBUG_P is true if LOC is within a DEBUG_INSN. Return true if any
   change was made. */
bool
lra_substitute_pseudo (rtx *loc, int old_regno, rtx new_reg, bool subreg_p,
                       bool debug_p)
{
  rtx x = *loc;
  bool result = false;
  enum rtx_code code;
  const char *fmt;
  int i, j;

  if (x == NULL_RTX)
    return false;

  code = GET_CODE (x);
  if (code == SUBREG && subreg_p)
    {
      rtx subst, inner = SUBREG_REG (x);
      /* Transform a subreg of a constant while we still have the inner
         mode of the subreg. The inner part of the subreg should not be
         an insn operand. */
      if (REG_P (inner) && (int) REGNO (inner) == old_regno
          && CONSTANT_P (new_reg)
          && (subst = simplify_subreg (GET_MODE (x), new_reg, GET_MODE (inner),
                                       SUBREG_BYTE (x))) != NULL_RTX)
        {
          *loc = subst;
          return true;
        }

    }
  else if (code == REG && (int) REGNO (x) == old_regno)
    {
      machine_mode mode = GET_MODE (x);
      machine_mode inner_mode = GET_MODE (new_reg);

      if (mode != inner_mode
          && ! (CONST_SCALAR_INT_P (new_reg) && SCALAR_INT_MODE_P (mode)))
        {
          poly_uint64 offset = 0;
          if (partial_subreg_p (mode, inner_mode)
              && SCALAR_INT_MODE_P (inner_mode))
            offset = subreg_lowpart_offset (mode, inner_mode);
          if (debug_p)
            new_reg = gen_rtx_raw_SUBREG (mode, new_reg, offset);
          else
            new_reg = gen_rtx_SUBREG (mode, new_reg, offset);
        }
      *loc = new_reg;
      return true;
    }

  /* Scan all the operand sub-expressions. */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (debug_p
              && i == 0
              && (code == SUBREG
                  || code == ZERO_EXTEND
                  || code == SIGN_EXTEND
                  || code == FLOAT
                  || code == UNSIGNED_FLOAT))
            {
              rtx y = XEXP (x, 0);
              if (lra_substitute_pseudo (&y, old_regno,
                                         new_reg, subreg_p, debug_p))
                {
                  result = true;
                  if (CONST_SCALAR_INT_P (y))
                    {
                      if (code == SUBREG)
                        y = simplify_subreg (GET_MODE (x), y,
                                             GET_MODE (SUBREG_REG (x)),
                                             SUBREG_BYTE (x));
                      else
                        y = simplify_unary_operation (code, GET_MODE (x), y,
                                                      GET_MODE (XEXP (x, 0)));
                      if (y)
                        *loc = y;
                      else
                        *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
                    }
                  else
                    XEXP (x, 0) = y;
                }
            }
          else if (lra_substitute_pseudo (&XEXP (x, i), old_regno,
                                          new_reg, subreg_p, debug_p))
            result = true;
        }
      else if (fmt[i] == 'E')
        {
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (lra_substitute_pseudo (&XVECEXP (x, i, j), old_regno,
                                       new_reg, subreg_p, debug_p))
              result = true;
        }
    }
  return result;
}

/* Call lra_substitute_pseudo within an insn. Try to simplify a subreg
   of a constant if SUBREG_P. This won't update the insn ptr, just the
   contents of the insn. */
bool
lra_substitute_pseudo_within_insn (rtx_insn *insn, int old_regno,
                                   rtx new_reg, bool subreg_p)
{
  rtx loc = insn;
  return lra_substitute_pseudo (&loc, old_regno, new_reg, subreg_p,
                                DEBUG_INSN_P (insn));
}
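
/* A sketch of typical use (the variable names are hypothetical):

     if (lra_substitute_pseudo_within_insn (insn, old_regno, new_reg, false))
       lra_update_insn_regno_info (insn);

   i.e. after a successful substitution the cached reg info of the insn
   should be refreshed, as is done elsewhere in LRA. */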



/* Return a new register of the same mode as ORIGINAL of class ALL_REGS.
   Used in ira_remove_scratches. */
static rtx
get_scratch_reg (rtx original)
{
  return lra_create_new_reg (GET_MODE (original), original, ALL_REGS,
                             NULL, NULL);
}

/* Remove all insn scratches in INSN. */
static void
remove_insn_scratches (rtx_insn *insn)
{
  if (ira_remove_insn_scratches (insn, true, lra_dump_file, get_scratch_reg))
    df_insn_rescan (insn);
}

/* Remove all insn scratches in the current function. */
static void
remove_scratches (void)
{
  basic_block bb;
  rtx_insn *insn;

  FOR_EACH_BB_FN (bb, cfun)
    FOR_BB_INSNS (bb, insn)
      if (INSN_P (insn))
        remove_insn_scratches (insn);
}

/* Check the RTL for correctness. If FINAL_P is true, the check is
   done at the end of LRA and is more rigorous. */
static void
check_rtl (bool final_p)
{
  basic_block bb;
  rtx_insn *insn;

  lra_assert (! final_p || reload_completed);
  FOR_EACH_BB_FN (bb, cfun)
    FOR_BB_INSNS (bb, insn)
      if (NONDEBUG_INSN_P (insn)
          && GET_CODE (PATTERN (insn)) != USE
          && GET_CODE (PATTERN (insn)) != CLOBBER
          && GET_CODE (PATTERN (insn)) != ASM_INPUT)
        {
          if (final_p)
            {
              extract_constrain_insn (insn);
              continue;
            }
          /* LRA code is based on the assumption that all addresses can be
             correctly decomposed. LRA can generate reloads for
             decomposable addresses. The decomposition code checks the
             correctness of the addresses. So we don't need to check
             the addresses here. Don't call insn_invalid_p here, it can
             change the code at this stage. */
          if (recog_memoized (insn) < 0 && asm_noperands (PATTERN (insn)) < 0)
            fatal_insn_not_found (insn);
        }
}

/* Determine if the current function has an exception receiver block
   that reaches the exit block via non-exceptional edges. */
static bool
has_nonexceptional_receiver (void)
{
  edge e;
  edge_iterator ei;
  basic_block *tos, *worklist, bb;

  /* If we're not optimizing, then just err on the safe side. */
  if (!optimize)
    return true;

  /* First determine which blocks can reach exit via normal paths. */
  tos = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) + 1);

  FOR_EACH_BB_FN (bb, cfun)
    bb->flags &= ~BB_REACHABLE;

  /* Place the exit block on our worklist. */
  EXIT_BLOCK_PTR_FOR_FN (cfun)->flags |= BB_REACHABLE;
  *tos++ = EXIT_BLOCK_PTR_FOR_FN (cfun);

  /* Iterate: find everything reachable from what we've already seen. */
  while (tos != worklist)
    {
      bb = *--tos;

      FOR_EACH_EDGE (e, ei, bb->preds)
        if (e->flags & EDGE_ABNORMAL)
          {
            free (worklist);
            return true;
          }
        else
          {
            basic_block src = e->src;

            if (!(src->flags & BB_REACHABLE))
              {
                src->flags |= BB_REACHABLE;
                *tos++ = src;
              }
          }
    }
  free (worklist);
  /* No exceptional block reached exit unexceptionally. */
  return false;
}

/* Remove all REG_DEAD and REG_UNUSED notes and regenerate REG_INC.
   We replace pseudos with hard registers without notifying DF, and
   that can make the notes obsolete. The DF infrastructure does not
   deal with REG_INC notes -- so we regenerate them here. */
static void
update_inc_notes (void)
{
  rtx *pnote;
  basic_block bb;
  rtx_insn *insn;

  FOR_EACH_BB_FN (bb, cfun)
    FOR_BB_INSNS (bb, insn)
      if (NONDEBUG_INSN_P (insn))
        {
          pnote = &REG_NOTES (insn);
          while (*pnote != 0)
            {
              if (REG_NOTE_KIND (*pnote) == REG_DEAD
                  || REG_NOTE_KIND (*pnote) == REG_UNUSED
                  || REG_NOTE_KIND (*pnote) == REG_INC)
                *pnote = XEXP (*pnote, 1);
              else
                pnote = &XEXP (*pnote, 1);
            }

          if (AUTO_INC_DEC)
            add_auto_inc_notes (insn, PATTERN (insn));
        }
}

/* Set to true while in LRA. */
bool lra_in_progress = false;

/* Start of pseudo regnos before the LRA. */
int lra_new_regno_start;

/* Start of reload pseudo regnos before the new spill pass. */
int lra_constraint_new_regno_start;

/* Avoid spilling pseudos with regno greater than the following value,
   if possible. */
int lra_bad_spill_regno_start;

/* A pseudo of Pmode. */
rtx lra_pmode_pseudo;

/* Inheritance pseudo regnos before the new spill pass. */
bitmap_head lra_inheritance_pseudos;

/* Split regnos before the new spill pass. */
bitmap_head lra_split_regs;

/* Reload pseudo regnos before the new assignment pass which still can
   be spilled after the assignment pass as memory is also accepted in
   insns for the reload pseudos. */
bitmap_head lra_optional_reload_pseudos;

/* Pseudo regnos used for subreg reloads before the new assignment
   pass. Such pseudos still can be spilled after the assignment
   pass. */
bitmap_head lra_subreg_reload_pseudos;

/* File used for output of LRA debug information. */
FILE *lra_dump_file;

/* True if we split a hard reg after the last constraint sub-pass. */
bool lra_hard_reg_split_p;

/* True if we found an asm error. */
bool lra_asm_error_p;

/* True if we should try to spill into registers of different classes
   instead of memory. */
bool lra_reg_spill_p;

/* Set up the value of LRA_REG_SPILL_P. */
static void
setup_reg_spill_flag (void)
{
  int cl, mode;

  if (targetm.spill_class != NULL)
    for (cl = 0; cl < (int) LIM_REG_CLASSES; cl++)
      for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
        if (targetm.spill_class ((enum reg_class) cl,
                                 (machine_mode) mode) != NO_REGS)
          {
            lra_reg_spill_p = true;
            return;
          }
  lra_reg_spill_p = false;
}
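
/* For illustration only: a target opts in through the spill_class hook,
   along these (hypothetical) lines for a machine whose vector registers
   can hold spilled general registers; the function name and VECTOR_REGS
   class are made up:

     static reg_class_t
     example_spill_class (reg_class_t rclass,
                          machine_mode mode ATTRIBUTE_UNUSED)
     {
       return rclass == GENERAL_REGS ? VECTOR_REGS : NO_REGS;
     }
     // #define TARGET_SPILL_CLASS example_spill_class

   Any (class, mode) pair for which the hook returns something other
   than NO_REGS makes setup_reg_spill_flag set lra_reg_spill_p. */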

/* True if the current function is too big to use regular algorithms
   in LRA. In other words, we should use simpler and faster algorithms
   in LRA. It also means we should not worry about generating code
   for caller saves. The value is set up in IRA. */
bool lra_simple_p;

/* Major LRA entry function. F is a file that should be used to dump
   LRA debug info. */
void
lra (FILE *f)
{
  int i;
  bool live_p, inserted_p;

  lra_dump_file = f;
  lra_asm_error_p = false;
  lra_pmode_pseudo = gen_reg_rtx (Pmode);

  timevar_push (TV_LRA);

  /* Make sure that the last insn is a note. Some subsequent passes
     need it. */
  emit_note (NOTE_INSN_DELETED);

  lra_no_alloc_regs = ira_no_alloc_regs;

  init_reg_info ();
  expand_reg_info ();

  init_insn_recog_data ();

  /* Some quick check on RTL generated by previous passes. */
  if (flag_checking)
    check_rtl (false);

  lra_in_progress = true;

  lra_live_range_iter = lra_coalesce_iter = lra_constraint_iter = 0;
  lra_assignment_iter = lra_assignment_iter_after_spill = 0;
  lra_inheritance_iter = lra_undo_inheritance_iter = 0;
  lra_rematerialization_iter = 0;

  setup_reg_spill_flag ();

  /* Function remove_scratches can create new pseudos for clobbers --
     so set up lra_constraint_new_regno_start before its call to
     permit changing reg classes for pseudos created by this
     simplification. */
  lra_constraint_new_regno_start = lra_new_regno_start = max_reg_num ();
  lra_bad_spill_regno_start = INT_MAX;
  remove_scratches ();

  /* A function that has a non-local label that can reach the exit
     block via non-exceptional paths must save all call-saved
     registers. */
  if (cfun->has_nonlocal_label && has_nonexceptional_receiver ())
    crtl->saves_all_registers = 1;

  if (crtl->saves_all_registers)
    for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
      if (!crtl->abi->clobbers_full_reg_p (i)
          && !fixed_regs[i]
          && !LOCAL_REGNO (i))
        df_set_regs_ever_live (i, true);

  /* We don't use DF from now on, and avoid it, because it is too
     expensive when a lot of RTL changes are made. */
  df_set_flags (DF_NO_INSN_RESCAN);
  lra_constraint_insn_stack.create (get_max_uid ());
  lra_constraint_insn_stack_bitmap = sbitmap_alloc (get_max_uid ());
  bitmap_clear (lra_constraint_insn_stack_bitmap);
  lra_live_ranges_init ();
  lra_constraints_init ();
  lra_curr_reload_num = 0;
  push_insns (get_last_insn (), NULL);
  /* It is needed for the 1st coalescing. */
  bitmap_initialize (&lra_inheritance_pseudos, &reg_obstack);
  bitmap_initialize (&lra_split_regs, &reg_obstack);
  bitmap_initialize (&lra_optional_reload_pseudos, &reg_obstack);
  bitmap_initialize (&lra_subreg_reload_pseudos, &reg_obstack);
  live_p = false;
  if (maybe_ne (get_frame_size (), 0) && crtl->stack_alignment_needed)
    /* If we have a stack frame, we must align it now. The stack size
       may be a part of the offset computation for register
       elimination. */
    assign_stack_local (BLKmode, 0, crtl->stack_alignment_needed);
  lra_init_equiv ();
  for (;;)
    {
      for (;;)
        {
          bool reloads_p = lra_constraints (lra_constraint_iter == 0);
          /* Constraint transformations may result in eliminable hard
             regs becoming uneliminable, so that pseudos which use them
             should be spilled. It is better to do it before pseudo
             assignments.

             For example, rs6000 can make
             RS6000_PIC_OFFSET_TABLE_REGNUM uneliminable if we started
             to use a constant pool. */
          lra_eliminate (false, false);
          /* We should try to assign hard registers to scratches even
             if there were no RTL transformations in lra_constraints.
             Also we should check IRA assignments on the first
             iteration as they can be wrong because of early clobber
             operands which are ignored in IRA. */
          if (! reloads_p && lra_constraint_iter > 1)
            {
              /* The stack is not empty here only when there are changes
                 during the elimination sub-pass. */
              if (bitmap_empty_p (lra_constraint_insn_stack_bitmap))
                break;
              else
                /* If there are no reloads but there are changes due to
                   elimination, restart the constraint sub-pass
                   first. */
                continue;
            }
          /* Do inheritance only for regular algorithms. */
          if (! lra_simple_p)
            lra_inheritance ();
          if (live_p)
            lra_clear_live_ranges ();
          bool fails_p;
          lra_hard_reg_split_p = false;
          do
            {
              /* We need live ranges for lra_assign -- so build them.
                 But don't remove dead insns or change global live
                 info as we can undo inheritance transformations after
                 inheritance pseudo assigning. */
              lra_create_live_ranges (true, !lra_simple_p);
              live_p = true;
              /* If we don't spill non-reload and non-inheritance
                 pseudos, there is no point in running memory-memory
                 move coalescing. If inheritance pseudos were spilled,
                 the memory-memory moves involving them will be removed
                 by the pass undoing inheritance. */
              if (lra_simple_p)
                lra_assign (fails_p);
              else
                {
                  bool spill_p = !lra_assign (fails_p);

                  if (lra_undo_inheritance ())
                    live_p = false;
                  if (spill_p && ! fails_p)
                    {
                      if (! live_p)
                        {
                          lra_create_live_ranges (true, true);
                          live_p = true;
                        }
                      if (lra_coalesce ())
                        live_p = false;
                    }
                  if (! live_p)
                    lra_clear_live_ranges ();
                }
              if (fails_p)
                {
                  /* It is a very rare case. It is the last hope to
                     split a hard regno live range for a reload
                     pseudo. */
                  if (live_p)
                    lra_clear_live_ranges ();
                  live_p = false;
                  if (! lra_split_hard_reg_for ())
                    break;
                  lra_hard_reg_split_p = true;
                }
            }
          while (fails_p && !lra_asm_error_p);
          if (! live_p)
            {
              /* We need the correct reg notes for the work of the
                 constraint sub-pass. */
              lra_create_live_ranges (true, true);
              live_p = true;
            }
        }
      /* Don't clear the optional reloads bitmap until all constraints are
         satisfied as we need to distinguish them from regular reloads. */
      bitmap_clear (&lra_optional_reload_pseudos);
      bitmap_clear (&lra_subreg_reload_pseudos);
      bitmap_clear (&lra_inheritance_pseudos);
      bitmap_clear (&lra_split_regs);
      if (! live_p)
        {
          /* We need full live info for spilling pseudos into
             registers instead of memory. */
          lra_create_live_ranges (lra_reg_spill_p, true);
          live_p = true;
        }
      /* We should check the necessity for spilling here as the above live
         range pass can remove spilled pseudos. */
      if (! lra_need_for_spills_p ())
        break;
      /* Now we know what pseudos should be spilled. Try to
         rematerialize them first. */
      if (lra_remat ())
        {
          /* We need full live info -- see the comment above. */
          lra_create_live_ranges (lra_reg_spill_p, true);
          live_p = true;
          if (! lra_need_for_spills_p ())
            {
              if (lra_need_for_scratch_reg_p ())
                continue;
              break;
            }
        }
      lra_spill ();
      /* Assignment of stack slots changes elimination offsets for
         some eliminations. So update the offsets here. */
      lra_eliminate (false, false);
      lra_constraint_new_regno_start = max_reg_num ();
      if (lra_bad_spill_regno_start == INT_MAX
          && lra_inheritance_iter > LRA_MAX_INHERITANCE_PASSES
          && lra_rematerialization_iter > LRA_MAX_REMATERIALIZATION_PASSES)
        /* After switching off the inheritance and rematerialization
           passes, avoid spilling reload pseudos that will be created, to
           prevent LRA cycling in some complicated cases. */
        lra_bad_spill_regno_start = lra_constraint_new_regno_start;
      lra_assignment_iter_after_spill = 0;
    }
  ira_restore_scratches (lra_dump_file);
  lra_eliminate (true, false);
  lra_final_code_change ();
  lra_in_progress = false;
  if (live_p)
    lra_clear_live_ranges ();
  lra_live_ranges_finish ();
  lra_constraints_finish ();
  finish_reg_info ();
  sbitmap_free (lra_constraint_insn_stack_bitmap);
  lra_constraint_insn_stack.release ();
  finish_insn_recog_data ();
  regstat_free_n_sets_and_refs ();
  regstat_free_ri ();
  reload_completed = 1;
  update_inc_notes ();

  inserted_p = fixup_abnormal_edges ();

  /* We've possibly turned a single trapping insn into multiple ones. */
  if (cfun->can_throw_non_call_exceptions)
    {
      auto_sbitmap blocks (last_basic_block_for_fn (cfun));
      bitmap_ones (blocks);
      find_many_sub_basic_blocks (blocks);
    }

  if (inserted_p)
    commit_edge_insertions ();

  /* Subsequent passes expect that rtl is unshared, so unshare everything
     here. */
  unshare_all_rtl_again (get_insns ());

  if (flag_checking)
    check_rtl (true);

  timevar_pop (TV_LRA);
}

/* Called once per compiler run to initialize LRA data once. */
void
lra_init_once (void)
{
  init_insn_code_data_once ();
}

/* Called once per compiler run to finish LRA data which are
   initialized once. */
void
lra_finish_once (void)
{
  finish_insn_code_data_once ();
}