/* Building internal representation for IRA.
   Copyright (C) 2006-2023 Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "predict.h"
#include "df.h"
#include "insn-config.h"
#include "regs.h"
#include "memmodel.h"
#include "ira.h"
#include "ira-int.h"
#include "sparseset.h"
#include "cfgloop.h"

static ira_copy_t find_allocno_copy (ira_allocno_t, ira_allocno_t, rtx_insn *,
                                     ira_loop_tree_node_t);

/* The root of the loop tree corresponding to the whole function.  */
ira_loop_tree_node_t ira_loop_tree_root;

/* Height of the loop tree.  */
int ira_loop_tree_height;

/* All nodes representing basic blocks are referred to through the
   following array.  We cannot use basic block member `aux' for this
   because it is used for insertion of insns on edges.  */
ira_loop_tree_node_t ira_bb_nodes;

/* All nodes representing loops are referred to through the following
   array.  */
ira_loop_tree_node_t ira_loop_nodes;

/* The size of the ira_loop_nodes array.  */
unsigned int ira_loop_nodes_count;

/* Map regno -> allocnos with given regno (see comments for
   allocno member `next_regno_allocno').  */
ira_allocno_t *ira_regno_allocno_map;

/* Array of references to all allocnos.  The order number of the
   allocno corresponds to the index in the array.  Removed allocnos
   have NULL element value.  */
ira_allocno_t *ira_allocnos;

/* Size of the previous array.  */
int ira_allocnos_num;

/* Count of conflict record structures we've created, used when creating
   a new conflict id.  */
int ira_objects_num;

/* Map a conflict id to its conflict record.  */
ira_object_t *ira_object_id_map;

/* Array of references to all allocno preferences.  The order number
   of the preference corresponds to the index in the array.  */
ira_pref_t *ira_prefs;

/* Size of the previous array.  */
int ira_prefs_num;

/* Array of references to all copies.  The order number of the copy
   corresponds to the index in the array.  Removed copies have NULL
   element value.  */
ira_copy_t *ira_copies;

/* Size of the previous array.  */
int ira_copies_num;



/* LAST_BASIC_BLOCK before generating additional insns because of live
   range splitting.  Emitting insns on a critical edge creates a new
   basic block.  */
static int last_basic_block_before_change;
/* Initialize some members in loop tree node NODE.  Use LOOP_NUM for
   the member loop_num.  */
static void
init_loop_tree_node (struct ira_loop_tree_node *node, int loop_num)
{
  int max_regno = max_reg_num ();

  node->regno_allocno_map
    = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t) * max_regno);
  memset (node->regno_allocno_map, 0, sizeof (ira_allocno_t) * max_regno);
  memset (node->reg_pressure, 0, sizeof (node->reg_pressure));
  node->all_allocnos = ira_allocate_bitmap ();
  node->modified_regnos = ira_allocate_bitmap ();
  node->border_allocnos = ira_allocate_bitmap ();
  node->local_copies = ira_allocate_bitmap ();
  node->loop_num = loop_num;
  node->children = NULL;
  node->subloops = NULL;
}


/* The following function allocates the loop tree nodes.  If
   CURRENT_LOOPS is NULL, the nodes corresponding to the loops (except
   the root, which corresponds to the whole function) will not be
   allocated, but nodes will still be allocated for basic blocks.  */
static void
create_loop_tree_nodes (void)
{
  unsigned int i, j;
  bool skip_p;
  edge_iterator ei;
  edge e;
  loop_p loop;

  ira_bb_nodes
    = ((struct ira_loop_tree_node *)
       ira_allocate (sizeof (struct ira_loop_tree_node)
                     * last_basic_block_for_fn (cfun)));
  last_basic_block_before_change = last_basic_block_for_fn (cfun);
  for (i = 0; i < (unsigned int) last_basic_block_for_fn (cfun); i++)
    {
      ira_bb_nodes[i].regno_allocno_map = NULL;
      memset (ira_bb_nodes[i].reg_pressure, 0,
              sizeof (ira_bb_nodes[i].reg_pressure));
      ira_bb_nodes[i].all_allocnos = NULL;
      ira_bb_nodes[i].modified_regnos = NULL;
      ira_bb_nodes[i].border_allocnos = NULL;
      ira_bb_nodes[i].local_copies = NULL;
    }
  if (current_loops == NULL)
    {
      ira_loop_nodes_count = 1;
      ira_loop_nodes = ((struct ira_loop_tree_node *)
                        ira_allocate (sizeof (struct ira_loop_tree_node)));
      init_loop_tree_node (ira_loop_nodes, 0);
      return;
    }
  ira_loop_nodes_count = number_of_loops (cfun);
  ira_loop_nodes = ((struct ira_loop_tree_node *)
                    ira_allocate (sizeof (struct ira_loop_tree_node)
                                  * ira_loop_nodes_count));
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
    {
      if (loop_outer (loop) != NULL)
        {
          ira_loop_nodes[i].regno_allocno_map = NULL;
          skip_p = false;
          FOR_EACH_EDGE (e, ei, loop->header->preds)
            if (e->src != loop->latch
                && (e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
              {
                skip_p = true;
                break;
              }
          if (skip_p)
            continue;
          auto_vec<edge> edges = get_loop_exit_edges (loop);
          FOR_EACH_VEC_ELT (edges, j, e)
            if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
              {
                skip_p = true;
                break;
              }
          if (skip_p)
            continue;
        }
      init_loop_tree_node (&ira_loop_nodes[i], loop->num);
    }
}

/* The function returns TRUE if there is more than one allocation
   region.  */
static bool
more_one_region_p (void)
{
  unsigned int i;
  loop_p loop;

  if (current_loops != NULL)
    FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
      if (ira_loop_nodes[i].regno_allocno_map != NULL
          && ira_loop_tree_root != &ira_loop_nodes[i])
        return true;
  return false;
}

/* Free the loop tree node of a loop.  */
static void
finish_loop_tree_node (ira_loop_tree_node_t loop)
{
  if (loop->regno_allocno_map != NULL)
    {
      ira_assert (loop->bb == NULL);
      ira_free_bitmap (loop->local_copies);
      ira_free_bitmap (loop->border_allocnos);
      ira_free_bitmap (loop->modified_regnos);
      ira_free_bitmap (loop->all_allocnos);
      ira_free (loop->regno_allocno_map);
      loop->regno_allocno_map = NULL;
    }
}

/* Free the loop tree nodes.  */
static void
finish_loop_tree_nodes (void)
{
  unsigned int i;

  for (i = 0; i < ira_loop_nodes_count; i++)
    finish_loop_tree_node (&ira_loop_nodes[i]);
  ira_free (ira_loop_nodes);
  for (i = 0; i < (unsigned int) last_basic_block_before_change; i++)
    {
      if (ira_bb_nodes[i].local_copies != NULL)
        ira_free_bitmap (ira_bb_nodes[i].local_copies);
      if (ira_bb_nodes[i].border_allocnos != NULL)
        ira_free_bitmap (ira_bb_nodes[i].border_allocnos);
      if (ira_bb_nodes[i].modified_regnos != NULL)
        ira_free_bitmap (ira_bb_nodes[i].modified_regnos);
      if (ira_bb_nodes[i].all_allocnos != NULL)
        ira_free_bitmap (ira_bb_nodes[i].all_allocnos);
      if (ira_bb_nodes[i].regno_allocno_map != NULL)
        ira_free (ira_bb_nodes[i].regno_allocno_map);
    }
  ira_free (ira_bb_nodes);
}


/* The following recursive function adds LOOP to the loop tree
   hierarchy.  LOOP is added only once.  If LOOP is NULL, we add the
   loop designating the whole function, used when CFG loops are not
   built.  */
static void
add_loop_to_tree (class loop *loop)
{
  int loop_num;
  class loop *parent;
  ira_loop_tree_node_t loop_node, parent_node;

  /* We cannot use loop node access macros here because of potential
     checking and because the nodes are not initialized enough
     yet.  */
  if (loop != NULL && loop_outer (loop) != NULL)
    add_loop_to_tree (loop_outer (loop));
  loop_num = loop != NULL ? loop->num : 0;
  if (ira_loop_nodes[loop_num].regno_allocno_map != NULL
      && ira_loop_nodes[loop_num].children == NULL)
    {
      /* We have not added the loop node to the tree yet.  */
      loop_node = &ira_loop_nodes[loop_num];
      loop_node->loop = loop;
      loop_node->bb = NULL;
      if (loop == NULL)
        parent = NULL;
      else
        {
          for (parent = loop_outer (loop);
               parent != NULL;
               parent = loop_outer (parent))
            if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
              break;
        }
      if (parent == NULL)
        {
          loop_node->next = NULL;
          loop_node->subloop_next = NULL;
          loop_node->parent = NULL;
        }
      else
        {
          parent_node = &ira_loop_nodes[parent->num];
          loop_node->next = parent_node->children;
          parent_node->children = loop_node;
          loop_node->subloop_next = parent_node->subloops;
          parent_node->subloops = loop_node;
          loop_node->parent = parent_node;
        }
    }
}

/* The following recursive function sets up levels of nodes of the
   tree given its root LOOP_NODE.  The enumeration starts with LEVEL.
   The function returns the maximal level in the tree plus 1.  */
static int
setup_loop_tree_level (ira_loop_tree_node_t loop_node, int level)
{
  int height, max_height;
  ira_loop_tree_node_t subloop_node;

  ira_assert (loop_node->bb == NULL);
  loop_node->level = level;
  max_height = level + 1;
  for (subloop_node = loop_node->subloops;
       subloop_node != NULL;
       subloop_node = subloop_node->subloop_next)
    {
      ira_assert (subloop_node->bb == NULL);
      height = setup_loop_tree_level (subloop_node, level + 1);
      if (height > max_height)
        max_height = height;
    }
  return max_height;
}

/* Create the loop tree.  The algorithm is designed to provide correct
   order of loops (they are ordered by their last loop BB) and basic
   blocks in the chain formed by member next.  */
static void
form_loop_tree (void)
{
  basic_block bb;
  class loop *parent;
  ira_loop_tree_node_t bb_node, loop_node;

  /* We cannot use loop/bb node access macros because of potential
     checking and because the nodes are not initialized enough
     yet.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      bb_node = &ira_bb_nodes[bb->index];
      bb_node->bb = bb;
      bb_node->loop = NULL;
      bb_node->subloops = NULL;
      bb_node->children = NULL;
      bb_node->subloop_next = NULL;
      bb_node->next = NULL;
      if (current_loops == NULL)
        parent = NULL;
      else
        {
          for (parent = bb->loop_father;
               parent != NULL;
               parent = loop_outer (parent))
            if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
              break;
        }
      add_loop_to_tree (parent);
      loop_node = &ira_loop_nodes[parent == NULL ? 0 : parent->num];
      bb_node->next = loop_node->children;
      bb_node->parent = loop_node;
      loop_node->children = bb_node;
    }
  ira_loop_tree_root = IRA_LOOP_NODE_BY_INDEX (0);
  ira_loop_tree_height = setup_loop_tree_level (ira_loop_tree_root, 0);
  ira_assert (ira_loop_tree_root->regno_allocno_map != NULL);
}
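
/* For illustration (a hypothetical example, not produced by this
   code): for a function whose CFG is bb2 -> bb3 -> bb4 -> bb3, i.e. a
   single loop with header bb3 and latch bb4, the resulting tree is
   roughly

     root (whole function, level 0)
       children: bb2 and the loop node
       loop node (level 1)
         children: bb3, bb4

   so ira_loop_tree_height is 2.  Each bb node's parent is the
   innermost enclosing loop node that was actually allocated for
   region-based allocation; loops skipped by create_loop_tree_nodes
   are transparently replaced by their nearest allocated ancestor.  */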


/* Rebuild IRA_REGNO_ALLOCNO_MAP and REGNO_ALLOCNO_MAPs of the loop
   tree nodes.  */
static void
rebuild_regno_allocno_maps (void)
{
  unsigned int l;
  int max_regno, regno;
  ira_allocno_t a;
  ira_loop_tree_node_t loop_tree_node;
  loop_p loop;
  ira_allocno_iterator ai;

  ira_assert (current_loops != NULL);
  max_regno = max_reg_num ();
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), l, loop)
    if (ira_loop_nodes[l].regno_allocno_map != NULL)
      {
        ira_free (ira_loop_nodes[l].regno_allocno_map);
        ira_loop_nodes[l].regno_allocno_map
          = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
                                            * max_regno);
        memset (ira_loop_nodes[l].regno_allocno_map, 0,
                sizeof (ira_allocno_t) * max_regno);
      }
  ira_free (ira_regno_allocno_map);
  ira_regno_allocno_map
    = (ira_allocno_t *) ira_allocate (max_regno * sizeof (ira_allocno_t));
  memset (ira_regno_allocno_map, 0, max_regno * sizeof (ira_allocno_t));
  FOR_EACH_ALLOCNO (a, ai)
    {
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
        /* Caps are not in the regno allocno maps.  */
        continue;
      regno = ALLOCNO_REGNO (a);
      loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
      ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
      ira_regno_allocno_map[regno] = a;
      if (loop_tree_node->regno_allocno_map[regno] == NULL)
        /* Remember that we can create temporary allocnos to break
           cycles in register shuffle.  */
        loop_tree_node->regno_allocno_map[regno] = a;
    }
}

/* Pools for allocnos, allocno live ranges and objects.  */
static object_allocator<live_range> live_range_pool ("live ranges");
static object_allocator<ira_allocno> allocno_pool ("allocnos");
static object_allocator<ira_object> object_pool ("objects");

/* Vec containing references to all created allocnos.  It is a
   container of array allocnos.  */
static vec<ira_allocno_t> allocno_vec;

/* Vec containing references to all created ira_objects.  It is a
   container of ira_object_id_map.  */
static vec<ira_object_t> ira_object_id_map_vec;

/* Initialize data concerning allocnos.  */
static void
initiate_allocnos (void)
{
  allocno_vec.create (max_reg_num () * 2);
  ira_allocnos = NULL;
  ira_allocnos_num = 0;
  ira_objects_num = 0;
  ira_object_id_map_vec.create (max_reg_num () * 2);
  ira_object_id_map = NULL;
  ira_regno_allocno_map
    = (ira_allocno_t *) ira_allocate (max_reg_num ()
                                      * sizeof (ira_allocno_t));
  memset (ira_regno_allocno_map, 0, max_reg_num () * sizeof (ira_allocno_t));
}

/* Create and return an object corresponding to a new allocno A.  */
static ira_object_t
ira_create_object (ira_allocno_t a, int subword)
{
  enum reg_class aclass = ALLOCNO_CLASS (a);
  ira_object_t obj = object_pool.allocate ();

  OBJECT_ALLOCNO (obj) = a;
  OBJECT_SUBWORD (obj) = subword;
  OBJECT_CONFLICT_ID (obj) = ira_objects_num;
  OBJECT_CONFLICT_VEC_P (obj) = false;
  OBJECT_CONFLICT_ARRAY (obj) = NULL;
  OBJECT_NUM_CONFLICTS (obj) = 0;
  OBJECT_CONFLICT_HARD_REGS (obj) = ira_no_alloc_regs;
  OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) = ira_no_alloc_regs;
  OBJECT_CONFLICT_HARD_REGS (obj) |= ~reg_class_contents[aclass];
  OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= ~reg_class_contents[aclass];
  OBJECT_MIN (obj) = INT_MAX;
  OBJECT_MAX (obj) = -1;
  OBJECT_LIVE_RANGES (obj) = NULL;

  ira_object_id_map_vec.safe_push (obj);
  ira_object_id_map
    = ira_object_id_map_vec.address ();
  ira_objects_num = ira_object_id_map_vec.length ();

  return obj;
}

/* Create and return the allocno corresponding to REGNO in
   LOOP_TREE_NODE.  Add the allocno to the list of allocnos with the
   same regno if CAP_P is FALSE.  */
ira_allocno_t
ira_create_allocno (int regno, bool cap_p,
                    ira_loop_tree_node_t loop_tree_node)
{
  ira_allocno_t a;

  a = allocno_pool.allocate ();
  ALLOCNO_REGNO (a) = regno;
  ALLOCNO_LOOP_TREE_NODE (a) = loop_tree_node;
  if (! cap_p)
    {
      ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
      ira_regno_allocno_map[regno] = a;
      if (loop_tree_node->regno_allocno_map[regno] == NULL)
        /* Remember that we can create temporary allocnos to break
           cycles in register shuffle on region borders (see
           ira-emit.cc).  */
        loop_tree_node->regno_allocno_map[regno] = a;
    }
  ALLOCNO_CAP (a) = NULL;
  ALLOCNO_CAP_MEMBER (a) = NULL;
  ALLOCNO_NUM (a) = ira_allocnos_num;
  bitmap_set_bit (loop_tree_node->all_allocnos, ALLOCNO_NUM (a));
  ALLOCNO_NREFS (a) = 0;
  ALLOCNO_FREQ (a) = 0;
  ALLOCNO_MIGHT_CONFLICT_WITH_PARENT_P (a) = false;
  ALLOCNO_HARD_REGNO (a) = -1;
  ALLOCNO_CALL_FREQ (a) = 0;
  ALLOCNO_CALLS_CROSSED_NUM (a) = 0;
  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a) = 0;
  ALLOCNO_CROSSED_CALLS_ABIS (a) = 0;
  CLEAR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
#ifdef STACK_REGS
  ALLOCNO_NO_STACK_REG_P (a) = false;
  ALLOCNO_TOTAL_NO_STACK_REG_P (a) = false;
#endif
  ALLOCNO_DONT_REASSIGN_P (a) = false;
  ALLOCNO_BAD_SPILL_P (a) = false;
  ALLOCNO_ASSIGNED_P (a) = false;
  ALLOCNO_MODE (a) = (regno < 0 ? VOIDmode : PSEUDO_REGNO_MODE (regno));
  ALLOCNO_WMODE (a) = ALLOCNO_MODE (a);
  ALLOCNO_PREFS (a) = NULL;
  ALLOCNO_COPIES (a) = NULL;
  ALLOCNO_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CLASS (a) = NO_REGS;
  ALLOCNO_UPDATED_CLASS_COST (a) = 0;
  ALLOCNO_CLASS_COST (a) = 0;
  ALLOCNO_MEMORY_COST (a) = 0;
  ALLOCNO_UPDATED_MEMORY_COST (a) = 0;
  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) = 0;
  ALLOCNO_NUM_OBJECTS (a) = 0;

  ALLOCNO_ADD_DATA (a) = NULL;
  allocno_vec.safe_push (a);
  ira_allocnos = allocno_vec.address ();
  ira_allocnos_num = allocno_vec.length ();

  return a;
}

/* Set up register class for A and update its conflict hard
   registers.  */
void
ira_set_allocno_class (ira_allocno_t a, enum reg_class aclass)
{
  ira_allocno_object_iterator oi;
  ira_object_t obj;

  ALLOCNO_CLASS (a) = aclass;
  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
    {
      OBJECT_CONFLICT_HARD_REGS (obj) |= ~reg_class_contents[aclass];
      OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= ~reg_class_contents[aclass];
    }
}

/* Determine the number of objects we should associate with allocno A
   and allocate them.  */
void
ira_create_allocno_objects (ira_allocno_t a)
{
  machine_mode mode = ALLOCNO_MODE (a);
  enum reg_class aclass = ALLOCNO_CLASS (a);
  int n = ira_reg_class_max_nregs[aclass][mode];
  int i;

  if (n != 2 || maybe_ne (GET_MODE_SIZE (mode), n * UNITS_PER_WORD))
    n = 1;

  ALLOCNO_NUM_OBJECTS (a) = n;
  for (i = 0; i < n; i++)
    ALLOCNO_OBJECT (a, i) = ira_create_object (a, i);
}
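
/* For illustration, under hypothetical target assumptions: on a
   32-bit target where UNITS_PER_WORD is 4, an allocno of a 64-bit
   mode (e.g. DImode) whose class needs exactly two hard registers
   gets two objects, one per word, so the two halves can track
   independent conflict sets; in every other case the allocno gets a
   single object.  */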

/* For each allocno, set ALLOCNO_NUM_OBJECTS and create the
   ALLOCNO_OBJECT structures.  This must be called after the allocno
   classes are known.  */
static void
create_allocno_objects (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    ira_create_allocno_objects (a);
}

/* Merge hard register conflict information for all objects associated with
   allocno FROM into the corresponding objects associated with allocno TO.
   If TOTAL_ONLY is true, we only merge OBJECT_TOTAL_CONFLICT_HARD_REGS.  */
static void
merge_hard_reg_conflicts (ira_allocno_t from, ira_allocno_t to,
                          bool total_only)
{
  int i;
  gcc_assert (ALLOCNO_NUM_OBJECTS (to) == ALLOCNO_NUM_OBJECTS (from));
  for (i = 0; i < ALLOCNO_NUM_OBJECTS (to); i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);

      if (!total_only)
        OBJECT_CONFLICT_HARD_REGS (to_obj)
          |= OBJECT_CONFLICT_HARD_REGS (from_obj);
      OBJECT_TOTAL_CONFLICT_HARD_REGS (to_obj)
        |= OBJECT_TOTAL_CONFLICT_HARD_REGS (from_obj);
    }
#ifdef STACK_REGS
  if (!total_only && ALLOCNO_NO_STACK_REG_P (from))
    ALLOCNO_NO_STACK_REG_P (to) = true;
  if (ALLOCNO_TOTAL_NO_STACK_REG_P (from))
    ALLOCNO_TOTAL_NO_STACK_REG_P (to) = true;
#endif
}

/* Update hard register conflict information for all objects associated with
   A to include the regs in SET.  */
void
ior_hard_reg_conflicts (ira_allocno_t a, const_hard_reg_set set)
{
  ira_allocno_object_iterator i;
  ira_object_t obj;

  FOR_EACH_ALLOCNO_OBJECT (a, obj, i)
    {
      OBJECT_CONFLICT_HARD_REGS (obj) |= set;
      OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= set;
    }
}

/* Return TRUE if a conflict vector with NUM elements is more
   profitable than a conflict bit vector for OBJ.  */
bool
ira_conflict_vector_profitable_p (ira_object_t obj, int num)
{
  int nbytes;
  int max = OBJECT_MAX (obj);
  int min = OBJECT_MIN (obj);

  if (max < min)
    /* We prefer a bit vector in such a case because it does not
       result in allocation.  */
    return false;

  nbytes = (max - min) / 8 + 1;
  STATIC_ASSERT (sizeof (ira_object_t) <= 8);
  /* Don't use sizeof (ira_object_t); use the constant 8 instead.  The
     size of ira_object_t (a pointer) is different on 32-bit and 64-bit
     targets, so using sizeof (ira_object_t) could make GCC built as a
     32-bit program generate different code than GCC built as a 64-bit
     program.  In any case the profitability is just an estimate and
     border cases are rare.  */
  return (2 * 8 /* sizeof (ira_object_t) */ * (num + 1) < 3 * nbytes);
}
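
/* For illustration, with hypothetical numbers: if OBJECT_MIN is 10
   and OBJECT_MAX is 809, a bit vector would cost nbytes
   = (809 - 10) / 8 + 1 = 100 bytes.  For NUM = 17 conflicts the
   vector estimate is 2 * 8 * (17 + 1) = 288 < 3 * 100 = 300, so a
   conflict vector is chosen; one more conflict (NUM = 18) tips the
   estimate to 304 and a bit vector would be used instead.  */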

/* Allocate and initialize the conflict vector of OBJ for NUM
   conflicting objects.  */
void
ira_allocate_conflict_vec (ira_object_t obj, int num)
{
  int size;
  ira_object_t *vec;

  ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
  num++; /* for NULL end marker  */
  size = sizeof (ira_object_t) * num;
  OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
  vec = (ira_object_t *) OBJECT_CONFLICT_ARRAY (obj);
  vec[0] = NULL;
  OBJECT_NUM_CONFLICTS (obj) = 0;
  OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
  OBJECT_CONFLICT_VEC_P (obj) = true;
}

/* Allocate and initialize the conflict bit vector of OBJ.  */
static void
allocate_conflict_bit_vec (ira_object_t obj)
{
  unsigned int size;

  ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
  size = ((OBJECT_MAX (obj) - OBJECT_MIN (obj) + IRA_INT_BITS)
          / IRA_INT_BITS * sizeof (IRA_INT_TYPE));
  OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
  memset (OBJECT_CONFLICT_ARRAY (obj), 0, size);
  OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
  OBJECT_CONFLICT_VEC_P (obj) = false;
}
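
/* For illustration, assuming IRA_INT_BITS is 32 and IRA_INT_TYPE is a
   32-bit integer: with OBJECT_MIN = 10 and OBJECT_MAX = 75 the vector
   must cover 66 conflict ids (10..75 inclusive), so the size above is
   (75 - 10 + 32) / 32 * 4 = 3 * 4 = 12 bytes, i.e. three 32-bit words
   whose bits are indexed relative to OBJECT_MIN.  */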

/* Allocate and initialize the conflict vector or conflict bit vector
   of OBJ for NUM conflicting allocnos, whichever is more profitable.  */
void
ira_allocate_object_conflicts (ira_object_t obj, int num)
{
  if (ira_conflict_vector_profitable_p (obj, num))
    ira_allocate_conflict_vec (obj, num);
  else
    allocate_conflict_bit_vec (obj);
}

/* Add OBJ2 to the conflicts of OBJ1.  */
static void
add_to_conflicts (ira_object_t obj1, ira_object_t obj2)
{
  int num;
  unsigned int size;

  if (OBJECT_CONFLICT_VEC_P (obj1))
    {
      ira_object_t *vec = OBJECT_CONFLICT_VEC (obj1);
      int curr_num = OBJECT_NUM_CONFLICTS (obj1);
      num = curr_num + 2;
      if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < num * sizeof (ira_object_t))
        {
          ira_object_t *newvec;
          size = (3 * num / 2 + 1) * sizeof (ira_allocno_t);
          newvec = (ira_object_t *) ira_allocate (size);
          memcpy (newvec, vec, curr_num * sizeof (ira_object_t));
          ira_free (vec);
          vec = newvec;
          OBJECT_CONFLICT_ARRAY (obj1) = vec;
          OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
        }
      vec[num - 2] = obj2;
      vec[num - 1] = NULL;
      OBJECT_NUM_CONFLICTS (obj1)++;
    }
  else
    {
      int nw, added_head_nw, id;
      IRA_INT_TYPE *vec = OBJECT_CONFLICT_BITVEC (obj1);

      id = OBJECT_CONFLICT_ID (obj2);
      if (OBJECT_MIN (obj1) > id)
        {
          /* Expand head of the bit vector.  */
          added_head_nw = (OBJECT_MIN (obj1) - id - 1) / IRA_INT_BITS + 1;
          nw = (OBJECT_MAX (obj1) - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
          size = (nw + added_head_nw) * sizeof (IRA_INT_TYPE);
          if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) >= size)
            {
              memmove ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
                       vec, nw * sizeof (IRA_INT_TYPE));
              memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
            }
          else
            {
              size
                = (3 * (nw + added_head_nw) / 2 + 1) * sizeof (IRA_INT_TYPE);
              vec = (IRA_INT_TYPE *) ira_allocate (size);
              memcpy ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
                      OBJECT_CONFLICT_ARRAY (obj1), nw * sizeof (IRA_INT_TYPE));
              memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
              memset ((char *) vec
                      + (nw + added_head_nw) * sizeof (IRA_INT_TYPE),
                      0, size - (nw + added_head_nw) * sizeof (IRA_INT_TYPE));
              ira_free (OBJECT_CONFLICT_ARRAY (obj1));
              OBJECT_CONFLICT_ARRAY (obj1) = vec;
              OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
            }
          OBJECT_MIN (obj1) -= added_head_nw * IRA_INT_BITS;
        }
      else if (OBJECT_MAX (obj1) < id)
        {
          nw = (id - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
          size = nw * sizeof (IRA_INT_TYPE);
          if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < size)
            {
              /* Expand tail of the bit vector.  */
              size = (3 * nw / 2 + 1) * sizeof (IRA_INT_TYPE);
              vec = (IRA_INT_TYPE *) ira_allocate (size);
              memcpy (vec, OBJECT_CONFLICT_ARRAY (obj1),
                      OBJECT_CONFLICT_ARRAY_SIZE (obj1));
              memset ((char *) vec + OBJECT_CONFLICT_ARRAY_SIZE (obj1),
                      0, size - OBJECT_CONFLICT_ARRAY_SIZE (obj1));
              ira_free (OBJECT_CONFLICT_ARRAY (obj1));
              OBJECT_CONFLICT_ARRAY (obj1) = vec;
              OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
            }
          OBJECT_MAX (obj1) = id;
        }
      SET_MINMAX_SET_BIT (vec, id, OBJECT_MIN (obj1), OBJECT_MAX (obj1));
    }
}
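
/* For illustration, with hypothetical numbers and IRA_INT_BITS = 32:
   if OBJ1's bit vector currently covers ids [64..127] (OBJECT_MIN
   = 64, OBJECT_MAX = 127) and OBJ2 has conflict id 40, the head
   expansion above prepends added_head_nw = (64 - 40 - 1) / 32 + 1
   = 1 word and lowers OBJECT_MIN to 32; an id above 127 would
   instead grow the tail and raise OBJECT_MAX to that id.  Either
   way, SET_MINMAX_SET_BIT finally sets the bit for the id relative
   to the (possibly widened) [OBJECT_MIN, OBJECT_MAX] window.  */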

/* Add OBJ1 to the conflicts of OBJ2 and vice versa.  */
static void
ira_add_conflict (ira_object_t obj1, ira_object_t obj2)
{
  add_to_conflicts (obj1, obj2);
  add_to_conflicts (obj2, obj1);
}

/* Clear all conflicts of OBJ.  */
static void
clear_conflicts (ira_object_t obj)
{
  if (OBJECT_CONFLICT_VEC_P (obj))
    {
      OBJECT_NUM_CONFLICTS (obj) = 0;
      OBJECT_CONFLICT_VEC (obj)[0] = NULL;
    }
  else if (OBJECT_CONFLICT_ARRAY_SIZE (obj) != 0)
    {
      int nw;

      nw = (OBJECT_MAX (obj) - OBJECT_MIN (obj)) / IRA_INT_BITS + 1;
      memset (OBJECT_CONFLICT_BITVEC (obj), 0, nw * sizeof (IRA_INT_TYPE));
    }
}

/* The array used to find duplications in conflict vectors of
   allocnos.  */
static int *conflict_check;

/* The value used to mark allocno presence in the conflict vector of
   the current allocno.  */
static int curr_conflict_check_tick;

/* Remove duplications in conflict vector of OBJ.  */
static void
compress_conflict_vec (ira_object_t obj)
{
  ira_object_t *vec, conflict_obj;
  int i, j;

  ira_assert (OBJECT_CONFLICT_VEC_P (obj));
  vec = OBJECT_CONFLICT_VEC (obj);
  curr_conflict_check_tick++;
  for (i = j = 0; (conflict_obj = vec[i]) != NULL; i++)
    {
      int id = OBJECT_CONFLICT_ID (conflict_obj);
      if (conflict_check[id] != curr_conflict_check_tick)
        {
          conflict_check[id] = curr_conflict_check_tick;
          vec[j++] = conflict_obj;
        }
    }
  OBJECT_NUM_CONFLICTS (obj) = j;
  vec[j] = NULL;
}

/* Remove duplications in conflict vectors of all allocnos.  */
static void
compress_conflict_vecs (void)
{
  ira_object_t obj;
  ira_object_iterator oi;

  conflict_check = (int *) ira_allocate (sizeof (int) * ira_objects_num);
  memset (conflict_check, 0, sizeof (int) * ira_objects_num);
  curr_conflict_check_tick = 0;
  FOR_EACH_OBJECT (obj, oi)
    {
      if (OBJECT_CONFLICT_VEC_P (obj))
        compress_conflict_vec (obj);
    }
  ira_free (conflict_check);
}

/* This recursive function outputs allocno A and, if it is a cap,
   also outputs its members.  */
void
ira_print_expanded_allocno (ira_allocno_t a)
{
  basic_block bb;

  fprintf (ira_dump_file, " a%d(r%d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
    fprintf (ira_dump_file, ",b%d", bb->index);
  else
    fprintf (ira_dump_file, ",l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
  if (ALLOCNO_CAP_MEMBER (a) != NULL)
    {
      fprintf (ira_dump_file, ":");
      ira_print_expanded_allocno (ALLOCNO_CAP_MEMBER (a));
    }
  fprintf (ira_dump_file, ")");
}
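
/* For illustration (hypothetical numbers): a cap allocno a10 built in
   loop 2 for member allocno a5 of pseudo r60 living in basic block 3
   would be dumped as " a10(r60,l2: a5(r60,b3))".  */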

/* Create and return the cap representing allocno A in the
   parent loop.  */
static ira_allocno_t
create_cap_allocno (ira_allocno_t a)
{
  ira_allocno_t cap;
  ira_loop_tree_node_t parent;
  enum reg_class aclass;

  parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
  cap = ira_create_allocno (ALLOCNO_REGNO (a), true, parent);
  ALLOCNO_MODE (cap) = ALLOCNO_MODE (a);
  ALLOCNO_WMODE (cap) = ALLOCNO_WMODE (a);
  aclass = ALLOCNO_CLASS (a);
  ira_set_allocno_class (cap, aclass);
  ira_create_allocno_objects (cap);
  ALLOCNO_CAP_MEMBER (cap) = a;
  ALLOCNO_CAP (a) = cap;
  ALLOCNO_CLASS_COST (cap) = ALLOCNO_CLASS_COST (a);
  ALLOCNO_MEMORY_COST (cap) = ALLOCNO_MEMORY_COST (a);
  ira_allocate_and_copy_costs
    (&ALLOCNO_HARD_REG_COSTS (cap), aclass, ALLOCNO_HARD_REG_COSTS (a));
  ira_allocate_and_copy_costs
    (&ALLOCNO_CONFLICT_HARD_REG_COSTS (cap), aclass,
     ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
  ALLOCNO_BAD_SPILL_P (cap) = ALLOCNO_BAD_SPILL_P (a);
  ALLOCNO_NREFS (cap) = ALLOCNO_NREFS (a);
  ALLOCNO_FREQ (cap) = ALLOCNO_FREQ (a);
  ALLOCNO_CALL_FREQ (cap) = ALLOCNO_CALL_FREQ (a);

  merge_hard_reg_conflicts (a, cap, false);

  ALLOCNO_CALLS_CROSSED_NUM (cap) = ALLOCNO_CALLS_CROSSED_NUM (a);
  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (cap) = ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
  ALLOCNO_CROSSED_CALLS_ABIS (cap) = ALLOCNO_CROSSED_CALLS_ABIS (a);
  ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (cap)
    = ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a);
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    {
      fprintf (ira_dump_file, " Creating cap ");
      ira_print_expanded_allocno (cap);
      fprintf (ira_dump_file, "\n");
    }
  return cap;
}

/* Create and return a live range for OBJECT with given attributes.  */
live_range_t
ira_create_live_range (ira_object_t obj, int start, int finish,
                       live_range_t next)
{
  live_range_t p;

  p = live_range_pool.allocate ();
  p->object = obj;
  p->start = start;
  p->finish = finish;
  p->next = next;
  return p;
}

/* Create a new live range for OBJECT and queue it at the head of its
   live range list.  */
void
ira_add_live_range_to_object (ira_object_t object, int start, int finish)
{
  live_range_t p;
  p = ira_create_live_range (object, start, finish,
                             OBJECT_LIVE_RANGES (object));
  OBJECT_LIVE_RANGES (object) = p;
}

/* Copy allocno live range R and return the result.  */
static live_range_t
copy_live_range (live_range_t r)
{
  live_range_t p;

  p = live_range_pool.allocate ();
  *p = *r;
  return p;
}

/* Copy the allocno live range list given by its head R and return the
   result.  */
live_range_t
ira_copy_live_range_list (live_range_t r)
{
  live_range_t p, first, last;

  if (r == NULL)
    return NULL;
  for (first = last = NULL; r != NULL; r = r->next)
    {
      p = copy_live_range (r);
      if (first == NULL)
        first = p;
      else
        last->next = p;
      last = p;
    }
  return first;
}
/* Merge ranges R1 and R2 and return the result.  The function
   maintains the order of ranges and tries to minimize the number of
   result ranges.  */
live_range_t
ira_merge_live_ranges (live_range_t r1, live_range_t r2)
{
  live_range_t first, last;

  if (r1 == NULL)
    return r2;
  if (r2 == NULL)
    return r1;
  for (first = last = NULL; r1 != NULL && r2 != NULL;)
    {
      if (r1->start < r2->start)
        std::swap (r1, r2);
      if (r1->start <= r2->finish + 1)
        {
          /* Intersected ranges: merge r1 and r2 into r1.  */
          r1->start = r2->start;
          if (r1->finish < r2->finish)
            r1->finish = r2->finish;
          live_range_t temp = r2;
          r2 = r2->next;
          ira_finish_live_range (temp);
          if (r2 == NULL)
            {
              /* To try to merge with subsequent ranges in r1.  */
              r2 = r1->next;
              r1->next = NULL;
            }
        }
      else
        {
          /* Add r1 to the result.  */
          if (first == NULL)
            first = last = r1;
          else
            {
              last->next = r1;
              last = r1;
            }
          r1 = r1->next;
          if (r1 == NULL)
            {
              /* To try to merge with subsequent ranges in r2.  */
              r1 = r2->next;
              r2->next = NULL;
            }
        }
    }
  if (r1 != NULL)
    {
      if (first == NULL)
        first = r1;
      else
        last->next = r1;
      ira_assert (r1->next == NULL);
    }
  else if (r2 != NULL)
    {
      if (first == NULL)
        first = r2;
      else
        last->next = r2;
      ira_assert (r2->next == NULL);
    }
  else
    {
      ira_assert (last->next == NULL);
    }
  return first;
}
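
/* For illustration, with hypothetical ranges written [start..finish]
   (the lists are kept sorted by decreasing start, i.e. from later to
   earlier program points): merging [40..50] -> [10..20] with
   [18..30] yields [40..50] -> [10..30].  [18..30] abuts/overlaps
   [10..20] (its start 18 <= 20 + 1), so the two are fused into one
   range, while disjoint ranges are simply appended in order.  */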

/* Return TRUE if live ranges R1 and R2 intersect.  */
bool
ira_live_ranges_intersect_p (live_range_t r1, live_range_t r2)
{
  /* Remember the live ranges are always kept ordered.  */
  while (r1 != NULL && r2 != NULL)
    {
      if (r1->start > r2->finish)
        r1 = r1->next;
      else if (r2->start > r1->finish)
        r2 = r2->next;
      else
        return true;
    }
  return false;
}

/* Free allocno live range R.  */
void
ira_finish_live_range (live_range_t r)
{
  live_range_pool.remove (r);
}

/* Free the list of allocno live ranges starting with R.  */
void
ira_finish_live_range_list (live_range_t r)
{
  live_range_t next_r;

  for (; r != NULL; r = next_r)
    {
      next_r = r->next;
      ira_finish_live_range (r);
    }
}

/* Free updated register costs of allocno A.  */
void
ira_free_allocno_updated_costs (ira_allocno_t a)
{
  enum reg_class aclass;

  aclass = ALLOCNO_CLASS (a);
  if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
                          aclass);
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
}

/* Free and nullify all cost vectors allocated earlier for allocno
   A.  */
static void
ira_free_allocno_costs (ira_allocno_t a)
{
  enum reg_class aclass = ALLOCNO_CLASS (a);
  ira_object_t obj;
  ira_allocno_object_iterator oi;

  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
    {
      ira_finish_live_range_list (OBJECT_LIVE_RANGES (obj));
      ira_object_id_map[OBJECT_CONFLICT_ID (obj)] = NULL;
      if (OBJECT_CONFLICT_ARRAY (obj) != NULL)
        ira_free (OBJECT_CONFLICT_ARRAY (obj));
      object_pool.remove (obj);
    }

  ira_allocnos[ALLOCNO_NUM (a)] = NULL;
  if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_CONFLICT_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
                          aclass);
  ALLOCNO_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
}

/* Free the memory allocated for allocno A.  */
static void
finish_allocno (ira_allocno_t a)
{
  ira_free_allocno_costs (a);
  allocno_pool.remove (a);
}

/* Free the memory allocated for all allocnos.  */
static void
finish_allocnos (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    finish_allocno (a);
  ira_free (ira_regno_allocno_map);
  ira_object_id_map_vec.release ();
  allocno_vec.release ();
  allocno_pool.release ();
  object_pool.release ();
  live_range_pool.release ();
}


/* Pools for allocno preferences.  */
static object_allocator<ira_allocno_pref> pref_pool ("prefs");

/* Vec containing references to all created preferences.  It is a
   container of array ira_prefs.  */
static vec<ira_pref_t> pref_vec;

/* The function initializes data concerning allocno prefs.  */
static void
initiate_prefs (void)
{
  pref_vec.create (get_max_uid ());
  ira_prefs = NULL;
  ira_prefs_num = 0;
}

/* Return the pref for A and HARD_REGNO, if any.  */
static ira_pref_t
find_allocno_pref (ira_allocno_t a, int hard_regno)
{
  ira_pref_t pref;

  for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = pref->next_pref)
    if (pref->allocno == a && pref->hard_regno == hard_regno)
      return pref;
  return NULL;
}

/* Create and return a pref with given attributes A, HARD_REGNO, and FREQ.  */
ira_pref_t
ira_create_pref (ira_allocno_t a, int hard_regno, int freq)
{
  ira_pref_t pref;

  pref = pref_pool.allocate ();
  pref->num = ira_prefs_num;
  pref->allocno = a;
  pref->hard_regno = hard_regno;
  pref->freq = freq;
  pref_vec.safe_push (pref);
  ira_prefs = pref_vec.address ();
  ira_prefs_num = pref_vec.length ();
  return pref;
}

/* Attach a pref PREF to the corresponding allocno.  */
static void
add_allocno_pref_to_list (ira_pref_t pref)
{
  ira_allocno_t a = pref->allocno;

  pref->next_pref = ALLOCNO_PREFS (a);
  ALLOCNO_PREFS (a) = pref;
}

/* Create (or update the frequency if the pref already exists) the
   pref of allocno A preferring HARD_REGNO with frequency FREQ.  */
void
ira_add_allocno_pref (ira_allocno_t a, int hard_regno, int freq)
{
  ira_pref_t pref;

  if (freq <= 0)
    return;
  if ((pref = find_allocno_pref (a, hard_regno)) != NULL)
    {
      pref->freq += freq;
      return;
    }
  pref = ira_create_pref (a, hard_regno, freq);
  ira_assert (a != NULL);
  add_allocno_pref_to_list (pref);
}

/* Print info about PREF into file F.  */
static void
print_pref (FILE *f, ira_pref_t pref)
{
  fprintf (f, " pref%d:a%d(r%d)<-hr%d@%d\n", pref->num,
           ALLOCNO_NUM (pref->allocno), ALLOCNO_REGNO (pref->allocno),
           pref->hard_regno, pref->freq);
}

/* Print info about PREF into stderr.  */
void
ira_debug_pref (ira_pref_t pref)
{
  print_pref (stderr, pref);
}

/* Print info about all prefs into file F.  */
static void
print_prefs (FILE *f)
{
  ira_pref_t pref;
  ira_pref_iterator pi;

  FOR_EACH_PREF (pref, pi)
    print_pref (f, pref);
}

/* Print info about all prefs into stderr.  */
void
ira_debug_prefs (void)
{
  print_prefs (stderr);
}

/* Print info about prefs involving allocno A into file F.  */
static void
print_allocno_prefs (FILE *f, ira_allocno_t a)
{
  ira_pref_t pref;

  fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = pref->next_pref)
    fprintf (f, " pref%d:hr%d@%d", pref->num, pref->hard_regno, pref->freq);
  fprintf (f, "\n");
}

/* Print info about prefs involving allocno A into stderr.  */
void
ira_debug_allocno_prefs (ira_allocno_t a)
{
  print_allocno_prefs (stderr, a);
}

/* The function frees memory allocated for PREF.  */
static void
finish_pref (ira_pref_t pref)
{
  ira_prefs[pref->num] = NULL;
  pref_pool.remove (pref);
}

/* Remove PREF from the list of allocno prefs and free memory for
   it.  */
void
ira_remove_pref (ira_pref_t pref)
{
  ira_pref_t cpref, prev;

  if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
    fprintf (ira_dump_file, " Removing pref%d:hr%d@%d\n",
             pref->num, pref->hard_regno, pref->freq);
  for (prev = NULL, cpref = ALLOCNO_PREFS (pref->allocno);
       cpref != NULL;
       prev = cpref, cpref = cpref->next_pref)
    if (cpref == pref)
      break;
  ira_assert (cpref != NULL);
  if (prev == NULL)
    ALLOCNO_PREFS (pref->allocno) = pref->next_pref;
  else
    prev->next_pref = pref->next_pref;
  finish_pref (pref);
}

/* Remove all prefs of allocno A.  */
void
ira_remove_allocno_prefs (ira_allocno_t a)
{
  ira_pref_t pref, next_pref;

  for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = next_pref)
    {
      next_pref = pref->next_pref;
      finish_pref (pref);
    }
  ALLOCNO_PREFS (a) = NULL;
}

/* Free memory allocated for all prefs.  */
static void
finish_prefs (void)
{
  ira_pref_t pref;
  ira_pref_iterator pi;

  FOR_EACH_PREF (pref, pi)
    finish_pref (pref);
  pref_vec.release ();
  pref_pool.release ();
}


/* Pools for copies.  */
static object_allocator<ira_allocno_copy> copy_pool ("copies");

/* Vec containing references to all created copies.  It is a
   container of array ira_copies.  */
static vec<ira_copy_t> copy_vec;

/* The function initializes data concerning allocno copies.  */
static void
initiate_copies (void)
{
  copy_vec.create (get_max_uid ());
  ira_copies = NULL;
  ira_copies_num = 0;
}

/* Return the copy connecting A1 and A2 that originated from INSN of
   LOOP_TREE_NODE, if any.  */
static ira_copy_t
find_allocno_copy (ira_allocno_t a1, ira_allocno_t a2, rtx_insn *insn,
                   ira_loop_tree_node_t loop_tree_node)
{
  ira_copy_t cp, next_cp;
  ira_allocno_t another_a;

  for (cp = ALLOCNO_COPIES (a1); cp != NULL; cp = next_cp)
    {
      if (cp->first == a1)
        {
          next_cp = cp->next_first_allocno_copy;
          another_a = cp->second;
        }
      else if (cp->second == a1)
        {
          next_cp = cp->next_second_allocno_copy;
          another_a = cp->first;
        }
      else
        gcc_unreachable ();
      if (another_a == a2 && cp->insn == insn
          && cp->loop_tree_node == loop_tree_node)
        return cp;
    }
  return NULL;
}

/* Create and return a copy with given attributes LOOP_TREE_NODE, FIRST,
   SECOND, FREQ, CONSTRAINT_P, and INSN.  */
ira_copy_t
ira_create_copy (ira_allocno_t first, ira_allocno_t second, int freq,
                 bool constraint_p, rtx_insn *insn,
                 ira_loop_tree_node_t loop_tree_node)
{
  ira_copy_t cp;

  cp = copy_pool.allocate ();
  cp->num = ira_copies_num;
  cp->first = first;
  cp->second = second;
  cp->freq = freq;
  cp->constraint_p = constraint_p;
  cp->insn = insn;
  cp->loop_tree_node = loop_tree_node;
  copy_vec.safe_push (cp);
  ira_copies = copy_vec.address ();
  ira_copies_num = copy_vec.length ();
  return cp;
}

/* Attach a copy CP to the allocnos involved in the copy.  */
static void
add_allocno_copy_to_list (ira_copy_t cp)
{
  ira_allocno_t first = cp->first, second = cp->second;

  cp->prev_first_allocno_copy = NULL;
  cp->prev_second_allocno_copy = NULL;
  cp->next_first_allocno_copy = ALLOCNO_COPIES (first);
  if (cp->next_first_allocno_copy != NULL)
    {
      if (cp->next_first_allocno_copy->first == first)
        cp->next_first_allocno_copy->prev_first_allocno_copy = cp;
      else
        cp->next_first_allocno_copy->prev_second_allocno_copy = cp;
    }
  cp->next_second_allocno_copy = ALLOCNO_COPIES (second);
  if (cp->next_second_allocno_copy != NULL)
    {
      if (cp->next_second_allocno_copy->second == second)
        cp->next_second_allocno_copy->prev_second_allocno_copy = cp;
      else
        cp->next_second_allocno_copy->prev_first_allocno_copy = cp;
    }
  ALLOCNO_COPIES (first) = cp;
  ALLOCNO_COPIES (second) = cp;
}

/* Make copy CP canonical: ensure that the number of the first
   allocno is not greater than the number of the second one.  */
static void
swap_allocno_copy_ends_if_necessary (ira_copy_t cp)
{
  if (ALLOCNO_NUM (cp->first) <= ALLOCNO_NUM (cp->second))
    return;

  std::swap (cp->first, cp->second);
  std::swap (cp->prev_first_allocno_copy, cp->prev_second_allocno_copy);
  std::swap (cp->next_first_allocno_copy, cp->next_second_allocno_copy);
}

/* Create (or update the frequency if the copy already exists) and
   return the copy of allocnos FIRST and SECOND with frequency FREQ
   corresponding to move insn INSN (if any) and originated from
   LOOP_TREE_NODE.  */
ira_copy_t
ira_add_allocno_copy (ira_allocno_t first, ira_allocno_t second, int freq,
                      bool constraint_p, rtx_insn *insn,
                      ira_loop_tree_node_t loop_tree_node)
{
  ira_copy_t cp;

  if ((cp = find_allocno_copy (first, second, insn, loop_tree_node)) != NULL)
    {
      cp->freq += freq;
      return cp;
    }
  cp = ira_create_copy (first, second, freq, constraint_p, insn,
                        loop_tree_node);
  ira_assert (first != NULL && second != NULL);
  add_allocno_copy_to_list (cp);
  swap_allocno_copy_ends_if_necessary (cp);
  return cp;
}

/* Print info about copy CP into file F.  */
static void
print_copy (FILE *f, ira_copy_t cp)
{
  fprintf (f, " cp%d:a%d(r%d)<->a%d(r%d)@%d:%s\n", cp->num,
           ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
           ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second), cp->freq,
           cp->insn != NULL
           ? "move" : cp->constraint_p ? "constraint" : "shuffle");
}
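
/* For illustration (hypothetical numbers): a copy originating from a
   move insn between a3 of pseudo r61 and a5 of pseudo r60 with
   frequency 120 is dumped as " cp1:a3(r61)<->a5(r60)@120:move";
   copies without an insn are tagged "constraint" or "shuffle"
   instead.  */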

DEBUG_FUNCTION void
debug (ira_allocno_copy &ref)
{
  print_copy (stderr, &ref);
}

DEBUG_FUNCTION void
debug (ira_allocno_copy *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}

/* Print info about copy CP into stderr.  */
void
ira_debug_copy (ira_copy_t cp)
{
  print_copy (stderr, cp);
}

/* Print info about all copies into file F.  */
static void
print_copies (FILE *f)
{
  ira_copy_t cp;
  ira_copy_iterator ci;

  FOR_EACH_COPY (cp, ci)
    print_copy (f, cp);
}

/* Print info about all copies into stderr.  */
void
ira_debug_copies (void)
{
  print_copies (stderr);
}

/* Print info about copies involving allocno A into file F.  */
static void
print_allocno_copies (FILE *f, ira_allocno_t a)
{
  ira_allocno_t another_a;
  ira_copy_t cp, next_cp;

  fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
    {
      if (cp->first == a)
        {
          next_cp = cp->next_first_allocno_copy;
          another_a = cp->second;
        }
      else if (cp->second == a)
        {
          next_cp = cp->next_second_allocno_copy;
          another_a = cp->first;
        }
      else
        gcc_unreachable ();
      fprintf (f, " cp%d:a%d(r%d)@%d", cp->num,
               ALLOCNO_NUM (another_a), ALLOCNO_REGNO (another_a), cp->freq);
    }
  fprintf (f, "\n");
}

DEBUG_FUNCTION void
debug (ira_allocno &ref)
{
  print_allocno_copies (stderr, &ref);
}

DEBUG_FUNCTION void
debug (ira_allocno *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}


/* Print info about copies involving allocno A into stderr.  */
void
ira_debug_allocno_copies (ira_allocno_t a)
{
  print_allocno_copies (stderr, a);
}

/* The function frees memory allocated for copy CP.  */
static void
finish_copy (ira_copy_t cp)
{
  copy_pool.remove (cp);
}


/* Free memory allocated for all copies.  */
static void
finish_copies (void)
{
  ira_copy_t cp;
  ira_copy_iterator ci;

  FOR_EACH_COPY (cp, ci)
    finish_copy (cp);
  copy_vec.release ();
  copy_pool.release ();
}


/* Pools for cost vectors.  They are defined only for allocno
   classes.  */
static pool_allocator *cost_vector_pool[N_REG_CLASSES];

/* The function initiates work with hard register cost vectors.  It
   creates an allocation pool for each allocno class.  */
static void
initiate_cost_vectors (void)
{
  int i;
  enum reg_class aclass;

  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      cost_vector_pool[aclass] = new pool_allocator
        ("cost vectors", sizeof (int) * (ira_class_hard_regs_num[aclass]));
    }
}

/* Allocate and return a cost vector for ACLASS.  */
int *
ira_allocate_cost_vector (reg_class_t aclass)
{
  return (int *) cost_vector_pool[(int) aclass]->allocate ();
}

/* Free cost vector VEC for ACLASS.  */
void
ira_free_cost_vector (int *vec, reg_class_t aclass)
{
  ira_assert (vec != NULL);
  cost_vector_pool[(int) aclass]->remove (vec);
}

/* Finish work with hard register cost vectors.  Release the
   allocation pool for each allocno class.  */
static void
finish_cost_vectors (void)
{
  int i;
  enum reg_class aclass;

  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      delete cost_vector_pool[aclass];
    }
}


/* Compute a post-ordering of the reverse control flow of the loop body
   designated by the children nodes of LOOP_NODE, whose body nodes in
   pre-order are input as LOOP_PREORDER.  Return a VEC with a post-order
   of the reverse loop body.

   For the post-order of the reverse CFG, we visit the basic blocks in
   the LOOP_PREORDER array in the reverse order of where they appear.
   This is important: We do not just want to compute a post-order of
   the reverse CFG, we want to make a best-guess for a visiting order that
   minimizes the number of chain elements per allocno live range.  If the
   blocks were visited in a different order, we would still compute a
   correct post-ordering, but it would be less likely that two nodes
   connected by an edge in the CFG are neighbors in the topsort.  */

static vec<ira_loop_tree_node_t>
ira_loop_tree_body_rev_postorder (ira_loop_tree_node_t loop_node ATTRIBUTE_UNUSED,
                                  const vec<ira_loop_tree_node_t> &loop_preorder)
{
  vec<ira_loop_tree_node_t> topsort_nodes = vNULL;
  unsigned int n_loop_preorder;

  n_loop_preorder = loop_preorder.length ();
  if (n_loop_preorder != 0)
    {
      ira_loop_tree_node_t subloop_node;
      unsigned int i;
      auto_vec<ira_loop_tree_node_t> dfs_stack;

      /* This is a bit of a strange abuse of the BB_VISITED flag:  We
	 use the flag to mark blocks we still have to visit to add them
	 to our post-order.  Define an alias to avoid confusion.  */
#define BB_TO_VISIT BB_VISITED

      FOR_EACH_VEC_ELT (loop_preorder, i, subloop_node)
	{
	  gcc_checking_assert (! (subloop_node->bb->flags & BB_TO_VISIT));
	  subloop_node->bb->flags |= BB_TO_VISIT;
	}

      topsort_nodes.create (n_loop_preorder);
      dfs_stack.create (n_loop_preorder);

      FOR_EACH_VEC_ELT_REVERSE (loop_preorder, i, subloop_node)
	{
	  if (! (subloop_node->bb->flags & BB_TO_VISIT))
	    continue;

	  subloop_node->bb->flags &= ~BB_TO_VISIT;
	  dfs_stack.quick_push (subloop_node);
	  while (! dfs_stack.is_empty ())
	    {
	      edge e;
	      edge_iterator ei;

	      ira_loop_tree_node_t n = dfs_stack.last ();
	      FOR_EACH_EDGE (e, ei, n->bb->preds)
		{
		  ira_loop_tree_node_t pred_node;
		  basic_block pred_bb = e->src;

		  if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
		    continue;

		  pred_node = IRA_BB_NODE_BY_INDEX (pred_bb->index);
		  if (pred_node != n
		      && (pred_node->bb->flags & BB_TO_VISIT))
		    {
		      pred_node->bb->flags &= ~BB_TO_VISIT;
		      dfs_stack.quick_push (pred_node);
		    }
		}
	      if (n == dfs_stack.last ())
		{
		  dfs_stack.pop ();
		  topsort_nodes.quick_push (n);
		}
	    }
	}

#undef BB_TO_VISIT
    }

  gcc_assert (topsort_nodes.length () == n_loop_preorder);
  return topsort_nodes;
}
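
/* For illustration, take a hypothetical loop body with pre-order
   A, B, C, D and CFG edges A->B, A->C, B->D, C->D.  The scan above
   seeds the DFS at D (the last block in the pre-order), walks CFG
   predecessors, and emits a block only once every block pushed from
   it is finished, yielding e.g. A, C, B, D -- a valid post-order of
   the reverse CFG in which blocks connected by CFG edges tend to
   stay adjacent.  */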

/* The current loop tree node and its regno allocno map.  */
ira_loop_tree_node_t ira_curr_loop_tree_node;
ira_allocno_t *ira_curr_regno_allocno_map;

/* This recursive function traverses the loop tree with root LOOP_NODE,
   calling the non-null functions PREORDER_FUNC and POSTORDER_FUNC
   correspondingly in preorder and postorder.  The function sets up
   IRA_CURR_LOOP_TREE_NODE and IRA_CURR_REGNO_ALLOCNO_MAP.  If BB_P,
   basic block nodes of LOOP_NODE are also processed (before its
   subloop nodes).

   If BB_P is set and POSTORDER_FUNC is given, the basic blocks in
   the loop are passed in the *reverse* post-order of the *reverse*
   CFG.  This is only used by ira_create_allocno_live_ranges, which
   wants to visit basic blocks in this order to minimize the number
   of elements per live range chain.
   Note that the loop tree nodes are still visited in the normal,
   forward post-order of the loop tree.  */

void
ira_traverse_loop_tree (bool bb_p, ira_loop_tree_node_t loop_node,
                        void (*preorder_func) (ira_loop_tree_node_t),
                        void (*postorder_func) (ira_loop_tree_node_t))
{
  ira_loop_tree_node_t subloop_node;

  ira_assert (loop_node->bb == NULL);
  ira_curr_loop_tree_node = loop_node;
  ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;

  if (preorder_func != NULL)
    (*preorder_func) (loop_node);

  if (bb_p)
    {
      auto_vec<ira_loop_tree_node_t> loop_preorder;
      unsigned int i;

      /* Add all nodes to the set of nodes to visit.  The IRA loop tree
         is set up such that nodes in the loop body appear in a pre-order
         of their place in the CFG.  */
      for (subloop_node = loop_node->children;
           subloop_node != NULL;
           subloop_node = subloop_node->next)
        if (subloop_node->bb != NULL)
          loop_preorder.safe_push (subloop_node);

      if (preorder_func != NULL)
        FOR_EACH_VEC_ELT (loop_preorder, i, subloop_node)
          (*preorder_func) (subloop_node);

      if (postorder_func != NULL)
        {
          vec<ira_loop_tree_node_t> loop_rev_postorder =
            ira_loop_tree_body_rev_postorder (loop_node, loop_preorder);
          FOR_EACH_VEC_ELT_REVERSE (loop_rev_postorder, i, subloop_node)
            (*postorder_func) (subloop_node);
          loop_rev_postorder.release ();
        }
    }

  for (subloop_node = loop_node->subloops;
       subloop_node != NULL;
       subloop_node = subloop_node->subloop_next)
    {
      ira_assert (subloop_node->bb == NULL);
      ira_traverse_loop_tree (bb_p, subloop_node,
                              preorder_func, postorder_func);
    }

  ira_curr_loop_tree_node = loop_node;
  ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;

  if (postorder_func != NULL)
    (*postorder_func) (loop_node);
}
1827
1828
1829
1830/* The basic block currently being processed. */
1831static basic_block curr_bb;
1832
1833/* This recursive function creates allocnos corresponding to
1834 pseudo-registers containing in X. True OUTPUT_P means that X is
1835 an lvalue. OUTER corresponds to the parent expression of X. */
1836static void
1837create_insn_allocnos (rtx x, rtx outer, bool output_p)
1838{
1839 int i, j;
1840 const char *fmt;
1841 enum rtx_code code = GET_CODE (x);
1842
1843 if (code == REG)
1844 {
1845 int regno;
1846
1847 if ((regno = REGNO (x)) >= FIRST_PSEUDO_REGISTER)
1848 {
1849 ira_allocno_t a;
1850
1851 if ((a = ira_curr_regno_allocno_map[regno]) == NULL)
1852 {
1853 a = ira_create_allocno (regno, cap_p: false, loop_tree_node: ira_curr_loop_tree_node);
1854 if (outer != NULL && GET_CODE (outer) == SUBREG)
1855 {
1856 machine_mode wmode = GET_MODE (outer);
1857 if (partial_subreg_p (ALLOCNO_WMODE (a), innermode: wmode))
1858 ALLOCNO_WMODE (a) = wmode;
1859 }
1860 }
1861
1862 ALLOCNO_NREFS (a)++;
1863 ALLOCNO_FREQ (a) += REG_FREQ_FROM_BB (curr_bb);
1864 if (output_p)
1865 bitmap_set_bit (ira_curr_loop_tree_node->modified_regnos, regno);
1866 }
1867 return;
1868 }
1869 else if (code == SET)
1870 {
1871 create_insn_allocnos (SET_DEST (x), NULL, output_p: true);
1872 create_insn_allocnos (SET_SRC (x), NULL, output_p: false);
1873 return;
1874 }
1875 else if (code == CLOBBER)
1876 {
1877 create_insn_allocnos (XEXP (x, 0), NULL, output_p: true);
1878 return;
1879 }
1880 else if (code == MEM)
1881 {
1882 create_insn_allocnos (XEXP (x, 0), NULL, output_p: false);
1883 return;
1884 }
1885 else if (code == PRE_DEC || code == POST_DEC || code == PRE_INC
1886 || code == POST_INC || code == POST_MODIFY || code == PRE_MODIFY)
1887 {
1888 create_insn_allocnos (XEXP (x, 0), NULL, output_p: true);
1889 create_insn_allocnos (XEXP (x, 0), NULL, output_p: false);
1890 return;
1891 }
1892
1893 fmt = GET_RTX_FORMAT (code);
1894 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1895 {
1896 if (fmt[i] == 'e')
1897 create_insn_allocnos (XEXP (x, i), outer: x, output_p);
1898 else if (fmt[i] == 'E')
1899 for (j = 0; j < XVECLEN (x, i); j++)
1900 create_insn_allocnos (XVECEXP (x, i, j), outer: x, output_p);
1901 }
1902}
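
/* Illustrative example (not taken from any real insn stream): for an
   insn whose pattern is

     (set (reg:SI 100) (plus:SI (reg:SI 101) (mem:SI (reg:SI 102))))

   the walker above creates or reuses an allocno for pseudo 100 as an
   output (via the SET case) and allocnos for pseudos 101 and 102 as
   inputs; 102 counts as an input because the MEM case always recurses
   into the address with OUTPUT_P false.  */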
1903
1904/* Create allocnos corresponding to pseudo-registers living in the
1905 basic block represented by the corresponding loop tree node
1906 BB_NODE. */
1907static void
1908create_bb_allocnos (ira_loop_tree_node_t bb_node)
1909{
1910 basic_block bb;
1911 rtx_insn *insn;
1912 unsigned int i;
1913 bitmap_iterator bi;
1914
1915 curr_bb = bb = bb_node->bb;
1916 ira_assert (bb != NULL);
1917 FOR_BB_INSNS_REVERSE (bb, insn)
1918 if (NONDEBUG_INSN_P (insn))
1919 create_insn_allocnos (x: PATTERN (insn), NULL, output_p: false);
1920 /* There might be an allocno living through the block from one
1921 subloop to another.  */
1922 EXECUTE_IF_SET_IN_REG_SET (df_get_live_in (bb), FIRST_PSEUDO_REGISTER, i, bi)
1923 if (ira_curr_regno_allocno_map[i] == NULL)
1924 ira_create_allocno (regno: i, cap_p: false, loop_tree_node: ira_curr_loop_tree_node);
1925}
1926
1927/* Create allocnos corresponding to pseudo-registers living on edge E
1928 (a loop entry or exit). Also mark the allocnos as living on the
1929 loop border. */
1930static void
1931create_loop_allocnos (edge e)
1932{
1933 unsigned int i;
1934 bitmap live_in_regs, border_allocnos;
1935 bitmap_iterator bi;
1936 ira_loop_tree_node_t parent;
1937
1938 live_in_regs = df_get_live_in (bb: e->dest);
1939 border_allocnos = ira_curr_loop_tree_node->border_allocnos;
1940 EXECUTE_IF_SET_IN_REG_SET (df_get_live_out (e->src),
1941 FIRST_PSEUDO_REGISTER, i, bi)
1942 if (bitmap_bit_p (live_in_regs, i))
1943 {
1944 if (ira_curr_regno_allocno_map[i] == NULL)
1945 {
1946 /* The order of creation is important for a correct
1947 ira_regno_allocno_map.  */
1948 if ((parent = ira_curr_loop_tree_node->parent) != NULL
1949 && parent->regno_allocno_map[i] == NULL)
1950 ira_create_allocno (regno: i, cap_p: false, loop_tree_node: parent);
1951 ira_create_allocno (regno: i, cap_p: false, loop_tree_node: ira_curr_loop_tree_node);
1952 }
1953 bitmap_set_bit (border_allocnos,
1954 ALLOCNO_NUM (ira_curr_regno_allocno_map[i]));
1955 }
1956}
1957
1958/* Create allocnos corresponding to pseudo-registers living in loop
1959 represented by the corresponding loop tree node LOOP_NODE. This
1960 function is called by ira_traverse_loop_tree. */
1961static void
1962create_loop_tree_node_allocnos (ira_loop_tree_node_t loop_node)
1963{
1964 if (loop_node->bb != NULL)
1965 create_bb_allocnos (bb_node: loop_node);
1966 else if (loop_node != ira_loop_tree_root)
1967 {
1968 int i;
1969 edge_iterator ei;
1970 edge e;
1971
1972 ira_assert (current_loops != NULL);
1973 FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
1974 if (e->src != loop_node->loop->latch)
1975 create_loop_allocnos (e);
1976
1977 auto_vec<edge> edges = get_loop_exit_edges (loop_node->loop);
1978 FOR_EACH_VEC_ELT (edges, i, e)
1979 create_loop_allocnos (e);
1980 }
1981}
1982
1983/* Propagate information about allocnos modified inside the loop given
1984 by its LOOP_TREE_NODE to its parent. */
1985static void
1986propagate_modified_regnos (ira_loop_tree_node_t loop_tree_node)
1987{
1988 if (loop_tree_node == ira_loop_tree_root)
1989 return;
1990 ira_assert (loop_tree_node->bb == NULL);
1991 bitmap_ior_into (loop_tree_node->parent->modified_regnos,
1992 loop_tree_node->modified_regnos);
1993}
1994
1995/* Propagate ALLOCNO_HARD_REG_COSTS from A to PARENT_A. Use SPILL_COST
1996 as the cost of spilling a register throughout A (which we have to do
1997 for PARENT_A allocations that conflict with A). */
1998static void
1999ira_propagate_hard_reg_costs (ira_allocno_t parent_a, ira_allocno_t a,
2000 int spill_cost)
2001{
2002 HARD_REG_SET conflicts = ira_total_conflict_hard_regs (a);
2003 if (ira_caller_save_loop_spill_p (a: parent_a, subloop_a: a, spill_cost))
2004 conflicts |= ira_need_caller_save_regs (a);
2005 conflicts &= ~ira_total_conflict_hard_regs (a: parent_a);
2006
2007 auto costs = ALLOCNO_HARD_REG_COSTS (a);
2008 if (!hard_reg_set_empty_p (x: conflicts))
2009 ALLOCNO_MIGHT_CONFLICT_WITH_PARENT_P (a) = true;
2010 else if (!costs)
2011 return;
2012
2013 auto aclass = ALLOCNO_CLASS (a);
2014 ira_allocate_and_set_costs (vec: &ALLOCNO_HARD_REG_COSTS (parent_a),
2015 aclass, ALLOCNO_CLASS_COST (parent_a));
2016 auto parent_costs = ALLOCNO_HARD_REG_COSTS (parent_a);
2017 for (int i = 0; i < ira_class_hard_regs_num[aclass]; ++i)
2018 if (TEST_HARD_REG_BIT (set: conflicts, ira_class_hard_regs[aclass][i]))
2019 parent_costs[i] += spill_cost;
2020 else if (costs)
2021 /* The cost to A of allocating this register to PARENT_A can't
2022 be more than the cost of spilling the register throughout A. */
2023 parent_costs[i] += MIN (costs[i], spill_cost);
2024}
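
/* A worked example with made-up numbers: suppose hard register R
   conflicts with A inside the loop but not with PARENT_A, and
   SPILL_COST is 40.  Giving R to PARENT_A then forces a spill of
   PARENT_A around A's loop, so 40 is added to PARENT_A's cost of R.
   For a non-conflicting register with costs[i] == 100, only
   MIN (100, 40) == 40 is propagated, because spilling throughout A is
   always an alternative to keeping that register in A.  */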
2025
2026/* Propagate new info about allocno A (see comments about accumulated
2027 info in allocno definition) to the corresponding allocno on upper
2028 loop tree level. So allocnos on upper levels accumulate
2029 information about the corresponding allocnos in nested regions.
2030 The new info here means the allocno info as finally calculated
2031 in this file.  */
2032static void
2033propagate_allocno_info (void)
2034{
2035 int i;
2036 ira_allocno_t a, parent_a;
2037 ira_loop_tree_node_t parent;
2038 enum reg_class aclass;
2039
2040 if (flag_ira_region != IRA_REGION_ALL
2041 && flag_ira_region != IRA_REGION_MIXED)
2042 return;
2043 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
2044 for (a = ira_regno_allocno_map[i];
2045 a != NULL;
2046 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2047 if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
2048 && (parent_a = parent->regno_allocno_map[i]) != NULL
2049 /* There are no caps yet at this point. So use
2050 border_allocnos to find allocnos for the propagation. */
2051 && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE (a)->border_allocnos,
2052 ALLOCNO_NUM (a)))
2053 {
2054 /* Calculate the cost of storing to memory on entry to A's loop,
2055 referencing as memory within A's loop, and restoring from
2056 memory on exit from A's loop. */
2057 ira_loop_border_costs border_costs (a);
2058 int spill_cost = INT_MAX;
2059 if (ira_subloop_allocnos_can_differ_p (a: parent_a))
2060 spill_cost = (border_costs.spill_inside_loop_cost ()
2061 + ALLOCNO_MEMORY_COST (a));
2062
2063 if (! ALLOCNO_BAD_SPILL_P (a))
2064 ALLOCNO_BAD_SPILL_P (parent_a) = false;
2065 ALLOCNO_NREFS (parent_a) += ALLOCNO_NREFS (a);
2066 ALLOCNO_FREQ (parent_a) += ALLOCNO_FREQ (a);
2067
2068 /* If A's allocation can differ from PARENT_A's, we can if necessary
2069 spill PARENT_A on entry to A's loop and restore it afterwards.
2070 Doing that has cost SPILL_COST. */
2071 if (!ira_subloop_allocnos_can_differ_p (a: parent_a))
2072 merge_hard_reg_conflicts (from: a, to: parent_a, total_only: true);
2073
2074 if (!ira_caller_save_loop_spill_p (a: parent_a, subloop_a: a, spill_cost))
2075 {
2076 ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
2077 ALLOCNO_CALLS_CROSSED_NUM (parent_a)
2078 += ALLOCNO_CALLS_CROSSED_NUM (a);
2079 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
2080 += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
2081 ALLOCNO_CROSSED_CALLS_ABIS (parent_a)
2082 |= ALLOCNO_CROSSED_CALLS_ABIS (a);
2083 ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (parent_a)
2084 |= ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a);
2085 }
2086 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
2087 += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
2088 aclass = ALLOCNO_CLASS (a);
2089 ira_assert (aclass == ALLOCNO_CLASS (parent_a));
2090 ira_propagate_hard_reg_costs (parent_a, a, spill_cost);
2091 ira_allocate_and_accumulate_costs
2092 (vec: &ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a),
2093 aclass,
2094 ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
2095 /* The cost to A of allocating a register to PARENT_A can't be
2096 more than the cost of spilling the register throughout A. */
2097 ALLOCNO_CLASS_COST (parent_a)
2098 += MIN (ALLOCNO_CLASS_COST (a), spill_cost);
2099 ALLOCNO_MEMORY_COST (parent_a) += ALLOCNO_MEMORY_COST (a);
2100 }
2101}
2102
2103/* Create allocnos corresponding to pseudo-registers in the current
2104 function. Traverse the loop tree for this. */
2105static void
2106create_allocnos (void)
2107{
2108 /* We need to process the basic blocks first in order to correctly
2109 link allocnos by the member next_regno_allocno.  */
2110 ira_traverse_loop_tree (bb_p: true, loop_node: ira_loop_tree_root,
2111 preorder_func: create_loop_tree_node_allocnos, NULL);
2112 if (optimize)
2113 ira_traverse_loop_tree (bb_p: false, loop_node: ira_loop_tree_root, NULL,
2114 postorder_func: propagate_modified_regnos);
2115}
2116
2117
2118
2119/* This page contains functions to remove some regions from separate
2120 register allocation.  We remove regions whose separate allocation
2121 will hardly improve the result. As a result we speed up regional
2122 register allocation. */
2123
2124/* The function changes the object in the range list given by R to OBJ.  */
2125static void
2126change_object_in_range_list (live_range_t r, ira_object_t obj)
2127{
2128 for (; r != NULL; r = r->next)
2129 r->object = obj;
2130}
2131
2132/* Move all live ranges associated with allocno FROM to allocno TO. */
2133static void
2134move_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
2135{
2136 int i;
2137 int n = ALLOCNO_NUM_OBJECTS (from);
2138
2139 gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));
2140
2141 for (i = 0; i < n; i++)
2142 {
2143 ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
2144 ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
2145 live_range_t lr = OBJECT_LIVE_RANGES (from_obj);
2146
2147 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
2148 {
2149 fprintf (stream: ira_dump_file,
2150 format: " Moving ranges of a%dr%d to a%dr%d: ",
2151 ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
2152 ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
2153 ira_print_live_range_list (ira_dump_file, lr);
2154 }
2155 change_object_in_range_list (r: lr, obj: to_obj);
2156 OBJECT_LIVE_RANGES (to_obj)
2157 = ira_merge_live_ranges (r1: lr, OBJECT_LIVE_RANGES (to_obj));
2158 OBJECT_LIVE_RANGES (from_obj) = NULL;
2159 }
2160}
2161
2162static void
2163copy_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
2164{
2165 int i;
2166 int n = ALLOCNO_NUM_OBJECTS (from);
2167
2168 gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));
2169
2170 for (i = 0; i < n; i++)
2171 {
2172 ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
2173 ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
2174 live_range_t lr = OBJECT_LIVE_RANGES (from_obj);
2175
2176 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
2177 {
2178 fprintf (stream: ira_dump_file, format: " Copying ranges of a%dr%d to a%dr%d: ",
2179 ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
2180 ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
2181 ira_print_live_range_list (ira_dump_file, lr);
2182 }
2183 lr = ira_copy_live_range_list (r: lr);
2184 change_object_in_range_list (r: lr, obj: to_obj);
2185 OBJECT_LIVE_RANGES (to_obj)
2186 = ira_merge_live_ranges (r1: lr, OBJECT_LIVE_RANGES (to_obj));
2187 }
2188}
2189
2190/* Return TRUE if NODE represents a loop with low register
2191 pressure. */
2192static bool
2193low_pressure_loop_node_p (ira_loop_tree_node_t node)
2194{
2195 int i;
2196 enum reg_class pclass;
2197
2198 if (node->bb != NULL)
2199 return false;
2200
2201 for (i = 0; i < ira_pressure_classes_num; i++)
2202 {
2203 pclass = ira_pressure_classes[i];
2204 if (node->reg_pressure[pclass] > ira_class_hard_regs_num[pclass]
2205 && ira_class_hard_regs_num[pclass] > 1)
2206 return false;
2207 }
2208 return true;
2209}
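
/* For example, on a target with 16 allocatable hard registers in some
   pressure class, a node whose reg_pressure for that class is at most
   16 passes the test above; classes with at most one hard register are
   ignored, since nothing can be gained from a separate region there.  */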
2210
2211#ifdef STACK_REGS
2212/* Return TRUE if LOOP has a complex entry or exit edge.  We don't
2213 form a region from such a loop if the target uses stack registers,
2214 because reg-stack.cc cannot deal with such edges.  */
2215static bool
2216loop_with_complex_edge_p (class loop *loop)
2217{
2218 int i;
2219 edge_iterator ei;
2220 edge e;
2221 bool res;
2222
2223 FOR_EACH_EDGE (e, ei, loop->header->preds)
2224 if (e->flags & EDGE_EH)
2225 return true;
2226 auto_vec<edge> edges = get_loop_exit_edges (loop);
2227 res = false;
2228 FOR_EACH_VEC_ELT (edges, i, e)
2229 if (e->flags & EDGE_COMPLEX)
2230 {
2231 res = true;
2232 break;
2233 }
2234 return res;
2235}
2236#endif
2237
2238/* Sort loops for marking them for removal.  We put already marked
2239 loops first, then less frequent loops next, and then outer
2240 loops.  */
2241static int
2242loop_compare_func (const void *v1p, const void *v2p)
2243{
2244 int diff;
2245 ira_loop_tree_node_t l1 = *(const ira_loop_tree_node_t *) v1p;
2246 ira_loop_tree_node_t l2 = *(const ira_loop_tree_node_t *) v2p;
2247
2248 ira_assert (l1->parent != NULL && l2->parent != NULL);
2249 if (l1->to_remove_p && ! l2->to_remove_p)
2250 return -1;
2251 if (! l1->to_remove_p && l2->to_remove_p)
2252 return 1;
2253 if ((diff = l1->loop->header->count.to_frequency (cfun)
2254 - l2->loop->header->count.to_frequency (cfun)) != 0)
2255 return diff;
2256 if ((diff = (int) loop_depth (loop: l1->loop) - (int) loop_depth (loop: l2->loop)) != 0)
2257 return diff;
2258 /* Make sorting stable. */
2259 return l1->loop_num - l2->loop_num;
2260}
2261
2262/* Mark loops which should be removed from regional allocation.  We
2263 remove a loop with low register pressure inside another region that
2264 also has low register pressure.  In this case a separate allocation
2265 of the loop hardly helps (for an irregular register file architecture
2266 it could help by choosing a better hard register in the loop, but we
2267 prefer faster allocation even in this case).  We also remove cheap
2268 loops if there are more than param_ira_max_loops_num of them.  Loops
2269 with EH entry or exit edges are removed too, because the allocation
2270 might require putting pseudo moves on the EH edges (we could still do
2271 this for pseudos with caller-saved hard registers in some cases, but
2272 it is impossible to say here, or during the top-down allocation pass,
2273 which hard registers the pseudos will finally get).  */
2274static void
2275mark_loops_for_removal (void)
2276{
2277 int i, n;
2278 ira_loop_tree_node_t *sorted_loops;
2279 loop_p loop;
2280
2281 ira_assert (current_loops != NULL);
2282 sorted_loops
2283 = (ira_loop_tree_node_t *) ira_allocate (sizeof (ira_loop_tree_node_t)
2284 * number_of_loops (cfun));
2285 for (n = i = 0; vec_safe_iterate (v: get_loops (cfun), ix: i, ptr: &loop); i++)
2286 if (ira_loop_nodes[i].regno_allocno_map != NULL)
2287 {
2288 if (ira_loop_nodes[i].parent == NULL)
2289 {
2290 /* Don't remove the root. */
2291 ira_loop_nodes[i].to_remove_p = false;
2292 continue;
2293 }
2294 sorted_loops[n++] = &ira_loop_nodes[i];
2295 ira_loop_nodes[i].to_remove_p
2296 = ((low_pressure_loop_node_p (node: ira_loop_nodes[i].parent)
2297 && low_pressure_loop_node_p (node: &ira_loop_nodes[i]))
2298#ifdef STACK_REGS
2299 || loop_with_complex_edge_p (loop: ira_loop_nodes[i].loop)
2300#endif
2301 );
2302 }
2303 qsort (sorted_loops, n, sizeof (ira_loop_tree_node_t), loop_compare_func);
2304 for (i = 0; i < n - param_ira_max_loops_num; i++)
2305 {
2306 sorted_loops[i]->to_remove_p = true;
2307 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
2308 fprintf
2309 (stream: ira_dump_file,
2310 format: " Mark loop %d (header %d, freq %d, depth %d) for removal (%s)\n",
2311 sorted_loops[i]->loop_num, sorted_loops[i]->loop->header->index,
2312 sorted_loops[i]->loop->header->count.to_frequency (cfun),
2313 loop_depth (loop: sorted_loops[i]->loop),
2314 low_pressure_loop_node_p (node: sorted_loops[i]->parent)
2315 && low_pressure_loop_node_p (node: sorted_loops[i])
2316 ? "low pressure" : "cheap loop");
2317 }
2318 ira_free (addr: sorted_loops);
2319}
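
/* For instance, with --param ira-max-loops-num=30 (an example value),
   at most 30 loops survive as separate regions; the sort above ensures
   that the loops discarded first are the already-marked and the least
   frequently executed ones.  */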
2320
2321/* Mark all loops but the root for removal.  */
2322static void
2323mark_all_loops_for_removal (void)
2324{
2325 int i;
2326 loop_p loop;
2327
2328 ira_assert (current_loops != NULL);
2329 FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
2330 if (ira_loop_nodes[i].regno_allocno_map != NULL)
2331 {
2332 if (ira_loop_nodes[i].parent == NULL)
2333 {
2334 /* Don't remove the root. */
2335 ira_loop_nodes[i].to_remove_p = false;
2336 continue;
2337 }
2338 ira_loop_nodes[i].to_remove_p = true;
2339 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
2340 fprintf
2341 (stream: ira_dump_file,
2342 format: " Mark loop %d (header %d, freq %d, depth %d) for removal\n",
2343 ira_loop_nodes[i].loop_num,
2344 ira_loop_nodes[i].loop->header->index,
2345 ira_loop_nodes[i].loop->header->count.to_frequency (cfun),
2346 loop_depth (loop: ira_loop_nodes[i].loop));
2347 }
2348}
2349
2350/* Definition of vector of loop tree nodes. */
2351
2352/* Vec containing references to all removed loop tree nodes. */
2353static vec<ira_loop_tree_node_t> removed_loop_vec;
2354
2355/* Vec containing references to all children of loop tree nodes. */
2356static vec<ira_loop_tree_node_t> children_vec;
2357
2358/* Remove subregions of NODE if their separate allocation will not
2359 improve the result. */
2360static void
2361remove_unnecessary_loop_nodes_from_loop_tree (ira_loop_tree_node_t node)
2362{
2363 unsigned int start;
2364 bool remove_p;
2365 ira_loop_tree_node_t subnode;
2366
2367 remove_p = node->to_remove_p;
2368 if (! remove_p)
2369 children_vec.safe_push (obj: node);
2370 start = children_vec.length ();
2371 for (subnode = node->children; subnode != NULL; subnode = subnode->next)
2372 if (subnode->bb == NULL)
2373 remove_unnecessary_loop_nodes_from_loop_tree (node: subnode);
2374 else
2375 children_vec.safe_push (obj: subnode);
2376 node->children = node->subloops = NULL;
2377 if (remove_p)
2378 {
2379 removed_loop_vec.safe_push (obj: node);
2380 return;
2381 }
2382 while (children_vec.length () > start)
2383 {
2384 subnode = children_vec.pop ();
2385 subnode->parent = node;
2386 subnode->next = node->children;
2387 node->children = subnode;
2388 if (subnode->bb == NULL)
2389 {
2390 subnode->subloop_next = node->subloops;
2391 node->subloops = subnode;
2392 }
2393 }
2394}
2395
2396/* Return TRUE if NODE is inside PARENT. */
2397static bool
2398loop_is_inside_p (ira_loop_tree_node_t node, ira_loop_tree_node_t parent)
2399{
2400 for (node = node->parent; node != NULL; node = node->parent)
2401 if (node == parent)
2402 return true;
2403 return false;
2404}
2405
2406/* Sort allocnos according to their order in the regno allocno list.  */
2407static int
2408regno_allocno_order_compare_func (const void *v1p, const void *v2p)
2409{
2410 ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
2411 ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
2412 ira_loop_tree_node_t n1 = ALLOCNO_LOOP_TREE_NODE (a1);
2413 ira_loop_tree_node_t n2 = ALLOCNO_LOOP_TREE_NODE (a2);
2414
2415 if (loop_is_inside_p (node: n1, parent: n2))
2416 return -1;
2417 else if (loop_is_inside_p (node: n2, parent: n1))
2418 return 1;
2419 /* If allocnos are equally good, sort by allocno numbers, so that
2420 the results of qsort leave nothing to chance. We put allocnos
2421 with higher numbers first in the list because that is the original
2422 order for allocnos from loops at the same level.  */
2423 return ALLOCNO_NUM (a2) - ALLOCNO_NUM (a1);
2424}
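
/* For example, if a pseudo has allocnos in a nested loop L2, in its
   enclosing loop L1 and in the root region, the rebuilt list is
   ordered L2, L1, root: every allocno precedes the allocnos of the
   regions enclosing it, which is the order propagate_allocno_info
   relies on when accumulating child info into parents.  */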
2425
2426/* This array is used to sort allocnos to restore allocno order in
2427 the regno allocno list. */
2428static ira_allocno_t *regno_allocnos;
2429
2430/* Restore allocno order for REGNO in the regno allocno list. */
2431static void
2432ira_rebuild_regno_allocno_list (int regno)
2433{
2434 int i, n;
2435 ira_allocno_t a;
2436
2437 for (n = 0, a = ira_regno_allocno_map[regno];
2438 a != NULL;
2439 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2440 regno_allocnos[n++] = a;
2441 ira_assert (n > 0);
2442 qsort (regno_allocnos, n, sizeof (ira_allocno_t),
2443 regno_allocno_order_compare_func);
2444 for (i = 1; i < n; i++)
2445 ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[i - 1]) = regno_allocnos[i];
2446 ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[n - 1]) = NULL;
2447 ira_regno_allocno_map[regno] = regno_allocnos[0];
2448 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
2449 fprintf (stream: ira_dump_file, format: " Rebuilding regno allocno list for %d\n", regno);
2450}
2451
2452/* Propagate info from allocno FROM_A to allocno A. */
2453static void
2454propagate_some_info_from_allocno (ira_allocno_t a, ira_allocno_t from_a)
2455{
2456 enum reg_class aclass;
2457
2458 merge_hard_reg_conflicts (from: from_a, to: a, total_only: false);
2459 ALLOCNO_NREFS (a) += ALLOCNO_NREFS (from_a);
2460 ALLOCNO_FREQ (a) += ALLOCNO_FREQ (from_a);
2461 ALLOCNO_CALL_FREQ (a) += ALLOCNO_CALL_FREQ (from_a);
2462 ALLOCNO_CALLS_CROSSED_NUM (a) += ALLOCNO_CALLS_CROSSED_NUM (from_a);
2463 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a)
2464 += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (from_a);
2465 ALLOCNO_CROSSED_CALLS_ABIS (a) |= ALLOCNO_CROSSED_CALLS_ABIS (from_a);
2466 ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a)
2467 |= ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (from_a);
2468
2469 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)
2470 += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (from_a);
2471 if (! ALLOCNO_BAD_SPILL_P (from_a))
2472 ALLOCNO_BAD_SPILL_P (a) = false;
2473 aclass = ALLOCNO_CLASS (from_a);
2474 ira_assert (aclass == ALLOCNO_CLASS (a));
2475 ira_allocate_and_accumulate_costs (vec: &ALLOCNO_HARD_REG_COSTS (a), aclass,
2476 ALLOCNO_HARD_REG_COSTS (from_a));
2477 ira_allocate_and_accumulate_costs (vec: &ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
2478 aclass,
2479 ALLOCNO_CONFLICT_HARD_REG_COSTS (from_a));
2480 ALLOCNO_CLASS_COST (a) += ALLOCNO_CLASS_COST (from_a);
2481 ALLOCNO_MEMORY_COST (a) += ALLOCNO_MEMORY_COST (from_a);
2482}
2483
2484/* Remove allocnos from loops that have been removed from
2485 consideration for allocation.  */
2486static void
2487remove_unnecessary_allocnos (void)
2488{
2489 int regno;
2490 bool merged_p, rebuild_p;
2491 ira_allocno_t a, prev_a, next_a, parent_a;
2492 ira_loop_tree_node_t a_node, parent;
2493
2494 merged_p = false;
2495 regno_allocnos = NULL;
2496 for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
2497 {
2498 rebuild_p = false;
2499 for (prev_a = NULL, a = ira_regno_allocno_map[regno];
2500 a != NULL;
2501 a = next_a)
2502 {
2503 next_a = ALLOCNO_NEXT_REGNO_ALLOCNO (a);
2504 a_node = ALLOCNO_LOOP_TREE_NODE (a);
2505 if (! a_node->to_remove_p)
2506 prev_a = a;
2507 else
2508 {
2509 for (parent = a_node->parent;
2510 (parent_a = parent->regno_allocno_map[regno]) == NULL
2511 && parent->to_remove_p;
2512 parent = parent->parent)
2513 ;
2514 if (parent_a == NULL)
2515 {
2516 /* There are no allocnos with the same regno in
2517 upper region -- just move the allocno to the
2518 upper region. */
2519 prev_a = a;
2520 ALLOCNO_LOOP_TREE_NODE (a) = parent;
2521 parent->regno_allocno_map[regno] = a;
2522 bitmap_set_bit (parent->all_allocnos, ALLOCNO_NUM (a));
2523 rebuild_p = true;
2524 }
2525 else
2526 {
2527 /* Remove the allocno and update the info of the
2528 allocno in the upper region.  */
2529 if (prev_a == NULL)
2530 ira_regno_allocno_map[regno] = next_a;
2531 else
2532 ALLOCNO_NEXT_REGNO_ALLOCNO (prev_a) = next_a;
2533 move_allocno_live_ranges (from: a, to: parent_a);
2534 merged_p = true;
2535 propagate_some_info_from_allocno (a: parent_a, from_a: a);
2536 /* Remove it from the corresponding regno allocno
2537 map to avoid propagating info from a subsequent
2538 allocno into this already removed allocno.  */
2539 a_node->regno_allocno_map[regno] = NULL;
2540 ira_remove_allocno_prefs (a);
2541 finish_allocno (a);
2542 }
2543 }
2544 }
2545 if (rebuild_p)
2546 /* We need to restore the order in the regno allocno list.  */
2547 {
2548 if (regno_allocnos == NULL)
2549 regno_allocnos
2550 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
2551 * ira_allocnos_num);
2552 ira_rebuild_regno_allocno_list (regno);
2553 }
2554 }
2555 if (merged_p)
2556 ira_rebuild_start_finish_chains ();
2557 if (regno_allocnos != NULL)
2558 ira_free (addr: regno_allocnos);
2559}
2560
2561/* Remove allocnos from all loops but the root. */
2562static void
2563remove_low_level_allocnos (void)
2564{
2565 int regno;
2566 bool merged_p, propagate_p;
2567 ira_allocno_t a, top_a;
2568 ira_loop_tree_node_t a_node, parent;
2569 ira_allocno_iterator ai;
2570
2571 merged_p = false;
2572 FOR_EACH_ALLOCNO (a, ai)
2573 {
2574 a_node = ALLOCNO_LOOP_TREE_NODE (a);
2575 if (a_node == ira_loop_tree_root || ALLOCNO_CAP_MEMBER (a) != NULL)
2576 continue;
2577 regno = ALLOCNO_REGNO (a);
2578 if ((top_a = ira_loop_tree_root->regno_allocno_map[regno]) == NULL)
2579 {
2580 ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
2581 ira_loop_tree_root->regno_allocno_map[regno] = a;
2582 continue;
2583 }
2584 propagate_p = a_node->parent->regno_allocno_map[regno] == NULL;
2585 /* Remove the allocno and update the info of the allocno in the
2586 upper region.  */
2587 move_allocno_live_ranges (from: a, to: top_a);
2588 merged_p = true;
2589 if (propagate_p)
2590 propagate_some_info_from_allocno (a: top_a, from_a: a);
2591 }
2592 FOR_EACH_ALLOCNO (a, ai)
2593 {
2594 a_node = ALLOCNO_LOOP_TREE_NODE (a);
2595 if (a_node == ira_loop_tree_root)
2596 continue;
2597 parent = a_node->parent;
2598 regno = ALLOCNO_REGNO (a);
2599 if (ALLOCNO_CAP_MEMBER (a) != NULL)
2600 ira_assert (ALLOCNO_CAP (a) != NULL);
2601 else if (ALLOCNO_CAP (a) == NULL)
2602 ira_assert (parent->regno_allocno_map[regno] != NULL);
2603 }
2604 FOR_EACH_ALLOCNO (a, ai)
2605 {
2606 regno = ALLOCNO_REGNO (a);
2607 if (ira_loop_tree_root->regno_allocno_map[regno] == a)
2608 {
2609 ira_object_t obj;
2610 ira_allocno_object_iterator oi;
2611
2612 ira_regno_allocno_map[regno] = a;
2613 ALLOCNO_NEXT_REGNO_ALLOCNO (a) = NULL;
2614 ALLOCNO_CAP_MEMBER (a) = NULL;
2615 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2616 OBJECT_CONFLICT_HARD_REGS (obj)
2617 = OBJECT_TOTAL_CONFLICT_HARD_REGS (obj);
2618#ifdef STACK_REGS
2619 if (ALLOCNO_TOTAL_NO_STACK_REG_P (a))
2620 ALLOCNO_NO_STACK_REG_P (a) = true;
2621#endif
2622 }
2623 else
2624 {
2625 ira_remove_allocno_prefs (a);
2626 finish_allocno (a);
2627 }
2628 }
2629 if (merged_p)
2630 ira_rebuild_start_finish_chains ();
2631}
2632
2633/* Remove loops from consideration.  We remove all loops except the
2634 root if ALL_P; otherwise we remove loops for which a separate
2635 allocation will not improve the result.  We have to do this after
2636 allocno creation and the evaluation of allocno costs and classes,
2637 because only then is the register pressure known.  */
2638static void
2639remove_unnecessary_regions (bool all_p)
2640{
2641 if (current_loops == NULL)
2642 return;
2643 if (all_p)
2644 mark_all_loops_for_removal ();
2645 else
2646 mark_loops_for_removal ();
2647 children_vec.create (last_basic_block_for_fn (cfun)
2648 + number_of_loops (cfun));
2649 removed_loop_vec.create (last_basic_block_for_fn (cfun)
2650 + number_of_loops (cfun));
2651 remove_unnecessary_loop_nodes_from_loop_tree (node: ira_loop_tree_root);
2652 children_vec.release ();
2653 if (all_p)
2654 remove_low_level_allocnos ();
2655 else
2656 remove_unnecessary_allocnos ();
2657 while (removed_loop_vec.length () > 0)
2658 finish_loop_tree_node (loop: removed_loop_vec.pop ());
2659 removed_loop_vec.release ();
2660}
2661
2662
2663
2664/* At this point a true value of the allocno attribute bad_spill_p
2665 means that there is an insn where the allocno occurs and where the
2666 allocno cannot be used as memory.  The function updates the
2667 attribute; afterwards it can be true only for allocnos which cannot
2668 be used as memory in an insn and in whose live ranges there are no
2669 deaths of other allocnos.  Spilling allocnos with a true value will
2670 not improve the code, because it will not make other allocnos
2671 colorable, and additional reloads for the corresponding pseudo will
2672 be generated by the reload pass for each insn in which it occurs.
2673
2674 This is a trick mentioned in the classic article by Chaitin et al.
2675 which is frequently omitted in other implementations of register
2676 allocation based on graph coloring.  */
2677static void
2678update_bad_spill_attribute (void)
2679{
2680 int i;
2681 ira_allocno_t a;
2682 ira_allocno_iterator ai;
2683 ira_allocno_object_iterator aoi;
2684 ira_object_t obj;
2685 live_range_t r;
2686 enum reg_class aclass;
2687 bitmap_head dead_points[N_REG_CLASSES];
2688
2689 for (i = 0; i < ira_allocno_classes_num; i++)
2690 {
2691 aclass = ira_allocno_classes[i];
2692 bitmap_initialize (head: &dead_points[aclass], obstack: &reg_obstack);
2693 }
2694 FOR_EACH_ALLOCNO (a, ai)
2695 {
2696 aclass = ALLOCNO_CLASS (a);
2697 if (aclass == NO_REGS)
2698 continue;
2699 FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
2700 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
2701 bitmap_set_bit (&dead_points[aclass], r->finish);
2702 }
2703 FOR_EACH_ALLOCNO (a, ai)
2704 {
2705 aclass = ALLOCNO_CLASS (a);
2706 if (aclass == NO_REGS)
2707 continue;
2708 if (! ALLOCNO_BAD_SPILL_P (a))
2709 continue;
2710 FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
2711 {
2712 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
2713 {
2714 for (i = r->start + 1; i < r->finish; i++)
2715 if (bitmap_bit_p (&dead_points[aclass], i))
2716 break;
2717 if (i < r->finish)
2718 break;
2719 }
2720 if (r != NULL)
2721 {
2722 ALLOCNO_BAD_SPILL_P (a) = false;
2723 break;
2724 }
2725 }
2726 }
2727 for (i = 0; i < ira_allocno_classes_num; i++)
2728 {
2729 aclass = ira_allocno_classes[i];
2730 bitmap_clear (&dead_points[aclass]);
2731 }
2732}
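
/* Illustration: if allocno A cannot live in memory at some insn and no
   allocno of its class dies strictly inside A's live ranges, then
   spilling A would free a hard register only at points where no
   conflicting allocno needs one; ALLOCNO_BAD_SPILL_P (A) therefore
   stays true and the coloring pass will prefer other spill candidates.  */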
2733
2734
2735
2736/* Set up minimal and maximal live range points for allocnos. */
2737static void
2738setup_min_max_allocno_live_range_point (void)
2739{
2740 int i;
2741 ira_allocno_t a, parent_a, cap;
2742 ira_allocno_iterator ai;
2743#ifdef ENABLE_IRA_CHECKING
2744 ira_object_iterator oi;
2745 ira_object_t obj;
2746#endif
2747 live_range_t r;
2748 ira_loop_tree_node_t parent;
2749
2750 FOR_EACH_ALLOCNO (a, ai)
2751 {
2752 int n = ALLOCNO_NUM_OBJECTS (a);
2753
2754 for (i = 0; i < n; i++)
2755 {
2756 ira_object_t obj = ALLOCNO_OBJECT (a, i);
2757 r = OBJECT_LIVE_RANGES (obj);
2758 if (r == NULL)
2759 continue;
2760 OBJECT_MAX (obj) = r->finish;
2761 for (; r->next != NULL; r = r->next)
2762 ;
2763 OBJECT_MIN (obj) = r->start;
2764 }
2765 }
2766 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
2767 for (a = ira_regno_allocno_map[i];
2768 a != NULL;
2769 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2770 {
2771 int j;
2772 int n = ALLOCNO_NUM_OBJECTS (a);
2773
2774 for (j = 0; j < n; j++)
2775 {
2776 ira_object_t obj = ALLOCNO_OBJECT (a, j);
2777 ira_object_t parent_obj;
2778
2779 if (OBJECT_MAX (obj) < 0)
2780 {
2781 /* The object is not used and hence does not live. */
2782 ira_assert (OBJECT_LIVE_RANGES (obj) == NULL);
2783 OBJECT_MAX (obj) = 0;
2784 OBJECT_MIN (obj) = 1;
2785 continue;
2786 }
2787 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
2788 /* Accumulation of range info. */
2789 if (ALLOCNO_CAP (a) != NULL)
2790 {
2791 for (cap = ALLOCNO_CAP (a); cap != NULL; cap = ALLOCNO_CAP (cap))
2792 {
2793 ira_object_t cap_obj = ALLOCNO_OBJECT (cap, j);
2794 if (OBJECT_MAX (cap_obj) < OBJECT_MAX (obj))
2795 OBJECT_MAX (cap_obj) = OBJECT_MAX (obj);
2796 if (OBJECT_MIN (cap_obj) > OBJECT_MIN (obj))
2797 OBJECT_MIN (cap_obj) = OBJECT_MIN (obj);
2798 }
2799 continue;
2800 }
2801 if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL)
2802 continue;
2803 parent_a = parent->regno_allocno_map[i];
2804 parent_obj = ALLOCNO_OBJECT (parent_a, j);
2805 if (OBJECT_MAX (parent_obj) < OBJECT_MAX (obj))
2806 OBJECT_MAX (parent_obj) = OBJECT_MAX (obj);
2807 if (OBJECT_MIN (parent_obj) > OBJECT_MIN (obj))
2808 OBJECT_MIN (parent_obj) = OBJECT_MIN (obj);
2809 }
2810 }
2811#ifdef ENABLE_IRA_CHECKING
2812 FOR_EACH_OBJECT (obj, oi)
2813 {
2814 if ((OBJECT_MIN (obj) >= 0 && OBJECT_MIN (obj) <= ira_max_point)
2815 && (OBJECT_MAX (obj) >= 0 && OBJECT_MAX (obj) <= ira_max_point))
2816 continue;
2817 gcc_unreachable ();
2818 }
2819#endif
2820}
2821
2822/* Sort objects according to their live ranges.  Objects are ordered
2823 according to their range start (min).  Objects with the same start
2824 are ordered according to their range finish (max).  Remaining ties
2825 are broken by allocno number, so that the results of qsort leave
2826 nothing to chance.  */
2827static int
2828object_range_compare_func (const void *v1p, const void *v2p)
2829{
2830 int diff;
2831 ira_object_t obj1 = *(const ira_object_t *) v1p;
2832 ira_object_t obj2 = *(const ira_object_t *) v2p;
2833 ira_allocno_t a1 = OBJECT_ALLOCNO (obj1);
2834 ira_allocno_t a2 = OBJECT_ALLOCNO (obj2);
2835
2836 if ((diff = OBJECT_MIN (obj1) - OBJECT_MIN (obj2)) != 0)
2837 return diff;
2838 if ((diff = OBJECT_MAX (obj1) - OBJECT_MAX (obj2)) != 0)
2839 return diff;
2840 return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
2841}
2842
2843/* Sort ira_object_id_map and set up the conflict ids of allocnos.  */
2844static void
2845sort_conflict_id_map (void)
2846{
2847 int i, num;
2848 ira_allocno_t a;
2849 ira_allocno_iterator ai;
2850
2851 num = 0;
2852 FOR_EACH_ALLOCNO (a, ai)
2853 {
2854 ira_allocno_object_iterator oi;
2855 ira_object_t obj;
2856
2857 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2858 ira_object_id_map[num++] = obj;
2859 }
2860 if (num > 1)
2861 qsort (ira_object_id_map, num, sizeof (ira_object_t),
2862 object_range_compare_func);
2863 for (i = 0; i < num; i++)
2864 {
2865 ira_object_t obj = ira_object_id_map[i];
2866
2867 gcc_assert (obj != NULL);
2868 OBJECT_CONFLICT_ID (obj) = i;
2869 }
2870 for (i = num; i < ira_objects_num; i++)
2871 ira_object_id_map[i] = NULL;
2872}
2873
2874/* Set up the minimal and maximal conflict ids of allocnos with which
2875 a given allocno can conflict.  */
2876static void
2877setup_min_max_conflict_allocno_ids (void)
2878{
2879 int aclass;
2880 int i, j, min, max, start, finish, first_not_finished, filled_area_start;
2881 int *live_range_min, *last_lived;
2882 int word0_min, word0_max;
2883 ira_allocno_t a;
2884 ira_allocno_iterator ai;
2885
2886 live_range_min = (int *) ira_allocate (sizeof (int) * ira_objects_num);
2887 aclass = -1;
2888 first_not_finished = -1;
2889 for (i = 0; i < ira_objects_num; i++)
2890 {
2891 ira_object_t obj = ira_object_id_map[i];
2892
2893 if (obj == NULL)
2894 continue;
2895
2896 a = OBJECT_ALLOCNO (obj);
2897
2898 if (aclass < 0)
2899 {
2900 aclass = ALLOCNO_CLASS (a);
2901 min = i;
2902 first_not_finished = i;
2903 }
2904 else
2905 {
2906 start = OBJECT_MIN (obj);
2907 /* If we skip an allocno, the allocnos with smaller ids will
2908 also be skipped because of the secondary sort on the
2909 range finishes (see function
2910 object_range_compare_func).  */
2911 while (first_not_finished < i
2912 && start > OBJECT_MAX (ira_object_id_map
2913 [first_not_finished]))
2914 first_not_finished++;
2915 min = first_not_finished;
2916 }
2917 if (min == i)
2918 /* We could increase min further in this case but it is good
2919 enough. */
2920 min++;
2921 live_range_min[i] = OBJECT_MIN (obj);
2922 OBJECT_MIN (obj) = min;
2923 }
2924 last_lived = (int *) ira_allocate (sizeof (int) * ira_max_point);
2925 aclass = -1;
2926 filled_area_start = -1;
2927 for (i = ira_objects_num - 1; i >= 0; i--)
2928 {
2929 ira_object_t obj = ira_object_id_map[i];
2930
2931 if (obj == NULL)
2932 continue;
2933
2934 a = OBJECT_ALLOCNO (obj);
2935 if (aclass < 0)
2936 {
2937 aclass = ALLOCNO_CLASS (a);
2938 for (j = 0; j < ira_max_point; j++)
2939 last_lived[j] = -1;
2940 filled_area_start = ira_max_point;
2941 }
2942 min = live_range_min[i];
2943 finish = OBJECT_MAX (obj);
2944 max = last_lived[finish];
2945 if (max < 0)
2946 /* We could decrease max further in this case but it is good
2947 enough. */
2948 max = OBJECT_CONFLICT_ID (obj) - 1;
2949 OBJECT_MAX (obj) = max;
2950 /* In filling, we can go beyond the finish of A's range to
2951 recognize intersections quickly: if the range of a subsequently
2952 processed allocno (which has a smaller conflict id) finishes
2953 beyond the finish of A's range, then the two ranges definitely
2954 intersect.  The reason is that allocnos with bigger conflict ids
2955 have range starts no smaller than those of allocnos with
2956 smaller ids.  */
2957 for (j = min; j < filled_area_start; j++)
2958 last_lived[j] = i;
2959 filled_area_start = min;
2960 }
2961 ira_free (addr: last_lived);
2962 ira_free (addr: live_range_min);
2963
2964 /* For allocnos with more than one object, we may later record
2965 extra conflicts in subobject 0 that we cannot really know about
2966 here.  For now, simply widen the min/max range of these subobjects.  */
2967
2968 word0_min = INT_MAX;
2969 word0_max = INT_MIN;
2970
2971 FOR_EACH_ALLOCNO (a, ai)
2972 {
2973 int n = ALLOCNO_NUM_OBJECTS (a);
2974 ira_object_t obj0;
2975
2976 if (n < 2)
2977 continue;
2978 obj0 = ALLOCNO_OBJECT (a, 0);
2979 if (OBJECT_CONFLICT_ID (obj0) < word0_min)
2980 word0_min = OBJECT_CONFLICT_ID (obj0);
2981 if (OBJECT_CONFLICT_ID (obj0) > word0_max)
2982 word0_max = OBJECT_CONFLICT_ID (obj0);
2983 }
2984 FOR_EACH_ALLOCNO (a, ai)
2985 {
2986 int n = ALLOCNO_NUM_OBJECTS (a);
2987 ira_object_t obj0;
2988
2989 if (n < 2)
2990 continue;
2991 obj0 = ALLOCNO_OBJECT (a, 0);
2992 if (OBJECT_MIN (obj0) > word0_min)
2993 OBJECT_MIN (obj0) = word0_min;
2994 if (OBJECT_MAX (obj0) < word0_max)
2995 OBJECT_MAX (obj0) = word0_max;
2996 }
2997}
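
/* Illustrative example: let three objects of one class be sorted by
   range start as o0 = [1,10], o1 = [4,6] and o2 = [12,20], receiving
   conflict ids 0, 1 and 2.  Both o0 and o1 finish before o2 starts, so
   the forward pass raises OBJECT_MIN (o2) past their ids: no object
   with a smaller conflict id can share a program point with o2, and
   the conflict builder can skip those ids without comparing ranges.  */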
2998
2999
3000
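/* Create cap allocnos for allocnos on lower loop tree levels that live
   only inside their loop; allocnos live on the loop border keep
   regular parent allocnos instead.  */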
3001static void
3002create_caps (void)
3003{
3004 ira_allocno_t a;
3005 ira_allocno_iterator ai;
3006 ira_loop_tree_node_t loop_tree_node;
3007
3008 FOR_EACH_ALLOCNO (a, ai)
3009 {
3010 if (ALLOCNO_LOOP_TREE_NODE (a) == ira_loop_tree_root)
3011 continue;
3012 if (ALLOCNO_CAP_MEMBER (a) != NULL)
3013 create_cap_allocno (a);
3014 else if (ALLOCNO_CAP (a) == NULL)
3015 {
3016 loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
3017 if (!bitmap_bit_p (loop_tree_node->border_allocnos, ALLOCNO_NUM (a)))
3018 create_cap_allocno (a);
3019 }
3020 }
3021}
3022
3023
3024
3025/* This page contains the code transforming the multi-region internal
3026 representation (IR) into the one-region IR which is necessary for
3027 reload.  This transformation is called IR flattening.  We could
3028 simply rebuild the IR for one region, but we don't do it because
3029 that takes a lot of time.  */
3030
3031/* Map: regno -> the allocno which will finally represent the regno
3032 in the one-region IR.  */
3033static ira_allocno_t *regno_top_level_allocno_map;
3034
3035/* Find the allocno that corresponds to A at a level one higher up in the
3036 loop tree. Returns NULL if A is a cap, or if it has no parent. */
3037ira_allocno_t
3038ira_parent_allocno (ira_allocno_t a)
3039{
3040 ira_loop_tree_node_t parent;
3041
3042 if (ALLOCNO_CAP (a) != NULL)
3043 return NULL;
3044
3045 parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
3046 if (parent == NULL)
3047 return NULL;
3048
3049 return parent->regno_allocno_map[ALLOCNO_REGNO (a)];
3050}
3051
3052/* Find the allocno that corresponds to A at a level one higher up in the
3053 loop tree. If ALLOCNO_CAP is set for A, return that. */
3054ira_allocno_t
3055ira_parent_or_cap_allocno (ira_allocno_t a)
3056{
3057 if (ALLOCNO_CAP (a) != NULL)
3058 return ALLOCNO_CAP (a);
3059
3060 return ira_parent_allocno (a);
3061}
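
/* Example: for an allocno A of pseudo 100 in loop tree node N with
   parent P, ira_parent_allocno (A) is P->regno_allocno_map[100], while
   ira_parent_or_cap_allocno (A) returns A's cap instead whenever a cap
   was built for A (see create_caps above).  */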
3062
3063/* Process all allocnos originating from pseudo REGNO and copy live
3064 ranges, hard reg conflicts, and allocno stack reg attributes from
3065 low level allocnos to final allocnos which are destinations of
3066 removed stores at a loop exit. Return true if we copied live
3067 ranges. */
3068static bool
3069copy_info_to_removed_store_destinations (int regno)
3070{
3071 ira_allocno_t a;
3072 ira_allocno_t parent_a = NULL;
3073 ira_loop_tree_node_t parent;
3074 bool merged_p;
3075
3076 merged_p = false;
3077 for (a = ira_regno_allocno_map[regno];
3078 a != NULL;
3079 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
3080 {
3081 if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))])
3082 /* This allocno will be removed. */
3083 continue;
3084
3085 /* Caps will be removed. */
3086 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
3087 for (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
3088 parent != NULL;
3089 parent = parent->parent)
3090 if ((parent_a = parent->regno_allocno_map[regno]) == NULL
3091 || (parent_a
3092 == regno_top_level_allocno_map[REGNO
3093 (allocno_emit_reg (parent_a))]
3094 && ALLOCNO_EMIT_DATA (parent_a)->mem_optimized_dest_p))
3095 break;
3096 if (parent == NULL || parent_a == NULL)
3097 continue;
3098
3099 copy_allocno_live_ranges (from: a, to: parent_a);
3100 merge_hard_reg_conflicts (from: a, to: parent_a, total_only: true);
3101
3102 ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
3103 ALLOCNO_CALLS_CROSSED_NUM (parent_a)
3104 += ALLOCNO_CALLS_CROSSED_NUM (a);
3105 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
3106 += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
3107 ALLOCNO_CROSSED_CALLS_ABIS (parent_a)
3108 |= ALLOCNO_CROSSED_CALLS_ABIS (a);
3109 ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (parent_a)
3110 |= ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a);
3111 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
3112 += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
3113 merged_p = true;
3114 }
3115 return merged_p;
3116}
3117
3118/* Flatten the IR. In other words, this function transforms IR as if
3119 it were built with one region (without loops). We could make it
3120 much simpler by rebuilding IR with one region, but unfortunately it
3121 takes a lot of time. MAX_REGNO_BEFORE_EMIT and
3122 IRA_MAX_POINT_BEFORE_EMIT are respectively MAX_REG_NUM () and
3123 IRA_MAX_POINT before emitting insns on the loop borders. */
3124void
3125ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
3126{
3127 int i, j;
3128 bool keep_p;
3129 int hard_regs_num;
3130 bool new_pseudos_p, merged_p, mem_dest_p;
3131 unsigned int n;
3132 enum reg_class aclass;
3133 ira_allocno_t a, parent_a, first, second, node_first, node_second;
3134 ira_copy_t cp;
3135 ira_loop_tree_node_t node;
3136 live_range_t r;
3137 ira_allocno_iterator ai;
3138 ira_copy_iterator ci;
3139
3140 regno_top_level_allocno_map
3141 = (ira_allocno_t *) ira_allocate (max_reg_num ()
3142 * sizeof (ira_allocno_t));
3143 memset (s: regno_top_level_allocno_map, c: 0,
3144 n: max_reg_num () * sizeof (ira_allocno_t));
3145 new_pseudos_p = merged_p = false;
3146 FOR_EACH_ALLOCNO (a, ai)
3147 {
3148 ira_allocno_object_iterator oi;
3149 ira_object_t obj;
3150
3151 if (ALLOCNO_CAP_MEMBER (a) != NULL)
3152 /* Caps are not in the regno allocno maps and they will
3153 never be transformed into allocnos existing after IR
3154 flattening.  */
3155 continue;
3156 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
3157 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj)
3158 = OBJECT_CONFLICT_HARD_REGS (obj);
3159#ifdef STACK_REGS
3160 ALLOCNO_TOTAL_NO_STACK_REG_P (a) = ALLOCNO_NO_STACK_REG_P (a);
3161#endif
3162 }
3163 /* Fix final allocno attributes. */
3164 for (i = max_regno_before_emit - 1; i >= FIRST_PSEUDO_REGISTER; i--)
3165 {
3166 mem_dest_p = false;
3167 for (a = ira_regno_allocno_map[i];
3168 a != NULL;
3169 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
3170 {
3171 ira_emit_data_t parent_data, data = ALLOCNO_EMIT_DATA (a);
3172
3173 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
3174 if (data->somewhere_renamed_p)
3175 new_pseudos_p = true;
3176 parent_a = ira_parent_allocno (a);
3177 if (parent_a == NULL)
3178 {
3179 ALLOCNO_COPIES (a) = NULL;
3180 regno_top_level_allocno_map[REGNO (data->reg)] = a;
3181 continue;
3182 }
3183 ira_assert (ALLOCNO_CAP_MEMBER (parent_a) == NULL);
3184
3185 if (data->mem_optimized_dest != NULL)
3186 mem_dest_p = true;
3187 parent_data = ALLOCNO_EMIT_DATA (parent_a);
3188 if (REGNO (data->reg) == REGNO (parent_data->reg))
3189 {
3190 merge_hard_reg_conflicts (from: a, to: parent_a, total_only: true);
3191 move_allocno_live_ranges (from: a, to: parent_a);
3192 merged_p = true;
3193 parent_data->mem_optimized_dest_p
3194 = (parent_data->mem_optimized_dest_p
3195 || data->mem_optimized_dest_p);
3196 continue;
3197 }
3198 new_pseudos_p = true;
3199 for (;;)
3200 {
3201 ALLOCNO_NREFS (parent_a) -= ALLOCNO_NREFS (a);
3202 ALLOCNO_FREQ (parent_a) -= ALLOCNO_FREQ (a);
3203 ALLOCNO_CALL_FREQ (parent_a) -= ALLOCNO_CALL_FREQ (a);
3204 ALLOCNO_CALLS_CROSSED_NUM (parent_a)
3205 -= ALLOCNO_CALLS_CROSSED_NUM (a);
3206 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
3207 -= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
3208 /* Assume that ALLOCNO_CROSSED_CALLS_ABIS and
3209 ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS stay the same.
3210 We'd need to rebuild the IR to do better. */
3211 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
3212 -= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
3213 ira_assert (ALLOCNO_CALLS_CROSSED_NUM (parent_a) >= 0
3214 && ALLOCNO_NREFS (parent_a) >= 0
3215 && ALLOCNO_FREQ (parent_a) >= 0);
3216 aclass = ALLOCNO_CLASS (parent_a);
3217 hard_regs_num = ira_class_hard_regs_num[aclass];
3218 if (ALLOCNO_HARD_REG_COSTS (a) != NULL
3219 && ALLOCNO_HARD_REG_COSTS (parent_a) != NULL)
3220 for (j = 0; j < hard_regs_num; j++)
3221 ALLOCNO_HARD_REG_COSTS (parent_a)[j]
3222 -= ALLOCNO_HARD_REG_COSTS (a)[j];
3223 if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL
3224 && ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a) != NULL)
3225 for (j = 0; j < hard_regs_num; j++)
3226 ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a)[j]
3227 -= ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[j];
3228 ALLOCNO_CLASS_COST (parent_a)
3229 -= ALLOCNO_CLASS_COST (a);
3230 ALLOCNO_MEMORY_COST (parent_a) -= ALLOCNO_MEMORY_COST (a);
3231 parent_a = ira_parent_allocno (a: parent_a);
3232 if (parent_a == NULL)
3233 break;
3234 }
3235 ALLOCNO_COPIES (a) = NULL;
3236 regno_top_level_allocno_map[REGNO (data->reg)] = a;
3237 }
3238 if (mem_dest_p && copy_info_to_removed_store_destinations (regno: i))
3239 merged_p = true;
3240 }
3241 ira_assert (new_pseudos_p || ira_max_point_before_emit == ira_max_point);
3242 if (merged_p || ira_max_point_before_emit != ira_max_point)
3243 ira_rebuild_start_finish_chains ();
3244 if (new_pseudos_p)
3245 {
3246 sparseset objects_live;
3247
3248 /* Rebuild conflicts. */
3249 FOR_EACH_ALLOCNO (a, ai)
3250 {
3251 ira_allocno_object_iterator oi;
3252 ira_object_t obj;
3253
3254 if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
3255 || ALLOCNO_CAP_MEMBER (a) != NULL)
3256 continue;
3257 FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
3258 {
3259 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
3260 ira_assert (r->object == obj);
3261 clear_conflicts (obj);
3262 }
3263 }
3264 objects_live = sparseset_alloc (n_elms: ira_objects_num);
3265 for (i = 0; i < ira_max_point; i++)
3266 {
3267 for (r = ira_start_point_ranges[i]; r != NULL; r = r->start_next)
3268 {
3269 ira_object_t obj = r->object;
3270
3271 a = OBJECT_ALLOCNO (obj);
3272 if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
3273 || ALLOCNO_CAP_MEMBER (a) != NULL)
3274 continue;
3275
3276 aclass = ALLOCNO_CLASS (a);
3277 EXECUTE_IF_SET_IN_SPARSESET (objects_live, n)
3278 {
3279 ira_object_t live_obj = ira_object_id_map[n];
3280 ira_allocno_t live_a = OBJECT_ALLOCNO (live_obj);
3281 enum reg_class live_aclass = ALLOCNO_CLASS (live_a);
3282
3283 if (ira_reg_classes_intersect_p[aclass][live_aclass]
3284 /* Don't set up conflict for the allocno with itself. */
3285 && live_a != a)
3286 ira_add_conflict (obj1: obj, obj2: live_obj);
3287 }
3288 sparseset_set_bit (s: objects_live, OBJECT_CONFLICT_ID (obj));
3289 }
3290
3291 for (r = ira_finish_point_ranges[i]; r != NULL; r = r->finish_next)
3292 sparseset_clear_bit (objects_live, OBJECT_CONFLICT_ID (r->object));
3293 }
3294 sparseset_free (objects_live);
3295 compress_conflict_vecs ();
3296 }
3297 /* Mark some copies for removal and change the allocnos in the
3298 remaining copies.  */
3299 FOR_EACH_COPY (cp, ci)
3300 {
3301 if (ALLOCNO_CAP_MEMBER (cp->first) != NULL
3302 || ALLOCNO_CAP_MEMBER (cp->second) != NULL)
3303 {
3304 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
3305 fprintf
3306 (stream: ira_dump_file, format: " Remove cp%d:%c%dr%d-%c%dr%d\n",
3307 cp->num, ALLOCNO_CAP_MEMBER (cp->first) != NULL ? 'c' : 'a',
3308 ALLOCNO_NUM (cp->first),
3309 REGNO (allocno_emit_reg (cp->first)),
3310 ALLOCNO_CAP_MEMBER (cp->second) != NULL ? 'c' : 'a',
3311 ALLOCNO_NUM (cp->second),
3312 REGNO (allocno_emit_reg (cp->second)));
3313 cp->loop_tree_node = NULL;
3314 continue;
3315 }
3316 first
3317 = regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->first))];
3318 second
3319 = regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->second))];
3320 node = cp->loop_tree_node;
3321 if (node == NULL)
3322 keep_p = true; /* It is a copy generated in ira-emit.cc.  */
3323 else
3324 {
3325 /* Check that the copy was not propagated from a level on
3326 which we will have different pseudos.  */
3327 node_first = node->regno_allocno_map[ALLOCNO_REGNO (cp->first)];
3328 node_second = node->regno_allocno_map[ALLOCNO_REGNO (cp->second)];
3329 keep_p = ((REGNO (allocno_emit_reg (first))
3330 == REGNO (allocno_emit_reg (node_first)))
3331 && (REGNO (allocno_emit_reg (second))
3332 == REGNO (allocno_emit_reg (node_second))));
3333 }
3334 if (keep_p)
3335 {
3336 cp->loop_tree_node = ira_loop_tree_root;
3337 cp->first = first;
3338 cp->second = second;
3339 }
3340 else
3341 {
3342 cp->loop_tree_node = NULL;
3343 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
3344 fprintf (stream: ira_dump_file, format: " Remove cp%d:a%dr%d-a%dr%d\n",
3345 cp->num, ALLOCNO_NUM (cp->first),
3346 REGNO (allocno_emit_reg (cp->first)),
3347 ALLOCNO_NUM (cp->second),
3348 REGNO (allocno_emit_reg (cp->second)));
3349 }
3350 }
3351 /* Remove unnecessary allocnos on lower levels of the loop tree. */
3352 FOR_EACH_ALLOCNO (a, ai)
3353 {
3354 if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
3355 || ALLOCNO_CAP_MEMBER (a) != NULL)
3356 {
3357 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
3358 fprintf (stream: ira_dump_file, format: " Remove a%dr%d\n",
3359 ALLOCNO_NUM (a), REGNO (allocno_emit_reg (a)));
3360 ira_remove_allocno_prefs (a);
3361 finish_allocno (a);
3362 continue;
3363 }
3364 ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
3365 ALLOCNO_REGNO (a) = REGNO (allocno_emit_reg (a));
3366 ALLOCNO_CAP (a) = NULL;
3367 /* Restore updated costs for assignments from reload. */
3368 ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
3369 ALLOCNO_UPDATED_CLASS_COST (a) = ALLOCNO_CLASS_COST (a);
3370 if (! ALLOCNO_ASSIGNED_P (a))
3371 ira_free_allocno_updated_costs (a);
3372 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
3373 ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
3374 }
3375 /* Remove unnecessary copies. */
3376 FOR_EACH_COPY (cp, ci)
3377 {
3378 if (cp->loop_tree_node == NULL)
3379 {
3380 ira_copies[cp->num] = NULL;
3381 finish_copy (cp);
3382 continue;
3383 }
3384 ira_assert
3385 (ALLOCNO_LOOP_TREE_NODE (cp->first) == ira_loop_tree_root
3386 && ALLOCNO_LOOP_TREE_NODE (cp->second) == ira_loop_tree_root);
3387 add_allocno_copy_to_list (cp);
3388 swap_allocno_copy_ends_if_necessary (cp);
3389 }
3390 rebuild_regno_allocno_maps ();
3391 if (ira_max_point != ira_max_point_before_emit)
3392 ira_compress_allocno_live_ranges ();
3393 ira_free (addr: regno_top_level_allocno_map);
3394}
3395
3396
3397
3398#ifdef ENABLE_IRA_CHECKING
3399/* Check the creation of all allocnos.  Allocnos on lower levels should
3400 have corresponding allocnos or caps on all upper levels.  */
3401static void
3402check_allocno_creation (void)
3403{
3404 ira_allocno_t a;
3405 ira_allocno_iterator ai;
3406 ira_loop_tree_node_t loop_tree_node;
3407
3408 FOR_EACH_ALLOCNO (a, ai)
3409 {
3410 loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
3411 ira_assert (bitmap_bit_p (loop_tree_node->all_allocnos,
3412 ALLOCNO_NUM (a)));
3413 if (loop_tree_node == ira_loop_tree_root)
3414 continue;
3415 if (ALLOCNO_CAP_MEMBER (a) != NULL)
3416 ira_assert (ALLOCNO_CAP (a) != NULL);
3417 else if (ALLOCNO_CAP (a) == NULL)
3418 ira_assert (loop_tree_node->parent
3419 ->regno_allocno_map[ALLOCNO_REGNO (a)] != NULL
3420 && bitmap_bit_p (loop_tree_node->border_allocnos,
3421 ALLOCNO_NUM (a)));
3422 }
3423}
3424#endif
3425
3426/* Identify allocnos which prefer a register class with a single hard register.
3427 Adjust ALLOCNO_CONFLICT_HARD_REG_COSTS so that conflicting allocnos are
3428 less likely to use the preferred singleton register. */
3429static void
3430update_conflict_hard_reg_costs (void)
3431{
3432 ira_allocno_t a;
3433 ira_allocno_iterator ai;
3434 int i, index, min;
3435
3436 FOR_EACH_ALLOCNO (a, ai)
3437 {
3438 reg_class_t aclass = ALLOCNO_CLASS (a);
3439 reg_class_t pref = reg_preferred_class (ALLOCNO_REGNO (a));
3440 int singleton = ira_class_singleton[pref][ALLOCNO_MODE (a)];
3441 if (singleton < 0)
3442 continue;
3443 index = ira_class_hard_reg_index[(int) aclass][singleton];
3444 if (index < 0)
3445 continue;
3446 if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) == NULL
3447 || ALLOCNO_HARD_REG_COSTS (a) == NULL)
3448 continue;
3449 min = INT_MAX;
3450 for (i = ira_class_hard_regs_num[(int) aclass] - 1; i >= 0; i--)
3451 if (ALLOCNO_HARD_REG_COSTS (a)[i] > ALLOCNO_CLASS_COST (a)
3452 && min > ALLOCNO_HARD_REG_COSTS (a)[i])
3453 min = ALLOCNO_HARD_REG_COSTS (a)[i];
3454 if (min == INT_MAX)
3455 continue;
3456 ira_allocate_and_set_costs (vec: &ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
3457 aclass, val: 0);
3458 ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[index]
3459 -= min - ALLOCNO_CLASS_COST (a);
3460 }
3461}
3462
3463/* Create an internal representation (IR) for IRA (allocnos, copies,
3464 loop tree nodes).  The function returns TRUE if we generate a loop
3465 structure (besides the node representing the whole function and the
3466 nodes representing the basic blocks) for regional allocation.  A true
3467 return means that we really need to flatten the IR before reload.  */
3468bool
3469ira_build (void)
3470{
3471 bool loops_p;
3472
3473 df_analyze ();
3474 initiate_cost_vectors ();
3475 initiate_allocnos ();
3476 initiate_prefs ();
3477 initiate_copies ();
3478 create_loop_tree_nodes ();
3479 form_loop_tree ();
3480 create_allocnos ();
3481 ira_costs ();
3482 create_allocno_objects ();
3483 ira_create_allocno_live_ranges ();
3484 remove_unnecessary_regions (all_p: false);
3485 ira_compress_allocno_live_ranges ();
3486 update_bad_spill_attribute ();
3487 loops_p = more_one_region_p ();
3488 if (loops_p)
3489 {
3490 propagate_allocno_info ();
3491 create_caps ();
3492 }
3493 ira_tune_allocno_costs ();
3494#ifdef ENABLE_IRA_CHECKING
3495 check_allocno_creation ();
3496#endif
3497 setup_min_max_allocno_live_range_point ();
3498 sort_conflict_id_map ();
3499 setup_min_max_conflict_allocno_ids ();
3500 ira_build_conflicts ();
3501 update_conflict_hard_reg_costs ();
3502 if (! ira_conflicts_p)
3503 {
3504 ira_allocno_t a;
3505 ira_allocno_iterator ai;
3506
3507 /* Remove all regions but root one. */
3508 if (loops_p)
3509 {
3510 remove_unnecessary_regions (all_p: true);
3511 loops_p = false;
3512 }
3513 /* We don't save hard registers around calls for fast allocation
3514 -- add caller clobbered registers as conflicting ones to
3515 allocnos crossing calls.  */
3516 FOR_EACH_ALLOCNO (a, ai)
3517 if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
3518 ior_hard_reg_conflicts (a, set: ira_need_caller_save_regs (a));
3519 }
3520 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
3521 print_copies (f: ira_dump_file);
3522 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
3523 print_prefs (f: ira_dump_file);
3524 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
3525 {
3526 int n, nr, nr_big;
3527 ira_allocno_t a;
3528 live_range_t r;
3529 ira_allocno_iterator ai;
3530
3531 n = 0;
3532 nr = 0;
3533 nr_big = 0;
3534 FOR_EACH_ALLOCNO (a, ai)
3535 {
3536 int j, nobj = ALLOCNO_NUM_OBJECTS (a);
3537
3538 if (nobj > 1)
3539 nr_big++;
3540 for (j = 0; j < nobj; j++)
3541 {
3542 ira_object_t obj = ALLOCNO_OBJECT (a, j);
3543 n += OBJECT_NUM_CONFLICTS (obj);
3544 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
3545 nr++;
3546 }
3547 }
3548 fprintf (stream: ira_dump_file, format: " regions=%d, blocks=%d, points=%d\n",
3549 current_loops == NULL ? 1 : number_of_loops (cfun),
3550 n_basic_blocks_for_fn (cfun), ira_max_point);
3551 fprintf (stream: ira_dump_file,
3552 format: " allocnos=%d (big %d), copies=%d, conflicts=%d, ranges=%d\n",
3553 ira_allocnos_num, nr_big, ira_copies_num, n, nr);
3554 }
3555 return loops_p;
3556}
3557
3558/* Release the data created by function ira_build. */
3559void
3560ira_destroy (void)
3561{
3562 finish_loop_tree_nodes ();
3563 finish_prefs ();
3564 finish_copies ();
3565 finish_allocnos ();
3566 finish_cost_vectors ();
3567 ira_finish_allocno_live_ranges ();
3568}
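
/* A rough sketch (for orientation only) of how the IRA driver in
   ira.cc uses the entry points above:

     loops_p = ira_build ();
     ... color allocnos and emit insns on the loop borders ...
     if (loops_p)
       ira_flattening (max_regno_before_emit, ira_max_point_before_emit);
     ira_destroy ();

   where the *_before_emit values are recorded before ira_emit creates
   new pseudos and insns on the loop borders.  */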
3569
