1 | /* Change pseudos by memory. |
2 | Copyright (C) 2010-2023 Free Software Foundation, Inc. |
3 | Contributed by Vladimir Makarov <vmakarov@redhat.com>. |
4 | |
5 | This file is part of GCC. |
6 | |
7 | GCC is free software; you can redistribute it and/or modify it under |
8 | the terms of the GNU General Public License as published by the Free |
9 | Software Foundation; either version 3, or (at your option) any later |
10 | version. |
11 | |
12 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
13 | WARRANTY; without even the implied warranty of MERCHANTABILITY or |
14 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
15 | for more details. |
16 | |
17 | You should have received a copy of the GNU General Public License |
18 | along with GCC; see the file COPYING3. If not see |
19 | <http://www.gnu.org/licenses/>. */ |
20 | |
21 | |
22 | /* This file contains code for a pass to change spilled pseudos into |
23 | memory. |
24 | |
25 | The pass creates necessary stack slots and assigns spilled pseudos |
26 | to the stack slots in the following way: |
27 | |
28 | for all spilled pseudos P most frequently used first do |
29 | for all stack slots S do |
30 | if P doesn't conflict with pseudos assigned to S then |
31 | assign S to P and go on to the next pseudo |
32 | end |
33 | end |
34 | create new stack slot S and assign P to S |
35 | end |
36 | |
37 | The actual algorithm is a bit more complicated because of different |
38 | pseudo sizes. |
39 | |
40 | After that, the code replaces spilled pseudos (except ones created |
41 | from scratches) with the corresponding stack slot memory in RTL. |
42 | |
43 | If at least one stack slot was created, we need to run more passes |
44 | because we have new addresses which should be checked and because |
45 | the old address displacements might change and address constraints |
46 | (or insn memory constraints) might not be satisfied any more. |
47 | |
48 | For some targets, the pass can spill some pseudos into hard |
49 | registers of a different class (usually into vector registers) |
50 | instead of spilling them into memory if it is possible and |
51 | profitable. Spilling a GENERAL_REGS pseudo into SSE registers on |
52 | an Intel Core i7 is an example of such an optimization, and it is |
53 | actually recommended by the Intel optimization guide. |
54 | |
55 | The file also contains code for the final replacement of pseudos |
56 | with the hard regs assigned to them. */ |
57 | |
58 | #include "config.h" |
59 | #include "system.h" |
60 | #include "coretypes.h" |
61 | #include "backend.h" |
62 | #include "target.h" |
63 | #include "rtl.h" |
64 | #include "df.h" |
65 | #include "insn-config.h" |
66 | #include "regs.h" |
67 | #include "memmodel.h" |
68 | #include "ira.h" |
69 | #include "recog.h" |
70 | #include "output.h" |
71 | #include "cfgrtl.h" |
72 | #include "lra.h" |
73 | #include "lra-int.h" |
74 | |
75 | |
76 | /* Max regno at the start of the pass. */ |
77 | static int regs_num; |
78 | |
79 | /* Map spilled regno -> hard regno used instead of memory for |
80 | spilling. */ |
81 | static rtx *spill_hard_reg; |
82 | |
83 | /* The structure describes the stack slot of a spilled pseudo. */ |
84 | struct pseudo_slot |
85 | { |
86 | /* Number (0, 1, ...) of the stack slot to which given pseudo |
87 | belongs. */ |
88 | int slot_num; |
89 | /* First or next slot with the same slot number. */ |
90 | struct pseudo_slot *next, *first; |
91 | /* Memory representing the spilled pseudo. */ |
92 | rtx mem; |
93 | }; |
94 | |
95 | /* The stack slots for each spilled pseudo. Indexed by regnos. */ |
96 | static struct pseudo_slot *pseudo_slots; |
97 | |
98 | /* The structure describes a register or a stack slot which can be |
99 | used for several spilled pseudos. */ |
100 | class slot |
101 | { |
102 | public: |
103 | /* First pseudo with given stack slot. */ |
104 | int regno; |
105 | /* Hard reg into which the slot pseudos are spilled. The value is |
106 | negative for pseudos spilled into memory. */ |
107 | int hard_regno; |
108 | /* Maximum alignment required by all users of the slot. */ |
109 | unsigned int align; |
110 | /* Maximum size required by all users of the slot. */ |
111 | poly_int64 size; |
112 | /* Memory representing the whole stack slot. It can be different from |
113 | the memory representing a pseudo belonging to the given stack slot |
114 | because a pseudo can be placed in just a part of the corresponding |
115 | stack slot. The value is NULL for pseudos spilled into a hard reg. */ |
116 | rtx mem; |
117 | /* Combined live ranges of all pseudos belonging to the given slot. It |
118 | is used to figure out whether a new spilled pseudo can use the given |
119 | stack slot. */ |
120 | lra_live_range_t live_ranges; |
121 | }; |
122 | |
123 | /* Array containing info about the stack slots. The array element is |
124 | indexed by the stack slot number in the range [0..slots_num). */ |
125 | static class slot *slots; |
126 | /* The number of the stack slots currently existing. */ |
127 | static int slots_num; |
128 | |
129 | /* Set up memory of the spilled pseudo I. The function can allocate |
130 | the corresponding stack slot if that has not been done yet. */ |
131 | static void |
132 | assign_mem_slot (int i) |
133 | { |
134 | rtx x = NULL_RTX; |
135 | machine_mode mode = GET_MODE (regno_reg_rtx[i]); |
136 | poly_int64 inherent_size = PSEUDO_REGNO_BYTES (i); |
137 | machine_mode wider_mode |
138 | = wider_subreg_mode (mode, lra_reg_info[i].biggest_mode); |
139 | poly_int64 total_size = GET_MODE_SIZE (wider_mode); |
140 | poly_int64 adjust = 0; |
141 | |
142 | lra_assert (regno_reg_rtx[i] != NULL_RTX && REG_P (regno_reg_rtx[i]) |
143 | && lra_reg_info[i].nrefs != 0 && reg_renumber[i] < 0); |
144 | |
145 | unsigned int slot_num = pseudo_slots[i].slot_num; |
146 | x = slots[slot_num].mem; |
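/* The shared stack slot is allocated lazily, the first time a pseudo assigned to it is processed. */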
147 | if (!x) |
148 | { |
149 | x = assign_stack_local (BLKmode, slots[slot_num].size, |
150 | slots[slot_num].align); |
151 | slots[slot_num].mem = x; |
152 | } |
153 | |
154 | /* On a big endian machine, the "address" of the slot is the address |
155 | of the low part that fits its inherent mode. */ |
156 | adjust += subreg_size_lowpart_offset (inherent_size, total_size); |
157 | x = adjust_address_nv (x, GET_MODE (regno_reg_rtx[i]), adjust); |
158 | |
159 | /* Set all of the memory attributes as appropriate for a spill. */ |
160 | set_mem_attrs_for_spill (x); |
161 | pseudo_slots[i].mem = x; |
162 | } |
163 | |
164 | /* Sort pseudos according to their usage frequencies. */ |
165 | static int |
166 | regno_freq_compare (const void *v1p, const void *v2p) |
167 | { |
168 | const int regno1 = *(const int *) v1p; |
169 | const int regno2 = *(const int *) v2p; |
170 | int diff; |
171 | |
172 | if ((diff = lra_reg_info[regno2].freq - lra_reg_info[regno1].freq) != 0) |
173 | return diff; |
174 | return regno1 - regno2; |
175 | } |
176 | |
177 | /* Sort pseudos according to their slots, putting the slots in the order |
178 | that they should be allocated. |
179 | |
180 | First prefer to group slots with variable sizes together and slots |
181 | with constant sizes together, since that usually makes them easier |
182 | to address from a common anchor point. E.g. loads of polynomial-sized |
183 | registers tend to take polynomial offsets while loads of constant-sized |
184 | registers tend to take constant (non-polynomial) offsets. |
185 | |
186 | Next, slots with lower numbers have the highest priority and should |
187 | get the smallest displacement from the stack or frame pointer |
188 | (whichever is being used). |
189 | |
190 | The first allocated slot is always closest to the frame pointer, |
191 | so prefer lower slot numbers when frame_pointer_needed. If the stack |
192 | and frame grow in the same direction, then the first allocated slot is |
193 | always closest to the initial stack pointer and furthest away from the |
194 | final stack pointer, so allocate higher numbers first when using the |
195 | stack pointer in that case. The reverse is true if the stack and |
196 | frame grow in opposite directions. */ |
197 | static int |
198 | pseudo_reg_slot_compare (const void *v1p, const void *v2p) |
199 | { |
200 | const int regno1 = *(const int *) v1p; |
201 | const int regno2 = *(const int *) v2p; |
202 | int diff, slot_num1, slot_num2; |
203 | |
204 | slot_num1 = pseudo_slots[regno1].slot_num; |
205 | slot_num2 = pseudo_slots[regno2].slot_num; |
206 | diff = (int (slots[slot_num1].size.is_constant ()) |
207 | - int (slots[slot_num2].size.is_constant ())); |
208 | if (diff != 0) |
209 | return diff; |
210 | if ((diff = slot_num1 - slot_num2) != 0) |
211 | return (frame_pointer_needed |
212 | || (!FRAME_GROWS_DOWNWARD) == STACK_GROWS_DOWNWARD ? diff : -diff); |
213 | poly_int64 total_size1 = GET_MODE_SIZE (lra_reg_info[regno1].biggest_mode); |
214 | poly_int64 total_size2 = GET_MODE_SIZE (lra_reg_info[regno2].biggest_mode); |
215 | if ((diff = compare_sizes_for_sort (total_size2, total_size1)) != 0) |
216 | return diff; |
217 | return regno1 - regno2; |
218 | } |
219 | |
220 | /* Assign spill hard registers to N pseudos in PSEUDO_REGNOS which is |
221 | sorted in order of highest frequency first. Put the pseudos which |
222 | did not get a spill hard register at the beginning of array |
223 | PSEUDO_REGNOS. Return the number of such pseudos. */ |
224 | static int |
225 | assign_spill_hard_regs (int *pseudo_regnos, int n) |
226 | { |
227 | int i, k, p, regno, res, spill_class_size, hard_regno, nr; |
228 | enum reg_class rclass, spill_class; |
229 | machine_mode mode; |
230 | lra_live_range_t r; |
231 | rtx_insn *insn; |
232 | rtx set; |
233 | basic_block bb; |
234 | HARD_REG_SET conflict_hard_regs; |
235 | bitmap setjump_crosses = regstat_get_setjmp_crosses (); |
236 | /* Hard registers which cannot be used for any purpose at a given |
237 | program point because they are unallocatable or already allocated |
238 | for other pseudos. */ |
239 | HARD_REG_SET *reserved_hard_regs; |
240 | |
241 | if (! lra_reg_spill_p) |
242 | return n; |
243 | /* Set up reserved hard regs for every program point. */ |
244 | reserved_hard_regs = XNEWVEC (HARD_REG_SET, lra_live_max_point); |
245 | for (p = 0; p < lra_live_max_point; p++) |
246 | reserved_hard_regs[p] = lra_no_alloc_regs; |
247 | for (i = FIRST_PSEUDO_REGISTER; i < regs_num; i++) |
248 | if (lra_reg_info[i].nrefs != 0 |
249 | && (hard_regno = lra_get_regno_hard_regno (i)) >= 0 |
250 | for (r = lra_reg_info[i].live_ranges; r != NULL; r = r->next) |
251 | for (p = r->start; p <= r->finish; p++) |
252 | add_to_hard_reg_set (&reserved_hard_regs[p], |
253 | lra_reg_info[i].biggest_mode, hard_regno); |
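/* Collect insns in which a spilled pseudo may be replaced by a spill hard reg: debug insns and simple register-to-register moves. */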
254 | auto_bitmap ok_insn_bitmap (®_obstack); |
255 | FOR_EACH_BB_FN (bb, cfun) |
256 | FOR_BB_INSNS (bb, insn) |
257 | if (DEBUG_INSN_P (insn) |
258 | || ((set = single_set (insn)) != NULL_RTX |
259 | && REG_P (SET_SRC (set)) && REG_P (SET_DEST (set)))) |
260 | bitmap_set_bit (ok_insn_bitmap, INSN_UID (insn)); |
261 | for (res = i = 0; i < n; i++) |
262 | { |
263 | regno = pseudo_regnos[i]; |
264 | rclass = lra_get_allocno_class (regno); |
265 | if (bitmap_bit_p (setjump_crosses, regno) |
266 | || (spill_class |
267 | = ((enum reg_class) |
268 | targetm.spill_class ((reg_class_t) rclass, |
269 | PSEUDO_REGNO_MODE (regno)))) == NO_REGS |
270 | || bitmap_intersect_compl_p (&lra_reg_info[regno].insn_bitmap, |
271 | ok_insn_bitmap)) |
272 | { |
273 | pseudo_regnos[res++] = regno; |
274 | continue; |
275 | } |
276 | lra_assert (spill_class != NO_REGS); |
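/* Start from the pseudo's own conflicting hard regs and add every reg reserved at the program points where the pseudo is live. */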
277 | conflict_hard_regs = lra_reg_info[regno].conflict_hard_regs; |
278 | for (r = lra_reg_info[regno].live_ranges; r != NULL; r = r->next) |
279 | for (p = r->start; p <= r->finish; p++) |
280 | conflict_hard_regs |= reserved_hard_regs[p]; |
281 | spill_class_size = ira_class_hard_regs_num[spill_class]; |
282 | mode = lra_reg_info[regno].biggest_mode; |
283 | for (k = 0; k < spill_class_size; k++) |
284 | { |
285 | hard_regno = ira_class_hard_regs[spill_class][k]; |
286 | if (TEST_HARD_REG_BIT (eliminable_regset, hard_regno) |
287 | || !targetm.hard_regno_mode_ok (hard_regno, mode)) |
288 | continue; |
289 | if (! overlaps_hard_reg_set_p (conflict_hard_regs, mode, hard_regno)) |
290 | break; |
291 | } |
292 | if (k >= spill_class_size) |
293 | { |
294 | /* There are no available regs -- assign memory later. */ |
295 | pseudo_regnos[res++] = regno; |
296 | continue; |
297 | } |
298 | if (lra_dump_file != NULL) |
299 | fprintf (lra_dump_file, " Spill r%d into hr%d\n", regno, hard_regno); |
300 | add_to_hard_reg_set (&hard_regs_spilled_into, |
301 | lra_reg_info[regno].biggest_mode, hard_regno); |
302 | /* Update reserved_hard_regs. */ |
303 | for (r = lra_reg_info[regno].live_ranges; r != NULL; r = r->next) |
304 | for (p = r->start; p <= r->finish; p++) |
305 | add_to_hard_reg_set (&reserved_hard_regs[p], |
306 | lra_reg_info[regno].biggest_mode, hard_regno); |
307 | spill_hard_reg[regno] |
308 | = gen_raw_REG (PSEUDO_REGNO_MODE (regno), hard_regno); |
309 | for (nr = 0; |
310 | nr < hard_regno_nregs (hard_regno, |
311 | lra_reg_info[regno].biggest_mode); |
312 | nr++) |
313 | /* Just loop. */ |
314 | df_set_regs_ever_live (hard_regno + nr, true); |
315 | } |
316 | free (reserved_hard_regs); |
317 | return res; |
318 | } |
319 | |
320 | /* Add pseudo REGNO to slot SLOT_NUM. */ |
321 | static void |
322 | add_pseudo_to_slot (int regno, int slot_num) |
323 | { |
324 | struct pseudo_slot *first; |
325 | |
326 | /* Each pseudo has an inherent size which comes from its own mode, |
327 | and a total size which provides room for paradoxical subregs. |
328 | We need to make sure the size and alignment of the slot are |
329 | sufficient for both. */ |
330 | machine_mode mode = wider_subreg_mode (PSEUDO_REGNO_MODE (regno), |
331 | lra_reg_info[regno].biggest_mode); |
332 | unsigned int align = spill_slot_alignment (mode); |
333 | slots[slot_num].align = MAX (slots[slot_num].align, align); |
334 | slots[slot_num].size = upper_bound (slots[slot_num].size, |
335 | GET_MODE_SIZE (mode)); |
336 | |
337 | if (slots[slot_num].regno < 0) |
338 | { |
339 | /* It is the first pseudo in the slot. */ |
340 | slots[slot_num].regno = regno; |
341 | pseudo_slots[regno].first = &pseudo_slots[regno]; |
342 | pseudo_slots[regno].next = NULL; |
343 | } |
344 | else |
345 | { |
346 | first = pseudo_slots[regno].first = &pseudo_slots[slots[slot_num].regno]; |
347 | pseudo_slots[regno].next = first->next; |
348 | first->next = &pseudo_slots[regno]; |
349 | } |
350 | pseudo_slots[regno].mem = NULL_RTX; |
351 | pseudo_slots[regno].slot_num = slot_num; |
352 | slots[slot_num].live_ranges |
353 | = lra_merge_live_ranges (slots[slot_num].live_ranges, |
354 | lra_copy_live_range_list |
355 | (lra_reg_info[regno].live_ranges)); |
356 | } |
357 | |
358 | /* Assign stack slot numbers to pseudos in array PSEUDO_REGNOS of |
359 | length N. Sort pseudos in PSEUDO_REGNOS for the subsequent |
360 | assignment of memory stack slots. */ |
361 | static void |
362 | assign_stack_slot_num_and_sort_pseudos (int *pseudo_regnos, int n) |
363 | { |
364 | int i, j, regno; |
365 | |
366 | /* Assign stack slot numbers to spilled pseudos, using smaller numbers |
367 | for the most frequently used pseudos. */ |
368 | for (i = 0; i < n; i++) |
369 | { |
370 | regno = pseudo_regnos[i]; |
371 | if (! flag_ira_share_spill_slots) |
372 | j = slots_num; |
373 | else |
374 | { |
375 | machine_mode mode |
376 | = wider_subreg_mode (PSEUDO_REGNO_MODE (regno), |
377 | lra_reg_info[regno].biggest_mode); |
378 | for (j = 0; j < slots_num; j++) |
379 | if (slots[j].hard_regno < 0 |
380 | /* Although it's possible to share slots between modes |
381 | with constant and non-constant widths, we usually |
382 | get better spill code by keeping the constant and |
383 | non-constant areas separate. */ |
384 | && (GET_MODE_SIZE (mode).is_constant () |
385 | == slots[j].size.is_constant ()) |
386 | && ! (lra_intersected_live_ranges_p |
387 | (slots[j].live_ranges, |
388 | lra_reg_info[regno].live_ranges))) |
389 | break; |
390 | } |
391 | if (j >= slots_num) |
392 | { |
393 | /* New slot. */ |
394 | slots[j].live_ranges = NULL; |
395 | slots[j].size = 0; |
396 | slots[j].align = BITS_PER_UNIT; |
397 | slots[j].regno = slots[j].hard_regno = -1; |
398 | slots[j].mem = NULL_RTX; |
399 | slots_num++; |
400 | } |
401 | add_pseudo_to_slot (regno, j); |
402 | } |
403 | /* Sort regnos according to their slot numbers. */ |
404 | qsort (pseudo_regnos, n, sizeof (int), pseudo_reg_slot_compare); |
405 | } |
406 | |
407 | /* Recursively process LOC in INSN and change spilled pseudos to the |
408 | corresponding memory or spilled hard reg. Ignore spilled pseudos |
409 | created from scratches. Return true if the pseudo's nrefs is equal |
410 | to 0 (the pseudo is not changed in this case); otherwise return false. */ |
411 | static bool |
412 | remove_pseudos (rtx *loc, rtx_insn *insn) |
413 | { |
414 | int i; |
415 | rtx hard_reg; |
416 | const char *fmt; |
417 | enum rtx_code code; |
418 | bool res = false; |
419 | |
420 | if (*loc == NULL_RTX) |
421 | return res; |
422 | code = GET_CODE (*loc); |
423 | if (code == SUBREG && REG_P (SUBREG_REG (*loc))) |
424 | { |
425 | /* Try to remove memory subregs to simplify LRA's job |
426 | and avoid LRA cycling in the case of a subreg memory reload. */ |
427 | res = remove_pseudos (&SUBREG_REG (*loc), insn); |
428 | if (GET_CODE (SUBREG_REG (*loc)) == MEM) |
429 | { |
430 | alter_subreg (loc, false); |
431 | if (GET_CODE (*loc) == MEM) |
432 | { |
433 | lra_update_insn_recog_data (insn); |
434 | if (lra_dump_file != NULL) |
435 | fprintf (lra_dump_file, |
436 | "Memory subreg was simplified in insn #%u\n", |
437 | INSN_UID (insn)); |
438 | } |
439 | } |
440 | return res; |
441 | } |
442 | else if (code == REG && (i = REGNO (*loc)) >= FIRST_PSEUDO_REGISTER |
443 | && lra_get_regno_hard_regno (i) < 0 |
444 | /* We do not want to assign memory for former scratches because |
445 | it might result in an address reload for some targets. In |
446 | any case we transform such pseudos that did not get hard registers |
447 | back into scratches. */ |
448 | && ! ira_former_scratch_p (i)) |
449 | { |
450 | if (lra_reg_info[i].nrefs == 0 |
451 | && pseudo_slots[i].mem == NULL && spill_hard_reg[i] == NULL) |
452 | return true; |
453 | if ((hard_reg = spill_hard_reg[i]) != NULL_RTX) |
454 | *loc = copy_rtx (hard_reg); |
455 | else if (pseudo_slots[i].mem != NULL_RTX) |
456 | /* There might be no memory slot or hard reg for a pseudo when we spill |
457 | the frame pointer after the elimination of the frame pointer to the |
458 | stack pointer became impossible. */ |
459 | { |
460 | rtx x = lra_eliminate_regs_1 (insn, pseudo_slots[i].mem, |
461 | GET_MODE (pseudo_slots[i].mem), |
462 | false, false, 0, true); |
463 | *loc = x != pseudo_slots[i].mem ? x : copy_rtx (x); |
464 | } |
465 | return res; |
466 | } |
467 | |
468 | fmt = GET_RTX_FORMAT (code); |
469 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
470 | { |
471 | if (fmt[i] == 'e') |
472 | res = remove_pseudos (&XEXP (*loc, i), insn) || res; |
473 | else if (fmt[i] == 'E') |
474 | { |
475 | int j; |
476 | |
477 | for (j = XVECLEN (*loc, i) - 1; j >= 0; j--) |
478 | res = remove_pseudos (&XVECEXP (*loc, i, j), insn) || res; |
479 | } |
480 | } |
481 | return res; |
482 | } |
483 | |
484 | /* Convert spilled pseudos into their stack slots or spill hard regs, |
485 | and put the insns to process on the constraint stack (that is, all |
486 | insns in which pseudos were changed to memory or spill hard regs). */ |
487 | static void |
488 | spill_pseudos (void) |
489 | { |
490 | basic_block bb; |
491 | rtx_insn *insn, *curr; |
492 | int i; |
493 | |
494 | auto_bitmap spilled_pseudos (®_obstack); |
495 | auto_bitmap changed_insns (®_obstack); |
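/* Collect the spilled pseudos and all insns that reference them. */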
496 | for (i = FIRST_PSEUDO_REGISTER; i < regs_num; i++) |
497 | { |
498 | if (lra_reg_info[i].nrefs != 0 && lra_get_regno_hard_regno (i) < 0 |
499 | && ! ira_former_scratch_p (i)) |
500 | { |
501 | bitmap_set_bit (spilled_pseudos, i); |
502 | bitmap_ior_into (changed_insns, &lra_reg_info[i].insn_bitmap); |
503 | } |
504 | } |
505 | FOR_EACH_BB_FN (bb, cfun) |
506 | { |
507 | FOR_BB_INSNS_SAFE (bb, insn, curr) |
508 | { |
509 | bool removed_pseudo_p = false; |
510 | |
511 | if (bitmap_bit_p (changed_insns, INSN_UID (insn))) |
512 | { |
513 | rtx *link_loc, link; |
514 | |
515 | removed_pseudo_p = remove_pseudos (&PATTERN (insn), insn); |
516 | if (CALL_P (insn) |
517 | && remove_pseudos (&CALL_INSN_FUNCTION_USAGE (insn), insn)) |
518 | removed_pseudo_p = true; |
519 | for (link_loc = ®_NOTES (insn); |
520 | (link = *link_loc) != NULL_RTX; |
521 | link_loc = &XEXP (link, 1)) |
522 | { |
523 | switch (REG_NOTE_KIND (link)) |
524 | { |
525 | case REG_FRAME_RELATED_EXPR: |
526 | case REG_CFA_DEF_CFA: |
527 | case REG_CFA_ADJUST_CFA: |
528 | case REG_CFA_OFFSET: |
529 | case REG_CFA_REGISTER: |
530 | case REG_CFA_EXPRESSION: |
531 | case REG_CFA_RESTORE: |
532 | case REG_CFA_SET_VDRAP: |
533 | if (remove_pseudos (&XEXP (link, 0), insn)) |
534 | removed_pseudo_p = true; |
535 | break; |
536 | default: |
537 | break; |
538 | } |
539 | } |
540 | if (lra_dump_file != NULL) |
541 | fprintf (lra_dump_file, |
542 | "Changing spilled pseudos to memory in insn #%u\n", |
543 | INSN_UID (insn)); |
544 | lra_push_insn (insn); |
545 | if (lra_reg_spill_p || targetm.different_addr_displacement_p ()) |
546 | lra_set_used_insn_alternative (insn, LRA_UNKNOWN_ALT); |
547 | } |
548 | else if (CALL_P (insn) |
549 | /* Presence of any pseudo in CALL_INSN_FUNCTION_USAGE |
550 | does not affect the value of the insn_bitmap of the |
551 | corresponding lra_reg_info. That is because we |
552 | don't need to reload pseudos in |
553 | CALL_INSN_FUNCTION_USAGEs. So if we process only |
554 | insns in the insn_bitmap of a given pseudo here, we |
555 | can miss the pseudo in some |
556 | CALL_INSN_FUNCTION_USAGEs. */ |
557 | && remove_pseudos (&CALL_INSN_FUNCTION_USAGE (insn), insn)) |
558 | removed_pseudo_p = true; |
559 | if (removed_pseudo_p) |
560 | { |
561 | lra_assert (DEBUG_INSN_P (insn)); |
562 | lra_invalidate_insn_data (insn); |
563 | INSN_VAR_LOCATION_LOC (insn) = gen_rtx_UNKNOWN_VAR_LOC (); |
564 | if (lra_dump_file != NULL) |
565 | fprintf (lra_dump_file, |
566 | "Debug insn #%u is reset because it referenced " |
567 | "removed pseudo\n", INSN_UID (insn)); |
568 | } |
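/* The spilled pseudos no longer live in hard regs, so remove them from the block's live-in and live-out sets. */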
569 | bitmap_and_compl_into (df_get_live_in (bb), spilled_pseudos); |
570 | bitmap_and_compl_into (df_get_live_out (bb), spilled_pseudos); |
571 | } |
572 | } |
573 | } |
574 | |
575 | /* Return true if we need scratch reg assignments. */ |
576 | bool |
577 | lra_need_for_scratch_reg_p (void) |
578 | { |
579 | int i, max_regno = max_reg_num (); |
580 | |
581 | for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++) |
582 | if (lra_reg_info[i].nrefs != 0 && lra_get_regno_hard_regno (i) < 0 |
583 | && ira_former_scratch_p (i)) |
584 | return true; |
585 | return false; |
586 | } |
587 | |
588 | /* Return true if we need to change some pseudos into memory. */ |
589 | bool |
590 | lra_need_for_spills_p (void) |
591 | { |
592 | int i, max_regno = max_reg_num (); |
593 | |
594 | for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++) |
595 | if (lra_reg_info[i].nrefs != 0 && lra_get_regno_hard_regno (i) < 0 |
596 | && ! ira_former_scratch_p (i)) |
597 | return true; |
598 | return false; |
599 | } |
600 | |
601 | /* Change spilled pseudos into memory or spill hard regs. Put changed |
602 | insns on the constraint stack (these insns will be considered on |
603 | the next constraint pass). The changed insns are all insns in |
604 | which pseudos were changed. */ |
605 | void |
606 | lra_spill (void) |
607 | { |
608 | int i, n, n2, curr_regno; |
609 | int *pseudo_regnos; |
610 | |
611 | regs_num = max_reg_num (); |
612 | spill_hard_reg = XNEWVEC (rtx, regs_num); |
613 | pseudo_regnos = XNEWVEC (int, regs_num); |
614 | for (n = 0, i = FIRST_PSEUDO_REGISTER; i < regs_num; i++) |
615 | if (lra_reg_info[i].nrefs != 0 && lra_get_regno_hard_regno (i) < 0 |
616 | /* We do not want to assign memory for former scratches. */ |
617 | && ! ira_former_scratch_p (i)) |
618 | pseudo_regnos[n++] = i; |
619 | lra_assert (n > 0); |
620 | pseudo_slots = XNEWVEC (struct pseudo_slot, regs_num); |
621 | for (i = FIRST_PSEUDO_REGISTER; i < regs_num; i++) |
622 | { |
623 | spill_hard_reg[i] = NULL_RTX; |
624 | pseudo_slots[i].mem = NULL_RTX; |
625 | } |
626 | slots = XNEWVEC (class slot, regs_num); |
627 | /* Sort regnos according to their usage frequencies. */ |
628 | qsort (pseudo_regnos, n, sizeof (int), regno_freq_compare); |
629 | n = assign_spill_hard_regs (pseudo_regnos, n); |
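/* The remaining N pseudos did not get spill hard regs; assign them stack slots. */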
630 | slots_num = 0; |
631 | assign_stack_slot_num_and_sort_pseudos (pseudo_regnos, n); |
632 | for (i = 0; i < n; i++) |
633 | if (pseudo_slots[pseudo_regnos[i]].mem == NULL_RTX) |
634 | assign_mem_slot (pseudo_regnos[i]); |
635 | if ((n2 = lra_update_fp2sp_elimination (pseudo_regnos)) > 0) |
636 | { |
637 | /* Assign stack slots to spilled pseudos assigned to fp. */ |
638 | assign_stack_slot_num_and_sort_pseudos (pseudo_regnos, n2); |
639 | for (i = 0; i < n2; i++) |
640 | if (pseudo_slots[pseudo_regnos[i]].mem == NULL_RTX) |
641 | assign_mem_slot (pseudo_regnos[i]); |
642 | } |
643 | if (n + n2 > 0 && crtl->stack_alignment_needed) |
644 | /* If we have a stack frame, we must align it now. The stack size |
645 | may be a part of the offset computation for register |
646 | elimination. */ |
647 | assign_stack_local (BLKmode, 0, crtl->stack_alignment_needed); |
648 | if (lra_dump_file != NULL) |
649 | { |
650 | for (i = 0; i < slots_num; i++) |
651 | { |
652 | fprintf (lra_dump_file, " Slot %d regnos (width = ", i); |
653 | print_dec (GET_MODE_SIZE (GET_MODE (slots[i].mem)), |
654 | lra_dump_file, SIGNED); |
655 | fprintf (lra_dump_file, "):"); |
656 | for (curr_regno = slots[i].regno;; |
657 | curr_regno = pseudo_slots[curr_regno].next - pseudo_slots) |
658 | { |
659 | fprintf (lra_dump_file, " %d", curr_regno); |
660 | if (pseudo_slots[curr_regno].next == NULL) |
661 | break; |
662 | } |
663 | fprintf (lra_dump_file, "\n"); |
664 | } |
665 | } |
666 | spill_pseudos (); |
667 | free (slots); |
668 | free (pseudo_slots); |
669 | free (pseudo_regnos); |
670 | free (spill_hard_reg); |
671 | } |
672 | |
673 | /* Apply alter_subreg for subregs of regs in *LOC. Use FINAL_P for |
674 | alter_subreg calls. Return true if any subreg of a reg was |
675 | processed. */ |
676 | static bool |
677 | alter_subregs (rtx *loc, bool final_p) |
678 | { |
679 | int i; |
680 | rtx x = *loc; |
681 | bool res; |
682 | const char *fmt; |
683 | enum rtx_code code; |
684 | |
685 | if (x == NULL_RTX) |
686 | return false; |
687 | code = GET_CODE (x); |
688 | if (code == SUBREG && REG_P (SUBREG_REG (x))) |
689 | { |
690 | lra_assert (REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER); |
691 | alter_subreg (loc, final_p); |
692 | return true; |
693 | } |
694 | fmt = GET_RTX_FORMAT (code); |
695 | res = false; |
696 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
697 | { |
698 | if (fmt[i] == 'e') |
699 | { |
700 | if (alter_subregs (&XEXP (x, i), final_p)) |
701 | res = true; |
702 | } |
703 | else if (fmt[i] == 'E') |
704 | { |
705 | int j; |
706 | |
707 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
708 | if (alter_subregs (&XVECEXP (x, i, j), final_p)) |
709 | res = true; |
710 | } |
711 | } |
712 | return res; |
713 | } |
714 | |
715 | /* Finally change pseudos that got hard registers into the corresponding |
716 | hard registers and remove temporary clobbers. */ |
717 | void |
718 | lra_final_code_change (void) |
719 | { |
720 | int i, hard_regno; |
721 | basic_block bb; |
722 | rtx_insn *insn, *curr; |
723 | rtx set; |
724 | int max_regno = max_reg_num (); |
725 | |
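/* Rewrite the shared REG rtx of every allocated pseudo to refer to its hard register. */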
726 | for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++) |
727 | if (lra_reg_info[i].nrefs != 0 |
728 | && (hard_regno = lra_get_regno_hard_regno (i)) >= 0 |
729 | SET_REGNO (regno_reg_rtx[i], hard_regno); |
730 | FOR_EACH_BB_FN (bb, cfun) |
731 | FOR_BB_INSNS_SAFE (bb, insn, curr) |
732 | if (INSN_P (insn)) |
733 | { |
734 | rtx pat = PATTERN (insn); |
735 | |
736 | if (GET_CODE (pat) == USE && XEXP (pat, 0) == const1_rtx) |
737 | { |
738 | /* Remove markers to eliminate critical edges for jump insn |
739 | output reloads (see code in ira.cc::ira). */ |
740 | lra_invalidate_insn_data (insn); |
741 | delete_insn (insn); |
742 | continue; |
743 | } |
744 | if (GET_CODE (pat) == CLOBBER && LRA_TEMP_CLOBBER_P (pat)) |
745 | { |
746 | /* Remove clobbers temporarily created in LRA. We don't |
747 | need them anymore and don't want to waste compiler |
748 | time processing them in a few subsequent passes. */ |
749 | lra_invalidate_insn_data (insn); |
750 | delete_insn (insn); |
751 | continue; |
752 | } |
753 | |
754 | /* IRA can generate move insns involving pseudos. It is |
755 | better to remove them earlier to speed up the compiler a bit. |
756 | It is also better to do it here as they might not pass the |
757 | final RTL check in LRA (e.g. an insn moving a control |
758 | register into itself). So remove a useless move insn |
759 | unless the next insn is a USE marking the return reg (we |
760 | should keep it as some subsequent optimizations assume that |
761 | such original insns are kept). */ |
762 | if (NONJUMP_INSN_P (insn) && GET_CODE (pat) == SET |
763 | && REG_P (SET_SRC (pat)) && REG_P (SET_DEST (pat)) |
764 | && REGNO (SET_SRC (pat)) == REGNO (SET_DEST (pat)) |
765 | && REGNO (SET_SRC (pat)) >= FIRST_PSEUDO_REGISTER) |
766 | { |
767 | lra_invalidate_insn_data (insn); |
768 | delete_insn (insn); |
769 | continue; |
770 | } |
771 | |
772 | lra_insn_recog_data_t id = lra_get_insn_recog_data (insn); |
773 | struct lra_insn_reg *reg; |
774 | |
775 | for (reg = id->regs; reg != NULL; reg = reg->next) |
776 | if (reg->regno >= FIRST_PSEUDO_REGISTER |
777 | && lra_reg_info [reg->regno].nrefs == 0) |
778 | break; |
779 | |
780 | if (reg != NULL) |
781 | { |
782 | /* Pseudos can still be in debug insns in some very rare |
783 | and complicated cases, e.g. when the pseudo was removed by |
784 | inheritance and the debug insn is not in the EBBs where the |
785 | inheritance happened. It is difficult and time |
786 | consuming to find what hard register corresponds to the |
787 | pseudo -- so just remove the debug insn. Another |
788 | solution could be assigning a hard reg/memory but it |
789 | would be misleading info. It is better not to have |
790 | info than to have it wrong. */ |
791 | lra_assert (DEBUG_INSN_P (insn)); |
792 | lra_invalidate_insn_data (insn); |
793 | delete_insn (insn); |
794 | continue; |
795 | } |
796 | |
797 | struct lra_static_insn_data *static_id = id->insn_static_data; |
798 | bool insn_change_p = false; |
799 | |
800 | for (i = id->insn_static_data->n_operands - 1; i >= 0; i--) |
801 | if ((DEBUG_INSN_P (insn) || ! static_id->operand[i].is_operator) |
802 | && alter_subregs (id->operand_loc[i], ! DEBUG_INSN_P (insn))) |
803 | { |
804 | lra_update_dup (id, i); |
805 | insn_change_p = true; |
806 | } |
807 | if ((GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER) |
808 | && alter_subregs (&XEXP (pat, 0), false)) |
809 | insn_change_p = true; |
810 | if (insn_change_p) |
811 | lra_update_operator_dups (id); |
812 | |
813 | if ((set = single_set (insn)) != NULL |
814 | && REG_P (SET_SRC (set)) && REG_P (SET_DEST (set)) |
815 | && REGNO (SET_SRC (set)) == REGNO (SET_DEST (set))) |
816 | { |
817 | /* Remove a useless move insn. IRA can generate move |
818 | insns involving pseudos. It is better to remove them |
819 | earlier to speed up the compiler a bit. It is also |
820 | better to do it here as they might not pass the final RTL |
821 | check in LRA (e.g. an insn moving a control register |
822 | into itself). */ |
823 | lra_invalidate_insn_data (insn); |
824 | delete_insn (insn); |
825 | } |
826 | } |
827 | } |
828 | |