/* Code for RTL transformations to satisfy insn constraints.
   Copyright (C) 2010-2023 Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


/* This file contains code for 3 passes: the constraint pass, the
   inheritance/split pass, and the pass for undoing failed inheritance
   and split.

   The major goal of the constraint pass is to transform RTL to satisfy
   insn and address constraints by:
     o choosing insn alternatives;
     o generating *reload insns* (or reloads in brief) and *reload
       pseudos* which will get necessary hard registers later;
     o substituting pseudos with equivalent values and removing the
       instructions that initialized those pseudos.
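
   As an illustrative sketch (not tied to any particular target): for
   an insn "p1 <- p2 op m" whose chosen alternative requires register
   operands, the pass may emit the reload insn "reload_p <- m" before
   it and replace m by reload_p; reload_p is a reload pseudo which the
   subsequent assignment pass will try to put into a suitable hard
   register.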

   The constraint pass has the biggest and most complicated code in
   LRA.  There are a lot of important details like:
     o reuse of input reload pseudos to simplify reload pseudo
       allocations;
     o some heuristics to choose insn alternative to improve the
       inheritance;
     o early clobbers etc.

   The pass mimics the former reload pass in choosing alternatives
   because the reload pass is oriented to the current machine
   description model.  This might be changed if the machine
   description model is changed.

   There is special code to prevent all of LRA and this pass from
   cycling in case of bugs.

   On the first iteration of the pass we process every instruction and
   choose an alternative for each one.  On subsequent iterations we try
   to avoid reprocessing instructions if we can be sure that the old
   choice is still valid.

   The inheritance/split pass transforms code to achieve inheritance
   and live range splitting.  It is done on a backward traversal of
   EBBs.

   The goal of the inheritance optimization is to reuse values in hard
   registers.  There is an analogous optimization in the old reload
   pass.  The inheritance is achieved by the following transformation:

       reload_p1 <- p               reload_p1 <- p
       ...                          new_p <- reload_p1
       ...                    =>    ...
       reload_p2 <- p               reload_p2 <- new_p

   where p is spilled and not changed between the insns.  Reload_p1 is
   also called *original pseudo* and new_p is called *inheritance
   pseudo*.

   The subsequent assignment pass will try to assign the same (or
   another if it is not possible) hard register to new_p as to
   reload_p1 or reload_p2.

   If the assignment pass fails to assign a hard register to new_p,
   this file will undo the inheritance and restore the original code.
   This is because implementing the above sequence with a spilled
   new_p would make the code much worse.  The inheritance is done in
   EBB scope.  The above is just a simplified example to get an idea
   of the inheritance as the inheritance is also done for non-reload
   insns.

   Splitting (transformation) is also done in EBB scope on the same
   pass as the inheritance:

       r <- ... or ... <- r          r <- ... or ... <- r
       ...                           s <- r (new insn -- save)
       ...                     =>
       ...                           r <- s (new insn -- restore)
       ... <- r                      ... <- r

   The *split pseudo* s is assigned to the hard register of the
   original pseudo or hard register r.

   Splitting is done:
     o In EBBs with high register pressure for global pseudos (living
       in at least 2 BBs) and assigned to hard registers when there
       is more than one reload needing the hard registers;
     o for pseudos needing save/restore code around calls.

   If the split pseudo still has the same hard register as the
   original pseudo after the subsequent assignment pass or the
   original pseudo was split, the opposite transformation is done on
   the same pass for undoing inheritance.  */

#undef REG_OK_STRICT

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "hooks.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "output.h"
#include "addresses.h"
#include "expr.h"
#include "cfgrtl.h"
#include "rtl-error.h"
#include "lra.h"
#include "lra-int.h"
#include "print-rtl.h"
#include "function-abi.h"
#include "rtl-iter.h"

/* Value of LRA_CURR_RELOAD_NUM at the beginning of the BB of the
   current insn.  Remember that LRA_CURR_RELOAD_NUM is the number of
   emitted reload insns.  */
static int bb_reload_num;

/* The current insn being processed and its corresponding single set
   (NULL otherwise), its data (basic block, the insn data, the insn
   static data, and the mode of each operand).  */
static rtx_insn *curr_insn;
static rtx curr_insn_set;
static basic_block curr_bb;
static lra_insn_recog_data_t curr_id;
static struct lra_static_insn_data *curr_static_id;
static machine_mode curr_operand_mode[MAX_RECOG_OPERANDS];
/* Mode of the register substituted by its equivalence with VOIDmode
   (e.g. a constant) and whose subreg is given as an operand of the
   current insn.  VOIDmode in all other cases.  */
static machine_mode original_subreg_reg_mode[MAX_RECOG_OPERANDS];



/* Start numbers for new registers and insns at the current constraints
   pass start.  */
static int new_regno_start;
static int new_insn_uid_start;

/* If LOC is nonnull, strip any outer subreg from it.  */
static inline rtx *
strip_subreg (rtx *loc)
{
  return loc && GET_CODE (*loc) == SUBREG ? &SUBREG_REG (*loc) : loc;
}

/* Return the hard regno of REGNO or, if it was not assigned to a hard
   register, a hard register from its allocno class.  */
static int
get_try_hard_regno (int regno)
{
  int hard_regno;
  enum reg_class rclass;

  if ((hard_regno = regno) >= FIRST_PSEUDO_REGISTER)
    hard_regno = lra_get_regno_hard_regno (regno);
  if (hard_regno >= 0)
    return hard_regno;
  rclass = lra_get_allocno_class (regno);
  if (rclass == NO_REGS)
    return -1;
  return ira_class_hard_regs[rclass][0];
}

/* Return the hard regno of X after removing its subreg.  If X is not a
   register or a subreg of a register, return -1.  If X is a pseudo, use its
   assignment.  If X is a hard regno, return the final hard regno which will be
   after elimination.  */
static int
get_hard_regno (rtx x)
{
  rtx reg;
  int hard_regno;

  reg = x;
  if (SUBREG_P (x))
    reg = SUBREG_REG (x);
  if (! REG_P (reg))
    return -1;
  if (! HARD_REGISTER_NUM_P (hard_regno = REGNO (reg)))
    hard_regno = lra_get_regno_hard_regno (hard_regno);
  if (hard_regno < 0)
    return -1;
  if (HARD_REGISTER_NUM_P (REGNO (reg)))
    hard_regno = lra_get_elimination_hard_regno (hard_regno);
  if (SUBREG_P (x))
    hard_regno += subreg_regno_offset (hard_regno, GET_MODE (reg),
                                       SUBREG_BYTE (x), GET_MODE (x));
  return hard_regno;
}
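
/* An illustrative sketch of the above: if pseudo 100 has been assigned
   hard register 8, then for x = (subreg:SI (reg:DI 100) 4) the function
   returns 8 plus the offset computed by subreg_regno_offset, i.e. the
   hard register that actually holds the requested word.  */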

/* If REGNO is a hard register or has been allocated a hard register,
   return the class of that register.  If REGNO is a reload pseudo
   created by the current constraints pass, return its allocno class.
   Return NO_REGS otherwise.  */
static enum reg_class
get_reg_class (int regno)
{
  int hard_regno;

  if (! HARD_REGISTER_NUM_P (hard_regno = regno))
    hard_regno = lra_get_regno_hard_regno (regno);
  if (hard_regno >= 0)
    {
      hard_regno = lra_get_elimination_hard_regno (hard_regno);
      return REGNO_REG_CLASS (hard_regno);
    }
  if (regno >= new_regno_start)
    return lra_get_allocno_class (regno);
  return NO_REGS;
}

/* Return true if REG_CLASS has enough allocatable hard regs to hold a
   value of mode REG_MODE.  */
static bool
enough_allocatable_hard_regs_p (enum reg_class reg_class,
                                enum machine_mode reg_mode)
{
  int i, j, hard_regno, class_size, nregs;

  if (hard_reg_set_subset_p (reg_class_contents[reg_class], lra_no_alloc_regs))
    return false;
  class_size = ira_class_hard_regs_num[reg_class];
  for (i = 0; i < class_size; i++)
    {
      hard_regno = ira_class_hard_regs[reg_class][i];
      nregs = hard_regno_nregs (hard_regno, reg_mode);
      if (nregs == 1)
        return true;
      for (j = 0; j < nregs; j++)
        if (TEST_HARD_REG_BIT (lra_no_alloc_regs, hard_regno + j)
            || ! TEST_HARD_REG_BIT (reg_class_contents[reg_class],
                                    hard_regno + j))
          break;
      if (j >= nregs)
        return true;
    }
  return false;
}

/* Return true if REG satisfies (or will satisfy) reg class constraint
   CL.  Use elimination first if REG is a hard register.  If REG is a
   reload pseudo created by this constraints pass, assume that it will
   be allocated a hard register from its allocno class, but allow that
   class to be narrowed to CL if it is currently a superset of CL and
   if either:

   - ALLOW_ALL_RELOAD_CLASS_CHANGES_P is true or
   - the instruction we're processing is not a reload move.

   If NEW_CLASS is nonnull, set *NEW_CLASS to the new allocno class of
   REGNO (reg), or NO_REGS if no change in its class was needed.  */
static bool
in_class_p (rtx reg, enum reg_class cl, enum reg_class *new_class,
            bool allow_all_reload_class_changes_p = false)
{
  enum reg_class rclass, common_class;
  machine_mode reg_mode;
  rtx src;
  int regno = REGNO (reg);

  if (new_class != NULL)
    *new_class = NO_REGS;
  if (regno < FIRST_PSEUDO_REGISTER)
    {
      rtx final_reg = reg;
      rtx *final_loc = &final_reg;

      lra_eliminate_reg_if_possible (final_loc);
      return TEST_HARD_REG_BIT (reg_class_contents[cl], REGNO (*final_loc));
    }
  reg_mode = GET_MODE (reg);
  rclass = get_reg_class (regno);
  src = curr_insn_set != NULL ? SET_SRC (curr_insn_set) : NULL;
  if (regno < new_regno_start
      /* Do not allow the constraints for reload instructions to
         influence the classes of new pseudos.  These reloads are
         typically moves that have many alternatives, and restricting
         reload pseudos for one alternative may lead to situations
         where other reload pseudos are no longer allocatable.  */
      || (!allow_all_reload_class_changes_p
          && INSN_UID (curr_insn) >= new_insn_uid_start
          && src != NULL
          && ((REG_P (src) || MEM_P (src))
              || (GET_CODE (src) == SUBREG
                  && (REG_P (SUBREG_REG (src)) || MEM_P (SUBREG_REG (src)))))))
    /* When we don't know what class will be used finally for reload
       pseudos, we use ALL_REGS.  */
    return ((regno >= new_regno_start && rclass == ALL_REGS)
            || (rclass != NO_REGS && ira_class_subset_p[rclass][cl]
                && ! hard_reg_set_subset_p (reg_class_contents[cl],
                                            lra_no_alloc_regs)));
  else
    {
      common_class = ira_reg_class_subset[rclass][cl];
      if (new_class != NULL)
        *new_class = common_class;
      return enough_allocatable_hard_regs_p (common_class, reg_mode);
    }
}

/* Return true if REGNO satisfies a memory constraint.  */
static bool
in_mem_p (int regno)
{
  return get_reg_class (regno) == NO_REGS;
}

/* Return true if ADDR is a valid memory address for mode MODE in address
   space AS, and check that each pseudo has the proper kind of hard
   reg.  */
static bool
valid_address_p (machine_mode mode ATTRIBUTE_UNUSED,
                 rtx addr, addr_space_t as)
{
#ifdef GO_IF_LEGITIMATE_ADDRESS
  lra_assert (ADDR_SPACE_GENERIC_P (as));
  GO_IF_LEGITIMATE_ADDRESS (mode, addr, win);
  return false;

 win:
  return true;
#else
  return targetm.addr_space.legitimate_address_p (mode, addr, 0, as,
                                                  ERROR_MARK);
#endif
}

namespace {
  /* Temporarily eliminates registers in an address (for the lifetime of
     the object).  */
  class address_eliminator {
  public:
    address_eliminator (struct address_info *ad);
    ~address_eliminator ();

  private:
    struct address_info *m_ad;
    rtx *m_base_loc;
    rtx m_base_reg;
    rtx *m_index_loc;
    rtx m_index_reg;
  };
}

address_eliminator::address_eliminator (struct address_info *ad)
  : m_ad (ad),
    m_base_loc (strip_subreg (ad->base_term)),
    m_base_reg (NULL_RTX),
    m_index_loc (strip_subreg (ad->index_term)),
    m_index_reg (NULL_RTX)
{
  if (m_base_loc != NULL)
    {
      m_base_reg = *m_base_loc;
      /* If we have a non-legitimate address which is decomposed not in
         the way we expected, don't do the elimination here.  In such a
         case the address will be reloaded and the elimination will
         finally be done in the reload insn.  */
      if (REG_P (m_base_reg))
        lra_eliminate_reg_if_possible (m_base_loc);
      if (m_ad->base_term2 != NULL)
        *m_ad->base_term2 = *m_ad->base_term;
    }
  if (m_index_loc != NULL)
    {
      m_index_reg = *m_index_loc;
      if (REG_P (m_index_reg))
        lra_eliminate_reg_if_possible (m_index_loc);
    }
}

address_eliminator::~address_eliminator ()
{
  if (m_base_loc && *m_base_loc != m_base_reg)
    {
      *m_base_loc = m_base_reg;
      if (m_ad->base_term2 != NULL)
        *m_ad->base_term2 = *m_ad->base_term;
    }
  if (m_index_loc && *m_index_loc != m_index_reg)
    *m_index_loc = m_index_reg;
}

/* Return true if the eliminated form of AD is a legitimate target address.
   If OP is a MEM, AD is the address within OP, otherwise OP should be
   ignored.  CONSTRAINT is one constraint that the operand may need
   to meet.  */
static bool
valid_address_p (rtx op, struct address_info *ad,
                 enum constraint_num constraint)
{
  address_eliminator eliminator (ad);

  /* Allow a memory OP if it matches CONSTRAINT, even if CONSTRAINT is more
     forgiving than "m".
     Need to extract memory from op for special memory constraint,
     i.e. bcst_mem_operand in i386 backend.  */
  if (MEM_P (extract_mem_from_operand (op))
      && insn_extra_relaxed_memory_constraint (constraint)
      && constraint_satisfied_p (op, constraint))
    return true;

  return valid_address_p (ad->mode, *ad->outer, ad->as);
}

/* For a special_memory_operand, MEM_P (op) could be false,
   e.g. for bcst_mem_operand in the i386 backend.
   Extract and return the real memory operand, or OP itself.  */
rtx
extract_mem_from_operand (rtx op)
{
  for (rtx x = op;; x = XEXP (x, 0))
    {
      if (MEM_P (x))
        return x;
      if (GET_RTX_LENGTH (GET_CODE (x)) != 1
          || GET_RTX_FORMAT (GET_CODE (x))[0] != 'e')
        break;
    }
  return op;
}
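
/* E.g. for the i386 bcst_mem_operand mentioned above, an operand like
   (vec_duplicate:V4SF (mem:SF (reg:DI r))) unwraps to the inner MEM,
   while OP itself is returned as soon as the loop meets a node that is
   not a single-'e' rtx.  (Illustrative sketch.)  */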

/* Return true if the eliminated form of memory reference OP satisfies
   extra (special) memory constraint CONSTRAINT.  */
static bool
satisfies_memory_constraint_p (rtx op, enum constraint_num constraint)
{
  struct address_info ad;
  rtx mem = extract_mem_from_operand (op);
  if (!MEM_P (mem))
    return false;

  decompose_mem_address (&ad, mem);
  address_eliminator eliminator (&ad);
  return constraint_satisfied_p (op, constraint);
}

/* Return true if the eliminated form of address AD satisfies extra
   address constraint CONSTRAINT.  */
static bool
satisfies_address_constraint_p (struct address_info *ad,
                                enum constraint_num constraint)
{
  address_eliminator eliminator (ad);
  return constraint_satisfied_p (*ad->outer, constraint);
}

/* Return true if the eliminated form of address OP satisfies extra
   address constraint CONSTRAINT.  */
static bool
satisfies_address_constraint_p (rtx op, enum constraint_num constraint)
{
  struct address_info ad;

  decompose_lea_address (&ad, &op);
  return satisfies_address_constraint_p (&ad, constraint);
}

/* Initiate equivalences for LRA.  As we keep original equivalences
   before any elimination, we need to make copies; otherwise any change
   in insns might change the equivalences.  */
void
lra_init_equiv (void)
{
  ira_expand_reg_equiv ();
  for (int i = FIRST_PSEUDO_REGISTER; i < max_reg_num (); i++)
    {
      rtx res;

      if ((res = ira_reg_equiv[i].memory) != NULL_RTX)
        ira_reg_equiv[i].memory = copy_rtx (res);
      if ((res = ira_reg_equiv[i].invariant) != NULL_RTX)
        ira_reg_equiv[i].invariant = copy_rtx (res);
    }
}

static rtx loc_equivalence_callback (rtx, const_rtx, void *);

/* Update equivalence for REGNO.  We need to do this as the equivalence
   might contain other pseudos which are changed by their
   equivalences.  */
static void
update_equiv (int regno)
{
  rtx x;

  if ((x = ira_reg_equiv[regno].memory) != NULL_RTX)
    ira_reg_equiv[regno].memory
      = simplify_replace_fn_rtx (x, NULL_RTX, loc_equivalence_callback,
                                 NULL_RTX);
  if ((x = ira_reg_equiv[regno].invariant) != NULL_RTX)
    ira_reg_equiv[regno].invariant
      = simplify_replace_fn_rtx (x, NULL_RTX, loc_equivalence_callback,
                                 NULL_RTX);
}

/* If we have decided to substitute X with another value, return that
   value, otherwise return X.  */
static rtx
get_equiv (rtx x)
{
  int regno;
  rtx res;

  if (! REG_P (x) || (regno = REGNO (x)) < FIRST_PSEUDO_REGISTER
      || ! ira_reg_equiv[regno].defined_p
      || ! ira_reg_equiv[regno].profitable_p
      || lra_get_regno_hard_regno (regno) >= 0)
    return x;
  if ((res = ira_reg_equiv[regno].memory) != NULL_RTX)
    {
      if (targetm.cannot_substitute_mem_equiv_p (res))
        return x;
      return res;
    }
  if ((res = ira_reg_equiv[regno].constant) != NULL_RTX)
    return res;
  if ((res = ira_reg_equiv[regno].invariant) != NULL_RTX)
    return res;
  gcc_unreachable ();
}

/* If we have decided to substitute X with the equivalent value,
   return that value after elimination for INSN, otherwise return
   X.  */
static rtx
get_equiv_with_elimination (rtx x, rtx_insn *insn)
{
  rtx res = get_equiv (x);

  if (x == res || CONSTANT_P (res))
    return res;
  return lra_eliminate_regs_1 (insn, res, GET_MODE (res),
                               false, false, 0, true);
}

/* Set up curr_operand_mode.  */
static void
init_curr_operand_mode (void)
{
  int nop = curr_static_id->n_operands;
  for (int i = 0; i < nop; i++)
    {
      machine_mode mode = GET_MODE (*curr_id->operand_loc[i]);
      if (mode == VOIDmode)
        {
          /* The .md mode for address operands is the mode of the
             addressed value rather than the mode of the address itself.  */
          if (curr_id->icode >= 0 && curr_static_id->operand[i].is_address)
            mode = Pmode;
          else
            mode = curr_static_id->operand[i].mode;
        }
      curr_operand_mode[i] = mode;
    }
}



/* The page contains code to reuse input reloads.  */

/* Structure describing an input reload of the current insn.  */
struct input_reload
{
  /* True for input reload of matched operands.  */
  bool match_p;
  /* Reloaded value.  */
  rtx input;
  /* Reload pseudo used.  */
  rtx reg;
};

/* The number of elements in the following array.  */
static int curr_insn_input_reloads_num;
/* Array containing info about input reloads.  It is used to find the
   same input reload and reuse the reload pseudo in this case.  */
static struct input_reload curr_insn_input_reloads[LRA_MAX_INSN_RELOADS];

/* Initiate data concerning reuse of input reloads for the current
   insn.  */
static void
init_curr_insn_input_reloads (void)
{
  curr_insn_input_reloads_num = 0;
}

/* The canonical form of an rtx inside a MEM is not necessarily the same as the
   canonical form of the rtx outside the MEM.  Fix this up in the case that
   we're reloading an address (and therefore pulling it outside a MEM).  */
static rtx
canonicalize_reload_addr (rtx addr)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, addr, NONCONST)
    {
      rtx x = *iter;
      if (GET_CODE (x) == MULT && CONST_INT_P (XEXP (x, 1)))
        {
          const HOST_WIDE_INT ci = INTVAL (XEXP (x, 1));
          const int pwr2 = exact_log2 (ci);
          if (pwr2 > 0)
            {
              /* Rewrite this to use a shift instead, which is canonical when
                 outside of a MEM.  */
              PUT_CODE (x, ASHIFT);
              XEXP (x, 1) = GEN_INT (pwr2);
            }
        }
    }

  return addr;
}
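
/* An illustrative sketch of the above (not tied to any particular
   target): inside a MEM a scaled index is canonically
     (plus:SI (mult:SI (reg:SI r) (const_int 4)) (reg:SI b))
   but outside a MEM the canonical form uses a shift, so the function
   rewrites the MULT into
     (plus:SI (ashift:SI (reg:SI r) (const_int 2)) (reg:SI b)).  */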

/* Create a new pseudo using MODE, RCLASS, EXCLUDE_START_HARD_REGS, ORIGINAL or
   reuse an existing reload pseudo.  Don't reuse an existing reload pseudo if
   IN_SUBREG_P is true and the reused pseudo should be wrapped up in a SUBREG.
   The result pseudo is returned through RESULT_REG.  Return TRUE if we created
   a new pseudo, FALSE if we reused an existing reload pseudo.  Use TITLE to
   describe new registers for debug purposes.  */
static bool
get_reload_reg (enum op_type type, machine_mode mode, rtx original,
                enum reg_class rclass, HARD_REG_SET *exclude_start_hard_regs,
                bool in_subreg_p, const char *title, rtx *result_reg)
{
  int i, regno;
  enum reg_class new_class;
  bool unique_p = false;

  if (type == OP_OUT)
    {
      /* Output reload registers tend to start out with a conservative
         choice of register class.  Usually this is ALL_REGS, although
         a target might narrow it (for performance reasons) through
         targetm.preferred_reload_class.  It's therefore quite common
         for a reload instruction to require a more restrictive class
         than the class that was originally assigned to the reload register.

         In these situations, it's more efficient to refine the choice
         of register class rather than create a second reload register.
         This also helps to avoid cycling for registers that are only
         used by reload instructions.  */
      if (REG_P (original)
          && (int) REGNO (original) >= new_regno_start
          && INSN_UID (curr_insn) >= new_insn_uid_start
          && in_class_p (original, rclass, &new_class, true))
        {
          unsigned int regno = REGNO (original);
          if (lra_dump_file != NULL)
            {
              fprintf (lra_dump_file, " Reuse r%d for output ", regno);
              dump_value_slim (lra_dump_file, original, 1);
            }
          if (new_class != lra_get_allocno_class (regno))
            lra_change_class (regno, new_class, ", change to", false);
          if (lra_dump_file != NULL)
            fprintf (lra_dump_file, "\n");
          *result_reg = original;
          return false;
        }
      *result_reg
        = lra_create_new_reg_with_unique_value (mode, original, rclass,
                                                exclude_start_hard_regs, title);
      return true;
    }
  /* Prevent reusing the value of an expression with side effects,
     e.g. volatile memory.  */
  if (! side_effects_p (original))
    for (i = 0; i < curr_insn_input_reloads_num; i++)
      {
        if (! curr_insn_input_reloads[i].match_p
            && rtx_equal_p (curr_insn_input_reloads[i].input, original)
            && in_class_p (curr_insn_input_reloads[i].reg, rclass, &new_class))
          {
            rtx reg = curr_insn_input_reloads[i].reg;
            regno = REGNO (reg);
            /* If the input is equal to the original and both are VOIDmode,
               GET_MODE (reg) might still be different from MODE.
               Ensure we don't return *result_reg with the wrong mode.  */
            if (GET_MODE (reg) != mode)
              {
                if (in_subreg_p)
                  continue;
                if (maybe_lt (GET_MODE_SIZE (GET_MODE (reg)),
                              GET_MODE_SIZE (mode)))
                  continue;
                reg = lowpart_subreg (mode, reg, GET_MODE (reg));
                if (reg == NULL_RTX || GET_CODE (reg) != SUBREG)
                  continue;
              }
            *result_reg = reg;
            if (lra_dump_file != NULL)
              {
                fprintf (lra_dump_file, " Reuse r%d for reload ", regno);
                dump_value_slim (lra_dump_file, original, 1);
              }
            if (new_class != lra_get_allocno_class (regno))
              lra_change_class (regno, new_class, ", change to", false);
            if (lra_dump_file != NULL)
              fprintf (lra_dump_file, "\n");
            return false;
          }
        /* If we have an input reload with a different mode, make sure it
           will get a different hard reg.  */
        else if (REG_P (original)
                 && REG_P (curr_insn_input_reloads[i].input)
                 && REGNO (original) == REGNO (curr_insn_input_reloads[i].input)
                 && (GET_MODE (original)
                     != GET_MODE (curr_insn_input_reloads[i].input)))
          unique_p = true;
      }
  *result_reg = (unique_p
                 ? lra_create_new_reg_with_unique_value
                 : lra_create_new_reg) (mode, original, rclass,
                                        exclude_start_hard_regs, title);
  lra_assert (curr_insn_input_reloads_num < LRA_MAX_INSN_RELOADS);
  curr_insn_input_reloads[curr_insn_input_reloads_num].input = original;
  curr_insn_input_reloads[curr_insn_input_reloads_num].match_p = false;
  curr_insn_input_reloads[curr_insn_input_reloads_num++].reg = *result_reg;
  return true;
}


/* The page contains major code to choose the current insn alternative
   and generate reloads for it.  */

/* Return the offset from REGNO of the least significant register
   in (reg:MODE REGNO).

   This function is used to tell whether two registers satisfy
   a matching constraint.  (reg:MODE1 REGNO1) matches (reg:MODE2 REGNO2) if:

         REGNO1 + lra_constraint_offset (REGNO1, MODE1)
         == REGNO2 + lra_constraint_offset (REGNO2, MODE2)  */
int
lra_constraint_offset (int regno, machine_mode mode)
{
  lra_assert (regno < FIRST_PSEUDO_REGISTER);

  scalar_int_mode int_mode;
  if (WORDS_BIG_ENDIAN
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD)
    return hard_regno_nregs (regno, mode) - 1;
  return 0;
}
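
/* For example (an illustrative sketch): on a 32-bit WORDS_BIG_ENDIAN
   target, lra_constraint_offset (10, DImode) is 1 because the least
   significant word lives in the second of the two hard registers, so
   (reg:DI 10) matches (reg:SI 11): 10 + 1 == 11 + 0.  */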

/* Like rtx_equal_p except that it allows a REG and a SUBREG to match
   if they are the same hard reg, and has special hacks for
   auto-increment and auto-decrement.  This is specifically intended for
   process_alt_operands to use in determining whether two operands
   match.  X is the operand whose number is the lower of the two.

   It is supposed that X is the output operand and Y is the input
   operand.  Y_HARD_REGNO is the final hard regno of register Y or of
   the register in subreg Y as we know it now.  Otherwise, it is a
   negative value.  */
static bool
operands_match_p (rtx x, rtx y, int y_hard_regno)
{
  int i;
  RTX_CODE code = GET_CODE (x);
  const char *fmt;

  if (x == y)
    return true;
  if ((code == REG || (code == SUBREG && REG_P (SUBREG_REG (x))))
      && (REG_P (y) || (GET_CODE (y) == SUBREG && REG_P (SUBREG_REG (y)))))
    {
      int j;

      i = get_hard_regno (x);
      if (i < 0)
        goto slow;

      if ((j = y_hard_regno) < 0)
        goto slow;

      i += lra_constraint_offset (i, GET_MODE (x));
      j += lra_constraint_offset (j, GET_MODE (y));

      return i == j;
    }

  /* If two operands must match, because they are really a single
     operand of an assembler insn, then two post-increments are invalid
     because the assembler insn would increment only once.  On the
     other hand, a post-increment matches ordinary indexing if the
     post-increment is the output operand.  */
  if (code == POST_DEC || code == POST_INC || code == POST_MODIFY)
    return operands_match_p (XEXP (x, 0), y, y_hard_regno);

  /* Two pre-increments are invalid because the assembler insn would
     increment only once.  On the other hand, a pre-increment matches
     ordinary indexing if the pre-increment is the input operand.  */
  if (GET_CODE (y) == PRE_DEC || GET_CODE (y) == PRE_INC
      || GET_CODE (y) == PRE_MODIFY)
    return operands_match_p (x, XEXP (y, 0), -1);

 slow:

  if (code == REG && REG_P (y))
    return REGNO (x) == REGNO (y);

  if (code == REG && GET_CODE (y) == SUBREG && REG_P (SUBREG_REG (y))
      && x == SUBREG_REG (y))
    return true;
  if (GET_CODE (y) == REG && code == SUBREG && REG_P (SUBREG_REG (x))
      && SUBREG_REG (x) == y)
    return true;

  /* Now we have disposed of all the cases in which different rtx
     codes can match.  */
  if (code != GET_CODE (y))
    return false;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.  */
  if (GET_MODE (x) != GET_MODE (y))
    return false;

  switch (code)
    {
    CASE_CONST_UNIQUE:
      return false;

    case CONST_VECTOR:
      if (!same_vector_encodings_p (x, y))
        return false;
      break;

    case LABEL_REF:
      return label_ref_label (x) == label_ref_label (y);
    case SYMBOL_REF:
      return XSTR (x, 0) == XSTR (y, 0);

    default:
      break;
    }

  /* Compare the elements.  If any pair of corresponding elements fail
     to match, return false for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int val, j;
      switch (fmt[i])
        {
        case 'w':
          if (XWINT (x, i) != XWINT (y, i))
            return false;
          break;

        case 'i':
          if (XINT (x, i) != XINT (y, i))
            return false;
          break;

        case 'p':
          if (maybe_ne (SUBREG_BYTE (x), SUBREG_BYTE (y)))
            return false;
          break;

        case 'e':
          val = operands_match_p (XEXP (x, i), XEXP (y, i), -1);
          if (val == 0)
            return false;
          break;

        case '0':
          break;

        case 'E':
          if (XVECLEN (x, i) != XVECLEN (y, i))
            return false;
          for (j = XVECLEN (x, i) - 1; j >= 0; --j)
            {
              val = operands_match_p (XVECEXP (x, i, j), XVECEXP (y, i, j), -1);
              if (val == 0)
                return false;
            }
          break;

          /* It is believed that rtx's at this level will never
             contain anything but integers and other rtx's, except for
             within LABEL_REFs and SYMBOL_REFs.  */
        default:
          gcc_unreachable ();
        }
    }
  return true;
}

/* True if X is a constant that can be forced into the constant pool.
   MODE is the mode of the operand, or VOIDmode if not known.  */
#define CONST_POOL_OK_P(MODE, X)		\
  ((MODE) != VOIDmode				\
   && CONSTANT_P (X)				\
   && GET_CODE (X) != HIGH			\
   && GET_MODE_SIZE (MODE).is_constant ()	\
   && !targetm.cannot_force_const_mem (MODE, X))

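/* For instance (illustrative, and subject to the target's
   cannot_force_const_mem hook): a (const_double:DF ...) operand
   normally satisfies CONST_POOL_OK_P and can be spilled to the
   constant pool, while (high:SI (symbol_ref ...)) never does because
   of the HIGH check.  */
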
/* True if C is a non-empty register class that has too few registers
   to be safely used as a reload target class.  */
#define SMALL_REGISTER_CLASS_P(C)		\
  (ira_class_hard_regs_num [(C)] == 1		\
   || (ira_class_hard_regs_num [(C)] >= 1	\
       && targetm.class_likely_spilled_p (C)))

/* If REG is a reload pseudo, try to make its class satisfy CL.  */
static void
narrow_reload_pseudo_class (rtx reg, enum reg_class cl)
{
  enum reg_class rclass;

  /* Do not make a more accurate class from generated reloads.  They
     are mostly moves with a lot of constraints.  Making a more
     accurate class may result in a very narrow class and the
     impossibility of finding registers for several reloads of one
     insn.  */
  if (INSN_UID (curr_insn) >= new_insn_uid_start)
    return;
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);
  if (! REG_P (reg) || (int) REGNO (reg) < new_regno_start)
    return;
  if (in_class_p (reg, cl, &rclass) && rclass != cl)
    lra_change_class (REGNO (reg), rclass, " Change to", true);
}

/* Searches X for any reference to a reg with the same value as REGNO,
   returning the rtx of the reference found if any.  Otherwise,
   returns NULL_RTX.  */
static rtx
regno_val_use_in (unsigned int regno, rtx x)
{
  const char *fmt;
  int i, j;
  rtx tem;

  if (REG_P (x) && lra_reg_info[REGNO (x)].val == lra_reg_info[regno].val)
    return x;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if ((tem = regno_val_use_in (regno, XEXP (x, i))))
            return tem;
        }
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if ((tem = regno_val_use_in (regno, XVECEXP (x, i, j))))
            return tem;
    }

  return NULL_RTX;
}

/* Return true if all current insn non-output operands except INS (it
   has a negative end marker) do not use pseudos with the same value
   as REGNO.  */
static bool
check_conflict_input_operands (int regno, signed char *ins)
{
  int in;
  int n_operands = curr_static_id->n_operands;

  for (int nop = 0; nop < n_operands; nop++)
    if (! curr_static_id->operand[nop].is_operator
        && curr_static_id->operand[nop].type != OP_OUT)
      {
        for (int i = 0; (in = ins[i]) >= 0; i++)
          if (in == nop)
            break;
        if (in < 0
            && regno_val_use_in (regno, *curr_id->operand_loc[nop]) != NULL_RTX)
          return false;
      }
  return true;
}

/* Generate reloads for matching OUT and INS (array of input operand numbers
   with end marker -1) with reg class GOAL_CLASS and EXCLUDE_START_HARD_REGS,
   considering output operands OUTS (similar array to INS) needing to be in
   different registers.  Add input and output reloads correspondingly to the
   lists *BEFORE and *AFTER.  OUT might be negative.  In this case we generate
   input reloads for matched input operands INS.  EARLY_CLOBBER_P is a flag
   that the output operand is early clobbered for the chosen alternative.  */
static void
match_reload (signed char out, signed char *ins, signed char *outs,
              enum reg_class goal_class, HARD_REG_SET *exclude_start_hard_regs,
              rtx_insn **before, rtx_insn **after, bool early_clobber_p)
{
  bool out_conflict;
  int i, in;
  rtx new_in_reg, new_out_reg, reg;
  machine_mode inmode, outmode;
  rtx in_rtx = *curr_id->operand_loc[ins[0]];
  rtx out_rtx = out < 0 ? in_rtx : *curr_id->operand_loc[out];

  inmode = curr_operand_mode[ins[0]];
  outmode = out < 0 ? inmode : curr_operand_mode[out];
  push_to_sequence (*before);
  if (inmode != outmode)
    {
      /* process_alt_operands has already checked that the mode sizes
         are ordered.  */
      if (partial_subreg_p (outmode, inmode))
        {
          bool asm_p = asm_noperands (PATTERN (curr_insn)) >= 0;
          int hr;
          HARD_REG_SET temp_hard_reg_set;

          if (asm_p && (hr = get_hard_regno (out_rtx)) >= 0
              && hard_regno_nregs (hr, inmode) > 1)
            {
              /* See gcc.c-torture/execute/20030222-1.c.
                 Consider the code for a 32-bit (e.g. BE) target:
                   int i, v; long x; x = v; asm ("" : "=r" (i) : "0" (x));
                 We generate the following RTL with reload insns:
                   1. subreg:si(x:di, 0) = 0;
                   2. subreg:si(x:di, 4) = v:si;
                   3. t:di = x:di, dead x;
                   4. asm ("" : "=r" (subreg:si(t:di,4)) : "0" (t:di))
                   5. i:si = subreg:si(t:di,4);
                 If we assign the hard reg of x to t, dead code
                 elimination will remove insn #2 and we will use an
                 uninitialized hard reg.  So exclude the hard reg of x
                 for t.  We could ignore this problem for non-empty asms
                 using the whole x value, but it is hard to check that
                 the asm is expanded into insns really using x and
                 setting r.  */
              CLEAR_HARD_REG_SET (temp_hard_reg_set);
              if (exclude_start_hard_regs != NULL)
                temp_hard_reg_set = *exclude_start_hard_regs;
              SET_HARD_REG_BIT (temp_hard_reg_set, hr);
              exclude_start_hard_regs = &temp_hard_reg_set;
            }
          reg = new_in_reg
            = lra_create_new_reg_with_unique_value (inmode, in_rtx, goal_class,
                                                    exclude_start_hard_regs,
                                                    "");
          new_out_reg = gen_lowpart_SUBREG (outmode, reg);
          LRA_SUBREG_P (new_out_reg) = 1;
          /* If the input reg is dying here, we can use the same hard
             register for REG and IN_RTX.  We do it only for original
             pseudos as reload pseudos can die although original
             pseudos still live where reload pseudos die.  */
          if (REG_P (in_rtx) && (int) REGNO (in_rtx) < lra_new_regno_start
              && find_regno_note (curr_insn, REG_DEAD, REGNO (in_rtx))
              && (!early_clobber_p
                  || check_conflict_input_operands (REGNO (in_rtx), ins)))
            lra_assign_reg_val (REGNO (in_rtx), REGNO (reg));
        }
      else
        {
          reg = new_out_reg
            = lra_create_new_reg_with_unique_value (outmode, out_rtx,
                                                    goal_class,
                                                    exclude_start_hard_regs,
                                                    "");
          new_in_reg = gen_lowpart_SUBREG (inmode, reg);
          /* NEW_IN_REG is a non-paradoxical subreg.  We don't want
             NEW_OUT_REG living above.  We add a clobber clause for
             this.  This is just a temporary clobber.  We can remove
             it at the end of LRA work.  */
          rtx_insn *clobber = emit_clobber (new_out_reg);
          LRA_TEMP_CLOBBER_P (PATTERN (clobber)) = 1;
          LRA_SUBREG_P (new_in_reg) = 1;
          if (GET_CODE (in_rtx) == SUBREG)
            {
              rtx subreg_reg = SUBREG_REG (in_rtx);

              /* If SUBREG_REG is dying here and sub-registers IN_RTX
                 and NEW_IN_REG are similar, we can use the same hard
                 register for REG and SUBREG_REG.  */
              if (REG_P (subreg_reg)
                  && (int) REGNO (subreg_reg) < lra_new_regno_start
                  && GET_MODE (subreg_reg) == outmode
                  && known_eq (SUBREG_BYTE (in_rtx), SUBREG_BYTE (new_in_reg))
                  && find_regno_note (curr_insn, REG_DEAD, REGNO (subreg_reg))
                  && (! early_clobber_p
                      || check_conflict_input_operands (REGNO (subreg_reg),
                                                        ins)))
                lra_assign_reg_val (REGNO (subreg_reg), REGNO (reg));
            }
        }
    }
  else
    {
      /* Pseudos have values -- see comments for lra_reg_info.
         Different pseudos with the same value do not conflict even if
         they live in the same place.  When we create a pseudo we
         assign the value of the original pseudo (if any) from which we
         created the new pseudo.  If we create the pseudo from the
         input pseudo, the new pseudo will have no conflict with the
         input pseudo which is wrong when the input pseudo lives after
         the insn and as the new pseudo value is changed by the insn
         output.  Therefore we create the new pseudo from the output
         except for the case when we have a single matched dying input
         pseudo.

         We cannot reuse the current output register because we might
         have a situation like "a <- a op b", where the constraints
         force the second input operand ("b") to match the output
         operand ("a").  "b" must then be copied into a new register
         so that it doesn't clobber the current value of "a".

         We cannot use the same value if the output pseudo is
         early clobbered or the input pseudo is mentioned in the
         output, e.g. as an address part in memory, because
         output reload will actually extend the pseudo liveness.
         We don't care about eliminable hard regs here as we are
         interested only in pseudos.  */

      /* The matching input's register value is the same as one of the
         other output operands.  Output operands in a parallel insn
         must be in different registers.  */
      out_conflict = false;
      if (REG_P (in_rtx))
        {
          for (i = 0; outs[i] >= 0; i++)
            {
              rtx other_out_rtx = *curr_id->operand_loc[outs[i]];
              if (outs[i] != out && REG_P (other_out_rtx)
                  && (regno_val_use_in (REGNO (in_rtx), other_out_rtx)
                      != NULL_RTX))
                {
                  out_conflict = true;
                  break;
                }
            }
        }

      new_in_reg = new_out_reg
        = (! early_clobber_p && ins[1] < 0 && REG_P (in_rtx)
           && (int) REGNO (in_rtx) < lra_new_regno_start
           && find_regno_note (curr_insn, REG_DEAD, REGNO (in_rtx))
           && (! early_clobber_p
               || check_conflict_input_operands (REGNO (in_rtx), ins))
           && (out < 0
               || regno_val_use_in (REGNO (in_rtx), out_rtx) == NULL_RTX)
           && !out_conflict
           ? lra_create_new_reg (inmode, in_rtx, goal_class,
                                 exclude_start_hard_regs, "")
           : lra_create_new_reg_with_unique_value (outmode, out_rtx, goal_class,
                                                   exclude_start_hard_regs,
                                                   ""));
    }
  /* The input operand can be the result of transformations performed
     before processing the insn constraints.  One example of such a
     transformation is subreg reloading (see function
     simplify_operand_subreg).  The new pseudos created by the
     transformations might have an inaccurate class (ALL_REGS) and we
     should make their classes more accurate.  */
  narrow_reload_pseudo_class (in_rtx, goal_class);
  lra_emit_move (copy_rtx (new_in_reg), in_rtx);
  *before = get_insns ();
  end_sequence ();
  /* Add the new pseudo to consider values of subsequent input reload
     pseudos.  */
  lra_assert (curr_insn_input_reloads_num < LRA_MAX_INSN_RELOADS);
  curr_insn_input_reloads[curr_insn_input_reloads_num].input = in_rtx;
  curr_insn_input_reloads[curr_insn_input_reloads_num].match_p = true;
  curr_insn_input_reloads[curr_insn_input_reloads_num++].reg = new_in_reg;
  for (i = 0; (in = ins[i]) >= 0; i++)
    if (GET_MODE (*curr_id->operand_loc[in]) == VOIDmode
        || GET_MODE (new_in_reg) == GET_MODE (*curr_id->operand_loc[in]))
      *curr_id->operand_loc[in] = new_in_reg;
    else
      {
        lra_assert
          (GET_MODE (new_out_reg) == GET_MODE (*curr_id->operand_loc[in]));
        *curr_id->operand_loc[in] = new_out_reg;
      }
  lra_update_dups (curr_id, ins);
  if (out < 0)
    return;
  /* See a comment for the input operand above.  */
  narrow_reload_pseudo_class (out_rtx, goal_class);
  if (find_reg_note (curr_insn, REG_UNUSED, out_rtx) == NULL_RTX)
    {
      reg = SUBREG_P (out_rtx) ? SUBREG_REG (out_rtx) : out_rtx;
      start_sequence ();
      /* If we had a strict_low_part, use it also in the reload to keep
         other parts unchanged, but do it only for regs as
         strict_low_part makes no sense for memory and probably there is
         no insn pattern to match the reload insn in the memory case.  */
      if (out >= 0 && curr_static_id->operand[out].strict_low && REG_P (reg))
        out_rtx = gen_rtx_STRICT_LOW_PART (VOIDmode, out_rtx);
      lra_emit_move (out_rtx, copy_rtx (new_out_reg));
      emit_insn (*after);
      *after = get_insns ();
      end_sequence ();
    }
  *curr_id->operand_loc[out] = new_out_reg;
  lra_update_dup (curr_id, out);
}

/* Return the register class which is the union of all reg classes in
   the insn constraint alternative string starting with P.  */
static enum reg_class
reg_class_from_constraints (const char *p)
{
  int c, len;
  enum reg_class op_class = NO_REGS;

  do
    switch ((c = *p, len = CONSTRAINT_LEN (c, p)), c)
      {
      case '#':
      case ',':
        return op_class;

      case 'g':
        op_class = reg_class_subunion[op_class][GENERAL_REGS];
        break;

      default:
        enum constraint_num cn = lookup_constraint (p);
        enum reg_class cl = reg_class_for_constraint (cn);
        if (cl == NO_REGS)
          {
            if (insn_extra_address_constraint (cn))
              op_class
                = (reg_class_subunion
                   [op_class][base_reg_class (VOIDmode, ADDR_SPACE_GENERIC,
                                              ADDRESS, SCRATCH)]);
            break;
          }

        op_class = reg_class_subunion[op_class][cl];
        break;
      }
  while ((p += len), c);
  return op_class;
}
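
/* An illustrative sketch: for the alternative string "r,m" the loop
   above stops at ',' and returns GENERAL_REGS; for an extra address
   constraint such as "p" it returns the base register class used for
   addresses.  The exact classes are target-dependent.  */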

/* If OP is a register, return the class of the register as per
   get_reg_class, otherwise return NO_REGS.  */
static inline enum reg_class
get_op_class (rtx op)
{
  return REG_P (op) ? get_reg_class (REGNO (op)) : NO_REGS;
}

/* Return the generated insn mem_pseudo:=val if TO_P or val:=mem_pseudo
   otherwise.  If the modes of MEM_PSEUDO and VAL are different, use a
   SUBREG for VAL to make them equal.  */
static rtx_insn *
emit_spill_move (bool to_p, rtx mem_pseudo, rtx val)
{
  if (GET_MODE (mem_pseudo) != GET_MODE (val))
    {
      /* Usually the size of mem_pseudo is greater than the size of val
         but in rare cases it can be smaller as it can be defined by the
         target-dependent macro HARD_REGNO_CALLER_SAVE_MODE.  */
      if (! MEM_P (val))
        {
          val = gen_lowpart_SUBREG (GET_MODE (mem_pseudo),
                                    GET_CODE (val) == SUBREG
                                    ? SUBREG_REG (val) : val);
          LRA_SUBREG_P (val) = 1;
        }
      else
        {
          mem_pseudo = gen_lowpart_SUBREG (GET_MODE (val), mem_pseudo);
          LRA_SUBREG_P (mem_pseudo) = 1;
        }
    }
  return to_p ? gen_move_insn (mem_pseudo, val)
              : gen_move_insn (val, mem_pseudo);
}
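
/* An illustrative sketch of the mode-mismatch case above: when
   spilling (reg:HI p) whose caller-save mode is SImode, VAL is wrapped
   in a lowpart SImode SUBREG so that both sides of the generated move
   have the same mode.  */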

/* Process a special case insn (register move), return true if we
   don't need to process it anymore.  INSN should be a single set
   insn.  Set up that RTL was changed through CHANGE_P and that the hook
   TARGET_SECONDARY_MEMORY_NEEDED says to use secondary memory through
   SEC_MEM_P.  */
static bool
check_and_process_move (bool *change_p, bool *sec_mem_p ATTRIBUTE_UNUSED)
{
  int sregno, dregno;
  rtx dest, src, dreg, sreg, new_reg, scratch_reg;
  rtx_insn *before;
  enum reg_class dclass, sclass, secondary_class;
  secondary_reload_info sri;

  lra_assert (curr_insn_set != NULL_RTX);
  dreg = dest = SET_DEST (curr_insn_set);
  sreg = src = SET_SRC (curr_insn_set);
  if (GET_CODE (dest) == SUBREG)
    dreg = SUBREG_REG (dest);
  if (GET_CODE (src) == SUBREG)
    sreg = SUBREG_REG (src);
  if (! (REG_P (dreg) || MEM_P (dreg)) || ! (REG_P (sreg) || MEM_P (sreg)))
    return false;
  sclass = dclass = NO_REGS;
  if (REG_P (dreg))
    dclass = get_reg_class (REGNO (dreg));
  gcc_assert (dclass < LIM_REG_CLASSES && dclass >= NO_REGS);
  if (dclass == ALL_REGS)
    /* ALL_REGS is used for new pseudos created by transformations
       like reload of SUBREG_REG (see function
       simplify_operand_subreg).  We don't know their class yet.  We
       should figure out the class from processing the insn
       constraints not in this fast path function.  Even if ALL_REGS
       were a right class for the pseudo, secondary_... hooks usually
       are not defined for ALL_REGS.  */
    return false;
  if (REG_P (sreg))
    sclass = get_reg_class (REGNO (sreg));
  gcc_assert (sclass < LIM_REG_CLASSES && sclass >= NO_REGS);
  if (sclass == ALL_REGS)
    /* See comments above.  */
    return false;
  if (sclass == NO_REGS && dclass == NO_REGS)
    return false;
  if (targetm.secondary_memory_needed (GET_MODE (src), sclass, dclass)
      && ((sclass != NO_REGS && dclass != NO_REGS)
          || (GET_MODE (src)
              != targetm.secondary_memory_needed_mode (GET_MODE (src)))))
    {
      *sec_mem_p = true;
      return false;
    }
  if (! REG_P (dreg) || ! REG_P (sreg))
    return false;
  sri.prev_sri = NULL;
  sri.icode = CODE_FOR_nothing;
  sri.extra_cost = 0;
  secondary_class = NO_REGS;
  /* Set up a hard register for a reload pseudo for the hook
     secondary_reload because some targets just ignore unassigned
     pseudos in the hook.  */
  if (dclass != NO_REGS && lra_get_regno_hard_regno (REGNO (dreg)) < 0)
    {
      dregno = REGNO (dreg);
      reg_renumber[dregno] = ira_class_hard_regs[dclass][0];
    }
  else
    dregno = -1;
  if (sclass != NO_REGS && lra_get_regno_hard_regno (REGNO (sreg)) < 0)
    {
      sregno = REGNO (sreg);
      reg_renumber[sregno] = ira_class_hard_regs[sclass][0];
    }
  else
    sregno = -1;
  if (sclass != NO_REGS)
    secondary_class
      = (enum reg_class) targetm.secondary_reload (false, dest,
                                                   (reg_class_t) sclass,
                                                   GET_MODE (src), &sri);
  if (sclass == NO_REGS
      || ((secondary_class != NO_REGS || sri.icode != CODE_FOR_nothing)
          && dclass != NO_REGS))
    {
      enum reg_class old_sclass = secondary_class;
      secondary_reload_info old_sri = sri;

      sri.prev_sri = NULL;
      sri.icode = CODE_FOR_nothing;
      sri.extra_cost = 0;
      secondary_class
        = (enum reg_class) targetm.secondary_reload (true, src,
                                                     (reg_class_t) dclass,
                                                     GET_MODE (src), &sri);
      /* Check the target hook consistency.  */
      lra_assert
        ((secondary_class == NO_REGS && sri.icode == CODE_FOR_nothing)
         || (old_sclass == NO_REGS && old_sri.icode == CODE_FOR_nothing)
         || (secondary_class == old_sclass && sri.icode == old_sri.icode));
    }
  if (sregno >= 0)
    reg_renumber [sregno] = -1;
  if (dregno >= 0)
    reg_renumber [dregno] = -1;
  if (secondary_class == NO_REGS && sri.icode == CODE_FOR_nothing)
    return false;
  *change_p = true;
  new_reg = NULL_RTX;
  if (secondary_class != NO_REGS)
    new_reg = lra_create_new_reg_with_unique_value (GET_MODE (src), NULL_RTX,
                                                    secondary_class, NULL,
                                                    "secondary");
  start_sequence ();
  if (sri.icode == CODE_FOR_nothing)
    lra_emit_move (new_reg, src);
  else
    {
      enum reg_class scratch_class;

      scratch_class = (reg_class_from_constraints
                       (insn_data[sri.icode].operand[2].constraint));
      scratch_reg = (lra_create_new_reg_with_unique_value
                     (insn_data[sri.icode].operand[2].mode, NULL_RTX,
                      scratch_class, NULL, "scratch"));
      emit_insn (GEN_FCN (sri.icode) (new_reg != NULL_RTX ? new_reg : dest,
                                      src, scratch_reg));
    }
  before = get_insns ();
  end_sequence ();
  lra_process_new_insns (curr_insn, before, NULL, "Inserting the move");
  if (new_reg != NULL_RTX)
    SET_SRC (curr_insn_set) = new_reg;
  else
    {
      if (lra_dump_file != NULL)
        {
          fprintf (lra_dump_file, "Deleting move %u\n", INSN_UID (curr_insn));
          dump_insn_slim (lra_dump_file, curr_insn);
        }
      lra_set_insn_deleted (curr_insn);
      return true;
    }
  return false;
}
1442 | |
1443 | /* The following data describe the result of process_alt_operands. |
1444 | The data are used in curr_insn_transform to generate reloads. */ |
1445 | |
1446 | /* The chosen reg classes which should be used for the corresponding |
1447 | operands. */ |
1448 | static enum reg_class goal_alt[MAX_RECOG_OPERANDS]; |
1449 | /* Hard registers which cannot be a start hard register for the corresponding |
1450 | operands. */ |
1451 | static HARD_REG_SET goal_alt_exclude_start_hard_regs[MAX_RECOG_OPERANDS]; |
1452 | /* True if the operand should be the same as another operand and that |
1453 | other operand does not need a reload. */ |
1454 | static bool goal_alt_match_win[MAX_RECOG_OPERANDS]; |
1455 | /* True if the operand does not need a reload. */ |
1456 | static bool goal_alt_win[MAX_RECOG_OPERANDS]; |
1457 | /* True if the operand can be offsetable memory. */ |
1458 | static bool goal_alt_offmemok[MAX_RECOG_OPERANDS]; |
1459 | /* The number of an operand to which given operand can be matched to. */ |
1460 | static int goal_alt_matches[MAX_RECOG_OPERANDS]; |
1461 | /* The number of elements in the following array. */ |
1462 | static int goal_alt_dont_inherit_ops_num; |
1463 | /* Numbers of operands whose reload pseudos should not be inherited. */ |
1464 | static int goal_alt_dont_inherit_ops[MAX_RECOG_OPERANDS]; |
1465 | /* True if we should try only this alternative for the next constraint sub-pass |
1466 | to speed up the sub-pass. */ |
1467 | static bool goal_reuse_alt_p; |
1468 | /* True if the insn commutative operands should be swapped. */ |
1469 | static bool goal_alt_swapped; |
1470 | /* The chosen insn alternative. */ |
1471 | static int goal_alt_number; |
1472 | /* True if output reload of the stack pointer should be generated. */ |
1473 | static bool goal_alt_out_sp_reload_p; |
1474 | |
1475 | /* True if the corresponding operand is the result of an equivalence |
1476 | substitution. */ |
1477 | static bool equiv_substition_p[MAX_RECOG_OPERANDS]; |
1478 | |
/* The following four variables are used to choose the best insn
   alternative.  They reflect final characteristics of the best
   alternative.  */
1482 | |
1483 | /* Number of necessary reloads and overall cost reflecting the |
1484 | previous value and other unpleasantness of the best alternative. */ |
1485 | static int best_losers, best_overall; |
/* Overall number of hard registers used for reloads.  For example, on
   some targets we need 2 general registers to reload DFmode and only
   one floating point register.  */
1489 | static int best_reload_nregs; |
/* Overall number reflecting the distances of previous reloads of the
   same value.  The distances are counted from the current BB start.
   It is used to improve inheritance chances.  */
1493 | static int best_reload_sum; |
1494 | |
/* True if the current insn should have no input or output reloads,
   respectively.  */
1497 | static bool no_input_reloads_p, no_output_reloads_p; |
1498 | |
1499 | /* True if we swapped the commutative operands in the current |
1500 | insn. */ |
1501 | static int curr_swapped; |
1502 | |
/* If CHECK_ONLY_P is false, arrange for address element *LOC to be a
1504 | register of class CL. Add any input reloads to list BEFORE. AFTER |
1505 | is nonnull if *LOC is an automodified value; handle that case by |
1506 | adding the required output reloads to list AFTER. Return true if |
1507 | the RTL was changed. |
1508 | |
   If CHECK_ONLY_P is true, check that *LOC is a correct address
   register.  Return false if the address register is correct.  */
1511 | static bool |
1512 | process_addr_reg (rtx *loc, bool check_only_p, rtx_insn **before, rtx_insn **after, |
1513 | enum reg_class cl) |
1514 | { |
1515 | int regno; |
1516 | enum reg_class rclass, new_class; |
1517 | rtx reg; |
1518 | rtx new_reg; |
1519 | machine_mode mode; |
1520 | bool subreg_p, before_p = false; |
1521 | |
1522 | subreg_p = GET_CODE (*loc) == SUBREG; |
1523 | if (subreg_p) |
1524 | { |
1525 | reg = SUBREG_REG (*loc); |
1526 | mode = GET_MODE (reg); |
1527 | |
      /* For a mode whose size is bigger than ptr_mode, there is unlikely
	 to be a "mov" between two registers of different classes, but
	 there normally will be a "mov" which transfers an element of a
	 vector register into a general register, and this will normally
	 be a subreg which should be reloaded as a whole.  This is
	 particularly likely to be triggered when -fno-split-wide-types
	 is specified.  */
1534 | if (!REG_P (reg) |
	  || in_class_p (reg, cl, &new_class)
1536 | || known_le (GET_MODE_SIZE (mode), GET_MODE_SIZE (ptr_mode))) |
1537 | loc = &SUBREG_REG (*loc); |
1538 | } |
1539 | |
1540 | reg = *loc; |
1541 | mode = GET_MODE (reg); |
1542 | if (! REG_P (reg)) |
1543 | { |
1544 | if (check_only_p) |
1545 | return true; |
1546 | /* Always reload memory in an address even if the target supports |
1547 | such addresses. */ |
1548 | new_reg = lra_create_new_reg_with_unique_value (mode, reg, cl, NULL, |
						      "address");
1550 | before_p = true; |
1551 | } |
1552 | else |
1553 | { |
1554 | regno = REGNO (reg); |
1555 | rclass = get_reg_class (regno); |
1556 | if (! check_only_p |
	  && (*loc = get_equiv_with_elimination (reg, curr_insn)) != reg)
1558 | { |
1559 | if (lra_dump_file != NULL) |
1560 | { |
	      fprintf (lra_dump_file,
		       "Changing pseudo %d in address of insn %u on equiv ",
		       REGNO (reg), INSN_UID (curr_insn));
	      dump_value_slim (lra_dump_file, *loc, 1);
	      fprintf (lra_dump_file, "\n");
1566 | } |
1567 | *loc = copy_rtx (*loc); |
1568 | } |
      if (*loc != reg || ! in_class_p (reg, cl, &new_class))
1570 | { |
1571 | if (check_only_p) |
1572 | return true; |
1573 | reg = *loc; |
	  if (get_reload_reg (after == NULL ? OP_IN : OP_INOUT,
			      mode, reg, cl, NULL,
			      subreg_p, "address", &new_reg))
1577 | before_p = true; |
1578 | } |
1579 | else if (new_class != NO_REGS && rclass != new_class) |
1580 | { |
1581 | if (check_only_p) |
1582 | return true; |
	  lra_change_class (regno, new_class, " Change to", true);
1584 | return false; |
1585 | } |
1586 | else |
1587 | return false; |
1588 | } |
1589 | if (before_p) |
1590 | { |
1591 | push_to_sequence (*before); |
1592 | lra_emit_move (new_reg, reg); |
1593 | *before = get_insns (); |
1594 | end_sequence (); |
1595 | } |
1596 | *loc = new_reg; |
1597 | if (after != NULL) |
1598 | { |
1599 | start_sequence (); |
1600 | lra_emit_move (before_p ? copy_rtx (reg) : reg, new_reg); |
1601 | emit_insn (*after); |
1602 | *after = get_insns (); |
1603 | end_sequence (); |
1604 | } |
1605 | return true; |
1606 | } |
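
/* An illustrative sketch of the transformation above: for an address
   use (mem:SI (reg:SI p)) where pseudo p does not satisfy class CL, a
   fresh "address" pseudo A of class CL is created, "A <- p" is added
   to the BEFORE list, and *LOC is replaced by A; if AFTER is nonnull
   (an automodified value), "p <- A" is also added to the AFTER list to
   store the updated value back.  */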
1607 | |
/* Insert move insn in simplify_operand_subreg.  BEFORE returns
   the insn to be inserted before curr insn.  AFTER returns the
   insn to be inserted after curr insn.  ORIGREG and NEWREG
   are the original reg and new reg for the reload.  */
1612 | static void |
1613 | insert_move_for_subreg (rtx_insn **before, rtx_insn **after, rtx origreg, |
1614 | rtx newreg) |
1615 | { |
1616 | if (before) |
1617 | { |
1618 | push_to_sequence (*before); |
1619 | lra_emit_move (newreg, origreg); |
1620 | *before = get_insns (); |
1621 | end_sequence (); |
1622 | } |
1623 | if (after) |
1624 | { |
1625 | start_sequence (); |
1626 | lra_emit_move (origreg, newreg); |
1627 | emit_insn (*after); |
1628 | *after = get_insns (); |
1629 | end_sequence (); |
1630 | } |
1631 | } |
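
/* E.g. for an OP_INOUT operand both lists are used: "NEWREG <- ORIGREG"
   goes onto the BEFORE list and "ORIGREG <- NEWREG" onto the AFTER
   list, so the insn operates on NEWREG while ORIGREG keeps the final
   value.  */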
1632 | |
1633 | static bool valid_address_p (machine_mode mode, rtx addr, addr_space_t as); |
1634 | static bool process_address (int, bool, rtx_insn **, rtx_insn **); |
1635 | |
1636 | /* Make reloads for subreg in operand NOP with internal subreg mode |
1637 | REG_MODE, add new reloads for further processing. Return true if |
1638 | any change was done. */ |
1639 | static bool |
1640 | simplify_operand_subreg (int nop, machine_mode reg_mode) |
1641 | { |
1642 | int hard_regno, inner_hard_regno; |
1643 | rtx_insn *before, *after; |
1644 | machine_mode mode, innermode; |
1645 | rtx reg, new_reg; |
1646 | rtx operand = *curr_id->operand_loc[nop]; |
1647 | enum reg_class regclass; |
1648 | enum op_type type; |
1649 | |
1650 | before = after = NULL; |
1651 | |
1652 | if (GET_CODE (operand) != SUBREG) |
1653 | return false; |
1654 | |
1655 | mode = GET_MODE (operand); |
1656 | reg = SUBREG_REG (operand); |
1657 | innermode = GET_MODE (reg); |
1658 | type = curr_static_id->operand[nop].type; |
1659 | if (MEM_P (reg)) |
1660 | { |
      const bool addr_was_valid
	= valid_address_p (innermode, XEXP (reg, 0), MEM_ADDR_SPACE (reg));
1663 | alter_subreg (curr_id->operand_loc[nop], false); |
1664 | rtx subst = *curr_id->operand_loc[nop]; |
1665 | lra_assert (MEM_P (subst)); |
1666 | const bool addr_is_valid = valid_address_p (GET_MODE (subst), |
1667 | XEXP (subst, 0), |
1668 | MEM_ADDR_SPACE (subst)); |
1669 | if (!addr_was_valid |
1670 | || addr_is_valid |
	  || ((get_constraint_type (lookup_constraint
				    (curr_static_id->operand[nop].constraint))
	       != CT_SPECIAL_MEMORY)
1674 | /* We still can reload address and if the address is |
1675 | valid, we can remove subreg without reloading its |
1676 | inner memory. */ |
	      && valid_address_p (GET_MODE (subst),
				  regno_reg_rtx
				  [ira_class_hard_regs
				   [base_reg_class (GET_MODE (subst),
						    MEM_ADDR_SPACE (subst),
						    ADDRESS, SCRATCH)][0]],
				  MEM_ADDR_SPACE (subst))))
1684 | { |
1685 | /* If we change the address for a paradoxical subreg of memory, the |
1686 | new address might violate the necessary alignment or the access |
1687 | might be slow; take this into consideration. We need not worry |
1688 | about accesses beyond allocated memory for paradoxical memory |
1689 | subregs as we don't substitute such equiv memory (see processing |
1690 | equivalences in function lra_constraints) and because for spilled |
1691 | pseudos we allocate stack memory enough for the biggest |
1692 | corresponding paradoxical subreg. |
1693 | |
1694 | However, do not blindly simplify a (subreg (mem ...)) for |
1695 | WORD_REGISTER_OPERATIONS targets as this may lead to loading junk |
1696 | data into a register when the inner is narrower than outer or |
1697 | missing important data from memory when the inner is wider than |
1698 | outer. This rule only applies to modes that are no wider than |
1699 | a word. |
1700 | |
	     If valid memory becomes invalid after subreg elimination
	     and the address might be different, we still have to reload
	     the memory.  */
1705 | if ((! addr_was_valid |
1706 | || addr_is_valid |
1707 | || known_eq (GET_MODE_SIZE (mode), GET_MODE_SIZE (innermode))) |
	  && !(maybe_ne (GET_MODE_PRECISION (mode),
			 GET_MODE_PRECISION (innermode))
1710 | && known_le (GET_MODE_SIZE (mode), UNITS_PER_WORD) |
1711 | && known_le (GET_MODE_SIZE (innermode), UNITS_PER_WORD) |
1712 | && WORD_REGISTER_OPERATIONS) |
1713 | && (!(MEM_ALIGN (subst) < GET_MODE_ALIGNMENT (mode) |
1714 | && targetm.slow_unaligned_access (mode, MEM_ALIGN (subst))) |
1715 | || (MEM_ALIGN (reg) < GET_MODE_ALIGNMENT (innermode) |
1716 | && targetm.slow_unaligned_access (innermode, |
1717 | MEM_ALIGN (reg))))) |
1718 | return true; |
1719 | |
1720 | *curr_id->operand_loc[nop] = operand; |
1721 | |
1722 | /* But if the address was not valid, we cannot reload the MEM without |
1723 | reloading the address first. */ |
1724 | if (!addr_was_valid) |
1725 | process_address (nop, false, &before, &after); |
1726 | |
1727 | /* INNERMODE is fast, MODE slow. Reload the mem in INNERMODE. */ |
1728 | enum reg_class rclass |
1729 | = (enum reg_class) targetm.preferred_reload_class (reg, ALL_REGS); |
	  if (get_reload_reg (curr_static_id->operand[nop].type, innermode,
			      reg, rclass, NULL,
			      true, "slow/invalid mem", &new_reg))
1733 | { |
1734 | bool insert_before, insert_after; |
1735 | bitmap_set_bit (&lra_subreg_reload_pseudos, REGNO (new_reg)); |
1736 | |
	      insert_before = (type != OP_OUT
			       || partial_subreg_p (mode, innermode));
1739 | insert_after = type != OP_IN; |
	      insert_move_for_subreg (insert_before ? &before : NULL,
				      insert_after ? &after : NULL,
				      reg, new_reg);
1743 | } |
1744 | SUBREG_REG (operand) = new_reg; |
1745 | |
1746 | /* Convert to MODE. */ |
1747 | reg = operand; |
1748 | rclass |
1749 | = (enum reg_class) targetm.preferred_reload_class (reg, ALL_REGS); |
	  if (get_reload_reg (curr_static_id->operand[nop].type, mode, reg,
			      rclass, NULL,
			      true, "slow/invalid mem", &new_reg))
1753 | { |
1754 | bool insert_before, insert_after; |
1755 | bitmap_set_bit (&lra_subreg_reload_pseudos, REGNO (new_reg)); |
1756 | |
1757 | insert_before = type != OP_OUT; |
1758 | insert_after = type != OP_IN; |
	      insert_move_for_subreg (insert_before ? &before : NULL,
				      insert_after ? &after : NULL,
				      reg, new_reg);
1762 | } |
1763 | *curr_id->operand_loc[nop] = new_reg; |
	  lra_process_new_insns (curr_insn, before, after,
				 "Inserting slow/invalid mem reload");
1766 | return true; |
1767 | } |
1768 | |
      /* If the address was valid and became invalid, prefer to reload
	 the memory.  A typical case is when the index scale should
	 correspond to the memory mode.  */
1772 | *curr_id->operand_loc[nop] = operand; |
1773 | /* Do not return false here as the MEM_P (reg) will be processed |
1774 | later in this function. */ |
1775 | } |
1776 | else if (REG_P (reg) && REGNO (reg) < FIRST_PSEUDO_REGISTER) |
1777 | { |
1778 | alter_subreg (curr_id->operand_loc[nop], false); |
1779 | return true; |
1780 | } |
1781 | else if (CONSTANT_P (reg)) |
1782 | { |
      /* Try to simplify a subreg of a constant.  It is usually a result
	 of an equivalence substitution.  */
1785 | if (innermode == VOIDmode |
1786 | && (innermode = original_subreg_reg_mode[nop]) == VOIDmode) |
1787 | innermode = curr_static_id->operand[nop].mode; |
      if ((new_reg = simplify_subreg (mode, reg, innermode,
				      SUBREG_BYTE (operand))) != NULL_RTX)
1790 | { |
1791 | *curr_id->operand_loc[nop] = new_reg; |
1792 | return true; |
1793 | } |
1794 | } |
  /* Put the constant into memory when we have mixed modes.  It generates
     better code in most cases as it does not need secondary reload
     memory.  It also prevents LRA from looping when it uses secondary
     reload memory again and again.  */
1799 | if (CONSTANT_P (reg) && CONST_POOL_OK_P (reg_mode, reg) |
1800 | && SCALAR_INT_MODE_P (reg_mode) != SCALAR_INT_MODE_P (mode)) |
1801 | { |
1802 | SUBREG_REG (operand) = force_const_mem (reg_mode, reg); |
1803 | alter_subreg (curr_id->operand_loc[nop], false); |
1804 | return true; |
1805 | } |
1806 | auto fp_subreg_can_be_simplified_after_reload_p = [] (machine_mode innermode, |
1807 | poly_uint64 offset, |
1808 | machine_mode mode) { |
1809 | reload_completed = 1; |
1810 | bool res = simplify_subreg_regno (FRAME_POINTER_REGNUM, |
1811 | innermode, |
1812 | offset, mode) >= 0; |
1813 | reload_completed = 0; |
1814 | return res; |
1815 | }; |
1816 | /* Force a reload of the SUBREG_REG if this is a constant or PLUS or |
1817 | if there may be a problem accessing OPERAND in the outer |
1818 | mode. */ |
1819 | if ((REG_P (reg) |
1820 | && REGNO (reg) >= FIRST_PSEUDO_REGISTER |
1821 | && (hard_regno = lra_get_regno_hard_regno (REGNO (reg))) >= 0 |
       /* Don't reload paradoxical subregs because we could loop,
	  repeatedly producing a final regno out of the hard regs
	  range.  */
       && (hard_regno_nregs (hard_regno, innermode)
	   >= hard_regno_nregs (hard_regno, mode))
       && simplify_subreg_regno (hard_regno, innermode,
				 SUBREG_BYTE (operand), mode) < 0
       /* Exclude reloading of the frame pointer in a subreg if it
	  cannot be simplified here only because the reload is not
	  finished yet.  */
1830 | && (hard_regno != FRAME_POINTER_REGNUM |
1831 | || !fp_subreg_can_be_simplified_after_reload_p (innermode, |
1832 | SUBREG_BYTE (operand), |
1833 | mode)) |
1834 | /* Don't reload subreg for matching reload. It is actually |
1835 | valid subreg in LRA. */ |
1836 | && ! LRA_SUBREG_P (operand)) |
1837 | || CONSTANT_P (reg) || GET_CODE (reg) == PLUS || MEM_P (reg)) |
1838 | { |
1839 | enum reg_class rclass; |
1840 | |
1841 | if (REG_P (reg)) |
	/* There is a high probability that we will get the same class
	   for the new pseudo and then the same insn, which means
	   infinite looping.  So spill the new pseudo.  */
1845 | rclass = NO_REGS; |
1846 | else |
1847 | /* The class will be defined later in curr_insn_transform. */ |
1848 | rclass |
1849 | = (enum reg_class) targetm.preferred_reload_class (reg, ALL_REGS); |
1850 | |
      if (get_reload_reg (curr_static_id->operand[nop].type, reg_mode, reg,
			  rclass, NULL,
			  true, "subreg reg", &new_reg))
1854 | { |
1855 | bool insert_before, insert_after; |
1856 | bitmap_set_bit (&lra_subreg_reload_pseudos, REGNO (new_reg)); |
1857 | |
1858 | insert_before = (type != OP_OUT |
1859 | || read_modify_subreg_p (operand)); |
1860 | insert_after = (type != OP_IN); |
	  insert_move_for_subreg (insert_before ? &before : NULL,
				  insert_after ? &after : NULL,
				  reg, new_reg);
1864 | } |
1865 | SUBREG_REG (operand) = new_reg; |
      lra_process_new_insns (curr_insn, before, after,
			     "Inserting subreg reload");
1868 | return true; |
1869 | } |
  /* Force a reload for a paradoxical subreg.  For a paradoxical subreg,
     IRA allocates a hardreg to the inner pseudo reg according to its
     mode instead of the outermode, so the size of the hardreg may not
     be enough to contain the outermode operand; in that case we may
     need to insert a reload for the reg.  For the following two types
     of paradoxical subreg, we need to insert a reload:
1876 | 1. If the op_type is OP_IN, and the hardreg could not be paired with |
1877 | other hardreg to contain the outermode operand |
1878 | (checked by in_hard_reg_set_p), we need to insert the reload. |
1879 | 2. If the op_type is OP_OUT or OP_INOUT. |
1880 | |
1881 | Here is a paradoxical subreg example showing how the reload is generated: |
1882 | |
1883 | (insn 5 4 7 2 (set (reg:TI 106 [ __comp ]) |
1884 | (subreg:TI (reg:DI 107 [ __comp ]) 0)) {*movti_internal_rex64} |
1885 | |
     In IRA, reg107 is allocated to a DImode hardreg.  We use x86-64 as
     an example here: if reg107 is assigned to hardreg R15, then because
     R15 is the last hardreg, the compiler cannot find another hardreg
     to pair with R15 to contain TImode data.  So we insert a TImode
     reload reg180 for it.  After the reload is inserted:
1891 | |
1892 | (insn 283 0 0 (set (subreg:DI (reg:TI 180 [orig:107 __comp ] [107]) 0) |
1893 | (reg:DI 107 [ __comp ])) -1 |
1894 | (insn 5 4 7 2 (set (reg:TI 106 [ __comp ]) |
1895 | (subreg:TI (reg:TI 180 [orig:107 __comp ] [107]) 0)) {*movti_internal_rex64} |
1896 | |
1897 | Two reload hard registers will be allocated to reg180 to save TImode data |
1898 | in LRA_assign. |
1899 | |
1900 | For LRA pseudos this should normally be handled by the biggest_mode |
1901 | mechanism. However, it's possible for new uses of an LRA pseudo |
1902 | to be introduced after we've allocated it, such as when undoing |
1903 | inheritance, and the allocated register might not then be appropriate |
1904 | for the new uses. */ |
  else if (REG_P (reg)
	   && REGNO (reg) >= FIRST_PSEUDO_REGISTER
	   && paradoxical_subreg_p (operand)
	   && (inner_hard_regno = lra_get_regno_hard_regno (REGNO (reg))) >= 0
	   && ((hard_regno
		= simplify_subreg_regno (inner_hard_regno, innermode,
					 SUBREG_BYTE (operand), mode)) < 0
	       || ((hard_regno_nregs (inner_hard_regno, innermode)
		    < hard_regno_nregs (hard_regno, mode))
		   && (regclass = lra_get_allocno_class (REGNO (reg)))
		   && (type != OP_IN
		       || !in_hard_reg_set_p (reg_class_contents[regclass],
					      mode, hard_regno)
		       || overlaps_hard_reg_set_p (lra_no_alloc_regs,
						   mode, hard_regno)))))
1920 | { |
1921 | /* The class will be defined later in curr_insn_transform. */ |
1922 | enum reg_class rclass |
1923 | = (enum reg_class) targetm.preferred_reload_class (reg, ALL_REGS); |
1924 | |
      if (get_reload_reg (curr_static_id->operand[nop].type, mode, reg,
			  rclass, NULL,
			  true, "paradoxical subreg", &new_reg))
1928 | { |
1929 | rtx subreg; |
1930 | bool insert_before, insert_after; |
1931 | |
	  PUT_MODE (new_reg, mode);
1933 | subreg = gen_lowpart_SUBREG (innermode, new_reg); |
1934 | bitmap_set_bit (&lra_subreg_reload_pseudos, REGNO (new_reg)); |
1935 | |
1936 | insert_before = (type != OP_OUT); |
1937 | insert_after = (type != OP_IN); |
	  insert_move_for_subreg (insert_before ? &before : NULL,
				  insert_after ? &after : NULL,
				  reg, subreg);
1941 | } |
1942 | SUBREG_REG (operand) = new_reg; |
      lra_process_new_insns (curr_insn, before, after,
			     "Inserting paradoxical subreg reload");
1945 | return true; |
1946 | } |
1947 | return false; |
1948 | } |
1949 | |
/* Return TRUE if X refers to a hard register from SET.  */
1951 | static bool |
1952 | uses_hard_regs_p (rtx x, HARD_REG_SET set) |
1953 | { |
1954 | int i, j, x_hard_regno; |
1955 | machine_mode mode; |
1956 | const char *fmt; |
1957 | enum rtx_code code; |
1958 | |
1959 | if (x == NULL_RTX) |
1960 | return false; |
1961 | code = GET_CODE (x); |
1962 | mode = GET_MODE (x); |
1963 | |
1964 | if (code == SUBREG) |
1965 | { |
1966 | /* For all SUBREGs we want to check whether the full multi-register |
1967 | overlaps the set. For normal SUBREGs this means 'get_hard_regno' of |
1968 | the inner register, for paradoxical SUBREGs this means the |
1969 | 'get_hard_regno' of the full SUBREG and for complete SUBREGs either is |
1970 | fine. Use the wider mode for all cases. */ |
1971 | rtx subreg = SUBREG_REG (x); |
1972 | mode = wider_subreg_mode (x); |
1973 | if (mode == GET_MODE (subreg)) |
1974 | { |
1975 | x = subreg; |
1976 | code = GET_CODE (x); |
1977 | } |
1978 | } |
1979 | |
1980 | if (REG_P (x) || SUBREG_P (x)) |
1981 | { |
1982 | x_hard_regno = get_hard_regno (x); |
1983 | return (x_hard_regno >= 0 |
	      && overlaps_hard_reg_set_p (set, mode, x_hard_regno));
1985 | } |
1986 | fmt = GET_RTX_FORMAT (code); |
1987 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
1988 | { |
1989 | if (fmt[i] == 'e') |
1990 | { |
1991 | if (uses_hard_regs_p (XEXP (x, i), set)) |
1992 | return true; |
1993 | } |
1994 | else if (fmt[i] == 'E') |
1995 | { |
1996 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
1997 | if (uses_hard_regs_p (XVECEXP (x, i, j), set)) |
1998 | return true; |
1999 | } |
2000 | } |
2001 | return false; |
2002 | } |
2003 | |
2004 | /* Return true if OP is a spilled pseudo. */ |
2005 | static inline bool |
2006 | spilled_pseudo_p (rtx op) |
2007 | { |
2008 | return (REG_P (op) |
2009 | && REGNO (op) >= FIRST_PSEUDO_REGISTER && in_mem_p (REGNO (op))); |
2010 | } |
2011 | |
2012 | /* Return true if X is a general constant. */ |
2013 | static inline bool |
2014 | general_constant_p (rtx x) |
2015 | { |
2016 | return CONSTANT_P (x) && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (x)); |
2017 | } |
2018 | |
2019 | static bool |
2020 | reg_in_class_p (rtx reg, enum reg_class cl) |
2021 | { |
2022 | if (cl == NO_REGS) |
2023 | return get_reg_class (REGNO (reg)) == NO_REGS; |
2024 | return in_class_p (reg, cl, NULL); |
2025 | } |
2026 | |
2027 | /* Return true if SET of RCLASS contains no hard regs which can be |
2028 | used in MODE. */ |
2029 | static bool |
2030 | prohibited_class_reg_set_mode_p (enum reg_class rclass, |
2031 | HARD_REG_SET &set, |
2032 | machine_mode mode) |
2033 | { |
2034 | HARD_REG_SET temp; |
2035 | |
2036 | lra_assert (hard_reg_set_subset_p (reg_class_contents[rclass], set)); |
2037 | temp = set & ~lra_no_alloc_regs; |
2038 | return (hard_reg_set_subset_p |
	  (temp, ira_prohibited_class_mode_regs[rclass][mode]));
2040 | } |
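
/* For instance (an illustrative target convention): if multi-register
   values of MODE must start in an even-numbered register, then
   ira_prohibited_class_mode_regs[RCLASS][MODE] contains all the odd
   start regs of RCLASS, and the function above returns true when the
   allocatable part of SET consists only of such regs.  */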
2041 | |
2042 | |
2043 | /* Used to check validity info about small class input operands. It |
2044 | should be incremented at start of processing an insn |
2045 | alternative. */ |
2046 | static unsigned int curr_small_class_check = 0; |
2047 | |
2048 | /* Update number of used inputs of class OP_CLASS for operand NOP |
2049 | of alternative NALT. Return true if we have more such class operands |
2050 | than the number of available regs. */ |
2051 | static bool |
2052 | update_and_check_small_class_inputs (int nop, int nalt, |
2053 | enum reg_class op_class) |
2054 | { |
2055 | static unsigned int small_class_check[LIM_REG_CLASSES]; |
2056 | static int small_class_input_nums[LIM_REG_CLASSES]; |
2057 | |
2058 | if (SMALL_REGISTER_CLASS_P (op_class) |
      /* We are interested in classes that became small because some hard
	 regs were fixed, e.g. by the user through GCC options.  */
2061 | && hard_reg_set_intersect_p (reg_class_contents[op_class], |
2062 | ira_no_alloc_regs) |
2063 | && (curr_static_id->operand[nop].type != OP_OUT |
2064 | || TEST_BIT (curr_static_id->operand[nop].early_clobber_alts, nalt))) |
2065 | { |
2066 | if (small_class_check[op_class] == curr_small_class_check) |
2067 | small_class_input_nums[op_class]++; |
2068 | else |
2069 | { |
2070 | small_class_check[op_class] = curr_small_class_check; |
2071 | small_class_input_nums[op_class] = 1; |
2072 | } |
2073 | if (small_class_input_nums[op_class] > ira_class_hard_regs_num[op_class]) |
2074 | return true; |
2075 | } |
2076 | return false; |
2077 | } |
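
/* An illustrative scenario: suppose -ffixed- options shrink a
   two-register class to one allocatable register.  If two input (or
   early clobbered output) operands of one alternative both require
   that class, the second call above for the same alternative pushes
   the count over ira_class_hard_regs_num, so the alternative can be
   rejected.  */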
2078 | |
2079 | /* Print operand constraints for alternative ALT_NUMBER of the current |
2080 | insn. */ |
2081 | static void |
2082 | print_curr_insn_alt (int alt_number) |
2083 | { |
2084 | for (int i = 0; i < curr_static_id->n_operands; i++) |
2085 | { |
2086 | const char *p = (curr_static_id->operand_alternative |
2087 | [alt_number * curr_static_id->n_operands + i].constraint); |
2088 | if (*p == '\0') |
2089 | continue; |
      fprintf (lra_dump_file, " (%d) ", i);
2091 | for (; *p != '\0' && *p != ',' && *p != '#'; p++) |
	fputc (*p, lra_dump_file);
2093 | } |
2094 | } |
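
/* E.g. for an alternative whose operand constraints are "r" and "m"
   this prints " (0) r (1) m": each operand's constraint substring for
   the alternative, stopped at '\0', ',' or '#'.  */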
2095 | |
2096 | /* Major function to choose the current insn alternative and what |
2097 | operands should be reloaded and how. If ONLY_ALTERNATIVE is not |
2098 | negative we should consider only this alternative. Return false if |
2099 | we cannot choose the alternative or find how to reload the |
2100 | operands. */ |
2101 | static bool |
2102 | process_alt_operands (int only_alternative) |
2103 | { |
2104 | bool ok_p = false; |
2105 | int nop, overall, nalt; |
2106 | int n_alternatives = curr_static_id->n_alternatives; |
2107 | int n_operands = curr_static_id->n_operands; |
2108 | /* LOSERS counts the operands that don't fit this alternative and |
2109 | would require loading. */ |
2110 | int losers; |
2111 | int addr_losers; |
2112 | /* REJECT is a count of how undesirable this alternative says it is |
2113 | if any reloading is required. If the alternative matches exactly |
2114 | then REJECT is ignored, but otherwise it gets this much counted |
2115 | against it in addition to the reloading needed. */ |
2116 | int reject; |
2117 | /* This is defined by '!' or '?' alternative constraint and added to |
2118 | reject. But in some cases it can be ignored. */ |
2119 | int static_reject; |
2120 | int op_reject; |
2121 | /* The number of elements in the following array. */ |
2122 | int early_clobbered_regs_num; |
2123 | /* Numbers of operands which are early clobber registers. */ |
2124 | int early_clobbered_nops[MAX_RECOG_OPERANDS]; |
2125 | enum reg_class curr_alt[MAX_RECOG_OPERANDS]; |
2126 | HARD_REG_SET curr_alt_set[MAX_RECOG_OPERANDS]; |
2127 | HARD_REG_SET curr_alt_exclude_start_hard_regs[MAX_RECOG_OPERANDS]; |
2128 | bool curr_alt_match_win[MAX_RECOG_OPERANDS]; |
2129 | bool curr_alt_win[MAX_RECOG_OPERANDS]; |
2130 | bool curr_alt_offmemok[MAX_RECOG_OPERANDS]; |
2131 | int curr_alt_matches[MAX_RECOG_OPERANDS]; |
2132 | /* The number of elements in the following array. */ |
2133 | int curr_alt_dont_inherit_ops_num; |
2134 | /* Numbers of operands whose reload pseudos should not be inherited. */ |
2135 | int curr_alt_dont_inherit_ops[MAX_RECOG_OPERANDS]; |
2136 | bool curr_reuse_alt_p; |
2137 | /* True if output stack pointer reload should be generated for the current |
2138 | alternative. */ |
2139 | bool curr_alt_out_sp_reload_p; |
2140 | rtx op; |
2141 | /* The register when the operand is a subreg of register, otherwise the |
2142 | operand itself. */ |
2143 | rtx no_subreg_reg_operand[MAX_RECOG_OPERANDS]; |
2144 | /* The register if the operand is a register or subreg of register, |
2145 | otherwise NULL. */ |
2146 | rtx operand_reg[MAX_RECOG_OPERANDS]; |
2147 | int hard_regno[MAX_RECOG_OPERANDS]; |
2148 | machine_mode biggest_mode[MAX_RECOG_OPERANDS]; |
2149 | int reload_nregs, reload_sum; |
2150 | bool costly_p; |
2151 | enum reg_class cl; |
2152 | |
2153 | /* Calculate some data common for all alternatives to speed up the |
2154 | function. */ |
2155 | for (nop = 0; nop < n_operands; nop++) |
2156 | { |
2157 | rtx reg; |
2158 | |
2159 | op = no_subreg_reg_operand[nop] = *curr_id->operand_loc[nop]; |
2160 | /* The real hard regno of the operand after the allocation. */ |
      hard_regno[nop] = get_hard_regno (op);
2162 | |
2163 | operand_reg[nop] = reg = op; |
2164 | biggest_mode[nop] = GET_MODE (op); |
2165 | if (GET_CODE (op) == SUBREG) |
2166 | { |
	  biggest_mode[nop] = wider_subreg_mode (op);
2168 | operand_reg[nop] = reg = SUBREG_REG (op); |
2169 | } |
2170 | if (! REG_P (reg)) |
2171 | operand_reg[nop] = NULL_RTX; |
2172 | else if (REGNO (reg) >= FIRST_PSEUDO_REGISTER |
2173 | || ((int) REGNO (reg) |
2174 | == lra_get_elimination_hard_regno (REGNO (reg)))) |
2175 | no_subreg_reg_operand[nop] = reg; |
2176 | else |
2177 | operand_reg[nop] = no_subreg_reg_operand[nop] |
2178 | /* Just use natural mode for elimination result. It should |
2179 | be enough for extra constraints hooks. */ |
2180 | = regno_reg_rtx[hard_regno[nop]]; |
2181 | } |
2182 | |
2183 | /* The constraints are made of several alternatives. Each operand's |
2184 | constraint looks like foo,bar,... with commas separating the |
2185 | alternatives. The first alternatives for all operands go |
2186 | together, the second alternatives go together, etc. |
2187 | |
2188 | First loop over alternatives. */ |
2189 | alternative_mask preferred = curr_id->preferred_alternatives; |
2190 | if (only_alternative >= 0) |
2191 | preferred &= ALTERNATIVE_BIT (only_alternative); |
2192 | |
2193 | for (nalt = 0; nalt < n_alternatives; nalt++) |
2194 | { |
2195 | /* Loop over operands for one constraint alternative. */ |
2196 | if (!TEST_BIT (preferred, nalt)) |
2197 | continue; |
2198 | |
2199 | if (lra_dump_file != NULL) |
2200 | { |
	  fprintf (lra_dump_file, "         Considering alt=%d of insn %d: ",
		   nalt, INSN_UID (curr_insn));
	  print_curr_insn_alt (nalt);
	  fprintf (lra_dump_file, "\n");
2205 | } |
2206 | |
2207 | bool matching_early_clobber[MAX_RECOG_OPERANDS]; |
2208 | curr_small_class_check++; |
2209 | overall = losers = addr_losers = 0; |
2210 | static_reject = reject = reload_nregs = reload_sum = 0; |
2211 | for (nop = 0; nop < n_operands; nop++) |
2212 | { |
2213 | int inc = (curr_static_id |
2214 | ->operand_alternative[nalt * n_operands + nop].reject); |
2215 | if (lra_dump_file != NULL && inc != 0) |
	    fprintf (lra_dump_file,
		     "            Statically defined alt reject+=%d\n", inc);
2218 | static_reject += inc; |
2219 | matching_early_clobber[nop] = 0; |
2220 | } |
2221 | reject += static_reject; |
2222 | early_clobbered_regs_num = 0; |
2223 | curr_alt_out_sp_reload_p = false; |
2224 | curr_reuse_alt_p = true; |
2225 | |
2226 | for (nop = 0; nop < n_operands; nop++) |
2227 | { |
2228 | const char *p; |
2229 | char *end; |
2230 | int len, c, m, i, opalt_num, this_alternative_matches; |
2231 | bool win, did_match, offmemok, early_clobber_p; |
2232 | /* false => this operand can be reloaded somehow for this |
2233 | alternative. */ |
2234 | bool badop; |
2235 | /* true => this operand can be reloaded if the alternative |
2236 | allows regs. */ |
2237 | bool winreg; |
2238 | /* True if a constant forced into memory would be OK for |
2239 | this operand. */ |
2240 | bool constmemok; |
2241 | enum reg_class this_alternative, this_costly_alternative; |
2242 | HARD_REG_SET this_alternative_set, this_costly_alternative_set; |
2243 | HARD_REG_SET this_alternative_exclude_start_hard_regs; |
2244 | bool this_alternative_match_win, this_alternative_win; |
2245 | bool this_alternative_offmemok; |
2246 | bool scratch_p; |
2247 | machine_mode mode; |
2248 | enum constraint_num cn; |
2249 | |
2250 | opalt_num = nalt * n_operands + nop; |
2251 | if (curr_static_id->operand_alternative[opalt_num].anything_ok) |
2252 | { |
2253 | /* Fast track for no constraints at all. */ |
2254 | curr_alt[nop] = NO_REGS; |
	      CLEAR_HARD_REG_SET (curr_alt_set[nop]);
2256 | curr_alt_win[nop] = true; |
2257 | curr_alt_match_win[nop] = false; |
2258 | curr_alt_offmemok[nop] = false; |
2259 | curr_alt_matches[nop] = -1; |
2260 | continue; |
2261 | } |
2262 | |
2263 | op = no_subreg_reg_operand[nop]; |
2264 | mode = curr_operand_mode[nop]; |
2265 | |
2266 | win = did_match = winreg = offmemok = constmemok = false; |
2267 | badop = true; |
2268 | |
2269 | early_clobber_p = false; |
2270 | p = curr_static_id->operand_alternative[opalt_num].constraint; |
2271 | |
2272 | this_costly_alternative = this_alternative = NO_REGS; |
2273 | /* We update set of possible hard regs besides its class |
2274 | because reg class might be inaccurate. For example, |
2275 | union of LO_REGS (l), HI_REGS(h), and STACK_REG(k) in ARM |
2276 | is translated in HI_REGS because classes are merged by |
2277 | pairs and there is no accurate intermediate class. */ |
	  CLEAR_HARD_REG_SET (this_alternative_set);
	  CLEAR_HARD_REG_SET (this_costly_alternative_set);
	  CLEAR_HARD_REG_SET (this_alternative_exclude_start_hard_regs);
2281 | this_alternative_win = false; |
2282 | this_alternative_match_win = false; |
2283 | this_alternative_offmemok = false; |
2284 | this_alternative_matches = -1; |
2285 | |
2286 | /* An empty constraint should be excluded by the fast |
2287 | track. */ |
2288 | lra_assert (*p != 0 && *p != ','); |
2289 | |
2290 | op_reject = 0; |
2291 | /* Scan this alternative's specs for this operand; set WIN |
2292 | if the operand fits any letter in this alternative. |
2293 | Otherwise, clear BADOP if this operand could fit some |
2294 | letter after reloads, or set WINREG if this operand could |
2295 | fit after reloads provided the constraint allows some |
2296 | registers. */ |
2297 | costly_p = false; |
2298 | do |
2299 | { |
2300 | switch ((c = *p, len = CONSTRAINT_LEN (c, p)), c) |
2301 | { |
2302 | case '\0': |
2303 | len = 0; |
2304 | break; |
2305 | case ',': |
2306 | c = '\0'; |
2307 | break; |
2308 | |
2309 | case '&': |
2310 | early_clobber_p = true; |
2311 | break; |
2312 | |
2313 | case '$': |
2314 | op_reject += LRA_MAX_REJECT; |
2315 | break; |
2316 | case '^': |
2317 | op_reject += LRA_LOSER_COST_FACTOR; |
2318 | break; |
2319 | |
2320 | case '#': |
2321 | /* Ignore rest of this alternative. */ |
2322 | c = '\0'; |
2323 | break; |
2324 | |
2325 | case '0': case '1': case '2': case '3': case '4': |
2326 | case '5': case '6': case '7': case '8': case '9': |
2327 | { |
2328 | int m_hregno; |
2329 | bool match_p; |
2330 | |
		    m = strtoul (p, &end, 10);
2332 | p = end; |
2333 | len = 0; |
2334 | lra_assert (nop > m); |
2335 | |
2336 | /* Reject matches if we don't know which operand is |
2337 | bigger. This situation would arguably be a bug in |
2338 | an .md pattern, but could also occur in a user asm. */ |
		    if (!ordered_p (GET_MODE_SIZE (biggest_mode[m]),
				    GET_MODE_SIZE (biggest_mode[nop])))
2341 | break; |
2342 | |
2343 | /* Don't match wrong asm insn operands for proper |
2344 | diagnostic later. */ |
2345 | if (INSN_CODE (curr_insn) < 0 |
2346 | && (curr_operand_mode[m] == BLKmode |
2347 | || curr_operand_mode[nop] == BLKmode) |
2348 | && curr_operand_mode[m] != curr_operand_mode[nop]) |
2349 | break; |
2350 | |
		    m_hregno = get_hard_regno (*curr_id->operand_loc[m]);
2352 | /* We are supposed to match a previous operand. |
2353 | If we do, we win if that one did. If we do |
2354 | not, count both of the operands as losers. |
2355 | (This is too conservative, since most of the |
2356 | time only a single reload insn will be needed |
2357 | to make the two operands win. As a result, |
2358 | this alternative may be rejected when it is |
2359 | actually desirable.) */ |
2360 | match_p = false; |
		    if (operands_match_p (*curr_id->operand_loc[nop],
					  *curr_id->operand_loc[m], m_hregno))
2363 | { |
2364 | /* We should reject matching of an early |
2365 | clobber operand if the matching operand is |
2366 | not dying in the insn. */ |
2367 | if (!TEST_BIT (curr_static_id->operand[m] |
2368 | .early_clobber_alts, nalt) |
2369 | || operand_reg[nop] == NULL_RTX |
2370 | || (find_regno_note (curr_insn, REG_DEAD, |
2371 | REGNO (op)) |
2372 | || REGNO (op) == REGNO (operand_reg[m]))) |
2373 | match_p = true; |
2374 | } |
2375 | if (match_p) |
2376 | { |
2377 | /* If we are matching a non-offsettable |
2378 | address where an offsettable address was |
2379 | expected, then we must reject this |
2380 | combination, because we can't reload |
2381 | it. */ |
2382 | if (curr_alt_offmemok[m] |
2383 | && MEM_P (*curr_id->operand_loc[m]) |
2384 | && curr_alt[m] == NO_REGS && ! curr_alt_win[m]) |
2385 | continue; |
2386 | } |
2387 | else |
2388 | { |
2389 | /* If the operands do not match and one |
			   operand is INOUT, we cannot match them.
2391 | Try other possibilities, e.g. other |
2392 | alternatives or commutative operand |
2393 | exchange. */ |
2394 | if (curr_static_id->operand[nop].type == OP_INOUT |
2395 | || curr_static_id->operand[m].type == OP_INOUT) |
2396 | break; |
2397 | /* Operands don't match. If the operands are |
2398 | different user defined explicit hard |
2399 | registers, then we cannot make them match |
			   when one is an early clobber operand.  */
2401 | if ((REG_P (*curr_id->operand_loc[nop]) |
2402 | || SUBREG_P (*curr_id->operand_loc[nop])) |
2403 | && (REG_P (*curr_id->operand_loc[m]) |
2404 | || SUBREG_P (*curr_id->operand_loc[m]))) |
2405 | { |
2406 | rtx nop_reg = *curr_id->operand_loc[nop]; |
2407 | if (SUBREG_P (nop_reg)) |
2408 | nop_reg = SUBREG_REG (nop_reg); |
2409 | rtx m_reg = *curr_id->operand_loc[m]; |
2410 | if (SUBREG_P (m_reg)) |
2411 | m_reg = SUBREG_REG (m_reg); |
2412 | |
2413 | if (REG_P (nop_reg) |
2414 | && HARD_REGISTER_P (nop_reg) |
2415 | && REG_USERVAR_P (nop_reg) |
2416 | && REG_P (m_reg) |
2417 | && HARD_REGISTER_P (m_reg) |
2418 | && REG_USERVAR_P (m_reg)) |
2419 | { |
2420 | int i; |
2421 | |
2422 | for (i = 0; i < early_clobbered_regs_num; i++) |
2423 | if (m == early_clobbered_nops[i]) |
2424 | break; |
2425 | if (i < early_clobbered_regs_num |
2426 | || early_clobber_p) |
2427 | break; |
2428 | } |
2429 | } |
2430 | /* Both operands must allow a reload register, |
2431 | otherwise we cannot make them match. */ |
2432 | if (curr_alt[m] == NO_REGS) |
2433 | break; |
2434 | /* Retroactively mark the operand we had to |
2435 | match as a loser, if it wasn't already and |
2436 | it wasn't matched to a register constraint |
			   (e.g. it might be matched by memory).  */
2438 | if (curr_alt_win[m] |
2439 | && (operand_reg[m] == NULL_RTX |
2440 | || hard_regno[m] < 0)) |
2441 | { |
2442 | losers++; |
2443 | reload_nregs |
2444 | += (ira_reg_class_max_nregs[curr_alt[m]] |
2445 | [GET_MODE (*curr_id->operand_loc[m])]); |
2446 | } |
2447 | |
2448 | /* Prefer matching earlyclobber alternative as |
2449 | it results in less hard regs required for |
2450 | the insn than a non-matching earlyclobber |
2451 | alternative. */ |
2452 | if (TEST_BIT (curr_static_id->operand[m] |
2453 | .early_clobber_alts, nalt)) |
2454 | { |
2455 | if (lra_dump_file != NULL) |
			  fprintf
			    (lra_dump_file,
			     "            %d Matching earlyclobber alt:"
			     " reject--\n",
			     nop);
2461 | if (!matching_early_clobber[m]) |
2462 | { |
2463 | reject--; |
2464 | matching_early_clobber[m] = 1; |
2465 | } |
2466 | } |
2467 | /* Otherwise we prefer no matching |
2468 | alternatives because it gives more freedom |
2469 | in RA. */ |
2470 | else if (operand_reg[nop] == NULL_RTX |
2471 | || (find_regno_note (curr_insn, REG_DEAD, |
2472 | REGNO (operand_reg[nop])) |
2473 | == NULL_RTX)) |
2474 | { |
2475 | if (lra_dump_file != NULL) |
			  fprintf
			    (lra_dump_file,
			     "            %d Matching alt: reject+=2\n",
			     nop);
2480 | reject += 2; |
2481 | } |
2482 | } |
2483 | /* If we have to reload this operand and some |
2484 | previous operand also had to match the same |
2485 | thing as this operand, we don't know how to do |
2486 | that. */ |
2487 | if (!match_p || !curr_alt_win[m]) |
2488 | { |
2489 | for (i = 0; i < nop; i++) |
2490 | if (curr_alt_matches[i] == m) |
2491 | break; |
2492 | if (i < nop) |
2493 | break; |
2494 | } |
2495 | else |
2496 | did_match = true; |
2497 | |
2498 | this_alternative_matches = m; |
2499 | /* This can be fixed with reloads if the operand |
2500 | we are supposed to match can be fixed with |
2501 | reloads. */ |
2502 | badop = false; |
2503 | this_alternative = curr_alt[m]; |
2504 | this_alternative_set = curr_alt_set[m]; |
2505 | this_alternative_exclude_start_hard_regs |
2506 | = curr_alt_exclude_start_hard_regs[m]; |
2507 | winreg = this_alternative != NO_REGS; |
2508 | break; |
2509 | } |
2510 | |
2511 | case 'g': |
2512 | if (MEM_P (op) |
		  || general_constant_p (op)
2514 | || spilled_pseudo_p (op)) |
2515 | win = true; |
2516 | cl = GENERAL_REGS; |
2517 | goto reg; |
2518 | |
2519 | default: |
2520 | cn = lookup_constraint (p); |
		  switch (get_constraint_type (cn))
2522 | { |
2523 | case CT_REGISTER: |
		      cl = reg_class_for_constraint (cn);
2525 | if (cl != NO_REGS) |
2526 | goto reg; |
2527 | break; |
2528 | |
2529 | case CT_CONST_INT: |
2530 | if (CONST_INT_P (op) |
2531 | && insn_const_int_ok_for_constraint (INTVAL (op), cn)) |
2532 | win = true; |
2533 | break; |
2534 | |
2535 | case CT_MEMORY: |
2536 | case CT_RELAXED_MEMORY: |
2537 | if (MEM_P (op) |
			  && satisfies_memory_constraint_p (op, cn))
2539 | win = true; |
2540 | else if (spilled_pseudo_p (op)) |
2541 | win = true; |
2542 | |
2543 | /* If we didn't already win, we can reload constants |
2544 | via force_const_mem or put the pseudo value into |
2545 | memory, or make other memory by reloading the |
2546 | address like for 'o'. */ |
2547 | if (CONST_POOL_OK_P (mode, op) |
2548 | || MEM_P (op) || REG_P (op) |
2549 | /* We can restore the equiv insn by a |
2550 | reload. */ |
2551 | || equiv_substition_p[nop]) |
2552 | badop = false; |
2553 | constmemok = true; |
2554 | offmemok = true; |
2555 | break; |
2556 | |
2557 | case CT_ADDRESS: |
2558 | /* An asm operand with an address constraint |
2559 | that doesn't satisfy address_operand has |
2560 | is_address cleared, so that we don't try to |
2561 | make a non-address fit. */ |
2562 | if (!curr_static_id->operand[nop].is_address) |
2563 | break; |
2564 | /* If we didn't already win, we can reload the address |
2565 | into a base register. */ |
		      if (satisfies_address_constraint_p (op, cn))
2567 | win = true; |
		      cl = base_reg_class (VOIDmode, ADDR_SPACE_GENERIC,
					   ADDRESS, SCRATCH);
2570 | badop = false; |
2571 | goto reg; |
2572 | |
2573 | case CT_FIXED_FORM: |
		      if (constraint_satisfied_p (op, cn))
2575 | win = true; |
2576 | break; |
2577 | |
2578 | case CT_SPECIAL_MEMORY: |
		      if (satisfies_memory_constraint_p (op, cn))
2580 | win = true; |
2581 | else if (spilled_pseudo_p (op)) |
2582 | { |
2583 | curr_reuse_alt_p = false; |
2584 | win = true; |
2585 | } |
2586 | break; |
2587 | } |
2588 | break; |
2589 | |
2590 | reg: |
2591 | if (mode == BLKmode) |
2592 | break; |
2593 | this_alternative = reg_class_subunion[this_alternative][cl]; |
	      if (hard_reg_set_subset_p (this_alternative_set,
2595 | reg_class_contents[cl])) |
2596 | this_alternative_exclude_start_hard_regs |
2597 | = ira_exclude_class_mode_regs[cl][mode]; |
	      else if (!hard_reg_set_subset_p (reg_class_contents[cl],
					       this_alternative_set))
2600 | this_alternative_exclude_start_hard_regs |
2601 | |= ira_exclude_class_mode_regs[cl][mode]; |
2602 | this_alternative_set |= reg_class_contents[cl]; |
2603 | if (costly_p) |
2604 | { |
2605 | this_costly_alternative |
2606 | = reg_class_subunion[this_costly_alternative][cl]; |
2607 | this_costly_alternative_set |= reg_class_contents[cl]; |
2608 | } |
2609 | winreg = true; |
2610 | if (REG_P (op)) |
2611 | { |
2612 | tree decl; |
2613 | if (hard_regno[nop] >= 0 |
		      && in_hard_reg_set_p (this_alternative_set,
					    mode, hard_regno[nop])
2616 | && ((REG_ATTRS (op) && (decl = REG_EXPR (op)) != NULL |
2617 | && VAR_P (decl) && DECL_HARD_REGISTER (decl)) |
			  || !(TEST_HARD_REG_BIT
			       (this_alternative_exclude_start_hard_regs,
				hard_regno[nop]))))
2621 | win = true; |
2622 | else if (hard_regno[nop] < 0 |
			   && in_class_p (op, this_alternative, NULL))
2624 | win = true; |
2625 | } |
2626 | break; |
2627 | } |
2628 | if (c != ' ' && c != '\t') |
2629 | costly_p = c == '*'; |
2630 | } |
2631 | while ((p += len), c); |
2632 | |
2633 | scratch_p = (operand_reg[nop] != NULL_RTX |
2634 | && ira_former_scratch_p (REGNO (operand_reg[nop]))); |
2635 | /* Record which operands fit this alternative. */ |
2636 | if (win) |
2637 | { |
2638 | this_alternative_win = true; |
2639 | if (operand_reg[nop] != NULL_RTX) |
2640 | { |
2641 | if (hard_regno[nop] >= 0) |
2642 | { |
		      if (in_hard_reg_set_p (this_costly_alternative_set,
					     mode, hard_regno[nop]))
2645 | { |
2646 | if (lra_dump_file != NULL) |
			    fprintf (lra_dump_file,
				     "            %d Costly set: reject++\n",
				     nop);
2650 | reject++; |
2651 | } |
2652 | } |
2653 | else |
2654 | { |
		      /* Prefer a won reg to a spilled pseudo under otherwise
			 equal conditions for possible inheritance.  */
2657 | if (! scratch_p) |
2658 | { |
2659 | if (lra_dump_file != NULL) |
			    fprintf
			      (lra_dump_file,
			       "            %d Non pseudo reload: reject++\n",
			       nop);
2664 | reject++; |
2665 | } |
		      if (in_class_p (operand_reg[nop],
				      this_costly_alternative, NULL))
2668 | { |
2669 | if (lra_dump_file != NULL) |
			    fprintf
			      (lra_dump_file,
			       "            %d Non pseudo costly reload:"
			       " reject++\n",
			       nop);
2675 | reject++; |
2676 | } |
2677 | } |
2678 | /* We simulate the behavior of old reload here. |
2679 | Although scratches need hard registers and it |
2680 | might result in spilling other pseudos, no reload |
2681 | insns are generated for the scratches. So it |
2682 | might cost something but probably less than old |
2683 | reload pass believes. */ |
2684 | if (scratch_p) |
2685 | { |
2686 | if (lra_dump_file != NULL) |
			fprintf (lra_dump_file,
				 "            %d Scratch win: reject+=2\n",
				 nop);
2690 | reject += 2; |
2691 | } |
2692 | } |
2693 | } |
2694 | else if (did_match) |
2695 | this_alternative_match_win = true; |
2696 | else |
2697 | { |
2698 | int const_to_mem = 0; |
2699 | bool no_regs_p; |
2700 | |
2701 | reject += op_reject; |
2702 | /* Mark output reload of the stack pointer. */ |
2703 | if (op == stack_pointer_rtx |
2704 | && curr_static_id->operand[nop].type != OP_IN) |
2705 | curr_alt_out_sp_reload_p = true; |
2706 | |
2707 | /* If this alternative asks for a specific reg class, see if there |
2708 | is at least one allocatable register in that class. */ |
2709 | no_regs_p |
2710 | = (this_alternative == NO_REGS |
2711 | || (hard_reg_set_subset_p |
2712 | (reg_class_contents[this_alternative], |
		      lra_no_alloc_regs)));
2714 | |
2715 | /* For asms, verify that the class for this alternative is possible |
2716 | for the mode that is specified. */ |
2717 | if (!no_regs_p && INSN_CODE (curr_insn) < 0) |
2718 | { |
2719 | int i; |
2720 | for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) |
2721 | if (targetm.hard_regno_mode_ok (i, mode) |
2722 | && in_hard_reg_set_p (reg_class_contents[this_alternative], |
					      mode, i))
2724 | break; |
2725 | if (i == FIRST_PSEUDO_REGISTER) |
2726 | winreg = false; |
2727 | } |
2728 | |
2729 | /* If this operand accepts a register, and if the |
2730 | register class has at least one allocatable register, |
2731 | then this operand can be reloaded. */ |
2732 | if (winreg && !no_regs_p) |
2733 | badop = false; |
2734 | |
2735 | if (badop) |
2736 | { |
2737 | if (lra_dump_file != NULL) |
		    fprintf (lra_dump_file,
			     "            Bad operand -- refuse\n");
2740 | goto fail; |
2741 | } |
2742 | |
2743 | if (this_alternative != NO_REGS) |
2744 | { |
2745 | HARD_REG_SET available_regs |
2746 | = (reg_class_contents[this_alternative] |
2747 | & ~((ira_prohibited_class_mode_regs |
2748 | [this_alternative][mode]) |
2749 | | lra_no_alloc_regs)); |
		  if (hard_reg_set_empty_p (available_regs))
2751 | { |
2752 | /* There are no hard regs holding a value of given |
2753 | mode. */ |
2754 | if (offmemok) |
2755 | { |
2756 | this_alternative = NO_REGS; |
2757 | if (lra_dump_file != NULL) |
			    fprintf (lra_dump_file,
				     "            %d Using memory because of"
				     " a bad mode: reject+=2\n",
				     nop);
2762 | reject += 2; |
2763 | } |
2764 | else |
2765 | { |
2766 | if (lra_dump_file != NULL) |
			    fprintf (lra_dump_file,
				     "            Wrong mode -- refuse\n");
2769 | goto fail; |
2770 | } |
2771 | } |
2772 | } |
2773 | |
	      /* If an unassigned pseudo has a class which is a subset of
		 the required reg class, it is a less costly alternative
		 as the pseudo can still get a hard reg of the necessary
		 class.  */
2778 | if (! no_regs_p && REG_P (op) && hard_regno[nop] < 0 |
2779 | && (cl = get_reg_class (REGNO (op))) != NO_REGS |
2780 | && ira_class_subset_p[this_alternative][cl]) |
2781 | { |
2782 | if (lra_dump_file != NULL) |
		  fprintf
		    (lra_dump_file,
		     "            %d Super set class reg: reject-=3\n", nop);
2786 | reject -= 3; |
2787 | } |
2788 | |
2789 | this_alternative_offmemok = offmemok; |
2790 | if (this_costly_alternative != NO_REGS) |
2791 | { |
2792 | if (lra_dump_file != NULL) |
		    fprintf (lra_dump_file,
			     "            %d Costly loser: reject++\n", nop);
2795 | reject++; |
2796 | } |
	      /* If the operand is dying, has a matching constraint,
		 and satisfies the constraints of the matched operand
		 which failed to satisfy its own constraints, most probably
		 the reload for this operand will be gone.  */
2801 | if (this_alternative_matches >= 0 |
2802 | && !curr_alt_win[this_alternative_matches] |
2803 | && REG_P (op) |
2804 | && find_regno_note (curr_insn, REG_DEAD, REGNO (op)) |
2805 | && (hard_regno[nop] >= 0 |
		      ? in_hard_reg_set_p (this_alternative_set,
					   mode, hard_regno[nop])
		      : in_class_p (op, this_alternative, NULL)))
2809 | { |
2810 | if (lra_dump_file != NULL) |
		    fprintf
		      (lra_dump_file,
		       "            %d Dying matched operand reload: reject++\n",
		       nop);
2815 | reject++; |
2816 | } |
2817 | else |
2818 | { |
		  /* Strict_low_part requires reloading the register and
		     not the sub-register.  In this case we should check
		     that a final reload hard reg can hold the value
		     mode.  */
2823 | if (curr_static_id->operand[nop].strict_low |
2824 | && REG_P (op) |
2825 | && hard_regno[nop] < 0 |
2826 | && GET_CODE (*curr_id->operand_loc[nop]) == SUBREG |
2827 | && ira_class_hard_regs_num[this_alternative] > 0 |
2828 | && (!targetm.hard_regno_mode_ok |
2829 | (ira_class_hard_regs[this_alternative][0], |
2830 | GET_MODE (*curr_id->operand_loc[nop])))) |
2831 | { |
2832 | if (lra_dump_file != NULL) |
			fprintf
			  (lra_dump_file,
			   "            Strict low subreg reload -- refuse\n");
2836 | goto fail; |
2837 | } |
2838 | losers++; |
2839 | } |
2840 | if (operand_reg[nop] != NULL_RTX |
2841 | /* Output operands and matched input operands are |
2842 | not inherited. The following conditions do not |
2843 | exactly describe the previous statement but they |
2844 | are pretty close. */ |
2845 | && curr_static_id->operand[nop].type != OP_OUT |
2846 | && (this_alternative_matches < 0 |
2847 | || curr_static_id->operand[nop].type != OP_IN)) |
2848 | { |
2849 | int last_reload = (lra_reg_info[ORIGINAL_REGNO |
2850 | (operand_reg[nop])] |
2851 | .last_reload); |
2852 | |
		  /* The value of reload_sum makes sense only if we
		     process insns in their order.  This happens only on
		     the first constraints sub-pass when we do most of
		     the reload work.  */
2857 | if (lra_constraint_iter == 1 && last_reload > bb_reload_num) |
2858 | reload_sum += last_reload - bb_reload_num; |
2859 | } |
2860 | /* If this is a constant that is reloaded into the |
2861 | desired class by copying it to memory first, count |
2862 | that as another reload. This is consistent with |
2863 | other code and is required to avoid choosing another |
2864 | alternative when the constant is moved into memory. |
2865 | Note that the test here is precisely the same as in |
2866 | the code below that calls force_const_mem. */ |
2867 | if (CONST_POOL_OK_P (mode, op) |
2868 | && ((targetm.preferred_reload_class |
2869 | (op, this_alternative) == NO_REGS) |
2870 | || no_input_reloads_p)) |
2871 | { |
2872 | const_to_mem = 1; |
2873 | if (! no_regs_p) |
2874 | losers++; |
2875 | } |
2876 | |
2877 | /* Alternative loses if it requires a type of reload not |
2878 | permitted for this insn. We can always reload |
2879 | objects with a REG_UNUSED note. */ |
2880 | if ((curr_static_id->operand[nop].type != OP_IN |
2881 | && no_output_reloads_p |
2882 | && ! find_reg_note (curr_insn, REG_UNUSED, op)) |
2883 | || (curr_static_id->operand[nop].type != OP_OUT |
2884 | && no_input_reloads_p && ! const_to_mem) |
2885 | || (this_alternative_matches >= 0 |
2886 | && (no_input_reloads_p |
2887 | || (no_output_reloads_p |
2888 | && (curr_static_id->operand |
2889 | [this_alternative_matches].type != OP_IN) |
2890 | && ! find_reg_note (curr_insn, REG_UNUSED, |
2891 | no_subreg_reg_operand |
2892 | [this_alternative_matches]))))) |
2893 | { |
2894 | if (lra_dump_file != NULL) |
		    fprintf
		      (lra_dump_file,
		       "            No input/output reload -- refuse\n");
2898 | goto fail; |
2899 | } |
2900 | |
	      /* Alternative loses if its required class pseudo cannot
		 hold a value of the required mode.  Such insns can be
		 described by insn definitions with mode iterators.  */
2904 | if (GET_MODE (*curr_id->operand_loc[nop]) != VOIDmode |
		  && ! hard_reg_set_empty_p (this_alternative_set)
		  /* It is common practice for constraints to use a
		     class which does not actually have enough regs to
		     hold a value of the given mode (e.g. x86 AREG for a
		     mode requiring more than one general reg).
		     Therefore we have 2 conditions to check that the
		     reload pseudo cannot hold the mode value.  */
2912 | && (!targetm.hard_regno_mode_ok |
2913 | (ira_class_hard_regs[this_alternative][0], |
2914 | GET_MODE (*curr_id->operand_loc[nop]))) |
2915 | /* The above condition is not enough as the first |
2916 | reg in ira_class_hard_regs can be not aligned for |
2917 | multi-words mode values. */ |
		  && (prohibited_class_reg_set_mode_p
		      (this_alternative, this_alternative_set,
		       GET_MODE (*curr_id->operand_loc[nop]))))
2921 | { |
2922 | if (lra_dump_file != NULL) |
		    fprintf (lra_dump_file,
			     "            reload pseudo for op %d "
			     "cannot hold the mode value -- refuse\n",
			     nop);
2927 | goto fail; |
2928 | } |
2929 | |
2930 | /* Check strong discouragement of reload of non-constant |
2931 | into class THIS_ALTERNATIVE. */ |
2932 | if (! CONSTANT_P (op) && ! no_regs_p |
2933 | && (targetm.preferred_reload_class |
2934 | (op, this_alternative) == NO_REGS |
2935 | || (curr_static_id->operand[nop].type == OP_OUT |
2936 | && (targetm.preferred_output_reload_class |
2937 | (op, this_alternative) == NO_REGS)))) |
2938 | { |
2939 | if (offmemok && REG_P (op)) |
2940 | { |
2941 | if (lra_dump_file != NULL) |
2942 | fprintf |
2943 | (stream: lra_dump_file, |
2944 | format: " %d Spill pseudo into memory: reject+=3\n" , |
2945 | nop); |
2946 | reject += 3; |
2947 | } |
2948 | else |
2949 | { |
2950 | if (lra_dump_file != NULL) |
2951 | fprintf |
2952 | (stream: lra_dump_file, |
2953 | format: " %d Non-prefered reload: reject+=%d\n" , |
2954 | nop, LRA_MAX_REJECT); |
2955 | reject += LRA_MAX_REJECT; |
2956 | } |
2957 | } |
2958 | |
2959 | if (! (MEM_P (op) && offmemok) |
2960 | && ! (const_to_mem && constmemok)) |
2961 | { |
2962 | /* We prefer to reload pseudos over reloading other |
2963 | things, since such reloads may be able to be |
2964 | eliminated later. So bump REJECT in other cases. |
2965 | Don't do this in the case where we are forcing a |
2966 | constant into memory and it will then win since |
2967 | we don't want to have a different alternative |
2968 | match then. */ |
2969 | if (! (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)) |
2970 | { |
2971 | if (lra_dump_file != NULL) |
2972 | fprintf |
2973 | (stream: lra_dump_file, |
2974 | format: " %d Non-pseudo reload: reject+=2\n" , |
2975 | nop); |
2976 | reject += 2; |
2977 | } |
2978 | |
2979 | if (! no_regs_p) |
2980 | reload_nregs |
2981 | += ira_reg_class_max_nregs[this_alternative][mode]; |
2982 | |
2983 | if (SMALL_REGISTER_CLASS_P (this_alternative)) |
2984 | { |
2985 | if (lra_dump_file != NULL) |
2986 | fprintf |
2987 | (stream: lra_dump_file, |
2988 | format: " %d Small class reload: reject+=%d\n" , |
2989 | nop, LRA_LOSER_COST_FACTOR / 2); |
2990 | reject += LRA_LOSER_COST_FACTOR / 2; |
2991 | } |
2992 | } |
2993 | |
2994 | /* We are trying to spill pseudo into memory. It is |
2995 | usually more costly than moving to a hard register |
2996 | although it might takes the same number of |
2997 | reloads. |
2998 | |
2999 | Non-pseudo spill may happen also. Suppose a target allows both |
3000 | register and memory in the operand constraint alternatives, |
3001 | then it's typical that an eliminable register has a substition |
3002 | of "base + offset" which can either be reloaded by a simple |
3003 | "new_reg <= base + offset" which will match the register |
3004 | constraint, or a similar reg addition followed by further spill |
3005 | to and reload from memory which will match the memory |
3006 | constraint, but this memory spill will be much more costly |
3007 | usually. |
3008 | |
3009 | Code below increases the reject for both pseudo and non-pseudo |
3010 | spill. */ |
3011 | if (no_regs_p |
3012 | && !(MEM_P (op) && offmemok) |
3013 | && !(REG_P (op) && hard_regno[nop] < 0)) |
3014 | { |
3015 | if (lra_dump_file != NULL) |
3016 | fprintf |
3017 | (stream: lra_dump_file, |
3018 | format: " %d Spill %spseudo into memory: reject+=3\n" , |
3019 | nop, REG_P (op) ? "" : "Non-" ); |
3020 | reject += 3; |
3021 | if (VECTOR_MODE_P (mode)) |
3022 | { |
3023 | /* Spilling vectors into memory is usually more |
3024 | costly as they contain big values. */ |
3025 | if (lra_dump_file != NULL) |
3026 | fprintf |
3027 | (stream: lra_dump_file, |
3028 | format: " %d Spill vector pseudo: reject+=2\n" , |
3029 | nop); |
3030 | reject += 2; |
3031 | } |
3032 | } |
3033 | |
3034 | /* When we use an operand requiring memory in given |
3035 | alternative, the insn should write *and* read the |
3036 | value to/from memory it is costly in comparison with |
3037 | an insn alternative which does not use memory |
3038 | (e.g. register or immediate operand). We exclude |
3039 | memory operand for such case as we can satisfy the |
3040 | memory constraints by reloading address. */ |
3041 | if (no_regs_p && offmemok && !MEM_P (op)) |
3042 | { |
3043 | if (lra_dump_file != NULL) |
3044 | fprintf |
3045 | (stream: lra_dump_file, |
3046 | format: " Using memory insn operand %d: reject+=3\n" , |
3047 | nop); |
3048 | reject += 3; |
3049 | } |
3050 | |
3051 | /* If reload requires moving value through secondary |
3052 | memory, it will need one more insn at least. */ |
3053 | if (this_alternative != NO_REGS |
3054 | && REG_P (op) && (cl = get_reg_class (REGNO (op))) != NO_REGS |
3055 | && ((curr_static_id->operand[nop].type != OP_OUT |
3056 | && targetm.secondary_memory_needed (GET_MODE (op), cl, |
3057 | this_alternative)) |
3058 | || (curr_static_id->operand[nop].type != OP_IN |
3059 | && (targetm.secondary_memory_needed |
3060 | (GET_MODE (op), this_alternative, cl))))) |
3061 | losers++; |
3062 | |
3063 | if (MEM_P (op) && offmemok) |
3064 | addr_losers++; |
3065 | else |
3066 | { |
3067 | /* Input reloads can be inherited more often than |
3068 | output reloads can be removed, so penalize output |
3069 | reloads. */ |
3070 | if (!REG_P (op) || curr_static_id->operand[nop].type != OP_IN) |
3071 | { |
3072 | if (lra_dump_file != NULL) |
3073 | fprintf |
3074 | (stream: lra_dump_file, |
3075 | format: " %d Non input pseudo reload: reject++\n" , |
3076 | nop); |
3077 | reject++; |
3078 | } |
3079 | |
3080 | if (curr_static_id->operand[nop].type == OP_INOUT) |
3081 | { |
3082 | if (lra_dump_file != NULL) |
3083 | fprintf |
3084 | (stream: lra_dump_file, |
3085 | format: " %d Input/Output reload: reject+=%d\n" , |
3086 | nop, LRA_LOSER_COST_FACTOR); |
3087 | reject += LRA_LOSER_COST_FACTOR; |
3088 | } |
3089 | } |
3090 | } |
3091 | |
3092 | if (early_clobber_p && ! scratch_p) |
3093 | { |
3094 | if (lra_dump_file != NULL) |
3095 | fprintf (stream: lra_dump_file, |
3096 | format: " %d Early clobber: reject++\n" , nop); |
3097 | reject++; |
3098 | } |
3099 | /* ??? We check early clobbers after processing all operands |
3100 | (see loop below) and there we update the costs more. |
3101 | Should we update the cost (may be approximately) here |
3102 | because of early clobber register reloads or it is a rare |
3103 | or non-important thing to be worth to do it. */ |
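          /* Combine the cost components (see the expression below):
             each needed reload insn (a "loser") is weighted by
             LRA_LOSER_COST_FACTOR, and REJECT accumulates the smaller
             heuristic penalties.  When every loser is an address
             reload, the alternative's static reject is not counted.  */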
          overall = (losers * LRA_LOSER_COST_FACTOR + reject
                     - (addr_losers == losers ? static_reject : 0));
          if ((best_losers == 0 || losers != 0) && best_overall < overall)
            {
              if (lra_dump_file != NULL)
                fprintf (lra_dump_file,
                         " overall=%d,losers=%d -- refuse\n",
                         overall, losers);
              goto fail;
            }

          if (update_and_check_small_class_inputs (nop, nalt,
                                                   this_alternative))
            {
              if (lra_dump_file != NULL)
                fprintf (lra_dump_file,
                         " not enough small class regs -- refuse\n");
              goto fail;
            }
          curr_alt[nop] = this_alternative;
          curr_alt_set[nop] = this_alternative_set;
          curr_alt_exclude_start_hard_regs[nop]
            = this_alternative_exclude_start_hard_regs;
          curr_alt_win[nop] = this_alternative_win;
          curr_alt_match_win[nop] = this_alternative_match_win;
          curr_alt_offmemok[nop] = this_alternative_offmemok;
          curr_alt_matches[nop] = this_alternative_matches;

          if (this_alternative_matches >= 0
              && !did_match && !this_alternative_win)
            curr_alt_win[this_alternative_matches] = false;

          if (early_clobber_p && operand_reg[nop] != NULL_RTX)
            early_clobbered_nops[early_clobbered_regs_num++] = nop;
        }

      if (curr_insn_set != NULL_RTX && n_operands == 2
          /* Prevent processing non-move insns.  */
          && (GET_CODE (SET_SRC (curr_insn_set)) == SUBREG
              || SET_SRC (curr_insn_set) == no_subreg_reg_operand[1])
          && ((! curr_alt_win[0] && ! curr_alt_win[1]
               && REG_P (no_subreg_reg_operand[0])
               && REG_P (no_subreg_reg_operand[1])
               && (reg_in_class_p (no_subreg_reg_operand[0], curr_alt[1])
                   || reg_in_class_p (no_subreg_reg_operand[1], curr_alt[0])))
              || (! curr_alt_win[0] && curr_alt_win[1]
                  && REG_P (no_subreg_reg_operand[1])
                  /* Check that we reload the memory, not the memory
                     address.  */
                  && ! (curr_alt_offmemok[0]
                        && MEM_P (no_subreg_reg_operand[0]))
                  && reg_in_class_p (no_subreg_reg_operand[1], curr_alt[0]))
              || (curr_alt_win[0] && ! curr_alt_win[1]
                  && REG_P (no_subreg_reg_operand[0])
                  /* Check that we reload the memory, not the memory
                     address.  */
                  && ! (curr_alt_offmemok[1]
                        && MEM_P (no_subreg_reg_operand[1]))
                  && reg_in_class_p (no_subreg_reg_operand[0], curr_alt[1])
                  && (! CONST_POOL_OK_P (curr_operand_mode[1],
                                         no_subreg_reg_operand[1])
                      || (targetm.preferred_reload_class
                          (no_subreg_reg_operand[1],
                           (enum reg_class) curr_alt[1]) != NO_REGS))
                  /* If it is a result of recent elimination in a move
                     insn, we can still transform it into an add by
                     using this alternative.  */
                  && GET_CODE (no_subreg_reg_operand[1]) != PLUS
                  /* Likewise if the source has been replaced with an
                     equivalent value.  This only happens once -- the reload
                     will use the equivalent value instead of the register it
                     replaces -- so there should be no danger of cycling.  */
                  && !equiv_substition_p[1])))
        {
          /* We have a move insn and a new reload insn will be similar
             to the current insn.  We should avoid such a situation as
             it results in LRA cycling.  */
          if (lra_dump_file != NULL)
            fprintf (lra_dump_file,
                     " Cycle danger: overall += LRA_MAX_REJECT\n");
          overall += LRA_MAX_REJECT;
        }
      ok_p = true;
      curr_alt_dont_inherit_ops_num = 0;
      for (nop = 0; nop < early_clobbered_regs_num; nop++)
        {
          int i, j, clobbered_hard_regno, first_conflict_j, last_conflict_j;
          HARD_REG_SET temp_set;

          i = early_clobbered_nops[nop];
          if ((! curr_alt_win[i] && ! curr_alt_match_win[i])
              || hard_regno[i] < 0)
            continue;
          lra_assert (operand_reg[i] != NULL_RTX);
          clobbered_hard_regno = hard_regno[i];
          CLEAR_HARD_REG_SET (temp_set);
          add_to_hard_reg_set (&temp_set, GET_MODE (*curr_id->operand_loc[i]),
                               clobbered_hard_regno);
          first_conflict_j = last_conflict_j = -1;
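          /* Scan the other operands for uses of the early-clobbered
             hard register.  */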
          for (j = 0; j < n_operands; j++)
            if (j == i
                /* We don't want to process the insides of match_operator
                   and match_parallel because otherwise we would process
                   their operands once again, generating wrong
                   code.  */
                || curr_static_id->operand[j].is_operator)
              continue;
            else if ((curr_alt_matches[j] == i && curr_alt_match_win[j])
                     || (curr_alt_matches[i] == j && curr_alt_match_win[i]))
              continue;
            /* If we don't reload j-th operand, check conflicts.  */
            else if ((curr_alt_win[j] || curr_alt_match_win[j])
                     && uses_hard_regs_p (*curr_id->operand_loc[j], temp_set))
              {
                if (first_conflict_j < 0)
                  first_conflict_j = j;
                last_conflict_j = j;
                /* The earlyclobber operand and the conflicting operand
                   cannot both be user-defined hard registers.  */
                if (HARD_REGISTER_P (operand_reg[i])
                    && REG_USERVAR_P (operand_reg[i])
                    && operand_reg[j] != NULL_RTX
                    && HARD_REGISTER_P (operand_reg[j])
                    && REG_USERVAR_P (operand_reg[j]))
                  {
                    /* For asm, let curr_insn_transform diagnose it.  */
                    if (INSN_CODE (curr_insn) < 0)
                      return false;
                    fatal_insn ("unable to generate reloads for "
                                "impossible constraints:", curr_insn);
                  }
              }
          if (last_conflict_j < 0)
            continue;

          /* If an earlyclobber operand conflicts with another non-matching
             operand (i.e. they have been assigned the same hard register),
             then it is better to reload the other operand, as there may
             exist yet another operand with a matching constraint associated
             with the earlyclobber operand.  However, if one of the operands
             is an explicit use of a hard register, then we must reload the
             other non-hard register operand.  */
          if (HARD_REGISTER_P (operand_reg[i])
              || (first_conflict_j == last_conflict_j
                  && operand_reg[last_conflict_j] != NULL_RTX
                  && !curr_alt_match_win[last_conflict_j]
                  && !HARD_REGISTER_P (operand_reg[last_conflict_j])))
            {
              curr_alt_win[last_conflict_j] = false;
              curr_alt_dont_inherit_ops[curr_alt_dont_inherit_ops_num++]
                = last_conflict_j;
              losers++;
              if (lra_dump_file != NULL)
                fprintf
                  (lra_dump_file,
                   " %d Conflict early clobber reload: reject--\n",
                   i);
            }
          else
            {
              /* We need to reload the early clobbered register and the
                 matched registers.  */
              for (j = 0; j < n_operands; j++)
                if (curr_alt_matches[j] == i)
                  {
                    curr_alt_match_win[j] = false;
                    losers++;
                    overall += LRA_LOSER_COST_FACTOR;
                  }
              if (! curr_alt_match_win[i])
                curr_alt_dont_inherit_ops[curr_alt_dont_inherit_ops_num++] = i;
              else
                {
                  /* Remember that pseudos used for match reloads are never
                     inherited.  */
                  lra_assert (curr_alt_matches[i] >= 0);
                  curr_alt_win[curr_alt_matches[i]] = false;
                }
              curr_alt_win[i] = curr_alt_match_win[i] = false;
              losers++;
              if (lra_dump_file != NULL)
                fprintf
                  (lra_dump_file,
                   " %d Matched conflict early clobber reloads: "
                   "reject--\n",
                   i);
            }
          /* Early clobber was already reflected in REJECT.  */
          if (!matching_early_clobber[i])
            {
              lra_assert (reject > 0);
              reject--;
              matching_early_clobber[i] = 1;
            }
          overall += LRA_LOSER_COST_FACTOR - 1;
        }
      if (lra_dump_file != NULL)
        fprintf (lra_dump_file, " overall=%d,losers=%d,rld_nregs=%d\n",
                 overall, losers, reload_nregs);

      /* If this alternative can be made to work by reloading, and it
         needs less reloading than the others checked so far, record
         it as the chosen goal for reloading.  */
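      /* An alternative needing no reloads always beats one needing
         some; otherwise compare the overall cost, then the number of
         reload regs, then prefer a larger reload distance sum, and
         finally the earlier alternative.  */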
      if ((best_losers != 0 && losers == 0)
          || (((best_losers == 0 && losers == 0)
               || (best_losers != 0 && losers != 0))
              && (best_overall > overall
                  || (best_overall == overall
                      /* If the cost of the reloads is the same,
                         prefer the alternative which requires the
                         minimal number of reload regs.  */
                      && (reload_nregs < best_reload_nregs
                          || (reload_nregs == best_reload_nregs
                              && (best_reload_sum < reload_sum
                                  || (best_reload_sum == reload_sum
                                      && nalt < goal_alt_number))))))))
        {
          for (nop = 0; nop < n_operands; nop++)
            {
              goal_alt_win[nop] = curr_alt_win[nop];
              goal_alt_match_win[nop] = curr_alt_match_win[nop];
              goal_alt_matches[nop] = curr_alt_matches[nop];
              goal_alt[nop] = curr_alt[nop];
              goal_alt_exclude_start_hard_regs[nop]
                = curr_alt_exclude_start_hard_regs[nop];
              goal_alt_offmemok[nop] = curr_alt_offmemok[nop];
            }
          goal_alt_dont_inherit_ops_num = curr_alt_dont_inherit_ops_num;
          goal_reuse_alt_p = curr_reuse_alt_p;
          for (nop = 0; nop < curr_alt_dont_inherit_ops_num; nop++)
            goal_alt_dont_inherit_ops[nop] = curr_alt_dont_inherit_ops[nop];
          goal_alt_swapped = curr_swapped;
          goal_alt_out_sp_reload_p = curr_alt_out_sp_reload_p;
          best_overall = overall;
          best_losers = losers;
          best_reload_nregs = reload_nregs;
          best_reload_sum = reload_sum;
          goal_alt_number = nalt;
        }
      if (losers == 0)
        /* Everything is satisfied.  Do not process alternatives
           anymore.  */
        break;
    fail:
      ;
    }
  return ok_p;
}

/* Make a reload base reg from address AD.  Return the new inner part
   of the address (the new base reg plus displacement) on success, or
   NULL_RTX on failure.  */
static rtx
base_to_reg (struct address_info *ad)
{
  enum reg_class cl;
  int code = -1;
  rtx new_inner = NULL_RTX;
  rtx new_reg = NULL_RTX;
  rtx_insn *insn;
  rtx_insn *last_insn = get_last_insn ();

  lra_assert (ad->disp == ad->disp_term);
  cl = base_reg_class (ad->mode, ad->as, ad->base_outer_code,
                       get_index_code (ad));
  new_reg = lra_create_new_reg (GET_MODE (*ad->base), NULL_RTX, cl, NULL,
                                "base");
  new_inner = simplify_gen_binary (PLUS, GET_MODE (new_reg), new_reg,
                                   ad->disp_term == NULL
                                   ? const0_rtx
                                   : *ad->disp_term);
  if (!valid_address_p (ad->mode, new_inner, ad->as))
    return NULL_RTX;
  insn = emit_insn (gen_rtx_SET (new_reg, *ad->base));
  code = recog_memoized (insn);
  if (code < 0)
    {
      delete_insns_since (last_insn);
      return NULL_RTX;
    }

  return new_inner;
}

/* Make a reload of base reg + DISP from address AD.  Return the new
   pseudo.  */
static rtx
base_plus_disp_to_reg (struct address_info *ad, rtx disp)
{
  enum reg_class cl;
  rtx new_reg;

  lra_assert (ad->base == ad->base_term);
  cl = base_reg_class (ad->mode, ad->as, ad->base_outer_code,
                       get_index_code (ad));
  new_reg = lra_create_new_reg (GET_MODE (*ad->base_term), NULL_RTX, cl, NULL,
                                "base + disp");
  lra_emit_add (new_reg, *ad->base_term, disp);
  return new_reg;
}

/* Make a reload of the index part of address AD.  Return the new
   pseudo.  */
static rtx
index_part_to_reg (struct address_info *ad, enum reg_class index_class)
{
  rtx new_reg;

  new_reg = lra_create_new_reg (GET_MODE (*ad->index), NULL_RTX,
                                index_class, NULL, "index term");
  expand_mult (GET_MODE (*ad->index), *ad->index_term,
               GEN_INT (get_index_scale (ad)), new_reg, 1);
  return new_reg;
}

/* Return true if we can add a displacement to address AD, even if that
   makes the address invalid.  The fix-up code requires any new address
   to be the sum of the BASE_TERM, INDEX and DISP_TERM fields.  */
static bool
can_add_disp_p (struct address_info *ad)
{
  return (!ad->autoinc_p
          && ad->segment == NULL
          && ad->base == ad->base_term
          && ad->disp == ad->disp_term);
}

/* Make equiv substitution in address AD.  Return true if a substitution
   was made.  */
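/* When the equivalence of a base or index term is a plain REG it
   replaces the term directly; when it is a (plus REG const), the
   constant is folded into the address displacement, which is only
   possible when can_add_disp_p holds.  Other equivalence forms leave
   the term unchanged.  */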
static bool
equiv_address_substitution (struct address_info *ad)
{
  rtx base_reg, new_base_reg, index_reg, new_index_reg, *base_term, *index_term;
  poly_int64 disp;
  HOST_WIDE_INT scale;
  bool change_p;

  base_term = strip_subreg (ad->base_term);
  if (base_term == NULL)
    base_reg = new_base_reg = NULL_RTX;
  else
    {
      base_reg = *base_term;
      new_base_reg = get_equiv_with_elimination (base_reg, curr_insn);
    }
  index_term = strip_subreg (ad->index_term);
  if (index_term == NULL)
    index_reg = new_index_reg = NULL_RTX;
  else
    {
      index_reg = *index_term;
      new_index_reg = get_equiv_with_elimination (index_reg, curr_insn);
    }
  if (base_reg == new_base_reg && index_reg == new_index_reg)
    return false;
  disp = 0;
  change_p = false;
  if (lra_dump_file != NULL)
    {
      fprintf (lra_dump_file, "Changing address in insn %d ",
               INSN_UID (curr_insn));
      dump_value_slim (lra_dump_file, *ad->outer, 1);
    }
  if (base_reg != new_base_reg)
    {
      poly_int64 offset;
      if (REG_P (new_base_reg))
        {
          *base_term = new_base_reg;
          change_p = true;
        }
      else if (GET_CODE (new_base_reg) == PLUS
               && REG_P (XEXP (new_base_reg, 0))
               && poly_int_rtx_p (XEXP (new_base_reg, 1), &offset)
               && can_add_disp_p (ad))
        {
          disp += offset;
          *base_term = XEXP (new_base_reg, 0);
          change_p = true;
        }
      if (ad->base_term2 != NULL)
        *ad->base_term2 = *ad->base_term;
    }
  if (index_reg != new_index_reg)
    {
      poly_int64 offset;
      if (REG_P (new_index_reg))
        {
          *index_term = new_index_reg;
          change_p = true;
        }
      else if (GET_CODE (new_index_reg) == PLUS
               && REG_P (XEXP (new_index_reg, 0))
               && poly_int_rtx_p (XEXP (new_index_reg, 1), &offset)
               && can_add_disp_p (ad)
               && (scale = get_index_scale (ad)))
        {
          disp += offset * scale;
          *index_term = XEXP (new_index_reg, 0);
          change_p = true;
        }
    }
  if (maybe_ne (disp, 0))
    {
      if (ad->disp != NULL)
        *ad->disp = plus_constant (GET_MODE (*ad->inner), *ad->disp, disp);
      else
        {
          *ad->inner = plus_constant (GET_MODE (*ad->inner), *ad->inner, disp);
          update_address (ad);
        }
      change_p = true;
    }
  if (lra_dump_file != NULL)
    {
      if (! change_p)
        fprintf (lra_dump_file, " -- no change\n");
      else
        {
          fprintf (lra_dump_file, " on equiv ");
          dump_value_slim (lra_dump_file, *ad->outer, 1);
          fprintf (lra_dump_file, "\n");
        }
    }
  return change_p;
}

/* Skip all modifiers and whitespace in constraint STR and return the
   result.  */
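/* For example, given "=&r,m" the returned pointer points at "r,m".  */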
static const char *
skip_constraint_modifiers (const char *str)
{
  for (;;str++)
    switch (*str)
      {
      case '+': case '&': case '=': case '*': case ' ': case '\t':
      case '$': case '^': case '%': case '?': case '!':
        break;
      default: return str;
      }
}

/* Takes a string of 0 or more comma-separated constraints.  When more
   than one constraint is present, evaluate whether they all correspond
   to a single, repeated constraint (e.g. "r,r") or whether we have
   more than one distinct constraint (e.g. "r,m").  */
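/* For example, "r" and "r,r" are unique while "r,m" is not.  An empty
   string or an empty alternative is treated as CONSTRAINT_X.  */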
static bool
constraint_unique (const char *cstr)
{
  enum constraint_num ca, cb;
  ca = CONSTRAINT__UNKNOWN;
  for (;;)
    {
      cstr = skip_constraint_modifiers (cstr);
      if (*cstr == '\0' || *cstr == ',')
        cb = CONSTRAINT_X;
      else
        {
          cb = lookup_constraint (cstr);
          if (cb == CONSTRAINT__UNKNOWN)
            return false;
          cstr += CONSTRAINT_LEN (cstr[0], cstr);
        }
      /* Handle the first iteration of the loop.  */
      if (ca == CONSTRAINT__UNKNOWN)
        ca = cb;
      /* Handle the general case of comparing ca with subsequent
         constraints.  */
      else if (ca != cb)
        return false;
      if (*cstr == '\0')
        return true;
      if (*cstr == ',')
        cstr += 1;
    }
}

/* Major function to make reloads for an address in operand NOP or
   check its correctness (if CHECK_ONLY_P is true).  The supported
   cases are:

   1) an address that existed before LRA started, at which point it
   must have been valid.  These addresses are subject to elimination
   and may have become invalid due to the elimination offset being out
   of range.

   2) an address created by forcing a constant to memory
   (force_const_to_mem).  The initial form of these addresses might
   not be valid, and it is this function's job to make them valid.

   3) a frame address formed from a register and a (possibly zero)
   constant offset.  As above, these addresses might not be valid and
   this function must make them so.

   Add reloads to the lists *BEFORE and *AFTER.  We might need to add
   reloads to *AFTER because of inc/dec, {pre, post} modify in the
   address.  Return true for any RTL change.

   This is a helper function which does not produce all the
   transformations (when CHECK_ONLY_P is false) which can be
   necessary.  It does just basic steps.  To do all necessary
   transformations use the function process_address.  */
static bool
process_address_1 (int nop, bool check_only_p,
                   rtx_insn **before, rtx_insn **after)
{
  struct address_info ad;
  rtx new_reg;
  HOST_WIDE_INT scale;
  rtx op = *curr_id->operand_loc[nop];
  rtx mem = extract_mem_from_operand (op);
  const char *constraint;
  enum constraint_num cn;
  bool change_p = false;

  if (MEM_P (mem)
      && GET_MODE (mem) == BLKmode
      && GET_CODE (XEXP (mem, 0)) == SCRATCH)
    return false;

  constraint
    = skip_constraint_modifiers (curr_static_id->operand[nop].constraint);
  if (IN_RANGE (constraint[0], '0', '9'))
    {
      char *end;
      unsigned long dup = strtoul (constraint, &end, 10);
      constraint
        = skip_constraint_modifiers (curr_static_id->operand[dup].constraint);
    }
  cn = lookup_constraint (*constraint == '\0' ? "X" : constraint);
  /* If we have several alternatives and/or several constraints in an
     alternative and we cannot say at this stage which constraint will be
     used, use the unknown constraint.  The exception is an address
     constraint.  If an operand has one address constraint, probably all
     other constraints are address ones as well.  */
  if (constraint[0] != '\0' && get_constraint_type (cn) != CT_ADDRESS
      && !constraint_unique (constraint))
    cn = CONSTRAINT__UNKNOWN;
  if (insn_extra_address_constraint (cn)
      /* When we find an asm operand with an address constraint that
         doesn't satisfy address_operand to begin with, we clear
         is_address, so that we don't try to make a non-address fit.
         If the asm statement got this far, it's because other
         constraints are available, and we'll use them, disregarding
         the unsatisfiable address ones.  */
      && curr_static_id->operand[nop].is_address)
    decompose_lea_address (&ad, curr_id->operand_loc[nop]);
  /* Do not attempt to decompose arbitrary addresses generated by combine
     for asm operands with loose constraints, e.g. 'X'.
     Need to extract memory from op for a special memory constraint,
     i.e. bcst_mem_operand in the i386 backend.  */
  else if (MEM_P (mem)
           && !(INSN_CODE (curr_insn) < 0
                && get_constraint_type (cn) == CT_FIXED_FORM
                && constraint_satisfied_p (op, cn)))
    decompose_mem_address (&ad, mem);
  else if (GET_CODE (op) == SUBREG
           && MEM_P (SUBREG_REG (op)))
    decompose_mem_address (&ad, SUBREG_REG (op));
  else
    return false;
  /* If INDEX_REG_CLASS is assigned to base_term already and isn't to
     index_term, swap them to avoid assigning INDEX_REG_CLASS to both
     when INDEX_REG_CLASS is a single register class.  */
  enum reg_class index_cl = index_reg_class (curr_insn);
  if (ad.base_term != NULL
      && ad.index_term != NULL
      && ira_class_hard_regs_num[index_cl] == 1
      && REG_P (*ad.base_term)
      && REG_P (*ad.index_term)
      && in_class_p (*ad.base_term, index_cl, NULL)
      && ! in_class_p (*ad.index_term, index_cl, NULL))
    {
      std::swap (ad.base, ad.index);
      std::swap (ad.base_term, ad.index_term);
    }
  if (! check_only_p)
    change_p = equiv_address_substitution (&ad);
  if (ad.base_term != NULL
      && (process_addr_reg
          (ad.base_term, check_only_p, before,
           (ad.autoinc_p
            && !(REG_P (*ad.base_term)
                 && find_regno_note (curr_insn, REG_DEAD,
                                     REGNO (*ad.base_term)) != NULL_RTX)
            ? after : NULL),
           base_reg_class (ad.mode, ad.as, ad.base_outer_code,
                           get_index_code (&ad), curr_insn))))
    {
      change_p = true;
      if (ad.base_term2 != NULL)
        *ad.base_term2 = *ad.base_term;
    }
  if (ad.index_term != NULL
      && process_addr_reg (ad.index_term, check_only_p,
                           before, NULL, index_cl))
    change_p = true;

  /* Target hooks sometimes don't treat extra-constraint addresses as
     legitimate address_operands, so handle them specially.  */
  if (insn_extra_address_constraint (cn)
      && satisfies_address_constraint_p (&ad, cn))
    return change_p;

  if (check_only_p)
    return change_p;

  /* There are four cases where the shape of *AD.INNER may now be invalid:

     1) the original address was valid, but either elimination or
     equiv_address_substitution was applied and that made
     the address invalid.

     2) the address is an invalid symbolic address created by
     force_const_to_mem.

     3) the address is a frame address with an invalid offset.

     4) the address is a frame address with an invalid base.

     All these cases involve a non-autoinc address, so there is no
     point revalidating other types.  */
  if (ad.autoinc_p || valid_address_p (op, &ad, cn))
    return change_p;

  /* Any index existed before LRA started, so we can assume that the
     presence and shape of the index is valid.  */
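  /* The chain below dispatches on which parts of the address are
     present (base, index, displacement) and reloads the invalid part
     into a new register; the scale == 1 case is left for
     curr_insn_transform to finish.  */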
  push_to_sequence (*before);
  lra_assert (ad.disp == ad.disp_term);
  if (ad.base == NULL)
    {
      if (ad.index == NULL)
        {
          rtx_insn *insn;
          rtx_insn *last = get_last_insn ();
          int code = -1;
          enum reg_class cl = base_reg_class (ad.mode, ad.as,
                                              SCRATCH, SCRATCH,
                                              curr_insn);
          rtx addr = *ad.inner;

          new_reg = lra_create_new_reg (Pmode, NULL_RTX, cl, NULL, "addr");
          if (HAVE_lo_sum)
            {
              /* addr => lo_sum (new_base, addr), case (2) above.  */
              insn = emit_insn (gen_rtx_SET
                                (new_reg,
                                 gen_rtx_HIGH (Pmode, copy_rtx (addr))));
              code = recog_memoized (insn);
              if (code >= 0)
                {
                  *ad.inner = gen_rtx_LO_SUM (Pmode, new_reg, addr);
                  if (!valid_address_p (op, &ad, cn))
                    {
                      /* Try to put lo_sum into register.  */
                      insn = emit_insn (gen_rtx_SET
                                        (new_reg,
                                         gen_rtx_LO_SUM (Pmode, new_reg, addr)));
                      code = recog_memoized (insn);
                      if (code >= 0)
                        {
                          *ad.inner = new_reg;
                          if (!valid_address_p (op, &ad, cn))
                            {
                              *ad.inner = addr;
                              code = -1;
                            }
                        }

                    }
                }
              if (code < 0)
                delete_insns_since (last);
            }

          if (code < 0)
            {
              /* addr => new_base, case (2) above.  */
              lra_emit_move (new_reg, addr);

              for (insn = last == NULL_RTX ? get_insns () : NEXT_INSN (last);
                   insn != NULL_RTX;
                   insn = NEXT_INSN (insn))
                if (recog_memoized (insn) < 0)
                  break;
              if (insn != NULL_RTX)
                {
                  /* Do nothing if we cannot generate the right insns.
                     This is analogous to the reload pass behavior.  */
                  delete_insns_since (last);
                  end_sequence ();
                  return false;
                }
              *ad.inner = new_reg;
            }
        }
      else
        {
          /* index * scale + disp => new base + index * scale,
             case (1) above.  */
          enum reg_class cl = base_reg_class (ad.mode, ad.as, PLUS,
                                              GET_CODE (*ad.index),
                                              curr_insn);

          lra_assert (index_cl != NO_REGS);
          new_reg = lra_create_new_reg (Pmode, NULL_RTX, cl, NULL, "disp");
          lra_emit_move (new_reg, *ad.disp);
          *ad.inner = simplify_gen_binary (PLUS, GET_MODE (new_reg),
                                           new_reg, *ad.index);
        }
    }
  else if (ad.index == NULL)
    {
      int regno;
      enum reg_class cl;
      rtx set;
      rtx_insn *insns, *last_insn;
      /* Try to reload the base into a register only if the base is
         invalid for the address but its offset is valid, case (4)
         above.  */
      start_sequence ();
      new_reg = base_to_reg (&ad);

      /* base + disp => new base, cases (1) and (3) above.  */
      /* Another option would be to reload the displacement into an
         index register.  However, postreload has code to optimize
         address reloads that have the same base and different
         displacements, so reloading into an index register would
         not necessarily be a win.  */
      if (new_reg == NULL_RTX)
        {
          /* See if the target can split the displacement into a
             legitimate new displacement from a local anchor.  */
          gcc_assert (ad.disp == ad.disp_term);
          poly_int64 orig_offset;
          rtx offset1, offset2;
          if (poly_int_rtx_p (*ad.disp, &orig_offset)
              && targetm.legitimize_address_displacement (&offset1, &offset2,
                                                          orig_offset,
                                                          ad.mode))
            {
              new_reg = base_plus_disp_to_reg (&ad, offset1);
              new_reg = gen_rtx_PLUS (GET_MODE (new_reg), new_reg, offset2);
            }
          else
            new_reg = base_plus_disp_to_reg (&ad, *ad.disp);
        }
      insns = get_insns ();
      last_insn = get_last_insn ();
      /* If we generated at least two insns, try the last insn's source
         as an address.  If we succeed, we generate one insn fewer.  */
      if (REG_P (new_reg)
          && last_insn != insns
          && (set = single_set (last_insn)) != NULL_RTX
          && GET_CODE (SET_SRC (set)) == PLUS
          && REG_P (XEXP (SET_SRC (set), 0))
          && CONSTANT_P (XEXP (SET_SRC (set), 1)))
        {
          *ad.inner = SET_SRC (set);
          if (valid_address_p (op, &ad, cn))
            {
              *ad.base_term = XEXP (SET_SRC (set), 0);
              *ad.disp_term = XEXP (SET_SRC (set), 1);
              cl = base_reg_class (ad.mode, ad.as, ad.base_outer_code,
                                   get_index_code (&ad), curr_insn);
              regno = REGNO (*ad.base_term);
              if (regno >= FIRST_PSEUDO_REGISTER
                  && cl != lra_get_allocno_class (regno))
                lra_change_class (regno, cl, " Change to", true);
              new_reg = SET_SRC (set);
              delete_insns_since (PREV_INSN (last_insn));
            }
        }
      end_sequence ();
      emit_insn (insns);
      *ad.inner = new_reg;
    }
  else if (ad.disp_term != NULL)
    {
      /* base + scale * index + disp => new base + scale * index,
         case (1) above.  */
      gcc_assert (ad.disp == ad.disp_term);
      new_reg = base_plus_disp_to_reg (&ad, *ad.disp);
      *ad.inner = simplify_gen_binary (PLUS, GET_MODE (new_reg),
                                       new_reg, *ad.index);
    }
  else if ((scale = get_index_scale (&ad)) == 1)
    {
      /* The last transformation to one reg will be made in
         the curr_insn_transform function.  */
      end_sequence ();
      return false;
    }
  else if (scale != 0)
    {
      /* base + scale * index => base + new_reg,
         case (1) above.
         The index part of the address may become invalid.  For
         example, we changed a pseudo into the equivalent memory and a
         subreg of the pseudo into the memory of a different mode for
         which the scale is prohibited.  */
      new_reg = index_part_to_reg (&ad, index_cl);
      *ad.inner = simplify_gen_binary (PLUS, GET_MODE (new_reg),
                                       *ad.base_term, new_reg);
    }
  else
    {
      enum reg_class cl = base_reg_class (ad.mode, ad.as,
                                          SCRATCH, SCRATCH,
                                          curr_insn);
      rtx addr = *ad.inner;

      new_reg = lra_create_new_reg (Pmode, NULL_RTX, cl, NULL, "addr");
      /* addr => new_base.  */
      lra_emit_move (new_reg, addr);
      *ad.inner = new_reg;
    }
  *before = get_insns ();
  end_sequence ();
  return true;
}

/* If CHECK_ONLY_P is false, do address reloads as long as they are
   necessary.  Use process_address_1 as a helper function.  Return true
   for any RTL changes.

   If CHECK_ONLY_P is true, just check address correctness.  Return
   false if the address is correct.  */
static bool
process_address (int nop, bool check_only_p,
                 rtx_insn **before, rtx_insn **after)
{
  bool res = false;

  while (process_address_1 (nop, check_only_p, before, after))
    {
      if (check_only_p)
        return true;
      res = true;
    }
  return res;
}

/* Emit insns to reload VALUE into a new register.  VALUE is an
   auto-increment or auto-decrement RTX whose operand is a register or
   memory location; so reloading involves incrementing that location.
   IN is either identical to VALUE, or some cheaper place to reload
   the value being incremented/decremented from.

   INC_AMOUNT is the number to increment or decrement by (always
   positive and ignored for POST_MODIFY/PRE_MODIFY).

   Return the pseudo containing the result.  */
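/* For example, reloading (post_inc:SI (reg:SI R)) with INC_AMOUNT 4
   emits "new = R; R = R + 4" and returns the new pseudo holding the
   original value.  */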
static rtx
emit_inc (enum reg_class new_rclass, rtx in, rtx value, poly_int64 inc_amount)
{
  /* REG or MEM to be copied and incremented.  */
  rtx incloc = XEXP (value, 0);
  /* Nonzero if increment after copying.  */
  int post = (GET_CODE (value) == POST_DEC || GET_CODE (value) == POST_INC
              || GET_CODE (value) == POST_MODIFY);
  rtx_insn *last;
  rtx inc;
  rtx_insn *add_insn;
  int code;
  rtx real_in = in == value ? incloc : in;
  rtx result;
  bool plus_p = true;

  if (GET_CODE (value) == PRE_MODIFY || GET_CODE (value) == POST_MODIFY)
    {
      lra_assert (GET_CODE (XEXP (value, 1)) == PLUS
                  || GET_CODE (XEXP (value, 1)) == MINUS);
      lra_assert (rtx_equal_p (XEXP (XEXP (value, 1), 0), XEXP (value, 0)));
      plus_p = GET_CODE (XEXP (value, 1)) == PLUS;
      inc = XEXP (XEXP (value, 1), 1);
    }
  else
    {
      if (GET_CODE (value) == PRE_DEC || GET_CODE (value) == POST_DEC)
        inc_amount = -inc_amount;

      inc = gen_int_mode (inc_amount, GET_MODE (value));
    }

  if (! post && REG_P (incloc))
    result = incloc;
  else
    result = lra_create_new_reg (GET_MODE (value), value, new_rclass, NULL,
                                 "INC/DEC result");

  if (real_in != result)
    {
      /* First copy the location to the result register.  */
      lra_assert (REG_P (result));
      emit_insn (gen_move_insn (result, real_in));
    }

  /* We suppose that there are insns to add/sub with the constant
     increment permitted in {PRE,POST}_{DEC,INC,MODIFY}.  At least the
     old reload worked with this assumption.  If the assumption
     becomes wrong, we should use the approach in function
     base_plus_disp_to_reg.  */
  if (in == value)
    {
      /* See if we can directly increment INCLOC.  */
      last = get_last_insn ();
      add_insn = emit_insn (plus_p
                            ? gen_add2_insn (incloc, inc)
                            : gen_sub2_insn (incloc, inc));

      code = recog_memoized (add_insn);
      if (code >= 0)
        {
          if (! post && result != incloc)
            emit_insn (gen_move_insn (result, incloc));
          return result;
        }
      delete_insns_since (last);
    }

  /* If we couldn't do the increment directly, we must increment in
     RESULT.  The way we do this depends on whether this is pre- or
     post-increment.  For pre-increment, copy INCLOC to the reload
     register, increment it there, then save back.  */
  if (! post)
    {
      if (real_in != result)
        emit_insn (gen_move_insn (result, real_in));
      if (plus_p)
        emit_insn (gen_add2_insn (result, inc));
      else
        emit_insn (gen_sub2_insn (result, inc));
      if (result != incloc)
        emit_insn (gen_move_insn (incloc, result));
    }
  else
    {
      /* Post-increment.

         Because this might be a jump insn or a compare, and because
         RESULT may not be available after the insn in an input
         reload, we must do the incrementing before the insn being
         reloaded for.

         We have already copied IN to RESULT.  Increment the copy in
         RESULT, save that back, then decrement RESULT so it has
         the original value.  */
      if (plus_p)
        emit_insn (gen_add2_insn (result, inc));
      else
        emit_insn (gen_sub2_insn (result, inc));
      emit_insn (gen_move_insn (incloc, result));
      /* Restore non-modified value for the result.  We prefer this
         way because it does not require an additional hard
         register.  */
      if (plus_p)
        {
          poly_int64 offset;
          if (poly_int_rtx_p (inc, &offset))
            emit_insn (gen_add2_insn (result,
                                      gen_int_mode (-offset,
                                                    GET_MODE (result))));
          else
            emit_insn (gen_sub2_insn (result, inc));
        }
      else
        emit_insn (gen_add2_insn (result, inc));
    }
  return result;
}

/* Return true if the current move insn does not need processing as we
   already know that it satisfies its constraints.  */
static bool
simple_move_p (void)
{
  rtx dest, src;
  enum reg_class dclass, sclass;

  lra_assert (curr_insn_set != NULL_RTX);
  dest = SET_DEST (curr_insn_set);
  src = SET_SRC (curr_insn_set);

  /* If the instruction has multiple sets we need to process it even if it
     is single_set.  This can happen if one or more of the SETs are dead.
     See PR73650.  */
  if (multiple_sets (curr_insn))
    return false;

  return ((dclass = get_op_class (dest)) != NO_REGS
          && (sclass = get_op_class (src)) != NO_REGS
          /* The backend guarantees that register moves of cost 2
             never need reloads.  */
          && targetm.register_move_cost (GET_MODE (src), sclass, dclass) == 2);
}

/* Swap operands NOP and NOP + 1.  */
static inline void
swap_operands (int nop)
{
  std::swap (curr_operand_mode[nop], curr_operand_mode[nop + 1]);
  std::swap (original_subreg_reg_mode[nop], original_subreg_reg_mode[nop + 1]);
  std::swap (*curr_id->operand_loc[nop], *curr_id->operand_loc[nop + 1]);
  std::swap (equiv_substition_p[nop], equiv_substition_p[nop + 1]);
  /* Swap the duplicates too.  */
  lra_update_dup (curr_id, nop);
  lra_update_dup (curr_id, nop + 1);
}

/* Main entry point of the constraint code: search the body of the
   current insn to choose the best alternative.  It mimics the insn
   alternative cost calculation model of the former reload pass.  That
   is because machine descriptions were written to use this model.  The
   model can be changed in the future.  Make the commutative operand
   exchange if it is chosen.

   If CHECK_ONLY_P is false, do RTL changes to satisfy the
   constraints.  Return true if any change happened during the function
   call.

   If CHECK_ONLY_P is true then don't do any transformation.  Just
   check that the insn satisfies all constraints.  If the insn does
   not satisfy any constraint, return true.  */
static bool
curr_insn_transform (bool check_only_p)
{
  int i, j, k;
  int n_operands;
  int n_alternatives;
  int n_outputs;
  int commutative;
  signed char goal_alt_matched[MAX_RECOG_OPERANDS][MAX_RECOG_OPERANDS];
  signed char match_inputs[MAX_RECOG_OPERANDS + 1];
  signed char outputs[MAX_RECOG_OPERANDS + 1];
  rtx_insn *before, *after;
  bool alt_p = false;
  /* Flag that the insn has been changed through a transformation.  */
  bool change_p;
  bool sec_mem_p;
  bool use_sec_mem_p;
  int max_regno_before;
  int reused_alternative_num;

  curr_insn_set = single_set (curr_insn);
  if (curr_insn_set != NULL_RTX && simple_move_p ())
    {
      /* We assume that the corresponding insn alternative has no
         early clobbers.  If that is not the case, don't define a move
         cost equal to 2 for the corresponding register classes.  */
      lra_set_used_insn_alternative (curr_insn, LRA_NON_CLOBBERED_ALT);
      return false;
    }

  no_input_reloads_p = no_output_reloads_p = false;
  goal_alt_number = -1;
  change_p = sec_mem_p = false;

  /* CALL_INSNs are not allowed to have any output reloads.  */
  if (CALL_P (curr_insn))
    no_output_reloads_p = true;

  n_operands = curr_static_id->n_operands;
  n_alternatives = curr_static_id->n_alternatives;

  /* Just return "no reloads" if the insn has no operands with
     constraints.  */
  if (n_operands == 0 || n_alternatives == 0)
    return false;

  max_regno_before = max_reg_num ();

  for (i = 0; i < n_operands; i++)
    {
      goal_alt_matched[i][0] = -1;
      goal_alt_matches[i] = -1;
    }

  commutative = curr_static_id->commutative;

  /* Now see what we need for pseudos that didn't get hard regs or got
     the wrong kind of hard reg.  For this, we must consider all the
     operands together against the register constraints.  */

  best_losers = best_overall = INT_MAX;
  best_reload_sum = 0;

  curr_swapped = false;
  goal_alt_swapped = false;

  if (! check_only_p)
    /* Make equivalence substitution and memory subreg elimination
       before address processing because an address legitimacy can
       depend on the memory mode.  */
    for (i = 0; i < n_operands; i++)
      {
        rtx op, subst, old;
        bool op_change_p = false;

        if (curr_static_id->operand[i].is_operator)
          continue;

        old = op = *curr_id->operand_loc[i];
        if (GET_CODE (old) == SUBREG)
          old = SUBREG_REG (old);
        subst = get_equiv_with_elimination (old, curr_insn);
        original_subreg_reg_mode[i] = VOIDmode;
        equiv_substition_p[i] = false;
        if (subst != old)
          {
            equiv_substition_p[i] = true;
            subst = copy_rtx (subst);
            lra_assert (REG_P (old));
            if (GET_CODE (op) != SUBREG)
              *curr_id->operand_loc[i] = subst;
            else
              {
                SUBREG_REG (op) = subst;
                if (GET_MODE (subst) == VOIDmode)
                  original_subreg_reg_mode[i] = GET_MODE (old);
              }
            if (lra_dump_file != NULL)
              {
                fprintf (lra_dump_file,
                         "Changing pseudo %d in operand %i of insn %u on equiv ",
                         REGNO (old), i, INSN_UID (curr_insn));
                dump_value_slim (lra_dump_file, subst, 1);
                fprintf (lra_dump_file, "\n");
              }
            op_change_p = change_p = true;
          }
        if (simplify_operand_subreg (i, GET_MODE (old)) || op_change_p)
          {
            change_p = true;
            lra_update_dup (curr_id, i);
          }
      }

  /* Reload address registers and displacements.  We do it before
     finding an alternative because of memory constraints.  */
  before = after = NULL;
  for (i = 0; i < n_operands; i++)
    if (! curr_static_id->operand[i].is_operator
        && process_address (i, check_only_p, &before, &after))
      {
        if (check_only_p)
          return true;
        change_p = true;
        lra_update_dup (curr_id, i);
      }

  if (change_p)
    /* If we've changed the instruction then any alternative that
       we chose previously may no longer be valid.  */
    lra_set_used_insn_alternative (curr_insn, LRA_UNKNOWN_ALT);

  if (! check_only_p && curr_insn_set != NULL_RTX
      && check_and_process_move (&change_p, &sec_mem_p))
    return change_p;

 try_swapped:

  reused_alternative_num
    = check_only_p ? LRA_UNKNOWN_ALT : curr_id->used_insn_alternative;
  if (lra_dump_file != NULL && reused_alternative_num >= 0)
    fprintf (lra_dump_file, "Reusing alternative %d for insn #%u\n",
             reused_alternative_num, INSN_UID (curr_insn));

  if (process_alt_operands (reused_alternative_num))
    alt_p = true;

  if (check_only_p)
    return ! alt_p || best_losers != 0;

  /* If the insn is commutative (it's safe to exchange a certain pair
     of operands) then we need to try each alternative twice, the
     second time matching those two operands as if we had exchanged
     them.  To do this, really exchange them in operands.

     If we have just tried the alternatives the second time, return
     operands to normal and drop through.  */

  if (reused_alternative_num < 0 && commutative >= 0)
    {
      curr_swapped = !curr_swapped;
      if (curr_swapped)
        {
          swap_operands (commutative);
          goto try_swapped;
        }
      else
        swap_operands (commutative);
    }

  if (! alt_p && ! sec_mem_p)
    {
      /* No alternative works with reloads??  */
      if (INSN_CODE (curr_insn) >= 0)
        fatal_insn ("unable to generate reloads for:", curr_insn);
      error_for_asm (curr_insn,
                     "inconsistent operand constraints in an %<asm%>");
      lra_asm_error_p = true;
      if (! JUMP_P (curr_insn))
        {
          /* Avoid further trouble with this insn.  Don't generate a
             use pattern here as we could use the insn SP offset.  */
          lra_set_insn_deleted (curr_insn);
        }
      else
        {
          lra_invalidate_insn_data (curr_insn);
          ira_nullify_asm_goto (curr_insn);
          lra_update_insn_regno_info (curr_insn);
        }
      return true;
    }

  /* If the best alternative is with operands 1 and 2 swapped, swap
     them.  Update the operand numbers of any reloads already
     pushed.  */

  if (goal_alt_swapped)
    {
      if (lra_dump_file != NULL)
        fprintf (lra_dump_file, " Commutative operand exchange in insn %u\n",
                 INSN_UID (curr_insn));

      /* Swap the duplicates too.  */
      swap_operands (commutative);
      change_p = true;
    }

  /* For some targets, TARGET_SECONDARY_MEMORY_NEEDED (e.g. on x86) is
     defined too conservatively.  So we use secondary memory only if
     there is no alternative without reloads.  */
  use_sec_mem_p = false;
  if (! alt_p)
    use_sec_mem_p = true;
  else if (sec_mem_p)
    {
      for (i = 0; i < n_operands; i++)
        if (! goal_alt_win[i] && ! goal_alt_match_win[i])
          break;
      use_sec_mem_p = i < n_operands;
    }

  if (use_sec_mem_p)
    {
      int in = -1, out = -1;
      rtx new_reg, src, dest, rld;
      machine_mode sec_mode, rld_mode;

      lra_assert (curr_insn_set != NULL_RTX && sec_mem_p);
      dest = SET_DEST (curr_insn_set);
      src = SET_SRC (curr_insn_set);
      for (i = 0; i < n_operands; i++)
        if (*curr_id->operand_loc[i] == dest)
          out = i;
        else if (*curr_id->operand_loc[i] == src)
          in = i;
      for (i = 0; i < curr_static_id->n_dups; i++)
        if (out < 0 && *curr_id->dup_loc[i] == dest)
          out = curr_static_id->dup_num[i];
        else if (in < 0 && *curr_id->dup_loc[i] == src)
          in = curr_static_id->dup_num[i];
      lra_assert (out >= 0 && in >= 0
                  && curr_static_id->operand[out].type == OP_OUT
                  && curr_static_id->operand[in].type == OP_IN);
      rld = partial_subreg_p (GET_MODE (src), GET_MODE (dest)) ? src : dest;
      rld_mode = GET_MODE (rld);
      sec_mode = targetm.secondary_memory_needed_mode (rld_mode);
      new_reg = lra_create_new_reg (sec_mode, NULL_RTX, NO_REGS, NULL,
                                    "secondary");
      /* If the mode is changed, it should be wider.  */
      lra_assert (!partial_subreg_p (sec_mode, rld_mode));
      if (sec_mode != rld_mode)
        {
          /* If the target says specifically to use another mode for
             secondary memory moves we cannot reuse the original
             insn.  */
          after = emit_spill_move (false, new_reg, dest);
          lra_process_new_insns (curr_insn, NULL, after,
                                 "Inserting the sec. move");
          /* We may have a non null BEFORE here (e.g. after address
             processing).  */
          push_to_sequence (before);
          before = emit_spill_move (true, new_reg, src);
          emit_insn (before);
          before = get_insns ();
          end_sequence ();
          lra_process_new_insns (curr_insn, before, NULL, "Changing on");
          lra_set_insn_deleted (curr_insn);
        }
      else if (dest == rld)
        {
          *curr_id->operand_loc[out] = new_reg;
          lra_update_dup (curr_id, out);
          after = emit_spill_move (false, new_reg, dest);
          lra_process_new_insns (curr_insn, NULL, after,
                                 "Inserting the sec. move");
        }
      else
        {
          *curr_id->operand_loc[in] = new_reg;
          lra_update_dup (curr_id, in);
          /* See comments above.  */
          push_to_sequence (before);
          before = emit_spill_move (true, new_reg, src);
          emit_insn (before);
          before = get_insns ();
          end_sequence ();
          lra_process_new_insns (curr_insn, before, NULL,
                                 "Inserting the sec. move");
        }
      lra_update_insn_regno_info (curr_insn);
      return true;
    }
4416 | |
4417 | lra_assert (goal_alt_number >= 0); |
4418 | lra_set_used_insn_alternative (curr_insn, goal_reuse_alt_p |
4419 | ? goal_alt_number : LRA_UNKNOWN_ALT); |
4420 | |
4421 | if (lra_dump_file != NULL) |
4422 | { |
4423 | const char *p; |
4424 | |
fprintf (lra_dump_file, " Choosing alt %d in insn %u:",
goal_alt_number, INSN_UID (curr_insn));
print_curr_insn_alt (goal_alt_number);
if (INSN_CODE (curr_insn) >= 0
&& (p = get_insn_name (INSN_CODE (curr_insn))) != NULL)
fprintf (lra_dump_file, " {%s}", p);
if (maybe_ne (curr_id->sp_offset, 0))
{
fprintf (lra_dump_file, " (sp_off=");
print_dec (curr_id->sp_offset, lra_dump_file);
fprintf (lra_dump_file, ")");
}
fprintf (lra_dump_file, "\n");
4438 | } |
4439 | |
4440 | /* Right now, for any pair of operands I and J that are required to |
4441 | match, with J < I, goal_alt_matches[I] is J. Add I to |
4442 | goal_alt_matched[J]. */ |
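/* For instance (operand numbers hypothetical): if the chosen
   alternative requires operand 1 to match operand 0, then
   goal_alt_matches[1] == 0 at this point, and the loop below records
   goal_alt_matched[0] as {1, -1}.  */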
4443 | |
4444 | for (i = 0; i < n_operands; i++) |
4445 | if ((j = goal_alt_matches[i]) >= 0) |
4446 | { |
4447 | for (k = 0; goal_alt_matched[j][k] >= 0; k++) |
4448 | ; |
4449 | /* We allow matching one output operand and several input |
4450 | operands. */ |
4451 | lra_assert (k == 0 |
4452 | || (curr_static_id->operand[j].type == OP_OUT |
4453 | && curr_static_id->operand[i].type == OP_IN |
4454 | && (curr_static_id->operand |
4455 | [goal_alt_matched[j][0]].type == OP_IN))); |
4456 | goal_alt_matched[j][k] = i; |
4457 | goal_alt_matched[j][k + 1] = -1; |
4458 | } |
4459 | |
4460 | for (i = 0; i < n_operands; i++) |
4461 | goal_alt_win[i] |= goal_alt_match_win[i]; |
4462 | |
4463 | /* Any constants that aren't allowed and can't be reloaded into |
4464 | registers are here changed into memory references. */ |
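/* A schematic example (not target-specific): an insn like
       (set (reg:DF r100) (const_double:DF 1.5))
   whose constant cannot be reloaded into a register may become
       (set (reg:DF r100) (mem:DF (symbol_ref LC)))
   with the constant placed in the constant pool by force_const_mem.
   The pseudo number and pool label are hypothetical.  */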
4465 | for (i = 0; i < n_operands; i++) |
4466 | if (goal_alt_win[i]) |
4467 | { |
4468 | int regno; |
4469 | enum reg_class new_class; |
4470 | rtx reg = *curr_id->operand_loc[i]; |
4471 | |
4472 | if (GET_CODE (reg) == SUBREG) |
4473 | reg = SUBREG_REG (reg); |
4474 | |
4475 | if (REG_P (reg) && (regno = REGNO (reg)) >= FIRST_PSEUDO_REGISTER) |
4476 | { |
bool ok_p = in_class_p (reg, goal_alt[i], &new_class);

if (new_class != NO_REGS && get_reg_class (regno) != new_class)
{
lra_assert (ok_p);
lra_change_class (regno, new_class, " Change to", true);
4483 | } |
4484 | } |
4485 | } |
4486 | else |
4487 | { |
4488 | const char *constraint; |
4489 | char c; |
4490 | rtx op = *curr_id->operand_loc[i]; |
4491 | rtx subreg = NULL_RTX; |
4492 | machine_mode mode = curr_operand_mode[i]; |
4493 | |
4494 | if (GET_CODE (op) == SUBREG) |
4495 | { |
4496 | subreg = op; |
4497 | op = SUBREG_REG (op); |
4498 | mode = GET_MODE (op); |
4499 | } |
4500 | |
4501 | if (CONST_POOL_OK_P (mode, op) |
4502 | && ((targetm.preferred_reload_class |
4503 | (op, (enum reg_class) goal_alt[i]) == NO_REGS) |
4504 | || no_input_reloads_p)) |
4505 | { |
4506 | rtx tem = force_const_mem (mode, op); |
4507 | |
4508 | change_p = true; |
4509 | if (subreg != NULL_RTX) |
4510 | tem = gen_rtx_SUBREG (mode, tem, SUBREG_BYTE (subreg)); |
4511 | |
4512 | *curr_id->operand_loc[i] = tem; |
lra_update_dup (curr_id, i);
process_address (i, false, &before, &after);
4515 | |
4516 | /* If the alternative accepts constant pool refs directly |
4517 | there will be no reload needed at all. */ |
4518 | if (subreg != NULL_RTX) |
4519 | continue; |
4520 | /* Skip alternatives before the one requested. */ |
4521 | constraint = (curr_static_id->operand_alternative |
4522 | [goal_alt_number * n_operands + i].constraint); |
4523 | for (; |
4524 | (c = *constraint) && c != ',' && c != '#'; |
4525 | constraint += CONSTRAINT_LEN (c, constraint)) |
4526 | { |
enum constraint_num cn = lookup_constraint (constraint);
if ((insn_extra_memory_constraint (cn)
|| insn_extra_special_memory_constraint (cn)
|| insn_extra_relaxed_memory_constraint (cn))
&& satisfies_memory_constraint_p (tem, cn))
4532 | break; |
4533 | } |
4534 | if (c == '\0' || c == ',' || c == '#') |
4535 | continue; |
4536 | |
4537 | goal_alt_win[i] = true; |
4538 | } |
4539 | } |
4540 | |
4541 | n_outputs = 0; |
4542 | for (i = 0; i < n_operands; i++) |
4543 | if (curr_static_id->operand[i].type == OP_OUT) |
4544 | outputs[n_outputs++] = i; |
4545 | outputs[n_outputs] = -1; |
4546 | for (i = 0; i < n_operands; i++) |
4547 | { |
4548 | int regno; |
4549 | bool optional_p = false; |
4550 | rtx old, new_reg; |
4551 | rtx op = *curr_id->operand_loc[i]; |
4552 | |
4553 | if (goal_alt_win[i]) |
4554 | { |
4555 | if (goal_alt[i] == NO_REGS |
4556 | && REG_P (op) |
/* When we assign NO_REGS it means that we will not
assign a hard register to the scratch pseudo by the
assignment pass and the scratch pseudo will be
spilled.  Spilled scratch pseudos are transformed
back to scratches at the LRA end.  */
&& ira_former_scratch_operand_p (curr_insn, i)
&& ira_former_scratch_p (REGNO (op)))
{
int regno = REGNO (op);
lra_change_class (regno, NO_REGS, " Change to", true);
4567 | if (lra_get_regno_hard_regno (regno) >= 0) |
/* We don't have to mark all insns affected by the
spilled pseudo as there is only one such insn, the
current one.  */
4571 | reg_renumber[regno] = -1; |
4572 | lra_assert (bitmap_single_bit_set_p |
4573 | (&lra_reg_info[REGNO (op)].insn_bitmap)); |
4574 | } |
4575 | /* We can do an optional reload. If the pseudo got a hard |
4576 | reg, we might improve the code through inheritance. If |
4577 | it does not get a hard register we coalesce memory/memory |
4578 | moves later. Ignore move insns to avoid cycling. */ |
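/* A hedged sketch with hypothetical pseudos p, q, np: for an insn
   "p <- p + q" where spilled p is an operand, an optional reload
   creates np and rewrites the insn as "np <- p; np <- np + q;
   p <- np".  If np later gets a hard register, inheritance can
   reuse it; otherwise the undo pass restores the original operand
   from lra_reg_info[np].restore_rtx set below.  */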
4579 | if (! lra_simple_p |
4580 | && lra_undo_inheritance_iter < LRA_MAX_INHERITANCE_PASSES |
4581 | && goal_alt[i] != NO_REGS && REG_P (op) |
4582 | && (regno = REGNO (op)) >= FIRST_PSEUDO_REGISTER |
4583 | && regno < new_regno_start |
4584 | && ! ira_former_scratch_p (regno) |
4585 | && reg_renumber[regno] < 0 |
4586 | /* Check that the optional reload pseudo will be able to |
4587 | hold given mode value. */ |
4588 | && ! (prohibited_class_reg_set_mode_p |
(goal_alt[i], reg_class_contents[goal_alt[i]],
4590 | PSEUDO_REGNO_MODE (regno))) |
4591 | && (curr_insn_set == NULL_RTX |
4592 | || !((REG_P (SET_SRC (curr_insn_set)) |
4593 | || MEM_P (SET_SRC (curr_insn_set)) |
4594 | || GET_CODE (SET_SRC (curr_insn_set)) == SUBREG) |
4595 | && (REG_P (SET_DEST (curr_insn_set)) |
4596 | || MEM_P (SET_DEST (curr_insn_set)) |
4597 | || GET_CODE (SET_DEST (curr_insn_set)) == SUBREG)))) |
4598 | optional_p = true; |
4599 | else if (goal_alt_matched[i][0] != -1 |
4600 | && curr_static_id->operand[i].type == OP_OUT |
4601 | && (curr_static_id->operand_alternative |
4602 | [goal_alt_number * n_operands + i].earlyclobber) |
4603 | && REG_P (op)) |
4604 | { |
4605 | for (j = 0; goal_alt_matched[i][j] != -1; j++) |
4606 | { |
4607 | rtx op2 = *curr_id->operand_loc[goal_alt_matched[i][j]]; |
4608 | |
4609 | if (REG_P (op2) && REGNO (op) != REGNO (op2)) |
4610 | break; |
4611 | } |
4612 | if (goal_alt_matched[i][j] != -1) |
4613 | { |
4614 | /* Generate reloads for different output and matched |
4615 | input registers. This is the easiest way to avoid |
4616 | creation of non-existing register conflicts in |
4617 | lra-lives.cc. */ |
match_reload (i, goal_alt_matched[i], outputs, goal_alt[i],
&goal_alt_exclude_start_hard_regs[i], &before,
&after, true);
4621 | } |
4622 | continue; |
4623 | } |
4624 | else |
4625 | { |
4626 | enum reg_class rclass, common_class; |
4627 | |
4628 | if (REG_P (op) && goal_alt[i] != NO_REGS |
4629 | && (regno = REGNO (op)) >= new_regno_start |
4630 | && (rclass = get_reg_class (regno)) == ALL_REGS |
4631 | && ((common_class = ira_reg_class_subset[rclass][goal_alt[i]]) |
4632 | != NO_REGS) |
4633 | && common_class != ALL_REGS |
&& enough_allocatable_hard_regs_p (common_class,
GET_MODE (op)))
/* Refine reload pseudo class from chosen alternative
constraint.  */
lra_change_class (regno, common_class, " Change to", true);
4639 | continue; |
4640 | } |
4641 | } |
4642 | |
4643 | /* Operands that match previous ones have already been handled. */ |
4644 | if (goal_alt_matches[i] >= 0) |
4645 | continue; |
4646 | |
/* We should not have an operand with a non-offsettable address
appearing where an offsettable address will do.  It may also be
a case where the address needs to be special, in other words
not a general one (e.g. it needs no index reg).  */
4651 | if (goal_alt_matched[i][0] == -1 && goal_alt_offmemok[i] && MEM_P (op)) |
4652 | { |
4653 | enum reg_class rclass; |
4654 | rtx *loc = &XEXP (op, 0); |
4655 | enum rtx_code code = GET_CODE (*loc); |
4656 | |
4657 | push_to_sequence (before); |
rclass = base_reg_class (GET_MODE (op), MEM_ADDR_SPACE (op),
MEM, SCRATCH, curr_insn);
if (GET_RTX_CLASS (code) == RTX_AUTOINC)
new_reg = emit_inc (rclass, *loc, *loc,
/* This value does not matter for MODIFY.  */
GET_MODE_SIZE (GET_MODE (op)));
else if (get_reload_reg (OP_IN, Pmode, *loc, rclass,
NULL, false,
"offsetable address", &new_reg))
4667 | { |
4668 | rtx addr = *loc; |
4669 | enum rtx_code code = GET_CODE (addr); |
4670 | bool align_p = false; |
4671 | |
4672 | if (code == AND && CONST_INT_P (XEXP (addr, 1))) |
4673 | { |
4674 | /* (and ... (const_int -X)) is used to align to X bytes. */ |
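/* E.g. (illustrative RTL) (and (plus (reg sp) (const_int 32))
   (const_int -16)) computes an address aligned to 16 bytes,
   since the mask -16 clears the low 4 address bits.  */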
4675 | align_p = true; |
4676 | addr = XEXP (*loc, 0); |
4677 | } |
4678 | else |
4679 | addr = canonicalize_reload_addr (addr); |
4680 | |
4681 | lra_emit_move (new_reg, addr); |
4682 | if (align_p) |
4683 | emit_move_insn (new_reg, gen_rtx_AND (GET_MODE (new_reg), new_reg, XEXP (*loc, 1))); |
4684 | } |
4685 | before = get_insns (); |
4686 | end_sequence (); |
4687 | *loc = new_reg; |
lra_update_dup (curr_id, i);
4689 | } |
4690 | else if (goal_alt_matched[i][0] == -1) |
4691 | { |
4692 | machine_mode mode; |
4693 | rtx reg, *loc; |
4694 | int hard_regno; |
4695 | enum op_type type = curr_static_id->operand[i].type; |
4696 | |
4697 | loc = curr_id->operand_loc[i]; |
4698 | mode = curr_operand_mode[i]; |
4699 | if (GET_CODE (*loc) == SUBREG) |
4700 | { |
4701 | reg = SUBREG_REG (*loc); |
4702 | poly_int64 byte = SUBREG_BYTE (*loc); |
4703 | if (REG_P (reg) |
4704 | /* Strict_low_part requires reloading the register and not |
4705 | just the subreg. Likewise for a strict subreg no wider |
4706 | than a word for WORD_REGISTER_OPERATIONS targets. */ |
4707 | && (curr_static_id->operand[i].strict_low |
&& (!paradoxical_subreg_p (mode, GET_MODE (reg))
4709 | && (hard_regno |
4710 | = get_try_hard_regno (REGNO (reg))) >= 0 |
4711 | && (simplify_subreg_regno |
4712 | (hard_regno, |
4713 | GET_MODE (reg), byte, mode) < 0) |
4714 | && (goal_alt[i] == NO_REGS |
4715 | || (simplify_subreg_regno |
4716 | (ira_class_hard_regs[goal_alt[i]][0], |
4717 | GET_MODE (reg), byte, mode) >= 0))) |
|| (partial_subreg_p (mode, GET_MODE (reg))
4719 | && known_le (GET_MODE_SIZE (GET_MODE (reg)), |
4720 | UNITS_PER_WORD) |
4721 | && WORD_REGISTER_OPERATIONS)) |
4722 | /* Avoid the situation when there are no available hard regs |
4723 | for the pseudo mode but there are ones for the subreg |
4724 | mode: */ |
4725 | && !(goal_alt[i] != NO_REGS |
4726 | && REGNO (reg) >= FIRST_PSEUDO_REGISTER |
4727 | && (prohibited_class_reg_set_mode_p |
(goal_alt[i], reg_class_contents[goal_alt[i]],
4729 | GET_MODE (reg))) |
4730 | && !(prohibited_class_reg_set_mode_p |
(goal_alt[i], reg_class_contents[goal_alt[i]],
4732 | mode)))) |
4733 | { |
4734 | /* An OP_INOUT is required when reloading a subreg of a |
4735 | mode wider than a word to ensure that data beyond the |
4736 | word being reloaded is preserved. Also automatically |
4737 | ensure that strict_low_part reloads are made into |
4738 | OP_INOUT which should already be true from the backend |
4739 | constraints. */ |
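/* E.g. (illustrative): an output reload of (subreg:SI (reg:DI p) 0)
   must first load p into the reload register so that the untouched
   high part of p is preserved when the reload register is stored
   back.  */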
4740 | if (type == OP_OUT |
4741 | && (curr_static_id->operand[i].strict_low |
4742 | || read_modify_subreg_p (*loc))) |
4743 | type = OP_INOUT; |
4744 | loc = &SUBREG_REG (*loc); |
4745 | mode = GET_MODE (*loc); |
4746 | } |
4747 | } |
4748 | old = *loc; |
if (get_reload_reg (type, mode, old, goal_alt[i],
&goal_alt_exclude_start_hard_regs[i],
loc != curr_id->operand_loc[i], "", &new_reg)
4752 | && type != OP_OUT) |
4753 | { |
4754 | push_to_sequence (before); |
4755 | lra_emit_move (new_reg, old); |
4756 | before = get_insns (); |
4757 | end_sequence (); |
4758 | } |
4759 | *loc = new_reg; |
4760 | if (type != OP_IN |
4761 | && find_reg_note (curr_insn, REG_UNUSED, old) == NULL_RTX) |
4762 | { |
4763 | start_sequence (); |
4764 | lra_emit_move (type == OP_INOUT ? copy_rtx (old) : old, new_reg); |
4765 | emit_insn (after); |
4766 | after = get_insns (); |
4767 | end_sequence (); |
4768 | *loc = new_reg; |
4769 | } |
4770 | for (j = 0; j < goal_alt_dont_inherit_ops_num; j++) |
4771 | if (goal_alt_dont_inherit_ops[j] == i) |
4772 | { |
4773 | lra_set_regno_unique_value (REGNO (new_reg)); |
4774 | break; |
4775 | } |
lra_update_dup (curr_id, i);
4777 | } |
4778 | else if (curr_static_id->operand[i].type == OP_IN |
4779 | && (curr_static_id->operand[goal_alt_matched[i][0]].type |
4780 | == OP_OUT |
4781 | || (curr_static_id->operand[goal_alt_matched[i][0]].type |
4782 | == OP_INOUT |
4783 | && (operands_match_p |
(*curr_id->operand_loc[i],
*curr_id->operand_loc[goal_alt_matched[i][0]],
-1)))))
4787 | { |
/* Generate reloads for input and matched outputs.  */
match_inputs[0] = i;
match_inputs[1] = -1;
match_reload (goal_alt_matched[i][0], match_inputs, outputs,
goal_alt[i], &goal_alt_exclude_start_hard_regs[i],
&before, &after,
curr_static_id->operand_alternative
[goal_alt_number * n_operands + goal_alt_matched[i][0]]
.earlyclobber);
4797 | } |
4798 | else if ((curr_static_id->operand[i].type == OP_OUT |
4799 | || (curr_static_id->operand[i].type == OP_INOUT |
4800 | && (operands_match_p |
(*curr_id->operand_loc[i],
*curr_id->operand_loc[goal_alt_matched[i][0]],
-1))))
&& (curr_static_id->operand[goal_alt_matched[i][0]].type
== OP_IN))
/* Generate reloads for output and matched inputs.  */
match_reload (i, goal_alt_matched[i], outputs, goal_alt[i],
&goal_alt_exclude_start_hard_regs[i], &before, &after,
curr_static_id->operand_alternative
[goal_alt_number * n_operands + i].earlyclobber);
4811 | else if (curr_static_id->operand[i].type == OP_IN |
4812 | && (curr_static_id->operand[goal_alt_matched[i][0]].type |
4813 | == OP_IN)) |
4814 | { |
4815 | /* Generate reloads for matched inputs. */ |
4816 | match_inputs[0] = i; |
4817 | for (j = 0; (k = goal_alt_matched[i][j]) >= 0; j++) |
4818 | match_inputs[j + 1] = k; |
4819 | match_inputs[j + 1] = -1; |
match_reload (-1, match_inputs, outputs, goal_alt[i],
&goal_alt_exclude_start_hard_regs[i],
&before, &after, false);
4823 | } |
4824 | else |
4825 | /* We must generate code in any case when function |
4826 | process_alt_operands decides that it is possible. */ |
4827 | gcc_unreachable (); |
4828 | |
4829 | if (optional_p) |
4830 | { |
4831 | rtx reg = op; |
4832 | |
4833 | lra_assert (REG_P (reg)); |
4834 | regno = REGNO (reg); |
4835 | op = *curr_id->operand_loc[i]; /* Substitution. */ |
4836 | if (GET_CODE (op) == SUBREG) |
4837 | op = SUBREG_REG (op); |
4838 | gcc_assert (REG_P (op) && (int) REGNO (op) >= new_regno_start); |
4839 | bitmap_set_bit (&lra_optional_reload_pseudos, REGNO (op)); |
4840 | lra_reg_info[REGNO (op)].restore_rtx = reg; |
4841 | if (lra_dump_file != NULL) |
fprintf (lra_dump_file,
" Making reload reg %d for reg %d optional\n",
REGNO (op), regno);
4845 | } |
4846 | } |
4847 | if (before != NULL_RTX || after != NULL_RTX |
4848 | || max_regno_before != max_reg_num ()) |
4849 | change_p = true; |
4850 | if (change_p) |
4851 | { |
lra_update_operator_dups (curr_id);
/* Something changed -- process the insn.  */
lra_update_insn_regno_info (curr_insn);
if (asm_noperands (PATTERN (curr_insn)) >= 0
&& ++curr_id->asm_reloads_num >= FIRST_PSEUDO_REGISTER)
/* Most probably there are not enough registers to satisfy the
asm insn: */
lra_asm_insn_error (curr_insn);
4859 | } |
4860 | if (goal_alt_out_sp_reload_p) |
4861 | { |
4862 | /* We have an output stack pointer reload -- update sp offset: */ |
4863 | rtx set; |
4864 | bool done_p = false; |
4865 | poly_int64 sp_offset = curr_id->sp_offset; |
4866 | for (rtx_insn *insn = after; insn != NULL_RTX; insn = NEXT_INSN (insn)) |
4867 | if ((set = single_set (insn)) != NULL_RTX |
4868 | && SET_DEST (set) == stack_pointer_rtx) |
4869 | { |
4870 | lra_assert (!done_p); |
4871 | done_p = true; |
4872 | curr_id->sp_offset = 0; |
4873 | lra_insn_recog_data_t id = lra_get_insn_recog_data (insn); |
4874 | id->sp_offset = sp_offset; |
4875 | if (lra_dump_file != NULL) |
fprintf (lra_dump_file,
" Moving sp offset from insn %u to %u\n",
INSN_UID (curr_insn), INSN_UID (insn));
4879 | } |
4880 | lra_assert (done_p); |
4881 | } |
4882 | lra_process_new_insns (curr_insn, before, after, "Inserting insn reload" ); |
4883 | return change_p; |
4884 | } |
4885 | |
4886 | /* Return true if INSN satisfies all constraints. In other words, no |
4887 | reload insns are needed. */ |
4888 | bool |
4889 | lra_constrain_insn (rtx_insn *insn) |
4890 | { |
4891 | int saved_new_regno_start = new_regno_start; |
4892 | int saved_new_insn_uid_start = new_insn_uid_start; |
4893 | bool change_p; |
4894 | |
4895 | curr_insn = insn; |
curr_id = lra_get_insn_recog_data (curr_insn);
curr_static_id = curr_id->insn_static_data;
new_insn_uid_start = get_max_uid ();
new_regno_start = max_reg_num ();
change_p = curr_insn_transform (true);
4901 | new_regno_start = saved_new_regno_start; |
4902 | new_insn_uid_start = saved_new_insn_uid_start; |
4903 | return ! change_p; |
4904 | } |
4905 | |
4906 | /* Return true if X is in LIST. */ |
4907 | static bool |
4908 | in_list_p (rtx x, rtx list) |
4909 | { |
4910 | for (; list != NULL_RTX; list = XEXP (list, 1)) |
4911 | if (XEXP (list, 0) == x) |
4912 | return true; |
4913 | return false; |
4914 | } |
4915 | |
4916 | /* Return true if X contains an allocatable hard register (if |
4917 | HARD_REG_P) or a (spilled if SPILLED_P) pseudo. */ |
4918 | static bool |
4919 | contains_reg_p (rtx x, bool hard_reg_p, bool spilled_p) |
4920 | { |
4921 | int i, j; |
4922 | const char *fmt; |
4923 | enum rtx_code code; |
4924 | |
4925 | code = GET_CODE (x); |
4926 | if (REG_P (x)) |
4927 | { |
4928 | int regno = REGNO (x); |
4929 | HARD_REG_SET alloc_regs; |
4930 | |
4931 | if (hard_reg_p) |
4932 | { |
4933 | if (regno >= FIRST_PSEUDO_REGISTER) |
4934 | regno = lra_get_regno_hard_regno (regno); |
4935 | if (regno < 0) |
4936 | return false; |
4937 | alloc_regs = ~lra_no_alloc_regs; |
return overlaps_hard_reg_set_p (alloc_regs, GET_MODE (x), regno);
4939 | } |
4940 | else |
4941 | { |
4942 | if (regno < FIRST_PSEUDO_REGISTER) |
4943 | return false; |
4944 | if (! spilled_p) |
4945 | return true; |
4946 | return lra_get_regno_hard_regno (regno) < 0; |
4947 | } |
4948 | } |
4949 | fmt = GET_RTX_FORMAT (code); |
4950 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
4951 | { |
4952 | if (fmt[i] == 'e') |
4953 | { |
4954 | if (contains_reg_p (XEXP (x, i), hard_reg_p, spilled_p)) |
4955 | return true; |
4956 | } |
4957 | else if (fmt[i] == 'E') |
4958 | { |
4959 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
4960 | if (contains_reg_p (XVECEXP (x, i, j), hard_reg_p, spilled_p)) |
4961 | return true; |
4962 | } |
4963 | } |
4964 | return false; |
4965 | } |
4966 | |
/* Process all regs in location *LOC and replace them with their
equivalent substitutions.  Return true if any change was made.  */
4969 | static bool |
4970 | loc_equivalence_change_p (rtx *loc) |
4971 | { |
4972 | rtx subst, reg, x = *loc; |
4973 | bool result = false; |
4974 | enum rtx_code code = GET_CODE (x); |
4975 | const char *fmt; |
4976 | int i, j; |
4977 | |
4978 | if (code == SUBREG) |
4979 | { |
4980 | reg = SUBREG_REG (x); |
if ((subst = get_equiv_with_elimination (reg, curr_insn)) != reg
&& GET_MODE (subst) == VOIDmode)
{
/* We cannot reload a debug location.  Simplify the subreg here
while we know the inner mode.  */
*loc = simplify_gen_subreg (GET_MODE (x), subst,
GET_MODE (reg), SUBREG_BYTE (x));
return true;
}
}
if (code == REG && (subst = get_equiv_with_elimination (x, curr_insn)) != x)
4992 | { |
4993 | *loc = subst; |
4994 | return true; |
4995 | } |
4996 | |
4997 | /* Scan all the operand sub-expressions. */ |
4998 | fmt = GET_RTX_FORMAT (code); |
4999 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
5000 | { |
5001 | if (fmt[i] == 'e') |
result = loc_equivalence_change_p (&XEXP (x, i)) || result;
else if (fmt[i] == 'E')
for (j = XVECLEN (x, i) - 1; j >= 0; j--)
result
= loc_equivalence_change_p (&XVECEXP (x, i, j)) || result;
5007 | } |
5008 | return result; |
5009 | } |
5010 | |
/* Similar to loc_equivalence_change_p, but for use as a
simplify_replace_fn_rtx callback.  DATA is the insn for which the
elimination is done.  If it is null we don't do the elimination.  */
5014 | static rtx |
5015 | loc_equivalence_callback (rtx loc, const_rtx, void *data) |
5016 | { |
5017 | if (!REG_P (loc)) |
5018 | return NULL_RTX; |
5019 | |
rtx subst = (data == NULL
? get_equiv (loc) : get_equiv_with_elimination (loc, (rtx_insn *) data));
5022 | if (subst != loc) |
5023 | return subst; |
5024 | |
5025 | return NULL_RTX; |
5026 | } |
5027 | |
/* Maximum number of generated reload insns per insn.  It is for
preventing this pass from cycling in case of a bug.  */
5030 | #define MAX_RELOAD_INSNS_NUMBER LRA_MAX_INSN_RELOADS |
5031 | |
5032 | /* The current iteration number of this LRA pass. */ |
5033 | int lra_constraint_iter; |
5034 | |
/* True if, during the assignment sub-pass, we should check
assignment correctness for all pseudos and spill some of them to
correct conflicts.  It can be necessary when we substitute an
equivalence which needs checking of register allocation
correctness because the equivalent value contains allocatable
hard registers, or when we restore a multi-register pseudo, or
when we change the insn code and its operand becomes an INOUT
operand when it was an IN one before.  */
5042 | bool check_and_force_assignment_correctness_p; |
5043 | |
5044 | /* Return true if REGNO is referenced in more than one block. */ |
5045 | static bool |
5046 | multi_block_pseudo_p (int regno) |
5047 | { |
5048 | basic_block bb = NULL; |
5049 | unsigned int uid; |
5050 | bitmap_iterator bi; |
5051 | |
5052 | if (regno < FIRST_PSEUDO_REGISTER) |
5053 | return false; |
5054 | |
5055 | EXECUTE_IF_SET_IN_BITMAP (&lra_reg_info[regno].insn_bitmap, 0, uid, bi) |
5056 | if (bb == NULL) |
bb = BLOCK_FOR_INSN (lra_insn_recog_data[uid]->insn);
else if (BLOCK_FOR_INSN (lra_insn_recog_data[uid]->insn) != bb)
5059 | return true; |
5060 | return false; |
5061 | } |
5062 | |
5063 | /* Return true if LIST contains a deleted insn. */ |
5064 | static bool |
5065 | contains_deleted_insn_p (rtx_insn_list *list) |
5066 | { |
5067 | for (; list != NULL_RTX; list = list->next ()) |
5068 | if (NOTE_P (list->insn ()) |
5069 | && NOTE_KIND (list->insn ()) == NOTE_INSN_DELETED) |
5070 | return true; |
5071 | return false; |
5072 | } |
5073 | |
5074 | /* Return true if X contains a pseudo dying in INSN. */ |
5075 | static bool |
5076 | dead_pseudo_p (rtx x, rtx_insn *insn) |
5077 | { |
5078 | int i, j; |
5079 | const char *fmt; |
5080 | enum rtx_code code; |
5081 | |
5082 | if (REG_P (x)) |
5083 | return (insn != NULL_RTX |
5084 | && find_regno_note (insn, REG_DEAD, REGNO (x)) != NULL_RTX); |
5085 | code = GET_CODE (x); |
5086 | fmt = GET_RTX_FORMAT (code); |
5087 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
5088 | { |
5089 | if (fmt[i] == 'e') |
5090 | { |
5091 | if (dead_pseudo_p (XEXP (x, i), insn)) |
5092 | return true; |
5093 | } |
5094 | else if (fmt[i] == 'E') |
5095 | { |
5096 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
5097 | if (dead_pseudo_p (XVECEXP (x, i, j), insn)) |
5098 | return true; |
5099 | } |
5100 | } |
5101 | return false; |
5102 | } |
5103 | |
/* Return true if INSN contains a pseudo dying in its right-hand
side.  */
5106 | static bool |
5107 | insn_rhs_dead_pseudo_p (rtx_insn *insn) |
5108 | { |
5109 | rtx set = single_set (insn); |
5110 | |
5111 | gcc_assert (set != NULL); |
5112 | return dead_pseudo_p (SET_SRC (set), insn); |
5113 | } |
5114 | |
/* Return true if any init insn of REGNO contains a pseudo dying in
the insn's right-hand side.  */
5117 | static bool |
5118 | init_insn_rhs_dead_pseudo_p (int regno) |
5119 | { |
5120 | rtx_insn_list *insns = ira_reg_equiv[regno].init_insns; |
5121 | |
5122 | if (insns == NULL) |
5123 | return false; |
5124 | for (; insns != NULL_RTX; insns = insns->next ()) |
if (insn_rhs_dead_pseudo_p (insns->insn ()))
5126 | return true; |
5127 | return false; |
5128 | } |
5129 | |
/* Return TRUE if REGNO has a reverse equivalence.  The equivalence is
reverse only if we have one init insn with the given REGNO as a
source.  */
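/* I.e. the single init insn has the schematic form
       (set equiv_value (reg REGNO))
   rather than the usual
       (set (reg REGNO) equiv_value).  */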
5133 | static bool |
5134 | reverse_equiv_p (int regno) |
5135 | { |
5136 | rtx_insn_list *insns = ira_reg_equiv[regno].init_insns; |
5137 | rtx set; |
5138 | |
5139 | if (insns == NULL) |
5140 | return false; |
5141 | if (! INSN_P (insns->insn ()) |
5142 | || insns->next () != NULL) |
5143 | return false; |
if ((set = single_set (insns->insn ())) == NULL_RTX)
5145 | return false; |
5146 | return REG_P (SET_SRC (set)) && (int) REGNO (SET_SRC (set)) == regno; |
5147 | } |
5148 | |
5149 | /* Return TRUE if REGNO was reloaded in an equivalence init insn. We |
5150 | call this function only for non-reverse equivalence. */ |
5151 | static bool |
5152 | contains_reloaded_insn_p (int regno) |
5153 | { |
5154 | rtx set; |
5155 | rtx_insn_list *list = ira_reg_equiv[regno].init_insns; |
5156 | |
5157 | for (; list != NULL; list = list->next ()) |
if ((set = single_set (list->insn ())) == NULL_RTX
5159 | || ! REG_P (SET_DEST (set)) |
5160 | || (int) REGNO (SET_DEST (set)) != regno) |
5161 | return true; |
5162 | return false; |
5163 | } |
5164 | |
/* Try to combine secondary memory reload insn FROM into insn TO.
FROM should be a load insn (usually a secondary memory reload insn).
Return TRUE in case of success.  */
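/* A schematic example with hypothetical pseudo numbers:

       FROM: (set (reg:SI 200) (reg:SI 100))  ; 100 is spilled, 200 is not
       TO:   (set ... (... (reg:SI 200) ...))

   If TO still satisfies its constraints after (reg:SI 100) is
   substituted for (reg:SI 200) in its input operands, the load FROM
   becomes dead and the combined TO is used instead.  */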
5168 | static bool |
5169 | combine_reload_insn (rtx_insn *from, rtx_insn *to) |
5170 | { |
5171 | bool ok_p; |
5172 | rtx_insn *saved_insn; |
5173 | rtx set, from_reg, to_reg, op; |
5174 | enum reg_class to_class, from_class; |
5175 | int n, nop; |
5176 | signed char changed_nops[MAX_RECOG_OPERANDS + 1]; |
5177 | |
/* Check conditions on the secondary memory reload and the original insn: */
if ((targetm.secondary_memory_needed
== hook_bool_mode_reg_class_t_reg_class_t_false)
|| NEXT_INSN (from) != to
|| !NONDEBUG_INSN_P (to)
|| CALL_P (to))
return false;

lra_insn_recog_data_t id = lra_get_insn_recog_data (to);
5187 | struct lra_static_insn_data *static_id = id->insn_static_data; |
5188 | |
5189 | if (id->used_insn_alternative == LRA_UNKNOWN_ALT |
|| (set = single_set (from)) == NULL_RTX)
5191 | return false; |
5192 | from_reg = SET_DEST (set); |
5193 | to_reg = SET_SRC (set); |
5194 | /* Ignore optional reloads: */ |
5195 | if (! REG_P (from_reg) || ! REG_P (to_reg) |
5196 | || bitmap_bit_p (&lra_optional_reload_pseudos, REGNO (from_reg))) |
5197 | return false; |
5198 | to_class = lra_get_allocno_class (REGNO (to_reg)); |
5199 | from_class = lra_get_allocno_class (REGNO (from_reg)); |
5200 | /* Check that reload insn is a load: */ |
5201 | if (to_class != NO_REGS || from_class == NO_REGS) |
5202 | return false; |
5203 | for (n = nop = 0; nop < static_id->n_operands; nop++) |
5204 | { |
5205 | if (static_id->operand[nop].type != OP_IN) |
5206 | continue; |
5207 | op = *id->operand_loc[nop]; |
5208 | if (!REG_P (op) || REGNO (op) != REGNO (from_reg)) |
5209 | continue; |
5210 | *id->operand_loc[nop] = to_reg; |
5211 | changed_nops[n++] = nop; |
5212 | } |
5213 | changed_nops[n] = -1; |
5214 | lra_update_dups (id, changed_nops); |
5215 | lra_update_insn_regno_info (to); |
ok_p = recog_memoized (to) >= 0;
5217 | if (ok_p) |
5218 | { |
/* Check that the combined insn does not need any reloads: */
saved_insn = curr_insn;
curr_insn = to;
curr_id = lra_get_insn_recog_data (curr_insn);
curr_static_id = curr_id->insn_static_data;
for (bool swapped_p = false;;)
{
ok_p = !curr_insn_transform (true);
if (ok_p || curr_static_id->commutative < 0)
break;
swap_operands (curr_static_id->commutative);
if (lra_dump_file != NULL)
{
fprintf (lra_dump_file,
" Swapping %scombined insn operands:\n",
swapped_p ? "back " : "");
5235 | dump_insn_slim (lra_dump_file, to); |
5236 | } |
5237 | if (swapped_p) |
5238 | break; |
5239 | swapped_p = true; |
5240 | } |
5241 | curr_insn = saved_insn; |
curr_id = lra_get_insn_recog_data (curr_insn);
5243 | curr_static_id = curr_id->insn_static_data; |
5244 | } |
5245 | if (ok_p) |
5246 | { |
5247 | id->used_insn_alternative = -1; |
5248 | lra_push_insn_and_update_insn_regno_info (to); |
5249 | if (lra_dump_file != NULL) |
5250 | { |
fprintf (lra_dump_file, " Use combined insn:\n");
5252 | dump_insn_slim (lra_dump_file, to); |
5253 | } |
5254 | return true; |
5255 | } |
5256 | if (lra_dump_file != NULL) |
5257 | { |
fprintf (lra_dump_file, " Failed combined insn:\n");
5259 | dump_insn_slim (lra_dump_file, to); |
5260 | } |
5261 | for (int i = 0; i < n; i++) |
5262 | { |
5263 | nop = changed_nops[i]; |
5264 | *id->operand_loc[nop] = from_reg; |
5265 | } |
5266 | lra_update_dups (id, changed_nops); |
5267 | lra_update_insn_regno_info (to); |
5268 | if (lra_dump_file != NULL) |
5269 | { |
fprintf (lra_dump_file, " Restoring insn after failed combining:\n");
5271 | dump_insn_slim (lra_dump_file, to); |
5272 | } |
5273 | return false; |
5274 | } |
5275 | |
5276 | /* Entry function of LRA constraint pass. Return true if the |
5277 | constraint pass did change the code. */ |
5278 | bool |
5279 | lra_constraints (bool first_p) |
5280 | { |
5281 | bool changed_p; |
5282 | int i, hard_regno, new_insns_num; |
5283 | unsigned int min_len, new_min_len, uid; |
5284 | rtx set, x, reg, dest_reg; |
5285 | rtx_insn *original_insn; |
5286 | basic_block last_bb; |
5287 | bitmap_iterator bi; |
5288 | |
5289 | lra_constraint_iter++; |
5290 | if (lra_dump_file != NULL) |
fprintf (lra_dump_file, "\n********** Local #%d: **********\n\n",
5292 | lra_constraint_iter); |
5293 | changed_p = false; |
5294 | if (pic_offset_table_rtx |
5295 | && REGNO (pic_offset_table_rtx) >= FIRST_PSEUDO_REGISTER) |
5296 | check_and_force_assignment_correctness_p = true; |
5297 | else if (first_p) |
/* On the first iteration we should check IRA assignment
correctness.  In rare cases, the assignments can be wrong as
early clobber operands are ignored in IRA or usages of
paradoxical sub-registers are not taken into account by
IRA.  */
5303 | check_and_force_assignment_correctness_p = true; |
5304 | new_insn_uid_start = get_max_uid (); |
5305 | new_regno_start = first_p ? lra_constraint_new_regno_start : max_reg_num (); |
/* Mark used hard regs for target stack size calculations.  */
for (i = FIRST_PSEUDO_REGISTER; i < new_regno_start; i++)
if (lra_reg_info[i].nrefs != 0
&& (hard_regno = lra_get_regno_hard_regno (i)) >= 0)
{
int j, nregs;

nregs = hard_regno_nregs (hard_regno, lra_reg_info[i].biggest_mode);
5314 | for (j = 0; j < nregs; j++) |
5315 | df_set_regs_ever_live (hard_regno + j, true); |
5316 | } |
5317 | /* Do elimination before the equivalence processing as we can spill |
5318 | some pseudos during elimination. */ |
5319 | lra_eliminate (false, first_p); |
5320 | auto_bitmap equiv_insn_bitmap (®_obstack); |
5321 | for (i = FIRST_PSEUDO_REGISTER; i < new_regno_start; i++) |
5322 | if (lra_reg_info[i].nrefs != 0) |
5323 | { |
5324 | ira_reg_equiv[i].profitable_p = true; |
5325 | reg = regno_reg_rtx[i]; |
if (lra_get_regno_hard_regno (i) < 0 && (x = get_equiv (reg)) != reg)
{
bool pseudo_p = contains_reg_p (x, false, false);
5329 | |
/* After RTL transformations, we cannot guarantee that a
pseudo in the substitution was not reloaded, which might
make the equivalence invalid.  For example, in the reverse
5333 | equiv of p0 |
5334 | |
5335 | p0 <- ... |
5336 | ... |
5337 | equiv_mem <- p0 |
5338 | |
5339 | the memory address register was reloaded before the 2nd |
5340 | insn. */ |
if ((! first_p && pseudo_p)
/* We don't use DF for the sake of compilation speed, so it
is problematic to update live info when we use an
equivalence containing pseudos in more than one
BB.  */
|| (pseudo_p && multi_block_pseudo_p (i))
/* If an init insn was deleted for some reason, cancel
the equiv.  We could update the equiv insns after
transformations including an equiv insn deletion
but it is not worth it as such cases are extremely
rare.  */
|| contains_deleted_insn_p (ira_reg_equiv[i].init_insns)
5353 | /* If it is not a reverse equivalence, we check that a |
5354 | pseudo in rhs of the init insn is not dying in the |
5355 | insn. Otherwise, the live info at the beginning of |
5356 | the corresponding BB might be wrong after we |
5357 | removed the insn. When the equiv can be a |
5358 | constant, the right hand side of the init insn can |
5359 | be a pseudo. */ |
|| (! reverse_equiv_p (i)
&& (init_insn_rhs_dead_pseudo_p (i)
/* If we reloaded the pseudo in an equivalence
init insn, we cannot remove the equiv init
insns and the init insns might write into
const memory in this case.  */
|| contains_reloaded_insn_p (i)))
5367 | /* Prevent access beyond equivalent memory for |
5368 | paradoxical subregs. */ |
5369 | || (MEM_P (x) |
5370 | && maybe_gt (GET_MODE_SIZE (lra_reg_info[i].biggest_mode), |
5371 | GET_MODE_SIZE (GET_MODE (x)))) |
5372 | || (pic_offset_table_rtx |
5373 | && ((CONST_POOL_OK_P (PSEUDO_REGNO_MODE (i), x) |
5374 | && (targetm.preferred_reload_class |
(x, lra_get_allocno_class (i)) == NO_REGS))
5376 | || contains_symbol_ref_p (x)))) |
5377 | ira_reg_equiv[i].defined_p |
5378 | = ira_reg_equiv[i].caller_save_p = false; |
if (contains_reg_p (x, false, true))
ira_reg_equiv[i].profitable_p = false;
if (get_equiv (reg) != reg)
5382 | bitmap_ior_into (equiv_insn_bitmap, &lra_reg_info[i].insn_bitmap); |
5383 | } |
5384 | } |
5385 | for (i = FIRST_PSEUDO_REGISTER; i < new_regno_start; i++) |
update_equiv (i);
5387 | /* We should add all insns containing pseudos which should be |
5388 | substituted by their equivalences. */ |
5389 | EXECUTE_IF_SET_IN_BITMAP (equiv_insn_bitmap, 0, uid, bi) |
5390 | lra_push_insn_by_uid (uid); |
5391 | min_len = lra_insn_stack_length (); |
5392 | new_insns_num = 0; |
5393 | last_bb = NULL; |
5394 | changed_p = false; |
5395 | original_insn = NULL; |
5396 | while ((new_min_len = lra_insn_stack_length ()) != 0) |
5397 | { |
5398 | curr_insn = lra_pop_insn (); |
5399 | --new_min_len; |
curr_bb = BLOCK_FOR_INSN (curr_insn);
5401 | if (curr_bb != last_bb) |
5402 | { |
5403 | last_bb = curr_bb; |
5404 | bb_reload_num = lra_curr_reload_num; |
5405 | } |
5406 | if (min_len > new_min_len) |
5407 | { |
5408 | min_len = new_min_len; |
5409 | new_insns_num = 0; |
5410 | original_insn = curr_insn; |
5411 | } |
else if (combine_reload_insn (curr_insn, original_insn))
5413 | { |
5414 | continue; |
5415 | } |
5416 | if (new_insns_num > MAX_RELOAD_INSNS_NUMBER) |
5417 | internal_error |
5418 | ("maximum number of generated reload insns per insn achieved (%d)" , |
5419 | MAX_RELOAD_INSNS_NUMBER); |
5420 | new_insns_num++; |
5421 | if (DEBUG_INSN_P (curr_insn)) |
5422 | { |
/* We need to check equivalences in the debug insn and change
the pseudo to its equivalent value if necessary.  */
curr_id = lra_get_insn_recog_data (curr_insn);
if (bitmap_bit_p (equiv_insn_bitmap, INSN_UID (curr_insn)))
{
rtx old = *curr_id->operand_loc[0];
*curr_id->operand_loc[0]
= simplify_replace_fn_rtx (old, NULL_RTX,
loc_equivalence_callback, curr_insn);
5432 | if (old != *curr_id->operand_loc[0]) |
5433 | { |
/* If we substitute a pseudo by a shared equivalence, we can fail
to update LRA reg info, which can result in many unexpected
consequences.  So keep the RTL unshared: */
5437 | *curr_id->operand_loc[0] |
5438 | = copy_rtx (*curr_id->operand_loc[0]); |
5439 | lra_update_insn_regno_info (curr_insn); |
5440 | changed_p = true; |
5441 | } |
5442 | } |
5443 | } |
5444 | else if (INSN_P (curr_insn)) |
5445 | { |
if ((set = single_set (curr_insn)) != NULL_RTX)
5447 | { |
5448 | dest_reg = SET_DEST (set); |
/* The equivalence pseudo could be set up as a SUBREG when it
is a call restore insn in a mode different from the pseudo
mode.  */
5452 | if (GET_CODE (dest_reg) == SUBREG) |
5453 | dest_reg = SUBREG_REG (dest_reg); |
if ((REG_P (dest_reg)
&& (x = get_equiv (dest_reg)) != dest_reg
/* Remove insns which set up a pseudo whose value
cannot be changed.  Such insns might not be in
init_insns because we don't update equiv data
during insn transformations.

As an example, let us suppose that a pseudo got a
hard register and on the 1st pass was not
changed to its equivalent constant.  We generate an
additional insn setting up the pseudo because of
secondary memory movement.  Then the pseudo is
spilled and we use the equiv constant.  In this
case we should remove the additional insn and
this insn is not in the init_insns list.  */
&& (! MEM_P (x) || MEM_READONLY_P (x)
/* Check that this is actually an insn setting
up the equivalence.  */
|| in_list_p (curr_insn,
ira_reg_equiv
[REGNO (dest_reg)].init_insns)))
|| (((x = get_equiv (SET_SRC (set))) != SET_SRC (set))
&& in_list_p (curr_insn,
ira_reg_equiv
[REGNO (SET_SRC (set))].init_insns)))
5479 | { |
/* This is an equiv init insn of a pseudo which did not get a
hard register -- remove the insn.  */
if (lra_dump_file != NULL)
{
fprintf (lra_dump_file,
" Removing equiv init insn %i (freq=%d)\n",
INSN_UID (curr_insn),
REG_FREQ_FROM_BB (BLOCK_FOR_INSN (curr_insn)));
5488 | dump_insn_slim (lra_dump_file, curr_insn); |
5489 | } |
if (contains_reg_p (x, true, false))
5491 | check_and_force_assignment_correctness_p = true; |
5492 | lra_set_insn_deleted (curr_insn); |
5493 | continue; |
5494 | } |
5495 | } |
curr_id = lra_get_insn_recog_data (curr_insn);
curr_static_id = curr_id->insn_static_data;
init_curr_insn_input_reloads ();
init_curr_operand_mode ();
if (curr_insn_transform (false))
changed_p = true;
/* Check non-transformed insns too for equiv change as USE
or CLOBBER insns don't need reloads but can contain pseudos
being changed to their equivalences.  */
else if (bitmap_bit_p (equiv_insn_bitmap, INSN_UID (curr_insn))
&& loc_equivalence_change_p (&PATTERN (curr_insn)))
5507 | { |
5508 | lra_update_insn_regno_info (curr_insn); |
5509 | changed_p = true; |
5510 | } |
5511 | } |
5512 | } |
5513 | |
5514 | /* If we used a new hard regno, changed_p should be true because the |
5515 | hard reg is assigned to a new pseudo. */ |
5516 | if (flag_checking && !changed_p) |
5517 | { |
5518 | for (i = FIRST_PSEUDO_REGISTER; i < new_regno_start; i++) |
5519 | if (lra_reg_info[i].nrefs != 0 |
&& (hard_regno = lra_get_regno_hard_regno (i)) >= 0)
{
int j, nregs = hard_regno_nregs (hard_regno,
PSEUDO_REGNO_MODE (i));
5524 | |
5525 | for (j = 0; j < nregs; j++) |
5526 | lra_assert (df_regs_ever_live_p (hard_regno + j)); |
5527 | } |
5528 | } |
5529 | return changed_p; |
5530 | } |
5531 | |
5532 | static void initiate_invariants (void); |
5533 | static void finish_invariants (void); |
5534 | |
5535 | /* Initiate the LRA constraint pass. It is done once per |
5536 | function. */ |
5537 | void |
5538 | lra_constraints_init (void) |
5539 | { |
5540 | initiate_invariants (); |
5541 | } |
5542 | |
5543 | /* Finalize the LRA constraint pass. It is done once per |
5544 | function. */ |
5545 | void |
5546 | lra_constraints_finish (void) |
5547 | { |
5548 | finish_invariants (); |
5549 | } |
5550 | |
5551 | |
5552 | |
/* Structure describing invariants for inheritance.  */
5554 | struct lra_invariant |
5555 | { |
5556 | /* The order number of the invariant. */ |
5557 | int num; |
5558 | /* The invariant RTX. */ |
5559 | rtx invariant_rtx; |
5560 | /* The origin insn of the invariant. */ |
5561 | rtx_insn *insn; |
5562 | }; |
5563 | |
5564 | typedef lra_invariant invariant_t; |
5565 | typedef invariant_t *invariant_ptr_t; |
5566 | typedef const invariant_t *const_invariant_ptr_t; |
5567 | |
5568 | /* Pointer to the inheritance invariants. */ |
5569 | static vec<invariant_ptr_t> invariants; |
5570 | |
5571 | /* Allocation pool for the invariants. */ |
5572 | static object_allocator<lra_invariant> *invariants_pool; |
5573 | |
5574 | /* Hash table for the invariants. */ |
5575 | static htab_t invariant_table; |
5576 | |
5577 | /* Hash function for INVARIANT. */ |
5578 | static hashval_t |
5579 | invariant_hash (const void *invariant) |
5580 | { |
5581 | rtx inv = ((const_invariant_ptr_t) invariant)->invariant_rtx; |
return lra_rtx_hash (inv);
5583 | } |
5584 | |
5585 | /* Equal function for invariants INVARIANT1 and INVARIANT2. */ |
5586 | static int |
5587 | invariant_eq_p (const void *invariant1, const void *invariant2) |
5588 | { |
5589 | rtx inv1 = ((const_invariant_ptr_t) invariant1)->invariant_rtx; |
5590 | rtx inv2 = ((const_invariant_ptr_t) invariant2)->invariant_rtx; |
5591 | |
5592 | return rtx_equal_p (inv1, inv2); |
5593 | } |
5594 | |
5595 | /* Insert INVARIANT_RTX into the table if it is not there yet. Return |
5596 | invariant which is in the table. */ |
5597 | static invariant_ptr_t |
5598 | insert_invariant (rtx invariant_rtx) |
5599 | { |
5600 | void **entry_ptr; |
5601 | invariant_t invariant; |
5602 | invariant_ptr_t invariant_ptr; |
5603 | |
5604 | invariant.invariant_rtx = invariant_rtx; |
5605 | entry_ptr = htab_find_slot (invariant_table, &invariant, INSERT); |
5606 | if (*entry_ptr == NULL) |
5607 | { |
5608 | invariant_ptr = invariants_pool->allocate (); |
5609 | invariant_ptr->invariant_rtx = invariant_rtx; |
5610 | invariant_ptr->insn = NULL; |
invariants.safe_push (invariant_ptr);
5612 | *entry_ptr = (void *) invariant_ptr; |
5613 | } |
5614 | return (invariant_ptr_t) *entry_ptr; |
5615 | } |
5616 | |
5617 | /* Initiate the invariant table. */ |
5618 | static void |
5619 | initiate_invariants (void) |
5620 | { |
invariants.create (100);
5622 | invariants_pool |
5623 | = new object_allocator<lra_invariant> ("Inheritance invariants" ); |
5624 | invariant_table = htab_create (100, invariant_hash, invariant_eq_p, NULL); |
5625 | } |
5626 | |
5627 | /* Finish the invariant table. */ |
5628 | static void |
5629 | finish_invariants (void) |
5630 | { |
5631 | htab_delete (invariant_table); |
5632 | delete invariants_pool; |
5633 | invariants.release (); |
5634 | } |
5635 | |
5636 | /* Make the invariant table empty. */ |
5637 | static void |
5638 | clear_invariants (void) |
5639 | { |
5640 | htab_empty (invariant_table); |
5641 | invariants_pool->release (); |
invariants.truncate (0);
5643 | } |
5644 | |
5645 | |
5646 | |
5647 | /* This page contains code to do inheritance/split |
5648 | transformations. */ |
5649 | |
5650 | /* Number of reloads passed so far in current EBB. */ |
5651 | static int reloads_num; |
5652 | |
5653 | /* Number of calls passed so far in current EBB. */ |
5654 | static int calls_num; |
5655 | |
/* Index ID is the CALLS_NUM associated with the last call we saw
with ABI identifier ID.  */
5658 | static int last_call_for_abi[NUM_ABI_IDS]; |
5659 | |
5660 | /* Which registers have been fully or partially clobbered by a call |
5661 | since they were last used. */ |
5662 | static HARD_REG_SET full_and_partial_call_clobbers; |
5663 | |
5664 | /* Current reload pseudo check for validity of elements in |
5665 | USAGE_INSNS. */ |
5666 | static int curr_usage_insns_check; |
5667 | |
5668 | /* Info about last usage of registers in EBB to do inheritance/split |
5669 | transformation. Inheritance transformation is done from a spilled |
5670 | pseudo and split transformations from a hard register or a pseudo |
5671 | assigned to a hard register. */ |
5672 | struct usage_insns |
5673 | { |
/* If the value is equal to CURR_USAGE_INSNS_CHECK, then the member
value INSNS is valid.  INSNS is a chain of optional debug insns
finished by a non-debug insn using the corresponding reg.  The
value is also used to mark the registers which are set up in the
current insn.  The negated insn uid is used for this.  */
5679 | int check; |
5680 | /* Value of global reloads_num at the last insn in INSNS. */ |
5681 | int reloads_num; |
/* Value of global calls_num at the last insn in INSNS.  */
5683 | int calls_num; |
5684 | /* It can be true only for splitting. And it means that the restore |
5685 | insn should be put after insn given by the following member. */ |
5686 | bool after_p; |
/* Next insns in the current EBB which use the original reg and the
original reg value is not changed between the current insn and
the next insns.  In other words, e.g. for inheritance, if we need
to use the original reg value again in the next insns we can try
to use the value in a hard register from a reload insn of the
current insn.  */
5693 | rtx insns; |
5694 | }; |
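/* Schematically, if a non-debug use U of a reg is followed by debug
   insns D1 and D2 processed later, the INSNS chain built by the
   functions below has the form
       (insn_list D2 (insn_list D1 U))
   i.e. optional INSN_LIST nodes wrapping debug insns, with the plain
   non-debug insn as the final element.  */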
5695 | |
5696 | /* Map: regno -> corresponding pseudo usage insns. */ |
5697 | static struct usage_insns *usage_insns; |
5698 | |
5699 | static void |
5700 | setup_next_usage_insn (int regno, rtx insn, int reloads_num, bool after_p) |
5701 | { |
5702 | usage_insns[regno].check = curr_usage_insns_check; |
5703 | usage_insns[regno].insns = insn; |
5704 | usage_insns[regno].reloads_num = reloads_num; |
5705 | usage_insns[regno].calls_num = calls_num; |
5706 | usage_insns[regno].after_p = after_p; |
5707 | if (regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] >= 0) |
remove_from_hard_reg_set (&full_and_partial_call_clobbers,
PSEUDO_REGNO_MODE (regno),
reg_renumber[regno]);
5711 | } |
5712 | |
/* The function is used to form the list of REGNO usages, which
consists of optional debug insns finished by a non-debug insn using
REGNO.  RELOADS_NUM is the current number of reload insns processed
so far.  */
5716 | static void |
5717 | add_next_usage_insn (int regno, rtx_insn *insn, int reloads_num) |
5718 | { |
5719 | rtx next_usage_insns; |
5720 | |
5721 | if (usage_insns[regno].check == curr_usage_insns_check |
5722 | && (next_usage_insns = usage_insns[regno].insns) != NULL_RTX |
5723 | && DEBUG_INSN_P (insn)) |
5724 | { |
5725 | /* Check that we did not add the debug insn yet. */ |
5726 | if (next_usage_insns != insn |
5727 | && (GET_CODE (next_usage_insns) != INSN_LIST |
5728 | || XEXP (next_usage_insns, 0) != insn)) |
5729 | usage_insns[regno].insns = gen_rtx_INSN_LIST (VOIDmode, insn, |
5730 | next_usage_insns); |
5731 | } |
5732 | else if (NONDEBUG_INSN_P (insn)) |
setup_next_usage_insn (regno, insn, reloads_num, false);
5734 | else |
5735 | usage_insns[regno].check = 0; |
5736 | } |
5737 | |
5738 | /* Return first non-debug insn in list USAGE_INSNS. */ |
5739 | static rtx_insn * |
5740 | skip_usage_debug_insns (rtx usage_insns) |
5741 | { |
5742 | rtx insn; |
5743 | |
5744 | /* Skip debug insns. */ |
5745 | for (insn = usage_insns; |
5746 | insn != NULL_RTX && GET_CODE (insn) == INSN_LIST; |
5747 | insn = XEXP (insn, 1)) |
5748 | ; |
return safe_as_a <rtx_insn *> (insn);
5750 | } |
5751 | |
/* Return true if we need secondary memory moves for the insn in
USAGE_INSNS after inserting an inherited pseudo of class INHER_CL
into the insn.  */
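/* E.g. (hypothetical classes): if the next usage insn loads the value
   into a register of class FP_REGS while INHER_CL is INT_REGS, and
   the target requires such copies to go through memory, inheritance
   would only trade one memory access for another, so the caller
   rejects it.  */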
5755 | static bool |
5756 | check_secondary_memory_needed_p (enum reg_class inher_cl ATTRIBUTE_UNUSED, |
5757 | rtx usage_insns ATTRIBUTE_UNUSED) |
5758 | { |
5759 | rtx_insn *insn; |
5760 | rtx set, dest; |
5761 | enum reg_class cl; |
5762 | |
5763 | if (inher_cl == ALL_REGS |
5764 | || (insn = skip_usage_debug_insns (usage_insns)) == NULL_RTX) |
5765 | return false; |
5766 | lra_assert (INSN_P (insn)); |
5767 | if ((set = single_set (insn)) == NULL_RTX || ! REG_P (SET_DEST (set))) |
5768 | return false; |
5769 | dest = SET_DEST (set); |
5770 | if (! REG_P (dest)) |
5771 | return false; |
5772 | lra_assert (inher_cl != NO_REGS); |
5773 | cl = get_reg_class (REGNO (dest)); |
5774 | return (cl != NO_REGS && cl != ALL_REGS |
5775 | && targetm.secondary_memory_needed (GET_MODE (dest), inher_cl, cl)); |
5776 | } |
5777 | |
5778 | /* Registers involved in inheritance/split in the current EBB |
5779 | (inheritance/split pseudos and original registers). */ |
5780 | static bitmap_head check_only_regs; |
5781 | |
/* Reload pseudos cannot be involved in invariant inheritance in the
current EBB.  */
5784 | static bitmap_head invalid_invariant_regs; |
5785 | |
5786 | /* Do inheritance transformations for insn INSN, which defines (if |
5787 | DEF_P) or uses ORIGINAL_REGNO. NEXT_USAGE_INSNS specifies which |
5788 | instruction in the EBB next uses ORIGINAL_REGNO; it has the same |
5789 | form as the "insns" field of usage_insns. Return true if we |
5790 | succeed in such transformation. |
5791 | |
5792 | The transformations look like: |
5793 | |
5794 | p <- ... i <- ... |
5795 | ... p <- i (new insn) |
5796 | ... => |
5797 | <- ... p ... <- ... i ... |
5798 | or |
5799 | ... i <- p (new insn) |
5800 | <- ... p ... <- ... i ... |
5801 | ... => |
5802 | <- ... p ... <- ... i ... |
5803 | where p is a spilled original pseudo and i is a new inheritance pseudo. |
5804 | |
5805 | |
The inheritance pseudo has the smaller of the two classes CL and
the class of ORIGINAL_REGNO.  */
5808 | static bool |
5809 | inherit_reload_reg (bool def_p, int original_regno, |
5810 | enum reg_class cl, rtx_insn *insn, rtx next_usage_insns) |
5811 | { |
5812 | if (optimize_function_for_size_p (cfun)) |
5813 | return false; |
5814 | |
enum reg_class rclass = lra_get_allocno_class (original_regno);
5816 | rtx original_reg = regno_reg_rtx[original_regno]; |
5817 | rtx new_reg, usage_insn; |
5818 | rtx_insn *new_insns; |
5819 | |
5820 | lra_assert (! usage_insns[original_regno].after_p); |
5821 | if (lra_dump_file != NULL) |
fprintf (lra_dump_file,
" <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n");
5824 | if (! ira_reg_classes_intersect_p[cl][rclass]) |
5825 | { |
5826 | if (lra_dump_file != NULL) |
5827 | { |
fprintf (lra_dump_file,
" Rejecting inheritance for %d "
"because of disjoint classes %s and %s\n",
original_regno, reg_class_names[cl],
reg_class_names[rclass]);
fprintf (lra_dump_file,
" >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
5835 | } |
5836 | return false; |
5837 | } |
if ((ira_class_subset_p[cl][rclass] && cl != rclass)
/* We don't use a subset of two classes because it can be
NO_REGS.  This transformation is still profitable in most
cases even if the classes do not intersect, as a register
move is probably cheaper than a memory load.  */
|| ira_class_hard_regs_num[cl] < ira_class_hard_regs_num[rclass])
{
if (lra_dump_file != NULL)
fprintf (lra_dump_file, " Use smallest class of %s and %s\n",
reg_class_names[cl], reg_class_names[rclass]);
5848 | |
5849 | rclass = cl; |
5850 | } |
if (check_secondary_memory_needed_p (rclass, next_usage_insns))
{
/* Reject inheritance resulting in secondary memory moves.
Otherwise, there is a danger of LRA cycling.  Also such a
transformation will be unprofitable.  */
if (lra_dump_file != NULL)
{
rtx_insn *insn = skip_usage_debug_insns (next_usage_insns);
5859 | rtx set = single_set (insn); |
5860 | |
5861 | lra_assert (set != NULL_RTX); |
5862 | |
5863 | rtx dest = SET_DEST (set); |
5864 | |
5865 | lra_assert (REG_P (dest)); |
fprintf (lra_dump_file,
" Rejecting inheritance for insn %d(%s)<-%d(%s) "
"as secondary mem is needed\n",
REGNO (dest), reg_class_names[get_reg_class (REGNO (dest))],
original_regno, reg_class_names[rclass]);
fprintf (lra_dump_file,
" >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
5873 | } |
5874 | return false; |
5875 | } |
5876 | new_reg = lra_create_new_reg (GET_MODE (original_reg), original_reg, |
5877 | rclass, NULL, "inheritance" ); |
5878 | start_sequence (); |
5879 | if (def_p) |
5880 | lra_emit_move (original_reg, new_reg); |
5881 | else |
5882 | lra_emit_move (new_reg, original_reg); |
5883 | new_insns = get_insns (); |
5884 | end_sequence (); |
if (NEXT_INSN (new_insns) != NULL_RTX)
5886 | { |
5887 | if (lra_dump_file != NULL) |
5888 | { |
fprintf (lra_dump_file,
" Rejecting inheritance %d->%d "
"as it results in 2 or more insns:\n",
original_regno, REGNO (new_reg));
dump_rtl_slim (lra_dump_file, new_insns, NULL, -1, 0);
fprintf (lra_dump_file,
" >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
5896 | } |
5897 | return false; |
5898 | } |
5899 | lra_substitute_pseudo_within_insn (insn, original_regno, new_reg, false); |
5900 | lra_update_insn_regno_info (insn); |
5901 | if (! def_p) |
5902 | /* We now have a new usage insn for original regno. */ |
5903 | setup_next_usage_insn (regno: original_regno, insn: new_insns, reloads_num, after_p: false); |
5904 | if (lra_dump_file != NULL) |
5905 | fprintf (stream: lra_dump_file, format: " Original reg change %d->%d (bb%d):\n" , |
5906 | original_regno, REGNO (new_reg), BLOCK_FOR_INSN (insn)->index); |
5907 | lra_reg_info[REGNO (new_reg)].restore_rtx = regno_reg_rtx[original_regno]; |
5908 | bitmap_set_bit (&check_only_regs, REGNO (new_reg)); |
5909 | bitmap_set_bit (&check_only_regs, original_regno); |
5910 | bitmap_set_bit (&lra_inheritance_pseudos, REGNO (new_reg)); |
5911 | if (def_p) |
5912 | lra_process_new_insns (insn, NULL, new_insns, |
5913 | "Add original<-inheritance" ); |
5914 | else |
5915 | lra_process_new_insns (insn, new_insns, NULL, |
5916 | "Add inheritance<-original" ); |
5917 | while (next_usage_insns != NULL_RTX) |
5918 | { |
5919 | if (GET_CODE (next_usage_insns) != INSN_LIST) |
5920 | { |
5921 | usage_insn = next_usage_insns; |
5922 | lra_assert (NONDEBUG_INSN_P (usage_insn)); |
5923 | next_usage_insns = NULL; |
5924 | } |
5925 | else |
5926 | { |
5927 | usage_insn = XEXP (next_usage_insns, 0); |
5928 | lra_assert (DEBUG_INSN_P (usage_insn)); |
5929 | next_usage_insns = XEXP (next_usage_insns, 1); |
5930 | } |
5931 | lra_substitute_pseudo (&usage_insn, original_regno, new_reg, false, |
5932 | DEBUG_INSN_P (usage_insn)); |
5933 | lra_update_insn_regno_info (as_a <rtx_insn *> (p: usage_insn)); |
5934 | if (lra_dump_file != NULL) |
5935 | { |
5936 | basic_block bb = BLOCK_FOR_INSN (insn: usage_insn); |
5937 | fprintf (stream: lra_dump_file, |
5938 | format: " Inheritance reuse change %d->%d (bb%d):\n" , |
5939 | original_regno, REGNO (new_reg), |
5940 | bb ? bb->index : -1); |
5941 | dump_insn_slim (lra_dump_file, as_a <rtx_insn *> (p: usage_insn)); |
5942 | } |
5943 | } |
5944 | if (lra_dump_file != NULL) |
5945 | fprintf (stream: lra_dump_file, |
5946 | format: " >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n" ); |
5947 | return true; |
5948 | } |
5949 | |
5950 | /* Return true if we need a caller save/restore for pseudo REGNO which |
5951 | was assigned to a hard register. */ |
5952 | static inline bool |
5953 | need_for_call_save_p (int regno) |
5954 | { |
5955 | lra_assert (regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] >= 0); |
5956 | if (usage_insns[regno].calls_num < calls_num) |
5957 | { |
5958 | unsigned int abis = 0; |
5959 | for (unsigned int i = 0; i < NUM_ABI_IDS; ++i) |
5960 | if (last_call_for_abi[i] > usage_insns[regno].calls_num) |
5961 | abis |= 1 << i; |
5962 | gcc_assert (abis); |
5963 | if (call_clobbered_in_region_p (abis, mask: full_and_partial_call_clobbers, |
5964 | PSEUDO_REGNO_MODE (regno), |
5965 | regno: reg_renumber[regno])) |
5966 | return true; |
5967 | } |
5968 | return false; |
5969 | } |
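
/* Example: suppose pseudo P was assigned to a hard register that the
   callee ABI clobbers, and P's next recorded use lies beyond a call
   insn seen during the backward EBB scan (so
   usage_insns[P].calls_num < calls_num and the call's ABI clobbers
   the register in P's mode).  Then P's value would not survive the
   call, the function returns true, and split_reg performs a
   caller-save style split around the call.  (Illustrative scenario;
   the actual save/restore insns are generated in split_reg.)  */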

/* Global registers occurring in the current EBB.  */
static bitmap_head ebb_global_regs;

/* Return true if we need a split for hard register REGNO or pseudo
   REGNO which was assigned to a hard register.
   POTENTIAL_RELOAD_HARD_REGS contains hard registers which might be
   used for reloads since the EBB end.  It is an approximation of the
   used hard registers in the split range.  The exact value would
   require expensive calculations.  If we were aggressive with
   splitting because of the approximation, the split pseudo would
   just keep the same hard register assignment and would be removed
   in the undo pass.  We still need the approximation because too
   aggressive splitting would result in too inaccurate cost
   calculations in the assignment pass because of too many generated
   moves which would probably be removed in the undo pass.  */
static inline bool
need_for_split_p (HARD_REG_SET potential_reload_hard_regs, int regno)
{
  int hard_regno = regno < FIRST_PSEUDO_REGISTER ? regno : reg_renumber[regno];

  lra_assert (hard_regno >= 0);
  return ((TEST_HARD_REG_BIT (potential_reload_hard_regs, hard_regno)
	   /* Don't split eliminable hard registers, otherwise we can
	      split hard registers like the hard frame pointer, which
	      lives on BB start/end according to the DF infrastructure,
	      when there is a pseudo assigned to the register and
	      living in the same BB.  */
	   && (regno >= FIRST_PSEUDO_REGISTER
	       || ! TEST_HARD_REG_BIT (eliminable_regset, hard_regno))
	   && ! TEST_HARD_REG_BIT (lra_no_alloc_regs, hard_regno)
	   /* Don't split call clobbered hard regs living through
	      calls, otherwise we might have a check problem in the
	      assign sub-pass, as in most cases (an exception is when
	      check_and_force_assignment_correctness_p is true) the
	      assign pass assumes that all pseudos living through
	      calls are assigned to call saved hard regs.  */
	   && (regno >= FIRST_PSEUDO_REGISTER
	       || !TEST_HARD_REG_BIT (full_and_partial_call_clobbers, regno))
	   /* We need at least 2 reloads to make pseudo splitting
	      profitable.  We should allow hard regno splitting in
	      any case to solve the 1st insn scheduling problem, where
	      moving a hard register definition up might make it
	      impossible to find a hard register for a reload pseudo of
	      a small register class.  */
	   && (usage_insns[regno].reloads_num
	       + (regno < FIRST_PSEUDO_REGISTER ? 0 : 3) < reloads_num)
	   && (regno < FIRST_PSEUDO_REGISTER
	       /* For short living pseudos, spilling + inheritance can
		  be considered a substitute for splitting.
		  Therefore we do not split local pseudos.  It also
		  decreases the aggressiveness of splitting.  The
		  minimal number of references is chosen taking into
		  account that for 2 references splitting makes no
		  sense as we can just spill the pseudo.  */
	       || (regno >= FIRST_PSEUDO_REGISTER
		   && lra_reg_info[regno].nrefs > 3
		   && bitmap_bit_p (&ebb_global_regs, regno))))
	  || (regno >= FIRST_PSEUDO_REGISTER && need_for_call_save_p (regno)));
}
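
/* A worked example of the reload heuristic above (numbers are
   illustrative only): for a pseudo, the "+ 3" term applies, so if
   usage_insns[regno].reloads_num was 5 when the pseudo's next-use
   chain started, splitting is considered only once reloads_num
   exceeds 8, i.e. at least 4 new reloads appeared in the region.
   For a hard register the term is 0, so a single new reload is
   enough.  */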

/* Return the class for the split pseudo created from an original
   pseudo with ALLOCNO_CLASS and MODE which got hard register
   HARD_REGNO.  We choose a subclass of ALLOCNO_CLASS which contains
   HARD_REGNO and results in no secondary memory movements.  */
static enum reg_class
choose_split_class (enum reg_class allocno_class,
		    int hard_regno ATTRIBUTE_UNUSED,
		    machine_mode mode ATTRIBUTE_UNUSED)
{
  int i;
  enum reg_class cl, best_cl = NO_REGS;
  enum reg_class hard_reg_class ATTRIBUTE_UNUSED
    = REGNO_REG_CLASS (hard_regno);

  if (! targetm.secondary_memory_needed (mode, allocno_class, allocno_class)
      && TEST_HARD_REG_BIT (reg_class_contents[allocno_class], hard_regno))
    return allocno_class;
  for (i = 0;
       (cl = reg_class_subclasses[allocno_class][i]) != LIM_REG_CLASSES;
       i++)
    if (! targetm.secondary_memory_needed (mode, cl, hard_reg_class)
	&& ! targetm.secondary_memory_needed (mode, hard_reg_class, cl)
	&& TEST_HARD_REG_BIT (reg_class_contents[cl], hard_regno)
	&& (best_cl == NO_REGS
	    || ira_class_hard_regs_num[best_cl] < ira_class_hard_regs_num[cl]))
      best_cl = cl;
  return best_cl;
}
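
/* As an illustration (hypothetical target): if ALLOCNO_CLASS is
   ALL_REGS and moving MODE within ALL_REGS needs secondary memory,
   the loop above walks the subclasses of ALL_REGS, keeps only those
   containing HARD_REGNO that avoid secondary memory for moves to and
   from HARD_REGNO's natural class, and among the candidates prefers
   the one with the most hard registers.  */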

/* Copy any equivalence information from ORIGINAL_REGNO to NEW_REGNO.
   It only makes sense to call this function if NEW_REGNO is always
   equal to ORIGINAL_REGNO.  Also set up the defined_p flag when the
   original's caller_save_p flag is set and CALL_SAVE_P is true.  */

static void
lra_copy_reg_equiv (unsigned int new_regno, unsigned int original_regno,
		    bool call_save_p)
{
  if (!ira_reg_equiv[original_regno].defined_p
      && !(call_save_p && ira_reg_equiv[original_regno].caller_save_p))
    return;

  ira_expand_reg_equiv ();
  ira_reg_equiv[new_regno].defined_p = true;
  if (ira_reg_equiv[original_regno].memory)
    ira_reg_equiv[new_regno].memory
      = copy_rtx (ira_reg_equiv[original_regno].memory);
  if (ira_reg_equiv[original_regno].constant)
    ira_reg_equiv[new_regno].constant
      = copy_rtx (ira_reg_equiv[original_regno].constant);
  if (ira_reg_equiv[original_regno].invariant)
    ira_reg_equiv[new_regno].invariant
      = copy_rtx (ira_reg_equiv[original_regno].invariant);
}
/* Do split transformations for insn INSN, which defines or uses
   ORIGINAL_REGNO.  NEXT_USAGE_INSNS specifies which instruction in
   the EBB next uses ORIGINAL_REGNO; it has the same form as the
   "insns" field of usage_insns.  If TO is not NULL, we don't use
   usage_insns; we put restore insns after the TO insn.  This is the
   case when we are called from lra_split_hard_reg_for, outside the
   inheritance pass.

   The transformations look like:

     p <- ...                p <- ...
     ...                     s <- p    (new insn -- save)
     ...             =>
     ...                     p <- s    (new insn -- restore)
     <- ... p ...            <- ... p ...
   or
     <- ... p ...            <- ... p ...
     ...                     s <- p    (new insn -- save)
     ...             =>
     ...                     p <- s    (new insn -- restore)
     <- ... p ...            <- ... p ...

   where p is an original pseudo that got a hard register, or a hard
   register itself, and s is the new split pseudo.  The save is put
   before INSN if BEFORE_P is true.  Return true if we succeed in
   such a transformation.  */
static bool
split_reg (bool before_p, int original_regno, rtx_insn *insn,
	   rtx next_usage_insns, rtx_insn *to)
{
  enum reg_class rclass;
  rtx original_reg;
  int hard_regno, nregs;
  rtx new_reg, usage_insn;
  rtx_insn *restore, *save;
  bool after_p;
  bool call_save_p;
  machine_mode mode;

  if (original_regno < FIRST_PSEUDO_REGISTER)
    {
      rclass = ira_allocno_class_translate[REGNO_REG_CLASS (original_regno)];
      hard_regno = original_regno;
      call_save_p = false;
      nregs = 1;
      mode = lra_reg_info[hard_regno].biggest_mode;
      machine_mode reg_rtx_mode = GET_MODE (regno_reg_rtx[hard_regno]);
      /* A reg can have a biggest_mode of VOIDmode if it was only ever seen
	 as part of a multi-word register.  In that case, just use the
	 reg_rtx mode.  Do the same also if the biggest mode was larger than
	 a register or we cannot compare the modes.  Otherwise, limit the
	 size to that of the biggest access in the function or to the
	 natural mode at least.  */
      if (mode == VOIDmode
	  || !ordered_p (GET_MODE_PRECISION (mode),
			 GET_MODE_PRECISION (reg_rtx_mode))
	  || paradoxical_subreg_p (mode, reg_rtx_mode)
	  || maybe_gt (GET_MODE_PRECISION (reg_rtx_mode),
		       GET_MODE_PRECISION (mode)))
	{
	  original_reg = regno_reg_rtx[hard_regno];
	  mode = reg_rtx_mode;
	}
      else
	original_reg = gen_rtx_REG (mode, hard_regno);
    }
  else
    {
      mode = PSEUDO_REGNO_MODE (original_regno);
      hard_regno = reg_renumber[original_regno];
      nregs = hard_regno_nregs (hard_regno, mode);
      rclass = lra_get_allocno_class (original_regno);
      original_reg = regno_reg_rtx[original_regno];
      call_save_p = need_for_call_save_p (original_regno);
    }
  lra_assert (hard_regno >= 0);
  if (lra_dump_file != NULL)
    fprintf (lra_dump_file,
	     "    ((((((((((((((((((((((((((((((((((((((((((((((((\n");

  if (call_save_p)
    {
      mode = HARD_REGNO_CALLER_SAVE_MODE (hard_regno,
					  hard_regno_nregs (hard_regno, mode),
					  mode);
      new_reg = lra_create_new_reg (mode, NULL_RTX, NO_REGS, NULL, "save");
    }
  else
    {
      rclass = choose_split_class (rclass, hard_regno, mode);
      if (rclass == NO_REGS)
	{
	  if (lra_dump_file != NULL)
	    {
	      fprintf (lra_dump_file,
		       "    Rejecting split of %d(%s): "
		       "no good reg class for %d(%s)\n",
		       original_regno,
		       reg_class_names[lra_get_allocno_class (original_regno)],
		       hard_regno,
		       reg_class_names[REGNO_REG_CLASS (hard_regno)]);
	      fprintf
		(lra_dump_file,
		 "    ))))))))))))))))))))))))))))))))))))))))))))))))\n");
	    }
	  return false;
	}
      /* Split_if_necessary can split hard registers used as part of a
	 multi-register mode but splits each register individually.  The
	 mode used for each independent register may not be supported
	 so reject the split.  Splitting the wider mode should theoretically
	 be possible but is not implemented.  */
      if (!targetm.hard_regno_mode_ok (hard_regno, mode))
	{
	  if (lra_dump_file != NULL)
	    {
	      fprintf (lra_dump_file,
		       "    Rejecting split of %d(%s): unsuitable mode %s\n",
		       original_regno,
		       reg_class_names[lra_get_allocno_class (original_regno)],
		       GET_MODE_NAME (mode));
	      fprintf
		(lra_dump_file,
		 "    ))))))))))))))))))))))))))))))))))))))))))))))))\n");
	    }
	  return false;
	}
      new_reg = lra_create_new_reg (mode, original_reg, rclass, NULL, "split");
      reg_renumber[REGNO (new_reg)] = hard_regno;
    }
  int new_regno = REGNO (new_reg);
  save = emit_spill_move (true, new_reg, original_reg);
  if (NEXT_INSN (save) != NULL_RTX && !call_save_p)
    {
      if (lra_dump_file != NULL)
	{
	  fprintf
	    (lra_dump_file,
	     "    Rejecting split %d->%d resulting in > 2 save insns:\n",
	     original_regno, new_regno);
	  dump_rtl_slim (lra_dump_file, save, NULL, -1, 0);
	  fprintf (lra_dump_file,
		   "    ))))))))))))))))))))))))))))))))))))))))))))))))\n");
	}
      return false;
    }
  restore = emit_spill_move (false, new_reg, original_reg);
  if (NEXT_INSN (restore) != NULL_RTX && !call_save_p)
    {
      if (lra_dump_file != NULL)
	{
	  fprintf (lra_dump_file,
		   "    Rejecting split %d->%d "
		   "resulting in > 2 restore insns:\n",
		   original_regno, new_regno);
	  dump_rtl_slim (lra_dump_file, restore, NULL, -1, 0);
	  fprintf (lra_dump_file,
		   "    ))))))))))))))))))))))))))))))))))))))))))))))))\n");
	}
      return false;
    }
  /* Transfer equivalence information to the spill register, so that
     if we fail to allocate the spill register, we have the option of
     rematerializing the original value instead of spilling to the stack.  */
  if (!HARD_REGISTER_NUM_P (original_regno)
      && mode == PSEUDO_REGNO_MODE (original_regno))
    lra_copy_reg_equiv (new_regno, original_regno, call_save_p);
  lra_reg_info[new_regno].restore_rtx = regno_reg_rtx[original_regno];
  bitmap_set_bit (&lra_split_regs, new_regno);
  if (to != NULL)
    {
      lra_assert (next_usage_insns == NULL);
      usage_insn = to;
      after_p = true;
    }
  else
    {
      /* We need check_only_regs only inside the inheritance pass.  */
      bitmap_set_bit (&check_only_regs, new_regno);
      bitmap_set_bit (&check_only_regs, original_regno);
      after_p = usage_insns[original_regno].after_p;
      for (;;)
	{
	  if (GET_CODE (next_usage_insns) != INSN_LIST)
	    {
	      usage_insn = next_usage_insns;
	      break;
	    }
	  usage_insn = XEXP (next_usage_insns, 0);
	  lra_assert (DEBUG_INSN_P (usage_insn));
	  next_usage_insns = XEXP (next_usage_insns, 1);
	  lra_substitute_pseudo (&usage_insn, original_regno, new_reg, false,
				 true);
	  lra_update_insn_regno_info (as_a <rtx_insn *> (usage_insn));
	  if (lra_dump_file != NULL)
	    {
	      fprintf (lra_dump_file, "    Split reuse change %d->%d:\n",
		       original_regno, new_regno);
	      dump_insn_slim (lra_dump_file, as_a <rtx_insn *> (usage_insn));
	    }
	}
    }
  lra_assert (NOTE_P (usage_insn) || NONDEBUG_INSN_P (usage_insn));
  lra_assert (usage_insn != insn || (after_p && before_p));
  lra_process_new_insns (as_a <rtx_insn *> (usage_insn),
			 after_p ? NULL : restore,
			 after_p ? restore : NULL,
			 call_save_p
			 ? "Add reg<-save" : "Add reg<-split");
  lra_process_new_insns (insn, before_p ? save : NULL,
			 before_p ? NULL : save,
			 call_save_p
			 ? "Add save<-reg" : "Add split<-reg");
  if (nregs > 1 || original_regno < FIRST_PSEUDO_REGISTER)
    /* If we are trying to split a multi-register pseudo, we should
       check conflicts on the next assignment sub-pass.  IRA can
       allocate at the sub-register level, while LRA currently does
       this at the pseudo level, and this discrepancy may create
       allocation conflicts after splitting.

       If we are trying to split a hard register, we should also check
       conflicts as such splitting can create an artificial conflict of
       the hard register with another pseudo because of the simplified
       conflict calculation in LRA.  */
    check_and_force_assignment_correctness_p = true;
  if (lra_dump_file != NULL)
    fprintf (lra_dump_file,
	     "    ))))))))))))))))))))))))))))))))))))))))))))))))\n");
  return true;
}

/* Split a hard reg for reload pseudo REGNO having RCLASS and living
   in the range [FROM, TO].  Return true if we did a split.
   Otherwise, return false.  */
bool
spill_hard_reg_in_range (int regno, enum reg_class rclass,
			 rtx_insn *from, rtx_insn *to)
{
  int i, hard_regno;
  int rclass_size;
  rtx_insn *insn;
  unsigned int uid;
  bitmap_iterator bi;
  HARD_REG_SET ignore;

  lra_assert (from != NULL && to != NULL);
  ignore = lra_no_alloc_regs;
  EXECUTE_IF_SET_IN_BITMAP (&lra_reg_info[regno].insn_bitmap, 0, uid, bi)
    {
      lra_insn_recog_data_t id = lra_insn_recog_data[uid];
      struct lra_static_insn_data *static_id = id->insn_static_data;
      struct lra_insn_reg *reg;

      for (reg = id->regs; reg != NULL; reg = reg->next)
	if (reg->regno < FIRST_PSEUDO_REGISTER)
	  SET_HARD_REG_BIT (ignore, reg->regno);
      for (reg = static_id->hard_regs; reg != NULL; reg = reg->next)
	SET_HARD_REG_BIT (ignore, reg->regno);
    }
  rclass_size = ira_class_hard_regs_num[rclass];
  for (i = 0; i < rclass_size; i++)
    {
      hard_regno = ira_class_hard_regs[rclass][i];
      if (! TEST_HARD_REG_BIT (lra_reg_info[regno].conflict_hard_regs,
			       hard_regno)
	  || TEST_HARD_REG_BIT (ignore, hard_regno))
	continue;
      for (insn = from; insn != NEXT_INSN (to); insn = NEXT_INSN (insn))
	{
	  struct lra_static_insn_data *static_id;
	  struct lra_insn_reg *reg;

	  if (!INSN_P (insn))
	    continue;
	  if (bitmap_bit_p (&lra_reg_info[hard_regno].insn_bitmap,
			    INSN_UID (insn)))
	    break;
	  static_id = lra_get_insn_recog_data (insn)->insn_static_data;
	  for (reg = static_id->hard_regs; reg != NULL; reg = reg->next)
	    if (reg->regno == hard_regno)
	      break;
	  if (reg != NULL)
	    break;
	}
      if (insn != NEXT_INSN (to))
	continue;
      if (split_reg (true, hard_regno, from, NULL, to))
	return true;
    }
  return false;
}

/* Recognize that we need a split transformation for insn INSN, which
   defines or uses REGNO in its insn biggest MODE (we use it only if
   REGNO is a hard register).  POTENTIAL_RELOAD_HARD_REGS contains
   hard registers which might be used for reloads since the EBB end.
   Put the save before INSN if BEFORE_P is true.  MAX_UID is the
   maximal uid of insns before starting INSN processing.  Return true
   if we succeed in such a transformation.  */
static bool
split_if_necessary (int regno, machine_mode mode,
		    HARD_REG_SET potential_reload_hard_regs,
		    bool before_p, rtx_insn *insn, int max_uid)
{
  bool res = false;
  int i, nregs = 1;
  rtx next_usage_insns;

  if (regno < FIRST_PSEUDO_REGISTER)
    nregs = hard_regno_nregs (regno, mode);
  for (i = 0; i < nregs; i++)
    if (usage_insns[regno + i].check == curr_usage_insns_check
	&& (next_usage_insns = usage_insns[regno + i].insns) != NULL_RTX
	/* To avoid processing the register twice or more.  */
	&& ((GET_CODE (next_usage_insns) != INSN_LIST
	     && INSN_UID (next_usage_insns) < max_uid)
	    || (GET_CODE (next_usage_insns) == INSN_LIST
		&& (INSN_UID (XEXP (next_usage_insns, 0)) < max_uid)))
	&& need_for_split_p (potential_reload_hard_regs, regno + i)
	&& split_reg (before_p, regno + i, insn, next_usage_insns, NULL))
      res = true;
  return res;
}

/* Return TRUE if rtx X is considered an invariant for
   inheritance.  */
static bool
invariant_p (const_rtx x)
{
  machine_mode mode;
  const char *fmt;
  enum rtx_code code;
  int i, j;

  if (side_effects_p (x))
    return false;

  code = GET_CODE (x);
  mode = GET_MODE (x);
  if (code == SUBREG)
    {
      x = SUBREG_REG (x);
      code = GET_CODE (x);
      mode = wider_subreg_mode (mode, GET_MODE (x));
    }

  if (MEM_P (x))
    return false;

  if (REG_P (x))
    {
      int i, nregs, regno = REGNO (x);

      if (regno >= FIRST_PSEUDO_REGISTER || regno == STACK_POINTER_REGNUM
	  || TEST_HARD_REG_BIT (eliminable_regset, regno)
	  || GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
	return false;
      nregs = hard_regno_nregs (regno, mode);
      for (i = 0; i < nregs; i++)
	if (! fixed_regs[regno + i]
	    /* A hard register may be clobbered in the current insn
	       but we can ignore this case because if the hard
	       register is used it should be set somewhere after the
	       clobber.  */
	    || bitmap_bit_p (&invalid_invariant_regs, regno + i))
	  return false;
    }
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (! invariant_p (XEXP (x, i)))
	    return false;
	}
      else if (fmt[i] == 'E')
	{
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (! invariant_p (XVECEXP (x, i, j)))
	      return false;
	}
    }
  return true;
}
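
/* Under the rules above, an expression such as

       (plus:SI (reg:SI <fixed hard reg>) (const_int 8))

   is considered invariant: it has no side effects, accesses no
   memory, and mentions only a fixed, non-eliminable hard register
   (not the stack pointer and not a CC register) whose invariant
   status has not been invalidated by a later definition in the EBB.
   Any pseudo or ordinary allocatable hard register in the expression
   makes it variant.  (Illustrative example.)  */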

/* We have 'dest_reg <- invariant'.  Let us try to make an invariant
   inheritance transformation (using dest_reg instead of the
   invariant in a subsequent insn).  */
static bool
process_invariant_for_inheritance (rtx dst_reg, rtx invariant_rtx)
{
  invariant_ptr_t invariant_ptr;
  rtx_insn *insn, *new_insns;
  rtx insn_set, insn_reg, new_reg;
  int insn_regno;
  bool succ_p = false;
  int dst_regno = REGNO (dst_reg);
  machine_mode dst_mode = GET_MODE (dst_reg);
  enum reg_class cl = lra_get_allocno_class (dst_regno), insn_reg_cl;

  invariant_ptr = insert_invariant (invariant_rtx);
  if ((insn = invariant_ptr->insn) != NULL_RTX)
    {
      /* We have a subsequent insn using the invariant.  */
      insn_set = single_set (insn);
      lra_assert (insn_set != NULL);
      insn_reg = SET_DEST (insn_set);
      lra_assert (REG_P (insn_reg));
      insn_regno = REGNO (insn_reg);
      insn_reg_cl = lra_get_allocno_class (insn_regno);

      if (dst_mode == GET_MODE (insn_reg)
	  /* We should consider only insns whose resulting reg moves
	     are cheap.  */
	  && targetm.register_move_cost (dst_mode, cl, insn_reg_cl) == 2
	  && targetm.register_move_cost (dst_mode, cl, cl) == 2)
	{
	  if (lra_dump_file != NULL)
	    fprintf (lra_dump_file,
		     "    [[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n");
	  new_reg = lra_create_new_reg (dst_mode, dst_reg, cl, NULL,
					"invariant inheritance");
	  bitmap_set_bit (&lra_inheritance_pseudos, REGNO (new_reg));
	  bitmap_set_bit (&check_only_regs, REGNO (new_reg));
	  lra_reg_info[REGNO (new_reg)].restore_rtx = PATTERN (insn);
	  start_sequence ();
	  lra_emit_move (new_reg, dst_reg);
	  new_insns = get_insns ();
	  end_sequence ();
	  lra_process_new_insns (curr_insn, NULL, new_insns,
				 "Add invariant inheritance<-original");
	  start_sequence ();
	  lra_emit_move (SET_DEST (insn_set), new_reg);
	  new_insns = get_insns ();
	  end_sequence ();
	  lra_process_new_insns (insn, NULL, new_insns,
				 "Changing reload<-inheritance");
	  lra_set_insn_deleted (insn);
	  succ_p = true;
	  if (lra_dump_file != NULL)
	    {
	      fprintf (lra_dump_file,
		       "    Invariant inheritance reuse change %d (bb%d):\n",
		       REGNO (new_reg), BLOCK_FOR_INSN (insn)->index);
	      dump_insn_slim (lra_dump_file, insn);
	      fprintf (lra_dump_file,
		       "    ]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]\n");
	    }
	}
    }
  invariant_ptr->insn = curr_insn;
  return succ_p;
}
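
/* Schematically (EBB processed backwards, so the stored insn is the
   later one), the transformation above turns

       r1 <- invariant              r1 <- invariant
       ...                          new <- r1        (new insn)
       r2 <- invariant      =>      ...
                                    r2 <- new        (replaces insn)

   so the second computation of the invariant becomes a cheap
   register move, provided the move costs make it profitable.  */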

/* Check only registers living at the current program point in the
   current EBB.  */
static bitmap_head live_regs;

/* Update live info in the EBB given by its HEAD and TAIL insns after
   inheritance/split transformations.  The function also removes dead
   moves.  */
static void
update_ebb_live_info (rtx_insn *head, rtx_insn *tail)
{
  unsigned int j;
  int i, regno;
  bool live_p;
  rtx_insn *prev_insn;
  rtx set;
  bool remove_p;
  basic_block last_bb, prev_bb, curr_bb;
  bitmap_iterator bi;
  struct lra_insn_reg *reg;
  edge e;
  edge_iterator ei;

  last_bb = BLOCK_FOR_INSN (tail);
  prev_bb = NULL;
  for (curr_insn = tail;
       curr_insn != PREV_INSN (head);
       curr_insn = prev_insn)
    {
      prev_insn = PREV_INSN (curr_insn);
      /* We need to process empty blocks too.  They contain
	 NOTE_INSN_BASIC_BLOCK referring to the basic block.  */
      if (NOTE_P (curr_insn) && NOTE_KIND (curr_insn) != NOTE_INSN_BASIC_BLOCK)
	continue;
      curr_bb = BLOCK_FOR_INSN (curr_insn);
      if (curr_bb != prev_bb)
	{
	  if (prev_bb != NULL)
	    {
	      /* Update df_get_live_in (prev_bb):  */
	      EXECUTE_IF_SET_IN_BITMAP (&check_only_regs, 0, j, bi)
		if (bitmap_bit_p (&live_regs, j))
		  bitmap_set_bit (df_get_live_in (prev_bb), j);
		else
		  bitmap_clear_bit (df_get_live_in (prev_bb), j);
	    }
	  if (curr_bb != last_bb)
	    {
	      /* Update df_get_live_out (curr_bb):  */
	      EXECUTE_IF_SET_IN_BITMAP (&check_only_regs, 0, j, bi)
		{
		  live_p = bitmap_bit_p (&live_regs, j);
		  if (! live_p)
		    FOR_EACH_EDGE (e, ei, curr_bb->succs)
		      if (bitmap_bit_p (df_get_live_in (e->dest), j))
			{
			  live_p = true;
			  break;
			}
		  if (live_p)
		    bitmap_set_bit (df_get_live_out (curr_bb), j);
		  else
		    bitmap_clear_bit (df_get_live_out (curr_bb), j);
		}
	    }
	  prev_bb = curr_bb;
	  bitmap_and (&live_regs, &check_only_regs, df_get_live_out (curr_bb));
	}
      if (! NONDEBUG_INSN_P (curr_insn))
	continue;
      curr_id = lra_get_insn_recog_data (curr_insn);
      curr_static_id = curr_id->insn_static_data;
      remove_p = false;
      if ((set = single_set (curr_insn)) != NULL_RTX
	  && REG_P (SET_DEST (set))
	  && (regno = REGNO (SET_DEST (set))) >= FIRST_PSEUDO_REGISTER
	  && SET_DEST (set) != pic_offset_table_rtx
	  && bitmap_bit_p (&check_only_regs, regno)
	  && ! bitmap_bit_p (&live_regs, regno))
	remove_p = true;
      /* See which defined values die here.  */
      for (reg = curr_id->regs; reg != NULL; reg = reg->next)
	if (reg->type == OP_OUT && ! reg->subreg_p)
	  bitmap_clear_bit (&live_regs, reg->regno);
      for (reg = curr_static_id->hard_regs; reg != NULL; reg = reg->next)
	if (reg->type == OP_OUT && ! reg->subreg_p)
	  bitmap_clear_bit (&live_regs, reg->regno);
      if (curr_id->arg_hard_regs != NULL)
	/* Make clobbered argument hard registers die.  */
	for (i = 0; (regno = curr_id->arg_hard_regs[i]) >= 0; i++)
	  if (regno >= FIRST_PSEUDO_REGISTER)
	    bitmap_clear_bit (&live_regs, regno - FIRST_PSEUDO_REGISTER);
      /* Mark each used value as live.  */
      for (reg = curr_id->regs; reg != NULL; reg = reg->next)
	if (reg->type != OP_OUT
	    && bitmap_bit_p (&check_only_regs, reg->regno))
	  bitmap_set_bit (&live_regs, reg->regno);
      for (reg = curr_static_id->hard_regs; reg != NULL; reg = reg->next)
	if (reg->type != OP_OUT
	    && bitmap_bit_p (&check_only_regs, reg->regno))
	  bitmap_set_bit (&live_regs, reg->regno);
      if (curr_id->arg_hard_regs != NULL)
	/* Make used argument hard registers live.  */
	for (i = 0; (regno = curr_id->arg_hard_regs[i]) >= 0; i++)
	  if (regno < FIRST_PSEUDO_REGISTER
	      && bitmap_bit_p (&check_only_regs, regno))
	    bitmap_set_bit (&live_regs, regno);
      /* It is quite important to remove dead move insns because doing
	 so removes dead stores.  We don't need to process them for
	 constraints.  */
      if (remove_p)
	{
	  if (lra_dump_file != NULL)
	    {
	      fprintf (lra_dump_file, "     Removing dead insn:\n ");
	      dump_insn_slim (lra_dump_file, curr_insn);
	    }
	  lra_set_insn_deleted (curr_insn);
	}
    }
}

/* The structure describes the info needed to do an inheritance for
   the current insn.  We need to collect such info first, before doing
   the transformations, because the transformations change the insn's
   internal representation.  */
struct to_inherit
{
  /* Original regno.  */
  int regno;
  /* Subsequent insns which can inherit the original reg value.  */
  rtx insns;
};

/* Array containing all info for doing inheritance from the current
   insn.  */
static struct to_inherit to_inherit[LRA_MAX_INSN_RELOADS];

/* Number of elements in the previous array.  */
static int to_inherit_num;

/* Add inheritance info REGNO and INSNS.  Their meaning is described
   in structure to_inherit.  */
static void
add_to_inherit (int regno, rtx insns)
{
  int i;

  for (i = 0; i < to_inherit_num; i++)
    if (to_inherit[i].regno == regno)
      return;
  lra_assert (to_inherit_num < LRA_MAX_INSN_RELOADS);
  to_inherit[to_inherit_num].regno = regno;
  to_inherit[to_inherit_num++].insns = insns;
}

/* Return the last non-debug insn in basic block BB, or the block begin
   note if none.  */
static rtx_insn *
get_last_insertion_point (basic_block bb)
{
  rtx_insn *insn;

  FOR_BB_INSNS_REVERSE (bb, insn)
    if (NONDEBUG_INSN_P (insn) || NOTE_INSN_BASIC_BLOCK_P (insn))
      return insn;
  gcc_unreachable ();
}

/* Set up RES by the registers living on the exit edges of FROM except
   the edge (FROM, TO), and by the registers set up in a jump insn in
   BB FROM.  */
static void
get_live_on_other_edges (basic_block from, basic_block to, bitmap res)
{
  rtx_insn *last;
  struct lra_insn_reg *reg;
  edge e;
  edge_iterator ei;

  lra_assert (to != NULL);
  bitmap_clear (res);
  FOR_EACH_EDGE (e, ei, from->succs)
    if (e->dest != to)
      bitmap_ior_into (res, df_get_live_in (e->dest));
  last = get_last_insertion_point (from);
  if (! JUMP_P (last))
    return;
  curr_id = lra_get_insn_recog_data (last);
  for (reg = curr_id->regs; reg != NULL; reg = reg->next)
    if (reg->type != OP_IN)
      bitmap_set_bit (res, reg->regno);
}

/* Used as a temporary result of some bitmap calculations.  */
static bitmap_head temp_bitmap;

/* We split for reloads of small classes of hard regs.  The following
   defines how many hard regs the class should have to be qualified as
   small.  The code is mostly oriented to the x86/x86-64 architecture,
   where some insns need to use only a specific register or pair of
   registers and these registers can live in RTL explicitly, e.g. for
   parameter passing.  */
static const int max_small_class_regs_num = 2;
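
/* E.g. on x86, variable shift counts must live in CX and some
   multiply/divide insns are tied to AX/DX, so register classes with
   only one or two hard registers occur naturally there; such classes
   are the ones treated as small by the definition above.
   (Illustrative target-specific note.)  */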

/* Do inheritance/split transformations in the EBB starting with HEAD
   and finishing on TAIL.  We process EBB insns in the reverse order.
   Return true if we did any inheritance/split transformation in the
   EBB.

   We should avoid excessive splitting which results in worse code
   because of inaccurate cost calculations for spilling new split
   pseudos in such a case.  To achieve this we do splitting only if
   register pressure is high in a given basic block and there are
   reload pseudos requiring hard registers.  We could do more register
   pressure calculations at any given program point to avoid
   unnecessary splitting even more, but it is too expensive and the
   current approach works well enough.  */
static bool
inherit_in_ebb (rtx_insn *head, rtx_insn *tail)
{
  int i, src_regno, dst_regno, nregs;
  bool change_p, succ_p, update_reloads_num_p;
  rtx_insn *prev_insn, *last_insn;
  rtx next_usage_insns, curr_set;
  enum reg_class cl;
  struct lra_insn_reg *reg;
  basic_block last_processed_bb, curr_bb = NULL;
  HARD_REG_SET potential_reload_hard_regs, live_hard_regs;
  bitmap to_process;
  unsigned int j;
  bitmap_iterator bi;
  bool head_p, after_p;

  change_p = false;
  curr_usage_insns_check++;
  clear_invariants ();
  reloads_num = calls_num = 0;
  for (unsigned int i = 0; i < NUM_ABI_IDS; ++i)
    last_call_for_abi[i] = 0;
  CLEAR_HARD_REG_SET (full_and_partial_call_clobbers);
  bitmap_clear (&check_only_regs);
  bitmap_clear (&invalid_invariant_regs);
  last_processed_bb = NULL;
  CLEAR_HARD_REG_SET (potential_reload_hard_regs);
  live_hard_regs = eliminable_regset | lra_no_alloc_regs;
  /* We don't process new insns generated in the loop.  */
  for (curr_insn = tail; curr_insn != PREV_INSN (head); curr_insn = prev_insn)
    {
      prev_insn = PREV_INSN (curr_insn);
      if (BLOCK_FOR_INSN (curr_insn) != NULL)
	curr_bb = BLOCK_FOR_INSN (curr_insn);
      if (last_processed_bb != curr_bb)
	{
	  /* We are at the end of BB.  Add qualified living
	     pseudos for potential splitting.  */
	  to_process = df_get_live_out (curr_bb);
	  if (last_processed_bb != NULL)
	    {
	      /* We are somewhere in the middle of EBB.  */
	      get_live_on_other_edges (curr_bb, last_processed_bb,
				       &temp_bitmap);
	      to_process = &temp_bitmap;
	    }
	  last_processed_bb = curr_bb;
	  last_insn = get_last_insertion_point (curr_bb);
	  after_p = (! JUMP_P (last_insn)
		     && (! CALL_P (last_insn)
			 || (find_reg_note (last_insn,
					    REG_NORETURN, NULL_RTX) == NULL_RTX
			     && ! SIBLING_CALL_P (last_insn))));
	  CLEAR_HARD_REG_SET (potential_reload_hard_regs);
	  EXECUTE_IF_SET_IN_BITMAP (to_process, 0, j, bi)
	    {
	      if ((int) j >= lra_constraint_new_regno_start)
		break;
	      if (j < FIRST_PSEUDO_REGISTER || reg_renumber[j] >= 0)
		{
		  if (j < FIRST_PSEUDO_REGISTER)
		    SET_HARD_REG_BIT (live_hard_regs, j);
		  else
		    add_to_hard_reg_set (&live_hard_regs,
					 PSEUDO_REGNO_MODE (j),
					 reg_renumber[j]);
		  setup_next_usage_insn (j, last_insn, reloads_num, after_p);
		}
	    }
	}
      src_regno = dst_regno = -1;
      curr_set = single_set (curr_insn);
      if (curr_set != NULL_RTX && REG_P (SET_DEST (curr_set)))
	dst_regno = REGNO (SET_DEST (curr_set));
      if (curr_set != NULL_RTX && REG_P (SET_SRC (curr_set)))
	src_regno = REGNO (SET_SRC (curr_set));
      update_reloads_num_p = true;
      if (src_regno < lra_constraint_new_regno_start
	  && src_regno >= FIRST_PSEUDO_REGISTER
	  && reg_renumber[src_regno] < 0
	  && dst_regno >= lra_constraint_new_regno_start
	  && (cl = lra_get_allocno_class (dst_regno)) != NO_REGS)
	{
	  /* 'reload_pseudo <- original_pseudo'.  */
	  if (ira_class_hard_regs_num[cl] <= max_small_class_regs_num)
	    reloads_num++;
	  update_reloads_num_p = false;
	  succ_p = false;
	  if (usage_insns[src_regno].check == curr_usage_insns_check
	      && (next_usage_insns = usage_insns[src_regno].insns) != NULL_RTX)
	    succ_p = inherit_reload_reg (false, src_regno, cl,
					 curr_insn, next_usage_insns);
	  if (succ_p)
	    change_p = true;
	  else
	    setup_next_usage_insn (src_regno, curr_insn, reloads_num, false);
	  if (hard_reg_set_subset_p (reg_class_contents[cl], live_hard_regs))
	    potential_reload_hard_regs |= reg_class_contents[cl];
	}
      else if (src_regno < 0
	       && dst_regno >= lra_constraint_new_regno_start
	       && invariant_p (SET_SRC (curr_set))
	       && (cl = lra_get_allocno_class (dst_regno)) != NO_REGS
	       && ! bitmap_bit_p (&invalid_invariant_regs, dst_regno)
	       && ! bitmap_bit_p (&invalid_invariant_regs,
				  ORIGINAL_REGNO (regno_reg_rtx[dst_regno])))
	{
	  /* 'reload_pseudo <- invariant'.  */
	  if (ira_class_hard_regs_num[cl] <= max_small_class_regs_num)
	    reloads_num++;
	  update_reloads_num_p = false;
	  if (process_invariant_for_inheritance (SET_DEST (curr_set),
						 SET_SRC (curr_set)))
	    change_p = true;
	  if (hard_reg_set_subset_p (reg_class_contents[cl], live_hard_regs))
	    potential_reload_hard_regs |= reg_class_contents[cl];
	}
      else if (src_regno >= lra_constraint_new_regno_start
	       && dst_regno < lra_constraint_new_regno_start
	       && dst_regno >= FIRST_PSEUDO_REGISTER
	       && reg_renumber[dst_regno] < 0
	       && (cl = lra_get_allocno_class (src_regno)) != NO_REGS
	       && usage_insns[dst_regno].check == curr_usage_insns_check
	       && (next_usage_insns
		   = usage_insns[dst_regno].insns) != NULL_RTX)
	{
	  if (ira_class_hard_regs_num[cl] <= max_small_class_regs_num)
	    reloads_num++;
	  update_reloads_num_p = false;
	  /* 'original_pseudo <- reload_pseudo'.  */
	  if (! JUMP_P (curr_insn)
	      && inherit_reload_reg (true, dst_regno, cl,
				     curr_insn, next_usage_insns))
	    change_p = true;
	  /* Invalidate.  */
	  usage_insns[dst_regno].check = 0;
	  if (hard_reg_set_subset_p (reg_class_contents[cl], live_hard_regs))
	    potential_reload_hard_regs |= reg_class_contents[cl];
	}
      else if (INSN_P (curr_insn))
	{
	  int iter;
	  int max_uid = get_max_uid ();

	  curr_id = lra_get_insn_recog_data (curr_insn);
	  curr_static_id = curr_id->insn_static_data;
	  to_inherit_num = 0;
	  /* Process insn definitions.  */
	  for (iter = 0; iter < 2; iter++)
	    for (reg = iter == 0 ? curr_id->regs : curr_static_id->hard_regs;
		 reg != NULL;
		 reg = reg->next)
	      if (reg->type != OP_IN
		  && (dst_regno = reg->regno) < lra_constraint_new_regno_start)
		{
		  if (dst_regno >= FIRST_PSEUDO_REGISTER && reg->type == OP_OUT
		      && reg_renumber[dst_regno] < 0 && ! reg->subreg_p
		      && usage_insns[dst_regno].check == curr_usage_insns_check
		      && (next_usage_insns
			  = usage_insns[dst_regno].insns) != NULL_RTX)
		    {
		      struct lra_insn_reg *r;

		      for (r = curr_id->regs; r != NULL; r = r->next)
			if (r->type != OP_OUT && r->regno == dst_regno)
			  break;
		      /* Don't do inheritance if the pseudo is also
			 used in the insn.  */
		      if (r == NULL)
			/* We cannot do inheritance right now
			   because the current insn reg info (chain
			   regs) can change after that.  */
			add_to_inherit (dst_regno, next_usage_insns);
		    }
		  /* We cannot process one reg twice here because of
		     usage_insns invalidation.  */
		  if ((dst_regno < FIRST_PSEUDO_REGISTER
		       || reg_renumber[dst_regno] >= 0)
		      && ! reg->subreg_p && reg->type != OP_IN)
		    {
		      HARD_REG_SET s;

		      if (split_if_necessary (dst_regno, reg->biggest_mode,
					      potential_reload_hard_regs,
					      false, curr_insn, max_uid))
			change_p = true;
		      CLEAR_HARD_REG_SET (s);
		      if (dst_regno < FIRST_PSEUDO_REGISTER)
			add_to_hard_reg_set (&s, reg->biggest_mode, dst_regno);
		      else
			add_to_hard_reg_set (&s, PSEUDO_REGNO_MODE (dst_regno),
					     reg_renumber[dst_regno]);
		      live_hard_regs &= ~s;
		      potential_reload_hard_regs &= ~s;
		    }
		  /* We should invalidate potential inheritance or
		     splitting for the current insn usages to the next
		     usage insns (see code below) as the output pseudo
		     prevents this.  */
		  if ((dst_regno >= FIRST_PSEUDO_REGISTER
		       && reg_renumber[dst_regno] < 0)
		      || (reg->type == OP_OUT && ! reg->subreg_p
			  && (dst_regno < FIRST_PSEUDO_REGISTER
			      || reg_renumber[dst_regno] >= 0)))
		    {
		      /* Invalidate and mark definitions.  */
		      if (dst_regno >= FIRST_PSEUDO_REGISTER)
			usage_insns[dst_regno].check
			  = -(int) INSN_UID (curr_insn);
		      else
			{
			  nregs = hard_regno_nregs (dst_regno,
						    reg->biggest_mode);
			  for (i = 0; i < nregs; i++)
			    usage_insns[dst_regno + i].check
			      = -(int) INSN_UID (curr_insn);
			}
		    }
		}
	  /* Process clobbered call regs.  */
	  if (curr_id->arg_hard_regs != NULL)
	    for (i = 0; (dst_regno = curr_id->arg_hard_regs[i]) >= 0; i++)
	      if (dst_regno >= FIRST_PSEUDO_REGISTER)
		usage_insns[dst_regno - FIRST_PSEUDO_REGISTER].check
		  = -(int) INSN_UID (curr_insn);
	  if (! JUMP_P (curr_insn))
	    for (i = 0; i < to_inherit_num; i++)
	      if (inherit_reload_reg (true, to_inherit[i].regno,
				      ALL_REGS, curr_insn,
				      to_inherit[i].insns))
		change_p = true;
	  if (CALL_P (curr_insn))
	    {
	      rtx cheap, pat, dest;
	      rtx_insn *restore;
	      int regno, hard_regno;

	      calls_num++;
	      function_abi callee_abi = insn_callee_abi (curr_insn);
	      last_call_for_abi[callee_abi.id ()] = calls_num;
	      full_and_partial_call_clobbers
		|= callee_abi.full_and_partial_reg_clobbers ();
	      if ((cheap = find_reg_note (curr_insn,
					  REG_RETURNED, NULL_RTX)) != NULL_RTX
		  && ((cheap = XEXP (cheap, 0)), true)
		  && (regno = REGNO (cheap)) >= FIRST_PSEUDO_REGISTER
		  && (hard_regno = reg_renumber[regno]) >= 0
		  && usage_insns[regno].check == curr_usage_insns_check
		  /* If there are pending saves/restores, the
		     optimization is not worth it.  */
		  && usage_insns[regno].calls_num == calls_num - 1
		  && callee_abi.clobbers_reg_p (GET_MODE (cheap), hard_regno))
		{
		  /* Restore the pseudo from the call result, as the
		     REG_RETURNED note says that the pseudo value is
		     in the call result and the pseudo is an argument
		     of the call.  */
		  pat = PATTERN (curr_insn);
		  if (GET_CODE (pat) == PARALLEL)
		    pat = XVECEXP (pat, 0, 0);
		  dest = SET_DEST (pat);
		  /* For multiple return values dest is PARALLEL.
		     Currently we handle only the single return value
		     case.  */
		  if (REG_P (dest))
		    {
		      start_sequence ();
		      emit_move_insn (cheap, copy_rtx (dest));
		      restore = get_insns ();
		      end_sequence ();
		      lra_process_new_insns (curr_insn, NULL, restore,
					     "Inserting call parameter restore");
		      /* We don't need to save/restore the pseudo from
			 this call.  */
		      usage_insns[regno].calls_num = calls_num;
		      remove_from_hard_reg_set
			(&full_and_partial_call_clobbers,
			 GET_MODE (cheap), hard_regno);
		      bitmap_set_bit (&check_only_regs, regno);
		    }
		}
	    }
	  to_inherit_num = 0;
	  /* Process insn usages.  */
	  for (iter = 0; iter < 2; iter++)
	    for (reg = iter == 0 ? curr_id->regs : curr_static_id->hard_regs;
		 reg != NULL;
		 reg = reg->next)
	      if ((reg->type != OP_OUT
		   || (reg->type == OP_OUT && reg->subreg_p))
		  && (src_regno = reg->regno) < lra_constraint_new_regno_start)
		{
		  if (src_regno >= FIRST_PSEUDO_REGISTER
		      && reg_renumber[src_regno] < 0 && reg->type == OP_IN)
		    {
		      if (usage_insns[src_regno].check == curr_usage_insns_check
			  && (next_usage_insns
			      = usage_insns[src_regno].insns) != NULL_RTX
			  && NONDEBUG_INSN_P (curr_insn))
			add_to_inherit (src_regno, next_usage_insns);
		      else if (usage_insns[src_regno].check
			       != -(int) INSN_UID (curr_insn))
			/* Add usages but only if the reg is not set up
			   in the same insn.  */
			add_next_usage_insn (src_regno, curr_insn,
					     reloads_num);
		    }
		  else if (src_regno < FIRST_PSEUDO_REGISTER
			   || reg_renumber[src_regno] >= 0)
		    {
		      bool before_p;
		      rtx_insn *use_insn = curr_insn;

		      before_p = (JUMP_P (curr_insn)
				  || (CALL_P (curr_insn)
				      && reg->type == OP_IN));
		      if (NONDEBUG_INSN_P (curr_insn)
			  && (! JUMP_P (curr_insn) || reg->type == OP_IN)
			  && split_if_necessary (src_regno, reg->biggest_mode,
						 potential_reload_hard_regs,
						 before_p, curr_insn, max_uid))
			{
			  if (reg->subreg_p)
			    check_and_force_assignment_correctness_p = true;
			  change_p = true;
			  /* Invalidate.  */
			  usage_insns[src_regno].check = 0;
			  if (before_p)
			    use_insn = PREV_INSN (curr_insn);
			}
		      if (NONDEBUG_INSN_P (curr_insn))
			{
			  if (src_regno < FIRST_PSEUDO_REGISTER)
			    add_to_hard_reg_set (&live_hard_regs,
						 reg->biggest_mode, src_regno);
			  else
			    add_to_hard_reg_set (&live_hard_regs,
						 PSEUDO_REGNO_MODE (src_regno),
						 reg_renumber[src_regno]);
			}
		      if (src_regno >= FIRST_PSEUDO_REGISTER)
			add_next_usage_insn (src_regno, use_insn, reloads_num);
		      else
			{
			  for (i = 0;
			       i < hard_regno_nregs (src_regno,
						     reg->biggest_mode);
			       i++)
			    add_next_usage_insn (src_regno + i, use_insn,
						 reloads_num);
			}
		    }
		}
	  /* Process used call regs.  */
	  if (curr_id->arg_hard_regs != NULL)
	    for (i = 0; (src_regno = curr_id->arg_hard_regs[i]) >= 0; i++)
	      if (src_regno < FIRST_PSEUDO_REGISTER)
		{
		  SET_HARD_REG_BIT (live_hard_regs, src_regno);
		  add_next_usage_insn (src_regno, curr_insn, reloads_num);
		}
	  for (i = 0; i < to_inherit_num; i++)
	    {
	      src_regno = to_inherit[i].regno;
	      if (inherit_reload_reg (false, src_regno, ALL_REGS,
				      curr_insn, to_inherit[i].insns))
		change_p = true;
	      else
		setup_next_usage_insn (src_regno, curr_insn, reloads_num,
				       false);
	    }
	}
      if (update_reloads_num_p
	  && NONDEBUG_INSN_P (curr_insn) && curr_set != NULL_RTX)
	{
	  int regno = -1;
	  if ((REG_P (SET_DEST (curr_set))
	       && (regno = REGNO (SET_DEST (curr_set)))
		  >= lra_constraint_new_regno_start
	       && reg_renumber[regno] < 0
	       && (cl = lra_get_allocno_class (regno)) != NO_REGS)
	      || (REG_P (SET_SRC (curr_set))
		  && (regno = REGNO (SET_SRC (curr_set)))
		     >= lra_constraint_new_regno_start
		  && reg_renumber[regno] < 0
		  && (cl = lra_get_allocno_class (regno)) != NO_REGS))
	    {
	      if (ira_class_hard_regs_num[cl] <= max_small_class_regs_num)
		reloads_num++;
	      if (hard_reg_set_subset_p (reg_class_contents[cl],
					 live_hard_regs))
		potential_reload_hard_regs |= reg_class_contents[cl];
	    }
	}
      if (NONDEBUG_INSN_P (curr_insn))
	{
	  int regno;

	  /* Invalidate invariants with changed regs.  */
	  curr_id = lra_get_insn_recog_data (curr_insn);
	  for (reg = curr_id->regs; reg != NULL; reg = reg->next)
	    if (reg->type != OP_IN)
	      {
		bitmap_set_bit (&invalid_invariant_regs, reg->regno);
		bitmap_set_bit (&invalid_invariant_regs,
				ORIGINAL_REGNO (regno_reg_rtx[reg->regno]));
	      }
	  curr_static_id = curr_id->insn_static_data;
	  for (reg = curr_static_id->hard_regs; reg != NULL; reg = reg->next)
	    if (reg->type != OP_IN)
	      bitmap_set_bit (&invalid_invariant_regs, reg->regno);
	  if (curr_id->arg_hard_regs != NULL)
	    for (i = 0; (regno = curr_id->arg_hard_regs[i]) >= 0; i++)
	      if (regno >= FIRST_PSEUDO_REGISTER)
		bitmap_set_bit (&invalid_invariant_regs,
				regno - FIRST_PSEUDO_REGISTER);
	}
      /* We reached the start of the current basic block.  */
      if (prev_insn == NULL_RTX || prev_insn == PREV_INSN (head)
	  || BLOCK_FOR_INSN (prev_insn) != curr_bb)
	{
	  /* We reached the beginning of the current block -- do
	     the rest of the splitting in the current BB.  */
	  to_process = df_get_live_in (curr_bb);
	  if (BLOCK_FOR_INSN (head) != curr_bb)
	    {
	      /* We are somewhere in the middle of EBB.  */
	      get_live_on_other_edges (EDGE_PRED (curr_bb, 0)->src,
				       curr_bb, &temp_bitmap);
	      to_process = &temp_bitmap;
	    }
	  head_p = true;
	  EXECUTE_IF_SET_IN_BITMAP (to_process, 0, j, bi)
	    {
	      if ((int) j >= lra_constraint_new_regno_start)
		break;
	      if (((int) j < FIRST_PSEUDO_REGISTER || reg_renumber[j] >= 0)
		  && usage_insns[j].check == curr_usage_insns_check
		  && (next_usage_insns = usage_insns[j].insns) != NULL_RTX)
		{
		  if (need_for_split_p (potential_reload_hard_regs, j))
		    {
		      if (lra_dump_file != NULL && head_p)
			{
			  fprintf (lra_dump_file,
				   "  ----------------------------------\n");
			  head_p = false;
			}
		      if (split_reg (false, j, bb_note (curr_bb),
				     next_usage_insns, NULL))
			change_p = true;
		    }
		  usage_insns[j].check = 0;
		}
	    }
	}
    }
  return change_p;
}

/* This value affects EBB forming.  If the probability of an edge from
   an EBB to a BB is not greater than the following value, we don't
   add the BB to the EBB.  */
#define EBB_PROBABILITY_CUTOFF \
  ((REG_BR_PROB_BASE * param_lra_inheritance_ebb_probability_cutoff) / 100)
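
/* For instance, with REG_BR_PROB_BASE == 10000 and the parameter at
   its typical default of 40, the cutoff is 4000: a fall-through edge
   taken with probability 40% or less stops the EBB from growing.
   (Numeric example; the parameter default is target/version
   dependent.)  */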
7204 | |
7205 | /* Current number of inheritance/split iteration. */ |
7206 | int lra_inheritance_iter; |
7207 | |
7208 | /* Entry function for inheritance/split pass. */ |
7209 | void |
7210 | lra_inheritance (void) |
7211 | { |
7212 | int i; |
7213 | basic_block bb, start_bb; |
7214 | edge e; |
7215 | |
7216 | lra_inheritance_iter++; |
7217 | if (lra_inheritance_iter > LRA_MAX_INHERITANCE_PASSES) |
7218 | return; |
7219 | timevar_push (tv: TV_LRA_INHERITANCE); |
7220 | if (lra_dump_file != NULL) |
7221 | fprintf (stream: lra_dump_file, format: "\n********** Inheritance #%d: **********\n\n" , |
7222 | lra_inheritance_iter); |
7223 | curr_usage_insns_check = 0; |
7224 | usage_insns = XNEWVEC (struct usage_insns, lra_constraint_new_regno_start); |
7225 | for (i = 0; i < lra_constraint_new_regno_start; i++) |
7226 | usage_insns[i].check = 0; |
7227 | bitmap_initialize (head: &check_only_regs, obstack: ®_obstack); |
7228 | bitmap_initialize (head: &invalid_invariant_regs, obstack: ®_obstack); |
7229 | bitmap_initialize (head: &live_regs, obstack: ®_obstack); |
7230 | bitmap_initialize (head: &temp_bitmap, obstack: ®_obstack); |
7231 | bitmap_initialize (head: &ebb_global_regs, obstack: ®_obstack); |
7232 | FOR_EACH_BB_FN (bb, cfun) |
7233 | { |
7234 | start_bb = bb; |
7235 | if (lra_dump_file != NULL) |
7236 | fprintf (stream: lra_dump_file, format: "EBB" ); |
7237 | /* Form a EBB starting with BB. */ |
7238 | bitmap_clear (&ebb_global_regs); |
7239 | bitmap_ior_into (&ebb_global_regs, df_get_live_in (bb)); |
7240 | for (;;) |
7241 | { |
7242 | if (lra_dump_file != NULL) |
7243 | fprintf (stream: lra_dump_file, format: " %d" , bb->index); |
7244 | if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun) |
7245 | || LABEL_P (BB_HEAD (bb->next_bb))) |
7246 | break; |
7247 | e = find_fallthru_edge (edges: bb->succs); |
7248 | if (! e) |
7249 | break; |
7250 | if (e->probability.initialized_p () |
7251 | && e->probability.to_reg_br_prob_base () < EBB_PROBABILITY_CUTOFF) |
7252 | break; |
7253 | bb = bb->next_bb; |
7254 | } |
7255 | bitmap_ior_into (&ebb_global_regs, df_get_live_out (bb)); |
7256 | if (lra_dump_file != NULL) |
7257 | fprintf (stream: lra_dump_file, format: "\n" ); |
7258 | if (inherit_in_ebb (BB_HEAD (start_bb), BB_END (bb))) |
7259 | /* Remember that the EBB head and tail can change in |
7260 | inherit_in_ebb. */ |
7261 | update_ebb_live_info (BB_HEAD (start_bb), BB_END (bb)); |
7262 | } |
  bitmap_release (&ebb_global_regs);
  bitmap_release (&temp_bitmap);
  bitmap_release (&live_regs);
  bitmap_release (&invalid_invariant_regs);
  bitmap_release (&check_only_regs);
  free (usage_insns);
7269 | |
  timevar_pop (TV_LRA_INHERITANCE);
7271 | } |
7272 | |
7273 | |
7274 | |
7275 | /* This page contains code to undo failed inheritance/split |
7276 | transformations. */ |
7277 | |
7278 | /* Current number of iteration undoing inheritance/split. */ |
7279 | int lra_undo_inheritance_iter; |
7280 | |
/* Fix BB live info LIVE after removing the pseudos in REMOVED_PSEUDOS,
   which were created by the inheritance/split pass.  */
7283 | static void |
7284 | fix_bb_live_info (bitmap live, bitmap removed_pseudos) |
7285 | { |
7286 | unsigned int regno; |
7287 | bitmap_iterator bi; |
7288 | |
7289 | EXECUTE_IF_SET_IN_BITMAP (removed_pseudos, 0, regno, bi) |
7290 | if (bitmap_clear_bit (live, regno) |
7291 | && REG_P (lra_reg_info[regno].restore_rtx)) |
7292 | bitmap_set_bit (live, REGNO (lra_reg_info[regno].restore_rtx)); |
7293 | } |
7294 | |
/* Return the regno of the (subreg of) REG.  Otherwise, return a
   negative number.  */
7297 | static int |
7298 | get_regno (rtx reg) |
7299 | { |
7300 | if (GET_CODE (reg) == SUBREG) |
7301 | reg = SUBREG_REG (reg); |
7302 | if (REG_P (reg)) |
7303 | return REGNO (reg); |
7304 | return -1; |
7305 | } |
7306 | |
/* Delete a move INSN with destination reg DREGNO and a preceding
   clobber insn with the same regno.  The inheritance/split code can
   generate moves with a preceding clobber, and when we delete such a
   move we should delete the clobber insn too to keep the life info
   correct.  */
7312 | static void |
7313 | delete_move_and_clobber (rtx_insn *insn, int dregno) |
7314 | { |
7315 | rtx_insn *prev_insn = PREV_INSN (insn); |
7316 | |
7317 | lra_set_insn_deleted (insn); |
7318 | lra_assert (dregno >= 0); |
7319 | if (prev_insn != NULL && NONDEBUG_INSN_P (prev_insn) |
7320 | && GET_CODE (PATTERN (prev_insn)) == CLOBBER |
7321 | && dregno == get_regno (XEXP (PATTERN (prev_insn), 0))) |
7322 | lra_set_insn_deleted (prev_insn); |
7323 | } |
7324 | |
/* Remove inheritance/split pseudos which are in REMOVE_PSEUDOS and
   return true if we did any change.  The undo transformations for
   inheritance look like
     i <- i2
     p <- i      =>   p <- i2
   or removing
     p <- i, i <- p, and i <- i3
   where p is the original pseudo from which inheritance pseudo i was
   created, i and i3 are removed inheritance pseudos, and i2 is
   another inheritance pseudo that is not removed.  All split pseudos
   and other occurrences of removed inheritance pseudos are replaced
   with the corresponding original pseudos.
7337 | |
7338 | The function also schedules insns changed and created during |
7339 | inheritance/split pass for processing by the subsequent constraint |
7340 | pass. */ |
7341 | static bool |
7342 | remove_inheritance_pseudos (bitmap remove_pseudos) |
7343 | { |
7344 | basic_block bb; |
7345 | int regno, sregno, prev_sregno, dregno; |
7346 | rtx restore_rtx; |
7347 | rtx set, prev_set; |
7348 | rtx_insn *prev_insn; |
7349 | bool change_p, done_p; |
7350 | |
  change_p = ! bitmap_empty_p (remove_pseudos);
  /* We cannot finish the function right away if CHANGE_P is true
     because we need to mark insns affected by the previous
     inheritance/split pass for processing by the subsequent
     constraint pass.  */
7356 | FOR_EACH_BB_FN (bb, cfun) |
7357 | { |
      fix_bb_live_info (df_get_live_in (bb), remove_pseudos);
      fix_bb_live_info (df_get_live_out (bb), remove_pseudos);
7360 | FOR_BB_INSNS_REVERSE (bb, curr_insn) |
7361 | { |
7362 | if (! INSN_P (curr_insn)) |
7363 | continue; |
7364 | done_p = false; |
7365 | sregno = dregno = -1; |
7366 | if (change_p && NONDEBUG_INSN_P (curr_insn) |
              && (set = single_set (curr_insn)) != NULL_RTX)
7368 | { |
7369 | dregno = get_regno (SET_DEST (set)); |
7370 | sregno = get_regno (SET_SRC (set)); |
7371 | } |
7372 | |
7373 | if (sregno >= 0 && dregno >= 0) |
7374 | { |
7375 | if (bitmap_bit_p (remove_pseudos, dregno) |
7376 | && ! REG_P (lra_reg_info[dregno].restore_rtx)) |
7377 | { |
7378 | /* invariant inheritance pseudo <- original pseudo */ |
7379 | if (lra_dump_file != NULL) |
7380 | { |
                      fprintf (lra_dump_file, "	   Removing invariant inheritance:\n");
                      dump_insn_slim (lra_dump_file, curr_insn);
                      fprintf (lra_dump_file, "\n");
                    }
                  delete_move_and_clobber (curr_insn, dregno);
7386 | done_p = true; |
7387 | } |
7388 | else if (bitmap_bit_p (remove_pseudos, sregno) |
7389 | && ! REG_P (lra_reg_info[sregno].restore_rtx)) |
7390 | { |
7391 | /* reload pseudo <- invariant inheritance pseudo */ |
7392 | start_sequence (); |
7393 | /* We cannot just change the source. It might be |
7394 | an insn different from the move. */ |
7395 | emit_insn (lra_reg_info[sregno].restore_rtx); |
7396 | rtx_insn *new_insns = get_insns (); |
7397 | end_sequence (); |
7398 | lra_assert (single_set (new_insns) != NULL |
7399 | && SET_DEST (set) == SET_DEST (single_set (new_insns))); |
7400 | lra_process_new_insns (curr_insn, NULL, new_insns, |
7401 | "Changing reload<-invariant inheritance" ); |
                  delete_move_and_clobber (curr_insn, dregno);
7403 | done_p = true; |
7404 | } |
              else if ((bitmap_bit_p (remove_pseudos, sregno)
                        && (get_regno (lra_reg_info[sregno].restore_rtx) == dregno
                            || (bitmap_bit_p (remove_pseudos, dregno)
                                && get_regno (lra_reg_info[sregno].restore_rtx) >= 0
                                && (get_regno (lra_reg_info[sregno].restore_rtx)
                                    == get_regno (lra_reg_info[dregno].restore_rtx)))))
                       || (bitmap_bit_p (remove_pseudos, dregno)
                           && get_regno (lra_reg_info[dregno].restore_rtx) == sregno))
7413 | /* One of the following cases: |
7414 | original <- removed inheritance pseudo |
7415 | removed inherit pseudo <- another removed inherit pseudo |
7416 | removed inherit pseudo <- original pseudo |
7417 | Or |
7418 | removed_split_pseudo <- original_reg |
7419 | original_reg <- removed_split_pseudo */ |
7420 | { |
7421 | if (lra_dump_file != NULL) |
7422 | { |
                    fprintf (lra_dump_file, "	   Removing %s:\n",
                             bitmap_bit_p (&lra_split_regs, sregno)
                             || bitmap_bit_p (&lra_split_regs, dregno)
                             ? "split" : "inheritance");
7427 | dump_insn_slim (lra_dump_file, curr_insn); |
7428 | } |
                delete_move_and_clobber (curr_insn, dregno);
7430 | done_p = true; |
7431 | } |
7432 | else if (bitmap_bit_p (remove_pseudos, sregno) |
7433 | && bitmap_bit_p (&lra_inheritance_pseudos, sregno)) |
7434 | { |
7435 | /* Search the following pattern: |
7436 | inherit_or_split_pseudo1 <- inherit_or_split_pseudo2 |
7437 | original_pseudo <- inherit_or_split_pseudo1 |
7438 | where the 2nd insn is the current insn and |
7439 | inherit_or_split_pseudo2 is not removed. If it is found, |
                     change the current insn into:
7441 | original_pseudo <- inherit_or_split_pseudo2. */ |
                  for (prev_insn = PREV_INSN (curr_insn);
                       prev_insn != NULL_RTX && ! NONDEBUG_INSN_P (prev_insn);
                       prev_insn = PREV_INSN (prev_insn))
                    ;
                  if (prev_insn != NULL_RTX && BLOCK_FOR_INSN (prev_insn) == bb
                      && (prev_set = single_set (prev_insn)) != NULL_RTX
                      /* There should be no subregs in the insn we are
                         searching for, because only the original reg
                         might be in a subreg when we changed the mode
                         of a load/store for splitting.  */
7452 | && REG_P (SET_DEST (prev_set)) |
7453 | && REG_P (SET_SRC (prev_set)) |
7454 | && (int) REGNO (SET_DEST (prev_set)) == sregno |
7455 | && ((prev_sregno = REGNO (SET_SRC (prev_set))) |
7456 | >= FIRST_PSEUDO_REGISTER) |
7457 | && (lra_reg_info[prev_sregno].restore_rtx == NULL_RTX |
7458 | || |
                          /* As we consider a chain of inheritance or
                             splitting as described in the comment
                             above, we should check that sregno and
                             prev_sregno were inheritance/split pseudos
                             created from the same original regno.  */
                          (get_regno (lra_reg_info[sregno].restore_rtx) >= 0
                           && (get_regno (lra_reg_info[sregno].restore_rtx)
                               == get_regno (lra_reg_info[prev_sregno].restore_rtx))))
7467 | && ! bitmap_bit_p (remove_pseudos, prev_sregno)) |
7468 | { |
7469 | lra_assert (GET_MODE (SET_SRC (prev_set)) |
7470 | == GET_MODE (regno_reg_rtx[sregno])); |
                      /* Although we have a single set, the insn can
                         contain more than one occurrence of the sregno
                         register as a source.  Change all occurrences.  */
7474 | lra_substitute_pseudo_within_insn (curr_insn, sregno, |
7475 | SET_SRC (prev_set), |
7476 | false); |
                      /* As we are finishing processing the insn here,
                         check the destination too as it might be an
                         inheritance pseudo for another pseudo.  */
7480 | if (bitmap_bit_p (remove_pseudos, dregno) |
7481 | && bitmap_bit_p (&lra_inheritance_pseudos, dregno) |
7482 | && (restore_rtx |
7483 | = lra_reg_info[dregno].restore_rtx) != NULL_RTX) |
7484 | { |
7485 | if (GET_CODE (SET_DEST (set)) == SUBREG) |
7486 | SUBREG_REG (SET_DEST (set)) = restore_rtx; |
7487 | else |
7488 | SET_DEST (set) = restore_rtx; |
7489 | } |
7490 | lra_push_insn_and_update_insn_regno_info (curr_insn); |
7491 | lra_set_used_insn_alternative_by_uid |
                      (INSN_UID (curr_insn), LRA_UNKNOWN_ALT);
7493 | done_p = true; |
7494 | if (lra_dump_file != NULL) |
7495 | { |
                        fprintf (lra_dump_file, "	   Change reload insn:\n");
7497 | dump_insn_slim (lra_dump_file, curr_insn); |
7498 | } |
7499 | } |
7500 | } |
7501 | } |
7502 | if (! done_p) |
7503 | { |
7504 | struct lra_insn_reg *reg; |
7505 | bool restored_regs_p = false; |
7506 | bool kept_regs_p = false; |
7507 | |
              curr_id = lra_get_insn_recog_data (curr_insn);
7509 | for (reg = curr_id->regs; reg != NULL; reg = reg->next) |
7510 | { |
7511 | regno = reg->regno; |
7512 | restore_rtx = lra_reg_info[regno].restore_rtx; |
7513 | if (restore_rtx != NULL_RTX) |
7514 | { |
7515 | if (change_p && bitmap_bit_p (remove_pseudos, regno)) |
7516 | { |
7517 | lra_substitute_pseudo_within_insn |
7518 | (curr_insn, regno, restore_rtx, false); |
7519 | restored_regs_p = true; |
7520 | } |
7521 | else |
7522 | kept_regs_p = true; |
7523 | } |
7524 | } |
7525 | if (NONDEBUG_INSN_P (curr_insn) && kept_regs_p) |
7526 | { |
7527 | /* The instruction has changed since the previous |
7528 | constraints pass. */ |
7529 | lra_push_insn_and_update_insn_regno_info (curr_insn); |
7530 | lra_set_used_insn_alternative_by_uid |
                  (INSN_UID (curr_insn), LRA_UNKNOWN_ALT);
7532 | } |
7533 | else if (restored_regs_p) |
7534 | /* The instruction has been restored to the form that |
7535 | it had during the previous constraints pass. */ |
7536 | lra_update_insn_regno_info (curr_insn); |
7537 | if (restored_regs_p && lra_dump_file != NULL) |
7538 | { |
                  fprintf (lra_dump_file, "	   Insn after restoring regs:\n");
7540 | dump_insn_slim (lra_dump_file, curr_insn); |
7541 | } |
7542 | } |
7543 | } |
7544 | } |
7545 | return change_p; |
7546 | } |
7547 | |
/* If an optional reload pseudo failed to get a hard register or was
   not inherited, it is better to remove the optional reload.  We do
   this transformation after undoing inheritance to make it easier to
   decide whether an optional reload should be removed.  Return true
   if we did any change.  */
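/* An illustrative sketch (regnos are made up): if optional reload
   pseudo r1200 was created for original pseudo r600, got no hard
   register, and was not inherited, the moves r1200 <- r600 and
   r600 <- r1200 are deleted and any remaining occurrence of r1200 is
   replaced with r600.  */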
7553 | static bool |
7554 | undo_optional_reloads (void) |
7555 | { |
7556 | bool change_p, keep_p; |
7557 | unsigned int regno, uid; |
7558 | bitmap_iterator bi, bi2; |
7559 | rtx_insn *insn; |
7560 | rtx set, src, dest; |
  auto_bitmap removed_optional_reload_pseudos (&reg_obstack);
7562 | |
7563 | bitmap_copy (removed_optional_reload_pseudos, &lra_optional_reload_pseudos); |
7564 | EXECUTE_IF_SET_IN_BITMAP (&lra_optional_reload_pseudos, 0, regno, bi) |
7565 | { |
7566 | keep_p = false; |
7567 | /* Keep optional reloads from previous subpasses. */ |
7568 | if (lra_reg_info[regno].restore_rtx == NULL_RTX |
          /* If the original pseudo changed its allocation, just
             removing the optional pseudo is dangerous, as the original
             pseudo will have a longer live range.  */
7572 | || reg_renumber[REGNO (lra_reg_info[regno].restore_rtx)] >= 0) |
7573 | keep_p = true; |
7574 | else if (reg_renumber[regno] >= 0) |
7575 | EXECUTE_IF_SET_IN_BITMAP (&lra_reg_info[regno].insn_bitmap, 0, uid, bi2) |
7576 | { |
7577 | insn = lra_insn_recog_data[uid]->insn; |
7578 | if ((set = single_set (insn)) == NULL_RTX) |
7579 | continue; |
7580 | src = SET_SRC (set); |
7581 | dest = SET_DEST (set); |
7582 | if ((! REG_P (src) && ! SUBREG_P (src)) |
7583 | || (! REG_P (dest) && ! SUBREG_P (dest))) |
7584 | continue; |
            if (get_regno (dest) == (int) regno
                /* Ignore the insn for the optional reload itself.  */
                && (get_regno (lra_reg_info[regno].restore_rtx)
                    != get_regno (src))
                /* Check only inheritance on the last inheritance pass.  */
                && get_regno (src) >= new_regno_start
                /* Check that the optional reload was inherited.  */
                && bitmap_bit_p (&lra_inheritance_pseudos, get_regno (src)))
7593 | { |
7594 | keep_p = true; |
7595 | break; |
7596 | } |
7597 | } |
7598 | if (keep_p) |
7599 | { |
7600 | bitmap_clear_bit (removed_optional_reload_pseudos, regno); |
7601 | if (lra_dump_file != NULL) |
7602 | fprintf (stream: lra_dump_file, format: "Keep optional reload reg %d\n" , regno); |
7603 | } |
7604 | } |
  change_p = ! bitmap_empty_p (removed_optional_reload_pseudos);
  auto_bitmap insn_bitmap (&reg_obstack);
7607 | EXECUTE_IF_SET_IN_BITMAP (removed_optional_reload_pseudos, 0, regno, bi) |
7608 | { |
7609 | if (lra_dump_file != NULL) |
7610 | fprintf (stream: lra_dump_file, format: "Remove optional reload reg %d\n" , regno); |
7611 | bitmap_copy (insn_bitmap, &lra_reg_info[regno].insn_bitmap); |
7612 | EXECUTE_IF_SET_IN_BITMAP (insn_bitmap, 0, uid, bi2) |
7613 | { |
7614 | /* We may have already removed a clobber. */ |
7615 | if (!lra_insn_recog_data[uid]) |
7616 | continue; |
7617 | insn = lra_insn_recog_data[uid]->insn; |
7618 | if ((set = single_set (insn)) != NULL_RTX) |
7619 | { |
7620 | src = SET_SRC (set); |
7621 | dest = SET_DEST (set); |
7622 | if ((REG_P (src) || SUBREG_P (src)) |
7623 | && (REG_P (dest) || SUBREG_P (dest)) |
                  && ((get_regno (src) == (int) regno
                       && (get_regno (lra_reg_info[regno].restore_rtx)
                           == get_regno (dest)))
                      || (get_regno (dest) == (int) regno
                          && (get_regno (lra_reg_info[regno].restore_rtx)
                              == get_regno (src)))))
7630 | { |
7631 | if (lra_dump_file != NULL) |
7632 | { |
                      fprintf (lra_dump_file, "  Deleting move %u\n",
7634 | INSN_UID (insn)); |
7635 | dump_insn_slim (lra_dump_file, insn); |
7636 | } |
                  delete_move_and_clobber (insn, get_regno (dest));
7638 | continue; |
7639 | } |
              /* We should not worry about generating memory-memory
                 moves here, because if the corresponding inheritance
                 did not work (the inheritance pseudo did not get a
                 hard reg), we remove both the inheritance pseudo and
                 the optional reload.  */
7645 | } |
7646 | if (GET_CODE (PATTERN (insn)) == CLOBBER |
7647 | && REG_P (SET_DEST (insn)) |
7648 | && get_regno (SET_DEST (insn)) == (int) regno) |
7649 | /* Refuse to remap clobbers to preexisting pseudos. */ |
7650 | gcc_unreachable (); |
7651 | lra_substitute_pseudo_within_insn |
7652 | (insn, regno, lra_reg_info[regno].restore_rtx, false); |
7653 | lra_update_insn_regno_info (insn); |
7654 | if (lra_dump_file != NULL) |
7655 | { |
            fprintf (lra_dump_file,
                     "  Restoring original insn:\n");
7658 | dump_insn_slim (lra_dump_file, insn); |
7659 | } |
7660 | } |
7661 | } |
7662 | /* Clear restore_regnos. */ |
7663 | EXECUTE_IF_SET_IN_BITMAP (&lra_optional_reload_pseudos, 0, regno, bi) |
7664 | lra_reg_info[regno].restore_rtx = NULL_RTX; |
7665 | return change_p; |
7666 | } |
7667 | |
7668 | /* Entry function for undoing inheritance/split transformation. Return true |
7669 | if we did any RTL change in this pass. */ |
7670 | bool |
7671 | lra_undo_inheritance (void) |
7672 | { |
7673 | unsigned int regno; |
7674 | int hard_regno; |
7675 | int n_all_inherit, n_inherit, n_all_split, n_split; |
7676 | rtx restore_rtx; |
7677 | bitmap_iterator bi; |
7678 | bool change_p; |
7679 | |
7680 | lra_undo_inheritance_iter++; |
7681 | if (lra_undo_inheritance_iter > LRA_MAX_INHERITANCE_PASSES) |
7682 | return false; |
7683 | if (lra_dump_file != NULL) |
    fprintf (lra_dump_file,
             "\n********** Undoing inheritance #%d: **********\n\n",
7686 | lra_undo_inheritance_iter); |
  auto_bitmap remove_pseudos (&reg_obstack);
7688 | n_inherit = n_all_inherit = 0; |
7689 | EXECUTE_IF_SET_IN_BITMAP (&lra_inheritance_pseudos, 0, regno, bi) |
7690 | if (lra_reg_info[regno].restore_rtx != NULL_RTX) |
7691 | { |
7692 | n_all_inherit++; |
7693 | if (reg_renumber[regno] < 0 |
            /* If the original pseudo changed its allocation, just
               removing inheritance is dangerous, as the new allocation
               was chosen using the shorter live ranges.  */
7697 | && (! REG_P (lra_reg_info[regno].restore_rtx) |
7698 | || reg_renumber[REGNO (lra_reg_info[regno].restore_rtx)] < 0)) |
7699 | bitmap_set_bit (remove_pseudos, regno); |
7700 | else |
7701 | n_inherit++; |
7702 | } |
7703 | if (lra_dump_file != NULL && n_all_inherit != 0) |
7704 | fprintf (stream: lra_dump_file, format: "Inherit %d out of %d (%.2f%%)\n" , |
7705 | n_inherit, n_all_inherit, |
7706 | (double) n_inherit / n_all_inherit * 100); |
7707 | n_split = n_all_split = 0; |
7708 | EXECUTE_IF_SET_IN_BITMAP (&lra_split_regs, 0, regno, bi) |
7709 | if ((restore_rtx = lra_reg_info[regno].restore_rtx) != NULL_RTX) |
7710 | { |
7711 | int restore_regno = REGNO (restore_rtx); |
7712 | |
7713 | n_all_split++; |
7714 | hard_regno = (restore_regno >= FIRST_PSEUDO_REGISTER |
7715 | ? reg_renumber[restore_regno] : restore_regno); |
7716 | if (hard_regno < 0 || reg_renumber[regno] == hard_regno) |
7717 | bitmap_set_bit (remove_pseudos, regno); |
7718 | else |
7719 | { |
7720 | n_split++; |
7721 | if (lra_dump_file != NULL) |
              fprintf (lra_dump_file, "	   Keep split r%d (orig=r%d)\n",
7723 | regno, restore_regno); |
7724 | } |
7725 | } |
7726 | if (lra_dump_file != NULL && n_all_split != 0) |
7727 | fprintf (stream: lra_dump_file, format: "Split %d out of %d (%.2f%%)\n" , |
7728 | n_split, n_all_split, |
7729 | (double) n_split / n_all_split * 100); |
7730 | change_p = remove_inheritance_pseudos (remove_pseudos); |
7731 | /* Clear restore_regnos. */ |
7732 | EXECUTE_IF_SET_IN_BITMAP (&lra_inheritance_pseudos, 0, regno, bi) |
7733 | lra_reg_info[regno].restore_rtx = NULL_RTX; |
7734 | EXECUTE_IF_SET_IN_BITMAP (&lra_split_regs, 0, regno, bi) |
7735 | lra_reg_info[regno].restore_rtx = NULL_RTX; |
7736 | change_p = undo_optional_reloads () || change_p; |
7737 | return change_p; |
7738 | } |
7739 | |