1 | /* Compute different info about registers. |
2 | Copyright (C) 1987-2023 Free Software Foundation, Inc. |
3 | |
4 | This file is part of GCC. |
5 | |
6 | GCC is free software; you can redistribute it and/or modify it under |
7 | the terms of the GNU General Public License as published by the Free |
8 | Software Foundation; either version 3, or (at your option) any later |
9 | version. |
10 | |
11 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or |
13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
14 | for more details. |
15 | |
16 | You should have received a copy of the GNU General Public License |
17 | along with GCC; see the file COPYING3. If not see |
18 | <http://www.gnu.org/licenses/>. */ |
19 | |
20 | |
21 | /* This file contains regscan pass of the compiler and passes for |
22 | dealing with info about modes of pseudo-registers inside |
23 | subregisters. It also defines some tables of information about the |
24 | hardware registers, function init_reg_sets to initialize the |
25 | tables, and other auxiliary functions to deal with info about |
26 | registers and their classes. */ |
27 | |
28 | #include "config.h" |
29 | #include "system.h" |
30 | #include "coretypes.h" |
31 | #include "backend.h" |
32 | #include "target.h" |
33 | #include "rtl.h" |
34 | #include "tree.h" |
35 | #include "df.h" |
36 | #include "memmodel.h" |
37 | #include "tm_p.h" |
38 | #include "insn-config.h" |
39 | #include "regs.h" |
40 | #include "ira.h" |
41 | #include "recog.h" |
42 | #include "diagnostic-core.h" |
43 | #include "reload.h" |
44 | #include "output.h" |
45 | #include "tree-pass.h" |
46 | #include "function-abi.h" |
47 | |
48 | /* Maximum register number used in this function, plus one. */ |
49 | |
50 | int max_regno; |
51 | |
/* Used to cache the results of simplifiable_subregs.  SHAPE is the input
   parameter and SIMPLIFIABLE_REGS is the result.  */
class simplifiable_subreg
{
public:
  simplifiable_subreg (const subreg_shape &);

  /* The subreg shape that this cache entry was computed for (cache key).  */
  subreg_shape shape;
  /* Cached result for SHAPE; presumably the hard registers for which a
     subreg of this shape can be simplified — confirm against
     simplifiable_subregs, which is defined elsewhere in this file.  */
  HARD_REG_SET simplifiable_regs;
};
62 | |
63 | struct target_hard_regs default_target_hard_regs; |
64 | struct target_regs default_target_regs; |
65 | #if SWITCHABLE_TARGET |
66 | struct target_hard_regs *this_target_hard_regs = &default_target_hard_regs; |
67 | struct target_regs *this_target_regs = &default_target_regs; |
68 | #endif |
69 | |
70 | #define call_used_regs \ |
71 | (this_target_hard_regs->x_call_used_regs) |
72 | #define regs_invalidated_by_call \ |
73 | (this_target_hard_regs->x_regs_invalidated_by_call) |
74 | |
75 | /* Data for initializing fixed_regs. */ |
76 | static const char initial_fixed_regs[] = FIXED_REGISTERS; |
77 | |
78 | /* Data for initializing call_used_regs. */ |
79 | #ifdef CALL_REALLY_USED_REGISTERS |
80 | #ifdef CALL_USED_REGISTERS |
81 | #error CALL_USED_REGISTERS and CALL_REALLY_USED_REGISTERS are both defined |
82 | #endif |
83 | static const char initial_call_used_regs[] = CALL_REALLY_USED_REGISTERS; |
84 | #else |
85 | static const char initial_call_used_regs[] = CALL_USED_REGISTERS; |
86 | #endif |
87 | |
88 | /* Indexed by hard register number, contains 1 for registers |
89 | that are being used for global register decls. |
90 | These must be exempt from ordinary flow analysis |
91 | and are also considered fixed. */ |
92 | char global_regs[FIRST_PSEUDO_REGISTER]; |
93 | |
94 | /* The set of global registers. */ |
95 | HARD_REG_SET global_reg_set; |
96 | |
97 | /* Declaration for the global register. */ |
98 | tree global_regs_decl[FIRST_PSEUDO_REGISTER]; |
99 | |
100 | /* Used to initialize reg_alloc_order. */ |
101 | #ifdef REG_ALLOC_ORDER |
102 | static int initial_reg_alloc_order[FIRST_PSEUDO_REGISTER] = REG_ALLOC_ORDER; |
103 | #endif |
104 | |
105 | /* The same information, but as an array of unsigned ints. We copy from |
106 | these unsigned ints to the table above. We do this so the tm.h files |
107 | do not have to be aware of the wordsize for machines with <= 64 regs. |
108 | Note that we hard-code 32 here, not HOST_BITS_PER_INT. */ |
109 | #define N_REG_INTS \ |
110 | ((FIRST_PSEUDO_REGISTER + (32 - 1)) / 32) |
111 | |
112 | static const unsigned int_reg_class_contents[N_REG_CLASSES][N_REG_INTS] |
113 | = REG_CLASS_CONTENTS; |
114 | |
115 | /* Array containing all of the register names. */ |
116 | static const char *const initial_reg_names[] = REGISTER_NAMES; |
117 | |
118 | /* Array containing all of the register class names. */ |
119 | const char * reg_class_names[] = REG_CLASS_NAMES; |
120 | |
121 | /* No more global register variables may be declared; true once |
122 | reginfo has been initialized. */ |
123 | static int no_global_reg_vars = 0; |
124 | |
125 | static void |
126 | clear_global_regs_cache (void) |
127 | { |
128 | for (size_t i = 0 ; i < FIRST_PSEUDO_REGISTER ; i++) |
129 | { |
130 | global_regs[i] = 0; |
131 | global_regs_decl[i] = NULL; |
132 | } |
133 | } |
134 | |
135 | void |
136 | reginfo_cc_finalize (void) |
137 | { |
138 | clear_global_regs_cache (); |
139 | no_global_reg_vars = 0; |
140 | CLEAR_HARD_REG_SET (set&: global_reg_set); |
141 | } |
142 | |
143 | /* Given a register bitmap, turn on the bits in a HARD_REG_SET that |
144 | correspond to the hard registers, if any, set in that map. This |
145 | could be done far more efficiently by having all sorts of special-cases |
146 | with moving single words, but probably isn't worth the trouble. */ |
147 | void |
148 | reg_set_to_hard_reg_set (HARD_REG_SET *to, const_bitmap from) |
149 | { |
150 | unsigned i; |
151 | bitmap_iterator bi; |
152 | |
153 | EXECUTE_IF_SET_IN_BITMAP (from, 0, i, bi) |
154 | { |
155 | if (i >= FIRST_PSEUDO_REGISTER) |
156 | return; |
157 | SET_HARD_REG_BIT (set&: *to, bit: i); |
158 | } |
159 | } |
160 | |
161 | /* Function called only once per target_globals to initialize the |
162 | target_hard_regs structure. Once this is done, various switches |
163 | may override. */ |
164 | void |
165 | init_reg_sets (void) |
166 | { |
167 | int i, j; |
168 | |
169 | /* First copy the register information from the initial int form into |
170 | the regsets. */ |
171 | |
172 | for (i = 0; i < N_REG_CLASSES; i++) |
173 | { |
174 | CLEAR_HARD_REG_SET (reg_class_contents[i]); |
175 | |
176 | /* Note that we hard-code 32 here, not HOST_BITS_PER_INT. */ |
177 | for (j = 0; j < FIRST_PSEUDO_REGISTER; j++) |
178 | if (int_reg_class_contents[i][j / 32] |
179 | & ((unsigned) 1 << (j % 32))) |
180 | SET_HARD_REG_BIT (reg_class_contents[i], bit: j); |
181 | } |
182 | |
183 | /* Sanity check: make sure the target macros FIXED_REGISTERS and |
184 | CALL_USED_REGISTERS had the right number of initializers. */ |
185 | gcc_assert (sizeof fixed_regs == sizeof initial_fixed_regs); |
186 | gcc_assert (sizeof call_used_regs == sizeof initial_call_used_regs); |
187 | #ifdef REG_ALLOC_ORDER |
188 | gcc_assert (sizeof reg_alloc_order == sizeof initial_reg_alloc_order); |
189 | #endif |
190 | gcc_assert (sizeof reg_names == sizeof initial_reg_names); |
191 | |
192 | memcpy (fixed_regs, src: initial_fixed_regs, n: sizeof fixed_regs); |
193 | memcpy (call_used_regs, src: initial_call_used_regs, n: sizeof call_used_regs); |
194 | #ifdef REG_ALLOC_ORDER |
195 | memcpy (reg_alloc_order, src: initial_reg_alloc_order, n: sizeof reg_alloc_order); |
196 | #endif |
197 | memcpy (reg_names, src: initial_reg_names, n: sizeof reg_names); |
198 | |
199 | SET_HARD_REG_SET (accessible_reg_set); |
200 | SET_HARD_REG_SET (operand_reg_set); |
201 | } |
202 | |
203 | /* We need to save copies of some of the register information which |
204 | can be munged by command-line switches so we can restore it during |
205 | subsequent back-end reinitialization. */ |
206 | static char saved_fixed_regs[FIRST_PSEUDO_REGISTER]; |
207 | static char saved_call_used_regs[FIRST_PSEUDO_REGISTER]; |
208 | static const char *saved_reg_names[FIRST_PSEUDO_REGISTER]; |
209 | static HARD_REG_SET saved_accessible_reg_set; |
210 | static HARD_REG_SET saved_operand_reg_set; |
211 | |
212 | /* Save the register information. */ |
213 | void |
214 | save_register_info (void) |
215 | { |
216 | /* Sanity check: make sure the target macros FIXED_REGISTERS and |
217 | CALL_USED_REGISTERS had the right number of initializers. */ |
218 | gcc_assert (sizeof fixed_regs == sizeof saved_fixed_regs); |
219 | gcc_assert (sizeof call_used_regs == sizeof saved_call_used_regs); |
220 | memcpy (dest: saved_fixed_regs, fixed_regs, n: sizeof fixed_regs); |
221 | memcpy (dest: saved_call_used_regs, call_used_regs, n: sizeof call_used_regs); |
222 | |
223 | /* And similarly for reg_names. */ |
224 | gcc_assert (sizeof reg_names == sizeof saved_reg_names); |
225 | memcpy (dest: saved_reg_names, reg_names, n: sizeof reg_names); |
226 | saved_accessible_reg_set = accessible_reg_set; |
227 | saved_operand_reg_set = operand_reg_set; |
228 | } |
229 | |
230 | /* Restore the register information. */ |
231 | static void |
232 | restore_register_info (void) |
233 | { |
234 | memcpy (fixed_regs, src: saved_fixed_regs, n: sizeof fixed_regs); |
235 | memcpy (call_used_regs, src: saved_call_used_regs, n: sizeof call_used_regs); |
236 | |
237 | memcpy (reg_names, src: saved_reg_names, n: sizeof reg_names); |
238 | accessible_reg_set = saved_accessible_reg_set; |
239 | operand_reg_set = saved_operand_reg_set; |
240 | } |
241 | |
242 | /* After switches have been processed, which perhaps alter |
243 | `fixed_regs' and `call_used_regs', convert them to HARD_REG_SETs. */ |
244 | static void |
245 | init_reg_sets_1 (void) |
246 | { |
247 | unsigned int i, j; |
248 | unsigned int /* machine_mode */ m; |
249 | |
250 | restore_register_info (); |
251 | |
252 | #ifdef REG_ALLOC_ORDER |
253 | for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) |
254 | inv_reg_alloc_order[reg_alloc_order[i]] = i; |
255 | #endif |
256 | |
257 | /* Let the target tweak things if necessary. */ |
258 | |
259 | targetm.conditional_register_usage (); |
260 | |
261 | /* Compute number of hard regs in each class. */ |
262 | |
263 | memset (reg_class_size, c: 0, n: sizeof reg_class_size); |
264 | for (i = 0; i < N_REG_CLASSES; i++) |
265 | { |
266 | bool any_nonfixed = false; |
267 | for (j = 0; j < FIRST_PSEUDO_REGISTER; j++) |
268 | if (TEST_HARD_REG_BIT (reg_class_contents[i], bit: j)) |
269 | { |
270 | reg_class_size[i]++; |
271 | if (!fixed_regs[j]) |
272 | any_nonfixed = true; |
273 | } |
274 | class_only_fixed_regs[i] = !any_nonfixed; |
275 | } |
276 | |
277 | /* Initialize the table of subunions. |
278 | reg_class_subunion[I][J] gets the largest-numbered reg-class |
279 | that is contained in the union of classes I and J. */ |
280 | |
281 | memset (reg_class_subunion, c: 0, n: sizeof reg_class_subunion); |
282 | for (i = 0; i < N_REG_CLASSES; i++) |
283 | { |
284 | for (j = 0; j < N_REG_CLASSES; j++) |
285 | { |
286 | HARD_REG_SET c; |
287 | int k; |
288 | |
289 | c = reg_class_contents[i] | reg_class_contents[j]; |
290 | for (k = 0; k < N_REG_CLASSES; k++) |
291 | if (hard_reg_set_subset_p (reg_class_contents[k], y: c) |
292 | && !hard_reg_set_subset_p (reg_class_contents[k], |
293 | reg_class_contents |
294 | [(int) reg_class_subunion[i][j]])) |
295 | reg_class_subunion[i][j] = (enum reg_class) k; |
296 | } |
297 | } |
298 | |
299 | /* Initialize the table of superunions. |
300 | reg_class_superunion[I][J] gets the smallest-numbered reg-class |
301 | containing the union of classes I and J. */ |
302 | |
303 | memset (reg_class_superunion, c: 0, n: sizeof reg_class_superunion); |
304 | for (i = 0; i < N_REG_CLASSES; i++) |
305 | { |
306 | for (j = 0; j < N_REG_CLASSES; j++) |
307 | { |
308 | HARD_REG_SET c; |
309 | int k; |
310 | |
311 | c = reg_class_contents[i] | reg_class_contents[j]; |
312 | for (k = 0; k < N_REG_CLASSES; k++) |
313 | if (hard_reg_set_subset_p (x: c, reg_class_contents[k])) |
314 | break; |
315 | |
316 | reg_class_superunion[i][j] = (enum reg_class) k; |
317 | } |
318 | } |
319 | |
320 | /* Initialize the tables of subclasses and superclasses of each reg class. |
321 | First clear the whole table, then add the elements as they are found. */ |
322 | |
323 | for (i = 0; i < N_REG_CLASSES; i++) |
324 | { |
325 | for (j = 0; j < N_REG_CLASSES; j++) |
326 | reg_class_subclasses[i][j] = LIM_REG_CLASSES; |
327 | } |
328 | |
329 | for (i = 0; i < N_REG_CLASSES; i++) |
330 | { |
331 | if (i == (int) NO_REGS) |
332 | continue; |
333 | |
334 | for (j = i + 1; j < N_REG_CLASSES; j++) |
335 | if (hard_reg_set_subset_p (reg_class_contents[i], |
336 | reg_class_contents[j])) |
337 | { |
338 | /* Reg class I is a subclass of J. |
339 | Add J to the table of superclasses of I. */ |
340 | enum reg_class *p; |
341 | |
342 | /* Add I to the table of superclasses of J. */ |
343 | p = ®_class_subclasses[j][0]; |
344 | while (*p != LIM_REG_CLASSES) p++; |
345 | *p = (enum reg_class) i; |
346 | } |
347 | } |
348 | |
349 | /* Initialize "constant" tables. */ |
350 | |
351 | CLEAR_HARD_REG_SET (fixed_reg_set); |
352 | CLEAR_HARD_REG_SET (regs_invalidated_by_call); |
353 | |
354 | operand_reg_set &= accessible_reg_set; |
355 | for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) |
356 | { |
357 | /* As a special exception, registers whose class is NO_REGS are |
358 | not accepted by `register_operand'. The reason for this change |
359 | is to allow the representation of special architecture artifacts |
360 | (such as a condition code register) without extending the rtl |
361 | definitions. Since registers of class NO_REGS cannot be used |
362 | as registers in any case where register classes are examined, |
363 | it is better to apply this exception in a target-independent way. */ |
364 | if (REGNO_REG_CLASS (i) == NO_REGS) |
365 | CLEAR_HARD_REG_BIT (operand_reg_set, bit: i); |
366 | |
367 | /* If a register is too limited to be treated as a register operand, |
368 | then it should never be allocated to a pseudo. */ |
369 | if (!TEST_HARD_REG_BIT (operand_reg_set, bit: i)) |
370 | fixed_regs[i] = 1; |
371 | |
372 | if (fixed_regs[i]) |
373 | SET_HARD_REG_BIT (fixed_reg_set, bit: i); |
374 | |
375 | /* There are a couple of fixed registers that we know are safe to |
376 | exclude from being clobbered by calls: |
377 | |
378 | The frame pointer is always preserved across calls. The arg |
379 | pointer is if it is fixed. The stack pointer usually is, |
380 | unless TARGET_RETURN_POPS_ARGS, in which case an explicit |
381 | CLOBBER will be present. If we are generating PIC code, the |
382 | PIC offset table register is preserved across calls, though the |
383 | target can override that. */ |
384 | |
385 | if (i == STACK_POINTER_REGNUM) |
386 | ; |
387 | else if (global_regs[i]) |
388 | SET_HARD_REG_BIT (regs_invalidated_by_call, bit: i); |
389 | else if (i == FRAME_POINTER_REGNUM) |
390 | ; |
391 | else if (!HARD_FRAME_POINTER_IS_FRAME_POINTER |
392 | && i == HARD_FRAME_POINTER_REGNUM) |
393 | ; |
394 | else if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM |
395 | && i == ARG_POINTER_REGNUM && fixed_regs[i]) |
396 | ; |
397 | else if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED |
398 | && i == (unsigned) PIC_OFFSET_TABLE_REGNUM && fixed_regs[i]) |
399 | ; |
400 | else if (call_used_regs[i]) |
401 | SET_HARD_REG_BIT (regs_invalidated_by_call, bit: i); |
402 | } |
403 | |
404 | SET_HARD_REG_SET (savable_regs); |
405 | fixed_nonglobal_reg_set = fixed_reg_set; |
406 | |
407 | /* Preserve global registers if called more than once. */ |
408 | for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) |
409 | { |
410 | if (global_regs[i]) |
411 | { |
412 | fixed_regs[i] = call_used_regs[i] = 1; |
413 | SET_HARD_REG_BIT (fixed_reg_set, bit: i); |
414 | SET_HARD_REG_BIT (set&: global_reg_set, bit: i); |
415 | } |
416 | } |
417 | |
418 | memset (have_regs_of_mode, c: 0, n: sizeof (have_regs_of_mode)); |
419 | memset (contains_reg_of_mode, c: 0, n: sizeof (contains_reg_of_mode)); |
420 | for (m = 0; m < (unsigned int) MAX_MACHINE_MODE; m++) |
421 | { |
422 | HARD_REG_SET ok_regs, ok_regs2; |
423 | CLEAR_HARD_REG_SET (set&: ok_regs); |
424 | CLEAR_HARD_REG_SET (set&: ok_regs2); |
425 | for (j = 0; j < FIRST_PSEUDO_REGISTER; j++) |
426 | if (!TEST_HARD_REG_BIT (fixed_nonglobal_reg_set, bit: j) |
427 | && targetm.hard_regno_mode_ok (j, (machine_mode) m)) |
428 | { |
429 | SET_HARD_REG_BIT (set&: ok_regs, bit: j); |
430 | if (!fixed_regs[j]) |
431 | SET_HARD_REG_BIT (set&: ok_regs2, bit: j); |
432 | } |
433 | |
434 | for (i = 0; i < N_REG_CLASSES; i++) |
435 | if ((targetm.class_max_nregs ((reg_class_t) i, (machine_mode) m) |
436 | <= reg_class_size[i]) |
437 | && hard_reg_set_intersect_p (x: ok_regs, reg_class_contents[i])) |
438 | { |
439 | contains_reg_of_mode[i][m] = 1; |
440 | if (hard_reg_set_intersect_p (x: ok_regs2, reg_class_contents[i])) |
441 | { |
442 | have_regs_of_mode[m] = 1; |
443 | contains_allocatable_reg_of_mode[i][m] = 1; |
444 | } |
445 | } |
446 | } |
447 | |
448 | default_function_abi.initialize (0, regs_invalidated_by_call); |
449 | } |
450 | |
451 | /* Compute the table of register modes. |
452 | These values are used to record death information for individual registers |
453 | (as opposed to a multi-register mode). |
454 | This function might be invoked more than once, if the target has support |
455 | for changing register usage conventions on a per-function basis. |
456 | */ |
457 | void |
458 | init_reg_modes_target (void) |
459 | { |
460 | int i, j; |
461 | |
462 | this_target_regs->x_hard_regno_max_nregs = 1; |
463 | for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) |
464 | for (j = 0; j < MAX_MACHINE_MODE; j++) |
465 | { |
466 | unsigned char nregs = targetm.hard_regno_nregs (i, (machine_mode) j); |
467 | this_target_regs->x_hard_regno_nregs[i][j] = nregs; |
468 | if (nregs > this_target_regs->x_hard_regno_max_nregs) |
469 | this_target_regs->x_hard_regno_max_nregs = nregs; |
470 | } |
471 | |
472 | for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) |
473 | { |
474 | reg_raw_mode[i] = choose_hard_reg_mode (i, 1, NULL); |
475 | |
476 | /* If we couldn't find a valid mode, just use the previous mode |
477 | if it is suitable, otherwise fall back on word_mode. */ |
478 | if (reg_raw_mode[i] == VOIDmode) |
479 | { |
480 | if (i > 0 && hard_regno_nregs (regno: i, reg_raw_mode[i - 1]) == 1) |
481 | reg_raw_mode[i] = reg_raw_mode[i - 1]; |
482 | else |
483 | reg_raw_mode[i] = word_mode; |
484 | } |
485 | } |
486 | } |
487 | |
/* Finish initializing the register sets and initialize the register modes.
   This function might be invoked more than once, if the target has support
   for changing register usage conventions on a per-function basis.
*/
void
init_regs (void)
{
  /* This finishes what was started by init_reg_sets, but couldn't be done
     until after register usage was specified (e.g. by -ffixed-REG).  */
  init_reg_sets_1 ();
}
499 | |
500 | /* The same as previous function plus initializing IRA. */ |
501 | void |
502 | reinit_regs (void) |
503 | { |
504 | init_regs (); |
505 | /* caller_save needs to be re-initialized. */ |
506 | caller_save_initialized_p = false; |
507 | if (this_target_rtl->target_specific_initialized) |
508 | { |
509 | ira_init (); |
510 | recog_init (); |
511 | } |
512 | } |
513 | |
514 | /* Initialize some fake stack-frame MEM references for use in |
515 | memory_move_secondary_cost. */ |
516 | void |
517 | init_fake_stack_mems (void) |
518 | { |
519 | int i; |
520 | |
521 | for (i = 0; i < MAX_MACHINE_MODE; i++) |
522 | top_of_stack[i] = gen_rtx_MEM ((machine_mode) i, stack_pointer_rtx); |
523 | } |
524 | |
525 | |
/* Compute cost of moving data from a register of class FROM to one of
   TO, using MODE.  Thin wrapper that defers entirely to the target's
   register_move_cost hook.  */

int
register_move_cost (machine_mode mode, reg_class_t from, reg_class_t to)
{
  return targetm.register_move_cost (mode, from, to);
}
534 | |
/* Compute cost of moving registers to/from memory.  IN selects the
   direction (load vs. store); the target's memory_move_cost hook
   supplies the actual cost model.  */

int
memory_move_cost (machine_mode mode, reg_class_t rclass, bool in)
{
  return targetm.memory_move_cost (mode, rclass, in);
}
542 | |
543 | /* Compute extra cost of moving registers to/from memory due to reloads. |
544 | Only needed if secondary reloads are required for memory moves. */ |
545 | int |
546 | memory_move_secondary_cost (machine_mode mode, reg_class_t rclass, |
547 | bool in) |
548 | { |
549 | reg_class_t altclass; |
550 | int partial_cost = 0; |
551 | /* We need a memory reference to feed to SECONDARY... macros. */ |
552 | /* mem may be unused even if the SECONDARY_ macros are defined. */ |
553 | rtx mem ATTRIBUTE_UNUSED = top_of_stack[(int) mode]; |
554 | |
555 | altclass = secondary_reload_class (in ? 1 : 0, rclass, mode, mem); |
556 | |
557 | if (altclass == NO_REGS) |
558 | return 0; |
559 | |
560 | if (in) |
561 | partial_cost = register_move_cost (mode, from: altclass, to: rclass); |
562 | else |
563 | partial_cost = register_move_cost (mode, from: rclass, to: altclass); |
564 | |
565 | if (rclass == altclass) |
566 | /* This isn't simply a copy-to-temporary situation. Can't guess |
567 | what it is, so TARGET_MEMORY_MOVE_COST really ought not to be |
568 | calling here in that case. |
569 | |
570 | I'm tempted to put in an assert here, but returning this will |
571 | probably only give poor estimates, which is what we would've |
572 | had before this code anyways. */ |
573 | return partial_cost; |
574 | |
575 | /* Check if the secondary reload register will also need a |
576 | secondary reload. */ |
577 | return memory_move_secondary_cost (mode, rclass: altclass, in) + partial_cost; |
578 | } |
579 | |
580 | /* Return a machine mode that is legitimate for hard reg REGNO and large |
581 | enough to save nregs. If we can't find one, return VOIDmode. |
582 | If ABI is nonnull, only consider modes that are preserved across |
583 | calls that use ABI. */ |
584 | machine_mode |
585 | choose_hard_reg_mode (unsigned int regno ATTRIBUTE_UNUSED, |
586 | unsigned int nregs, const predefined_function_abi *abi) |
587 | { |
588 | unsigned int /* machine_mode */ m; |
589 | machine_mode found_mode = VOIDmode, mode; |
590 | |
591 | /* We first look for the largest integer mode that can be validly |
592 | held in REGNO. If none, we look for the largest floating-point mode. |
593 | If we still didn't find a valid mode, try CCmode. |
594 | |
595 | The tests use maybe_gt rather than known_gt because we want (for example) |
596 | N V4SFs to win over plain V4SF even though N might be 1. */ |
597 | FOR_EACH_MODE_IN_CLASS (mode, MODE_INT) |
598 | if (hard_regno_nregs (regno, mode) == nregs |
599 | && targetm.hard_regno_mode_ok (regno, mode) |
600 | && (!abi || !abi->clobbers_reg_p (mode, regno)) |
601 | && maybe_gt (GET_MODE_SIZE (mode), GET_MODE_SIZE (found_mode))) |
602 | found_mode = mode; |
603 | |
604 | FOR_EACH_MODE_IN_CLASS (mode, MODE_FLOAT) |
605 | if (hard_regno_nregs (regno, mode) == nregs |
606 | && targetm.hard_regno_mode_ok (regno, mode) |
607 | && (!abi || !abi->clobbers_reg_p (mode, regno)) |
608 | && maybe_gt (GET_MODE_SIZE (mode), GET_MODE_SIZE (found_mode))) |
609 | found_mode = mode; |
610 | |
611 | FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_FLOAT) |
612 | if (hard_regno_nregs (regno, mode) == nregs |
613 | && targetm.hard_regno_mode_ok (regno, mode) |
614 | && (!abi || !abi->clobbers_reg_p (mode, regno)) |
615 | && maybe_gt (GET_MODE_SIZE (mode), GET_MODE_SIZE (found_mode))) |
616 | found_mode = mode; |
617 | |
618 | FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_INT) |
619 | if (hard_regno_nregs (regno, mode) == nregs |
620 | && targetm.hard_regno_mode_ok (regno, mode) |
621 | && (!abi || !abi->clobbers_reg_p (mode, regno)) |
622 | && maybe_gt (GET_MODE_SIZE (mode), GET_MODE_SIZE (found_mode))) |
623 | found_mode = mode; |
624 | |
625 | if (found_mode != VOIDmode) |
626 | return found_mode; |
627 | |
628 | /* Iterate over all of the CCmodes. */ |
629 | for (m = (unsigned int) CCmode; m < (unsigned int) NUM_MACHINE_MODES; ++m) |
630 | { |
631 | mode = (machine_mode) m; |
632 | if (hard_regno_nregs (regno, mode) == nregs |
633 | && targetm.hard_regno_mode_ok (regno, mode) |
634 | && (!abi || !abi->clobbers_reg_p (mode, regno))) |
635 | return mode; |
636 | } |
637 | |
638 | /* We can't find a mode valid for this register. */ |
639 | return VOIDmode; |
640 | } |
641 | |
642 | /* Specify the usage characteristics of the register named NAME. |
643 | It should be a fixed register if FIXED and a |
644 | call-used register if CALL_USED. */ |
645 | void |
646 | fix_register (const char *name, int fixed, int call_used) |
647 | { |
648 | int i; |
649 | int reg, nregs; |
650 | |
651 | /* Decode the name and update the primary form of |
652 | the register info. */ |
653 | |
654 | if ((reg = decode_reg_name_and_count (name, &nregs)) >= 0) |
655 | { |
656 | gcc_assert (nregs >= 1); |
657 | for (i = reg; i < reg + nregs; i++) |
658 | { |
659 | if ((i == STACK_POINTER_REGNUM |
660 | #ifdef HARD_FRAME_POINTER_REGNUM |
661 | || i == HARD_FRAME_POINTER_REGNUM |
662 | #else |
663 | || i == FRAME_POINTER_REGNUM |
664 | #endif |
665 | ) |
666 | && (fixed == 0 || call_used == 0)) |
667 | { |
668 | switch (fixed) |
669 | { |
670 | case 0: |
671 | switch (call_used) |
672 | { |
673 | case 0: |
674 | error ("cannot use %qs as a call-saved register" , name); |
675 | break; |
676 | |
677 | case 1: |
678 | error ("cannot use %qs as a call-used register" , name); |
679 | break; |
680 | |
681 | default: |
682 | gcc_unreachable (); |
683 | } |
684 | break; |
685 | |
686 | case 1: |
687 | switch (call_used) |
688 | { |
689 | case 1: |
690 | error ("cannot use %qs as a fixed register" , name); |
691 | break; |
692 | |
693 | case 0: |
694 | default: |
695 | gcc_unreachable (); |
696 | } |
697 | break; |
698 | |
699 | default: |
700 | gcc_unreachable (); |
701 | } |
702 | } |
703 | else |
704 | { |
705 | fixed_regs[i] = fixed; |
706 | #ifdef CALL_REALLY_USED_REGISTERS |
707 | if (fixed == 0) |
708 | call_used_regs[i] = call_used; |
709 | #else |
710 | call_used_regs[i] = call_used; |
711 | #endif |
712 | } |
713 | } |
714 | } |
715 | else |
716 | { |
717 | warning (0, "unknown register name: %s" , name); |
718 | } |
719 | } |
720 | |
721 | /* Mark register number I as global. */ |
722 | void |
723 | globalize_reg (tree decl, int i) |
724 | { |
725 | location_t loc = DECL_SOURCE_LOCATION (decl); |
726 | |
727 | #ifdef STACK_REGS |
728 | if (IN_RANGE (i, FIRST_STACK_REG, LAST_STACK_REG)) |
729 | { |
730 | error ("stack register used for global register variable" ); |
731 | return; |
732 | } |
733 | #endif |
734 | |
735 | if (fixed_regs[i] == 0 && no_global_reg_vars) |
736 | error_at (loc, "global register variable follows a function definition" ); |
737 | |
738 | if (global_regs[i]) |
739 | { |
740 | auto_diagnostic_group d; |
741 | warning_at (loc, 0, |
742 | "register of %qD used for multiple global register variables" , |
743 | decl); |
744 | inform (DECL_SOURCE_LOCATION (global_regs_decl[i]), |
745 | "conflicts with %qD" , global_regs_decl[i]); |
746 | return; |
747 | } |
748 | |
749 | if (call_used_regs[i] && ! fixed_regs[i]) |
750 | warning_at (loc, 0, "call-clobbered register used for global register variable" ); |
751 | |
752 | global_regs[i] = 1; |
753 | global_regs_decl[i] = decl; |
754 | SET_HARD_REG_BIT (set&: global_reg_set, bit: i); |
755 | |
756 | /* If we're globalizing the frame pointer, we need to set the |
757 | appropriate regs_invalidated_by_call bit, even if it's already |
758 | set in fixed_regs. */ |
759 | if (i != STACK_POINTER_REGNUM) |
760 | { |
761 | SET_HARD_REG_BIT (regs_invalidated_by_call, bit: i); |
762 | for (unsigned int j = 0; j < NUM_ABI_IDS; ++j) |
763 | function_abis[j].add_full_reg_clobber (i); |
764 | } |
765 | |
766 | /* If already fixed, nothing else to do. */ |
767 | if (fixed_regs[i]) |
768 | return; |
769 | |
770 | fixed_regs[i] = call_used_regs[i] = 1; |
771 | |
772 | SET_HARD_REG_BIT (fixed_reg_set, bit: i); |
773 | |
774 | reinit_regs (); |
775 | } |
776 | |
777 | |
/* Structure used to record preferences of given pseudo.  Each field holds
   a reg_class value narrowed to a char.  */
struct reg_pref
{
  /* (enum reg_class) prefclass is the preferred class.  May be
     NO_REGS if no class is better than memory.  */
  char prefclass;

  /* altclass is a register class that we should use for allocating
     pseudo if no register in the preferred class is available.
     If no register in this class is available, memory is preferred.

     It might appear to be more general to have a bitmask of classes here,
     but since it is recommended that there be a class corresponding to the
     union of most major pair of classes, that generality is not required.  */
  char altclass;

  /* allocnoclass is a register class that IRA uses for allocating
     the pseudo.  */
  char allocnoclass;
};
798 | |
/* Record preferences of each pseudo.  This is available after RA is
   run.  */
static struct reg_pref *reg_pref;

/* Current allocated length of the reg_pref and reg_renumber arrays.  */
static int reg_info_size;
/* Value of max_reg_num () as of the last resize_reg_info call.  */
static int max_regno_since_last_resize;
807 | |
808 | /* Return the reg_class in which pseudo reg number REGNO is best allocated. |
809 | This function is sometimes called before the info has been computed. |
810 | When that happens, just return GENERAL_REGS, which is innocuous. */ |
811 | enum reg_class |
812 | reg_preferred_class (int regno) |
813 | { |
814 | if (reg_pref == 0) |
815 | return GENERAL_REGS; |
816 | |
817 | gcc_assert (regno < reg_info_size); |
818 | return (enum reg_class) reg_pref[regno].prefclass; |
819 | } |
820 | |
821 | enum reg_class |
822 | reg_alternate_class (int regno) |
823 | { |
824 | if (reg_pref == 0) |
825 | return ALL_REGS; |
826 | |
827 | gcc_assert (regno < reg_info_size); |
828 | return (enum reg_class) reg_pref[regno].altclass; |
829 | } |
830 | |
831 | /* Return the reg_class which is used by IRA for its allocation. */ |
832 | enum reg_class |
833 | reg_allocno_class (int regno) |
834 | { |
835 | if (reg_pref == 0) |
836 | return NO_REGS; |
837 | |
838 | gcc_assert (regno < reg_info_size); |
839 | return (enum reg_class) reg_pref[regno].allocnoclass; |
840 | } |
841 | |
842 | |
843 | |
844 | /* Allocate space for reg info and initilize it. */ |
845 | static void |
846 | allocate_reg_info (void) |
847 | { |
848 | int i; |
849 | |
850 | max_regno_since_last_resize = max_reg_num (); |
851 | reg_info_size = max_regno_since_last_resize * 3 / 2 + 1; |
852 | gcc_assert (! reg_pref && ! reg_renumber); |
853 | reg_renumber = XNEWVEC (short, reg_info_size); |
854 | reg_pref = XCNEWVEC (struct reg_pref, reg_info_size); |
855 | memset (s: reg_renumber, c: -1, n: reg_info_size * sizeof (short)); |
856 | for (i = 0; i < reg_info_size; i++) |
857 | { |
858 | reg_pref[i].prefclass = GENERAL_REGS; |
859 | reg_pref[i].altclass = ALL_REGS; |
860 | reg_pref[i].allocnoclass = GENERAL_REGS; |
861 | } |
862 | } |
863 | |
864 | |
865 | /* Resize reg info. The new elements will be initialized. Return TRUE |
866 | if new pseudos were added since the last call. */ |
867 | bool |
868 | resize_reg_info (void) |
869 | { |
870 | int old, i; |
871 | bool change_p; |
872 | |
873 | if (reg_pref == NULL) |
874 | { |
875 | allocate_reg_info (); |
876 | return true; |
877 | } |
878 | change_p = max_regno_since_last_resize != max_reg_num (); |
879 | max_regno_since_last_resize = max_reg_num (); |
880 | if (reg_info_size >= max_reg_num ()) |
881 | return change_p; |
882 | old = reg_info_size; |
883 | reg_info_size = max_reg_num () * 3 / 2 + 1; |
884 | gcc_assert (reg_pref && reg_renumber); |
885 | reg_renumber = XRESIZEVEC (short, reg_renumber, reg_info_size); |
886 | reg_pref = XRESIZEVEC (struct reg_pref, reg_pref, reg_info_size); |
887 | memset (s: reg_pref + old, c: -1, |
888 | n: (reg_info_size - old) * sizeof (struct reg_pref)); |
889 | memset (s: reg_renumber + old, c: -1, n: (reg_info_size - old) * sizeof (short)); |
890 | for (i = old; i < reg_info_size; i++) |
891 | { |
892 | reg_pref[i].prefclass = GENERAL_REGS; |
893 | reg_pref[i].altclass = ALL_REGS; |
894 | reg_pref[i].allocnoclass = GENERAL_REGS; |
895 | } |
896 | return true; |
897 | } |
898 | |
899 | |
900 | /* Free up the space allocated by allocate_reg_info. */ |
901 | void |
902 | free_reg_info (void) |
903 | { |
904 | if (reg_pref) |
905 | { |
906 | free (ptr: reg_pref); |
907 | reg_pref = NULL; |
908 | } |
909 | |
910 | if (reg_renumber) |
911 | { |
912 | free (ptr: reg_renumber); |
913 | reg_renumber = NULL; |
914 | } |
915 | } |
916 | |
917 | /* Initialize some global data for this pass. */ |
918 | static unsigned int |
919 | reginfo_init (void) |
920 | { |
921 | if (df) |
922 | df_compute_regs_ever_live (true); |
923 | |
924 | /* This prevents dump_reg_info from losing if called |
925 | before reginfo is run. */ |
926 | reg_pref = NULL; |
927 | reg_info_size = max_regno_since_last_resize = 0; |
928 | /* No more global register variables may be declared. */ |
929 | no_global_reg_vars = 1; |
930 | return 1; |
931 | } |
932 | |
933 | namespace { |
934 | |
935 | const pass_data pass_data_reginfo_init = |
936 | { |
937 | .type: RTL_PASS, /* type */ |
938 | .name: "reginfo" , /* name */ |
939 | .optinfo_flags: OPTGROUP_NONE, /* optinfo_flags */ |
940 | .tv_id: TV_NONE, /* tv_id */ |
941 | .properties_required: 0, /* properties_required */ |
942 | .properties_provided: 0, /* properties_provided */ |
943 | .properties_destroyed: 0, /* properties_destroyed */ |
944 | .todo_flags_start: 0, /* todo_flags_start */ |
945 | .todo_flags_finish: 0, /* todo_flags_finish */ |
946 | }; |
947 | |
948 | class pass_reginfo_init : public rtl_opt_pass |
949 | { |
950 | public: |
951 | pass_reginfo_init (gcc::context *ctxt) |
952 | : rtl_opt_pass (pass_data_reginfo_init, ctxt) |
953 | {} |
954 | |
955 | /* opt_pass methods: */ |
956 | unsigned int execute (function *) final override { return reginfo_init (); } |
957 | |
958 | }; // class pass_reginfo_init |
959 | |
960 | } // anon namespace |
961 | |
/* Create the reginfo pass object for CTXT.  Ownership of the returned
   pass is transferred to the caller (the pass manager).  */
rtl_opt_pass *
make_pass_reginfo_init (gcc::context *ctxt)
{
  return new pass_reginfo_init (ctxt);
}
967 | |
968 | |
969 | |
970 | /* Set up preferred, alternate, and allocno classes for REGNO as |
971 | PREFCLASS, ALTCLASS, and ALLOCNOCLASS. */ |
972 | void |
973 | setup_reg_classes (int regno, |
974 | enum reg_class prefclass, enum reg_class altclass, |
975 | enum reg_class allocnoclass) |
976 | { |
977 | if (reg_pref == NULL) |
978 | return; |
979 | gcc_assert (reg_info_size >= max_reg_num ()); |
980 | reg_pref[regno].prefclass = prefclass; |
981 | reg_pref[regno].altclass = altclass; |
982 | reg_pref[regno].allocnoclass = allocnoclass; |
983 | } |
984 | |
985 | |
986 | /* This is the `regscan' pass of the compiler, run just before cse and |
987 | again just before loop. It finds the first and last use of each |
988 | pseudo-register. */ |
989 | |
990 | static void reg_scan_mark_refs (rtx, rtx_insn *); |
991 | |
992 | void |
993 | reg_scan (rtx_insn *f, unsigned int nregs ATTRIBUTE_UNUSED) |
994 | { |
995 | rtx_insn *insn; |
996 | |
997 | timevar_push (tv: TV_REG_SCAN); |
998 | |
999 | for (insn = f; insn; insn = NEXT_INSN (insn)) |
1000 | if (INSN_P (insn)) |
1001 | { |
1002 | reg_scan_mark_refs (PATTERN (insn), insn); |
1003 | if (REG_NOTES (insn)) |
1004 | reg_scan_mark_refs (REG_NOTES (insn), insn); |
1005 | } |
1006 | |
1007 | timevar_pop (tv: TV_REG_SCAN); |
1008 | } |
1009 | |
1010 | |
/* X is the expression to scan.  INSN is the insn it appears in.
   Recursively scan X, marking pseudo destinations of SETs with
   REG_POINTER when they are assigned pointer-like values, and
   propagating register attributes (REG_EXPR/REG_ATTRS) for simple
   register copies.  */
static void
reg_scan_mark_refs (rtx x, rtx_insn *insn)
{
  enum rtx_code code;
  rtx dest;
  rtx note;

  if (!x)
    return;
  code = GET_CODE (x);
  switch (code)
    {
    /* Leaf rtxes: nothing to record, nothing to recurse into.  */
    case CONST:
    CASE_CONST_ANY:
    case PC:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case REG:
      return;

    case EXPR_LIST:
      if (XEXP (x, 0))
	reg_scan_mark_refs (XEXP (x, 0), insn);
      if (XEXP (x, 1))
	reg_scan_mark_refs (XEXP (x, 1), insn);
      break;

    case INSN_LIST:
    case INT_LIST:
      /* Only the tail of these list nodes holds further rtxes.  */
      if (XEXP (x, 1))
	reg_scan_mark_refs (XEXP (x, 1), insn);
      break;

    case CLOBBER:
      /* A clobber of memory still uses the address expression.  */
      if (MEM_P (XEXP (x, 0)))
	reg_scan_mark_refs (XEXP (XEXP (x, 0), 0), insn);
      break;

    case SET:
      /* Count a set of the destination if it is a register.  */
      for (dest = SET_DEST (x);
	   GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART
	   || GET_CODE (dest) == ZERO_EXTRACT;
	   dest = XEXP (dest, 0))
	;

      /* If this is setting a pseudo from another pseudo or the sum of a
	 pseudo and a constant integer and the other pseudo is known to be
	 a pointer, set the destination to be a pointer as well.

	 Likewise if it is setting the destination from an address or from a
	 value equivalent to an address or to the sum of an address and
	 something else.

	 But don't do any of this if the pseudo corresponds to a user
	 variable since it should have already been set as a pointer based
	 on the type.  */

      if (REG_P (SET_DEST (x))
	  && REGNO (SET_DEST (x)) >= FIRST_PSEUDO_REGISTER
	  /* If the destination pseudo is set more than once, then other
	     sets might not be to a pointer value (consider access to a
	     union in two threads of control in the presence of global
	     optimizations).  So only set REG_POINTER on the destination
	     pseudo if this is the only set of that pseudo.  */
	  && DF_REG_DEF_COUNT (REGNO (SET_DEST (x))) == 1
	  && ! REG_USERVAR_P (SET_DEST (x))
	  && ! REG_POINTER (SET_DEST (x))
	  && ((REG_P (SET_SRC (x))
	       && REG_POINTER (SET_SRC (x)))
	      || ((GET_CODE (SET_SRC (x)) == PLUS
		   || GET_CODE (SET_SRC (x)) == LO_SUM)
		  && CONST_INT_P (XEXP (SET_SRC (x), 1))
		  && REG_P (XEXP (SET_SRC (x), 0))
		  && REG_POINTER (XEXP (SET_SRC (x), 0)))
	      || GET_CODE (SET_SRC (x)) == CONST
	      || GET_CODE (SET_SRC (x)) == SYMBOL_REF
	      || GET_CODE (SET_SRC (x)) == LABEL_REF
	      || (GET_CODE (SET_SRC (x)) == HIGH
		  && (GET_CODE (XEXP (SET_SRC (x), 0)) == CONST
		      || GET_CODE (XEXP (SET_SRC (x), 0)) == SYMBOL_REF
		      || GET_CODE (XEXP (SET_SRC (x), 0)) == LABEL_REF))
	      || ((GET_CODE (SET_SRC (x)) == PLUS
		   || GET_CODE (SET_SRC (x)) == LO_SUM)
		  && (GET_CODE (XEXP (SET_SRC (x), 1)) == CONST
		      || GET_CODE (XEXP (SET_SRC (x), 1)) == SYMBOL_REF
		      || GET_CODE (XEXP (SET_SRC (x), 1)) == LABEL_REF))
	      || ((note = find_reg_note (insn, REG_EQUAL, 0)) != 0
		  && (GET_CODE (XEXP (note, 0)) == CONST
		      || GET_CODE (XEXP (note, 0)) == SYMBOL_REF
		      || GET_CODE (XEXP (note, 0)) == LABEL_REF))))
	REG_POINTER (SET_DEST (x)) = 1;

      /* If this is setting a register from a register or from a simple
	 conversion of a register, propagate REG_EXPR.  */
      if (REG_P (dest) && !REG_ATTRS (dest))
	set_reg_attrs_from_value (dest, SET_SRC (x));

      /* fall through */

    default:
      {
	/* Generic recursion over all operands of X.  */
	const char *fmt = GET_RTX_FORMAT (code);
	int i;
	for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	  {
	    if (fmt[i] == 'e')
	      reg_scan_mark_refs (XEXP (x, i), insn);
	    else if (fmt[i] == 'E' && XVEC (x, i) != 0)
	      {
		int j;
		for (j = XVECLEN (x, i) - 1; j >= 0; j--)
		  reg_scan_mark_refs (XVECEXP (x, i, j), insn);
	      }
	  }
      }
    }
}
1135 | |
1136 | |
1137 | /* Return true if C1 is a subset of C2, i.e., if every register in C1 |
1138 | is also in C2. */ |
1139 | bool |
1140 | reg_class_subset_p (reg_class_t c1, reg_class_t c2) |
1141 | { |
1142 | return (c1 == c2 |
1143 | || c2 == ALL_REGS |
1144 | || hard_reg_set_subset_p (reg_class_contents[(int) c1], |
1145 | reg_class_contents[(int) c2])); |
1146 | } |
1147 | |
1148 | /* Return true if there is a register that is in both C1 and C2. */ |
1149 | bool |
1150 | reg_classes_intersect_p (reg_class_t c1, reg_class_t c2) |
1151 | { |
1152 | return (c1 == c2 |
1153 | || c1 == ALL_REGS |
1154 | || c2 == ALL_REGS |
1155 | || hard_reg_set_intersect_p (reg_class_contents[(int) c1], |
1156 | reg_class_contents[(int) c2])); |
1157 | } |
1158 | |
1159 | |
1160 | inline hashval_t |
1161 | simplifiable_subregs_hasher::hash (const simplifiable_subreg *value) |
1162 | { |
1163 | inchash::hash h; |
1164 | h.add_hwi (v: value->shape.unique_id ()); |
1165 | return h.end (); |
1166 | } |
1167 | |
/* Return true if cached entry VALUE describes the shape COMPARE.  */
inline bool
simplifiable_subregs_hasher::equal (const simplifiable_subreg *value,
				    const subreg_shape *compare)
{
  return value->shape == *compare;
}
1174 | |
1175 | inline simplifiable_subreg::simplifiable_subreg (const subreg_shape &shape_in) |
1176 | : shape (shape_in) |
1177 | { |
1178 | CLEAR_HARD_REG_SET (set&: simplifiable_regs); |
1179 | } |
1180 | |
1181 | /* Return the set of hard registers that are able to form the subreg |
1182 | described by SHAPE. */ |
1183 | |
1184 | const HARD_REG_SET & |
1185 | simplifiable_subregs (const subreg_shape &shape) |
1186 | { |
1187 | if (!this_target_hard_regs->x_simplifiable_subregs) |
1188 | this_target_hard_regs->x_simplifiable_subregs |
1189 | = new hash_table <simplifiable_subregs_hasher> (30); |
1190 | inchash::hash h; |
1191 | h.add_hwi (v: shape.unique_id ()); |
1192 | simplifiable_subreg **slot |
1193 | = (this_target_hard_regs->x_simplifiable_subregs |
1194 | ->find_slot_with_hash (comparable: &shape, hash: h.end (), insert: INSERT)); |
1195 | |
1196 | if (!*slot) |
1197 | { |
1198 | simplifiable_subreg *info = new simplifiable_subreg (shape); |
1199 | for (unsigned int i = 0; i < FIRST_PSEUDO_REGISTER; ++i) |
1200 | if (targetm.hard_regno_mode_ok (i, shape.inner_mode) |
1201 | && simplify_subreg_regno (i, shape.inner_mode, shape.offset, |
1202 | shape.outer_mode) >= 0) |
1203 | SET_HARD_REG_BIT (set&: info->simplifiable_regs, bit: i); |
1204 | *slot = info; |
1205 | } |
1206 | return (*slot)->simplifiable_regs; |
1207 | } |
1208 | |
/* Passes for keeping and updating info about modes of registers
   inside subregisters.  */

/* Per-pseudo (indexed by regno) set of hard registers that can hold
   the pseudo across every recorded subreg access, or NULL if no
   restricting subreg of that pseudo has been seen.  */
static HARD_REG_SET **valid_mode_changes;
/* Obstack holding the HARD_REG_SETs pointed to by valid_mode_changes.  */
static obstack valid_mode_changes_obstack;
1214 | |
1215 | /* Restrict the choice of register for SUBREG_REG (SUBREG) based |
1216 | on information about SUBREG. |
1217 | |
1218 | If PARTIAL_DEF, SUBREG is a partial definition of a multipart inner |
1219 | register and we want to ensure that the other parts of the inner |
1220 | register are correctly preserved. If !PARTIAL_DEF we need to |
1221 | ensure that SUBREG itself can be formed. */ |
1222 | |
1223 | static void |
1224 | record_subregs_of_mode (rtx subreg, bool partial_def) |
1225 | { |
1226 | unsigned int regno; |
1227 | |
1228 | if (!REG_P (SUBREG_REG (subreg))) |
1229 | return; |
1230 | |
1231 | regno = REGNO (SUBREG_REG (subreg)); |
1232 | if (regno < FIRST_PSEUDO_REGISTER) |
1233 | return; |
1234 | |
1235 | subreg_shape shape (shape_of_subreg (x: subreg)); |
1236 | if (partial_def) |
1237 | { |
1238 | /* The number of independently-accessible SHAPE.outer_mode values |
1239 | in SHAPE.inner_mode is GET_MODE_SIZE (SHAPE.inner_mode) / SIZE. |
1240 | We need to check that the assignment will preserve all the other |
1241 | SIZE-byte chunks in the inner register besides the one that |
1242 | includes SUBREG. |
1243 | |
1244 | In practice it is enough to check whether an equivalent |
1245 | SHAPE.inner_mode value in an adjacent SIZE-byte chunk can be formed. |
1246 | If the underlying registers are small enough, both subregs will |
1247 | be valid. If the underlying registers are too large, one of the |
1248 | subregs will be invalid. |
1249 | |
1250 | This relies on the fact that we've already been passed |
1251 | SUBREG with PARTIAL_DEF set to false. |
1252 | |
1253 | The size of the outer mode must ordered wrt the size of the |
1254 | inner mode's registers, since otherwise we wouldn't know at |
1255 | compile time how many registers the outer mode occupies. */ |
1256 | poly_uint64 size = ordered_max (REGMODE_NATURAL_SIZE (shape.inner_mode), |
1257 | b: GET_MODE_SIZE (mode: shape.outer_mode)); |
1258 | gcc_checking_assert (known_lt (size, GET_MODE_SIZE (shape.inner_mode))); |
1259 | if (known_ge (shape.offset, size)) |
1260 | shape.offset -= size; |
1261 | else |
1262 | shape.offset += size; |
1263 | } |
1264 | |
1265 | if (valid_mode_changes[regno]) |
1266 | *valid_mode_changes[regno] &= simplifiable_subregs (shape); |
1267 | else |
1268 | { |
1269 | valid_mode_changes[regno] |
1270 | = XOBNEW (&valid_mode_changes_obstack, HARD_REG_SET); |
1271 | *valid_mode_changes[regno] = simplifiable_subregs (shape); |
1272 | } |
1273 | } |
1274 | |
1275 | /* Call record_subregs_of_mode for all the subregs in X. */ |
1276 | static void |
1277 | find_subregs_of_mode (rtx x) |
1278 | { |
1279 | enum rtx_code code = GET_CODE (x); |
1280 | const char * const fmt = GET_RTX_FORMAT (code); |
1281 | int i; |
1282 | |
1283 | if (code == SUBREG) |
1284 | record_subregs_of_mode (subreg: x, partial_def: false); |
1285 | |
1286 | /* Time for some deep diving. */ |
1287 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
1288 | { |
1289 | if (fmt[i] == 'e') |
1290 | find_subregs_of_mode (XEXP (x, i)); |
1291 | else if (fmt[i] == 'E') |
1292 | { |
1293 | int j; |
1294 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
1295 | find_subregs_of_mode (XVECEXP (x, i, j)); |
1296 | } |
1297 | } |
1298 | } |
1299 | |
1300 | void |
1301 | init_subregs_of_mode (void) |
1302 | { |
1303 | basic_block bb; |
1304 | rtx_insn *insn; |
1305 | |
1306 | gcc_obstack_init (&valid_mode_changes_obstack); |
1307 | valid_mode_changes = XCNEWVEC (HARD_REG_SET *, max_reg_num ()); |
1308 | |
1309 | FOR_EACH_BB_FN (bb, cfun) |
1310 | FOR_BB_INSNS (bb, insn) |
1311 | if (NONDEBUG_INSN_P (insn)) |
1312 | { |
1313 | find_subregs_of_mode (x: PATTERN (insn)); |
1314 | df_ref def; |
1315 | FOR_EACH_INSN_DEF (def, insn) |
1316 | if (DF_REF_FLAGS_IS_SET (def, DF_REF_PARTIAL) |
1317 | && read_modify_subreg_p (DF_REF_REG (def))) |
1318 | record_subregs_of_mode (DF_REF_REG (def), partial_def: true); |
1319 | } |
1320 | } |
1321 | |
1322 | const HARD_REG_SET * |
1323 | valid_mode_changes_for_regno (unsigned int regno) |
1324 | { |
1325 | return valid_mode_changes[regno]; |
1326 | } |
1327 | |
1328 | void |
1329 | finish_subregs_of_mode (void) |
1330 | { |
1331 | XDELETEVEC (valid_mode_changes); |
1332 | obstack_free (&valid_mode_changes_obstack, NULL); |
1333 | } |
1334 | |
1335 | /* Free all data attached to the structure. This isn't a destructor because |
1336 | we don't want to run on exit. */ |
1337 | |
1338 | void |
1339 | target_hard_regs::finalize () |
1340 | { |
1341 | delete x_simplifiable_subregs; |
1342 | } |
1343 | |