1 | /* Expand the basic unary and binary arithmetic operations, for GNU compiler. |
2 | Copyright (C) 1987-2023 Free Software Foundation, Inc. |
3 | |
4 | This file is part of GCC. |
5 | |
6 | GCC is free software; you can redistribute it and/or modify it under |
7 | the terms of the GNU General Public License as published by the Free |
8 | Software Foundation; either version 3, or (at your option) any later |
9 | version. |
10 | |
11 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or |
13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
14 | for more details. |
15 | |
16 | You should have received a copy of the GNU General Public License |
17 | along with GCC; see the file COPYING3. If not see |
18 | <http://www.gnu.org/licenses/>. */ |
19 | |
20 | |
21 | #include "config.h" |
22 | #include "system.h" |
23 | #include "coretypes.h" |
24 | #include "backend.h" |
25 | #include "target.h" |
26 | #include "rtl.h" |
27 | #include "tree.h" |
28 | #include "memmodel.h" |
29 | #include "predict.h" |
30 | #include "tm_p.h" |
31 | #include "optabs.h" |
32 | #include "expmed.h" |
33 | #include "emit-rtl.h" |
34 | #include "recog.h" |
35 | #include "diagnostic-core.h" |
36 | #include "rtx-vector-builder.h" |
37 | |
38 | /* Include insn-config.h before expr.h so that HAVE_conditional_move |
39 | is properly defined. */ |
40 | #include "stor-layout.h" |
41 | #include "except.h" |
42 | #include "dojump.h" |
43 | #include "explow.h" |
44 | #include "expr.h" |
45 | #include "optabs-tree.h" |
46 | #include "libfuncs.h" |
47 | #include "internal-fn.h" |
48 | #include "langhooks.h" |
49 | #include "gimple.h" |
50 | #include "ssa.h" |
51 | |
52 | static void prepare_float_lib_cmp (rtx, rtx, enum rtx_code, rtx *, |
53 | machine_mode *); |
54 | static rtx expand_unop_direct (machine_mode, optab, rtx, rtx, int); |
55 | static void emit_libcall_block_1 (rtx_insn *, rtx, rtx, rtx, bool); |
56 | |
57 | static rtx emit_conditional_move_1 (rtx, rtx, rtx, rtx, machine_mode); |
58 | |
59 | /* Debug facility for use in GDB. */ |
60 | void debug_optab_libfuncs (void); |
61 | |
62 | /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to |
63 | the result of operation CODE applied to OP0 (and OP1 if it is a binary |
64 | operation). OP0_MODE is OP0's mode. |
65 | |
66 | If the last insn does not set TARGET, don't do anything, but return true. |
67 | |
68 | If the last insn or a previous insn sets TARGET and TARGET is one of OP0 |
69 | or OP1, don't add the REG_EQUAL note but return false. Our caller can then |
70 | try again, ensuring that TARGET is not one of the operands. */ |
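/* For instance, if a PLUS is expanded as a multi-insn sequence, the note
attached to the final insn is (plus OP0 OP1), which lets later passes
such as CSE see the overall result of the sequence. */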
71 | |
72 | static bool |
73 | add_equal_note (rtx_insn *insns, rtx target, enum rtx_code code, rtx op0, |
74 | rtx op1, machine_mode op0_mode) |
75 | { |
76 | rtx_insn *last_insn; |
77 | rtx set; |
78 | rtx note; |
79 | |
80 | gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns)); |
81 | |
82 | if (GET_RTX_CLASS (code) != RTX_COMM_ARITH |
83 | && GET_RTX_CLASS (code) != RTX_BIN_ARITH |
84 | && GET_RTX_CLASS (code) != RTX_COMM_COMPARE |
85 | && GET_RTX_CLASS (code) != RTX_COMPARE |
86 | && GET_RTX_CLASS (code) != RTX_UNARY) |
87 | return true; |
88 | |
89 | if (GET_CODE (target) == ZERO_EXTRACT) |
90 | return true; |
91 | |
92 | for (last_insn = insns; |
NEXT_INSN (last_insn) != NULL_RTX;
last_insn = NEXT_INSN (last_insn))
95 | ; |
96 | |
97 | /* If TARGET is in OP0 or OP1, punt. We'd end up with a note referencing |
98 | a value changing in the insn, so the note would be invalid for CSE. */ |
99 | if (reg_overlap_mentioned_p (target, op0) |
100 | || (op1 && reg_overlap_mentioned_p (target, op1))) |
101 | { |
102 | if (MEM_P (target) |
103 | && (rtx_equal_p (target, op0) |
104 | || (op1 && rtx_equal_p (target, op1)))) |
105 | { |
106 | /* For MEM target, with MEM = MEM op X, prefer no REG_EQUAL note |
107 | over expanding it as temp = MEM op X, MEM = temp. If the target |
108 | supports MEM = MEM op X instructions, it is sometimes too hard |
109 | to reconstruct that form later, especially if X is also a memory, |
110 | and due to multiple occurrences of addresses the address might |
111 | be forced into register unnecessarily. |
112 | Note that not emitting the REG_EQUIV note might inhibit |
113 | CSE in some cases. */ |
set = single_set (last_insn);
115 | if (set |
116 | && GET_CODE (SET_SRC (set)) == code |
117 | && MEM_P (SET_DEST (set)) |
118 | && (rtx_equal_p (SET_DEST (set), XEXP (SET_SRC (set), 0)) |
119 | || (op1 && rtx_equal_p (SET_DEST (set), |
120 | XEXP (SET_SRC (set), 1))))) |
121 | return true; |
122 | } |
123 | return false; |
124 | } |
125 | |
126 | set = set_for_reg_notes (last_insn); |
127 | if (set == NULL_RTX) |
128 | return true; |
129 | |
130 | if (! rtx_equal_p (SET_DEST (set), target) |
131 | /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */ |
132 | && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART |
133 | || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target))) |
134 | return true; |
135 | |
136 | if (GET_RTX_CLASS (code) == RTX_UNARY) |
137 | switch (code) |
138 | { |
139 | case FFS: |
140 | case CLZ: |
141 | case CTZ: |
142 | case CLRSB: |
143 | case POPCOUNT: |
144 | case PARITY: |
145 | case BSWAP: |
146 | if (op0_mode != VOIDmode && GET_MODE (target) != op0_mode) |
147 | { |
148 | note = gen_rtx_fmt_e (code, op0_mode, copy_rtx (op0)); |
149 | if (GET_MODE_UNIT_SIZE (op0_mode) |
150 | > GET_MODE_UNIT_SIZE (GET_MODE (target))) |
note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
note, op0_mode);
else
note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
note, op0_mode);
156 | break; |
157 | } |
158 | /* FALLTHRU */ |
159 | default: |
160 | note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0)); |
161 | break; |
162 | } |
163 | else |
164 | note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1)); |
165 | |
166 | set_unique_reg_note (last_insn, REG_EQUAL, note); |
167 | |
168 | return true; |
169 | } |
170 | |
171 | /* Given two input operands, OP0 and OP1, determine what the correct from_mode |
172 | for a widening operation would be. In most cases this would be OP0, but if |
173 | that's a constant it'll be VOIDmode, which isn't useful. */ |
174 | |
175 | static machine_mode |
176 | widened_mode (machine_mode to_mode, rtx op0, rtx op1) |
177 | { |
178 | machine_mode m0 = GET_MODE (op0); |
179 | machine_mode m1 = GET_MODE (op1); |
180 | machine_mode result; |
181 | |
182 | if (m0 == VOIDmode && m1 == VOIDmode) |
183 | return to_mode; |
184 | else if (m0 == VOIDmode || GET_MODE_UNIT_SIZE (m0) < GET_MODE_UNIT_SIZE (m1)) |
185 | result = m1; |
186 | else |
187 | result = m0; |
188 | |
189 | if (GET_MODE_UNIT_SIZE (result) > GET_MODE_UNIT_SIZE (to_mode)) |
190 | return to_mode; |
191 | |
192 | return result; |
193 | } |
194 | |
195 | /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP |
196 | says whether OP is signed or unsigned. NO_EXTEND is true if we need |
197 | not actually do a sign-extend or zero-extend, but can leave the |
198 | higher-order bits of the result rtx undefined, for example, in the case |
199 | of logical operations, but not right shifts. */ |
200 | |
201 | static rtx |
202 | widen_operand (rtx op, machine_mode mode, machine_mode oldmode, |
203 | int unsignedp, bool no_extend) |
204 | { |
205 | rtx result; |
206 | scalar_int_mode int_mode; |
207 | |
208 | /* If we don't have to extend and this is a constant, return it. */ |
209 | if (no_extend && GET_MODE (op) == VOIDmode) |
210 | return op; |
211 | |
212 | /* If we must extend do so. If OP is a SUBREG for a promoted object, also |
213 | extend since it will be more efficient to do so unless the signedness of |
214 | a promoted object differs from our extension. */ |
215 | if (! no_extend |
|| !is_a <scalar_int_mode> (mode, &int_mode)
217 | || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op) |
218 | && SUBREG_CHECK_PROMOTED_SIGN (op, unsignedp))) |
return convert_modes (mode, oldmode, op, unsignedp);
220 | |
221 | /* If MODE is no wider than a single word, we return a lowpart or paradoxical |
222 | SUBREG. */ |
if (GET_MODE_SIZE (int_mode) <= UNITS_PER_WORD)
224 | return gen_lowpart (int_mode, force_reg (GET_MODE (op), op)); |
225 | |
226 | /* Otherwise, get an object of MODE, clobber it, and set the low-order |
227 | part to OP. */ |
228 | |
229 | result = gen_reg_rtx (int_mode); |
230 | emit_clobber (result); |
231 | emit_move_insn (gen_lowpart (GET_MODE (op), result), op); |
232 | return result; |
233 | } |
234 | |
235 | /* Expand vector widening operations. |
236 | |
237 | There are two different classes of operations handled here: |
238 | 1) Operations whose result is wider than all the arguments to the operation. |
239 | Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR |
240 | In this case OP0 and optionally OP1 would be initialized, |
241 | but WIDE_OP wouldn't (not relevant for this case). |
242 | 2) Operations whose result is of the same size as the last argument to the |
243 | operation, but wider than all the other arguments to the operation. |
244 | Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR. |
In this case WIDE_OP, OP0 and optionally OP1 would be initialized.
246 | |
247 | E.g, when called to expand the following operations, this is how |
248 | the arguments will be initialized: |
249 | nops OP0 OP1 WIDE_OP |
250 | widening-sum 2 oprnd0 - oprnd1 |
251 | widening-dot-product 3 oprnd0 oprnd1 oprnd2 |
252 | widening-mult 2 oprnd0 oprnd1 - |
253 | type-promotion (vec-unpack) 1 oprnd0 - - */ |
254 | |
255 | rtx |
256 | expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op, |
257 | rtx target, int unsignedp) |
258 | { |
259 | class expand_operand eops[4]; |
260 | tree oprnd0, oprnd1, oprnd2; |
261 | machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode; |
262 | optab widen_pattern_optab; |
263 | enum insn_code icode; |
264 | int nops = TREE_CODE_LENGTH (ops->code); |
265 | int op; |
266 | bool sbool = false; |
267 | |
268 | oprnd0 = ops->op0; |
269 | oprnd1 = nops >= 2 ? ops->op1 : NULL_TREE; |
270 | oprnd2 = nops >= 3 ? ops->op2 : NULL_TREE; |
271 | |
272 | tmode0 = TYPE_MODE (TREE_TYPE (oprnd0)); |
273 | if (ops->code == VEC_UNPACK_FIX_TRUNC_HI_EXPR |
274 | || ops->code == VEC_UNPACK_FIX_TRUNC_LO_EXPR) |
275 | /* The sign is from the result type rather than operand's type |
276 | for these ops. */ |
277 | widen_pattern_optab |
278 | = optab_for_tree_code (ops->code, ops->type, optab_default); |
279 | else if ((ops->code == VEC_UNPACK_HI_EXPR |
280 | || ops->code == VEC_UNPACK_LO_EXPR) |
281 | && VECTOR_BOOLEAN_TYPE_P (ops->type) |
282 | && VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (oprnd0)) |
283 | && TYPE_MODE (ops->type) == TYPE_MODE (TREE_TYPE (oprnd0)) |
284 | && SCALAR_INT_MODE_P (TYPE_MODE (ops->type))) |
285 | { |
286 | /* For VEC_UNPACK_{LO,HI}_EXPR if the mode of op0 and result is |
287 | the same scalar mode for VECTOR_BOOLEAN_TYPE_P vectors, use |
288 | vec_unpacks_sbool_{lo,hi}_optab, so that we can pass in |
289 | the pattern number of elements in the wider vector. */ |
290 | widen_pattern_optab |
291 | = (ops->code == VEC_UNPACK_HI_EXPR |
292 | ? vec_unpacks_sbool_hi_optab : vec_unpacks_sbool_lo_optab); |
293 | sbool = true; |
294 | } |
295 | else if (ops->code == DOT_PROD_EXPR) |
296 | { |
297 | enum optab_subtype subtype = optab_default; |
298 | signop sign1 = TYPE_SIGN (TREE_TYPE (oprnd0)); |
299 | signop sign2 = TYPE_SIGN (TREE_TYPE (oprnd1)); |
300 | if (sign1 == sign2) |
301 | ; |
302 | else if (sign1 == SIGNED && sign2 == UNSIGNED) |
303 | { |
304 | subtype = optab_vector_mixed_sign; |
305 | /* Same as optab_vector_mixed_sign but flip the operands. */ |
std::swap (op0, op1);
307 | } |
308 | else if (sign1 == UNSIGNED && sign2 == SIGNED) |
309 | subtype = optab_vector_mixed_sign; |
310 | else |
311 | gcc_unreachable (); |
312 | |
313 | widen_pattern_optab |
314 | = optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), subtype); |
315 | } |
316 | else |
317 | widen_pattern_optab |
318 | = optab_for_tree_code (ops->code, TREE_TYPE (oprnd0), optab_default); |
319 | if (ops->code == WIDEN_MULT_PLUS_EXPR |
320 | || ops->code == WIDEN_MULT_MINUS_EXPR) |
321 | icode = find_widening_optab_handler (widen_pattern_optab, |
322 | TYPE_MODE (TREE_TYPE (ops->op2)), |
323 | tmode0); |
324 | else |
icode = optab_handler (widen_pattern_optab, tmode0);
326 | gcc_assert (icode != CODE_FOR_nothing); |
327 | |
328 | if (nops >= 2) |
329 | tmode1 = TYPE_MODE (TREE_TYPE (oprnd1)); |
330 | else if (sbool) |
331 | { |
332 | nops = 2; |
333 | op1 = GEN_INT (TYPE_VECTOR_SUBPARTS (TREE_TYPE (oprnd0)).to_constant ()); |
334 | tmode1 = tmode0; |
335 | } |
336 | |
337 | /* The last operand is of a wider mode than the rest of the operands. */ |
338 | if (nops == 2) |
339 | wmode = tmode1; |
340 | else if (nops == 3) |
341 | { |
342 | gcc_assert (tmode1 == tmode0); |
343 | gcc_assert (op1); |
344 | wmode = TYPE_MODE (TREE_TYPE (oprnd2)); |
345 | } |
346 | |
347 | op = 0; |
create_output_operand (&eops[op++], target, TYPE_MODE (ops->type));
create_convert_operand_from (&eops[op++], op0, tmode0, unsignedp);
if (op1)
create_convert_operand_from (&eops[op++], op1, tmode1, unsignedp);
if (wide_op)
create_convert_operand_from (&eops[op++], wide_op, wmode, unsignedp);
expand_insn (icode, op, eops);
355 | return eops[0].value; |
356 | } |
357 | |
358 | /* Generate code to perform an operation specified by TERNARY_OPTAB |
359 | on operands OP0, OP1 and OP2, with result having machine-mode MODE. |
360 | |
361 | UNSIGNEDP is for the case where we have to widen the operands |
362 | to perform the operation. It says to use zero-extension. |
363 | |
364 | If TARGET is nonzero, the value |
365 | is generated there, if it is convenient to do so. |
366 | In all cases an rtx is returned for the locus of the value; |
367 | this may or may not be TARGET. */ |
368 | |
369 | rtx |
370 | expand_ternary_op (machine_mode mode, optab ternary_optab, rtx op0, |
371 | rtx op1, rtx op2, rtx target, int unsignedp) |
372 | { |
373 | class expand_operand ops[4]; |
enum insn_code icode = optab_handler (ternary_optab, mode);
375 | |
376 | gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing); |
377 | |
create_output_operand (&ops[0], target, mode);
create_convert_operand_from (&ops[1], op0, mode, unsignedp);
create_convert_operand_from (&ops[2], op1, mode, unsignedp);
create_convert_operand_from (&ops[3], op2, mode, unsignedp);
expand_insn (icode, 4, ops);
383 | return ops[0].value; |
384 | } |
385 | |
386 | |
387 | /* Like expand_binop, but return a constant rtx if the result can be |
388 | calculated at compile time. The arguments and return value are |
389 | otherwise the same as for expand_binop. */ |
390 | |
391 | rtx |
392 | simplify_expand_binop (machine_mode mode, optab binoptab, |
393 | rtx op0, rtx op1, rtx target, int unsignedp, |
394 | enum optab_methods methods) |
395 | { |
396 | if (CONSTANT_P (op0) && CONSTANT_P (op1)) |
397 | { |
rtx x = simplify_binary_operation (optab_to_code (binoptab),
mode, op0, op1);
400 | if (x) |
401 | return x; |
402 | } |
403 | |
404 | return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods); |
405 | } |
406 | |
407 | /* Like simplify_expand_binop, but always put the result in TARGET. |
408 | Return true if the expansion succeeded. */ |
409 | |
410 | bool |
411 | force_expand_binop (machine_mode mode, optab binoptab, |
412 | rtx op0, rtx op1, rtx target, int unsignedp, |
413 | enum optab_methods methods) |
414 | { |
415 | rtx x = simplify_expand_binop (mode, binoptab, op0, op1, |
416 | target, unsignedp, methods); |
417 | if (x == 0) |
418 | return false; |
419 | if (x != target) |
420 | emit_move_insn (target, x); |
421 | return true; |
422 | } |
423 | |
424 | /* Create a new vector value in VMODE with all elements set to OP. The |
425 | mode of OP must be the element mode of VMODE. If OP is a constant, |
426 | then the return value will be a constant. */ |
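/* For instance, broadcasting a QImode value X into V16QImode yields a
CONST_VECTOR of sixteen copies of X when X is constant, a vec_duplicate
insn when the target provides one, and otherwise a vec_init whose
elements are all X. */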
427 | |
428 | rtx |
429 | expand_vector_broadcast (machine_mode vmode, rtx op) |
430 | { |
431 | int n; |
432 | rtvec vec; |
433 | |
434 | gcc_checking_assert (VECTOR_MODE_P (vmode)); |
435 | |
436 | if (valid_for_const_vector_p (vmode, op)) |
437 | return gen_const_vec_duplicate (vmode, op); |
438 | |
insn_code icode = optab_handler (vec_duplicate_optab, vmode);
440 | if (icode != CODE_FOR_nothing) |
441 | { |
442 | class expand_operand ops[2]; |
create_output_operand (&ops[0], NULL_RTX, vmode);
create_input_operand (&ops[1], op, GET_MODE (op));
expand_insn (icode, 2, ops);
446 | return ops[0].value; |
447 | } |
448 | |
if (!GET_MODE_NUNITS (vmode).is_constant (&n))
450 | return NULL; |
451 | |
452 | /* ??? If the target doesn't have a vec_init, then we have no easy way |
453 | of performing this operation. Most of this sort of generic support |
454 | is hidden away in the vector lowering support in gimple. */ |
icode = convert_optab_handler (vec_init_optab, vmode,
456 | GET_MODE_INNER (vmode)); |
457 | if (icode == CODE_FOR_nothing) |
458 | return NULL; |
459 | |
460 | vec = rtvec_alloc (n); |
461 | for (int i = 0; i < n; ++i) |
462 | RTVEC_ELT (vec, i) = op; |
463 | rtx ret = gen_reg_rtx (vmode); |
464 | emit_insn (GEN_FCN (icode) (ret, gen_rtx_PARALLEL (vmode, vec))); |
465 | |
466 | return ret; |
467 | } |
468 | |
469 | /* This subroutine of expand_doubleword_shift handles the cases in which |
470 | the effective shift value is >= BITS_PER_WORD. The arguments and return |
471 | value are the same as for the parent routine, except that SUPERWORD_OP1 |
472 | is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET. |
473 | INTO_TARGET may be null if the caller has decided to calculate it. */ |
474 | |
475 | static bool |
476 | expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1, |
477 | rtx outof_target, rtx into_target, |
478 | int unsignedp, enum optab_methods methods) |
479 | { |
480 | if (into_target != 0) |
if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
into_target, unsignedp, methods))
483 | return false; |
484 | |
485 | if (outof_target != 0) |
486 | { |
487 | /* For a signed right shift, we must fill OUTOF_TARGET with copies |
488 | of the sign bit, otherwise we must fill it with zeros. */ |
489 | if (binoptab != ashr_optab) |
490 | emit_move_insn (outof_target, CONST0_RTX (word_mode)); |
491 | else |
if (!force_expand_binop (word_mode, binoptab, outof_input,
gen_int_shift_amount (word_mode,
BITS_PER_WORD - 1),
outof_target, unsignedp, methods))
496 | return false; |
497 | } |
498 | return true; |
499 | } |
500 | |
501 | /* This subroutine of expand_doubleword_shift handles the cases in which |
502 | the effective shift value is < BITS_PER_WORD. The arguments and return |
503 | value are the same as for the parent routine. */ |
504 | |
505 | static bool |
506 | expand_subword_shift (scalar_int_mode op1_mode, optab binoptab, |
507 | rtx outof_input, rtx into_input, rtx op1, |
508 | rtx outof_target, rtx into_target, |
509 | int unsignedp, enum optab_methods methods, |
510 | unsigned HOST_WIDE_INT shift_mask) |
511 | { |
512 | optab reverse_unsigned_shift, unsigned_shift; |
513 | rtx tmp, carries; |
514 | |
515 | reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab); |
516 | unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab); |
517 | |
518 | /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT. |
519 | We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in |
520 | the opposite direction to BINOPTAB. */ |
521 | if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD) |
522 | { |
523 | carries = outof_input; |
tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD,
op1_mode), op1_mode);
tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
0, true, methods);
528 | } |
529 | else |
530 | { |
531 | /* We must avoid shifting by BITS_PER_WORD bits since that is either |
532 | the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or |
533 | has unknown behavior. Do a single shift first, then shift by the |
534 | remainder. It's OK to use ~OP1 as the remainder if shift counts |
535 | are truncated to the mode size. */ |
carries = simplify_expand_binop (word_mode, reverse_unsigned_shift,
outof_input, const1_rtx, 0,
unsignedp, methods);
539 | if (carries == const0_rtx) |
540 | tmp = const0_rtx; |
541 | else if (shift_mask == BITS_PER_WORD - 1) |
542 | tmp = expand_unop (op1_mode, one_cmpl_optab, op1, 0, true); |
543 | else |
544 | { |
tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1,
op1_mode), op1_mode);
tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
0, true, methods);
549 | } |
550 | } |
551 | if (tmp == 0 || carries == 0) |
552 | return false; |
553 | if (carries != const0_rtx && tmp != const0_rtx) |
carries = simplify_expand_binop (word_mode, reverse_unsigned_shift,
carries, tmp, 0, unsignedp, methods);
556 | if (carries == 0) |
557 | return false; |
558 | |
559 | if (into_input != const0_rtx) |
560 | { |
561 | /* Shift INTO_INPUT logically by OP1. This is the last use of |
562 | INTO_INPUT so the result can go directly into INTO_TARGET if |
563 | convenient. */ |
tmp = simplify_expand_binop (word_mode, unsigned_shift, into_input,
op1, into_target, unsignedp, methods);
566 | if (tmp == 0) |
567 | return false; |
568 | |
569 | /* Now OR in the bits carried over from OUTOF_INPUT. */ |
if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
into_target, unsignedp, methods))
572 | return false; |
573 | } |
574 | else |
575 | emit_move_insn (into_target, carries); |
576 | |
577 | /* Use a standard word_mode shift for the out-of half. */ |
578 | if (outof_target != 0) |
if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
outof_target, unsignedp, methods))
581 | return false; |
582 | |
583 | return true; |
584 | } |
585 | |
586 | |
587 | /* Try implementing expand_doubleword_shift using conditional moves. |
588 | The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true, |
589 | otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1 |
590 | are the shift counts to use in the former and latter case. All other |
591 | arguments are the same as the parent routine. */ |
592 | |
593 | static bool |
594 | expand_doubleword_shift_condmove (scalar_int_mode op1_mode, optab binoptab, |
595 | enum rtx_code cmp_code, rtx cmp1, rtx cmp2, |
596 | rtx outof_input, rtx into_input, |
597 | rtx subword_op1, rtx superword_op1, |
598 | rtx outof_target, rtx into_target, |
599 | int unsignedp, enum optab_methods methods, |
600 | unsigned HOST_WIDE_INT shift_mask) |
601 | { |
602 | rtx outof_superword, into_superword; |
603 | |
604 | /* Put the superword version of the output into OUTOF_SUPERWORD and |
605 | INTO_SUPERWORD. */ |
606 | outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0; |
607 | if (outof_target != 0 && subword_op1 == superword_op1) |
608 | { |
609 | /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in |
610 | OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */ |
611 | into_superword = outof_target; |
612 | if (!expand_superword_shift (binoptab, outof_input, superword_op1, |
outof_superword, 0, unsignedp, methods))
614 | return false; |
615 | } |
616 | else |
617 | { |
618 | into_superword = gen_reg_rtx (word_mode); |
619 | if (!expand_superword_shift (binoptab, outof_input, superword_op1, |
outof_superword, into_superword,
621 | unsignedp, methods)) |
622 | return false; |
623 | } |
624 | |
625 | /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */ |
626 | if (!expand_subword_shift (op1_mode, binoptab, |
outof_input, into_input, subword_op1,
628 | outof_target, into_target, |
629 | unsignedp, methods, shift_mask)) |
630 | return false; |
631 | |
632 | /* Select between them. Do the INTO half first because INTO_SUPERWORD |
633 | might be the current value of OUTOF_TARGET. */ |
if (!emit_conditional_move (into_target, { cmp_code, cmp1, cmp2, op1_mode },
635 | into_target, into_superword, word_mode, false)) |
636 | return false; |
637 | |
638 | if (outof_target != 0) |
639 | if (!emit_conditional_move (outof_target, |
{ cmp_code, cmp1, cmp2, op1_mode },
641 | outof_target, outof_superword, |
642 | word_mode, false)) |
643 | return false; |
644 | |
645 | return true; |
646 | } |
647 | |
648 | /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts. |
649 | OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first |
650 | input operand; the shift moves bits in the direction OUTOF_INPUT-> |
651 | INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words |
652 | of the target. OP1 is the shift count and OP1_MODE is its mode. |
653 | If OP1 is constant, it will have been truncated as appropriate |
654 | and is known to be nonzero. |
655 | |
656 | If SHIFT_MASK is zero, the result of word shifts is undefined when the |
657 | shift count is outside the range [0, BITS_PER_WORD). This routine must |
658 | avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2). |
659 | |
660 | If SHIFT_MASK is nonzero, all word-mode shift counts are effectively |
661 | masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will |
662 | fill with zeros or sign bits as appropriate. |
663 | |
664 | If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize |
665 | a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1. |
666 | Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED. |
In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
668 | are undefined. |
669 | |
670 | BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function |
671 | may not use INTO_INPUT after modifying INTO_TARGET, and similarly for |
672 | OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent |
673 | function wants to calculate it itself. |
674 | |
675 | Return true if the shift could be successfully synthesized. */ |
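/* As a concrete illustration, consider an ashl_optab shift of a double
word by OP1 on a 32-bit-word target, with OUTOF_* naming the low half
and INTO_* the high half.  Conceptually, for OP1 < 32 the expansion
computes INTO_TARGET = (INTO_INPUT << OP1) | (OUTOF_INPUT >> (32 - OP1))
and OUTOF_TARGET = OUTOF_INPUT << OP1; for OP1 >= 32 it computes
INTO_TARGET = OUTOF_INPUT << (OP1 - 32) and OUTOF_TARGET = 0. */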
676 | |
677 | static bool |
678 | expand_doubleword_shift (scalar_int_mode op1_mode, optab binoptab, |
679 | rtx outof_input, rtx into_input, rtx op1, |
680 | rtx outof_target, rtx into_target, |
681 | int unsignedp, enum optab_methods methods, |
682 | unsigned HOST_WIDE_INT shift_mask) |
683 | { |
684 | rtx superword_op1, tmp, cmp1, cmp2; |
685 | enum rtx_code cmp_code; |
686 | |
687 | /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will |
688 | fill the result with sign or zero bits as appropriate. If so, the value |
689 | of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call |
690 | this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT |
691 | and INTO_INPUT), then emit code to set up OUTOF_TARGET. |
692 | |
693 | This isn't worthwhile for constant shifts since the optimizers will |
694 | cope better with in-range shift counts. */ |
695 | if (shift_mask >= BITS_PER_WORD |
696 | && outof_target != 0 |
697 | && !CONSTANT_P (op1)) |
698 | { |
699 | if (!expand_doubleword_shift (op1_mode, binoptab, |
700 | outof_input, into_input, op1, |
0, into_target,
702 | unsignedp, methods, shift_mask)) |
703 | return false; |
if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
outof_target, unsignedp, methods))
706 | return false; |
707 | return true; |
708 | } |
709 | |
710 | /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2) |
711 | is true when the effective shift value is less than BITS_PER_WORD. |
712 | Set SUPERWORD_OP1 to the shift count that should be used to shift |
713 | OUTOF_INPUT into INTO_TARGET when the condition is false. */ |
tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode), op1_mode);
715 | if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1) |
716 | { |
717 | /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1 |
718 | is a subword shift count. */ |
cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
0, true, methods);
721 | cmp2 = CONST0_RTX (op1_mode); |
722 | cmp_code = EQ; |
723 | superword_op1 = op1; |
724 | } |
725 | else |
726 | { |
727 | /* Set CMP1 to OP1 - BITS_PER_WORD. */ |
cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
0, true, methods);
730 | cmp2 = CONST0_RTX (op1_mode); |
731 | cmp_code = LT; |
732 | superword_op1 = cmp1; |
733 | } |
734 | if (cmp1 == 0) |
735 | return false; |
736 | |
737 | /* If we can compute the condition at compile time, pick the |
738 | appropriate subroutine. */ |
tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
740 | if (tmp != 0 && CONST_INT_P (tmp)) |
741 | { |
742 | if (tmp == const0_rtx) |
743 | return expand_superword_shift (binoptab, outof_input, superword_op1, |
744 | outof_target, into_target, |
745 | unsignedp, methods); |
746 | else |
747 | return expand_subword_shift (op1_mode, binoptab, |
748 | outof_input, into_input, op1, |
749 | outof_target, into_target, |
750 | unsignedp, methods, shift_mask); |
751 | } |
752 | |
753 | /* Try using conditional moves to generate straight-line code. */ |
754 | if (HAVE_conditional_move) |
755 | { |
756 | rtx_insn *start = get_last_insn (); |
757 | if (expand_doubleword_shift_condmove (op1_mode, binoptab, |
758 | cmp_code, cmp1, cmp2, |
759 | outof_input, into_input, |
op1, superword_op1,
761 | outof_target, into_target, |
762 | unsignedp, methods, shift_mask)) |
763 | return true; |
764 | delete_insns_since (start); |
765 | } |
766 | |
767 | /* As a last resort, use branches to select the correct alternative. */ |
768 | rtx_code_label *subword_label = gen_label_rtx (); |
769 | rtx_code_label *done_label = gen_label_rtx (); |
770 | |
771 | NO_DEFER_POP; |
772 | do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode, |
773 | 0, 0, subword_label, |
774 | profile_probability::uninitialized ()); |
775 | OK_DEFER_POP; |
776 | |
777 | if (!expand_superword_shift (binoptab, outof_input, superword_op1, |
778 | outof_target, into_target, |
779 | unsignedp, methods)) |
780 | return false; |
781 | |
782 | emit_jump_insn (targetm.gen_jump (done_label)); |
783 | emit_barrier (); |
784 | emit_label (subword_label); |
785 | |
786 | if (!expand_subword_shift (op1_mode, binoptab, |
787 | outof_input, into_input, op1, |
788 | outof_target, into_target, |
789 | unsignedp, methods, shift_mask)) |
790 | return false; |
791 | |
792 | emit_label (done_label); |
793 | return true; |
794 | } |
795 | |
796 | /* Subroutine of expand_binop. Perform a double word multiplication of |
797 | operands OP0 and OP1 both of mode MODE, which is exactly twice as wide |
as the target's word_mode. This function returns NULL_RTX if anything
799 | goes wrong, in which case it may have already emitted instructions |
800 | which need to be deleted. |
801 | |
802 | If we want to multiply two two-word values and have normal and widening |
803 | multiplies of single-word values, we can do this with three smaller |
804 | multiplications. |
805 | |
806 | The multiplication proceeds as follows: |
807 | _______________________ |
808 | [__op0_high_|__op0_low__] |
809 | _______________________ |
810 | * [__op1_high_|__op1_low__] |
811 | _______________________________________________ |
812 | _______________________ |
813 | (1) [__op0_low__*__op1_low__] |
814 | _______________________ |
815 | (2a) [__op0_low__*__op1_high_] |
816 | _______________________ |
817 | (2b) [__op0_high_*__op1_low__] |
818 | _______________________ |
819 | (3) [__op0_high_*__op1_high_] |
820 | |
821 | |
822 | This gives a 4-word result. Since we are only interested in the |
823 | lower 2 words, partial result (3) and the upper words of (2a) and |
824 | (2b) don't need to be calculated. Hence (2a) and (2b) can be |
825 | calculated using non-widening multiplication. |
826 | |
827 | (1), however, needs to be calculated with an unsigned widening |
828 | multiplication. If this operation is not directly supported we |
829 | try using a signed widening multiplication and adjust the result. |
830 | This adjustment works as follows: |
831 | |
832 | If both operands are positive then no adjustment is needed. |
833 | |
834 | If the operands have different signs, for example op0_low < 0 and |
835 | op1_low >= 0, the instruction treats the most significant bit of |
836 | op0_low as a sign bit instead of a bit with significance |
837 | 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low |
838 | with 2**BITS_PER_WORD - op0_low, and two's complements the |
839 | result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to |
840 | the result. |
841 | |
842 | Similarly, if both operands are negative, we need to add |
843 | (op0_low + op1_low) * 2**BITS_PER_WORD. |
844 | |
845 | We use a trick to adjust quickly. We logically shift op0_low right |
846 | (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to |
847 | op0_high (op1_high) before it is used to calculate 2b (2a). If no |
848 | logical shift exists, we do an arithmetic right shift and subtract |
849 | the 0 or -1. */ |
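/* For instance, with 32-bit words, op0_low = 0x80000000 and op1_low = 3:
the unsigned product (1) is 0x180000000, but a signed widening multiply
computes (-0x80000000) * 3 = 0xfffffffe80000000.  The difference is
exactly op1_low << 32, and adding (op0_low >> 31) = 1 to op0_high before
forming partial product (2b) feeds that missing op1_low into the high
word of the result. */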
850 | |
851 | static rtx |
852 | expand_doubleword_mult (machine_mode mode, rtx op0, rtx op1, rtx target, |
853 | bool umulp, enum optab_methods methods) |
854 | { |
855 | int low = (WORDS_BIG_ENDIAN ? 1 : 0); |
856 | int high = (WORDS_BIG_ENDIAN ? 0 : 1); |
857 | rtx wordm1 = (umulp ? NULL_RTX |
858 | : gen_int_shift_amount (word_mode, BITS_PER_WORD - 1)); |
859 | rtx product, adjust, product_high, temp; |
860 | |
861 | rtx op0_high = operand_subword_force (op0, high, mode); |
862 | rtx op0_low = operand_subword_force (op0, low, mode); |
863 | rtx op1_high = operand_subword_force (op1, high, mode); |
864 | rtx op1_low = operand_subword_force (op1, low, mode); |
865 | |
866 | /* If we're using an unsigned multiply to directly compute the product |
867 | of the low-order words of the operands and perform any required |
868 | adjustments of the operands, we begin by trying two more multiplications |
869 | and then computing the appropriate sum. |
870 | |
871 | We have checked above that the required addition is provided. |
872 | Full-word addition will normally always succeed, especially if |
873 | it is provided at all, so we don't worry about its failure. The |
874 | multiplication may well fail, however, so we do handle that. */ |
875 | |
876 | if (!umulp) |
877 | { |
878 | /* ??? This could be done with emit_store_flag where available. */ |
879 | temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1, |
880 | NULL_RTX, 1, methods); |
881 | if (temp) |
882 | op0_high = expand_binop (word_mode, add_optab, op0_high, temp, |
883 | NULL_RTX, 0, OPTAB_DIRECT); |
884 | else |
885 | { |
886 | temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1, |
887 | NULL_RTX, 0, methods); |
888 | if (!temp) |
889 | return NULL_RTX; |
890 | op0_high = expand_binop (word_mode, sub_optab, op0_high, temp, |
891 | NULL_RTX, 0, OPTAB_DIRECT); |
892 | } |
893 | |
894 | if (!op0_high) |
895 | return NULL_RTX; |
896 | } |
897 | |
898 | adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low, |
899 | NULL_RTX, 0, OPTAB_DIRECT); |
900 | if (!adjust) |
901 | return NULL_RTX; |
902 | |
903 | /* OP0_HIGH should now be dead. */ |
904 | |
905 | if (!umulp) |
906 | { |
907 | /* ??? This could be done with emit_store_flag where available. */ |
908 | temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1, |
909 | NULL_RTX, 1, methods); |
910 | if (temp) |
911 | op1_high = expand_binop (word_mode, add_optab, op1_high, temp, |
912 | NULL_RTX, 0, OPTAB_DIRECT); |
913 | else |
914 | { |
915 | temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1, |
916 | NULL_RTX, 0, methods); |
917 | if (!temp) |
918 | return NULL_RTX; |
919 | op1_high = expand_binop (word_mode, sub_optab, op1_high, temp, |
920 | NULL_RTX, 0, OPTAB_DIRECT); |
921 | } |
922 | |
923 | if (!op1_high) |
924 | return NULL_RTX; |
925 | } |
926 | |
927 | temp = expand_binop (word_mode, smul_optab, op1_high, op0_low, |
928 | NULL_RTX, 0, OPTAB_DIRECT); |
929 | if (!temp) |
930 | return NULL_RTX; |
931 | |
932 | /* OP1_HIGH should now be dead. */ |
933 | |
934 | adjust = expand_binop (word_mode, add_optab, adjust, temp, |
935 | NULL_RTX, 0, OPTAB_DIRECT); |
936 | |
937 | if (target && !REG_P (target)) |
938 | target = NULL_RTX; |
939 | |
940 | /* *_widen_optab needs to determine operand mode, make sure at least |
941 | one operand has non-VOID mode. */ |
942 | if (GET_MODE (op0_low) == VOIDmode && GET_MODE (op1_low) == VOIDmode) |
943 | op0_low = force_reg (word_mode, op0_low); |
944 | |
945 | if (umulp) |
946 | product = expand_binop (mode, umul_widen_optab, op0_low, op1_low, |
947 | target, 1, OPTAB_DIRECT); |
948 | else |
949 | product = expand_binop (mode, smul_widen_optab, op0_low, op1_low, |
950 | target, 1, OPTAB_DIRECT); |
951 | |
952 | if (!product) |
953 | return NULL_RTX; |
954 | |
955 | product_high = operand_subword (product, high, 1, mode); |
956 | adjust = expand_binop (word_mode, add_optab, product_high, adjust, |
957 | NULL_RTX, 0, OPTAB_DIRECT); |
958 | emit_move_insn (product_high, adjust); |
959 | return product; |
960 | } |
961 | |
962 | /* Subroutine of expand_binop. Optimize unsigned double-word OP0 % OP1 for |
constant OP1.  If for some bit in the [BITS_PER_WORD / 2, BITS_PER_WORD]
range (prefer higher bits) ((1w << bit) % OP1) == 1, then the modulo can be
computed in word-mode as ((OP0 & ((1w << bit) - 1))
+ ((OP0 >> bit) & ((1w << bit) - 1)) + (OP0 >> (2 * bit))) % OP1.
Whether we need to sum 2, 3 or 4 values depends on the bit value; if it is
2, the carry from the addition needs to be added too, i.e. like:
969 | sum += __builtin_add_overflow (low, high, &sum) |
970 | |
971 | Optimize signed double-word OP0 % OP1 similarly, just apply some correction |
972 | factor to the sum before doing unsigned remainder, in the form of |
973 | sum += (((signed) OP0 >> (2 * BITS_PER_WORD - 1)) & const); |
974 | then perform unsigned |
975 | remainder = sum % OP1; |
976 | and finally |
977 | remainder += ((signed) OP0 >> (2 * BITS_PER_WORD - 1)) & (1 - OP1); */ |
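/* For example, with 32-bit words and OP1 == 3, bit == 32 works because
(1w << 32) % 3 == 1, so OP0 % 3 == (low_word + high_word + carry) % 3,
where the carry of the word-mode addition stands for another 2^32 == 1
(mod 3).  For OP1 == 5, bit == 16 works because (1w << 16) % 5 == 1, and
the modulo becomes the sum of the four 16-bit chunks of OP0 taken % 5;
that sum cannot overflow a word, so no carry handling is needed. */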
978 | |
979 | static rtx |
980 | expand_doubleword_mod (machine_mode mode, rtx op0, rtx op1, bool unsignedp) |
981 | { |
982 | if (INTVAL (op1) <= 1 || (INTVAL (op1) & 1) == 0) |
983 | return NULL_RTX; |
984 | |
985 | rtx_insn *last = get_last_insn (); |
986 | for (int bit = BITS_PER_WORD; bit >= BITS_PER_WORD / 2; bit--) |
987 | { |
wide_int w = wi::shifted_mask (bit, 1, false, 2 * BITS_PER_WORD);
if (wi::ne_p (wi::umod_trunc (w, INTVAL (op1)), 1))
990 | continue; |
991 | rtx sum = NULL_RTX, mask = NULL_RTX; |
992 | if (bit == BITS_PER_WORD) |
993 | { |
994 | /* For signed modulo we need to add correction to the sum |
995 | and that might again overflow. */ |
996 | if (!unsignedp) |
997 | continue; |
if (optab_handler (uaddv4_optab, word_mode) == CODE_FOR_nothing)
999 | continue; |
1000 | tree wtype = lang_hooks.types.type_for_mode (word_mode, 1); |
1001 | if (wtype == NULL_TREE) |
1002 | continue; |
1003 | tree ctype = build_complex_type (wtype); |
1004 | if (TYPE_MODE (ctype) != GET_MODE_COMPLEX_MODE (word_mode)) |
1005 | continue; |
1006 | machine_mode cmode = TYPE_MODE (ctype); |
1007 | rtx op00 = operand_subword_force (op0, 0, mode); |
1008 | rtx op01 = operand_subword_force (op0, 1, mode); |
1009 | rtx cres = gen_rtx_CONCAT (cmode, gen_reg_rtx (word_mode), |
1010 | gen_reg_rtx (word_mode)); |
1011 | tree lhs = make_tree (ctype, cres); |
1012 | tree arg0 = make_tree (wtype, op00); |
1013 | tree arg1 = make_tree (wtype, op01); |
1014 | expand_addsub_overflow (UNKNOWN_LOCATION, PLUS_EXPR, lhs, arg0, |
1015 | arg1, true, true, true, false, NULL); |
1016 | sum = expand_simple_binop (word_mode, PLUS, XEXP (cres, 0), |
1017 | XEXP (cres, 1), NULL_RTX, 1, |
1018 | OPTAB_DIRECT); |
1019 | if (sum == NULL_RTX) |
1020 | return NULL_RTX; |
1021 | } |
1022 | else |
1023 | { |
1024 | /* Code below uses GEN_INT, so we need the masks to be representable |
1025 | in HOST_WIDE_INTs. */ |
1026 | if (bit >= HOST_BITS_PER_WIDE_INT) |
1027 | continue; |
1028 | /* If op0 is e.g. -1 or -2 unsigned, then the 2 additions might |
1029 | overflow. Consider 64-bit -1ULL for word size 32, if we add |
1030 | 0x7fffffffU + 0x7fffffffU + 3U, it wraps around to 1. */ |
1031 | if (bit == BITS_PER_WORD - 1) |
1032 | continue; |
1033 | |
1034 | int count = (2 * BITS_PER_WORD + bit - 1) / bit; |
1035 | rtx sum_corr = NULL_RTX; |
1036 | |
1037 | if (!unsignedp) |
1038 | { |
1039 | /* For signed modulo, compute it as unsigned modulo of |
1040 | sum with a correction added to it if OP0 is negative, |
1041 | such that the result can be computed as unsigned |
remainder + (((signed) OP0 >> (2 * BITS_PER_WORD - 1)) & (1 - OP1)). */
1043 | w = wi::min_value (2 * BITS_PER_WORD, SIGNED); |
wide_int wmod1 = wi::umod_trunc (w, INTVAL (op1));
wide_int wmod2 = wi::smod_trunc (w, INTVAL (op1));
1046 | /* wmod2 == -wmod1. */ |
1047 | wmod2 = wmod2 + (INTVAL (op1) - 1); |
if (wi::ne_p (wmod1, wmod2))
1049 | { |
1050 | wide_int wcorr = wmod2 - wmod1; |
if (wi::neg_p (w))
1052 | wcorr = wcorr + INTVAL (op1); |
1053 | /* Now verify if the count sums can't overflow, and punt |
1054 | if they could. */ |
w = wi::mask (bit, false, 2 * BITS_PER_WORD);
w = w * (count - 1);
w = w + wi::mask (2 * BITS_PER_WORD - (count - 1) * bit,
false, 2 * BITS_PER_WORD);
w = w + wcorr;
w = wi::lrshift (w, BITS_PER_WORD);
if (wi::ne_p (w, 0))
1062 | continue; |
1063 | |
1064 | mask = operand_subword_force (op0, WORDS_BIG_ENDIAN ? 0 : 1, |
1065 | mode); |
1066 | mask = expand_simple_binop (word_mode, ASHIFTRT, mask, |
1067 | GEN_INT (BITS_PER_WORD - 1), |
1068 | NULL_RTX, 0, OPTAB_DIRECT); |
1069 | if (mask == NULL_RTX) |
1070 | return NULL_RTX; |
1071 | sum_corr = immed_wide_int_const (wcorr, word_mode); |
1072 | sum_corr = expand_simple_binop (word_mode, AND, mask, |
1073 | sum_corr, NULL_RTX, 1, |
1074 | OPTAB_DIRECT); |
1075 | if (sum_corr == NULL_RTX) |
1076 | return NULL_RTX; |
1077 | } |
1078 | } |
1079 | |
1080 | for (int i = 0; i < count; i++) |
1081 | { |
1082 | rtx v = op0; |
1083 | if (i) |
1084 | v = expand_simple_binop (mode, LSHIFTRT, v, GEN_INT (i * bit), |
1085 | NULL_RTX, 1, OPTAB_DIRECT); |
1086 | if (v == NULL_RTX) |
1087 | return NULL_RTX; |
v = lowpart_subreg (word_mode, v, mode);
1089 | if (v == NULL_RTX) |
1090 | return NULL_RTX; |
1091 | if (i != count - 1) |
1092 | v = expand_simple_binop (word_mode, AND, v, |
1093 | GEN_INT ((HOST_WIDE_INT_1U << bit) |
1094 | - 1), NULL_RTX, 1, |
1095 | OPTAB_DIRECT); |
1096 | if (v == NULL_RTX) |
1097 | return NULL_RTX; |
1098 | if (sum == NULL_RTX) |
1099 | sum = v; |
1100 | else |
1101 | sum = expand_simple_binop (word_mode, PLUS, sum, v, NULL_RTX, |
1102 | 1, OPTAB_DIRECT); |
1103 | if (sum == NULL_RTX) |
1104 | return NULL_RTX; |
1105 | } |
1106 | if (sum_corr) |
1107 | { |
1108 | sum = expand_simple_binop (word_mode, PLUS, sum, sum_corr, |
1109 | NULL_RTX, 1, OPTAB_DIRECT); |
1110 | if (sum == NULL_RTX) |
1111 | return NULL_RTX; |
1112 | } |
1113 | } |
1114 | rtx remainder = expand_divmod (1, TRUNC_MOD_EXPR, word_mode, sum, |
1115 | gen_int_mode (INTVAL (op1), word_mode), |
1116 | NULL_RTX, 1, OPTAB_DIRECT); |
1117 | if (remainder == NULL_RTX) |
1118 | return NULL_RTX; |
1119 | |
1120 | if (!unsignedp) |
1121 | { |
1122 | if (mask == NULL_RTX) |
1123 | { |
1124 | mask = operand_subword_force (op0, WORDS_BIG_ENDIAN ? 0 : 1, |
1125 | mode); |
1126 | mask = expand_simple_binop (word_mode, ASHIFTRT, mask, |
1127 | GEN_INT (BITS_PER_WORD - 1), |
1128 | NULL_RTX, 0, OPTAB_DIRECT); |
1129 | if (mask == NULL_RTX) |
1130 | return NULL_RTX; |
1131 | } |
1132 | mask = expand_simple_binop (word_mode, AND, mask, |
1133 | gen_int_mode (1 - INTVAL (op1), |
1134 | word_mode), |
1135 | NULL_RTX, 1, OPTAB_DIRECT); |
1136 | if (mask == NULL_RTX) |
1137 | return NULL_RTX; |
1138 | remainder = expand_simple_binop (word_mode, PLUS, remainder, |
1139 | mask, NULL_RTX, 1, OPTAB_DIRECT); |
1140 | if (remainder == NULL_RTX) |
1141 | return NULL_RTX; |
1142 | } |
1143 | |
remainder = convert_modes (mode, word_mode, remainder, unsignedp);
1145 | /* Punt if we need any library calls. */ |
1146 | if (last) |
last = NEXT_INSN (last);
1148 | else |
1149 | last = get_insns (); |
for (; last; last = NEXT_INSN (last))
1151 | if (CALL_P (last)) |
1152 | return NULL_RTX; |
1153 | return remainder; |
1154 | } |
1155 | return NULL_RTX; |
1156 | } |
1157 | |
1158 | /* Similarly to the above function, but compute both quotient and remainder. |
1159 | Quotient can be computed from the remainder as: |
1160 | rem = op0 % op1; // Handled using expand_doubleword_mod |
1161 | quot = (op0 - rem) * inv; // inv is multiplicative inverse of op1 modulo |
1162 | // 2 * BITS_PER_WORD |
1163 | |
1164 | We can also handle cases where op1 is a multiple of power of two constant |
1165 | and constant handled by expand_doubleword_mod. |
1166 | op11 = 1 << __builtin_ctz (op1); |
1167 | op12 = op1 / op11; |
1168 | rem1 = op0 % op12; // Handled using expand_doubleword_mod |
1169 | quot1 = (op0 - rem1) * inv; // inv is multiplicative inverse of op12 modulo |
1170 | // 2 * BITS_PER_WORD |
1171 | rem = (quot1 % op11) * op12 + rem1; |
1172 | quot = quot1 / op11; */ |
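/* For example, with 32-bit words and op1 == 3, inv is the 64-bit constant
0xaaaaaaaaaaaaaaab, since 3 * 0xaaaaaaaaaaaaaaab == 2^65 + 1 == 1 modulo
2^64; op0 - rem is an exact multiple of 3, so multiplying it by inv
recovers the quotient exactly. */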
1173 | |
1174 | rtx |
1175 | expand_doubleword_divmod (machine_mode mode, rtx op0, rtx op1, rtx *rem, |
1176 | bool unsignedp) |
1177 | { |
1178 | *rem = NULL_RTX; |
1179 | |
1180 | /* Negative dividend should have been optimized into positive, |
1181 | similarly modulo by 1 and modulo by power of two is optimized |
1182 | differently too. */ |
1183 | if (INTVAL (op1) <= 1 || pow2p_hwi (INTVAL (op1))) |
1184 | return NULL_RTX; |
1185 | |
1186 | rtx op11 = const1_rtx; |
1187 | rtx op12 = op1; |
1188 | if ((INTVAL (op1) & 1) == 0) |
1189 | { |
1190 | int bit = ctz_hwi (INTVAL (op1)); |
1191 | op11 = GEN_INT (HOST_WIDE_INT_1 << bit); |
1192 | op12 = GEN_INT (INTVAL (op1) >> bit); |
1193 | } |
1194 | |
rtx rem1 = expand_doubleword_mod (mode, op0, op12, unsignedp);
1196 | if (rem1 == NULL_RTX) |
1197 | return NULL_RTX; |
1198 | |
1199 | int prec = 2 * BITS_PER_WORD; |
wide_int a = wide_int::from (INTVAL (op12), prec + 1, UNSIGNED);
wide_int b = wi::shifted_mask (prec, 1, false, prec + 1);
wide_int m = wide_int::from (wi::mod_inv (a, b), prec, UNSIGNED);
1203 | rtx inv = immed_wide_int_const (m, mode); |
1204 | |
1205 | rtx_insn *last = get_last_insn (); |
1206 | rtx quot1 = expand_simple_binop (mode, MINUS, op0, rem1, |
1207 | NULL_RTX, unsignedp, OPTAB_DIRECT); |
1208 | if (quot1 == NULL_RTX) |
1209 | return NULL_RTX; |
1210 | |
1211 | quot1 = expand_simple_binop (mode, MULT, quot1, inv, |
1212 | NULL_RTX, unsignedp, OPTAB_DIRECT); |
1213 | if (quot1 == NULL_RTX) |
1214 | return NULL_RTX; |
1215 | |
1216 | if (op11 != const1_rtx) |
1217 | { |
1218 | rtx rem2 = expand_divmod (1, TRUNC_MOD_EXPR, mode, quot1, op11, |
1219 | NULL_RTX, unsignedp, OPTAB_DIRECT); |
1220 | if (rem2 == NULL_RTX) |
1221 | return NULL_RTX; |
1222 | |
1223 | rem2 = expand_simple_binop (mode, MULT, rem2, op12, NULL_RTX, |
1224 | unsignedp, OPTAB_DIRECT); |
1225 | if (rem2 == NULL_RTX) |
1226 | return NULL_RTX; |
1227 | |
1228 | rem2 = expand_simple_binop (mode, PLUS, rem2, rem1, NULL_RTX, |
1229 | unsignedp, OPTAB_DIRECT); |
1230 | if (rem2 == NULL_RTX) |
1231 | return NULL_RTX; |
1232 | |
1233 | rtx quot2 = expand_divmod (0, TRUNC_DIV_EXPR, mode, quot1, op11, |
1234 | NULL_RTX, unsignedp, OPTAB_DIRECT); |
1235 | if (quot2 == NULL_RTX) |
1236 | return NULL_RTX; |
1237 | |
1238 | rem1 = rem2; |
1239 | quot1 = quot2; |
1240 | } |
1241 | |
1242 | /* Punt if we need any library calls. */ |
1243 | if (last) |
last = NEXT_INSN (last);
1245 | else |
1246 | last = get_insns (); |
for (; last; last = NEXT_INSN (last))
1248 | if (CALL_P (last)) |
1249 | return NULL_RTX; |
1250 | |
1251 | *rem = rem1; |
1252 | return quot1; |
1253 | } |
1254 | |
1255 | /* Wrapper around expand_binop which takes an rtx code to specify |
1256 | the operation to perform, not an optab pointer. All other |
1257 | arguments are the same. */ |
1258 | rtx |
1259 | expand_simple_binop (machine_mode mode, enum rtx_code code, rtx op0, |
1260 | rtx op1, rtx target, int unsignedp, |
1261 | enum optab_methods methods) |
1262 | { |
1263 | optab binop = code_to_optab (code); |
1264 | gcc_assert (binop); |
1265 | |
1266 | return expand_binop (mode, binop, op0, op1, target, unsignedp, methods); |
1267 | } |
1268 | |
1269 | /* Return whether OP0 and OP1 should be swapped when expanding a commutative |
1270 | binop. Order them according to commutative_operand_precedence and, if |
1271 | possible, try to put TARGET or a pseudo first. */ |
1272 | static bool |
1273 | swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1) |
1274 | { |
1275 | int op0_prec = commutative_operand_precedence (op0); |
1276 | int op1_prec = commutative_operand_precedence (op1); |
1277 | |
1278 | if (op0_prec < op1_prec) |
1279 | return true; |
1280 | |
1281 | if (op0_prec > op1_prec) |
1282 | return false; |
1283 | |
1284 | /* With equal precedence, both orders are ok, but it is better if the |
1285 | first operand is TARGET, or if both TARGET and OP0 are pseudos. */ |
1286 | if (target == 0 || REG_P (target)) |
1287 | return (REG_P (op1) && !REG_P (op0)) || target == op1; |
1288 | else |
1289 | return rtx_equal_p (op1, target); |
1290 | } |
1291 | |
1292 | /* Return true if BINOPTAB implements a shift operation. */ |
1293 | |
1294 | static bool |
1295 | shift_optab_p (optab binoptab) |
1296 | { |
switch (optab_to_code (binoptab))
1298 | { |
1299 | case ASHIFT: |
1300 | case SS_ASHIFT: |
1301 | case US_ASHIFT: |
1302 | case ASHIFTRT: |
1303 | case LSHIFTRT: |
1304 | case ROTATE: |
1305 | case ROTATERT: |
1306 | return true; |
1307 | |
1308 | default: |
1309 | return false; |
1310 | } |
1311 | } |
1312 | |
1313 | /* Return true if BINOPTAB implements a commutative binary operation. */ |
1314 | |
1315 | static bool |
1316 | commutative_optab_p (optab binoptab) |
1317 | { |
1318 | return (GET_RTX_CLASS (optab_to_code (binoptab)) == RTX_COMM_ARITH |
1319 | || binoptab == smul_widen_optab |
1320 | || binoptab == umul_widen_optab |
1321 | || binoptab == smul_highpart_optab |
1322 | || binoptab == umul_highpart_optab |
1323 | || binoptab == vec_widen_sadd_optab |
1324 | || binoptab == vec_widen_uadd_optab |
1325 | || binoptab == vec_widen_sadd_hi_optab |
1326 | || binoptab == vec_widen_sadd_lo_optab |
1327 | || binoptab == vec_widen_uadd_hi_optab |
1328 | || binoptab == vec_widen_uadd_lo_optab |
1329 | || binoptab == vec_widen_sadd_even_optab |
1330 | || binoptab == vec_widen_sadd_odd_optab |
1331 | || binoptab == vec_widen_uadd_even_optab |
1332 | || binoptab == vec_widen_uadd_odd_optab); |
1333 | } |
1334 | |
1335 | /* X is to be used in mode MODE as operand OPN to BINOPTAB. If we're |
1336 | optimizing, and if the operand is a constant that costs more than |
1337 | 1 instruction, force the constant into a register and return that |
1338 | register. Return X otherwise. UNSIGNEDP says whether X is unsigned. */ |
1339 | |
1340 | static rtx |
1341 | avoid_expensive_constant (machine_mode mode, optab binoptab, |
1342 | int opn, rtx x, bool unsignedp) |
1343 | { |
1344 | bool speed = optimize_insn_for_speed_p (); |
1345 | |
1346 | if (mode != VOIDmode |
1347 | && optimize |
1348 | && CONSTANT_P (x) |
&& (rtx_cost (x, mode, optab_to_code (binoptab), opn, speed)
> set_src_cost (x, mode, speed)))
1351 | { |
1352 | if (CONST_INT_P (x)) |
1353 | { |
1354 | HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode); |
1355 | if (intval != INTVAL (x)) |
1356 | x = GEN_INT (intval); |
1357 | } |
1358 | else |
1359 | x = convert_modes (mode, VOIDmode, x, unsignedp); |
1360 | x = force_reg (mode, x); |
1361 | } |
1362 | return x; |
1363 | } |
1364 | |
1365 | /* Helper function for expand_binop: handle the case where there |
1366 | is an insn ICODE that directly implements the indicated operation. |
1367 | Returns null if this is not possible. */ |
1368 | static rtx |
1369 | expand_binop_directly (enum insn_code icode, machine_mode mode, optab binoptab, |
1370 | rtx op0, rtx op1, |
1371 | rtx target, int unsignedp, enum optab_methods methods, |
1372 | rtx_insn *last) |
1373 | { |
1374 | machine_mode xmode0 = insn_data[(int) icode].operand[1].mode; |
1375 | machine_mode xmode1 = insn_data[(int) icode].operand[2].mode; |
1376 | machine_mode mode0, mode1, tmp_mode; |
1377 | class expand_operand ops[3]; |
1378 | bool commutative_p; |
1379 | rtx_insn *pat; |
1380 | rtx xop0 = op0, xop1 = op1; |
1381 | bool canonicalize_op1 = false; |
1382 | |
1383 | /* If it is a commutative operator and the modes would match |
1384 | if we would swap the operands, we can save the conversions. */ |
1385 | commutative_p = commutative_optab_p (binoptab); |
1386 | if (commutative_p |
1387 | && GET_MODE (xop0) != xmode0 && GET_MODE (xop1) != xmode1 |
1388 | && GET_MODE (xop0) == xmode1 && GET_MODE (xop1) == xmode0) |
std::swap (xop0, xop1);
1390 | |
1391 | /* If we are optimizing, force expensive constants into a register. */ |
xop0 = avoid_expensive_constant (xmode0, binoptab, 0, xop0, unsignedp);
if (!shift_optab_p (binoptab))
xop1 = avoid_expensive_constant (xmode1, binoptab, 1, xop1, unsignedp);
1395 | else |
1396 | /* Shifts and rotates often use a different mode for op1 from op0; |
1397 | for VOIDmode constants we don't know the mode, so force it |
1398 | to be canonicalized using convert_modes. */ |
1399 | canonicalize_op1 = true; |
1400 | |
1401 | /* In case the insn wants input operands in modes different from |
1402 | those of the actual operands, convert the operands. It would |
1403 | seem that we don't need to convert CONST_INTs, but we do, so |
1404 | that they're properly zero-extended, sign-extended or truncated |
1405 | for their mode. */ |
1406 | |
1407 | mode0 = GET_MODE (xop0) != VOIDmode ? GET_MODE (xop0) : mode; |
1408 | if (xmode0 != VOIDmode && xmode0 != mode0) |
1409 | { |
xop0 = convert_modes (xmode0, mode0, xop0, unsignedp);
1411 | mode0 = xmode0; |
1412 | } |
1413 | |
1414 | mode1 = ((GET_MODE (xop1) != VOIDmode || canonicalize_op1) |
1415 | ? GET_MODE (xop1) : mode); |
1416 | if (xmode1 != VOIDmode && xmode1 != mode1) |
1417 | { |
xop1 = convert_modes (xmode1, mode1, xop1, unsignedp);
1419 | mode1 = xmode1; |
1420 | } |
1421 | |
1422 | /* If operation is commutative, |
1423 | try to make the first operand a register. |
1424 | Even better, try to make it the same as the target. |
1425 | Also try to make the last operand a constant. */ |
1426 | if (commutative_p |
&& swap_commutative_operands_with_target (target, xop0, xop1))
std::swap (xop0, xop1);
1429 | |
1430 | /* Now, if insn's predicates don't allow our operands, put them into |
1431 | pseudo regs. */ |
1432 | |
1433 | if (binoptab == vec_pack_trunc_optab |
1434 | || binoptab == vec_pack_usat_optab |
1435 | || binoptab == vec_pack_ssat_optab |
1436 | || binoptab == vec_pack_ufix_trunc_optab |
1437 | || binoptab == vec_pack_sfix_trunc_optab |
1438 | || binoptab == vec_packu_float_optab |
1439 | || binoptab == vec_packs_float_optab) |
1440 | { |
1441 | /* The mode of the result is different from the mode of the |
1442 | arguments. */ |
1443 | tmp_mode = insn_data[(int) icode].operand[0].mode; |
1444 | if (VECTOR_MODE_P (mode) |
1445 | && maybe_ne (a: GET_MODE_NUNITS (mode: tmp_mode), b: 2 * GET_MODE_NUNITS (mode))) |
1446 | { |
1447 | delete_insns_since (last); |
1448 | return NULL_RTX; |
1449 | } |
1450 | } |
1451 | else |
1452 | tmp_mode = mode; |
1453 | |
1454 | create_output_operand (op: &ops[0], x: target, mode: tmp_mode); |
1455 | create_input_operand (op: &ops[1], value: xop0, mode: mode0); |
1456 | create_input_operand (op: &ops[2], value: xop1, mode: mode1); |
1457 | pat = maybe_gen_insn (icode, nops: 3, ops); |
1458 | if (pat) |
1459 | { |
1460 | /* If PAT is composed of more than one insn, try to add an appropriate |
1461 | REG_EQUAL note to it. If we can't because TEMP conflicts with an |
1462 | operand, call expand_binop again, this time without a target. */ |
1463 | if (INSN_P (pat) && NEXT_INSN (insn: pat) != NULL_RTX |
1464 | && ! add_equal_note (insns: pat, target: ops[0].value, |
1465 | code: optab_to_code (op: binoptab), |
1466 | op0: ops[1].value, op1: ops[2].value, op0_mode: mode0)) |
1467 | { |
1468 | delete_insns_since (last); |
1469 | return expand_binop (mode, binoptab, op0, op1, NULL_RTX, |
1470 | unsignedp, methods); |
1471 | } |
1472 | |
1473 | emit_insn (pat); |
1474 | return ops[0].value; |
1475 | } |
1476 | delete_insns_since (last); |
1477 | return NULL_RTX; |
1478 | } |
1479 | |
1480 | /* Generate code to perform an operation specified by BINOPTAB |
1481 | on operands OP0 and OP1, with result having machine-mode MODE. |
1482 | |
1483 | UNSIGNEDP is for the case where we have to widen the operands |
1484 | to perform the operation. It says to use zero-extension. |
1485 | |
1486 | If TARGET is nonzero, the value |
1487 | is generated there, if it is convenient to do so. |
1488 | In all cases an rtx is returned for the locus of the value; |
1489 | this may or may not be TARGET. */ |
1490 | |
1491 | rtx |
1492 | expand_binop (machine_mode mode, optab binoptab, rtx op0, rtx op1, |
1493 | rtx target, int unsignedp, enum optab_methods methods) |
1494 | { |
1495 | enum optab_methods next_methods |
1496 | = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN |
1497 | ? OPTAB_WIDEN : methods); |
1498 | enum mode_class mclass; |
1499 | enum insn_code icode; |
1500 | machine_mode wider_mode; |
1501 | scalar_int_mode int_mode; |
1502 | rtx libfunc; |
1503 | rtx temp; |
1504 | rtx_insn *entry_last = get_last_insn (); |
1505 | rtx_insn *last; |
1506 | |
1507 | mclass = GET_MODE_CLASS (mode); |
1508 | |
1509 | /* If subtracting an integer constant, convert this into an addition of |
1510 | the negated constant. */ |
1511 | |
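/* For example, (minus:SI x (const_int 7)) is rewritten here as
   (plus:SI x (const_int -7)), which matches the canonical RTL form for
   adding a constant and lets the add patterns handle the immediate.  */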
1512 | if (binoptab == sub_optab && CONST_INT_P (op1)) |
1513 | { |
1514 | op1 = negate_rtx (mode, op1); |
1515 | binoptab = add_optab; |
1516 | } |
1517 | /* For shifts, an invalid constant op1 might have been expanded in a |
1518 | different mode than MODE.  As such values are invalid, force them |
1519 | into a register to avoid further problems during expansion. */ |
1520 | else if (CONST_INT_P (op1) |
1521 | && shift_optab_p (binoptab) |
1522 | && UINTVAL (op1) >= GET_MODE_BITSIZE (GET_MODE_INNER (mode))) |
1523 | { |
1524 | op1 = gen_int_mode (INTVAL (op1), GET_MODE_INNER (mode)); |
1525 | op1 = force_reg (GET_MODE_INNER (mode), op1); |
1526 | } |
1527 | |
1528 | /* Record where to delete back to if we backtrack. */ |
1529 | last = get_last_insn (); |
1530 | |
1531 | /* If we can do it with a three-operand insn, do so. */ |
1532 | |
1533 | if (methods != OPTAB_MUST_WIDEN) |
1534 | { |
1535 | if (convert_optab_p (op: binoptab)) |
1536 | { |
1537 | machine_mode from_mode = widened_mode (to_mode: mode, op0, op1); |
1538 | icode = find_widening_optab_handler (binoptab, mode, from_mode); |
1539 | } |
1540 | else |
1541 | icode = optab_handler (op: binoptab, mode); |
1542 | if (icode != CODE_FOR_nothing) |
1543 | { |
1544 | temp = expand_binop_directly (icode, mode, binoptab, op0, op1, |
1545 | target, unsignedp, methods, last); |
1546 | if (temp) |
1547 | return temp; |
1548 | } |
1549 | } |
1550 | |
1551 | /* If we were trying to rotate, and that didn't work, try rotating |
1552 | the other direction before falling back to shifts and bitwise-or. */ |
1553 | if (((binoptab == rotl_optab |
1554 | && (icode = optab_handler (op: rotr_optab, mode)) != CODE_FOR_nothing) |
1555 | || (binoptab == rotr_optab |
1556 | && (icode = optab_handler (op: rotl_optab, mode)) != CODE_FOR_nothing)) |
1557 | && is_int_mode (mode, int_mode: &int_mode)) |
1558 | { |
1559 | optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab); |
1560 | rtx newop1; |
1561 | unsigned int bits = GET_MODE_PRECISION (mode: int_mode); |
1562 | |
1563 | if (CONST_INT_P (op1)) |
1564 | newop1 = gen_int_shift_amount (int_mode, bits - INTVAL (op1)); |
1565 | else if (targetm.shift_truncation_mask (int_mode) == bits - 1) |
1566 | newop1 = negate_rtx (GET_MODE (op1), op1); |
1567 | else |
1568 | newop1 = expand_binop (GET_MODE (op1), binoptab: sub_optab, |
1569 | op0: gen_int_mode (bits, GET_MODE (op1)), op1, |
1570 | NULL_RTX, unsignedp, methods: OPTAB_DIRECT); |
1571 | |
1572 | temp = expand_binop_directly (icode, mode: int_mode, binoptab: otheroptab, op0, op1: newop1, |
1573 | target, unsignedp, methods, last); |
1574 | if (temp) |
1575 | return temp; |
1576 | } |
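/* Illustration of the rewrite above: on a target that only provides a
   rotate-right pattern, (rotate:SI x (const_int 8)) is emitted as a
   rotate-right by 24, i.e. rotating the other way by precision - count.  */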
1577 | |
1578 | /* If this is a multiply, see if we can do a widening operation that |
1579 | takes operands of this mode and makes a wider mode. */ |
1580 | |
1581 | if (binoptab == smul_optab |
1582 | && GET_MODE_2XWIDER_MODE (m: mode).exists (mode: &wider_mode) |
1583 | && (convert_optab_handler (op: (unsignedp |
1584 | ? umul_widen_optab |
1585 | : smul_widen_optab), |
1586 | to_mode: wider_mode, from_mode: mode) != CODE_FOR_nothing)) |
1587 | { |
1588 | /* *_widen_optab needs to determine the operand mode; make sure at |
1589 | least one operand has a non-VOID mode. */ |
1590 | if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode) |
1591 | op0 = force_reg (mode, op0); |
1592 | temp = expand_binop (mode: wider_mode, |
1593 | binoptab: unsignedp ? umul_widen_optab : smul_widen_optab, |
1594 | op0, op1, NULL_RTX, unsignedp, methods: OPTAB_DIRECT); |
1595 | |
1596 | if (temp != 0) |
1597 | { |
1598 | if (GET_MODE_CLASS (mode) == MODE_INT |
1599 | && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp))) |
1600 | return gen_lowpart (mode, temp); |
1601 | else |
1602 | return convert_to_mode (mode, temp, unsignedp); |
1603 | } |
1604 | } |
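/* As a concrete case of the above: an SImode multiply can be done by a
   DImode widening multiply of the SImode inputs followed by taking the
   low SImode half of (or truncating) the DImode product.  */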
1605 | |
1606 | /* If this is a vector shift by a scalar, see if we can do a vector |
1607 | shift by a vector. If so, broadcast the scalar into a vector. */ |
1608 | if (mclass == MODE_VECTOR_INT) |
1609 | { |
1610 | optab otheroptab = unknown_optab; |
1611 | |
1612 | if (binoptab == ashl_optab) |
1613 | otheroptab = vashl_optab; |
1614 | else if (binoptab == ashr_optab) |
1615 | otheroptab = vashr_optab; |
1616 | else if (binoptab == lshr_optab) |
1617 | otheroptab = vlshr_optab; |
1618 | else if (binoptab == rotl_optab) |
1619 | otheroptab = vrotl_optab; |
1620 | else if (binoptab == rotr_optab) |
1621 | otheroptab = vrotr_optab; |
1622 | |
1623 | if (otheroptab |
1624 | && (icode = optab_handler (op: otheroptab, mode)) != CODE_FOR_nothing) |
1625 | { |
1626 | /* The scalar may have been extended to be too wide. Truncate |
1627 | it back to the proper size to fit in the broadcast vector. */ |
1628 | scalar_mode inner_mode = GET_MODE_INNER (mode); |
1629 | if (!CONST_INT_P (op1) |
1630 | && (GET_MODE_BITSIZE (mode: as_a <scalar_int_mode> (GET_MODE (op1))) |
1631 | > GET_MODE_BITSIZE (mode: inner_mode))) |
1632 | op1 = force_reg (inner_mode, |
1633 | simplify_gen_unary (code: TRUNCATE, mode: inner_mode, op: op1, |
1634 | GET_MODE (op1))); |
1635 | rtx vop1 = expand_vector_broadcast (vmode: mode, op: op1); |
1636 | if (vop1) |
1637 | { |
1638 | temp = expand_binop_directly (icode, mode, binoptab: otheroptab, op0, op1: vop1, |
1639 | target, unsignedp, methods, last); |
1640 | if (temp) |
1641 | return temp; |
1642 | } |
1643 | } |
1644 | } |
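/* For instance, (ashl:V4SI x (const_int 3)) can use a vashl pattern by
   first broadcasting the scalar count into a V4SI vector of 3s and then
   doing an element-wise vector-by-vector shift.  */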
1645 | |
1646 | /* Look for a wider mode of the same class for which we think we |
1647 | can open-code the operation. Check for a widening multiply at the |
1648 | wider mode as well. */ |
1649 | |
1650 | if (CLASS_HAS_WIDER_MODES_P (mclass) |
1651 | && methods != OPTAB_DIRECT && methods != OPTAB_LIB) |
1652 | FOR_EACH_WIDER_MODE (wider_mode, mode) |
1653 | { |
1654 | machine_mode next_mode; |
1655 | if (optab_handler (op: binoptab, mode: wider_mode) != CODE_FOR_nothing |
1656 | || (binoptab == smul_optab |
1657 | && GET_MODE_WIDER_MODE (m: wider_mode).exists (mode: &next_mode) |
1658 | && (find_widening_optab_handler ((unsignedp |
1659 | ? umul_widen_optab |
1660 | : smul_widen_optab), |
1661 | next_mode, mode) |
1662 | != CODE_FOR_nothing))) |
1663 | { |
1664 | rtx xop0 = op0, xop1 = op1; |
1665 | bool no_extend = false; |
1666 | |
1667 | /* For certain integer operations, we need not actually extend |
1668 | the narrow operands, as long as we will truncate |
1669 | the results to the same narrowness. */ |
1670 | |
1671 | if ((binoptab == ior_optab || binoptab == and_optab |
1672 | || binoptab == xor_optab |
1673 | || binoptab == add_optab || binoptab == sub_optab |
1674 | || binoptab == smul_optab || binoptab == ashl_optab) |
1675 | && mclass == MODE_INT) |
1676 | { |
1677 | no_extend = true; |
1678 | xop0 = avoid_expensive_constant (mode, binoptab, opn: 0, |
1679 | x: xop0, unsignedp); |
1680 | if (binoptab != ashl_optab) |
1681 | xop1 = avoid_expensive_constant (mode, binoptab, opn: 1, |
1682 | x: xop1, unsignedp); |
1683 | } |
1684 | |
1685 | xop0 = widen_operand (op: xop0, mode: wider_mode, oldmode: mode, unsignedp, no_extend); |
1686 | |
1687 | /* The second operand of a shift must always be extended. */ |
1688 | xop1 = widen_operand (op: xop1, mode: wider_mode, oldmode: mode, unsignedp, |
1689 | no_extend: no_extend && binoptab != ashl_optab); |
1690 | |
1691 | temp = expand_binop (mode: wider_mode, binoptab, op0: xop0, op1: xop1, NULL_RTX, |
1692 | unsignedp, methods: OPTAB_DIRECT); |
1693 | if (temp) |
1694 | { |
1695 | if (mclass != MODE_INT |
1696 | || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode)) |
1697 | { |
1698 | if (target == 0) |
1699 | target = gen_reg_rtx (mode); |
1700 | convert_move (target, temp, 0); |
1701 | return target; |
1702 | } |
1703 | else |
1704 | return gen_lowpart (mode, temp); |
1705 | } |
1706 | else |
1707 | delete_insns_since (last); |
1708 | } |
1709 | } |
1710 | |
1711 | /* If operation is commutative, |
1712 | try to make the first operand a register. |
1713 | Even better, try to make it the same as the target. |
1714 | Also try to make the last operand a constant. */ |
1715 | if (commutative_optab_p (binoptab) |
1716 | && swap_commutative_operands_with_target (target, op0, op1)) |
1717 | std::swap (a&: op0, b&: op1); |
1718 | |
1719 | /* These can be done a word at a time. */ |
1720 | if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab) |
1721 | && is_int_mode (mode, int_mode: &int_mode) |
1722 | && GET_MODE_SIZE (mode: int_mode) > UNITS_PER_WORD |
1723 | && optab_handler (op: binoptab, mode: word_mode) != CODE_FOR_nothing) |
1724 | { |
1725 | int i; |
1726 | rtx_insn *insns; |
1727 | |
1728 | /* If TARGET is the same as one of the operands, the REG_EQUAL note |
1729 | won't be accurate, so use a new target. */ |
1730 | if (target == 0 |
1731 | || target == op0 |
1732 | || target == op1 |
1733 | || reg_overlap_mentioned_p (target, op0) |
1734 | || reg_overlap_mentioned_p (target, op1) |
1735 | || !valid_multiword_target_p (target)) |
1736 | target = gen_reg_rtx (int_mode); |
1737 | |
1738 | start_sequence (); |
1739 | |
1740 | /* Do the actual arithmetic. */ |
1741 | machine_mode op0_mode = GET_MODE (op0); |
1742 | machine_mode op1_mode = GET_MODE (op1); |
1743 | if (op0_mode == VOIDmode) |
1744 | op0_mode = int_mode; |
1745 | if (op1_mode == VOIDmode) |
1746 | op1_mode = int_mode; |
1747 | for (i = 0; i < GET_MODE_BITSIZE (mode: int_mode) / BITS_PER_WORD; i++) |
1748 | { |
1749 | rtx target_piece = operand_subword (target, i, 1, int_mode); |
1750 | rtx x = expand_binop (mode: word_mode, binoptab, |
1751 | op0: operand_subword_force (op0, i, op0_mode), |
1752 | op1: operand_subword_force (op1, i, op1_mode), |
1753 | target: target_piece, unsignedp, methods: next_methods); |
1754 | |
1755 | if (x == 0) |
1756 | break; |
1757 | |
1758 | if (target_piece != x) |
1759 | emit_move_insn (target_piece, x); |
1760 | } |
1761 | |
1762 | insns = get_insns (); |
1763 | end_sequence (); |
1764 | |
1765 | if (i == GET_MODE_BITSIZE (mode: int_mode) / BITS_PER_WORD) |
1766 | { |
1767 | emit_insn (insns); |
1768 | return target; |
1769 | } |
1770 | } |
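/* E.g. an IOR on a double-word integer is simply an IOR of the low words
   and an IOR of the high words; for AND, IOR and XOR there is no carry or
   other interaction between the word-sized pieces.  */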
1771 | |
1772 | /* Synthesize double word shifts from single word shifts. */ |
1773 | if ((binoptab == lshr_optab || binoptab == ashl_optab |
1774 | || binoptab == ashr_optab) |
1775 | && is_int_mode (mode, int_mode: &int_mode) |
1776 | && (CONST_INT_P (op1) || optimize_insn_for_speed_p ()) |
1777 | && GET_MODE_SIZE (mode: int_mode) == 2 * UNITS_PER_WORD |
1778 | && GET_MODE_PRECISION (mode: int_mode) == GET_MODE_BITSIZE (mode: int_mode) |
1779 | && optab_handler (op: binoptab, mode: word_mode) != CODE_FOR_nothing |
1780 | && optab_handler (op: ashl_optab, mode: word_mode) != CODE_FOR_nothing |
1781 | && optab_handler (op: lshr_optab, mode: word_mode) != CODE_FOR_nothing) |
1782 | { |
1783 | unsigned HOST_WIDE_INT shift_mask, double_shift_mask; |
1784 | scalar_int_mode op1_mode; |
1785 | |
1786 | double_shift_mask = targetm.shift_truncation_mask (int_mode); |
1787 | shift_mask = targetm.shift_truncation_mask (word_mode); |
1788 | op1_mode = (GET_MODE (op1) != VOIDmode |
1789 | ? as_a <scalar_int_mode> (GET_MODE (op1)) |
1790 | : word_mode); |
1791 | |
1792 | /* Apply the truncation to constant shifts. */ |
1793 | if (double_shift_mask > 0 && CONST_INT_P (op1)) |
1794 | op1 = gen_int_mode (INTVAL (op1) & double_shift_mask, op1_mode); |
1795 | |
1796 | if (op1 == CONST0_RTX (op1_mode)) |
1797 | return op0; |
1798 | |
1799 | /* Make sure that this is a combination that expand_doubleword_shift |
1800 | can handle. See the comments there for details. */ |
1801 | if (double_shift_mask == 0 |
1802 | || (shift_mask == BITS_PER_WORD - 1 |
1803 | && double_shift_mask == BITS_PER_WORD * 2 - 1)) |
1804 | { |
1805 | rtx_insn *insns; |
1806 | rtx into_target, outof_target; |
1807 | rtx into_input, outof_input; |
1808 | int left_shift, outof_word; |
1809 | |
1810 | /* If TARGET is the same as one of the operands, the REG_EQUAL note |
1811 | won't be accurate, so use a new target. */ |
1812 | if (target == 0 |
1813 | || target == op0 |
1814 | || target == op1 |
1815 | || reg_overlap_mentioned_p (target, op0) |
1816 | || reg_overlap_mentioned_p (target, op1) |
1817 | || !valid_multiword_target_p (target)) |
1818 | target = gen_reg_rtx (int_mode); |
1819 | |
1820 | start_sequence (); |
1821 | |
1822 | /* OUTOF_* is the word we are shifting bits away from, and |
1823 | INTO_* is the word that we are shifting bits towards, thus |
1824 | they differ depending on the direction of the shift and |
1825 | WORDS_BIG_ENDIAN. */ |
1826 | |
1827 | left_shift = binoptab == ashl_optab; |
1828 | outof_word = left_shift ^ ! WORDS_BIG_ENDIAN; |
1829 | |
1830 | outof_target = operand_subword (target, outof_word, 1, int_mode); |
1831 | into_target = operand_subword (target, 1 - outof_word, 1, int_mode); |
1832 | |
1833 | outof_input = operand_subword_force (op0, outof_word, int_mode); |
1834 | into_input = operand_subword_force (op0, 1 - outof_word, int_mode); |
1835 | |
1836 | if (expand_doubleword_shift (op1_mode, binoptab, |
1837 | outof_input, into_input, op1, |
1838 | outof_target, into_target, |
1839 | unsignedp, methods: next_methods, shift_mask)) |
1840 | { |
1841 | insns = get_insns (); |
1842 | end_sequence (); |
1843 | |
1844 | emit_insn (insns); |
1845 | return target; |
1846 | } |
1847 | end_sequence (); |
1848 | } |
1849 | } |
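/* Roughly, what expand_doubleword_shift has to produce for a left shift
   by a count N with 0 < N < BITS_PER_WORD is
       high result = (high << N) | (low >> (BITS_PER_WORD - N))
       low result  = low << N
   with the more involved cases (variable or larger counts) handled there.  */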
1850 | |
1851 | /* Synthesize double word rotates from single word shifts. */ |
1852 | if ((binoptab == rotl_optab || binoptab == rotr_optab) |
1853 | && is_int_mode (mode, int_mode: &int_mode) |
1854 | && CONST_INT_P (op1) |
1855 | && GET_MODE_PRECISION (mode: int_mode) == 2 * BITS_PER_WORD |
1856 | && optab_handler (op: ashl_optab, mode: word_mode) != CODE_FOR_nothing |
1857 | && optab_handler (op: lshr_optab, mode: word_mode) != CODE_FOR_nothing) |
1858 | { |
1859 | rtx_insn *insns; |
1860 | rtx into_target, outof_target; |
1861 | rtx into_input, outof_input; |
1862 | rtx inter; |
1863 | int shift_count, left_shift, outof_word; |
1864 | |
1865 | /* If TARGET is the same as one of the operands, the REG_EQUAL note |
1866 | won't be accurate, so use a new target. Do this also if target is not |
1867 | a REG, first because having a register instead may open optimization |
1868 | opportunities, and second because if target and op0 happen to be MEMs |
1869 | designating the same location, we would risk clobbering it too early |
1870 | in the code sequence we generate below. */ |
1871 | if (target == 0 |
1872 | || target == op0 |
1873 | || target == op1 |
1874 | || !REG_P (target) |
1875 | || reg_overlap_mentioned_p (target, op0) |
1876 | || reg_overlap_mentioned_p (target, op1) |
1877 | || !valid_multiword_target_p (target)) |
1878 | target = gen_reg_rtx (int_mode); |
1879 | |
1880 | start_sequence (); |
1881 | |
1882 | shift_count = INTVAL (op1); |
1883 | |
1884 | /* OUTOF_* is the word we are shifting bits away from, and |
1885 | INTO_* is the word that we are shifting bits towards, thus |
1886 | they differ depending on the direction of the shift and |
1887 | WORDS_BIG_ENDIAN. */ |
1888 | |
1889 | left_shift = (binoptab == rotl_optab); |
1890 | outof_word = left_shift ^ ! WORDS_BIG_ENDIAN; |
1891 | |
1892 | outof_target = operand_subword (target, outof_word, 1, int_mode); |
1893 | into_target = operand_subword (target, 1 - outof_word, 1, int_mode); |
1894 | |
1895 | outof_input = operand_subword_force (op0, outof_word, int_mode); |
1896 | into_input = operand_subword_force (op0, 1 - outof_word, int_mode); |
1897 | |
1898 | if (shift_count == BITS_PER_WORD) |
1899 | { |
1900 | /* This is just a word swap. */ |
1901 | emit_move_insn (outof_target, into_input); |
1902 | emit_move_insn (into_target, outof_input); |
1903 | inter = const0_rtx; |
1904 | } |
1905 | else |
1906 | { |
1907 | rtx into_temp1, into_temp2, outof_temp1, outof_temp2; |
1908 | HOST_WIDE_INT first_shift_count, second_shift_count; |
1909 | optab reverse_unsigned_shift, unsigned_shift; |
1910 | |
1911 | reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD) |
1912 | ? lshr_optab : ashl_optab); |
1913 | |
1914 | unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD) |
1915 | ? ashl_optab : lshr_optab); |
1916 | |
1917 | if (shift_count > BITS_PER_WORD) |
1918 | { |
1919 | first_shift_count = shift_count - BITS_PER_WORD; |
1920 | second_shift_count = 2 * BITS_PER_WORD - shift_count; |
1921 | } |
1922 | else |
1923 | { |
1924 | first_shift_count = BITS_PER_WORD - shift_count; |
1925 | second_shift_count = shift_count; |
1926 | } |
1927 | rtx first_shift_count_rtx |
1928 | = gen_int_shift_amount (word_mode, first_shift_count); |
1929 | rtx second_shift_count_rtx |
1930 | = gen_int_shift_amount (word_mode, second_shift_count); |
1931 | |
1932 | into_temp1 = expand_binop (mode: word_mode, binoptab: unsigned_shift, |
1933 | op0: outof_input, op1: first_shift_count_rtx, |
1934 | NULL_RTX, unsignedp, methods: next_methods); |
1935 | into_temp2 = expand_binop (mode: word_mode, binoptab: reverse_unsigned_shift, |
1936 | op0: into_input, op1: second_shift_count_rtx, |
1937 | NULL_RTX, unsignedp, methods: next_methods); |
1938 | |
1939 | if (into_temp1 != 0 && into_temp2 != 0) |
1940 | inter = expand_binop (mode: word_mode, binoptab: ior_optab, op0: into_temp1, op1: into_temp2, |
1941 | target: into_target, unsignedp, methods: next_methods); |
1942 | else |
1943 | inter = 0; |
1944 | |
1945 | if (inter != 0 && inter != into_target) |
1946 | emit_move_insn (into_target, inter); |
1947 | |
1948 | outof_temp1 = expand_binop (mode: word_mode, binoptab: unsigned_shift, |
1949 | op0: into_input, op1: first_shift_count_rtx, |
1950 | NULL_RTX, unsignedp, methods: next_methods); |
1951 | outof_temp2 = expand_binop (mode: word_mode, binoptab: reverse_unsigned_shift, |
1952 | op0: outof_input, op1: second_shift_count_rtx, |
1953 | NULL_RTX, unsignedp, methods: next_methods); |
1954 | |
1955 | if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0) |
1956 | inter = expand_binop (mode: word_mode, binoptab: ior_optab, |
1957 | op0: outof_temp1, op1: outof_temp2, |
1958 | target: outof_target, unsignedp, methods: next_methods); |
1959 | |
1960 | if (inter != 0 && inter != outof_target) |
1961 | emit_move_insn (outof_target, inter); |
1962 | } |
1963 | |
1964 | insns = get_insns (); |
1965 | end_sequence (); |
1966 | |
1967 | if (inter != 0) |
1968 | { |
1969 | emit_insn (insns); |
1970 | return target; |
1971 | } |
1972 | } |
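/* Worked example of the rotate synthesis above, assuming a 32-bit target:
   (rotate:DI x (const_int 40)) uses first_shift_count = 8 and
   second_shift_count = 24 and produces
       high result = (low << 8) | (high >> 24)
       low result  = (high << 8) | (low >> 24)
   which is indeed x rotated left by 40 bits.  */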
1973 | |
1974 | /* These can be done a word at a time by propagating carries. */ |
1975 | if ((binoptab == add_optab || binoptab == sub_optab) |
1976 | && is_int_mode (mode, int_mode: &int_mode) |
1977 | && GET_MODE_SIZE (mode: int_mode) >= 2 * UNITS_PER_WORD |
1978 | && optab_handler (op: binoptab, mode: word_mode) != CODE_FOR_nothing) |
1979 | { |
1980 | unsigned int i; |
1981 | optab otheroptab = binoptab == add_optab ? sub_optab : add_optab; |
1982 | const unsigned int nwords = GET_MODE_BITSIZE (mode: int_mode) / BITS_PER_WORD; |
1983 | rtx carry_in = NULL_RTX, carry_out = NULL_RTX; |
1984 | rtx xop0, xop1, xtarget; |
1985 | |
1986 | /* We can handle either a 1 or -1 value for the carry. If |
1987 | STORE_FLAG_VALUE is one of those, use it. Otherwise, use 1 since |
1988 | it is the one easiest to get. */ |
1989 | #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1 |
1990 | int normalizep = STORE_FLAG_VALUE; |
1991 | #else |
1992 | int normalizep = 1; |
1993 | #endif |
1994 | |
1995 | /* Prepare the operands. */ |
1996 | xop0 = force_reg (int_mode, op0); |
1997 | xop1 = force_reg (int_mode, op1); |
1998 | |
1999 | xtarget = gen_reg_rtx (int_mode); |
2000 | |
2001 | if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target)) |
2002 | target = xtarget; |
2003 | |
2004 | /* Indicate for flow that the entire target reg is being set. */ |
2005 | if (REG_P (target)) |
2006 | emit_clobber (xtarget); |
2007 | |
2008 | /* Do the actual arithmetic. */ |
2009 | for (i = 0; i < nwords; i++) |
2010 | { |
2011 | int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i); |
2012 | rtx target_piece = operand_subword (xtarget, index, 1, int_mode); |
2013 | rtx op0_piece = operand_subword_force (xop0, index, int_mode); |
2014 | rtx op1_piece = operand_subword_force (xop1, index, int_mode); |
2015 | rtx x; |
2016 | |
2017 | /* Main add/subtract of the input operands. */ |
2018 | x = expand_binop (mode: word_mode, binoptab, |
2019 | op0: op0_piece, op1: op1_piece, |
2020 | target: target_piece, unsignedp, methods: next_methods); |
2021 | if (x == 0) |
2022 | break; |
2023 | |
2024 | if (i + 1 < nwords) |
2025 | { |
2026 | /* Store carry from main add/subtract. */ |
2027 | carry_out = gen_reg_rtx (word_mode); |
2028 | carry_out = emit_store_flag_force (carry_out, |
2029 | (binoptab == add_optab |
2030 | ? LT : GT), |
2031 | x, op0_piece, |
2032 | word_mode, 1, normalizep); |
2033 | } |
2034 | |
2035 | if (i > 0) |
2036 | { |
2037 | rtx newx; |
2038 | |
2039 | /* Add/subtract previous carry to main result. */ |
2040 | newx = expand_binop (mode: word_mode, |
2041 | binoptab: normalizep == 1 ? binoptab : otheroptab, |
2042 | op0: x, op1: carry_in, |
2043 | NULL_RTX, unsignedp: 1, methods: next_methods); |
2044 | |
2045 | if (i + 1 < nwords) |
2046 | { |
2047 | /* Get out carry from adding/subtracting carry in. */ |
2048 | rtx carry_tmp = gen_reg_rtx (word_mode); |
2049 | carry_tmp = emit_store_flag_force (carry_tmp, |
2050 | (binoptab == add_optab |
2051 | ? LT : GT), |
2052 | newx, x, |
2053 | word_mode, 1, normalizep); |
2054 | |
2055 | /* Logical-ior the two possible carries together. */ |
2056 | carry_out = expand_binop (mode: word_mode, binoptab: ior_optab, |
2057 | op0: carry_out, op1: carry_tmp, |
2058 | target: carry_out, unsignedp: 0, methods: next_methods); |
2059 | if (carry_out == 0) |
2060 | break; |
2061 | } |
2062 | emit_move_insn (target_piece, newx); |
2063 | } |
2064 | else |
2065 | { |
2066 | if (x != target_piece) |
2067 | emit_move_insn (target_piece, x); |
2068 | } |
2069 | |
2070 | carry_in = carry_out; |
2071 | } |
2072 | |
2073 | if (i == GET_MODE_BITSIZE (mode: int_mode) / (unsigned) BITS_PER_WORD) |
2074 | { |
2075 | if (optab_handler (op: mov_optab, mode: int_mode) != CODE_FOR_nothing |
2076 | || ! rtx_equal_p (target, xtarget)) |
2077 | { |
2078 | rtx_insn *temp = emit_move_insn (target, xtarget); |
2079 | |
2080 | set_dst_reg_note (temp, REG_EQUAL, |
2081 | gen_rtx_fmt_ee (optab_to_code (binoptab), |
2082 | int_mode, copy_rtx (xop0), |
2083 | copy_rtx (xop1)), |
2084 | target); |
2085 | } |
2086 | else |
2087 | target = xtarget; |
2088 | |
2089 | return target; |
2090 | } |
2091 | |
2092 | else |
2093 | delete_insns_since (last); |
2094 | } |
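/* For a double-word addition this amounts to the usual schoolbook scheme:
   add the low words, detect the carry by an unsigned comparison of the
   low-word sum against one of its inputs (sum < input means a carry
   occurred), then add the high words plus that carry.  */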
2095 | |
2096 | /* Attempt to synthesize double word multiplies using a sequence of word |
2097 | mode multiplications. We first attempt to generate a sequence using a |
2098 | more efficient unsigned widening multiply, and if that fails we then |
2099 | try using a signed widening multiply. */ |
2100 | |
2101 | if (binoptab == smul_optab |
2102 | && is_int_mode (mode, int_mode: &int_mode) |
2103 | && GET_MODE_SIZE (mode: int_mode) == 2 * UNITS_PER_WORD |
2104 | && optab_handler (op: smul_optab, mode: word_mode) != CODE_FOR_nothing |
2105 | && optab_handler (op: add_optab, mode: word_mode) != CODE_FOR_nothing) |
2106 | { |
2107 | rtx product = NULL_RTX; |
2108 | if (convert_optab_handler (op: umul_widen_optab, to_mode: int_mode, from_mode: word_mode) |
2109 | != CODE_FOR_nothing) |
2110 | { |
2111 | product = expand_doubleword_mult (mode: int_mode, op0, op1, target, |
2112 | umulp: true, methods); |
2113 | if (!product) |
2114 | delete_insns_since (last); |
2115 | } |
2116 | |
2117 | if (product == NULL_RTX |
2118 | && (convert_optab_handler (op: smul_widen_optab, to_mode: int_mode, from_mode: word_mode) |
2119 | != CODE_FOR_nothing)) |
2120 | { |
2121 | product = expand_doubleword_mult (mode: int_mode, op0, op1, target, |
2122 | umulp: false, methods); |
2123 | if (!product) |
2124 | delete_insns_since (last); |
2125 | } |
2126 | |
2127 | if (product != NULL_RTX) |
2128 | { |
2129 | if (optab_handler (op: mov_optab, mode: int_mode) != CODE_FOR_nothing) |
2130 | { |
2131 | rtx_insn *move = emit_move_insn (target ? target : product, |
2132 | product); |
2133 | set_dst_reg_note (move, |
2134 | REG_EQUAL, |
2135 | gen_rtx_fmt_ee (MULT, int_mode, |
2136 | copy_rtx (op0), |
2137 | copy_rtx (op1)), |
2138 | target ? target : product); |
2139 | } |
2140 | return product; |
2141 | } |
2142 | } |
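/* The decomposition used by expand_doubleword_mult is, in outline, the
   schoolbook one: with A = ah*W + al and B = bh*W + bl (W the word size),
   the double-word product of A and B is al*bl from the widening word
   multiply, plus the cross products al*bh + ah*bl which only contribute
   to the high result word.  */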
2143 | |
2144 | /* Attempt to synthesize double word modulo or division by constant divisor. */ |
2145 | if ((binoptab == umod_optab |
2146 | || binoptab == smod_optab |
2147 | || binoptab == udiv_optab |
2148 | || binoptab == sdiv_optab) |
2149 | && optimize |
2150 | && CONST_INT_P (op1) |
2151 | && is_int_mode (mode, int_mode: &int_mode) |
2152 | && GET_MODE_SIZE (mode: int_mode) == 2 * UNITS_PER_WORD |
2153 | && optab_handler (op: (binoptab == umod_optab || binoptab == udiv_optab) |
2154 | ? udivmod_optab : sdivmod_optab, |
2155 | mode: int_mode) == CODE_FOR_nothing |
2156 | && optab_handler (op: and_optab, mode: word_mode) != CODE_FOR_nothing |
2157 | && optab_handler (op: add_optab, mode: word_mode) != CODE_FOR_nothing |
2158 | && optimize_insn_for_speed_p ()) |
2159 | { |
2160 | rtx res = NULL_RTX; |
2161 | if ((binoptab == umod_optab || binoptab == smod_optab) |
2162 | && (INTVAL (op1) & 1) == 0) |
2163 | res = expand_doubleword_mod (mode: int_mode, op0, op1, |
2164 | unsignedp: binoptab == umod_optab); |
2165 | else |
2166 | { |
2167 | rtx quot = expand_doubleword_divmod (mode: int_mode, op0, op1, rem: &res, |
2168 | unsignedp: binoptab == umod_optab |
2169 | || binoptab == udiv_optab); |
2170 | if (quot == NULL_RTX) |
2171 | res = NULL_RTX; |
2172 | else if (binoptab == udiv_optab || binoptab == sdiv_optab) |
2173 | res = quot; |
2174 | } |
2175 | if (res != NULL_RTX) |
2176 | { |
2177 | if (optab_handler (op: mov_optab, mode: int_mode) != CODE_FOR_nothing) |
2178 | { |
2179 | rtx_insn *move = emit_move_insn (target ? target : res, |
2180 | res); |
2181 | set_dst_reg_note (move, REG_EQUAL, |
2182 | gen_rtx_fmt_ee (optab_to_code (binoptab), |
2183 | int_mode, copy_rtx (op0), op1), |
2184 | target ? target : res); |
2185 | } |
2186 | return res; |
2187 | } |
2188 | else |
2189 | delete_insns_since (last); |
2190 | } |
2191 | |
2192 | /* It can't be open-coded in this mode. |
2193 | Use a library call if one is available and caller says that's ok. */ |
2194 | |
2195 | libfunc = optab_libfunc (binoptab, mode); |
2196 | if (libfunc |
2197 | && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN)) |
2198 | { |
2199 | rtx_insn *insns; |
2200 | rtx op1x = op1; |
2201 | machine_mode op1_mode = mode; |
2202 | rtx value; |
2203 | |
2204 | start_sequence (); |
2205 | |
2206 | if (shift_optab_p (binoptab)) |
2207 | { |
2208 | op1_mode = targetm.libgcc_shift_count_mode (); |
2209 | /* Specify unsigned here, |
2210 | since negative shift counts are meaningless. */ |
2211 | op1x = convert_to_mode (op1_mode, op1, 1); |
2212 | } |
2213 | |
2214 | if (GET_MODE (op0) != VOIDmode |
2215 | && GET_MODE (op0) != mode) |
2216 | op0 = convert_to_mode (mode, op0, unsignedp); |
2217 | |
2218 | /* Pass 1 for NO_QUEUE so we don't lose any increments |
2219 | if the libcall is cse'd or moved. */ |
2220 | value = emit_library_call_value (fun: libfunc, |
2221 | NULL_RTX, fn_type: LCT_CONST, outmode: mode, |
2222 | arg1: op0, arg1_mode: mode, arg2: op1x, arg2_mode: op1_mode); |
2223 | |
2224 | insns = get_insns (); |
2225 | end_sequence (); |
2226 | |
2227 | bool trapv = trapv_binoptab_p (binoptab); |
2228 | target = gen_reg_rtx (mode); |
2229 | emit_libcall_block_1 (insns, target, value, |
2230 | trapv ? NULL_RTX |
2231 | : gen_rtx_fmt_ee (optab_to_code (binoptab), |
2232 | mode, op0, op1), trapv); |
2233 | |
2234 | return target; |
2235 | } |
2236 | |
2237 | delete_insns_since (last); |
2238 | |
2239 | /* It can't be done in this mode. Can we do it in a wider mode? */ |
2240 | |
2241 | if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN |
2242 | || methods == OPTAB_MUST_WIDEN)) |
2243 | { |
2244 | /* Caller says, don't even try. */ |
2245 | delete_insns_since (entry_last); |
2246 | return 0; |
2247 | } |
2248 | |
2249 | /* Compute the value of METHODS to pass to recursive calls. |
2250 | Don't allow widening to be tried recursively. */ |
2251 | |
2252 | methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT); |
2253 | |
2254 | /* Look for a wider mode of the same class for which it appears we can do |
2255 | the operation. */ |
2256 | |
2257 | if (CLASS_HAS_WIDER_MODES_P (mclass)) |
2258 | { |
2259 | /* This code doesn't make sense for conversion optabs, since we |
2260 | wouldn't then want to extend the operands to be the same size |
2261 | as the result. */ |
2262 | gcc_assert (!convert_optab_p (binoptab)); |
2263 | FOR_EACH_WIDER_MODE (wider_mode, mode) |
2264 | { |
2265 | if (optab_handler (op: binoptab, mode: wider_mode) |
2266 | || (methods == OPTAB_LIB |
2267 | && optab_libfunc (binoptab, wider_mode))) |
2268 | { |
2269 | rtx xop0 = op0, xop1 = op1; |
2270 | bool no_extend = false; |
2271 | |
2272 | /* For certain integer operations, we need not actually extend |
2273 | the narrow operands, as long as we will truncate |
2274 | the results to the same narrowness. */ |
2275 | |
2276 | if ((binoptab == ior_optab || binoptab == and_optab |
2277 | || binoptab == xor_optab |
2278 | || binoptab == add_optab || binoptab == sub_optab |
2279 | || binoptab == smul_optab || binoptab == ashl_optab) |
2280 | && mclass == MODE_INT) |
2281 | no_extend = true; |
2282 | |
2283 | xop0 = widen_operand (op: xop0, mode: wider_mode, oldmode: mode, |
2284 | unsignedp, no_extend); |
2285 | |
2286 | /* The second operand of a shift must always be extended. */ |
2287 | xop1 = widen_operand (op: xop1, mode: wider_mode, oldmode: mode, unsignedp, |
2288 | no_extend: no_extend && binoptab != ashl_optab); |
2289 | |
2290 | temp = expand_binop (mode: wider_mode, binoptab, op0: xop0, op1: xop1, NULL_RTX, |
2291 | unsignedp, methods); |
2292 | if (temp) |
2293 | { |
2294 | if (mclass != MODE_INT |
2295 | || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode)) |
2296 | { |
2297 | if (target == 0) |
2298 | target = gen_reg_rtx (mode); |
2299 | convert_move (target, temp, 0); |
2300 | return target; |
2301 | } |
2302 | else |
2303 | return gen_lowpart (mode, temp); |
2304 | } |
2305 | else |
2306 | delete_insns_since (last); |
2307 | } |
2308 | } |
2309 | } |
2310 | |
2311 | delete_insns_since (entry_last); |
2312 | return 0; |
2313 | } |
2314 | |
2315 | /* Expand a binary operator which has both signed and unsigned forms. |
2316 | UOPTAB is the optab for unsigned operations, and SOPTAB is for |
2317 | signed operations. |
2318 | |
2319 | If we widen unsigned operands, we may use a signed wider operation instead |
2320 | of an unsigned wider operation, since the result would be the same. */ |
2321 | |
2322 | rtx |
2323 | sign_expand_binop (machine_mode mode, optab uoptab, optab soptab, |
2324 | rtx op0, rtx op1, rtx target, int unsignedp, |
2325 | enum optab_methods methods) |
2326 | { |
2327 | rtx temp; |
2328 | optab direct_optab = unsignedp ? uoptab : soptab; |
2329 | bool save_enable; |
2330 | |
2331 | /* Do it without widening, if possible. */ |
2332 | temp = expand_binop (mode, binoptab: direct_optab, op0, op1, target, |
2333 | unsignedp, methods: OPTAB_DIRECT); |
2334 | if (temp || methods == OPTAB_DIRECT) |
2335 | return temp; |
2336 | |
2337 | /* Try widening to a signed int. Disable any direct use of any |
2338 | signed insn in the current mode. */ |
2339 | save_enable = swap_optab_enable (soptab, mode, false); |
2340 | |
2341 | temp = expand_binop (mode, binoptab: soptab, op0, op1, target, |
2342 | unsignedp, methods: OPTAB_WIDEN); |
2343 | |
2344 | /* For unsigned operands, try widening to an unsigned int. */ |
2345 | if (!temp && unsignedp) |
2346 | temp = expand_binop (mode, binoptab: uoptab, op0, op1, target, |
2347 | unsignedp, methods: OPTAB_WIDEN); |
2348 | if (temp || methods == OPTAB_WIDEN) |
2349 | goto egress; |
2350 | |
2351 | /* Use the right width libcall if that exists. */ |
2352 | temp = expand_binop (mode, binoptab: direct_optab, op0, op1, target, |
2353 | unsignedp, methods: OPTAB_LIB); |
2354 | if (temp || methods == OPTAB_LIB) |
2355 | goto egress; |
2356 | |
2357 | /* Must widen and use a libcall, use either signed or unsigned. */ |
2358 | temp = expand_binop (mode, binoptab: soptab, op0, op1, target, |
2359 | unsignedp, methods); |
2360 | if (!temp && unsignedp) |
2361 | temp = expand_binop (mode, binoptab: uoptab, op0, op1, target, |
2362 | unsignedp, methods); |
2363 | |
2364 | egress: |
2365 | /* Undo the fiddling above. */ |
2366 | if (save_enable) |
2367 | swap_optab_enable (soptab, mode, true); |
2368 | return temp; |
2369 | } |
2370 | |
2371 | /* Generate code to perform an operation specified by UNOPTAB |
2372 | on operand OP0, with two results to TARG0 and TARG1. |
2373 | We assume that the order of the operands for the instruction |
2374 | is TARG0, TARG1, OP0. |
2375 | |
2376 | Either TARG0 or TARG1 may be zero, but what that means is that |
2377 | the result is not actually wanted. We will generate it into |
2378 | a dummy pseudo-reg and discard it. They may not both be zero. |
2379 | |
2380 | Returns true if this operation can be performed; false if not. */ |
2381 | |
2382 | bool |
2383 | expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1, |
2384 | int unsignedp) |
2385 | { |
2386 | machine_mode mode = GET_MODE (targ0 ? targ0 : targ1); |
2387 | enum mode_class mclass; |
2388 | machine_mode wider_mode; |
2389 | rtx_insn *entry_last = get_last_insn (); |
2390 | rtx_insn *last; |
2391 | |
2392 | mclass = GET_MODE_CLASS (mode); |
2393 | |
2394 | if (!targ0) |
2395 | targ0 = gen_reg_rtx (mode); |
2396 | if (!targ1) |
2397 | targ1 = gen_reg_rtx (mode); |
2398 | |
2399 | /* Record where to go back to if we fail. */ |
2400 | last = get_last_insn (); |
2401 | |
2402 | if (optab_handler (op: unoptab, mode) != CODE_FOR_nothing) |
2403 | { |
2404 | class expand_operand ops[3]; |
2405 | enum insn_code icode = optab_handler (op: unoptab, mode); |
2406 | |
2407 | create_fixed_operand (op: &ops[0], x: targ0); |
2408 | create_fixed_operand (op: &ops[1], x: targ1); |
2409 | create_convert_operand_from (op: &ops[2], value: op0, mode, unsigned_p: unsignedp); |
2410 | if (maybe_expand_insn (icode, nops: 3, ops)) |
2411 | return true; |
2412 | } |
2413 | |
2414 | /* It can't be done in this mode. Can we do it in a wider mode? */ |
2415 | |
2416 | if (CLASS_HAS_WIDER_MODES_P (mclass)) |
2417 | { |
2418 | FOR_EACH_WIDER_MODE (wider_mode, mode) |
2419 | { |
2420 | if (optab_handler (op: unoptab, mode: wider_mode) != CODE_FOR_nothing) |
2421 | { |
2422 | rtx t0 = gen_reg_rtx (wider_mode); |
2423 | rtx t1 = gen_reg_rtx (wider_mode); |
2424 | rtx cop0 = convert_modes (mode: wider_mode, oldmode: mode, x: op0, unsignedp); |
2425 | |
2426 | if (expand_twoval_unop (unoptab, op0: cop0, targ0: t0, targ1: t1, unsignedp)) |
2427 | { |
2428 | convert_move (targ0, t0, unsignedp); |
2429 | convert_move (targ1, t1, unsignedp); |
2430 | return true; |
2431 | } |
2432 | else |
2433 | delete_insns_since (last); |
2434 | } |
2435 | } |
2436 | } |
2437 | |
2438 | delete_insns_since (entry_last); |
2439 | return false; |
2440 | } |
2441 | |
2442 | /* Generate code to perform an operation specified by BINOPTAB |
2443 | on operands OP0 and OP1, with two results to TARG0 and TARG1. |
2444 | We assume that the order of the operands for the instruction |
2445 | is TARG0, OP0, OP1, TARG1, which would fit a pattern like |
2446 | [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))]. |
2447 | |
2448 | Either TARG0 or TARG1 may be zero, but what that means is that |
2449 | the result is not actually wanted. We will generate it into |
2450 | a dummy pseudo-reg and discard it. They may not both be zero. |
2451 | |
2452 | Returns true if this operation can be performed; false if not. */ |
2453 | |
2454 | bool |
2455 | expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1, |
2456 | int unsignedp) |
2457 | { |
2458 | machine_mode mode = GET_MODE (targ0 ? targ0 : targ1); |
2459 | enum mode_class mclass; |
2460 | machine_mode wider_mode; |
2461 | rtx_insn *entry_last = get_last_insn (); |
2462 | rtx_insn *last; |
2463 | |
2464 | mclass = GET_MODE_CLASS (mode); |
2465 | |
2466 | if (!targ0) |
2467 | targ0 = gen_reg_rtx (mode); |
2468 | if (!targ1) |
2469 | targ1 = gen_reg_rtx (mode); |
2470 | |
2471 | /* Record where to go back to if we fail. */ |
2472 | last = get_last_insn (); |
2473 | |
2474 | if (optab_handler (op: binoptab, mode) != CODE_FOR_nothing) |
2475 | { |
2476 | class expand_operand ops[4]; |
2477 | enum insn_code icode = optab_handler (op: binoptab, mode); |
2478 | machine_mode mode0 = insn_data[icode].operand[1].mode; |
2479 | machine_mode mode1 = insn_data[icode].operand[2].mode; |
2480 | rtx xop0 = op0, xop1 = op1; |
2481 | |
2482 | /* If we are optimizing, force expensive constants into a register. */ |
2483 | xop0 = avoid_expensive_constant (mode: mode0, binoptab, opn: 0, x: xop0, unsignedp); |
2484 | xop1 = avoid_expensive_constant (mode: mode1, binoptab, opn: 1, x: xop1, unsignedp); |
2485 | |
2486 | create_fixed_operand (op: &ops[0], x: targ0); |
2487 | create_convert_operand_from (op: &ops[1], value: xop0, mode, unsigned_p: unsignedp); |
2488 | create_convert_operand_from (op: &ops[2], value: xop1, mode, unsigned_p: unsignedp); |
2489 | create_fixed_operand (op: &ops[3], x: targ1); |
2490 | if (maybe_expand_insn (icode, nops: 4, ops)) |
2491 | return true; |
2492 | delete_insns_since (last); |
2493 | } |
2494 | |
2495 | /* It can't be done in this mode. Can we do it in a wider mode? */ |
2496 | |
2497 | if (CLASS_HAS_WIDER_MODES_P (mclass)) |
2498 | { |
2499 | FOR_EACH_WIDER_MODE (wider_mode, mode) |
2500 | { |
2501 | if (optab_handler (op: binoptab, mode: wider_mode) != CODE_FOR_nothing) |
2502 | { |
2503 | rtx t0 = gen_reg_rtx (wider_mode); |
2504 | rtx t1 = gen_reg_rtx (wider_mode); |
2505 | rtx cop0 = convert_modes (mode: wider_mode, oldmode: mode, x: op0, unsignedp); |
2506 | rtx cop1 = convert_modes (mode: wider_mode, oldmode: mode, x: op1, unsignedp); |
2507 | |
2508 | if (expand_twoval_binop (binoptab, op0: cop0, op1: cop1, |
2509 | targ0: t0, targ1: t1, unsignedp)) |
2510 | { |
2511 | convert_move (targ0, t0, unsignedp); |
2512 | convert_move (targ1, t1, unsignedp); |
2513 | return true; |
2514 | } |
2515 | else |
2516 | delete_insns_since (last); |
2517 | } |
2518 | } |
2519 | } |
2520 | |
2521 | delete_insns_since (entry_last); |
2522 | return false; |
2523 | } |
2524 | |
2525 | /* Expand the two-valued library call indicated by BINOPTAB, but |
2526 | preserve only one of the values. If TARG0 is non-NULL, the first |
2527 | value is placed into TARG0; otherwise the second value is placed |
2528 | into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The |
2529 | value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1). |
2530 | This routine assumes that the value returned by the library call is |
2531 | as if the return value was of an integral mode twice as wide as the |
2532 | mode of OP0. Returns true if the call was successful. */ |
2533 | |
2534 | bool |
2535 | expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1, |
2536 | rtx targ0, rtx targ1, enum rtx_code code) |
2537 | { |
2538 | machine_mode mode; |
2539 | machine_mode libval_mode; |
2540 | rtx libval; |
2541 | rtx_insn *insns; |
2542 | rtx libfunc; |
2543 | |
2544 | /* Exactly one of TARG0 or TARG1 should be non-NULL. */ |
2545 | gcc_assert (!targ0 != !targ1); |
2546 | |
2547 | mode = GET_MODE (op0); |
2548 | libfunc = optab_libfunc (binoptab, mode); |
2549 | if (!libfunc) |
2550 | return false; |
2551 | |
2552 | /* The value returned by the library function will have twice as |
2553 | many bits as the nominal MODE. */ |
2554 | libval_mode = smallest_int_mode_for_size (size: 2 * GET_MODE_BITSIZE (mode)); |
2555 | start_sequence (); |
2556 | libval = emit_library_call_value (fun: libfunc, NULL_RTX, fn_type: LCT_CONST, |
2557 | outmode: libval_mode, |
2558 | arg1: op0, arg1_mode: mode, |
2559 | arg2: op1, arg2_mode: mode); |
2560 | /* Get the part of VAL containing the value that we want. */ |
2561 | libval = simplify_gen_subreg (outermode: mode, op: libval, innermode: libval_mode, |
2562 | byte: targ0 ? 0 : GET_MODE_SIZE (mode)); |
2563 | insns = get_insns (); |
2564 | end_sequence (); |
2565 | /* Move the result into the desired location. */ |
2566 | emit_libcall_block (insns, targ0 ? targ0 : targ1, libval, |
2567 | gen_rtx_fmt_ee (code, mode, op0, op1)); |
2568 | |
2569 | return true; |
2570 | } |
2571 | |
2572 | |
2573 | /* Wrapper around expand_unop which takes an rtx code to specify |
2574 | the operation to perform, not an optab pointer. All other |
2575 | arguments are the same. */ |
2576 | rtx |
2577 | expand_simple_unop (machine_mode mode, enum rtx_code code, rtx op0, |
2578 | rtx target, int unsignedp) |
2579 | { |
2580 | optab unop = code_to_optab (code); |
2581 | gcc_assert (unop); |
2582 | |
2583 | return expand_unop (mode, unop, op0, target, unsignedp); |
2584 | } |
2585 | |
2586 | /* Try calculating |
2587 | (clz:narrow x) |
2588 | as |
2589 | (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). |
2590 | |
2591 | A similar operation can be used for clrsb. UNOPTAB says which operation |
2592 | we are trying to expand. */ |
2593 | static rtx |
2594 | widen_leading (scalar_int_mode mode, rtx op0, rtx target, optab unoptab) |
2595 | { |
2596 | opt_scalar_int_mode wider_mode_iter; |
2597 | FOR_EACH_WIDER_MODE (wider_mode_iter, mode) |
2598 | { |
2599 | scalar_int_mode wider_mode = wider_mode_iter.require (); |
2600 | if (optab_handler (op: unoptab, mode: wider_mode) != CODE_FOR_nothing) |
2601 | { |
2602 | rtx xop0, temp; |
2603 | rtx_insn *last; |
2604 | |
2605 | last = get_last_insn (); |
2606 | |
2607 | if (target == 0) |
2608 | target = gen_reg_rtx (mode); |
2609 | xop0 = widen_operand (op: op0, mode: wider_mode, oldmode: mode, |
2610 | unsignedp: unoptab != clrsb_optab, no_extend: false); |
2611 | temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX, |
2612 | unoptab != clrsb_optab); |
2613 | if (temp != 0) |
2614 | temp = expand_binop |
2615 | (mode: wider_mode, binoptab: sub_optab, op0: temp, |
2616 | op1: gen_int_mode (GET_MODE_PRECISION (mode: wider_mode) |
2617 | - GET_MODE_PRECISION (mode), |
2618 | wider_mode), |
2619 | target, unsignedp: true, methods: OPTAB_DIRECT); |
2620 | if (temp == 0) |
2621 | delete_insns_since (last); |
2622 | |
2623 | return temp; |
2624 | } |
2625 | } |
2626 | return 0; |
2627 | } |
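/* For example, a QImode clz with only an SImode clz pattern available is
   computed above as clz:SI (zero_extend:SI x) - 24, i.e. minus the 24
   extra leading zero bits introduced by the extension.  */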
2628 | |
2629 | /* Attempt to emit (clrsb:mode op0) as |
2630 | (plus:mode (clz:mode (xor:mode op0 (ashr:mode op0 (const_int prec-1)))) |
2631 | (const_int -1)) |
2632 | if CLZ_DEFINED_VALUE_AT_ZERO (mode, val) is 2 and val is prec, |
2633 | or as |
2634 | (clz:mode (ior:mode (xor:mode (ashl:mode op0 (const_int 1)) |
2635 | (ashr:mode op0 (const_int prec-1))) |
2636 | (const_int 1))) |
2637 | otherwise. */ |
2638 | |
2639 | static rtx |
2640 | expand_clrsb_using_clz (scalar_int_mode mode, rtx op0, rtx target) |
2641 | { |
2642 | if (optimize_insn_for_size_p () |
2643 | || optab_handler (op: clz_optab, mode) == CODE_FOR_nothing) |
2644 | return NULL_RTX; |
2645 | |
2646 | start_sequence (); |
2647 | HOST_WIDE_INT val = 0; |
2648 | if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) != 2 |
2649 | || val != GET_MODE_PRECISION (mode)) |
2650 | val = 0; |
2651 | else |
2652 | val = 1; |
2653 | |
2654 | rtx temp2 = op0; |
2655 | if (!val) |
2656 | { |
2657 | temp2 = expand_binop (mode, binoptab: ashl_optab, op0, const1_rtx, |
2658 | NULL_RTX, unsignedp: 0, methods: OPTAB_DIRECT); |
2659 | if (!temp2) |
2660 | { |
2661 | fail: |
2662 | end_sequence (); |
2663 | return NULL_RTX; |
2664 | } |
2665 | } |
2666 | |
2667 | rtx temp = expand_binop (mode, binoptab: ashr_optab, op0, |
2668 | GEN_INT (GET_MODE_PRECISION (mode) - 1), |
2669 | NULL_RTX, unsignedp: 0, methods: OPTAB_DIRECT); |
2670 | if (!temp) |
2671 | goto fail; |
2672 | |
2673 | temp = expand_binop (mode, binoptab: xor_optab, op0: temp2, op1: temp, NULL_RTX, unsignedp: 0, |
2674 | methods: OPTAB_DIRECT); |
2675 | if (!temp) |
2676 | goto fail; |
2677 | |
2678 | if (!val) |
2679 | { |
2680 | temp = expand_binop (mode, binoptab: ior_optab, op0: temp, const1_rtx, |
2681 | NULL_RTX, unsignedp: 0, methods: OPTAB_DIRECT); |
2682 | if (!temp) |
2683 | goto fail; |
2684 | } |
2685 | temp = expand_unop_direct (mode, clz_optab, temp, val ? NULL_RTX : target, |
2686 | true); |
2687 | if (!temp) |
2688 | goto fail; |
2689 | if (val) |
2690 | { |
2691 | temp = expand_binop (mode, binoptab: add_optab, op0: temp, constm1_rtx, |
2692 | target, unsignedp: 0, methods: OPTAB_DIRECT); |
2693 | if (!temp) |
2694 | goto fail; |
2695 | } |
2696 | |
2697 | rtx_insn *seq = get_insns (); |
2698 | end_sequence (); |
2699 | |
2700 | add_equal_note (insns: seq, target: temp, code: CLRSB, op0, NULL_RTX, op0_mode: mode); |
2701 | emit_insn (seq); |
2702 | return temp; |
2703 | } |
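/* Sanity check of the first form above for SImode x = 1, assuming
   CLZ_DEFINED_VALUE_AT_ZERO gives 32: 1 ^ (1 >> 31) = 1, clz (1) = 31,
   and 31 - 1 = 30, which is indeed clrsb (1).  For x = 0 the xor is 0,
   clz gives 32 and the result is 31 = precision - 1, as required.  */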
2704 | |
2705 | static rtx expand_ffs (scalar_int_mode, rtx, rtx); |
2706 | |
2707 | /* Try calculating clz, ctz or ffs of a double-word quantity as two clz, ctz or |
2708 | ffs operations on word-sized quantities, choosing which based on whether the |
2709 | high (for clz) or low (for ctz and ffs) word is nonzero. */ |
2710 | static rtx |
2711 | expand_doubleword_clz_ctz_ffs (scalar_int_mode mode, rtx op0, rtx target, |
2712 | optab unoptab) |
2713 | { |
2714 | rtx xop0 = force_reg (mode, op0); |
2715 | rtx subhi = gen_highpart (word_mode, xop0); |
2716 | rtx sublo = gen_lowpart (word_mode, xop0); |
2717 | rtx_code_label *hi0_label = gen_label_rtx (); |
2718 | rtx_code_label *after_label = gen_label_rtx (); |
2719 | rtx_insn *seq; |
2720 | rtx temp, result; |
2721 | int addend = 0; |
2722 | |
2723 | /* If we were not given a target, use a word_mode register, not a |
2724 | 'mode' register. The result will fit, and nobody is expecting |
2725 | anything bigger (the return type of __builtin_clz* is int). */ |
2726 | if (!target) |
2727 | target = gen_reg_rtx (word_mode); |
2728 | |
2729 | /* In any case, write to a word_mode scratch in both branches of the |
2730 | conditional, so we can ensure there is a single move insn setting |
2731 | 'target' to tag a REG_EQUAL note on. */ |
2732 | result = gen_reg_rtx (word_mode); |
2733 | |
2734 | if (unoptab != clz_optab) |
2735 | std::swap (a&: subhi, b&: sublo); |
2736 | |
2737 | start_sequence (); |
2738 | |
2739 | /* If the high word is not equal to zero, |
2740 | then clz of the full value is clz of the high word. */ |
2741 | emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0, |
2742 | word_mode, true, hi0_label); |
2743 | |
2744 | if (optab_handler (op: unoptab, mode: word_mode) != CODE_FOR_nothing) |
2745 | temp = expand_unop_direct (word_mode, unoptab, subhi, result, true); |
2746 | else |
2747 | { |
2748 | gcc_assert (unoptab == ffs_optab); |
2749 | temp = expand_ffs (word_mode, subhi, result); |
2750 | } |
2751 | if (!temp) |
2752 | goto fail; |
2753 | |
2754 | if (temp != result) |
2755 | convert_move (result, temp, true); |
2756 | |
2757 | emit_jump_insn (targetm.gen_jump (after_label)); |
2758 | emit_barrier (); |
2759 | |
2760 | /* Else clz of the full value is clz of the low word plus the number |
2761 | of bits in the high word. Similarly for ctz/ffs of the high word, |
2762 | except that ffs should be 0 when both words are zero. */ |
2763 | emit_label (hi0_label); |
2764 | |
2765 | if (unoptab == ffs_optab) |
2766 | { |
2767 | convert_move (result, const0_rtx, true); |
2768 | emit_cmp_and_jump_insns (sublo, CONST0_RTX (word_mode), EQ, 0, |
2769 | word_mode, true, after_label); |
2770 | } |
2771 | |
2772 | if (optab_handler (op: unoptab, mode: word_mode) != CODE_FOR_nothing) |
2773 | temp = expand_unop_direct (word_mode, unoptab, sublo, NULL_RTX, true); |
2774 | else |
2775 | { |
2776 | gcc_assert (unoptab == ffs_optab); |
2777 | temp = expand_unop_direct (word_mode, ctz_optab, sublo, NULL_RTX, true); |
2778 | addend = 1; |
2779 | } |
2780 | |
2781 | if (!temp) |
2782 | goto fail; |
2783 | |
2784 | temp = expand_binop (mode: word_mode, binoptab: add_optab, op0: temp, |
2785 | op1: gen_int_mode (GET_MODE_BITSIZE (mode: word_mode) + addend, |
2786 | word_mode), |
2787 | target: result, unsignedp: true, methods: OPTAB_DIRECT); |
2788 | if (!temp) |
2789 | goto fail; |
2790 | if (temp != result) |
2791 | convert_move (result, temp, true); |
2792 | |
2793 | emit_label (after_label); |
2794 | convert_move (target, result, true); |
2795 | |
2796 | seq = get_insns (); |
2797 | end_sequence (); |
2798 | |
2799 | add_equal_note (insns: seq, target, code: optab_to_code (op: unoptab), op0: xop0, NULL_RTX, op0_mode: mode); |
2800 | emit_insn (seq); |
2801 | return target; |
2802 | |
2803 | fail: |
2804 | end_sequence (); |
2805 | return 0; |
2806 | } |
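/* E.g. for a 64-bit clz on a 32-bit target this emits the equivalent of
       clz64 (x) = high != 0 ? clz32 (high) : 32 + clz32 (low)
   and, after the swap of the subwords, the analogous form for ctz/ffs
   keyed on the low word instead.  */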
2807 | |
2808 | /* Try calculating popcount of a double-word quantity as two popcount's of |
2809 | word-sized quantities and summing up the results. */ |
2810 | static rtx |
2811 | expand_doubleword_popcount (scalar_int_mode mode, rtx op0, rtx target) |
2812 | { |
2813 | rtx t0, t1, t; |
2814 | rtx_insn *seq; |
2815 | |
2816 | start_sequence (); |
2817 | |
2818 | t0 = expand_unop_direct (word_mode, popcount_optab, |
2819 | operand_subword_force (op0, 0, mode), NULL_RTX, |
2820 | true); |
2821 | t1 = expand_unop_direct (word_mode, popcount_optab, |
2822 | operand_subword_force (op0, 1, mode), NULL_RTX, |
2823 | true); |
2824 | if (!t0 || !t1) |
2825 | { |
2826 | end_sequence (); |
2827 | return NULL_RTX; |
2828 | } |
2829 | |
2830 | /* If we were not given a target, use a word_mode register, not a |
2831 | 'mode' register. The result will fit, and nobody is expecting |
2832 | anything bigger (the return type of __builtin_popcount* is int). */ |
2833 | if (!target) |
2834 | target = gen_reg_rtx (word_mode); |
2835 | |
2836 | t = expand_binop (mode: word_mode, binoptab: add_optab, op0: t0, op1: t1, target, unsignedp: 0, methods: OPTAB_DIRECT); |
2837 | |
2838 | seq = get_insns (); |
2839 | end_sequence (); |
2840 | |
2841 | add_equal_note (insns: seq, target: t, code: POPCOUNT, op0, NULL_RTX, op0_mode: mode); |
2842 | emit_insn (seq); |
2843 | return t; |
2844 | } |
2845 | |
2846 | /* Try calculating |
2847 | (parity:wide x) |
2848 | as |
2849 | (parity:narrow (low (x) ^ high (x))) */ |
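/* This is valid because each bit of low ^ high is the sum modulo 2 of the
   corresponding bits, so popcount (low ^ high) has the same parity as
   popcount (low) + popcount (high), i.e. the same parity as x itself.  */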
2850 | static rtx |
2851 | expand_doubleword_parity (scalar_int_mode mode, rtx op0, rtx target) |
2852 | { |
2853 | rtx t = expand_binop (mode: word_mode, binoptab: xor_optab, |
2854 | op0: operand_subword_force (op0, 0, mode), |
2855 | op1: operand_subword_force (op0, 1, mode), |
2856 | NULL_RTX, unsignedp: 0, methods: OPTAB_DIRECT); |
2857 | return expand_unop (word_mode, parity_optab, t, target, true); |
2858 | } |
2859 | |
2860 | /* Try calculating |
2861 | (bswap:narrow x) |
2862 | as |
2863 | (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */ |
2864 | static rtx |
2865 | widen_bswap (scalar_int_mode mode, rtx op0, rtx target) |
2866 | { |
2867 | rtx x; |
2868 | rtx_insn *last; |
2869 | opt_scalar_int_mode wider_mode_iter; |
2870 | |
2871 | FOR_EACH_WIDER_MODE (wider_mode_iter, mode) |
2872 | if (optab_handler (op: bswap_optab, mode: wider_mode_iter.require ()) |
2873 | != CODE_FOR_nothing) |
2874 | break; |
2875 | |
2876 | if (!wider_mode_iter.exists ()) |
2877 | return NULL_RTX; |
2878 | |
2879 | scalar_int_mode wider_mode = wider_mode_iter.require (); |
2880 | last = get_last_insn (); |
2881 | |
2882 | x = widen_operand (op: op0, mode: wider_mode, oldmode: mode, unsignedp: true, no_extend: true); |
2883 | x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true); |
2884 | |
2885 | gcc_assert (GET_MODE_PRECISION (wider_mode) == GET_MODE_BITSIZE (wider_mode) |
2886 | && GET_MODE_PRECISION (mode) == GET_MODE_BITSIZE (mode)); |
2887 | if (x != 0) |
2888 | x = expand_shift (RSHIFT_EXPR, wider_mode, x, |
2889 | GET_MODE_BITSIZE (mode: wider_mode) |
2890 | - GET_MODE_BITSIZE (mode), |
2891 | NULL_RTX, true); |
2892 | |
2893 | if (x != 0) |
2894 | { |
2895 | if (target == 0) |
2896 | target = gen_reg_rtx (mode); |
2897 | emit_move_insn (target, gen_lowpart (mode, x)); |
2898 | } |
2899 | else |
2900 | delete_insns_since (last); |
2901 | |
2902 | return target; |
2903 | } |
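/* For instance, an HImode bswap of 0x1234 via an SImode bswap: the value
   sits in the low half of an SImode register, byte-swapping that register
   leaves 0x3412 in the high half, and the final right shift by 16 brings
   it back down, giving 0x3412.  */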
2904 | |
2905 | /* Try calculating bswap as two bswaps of two word-sized operands. */ |
2906 | |
2907 | static rtx |
2908 | expand_doubleword_bswap (machine_mode mode, rtx op, rtx target) |
2909 | { |
2910 | rtx t0, t1; |
2911 | |
2912 | t1 = expand_unop (word_mode, bswap_optab, |
2913 | operand_subword_force (op, 0, mode), NULL_RTX, true); |
2914 | t0 = expand_unop (word_mode, bswap_optab, |
2915 | operand_subword_force (op, 1, mode), NULL_RTX, true); |
2916 | |
2917 | if (target == 0 || !valid_multiword_target_p (target)) |
2918 | target = gen_reg_rtx (mode); |
2919 | if (REG_P (target)) |
2920 | emit_clobber (target); |
2921 | emit_move_insn (operand_subword (target, 0, 1, mode), t0); |
2922 | emit_move_insn (operand_subword (target, 1, 1, mode), t1); |
2923 | |
2924 | return target; |
2925 | } |
2926 | |
2927 | /* Try calculating (parity x) as (and (popcount x) 1), where |
2928 | popcount can also be done in a wider mode. */ |
2929 | static rtx |
2930 | expand_parity (scalar_int_mode mode, rtx op0, rtx target) |
2931 | { |
2932 | enum mode_class mclass = GET_MODE_CLASS (mode); |
2933 | opt_scalar_int_mode wider_mode_iter; |
2934 | FOR_EACH_MODE_FROM (wider_mode_iter, mode) |
2935 | { |
2936 | scalar_int_mode wider_mode = wider_mode_iter.require (); |
2937 | if (optab_handler (op: popcount_optab, mode: wider_mode) != CODE_FOR_nothing) |
2938 | { |
2939 | rtx xop0, temp; |
2940 | rtx_insn *last; |
2941 | |
2942 | last = get_last_insn (); |
2943 | |
2944 | if (target == 0 || GET_MODE (target) != wider_mode) |
2945 | target = gen_reg_rtx (wider_mode); |
2946 | |
2947 | xop0 = widen_operand (op: op0, mode: wider_mode, oldmode: mode, unsignedp: true, no_extend: false); |
2948 | temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX, |
2949 | true); |
2950 | if (temp != 0) |
2951 | temp = expand_binop (mode: wider_mode, binoptab: and_optab, op0: temp, const1_rtx, |
2952 | target, unsignedp: true, methods: OPTAB_DIRECT); |
2953 | |
2954 | if (temp) |
2955 | { |
2956 | if (mclass != MODE_INT |
2957 | || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode)) |
2958 | return convert_to_mode (mode, temp, 0); |
2959 | else |
2960 | return gen_lowpart (mode, temp); |
2961 | } |
2962 | else |
2963 | delete_insns_since (last); |
2964 | } |
2965 | } |
2966 | return 0; |
2967 | } |
2968 | |
2969 | /* Try calculating ctz(x) as K - clz(x & -x),
2970 | where K is GET_MODE_PRECISION(mode) - 1. |
2971 | |
2972 | Both __builtin_ctz and __builtin_clz are undefined at zero, so we |
2973 | don't have to worry about what the hardware does in that case. (If |
2974 | the clz instruction produces the usual value at 0, which is K, the |
2975 | result of this code sequence will be -1; expand_ffs, below, relies |
2976 | on this. It might be nice to have it be K instead, for consistency |
2977 | with the (very few) processors that provide a ctz with a defined |
2978 | value, but that would take one more instruction, and it would be |
2979 | less convenient for expand_ffs anyway.)  */
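/* (Worked example, for illustration: in a 32-bit mode, K == 31.  With
   x == 40 == 0b101000, x & -x == 0b1000, clz (0b1000) == 28, and
   31 - 28 == 3 == ctz (40).)  */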
2980 | |
2981 | static rtx |
2982 | expand_ctz (scalar_int_mode mode, rtx op0, rtx target) |
2983 | { |
2984 | rtx_insn *seq; |
2985 | rtx temp; |
2986 | |
2987 | if (optab_handler (op: clz_optab, mode) == CODE_FOR_nothing) |
2988 | return 0; |
2989 | |
2990 | start_sequence (); |
2991 | |
2992 | temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true); |
2993 | if (temp) |
2994 | temp = expand_binop (mode, binoptab: and_optab, op0, op1: temp, NULL_RTX, |
2995 | unsignedp: true, methods: OPTAB_DIRECT); |
2996 | if (temp) |
2997 | temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true); |
2998 | if (temp) |
2999 | temp = expand_binop (mode, binoptab: sub_optab, |
3000 | op0: gen_int_mode (GET_MODE_PRECISION (mode) - 1, mode), |
3001 | op1: temp, target, |
3002 | unsignedp: true, methods: OPTAB_DIRECT); |
3003 | if (temp == 0) |
3004 | { |
3005 | end_sequence (); |
3006 | return 0; |
3007 | } |
3008 | |
3009 | seq = get_insns (); |
3010 | end_sequence (); |
3011 | |
3012 | add_equal_note (insns: seq, target: temp, code: CTZ, op0, NULL_RTX, op0_mode: mode); |
3013 | emit_insn (seq); |
3014 | return temp; |
3015 | } |
3016 | |
3017 | |
3018 | /* Try calculating ffs(x) using ctz(x) if we have that instruction, or |
3019 | else with the clz-based sequence used by expand_ctz.
3020 | |
3021 | The ffs builtin promises to return zero for a zero value and ctz/clz |
3022 | may have an undefined value in that case. If they do not give us a |
3023 | convenient value, we have to generate a test and branch. */ |
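/* (For illustration only: ffs (x) == ctz (x) + 1 for x != 0 and
   ffs (0) == 0, so the code below arranges for the intermediate result to
   be -1 when x == 0 -- either because the clz/ctz instruction already
   yields that, or via an explicit test and branch -- and then adds 1.)  */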
3024 | static rtx |
3025 | expand_ffs (scalar_int_mode mode, rtx op0, rtx target) |
3026 | { |
3027 | HOST_WIDE_INT val = 0; |
3028 | bool defined_at_zero = false; |
3029 | rtx temp; |
3030 | rtx_insn *seq; |
3031 | |
3032 | if (optab_handler (op: ctz_optab, mode) != CODE_FOR_nothing) |
3033 | { |
3034 | start_sequence (); |
3035 | |
3036 | temp = expand_unop_direct (mode, ctz_optab, op0, 0, true); |
3037 | if (!temp) |
3038 | goto fail; |
3039 | |
3040 | defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2); |
3041 | } |
3042 | else if (optab_handler (op: clz_optab, mode) != CODE_FOR_nothing) |
3043 | { |
3044 | start_sequence (); |
3045 | temp = expand_ctz (mode, op0, target: 0); |
3046 | if (!temp) |
3047 | goto fail; |
3048 | |
3049 | if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2) |
3050 | { |
3051 | defined_at_zero = true; |
3052 | val = (GET_MODE_PRECISION (mode) - 1) - val; |
3053 | } |
3054 | } |
3055 | else |
3056 | return 0; |
3057 | |
3058 | if (defined_at_zero && val == -1) |
3059 | /* No correction needed at zero. */; |
3060 | else |
3061 | { |
3062 | /* We don't try to do anything clever with the situation found |
3063 | on some processors (e.g. Alpha) where ctz(0:mode) ==
3064 | bitsize(mode). If someone can think of a way to send N to -1 |
3065 | and leave alone all values in the range 0..N-1 (where N is a |
3066 | power of two), cheaper than this test-and-branch, please add it. |
3067 | |
3068 | The test-and-branch is done after the operation itself, in case |
3069 | the operation sets condition codes that can be recycled for this. |
3070 | (This is true on i386, for instance.) */ |
3071 | |
3072 | rtx_code_label *nonzero_label = gen_label_rtx (); |
3073 | emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0, |
3074 | mode, true, nonzero_label); |
3075 | |
3076 | convert_move (temp, GEN_INT (-1), false); |
3077 | emit_label (nonzero_label); |
3078 | } |
3079 | |
3080 | /* temp now has a value in the range -1..bitsize-1. ffs is supposed |
3081 | to produce a value in the range 0..bitsize. */ |
3082 | temp = expand_binop (mode, binoptab: add_optab, op0: temp, op1: gen_int_mode (1, mode), |
3083 | target, unsignedp: false, methods: OPTAB_DIRECT); |
3084 | if (!temp) |
3085 | goto fail; |
3086 | |
3087 | seq = get_insns (); |
3088 | end_sequence (); |
3089 | |
3090 | add_equal_note (insns: seq, target: temp, code: FFS, op0, NULL_RTX, op0_mode: mode); |
3091 | emit_insn (seq); |
3092 | return temp; |
3093 | |
3094 | fail: |
3095 | end_sequence (); |
3096 | return 0; |
3097 | } |
3098 | |
3099 | /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain |
3100 | conditions, VAL may already be a SUBREG against which we cannot generate |
3101 | a further SUBREG. In this case, we expect forcing the value into a |
3102 | register will work around the situation. */ |
3103 | |
3104 | static rtx |
3105 | lowpart_subreg_maybe_copy (machine_mode omode, rtx val, |
3106 | machine_mode imode) |
3107 | { |
3108 | rtx ret; |
3109 | ret = lowpart_subreg (outermode: omode, op: val, innermode: imode); |
3110 | if (ret == NULL) |
3111 | { |
3112 | val = force_reg (imode, val); |
3113 | ret = lowpart_subreg (outermode: omode, op: val, innermode: imode); |
3114 | gcc_assert (ret != NULL); |
3115 | } |
3116 | return ret; |
3117 | } |
3118 | |
3119 | /* Expand a floating point absolute value or negation operation via a |
3120 | logical operation on the sign bit. */ |
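/* (For illustration only: with IEEE single precision the sign bit is bit
   31, so abs is computed as x & 0x7fffffff and negation as x ^ 0x80000000
   on the underlying integer representation.)  */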
3121 | |
3122 | static rtx |
3123 | expand_absneg_bit (enum rtx_code code, scalar_float_mode mode, |
3124 | rtx op0, rtx target) |
3125 | { |
3126 | const struct real_format *fmt; |
3127 | int bitpos, word, nwords, i; |
3128 | scalar_int_mode imode; |
3129 | rtx temp; |
3130 | rtx_insn *insns; |
3131 | |
3132 | /* The format has to have a simple sign bit. */ |
3133 | fmt = REAL_MODE_FORMAT (mode); |
3134 | if (fmt == NULL) |
3135 | return NULL_RTX; |
3136 | |
3137 | bitpos = fmt->signbit_rw; |
3138 | if (bitpos < 0) |
3139 | return NULL_RTX; |
3140 | |
3141 | /* Don't create negative zeros if the format doesn't support them. */ |
3142 | if (code == NEG && !fmt->has_signed_zero) |
3143 | return NULL_RTX; |
3144 | |
3145 | if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD) |
3146 | { |
3147 | if (!int_mode_for_mode (mode).exists (mode: &imode)) |
3148 | return NULL_RTX; |
3149 | word = 0; |
3150 | nwords = 1; |
3151 | } |
3152 | else |
3153 | { |
3154 | imode = word_mode; |
3155 | |
3156 | if (FLOAT_WORDS_BIG_ENDIAN) |
3157 | word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD; |
3158 | else |
3159 | word = bitpos / BITS_PER_WORD; |
3160 | bitpos = bitpos % BITS_PER_WORD; |
3161 | nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD; |
3162 | } |
3163 | |
3164 | wide_int mask = wi::set_bit_in_zero (bit: bitpos, precision: GET_MODE_PRECISION (mode: imode)); |
3165 | if (code == ABS) |
3166 | mask = ~mask; |
3167 | |
3168 | if (target == 0 |
3169 | || target == op0 |
3170 | || reg_overlap_mentioned_p (target, op0) |
3171 | || (nwords > 1 && !valid_multiword_target_p (target))) |
3172 | target = gen_reg_rtx (mode); |
3173 | |
3174 | if (nwords > 1) |
3175 | { |
3176 | start_sequence (); |
3177 | |
3178 | for (i = 0; i < nwords; ++i) |
3179 | { |
3180 | rtx targ_piece = operand_subword (target, i, 1, mode); |
3181 | rtx op0_piece = operand_subword_force (op0, i, mode); |
3182 | |
3183 | if (i == word) |
3184 | { |
3185 | temp = expand_binop (mode: imode, binoptab: code == ABS ? and_optab : xor_optab, |
3186 | op0: op0_piece, |
3187 | op1: immed_wide_int_const (mask, imode), |
3188 | target: targ_piece, unsignedp: 1, methods: OPTAB_LIB_WIDEN); |
3189 | if (temp != targ_piece) |
3190 | emit_move_insn (targ_piece, temp); |
3191 | } |
3192 | else |
3193 | emit_move_insn (targ_piece, op0_piece); |
3194 | } |
3195 | |
3196 | insns = get_insns (); |
3197 | end_sequence (); |
3198 | |
3199 | emit_insn (insns); |
3200 | } |
3201 | else |
3202 | { |
3203 | temp = expand_binop (mode: imode, binoptab: code == ABS ? and_optab : xor_optab, |
3204 | gen_lowpart (imode, op0), |
3205 | op1: immed_wide_int_const (mask, imode), |
3206 | gen_lowpart (imode, target), unsignedp: 1, methods: OPTAB_LIB_WIDEN); |
3207 | target = lowpart_subreg_maybe_copy (omode: mode, val: temp, imode); |
3208 | |
3209 | set_dst_reg_note (get_last_insn (), REG_EQUAL, |
3210 | gen_rtx_fmt_e (code, mode, copy_rtx (op0)), |
3211 | target); |
3212 | } |
3213 | |
3214 | return target; |
3215 | } |
3216 | |
3217 | /* As expand_unop, but will fail rather than attempt the operation in a |
3218 | different mode or with a libcall. */ |
3219 | static rtx |
3220 | expand_unop_direct (machine_mode mode, optab unoptab, rtx op0, rtx target, |
3221 | int unsignedp) |
3222 | { |
3223 | if (optab_handler (op: unoptab, mode) != CODE_FOR_nothing) |
3224 | { |
3225 | class expand_operand ops[2]; |
3226 | enum insn_code icode = optab_handler (op: unoptab, mode); |
3227 | rtx_insn *last = get_last_insn (); |
3228 | rtx_insn *pat; |
3229 | |
3230 | create_output_operand (op: &ops[0], x: target, mode); |
3231 | create_convert_operand_from (op: &ops[1], value: op0, mode, unsigned_p: unsignedp); |
3232 | pat = maybe_gen_insn (icode, nops: 2, ops); |
3233 | if (pat) |
3234 | { |
3235 | if (INSN_P (pat) && NEXT_INSN (insn: pat) != NULL_RTX |
3236 | && ! add_equal_note (insns: pat, target: ops[0].value, |
3237 | code: optab_to_code (op: unoptab), |
3238 | op0: ops[1].value, NULL_RTX, op0_mode: mode)) |
3239 | { |
3240 | delete_insns_since (last); |
3241 | return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp); |
3242 | } |
3243 | |
3244 | emit_insn (pat); |
3245 | |
3246 | return ops[0].value; |
3247 | } |
3248 | } |
3249 | return 0; |
3250 | } |
3251 | |
3252 | /* Generate code to perform an operation specified by UNOPTAB |
3253 | on operand OP0, with result having machine-mode MODE. |
3254 | |
3255 | UNSIGNEDP is for the case where we have to widen the operands |
3256 | to perform the operation. It says to use zero-extension. |
3257 | |
3258 | If TARGET is nonzero, the value |
3259 | is generated there, if it is convenient to do so. |
3260 | In all cases an rtx is returned for the locus of the value; |
3261 | this may or may not be TARGET. */ |
3262 | |
3263 | rtx |
3264 | expand_unop (machine_mode mode, optab unoptab, rtx op0, rtx target, |
3265 | int unsignedp) |
3266 | { |
3267 | enum mode_class mclass = GET_MODE_CLASS (mode); |
3268 | machine_mode wider_mode; |
3269 | scalar_int_mode int_mode; |
3270 | scalar_float_mode float_mode; |
3271 | rtx temp; |
3272 | rtx libfunc; |
3273 | |
3274 | temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp); |
3275 | if (temp) |
3276 | return temp; |
3277 | |
3278 | /* It can't be done in this mode. Can we open-code it in a wider mode? */ |
3279 | |
3280 | /* Widening (or narrowing) clz needs special treatment. */ |
3281 | if (unoptab == clz_optab) |
3282 | { |
3283 | if (is_a <scalar_int_mode> (m: mode, result: &int_mode)) |
3284 | { |
3285 | temp = widen_leading (mode: int_mode, op0, target, unoptab); |
3286 | if (temp) |
3287 | return temp; |
3288 | |
3289 | if (GET_MODE_SIZE (mode: int_mode) == 2 * UNITS_PER_WORD |
3290 | && optab_handler (op: unoptab, mode: word_mode) != CODE_FOR_nothing) |
3291 | { |
3292 | temp = expand_doubleword_clz_ctz_ffs (mode: int_mode, op0, target, |
3293 | unoptab); |
3294 | if (temp) |
3295 | return temp; |
3296 | } |
3297 | } |
3298 | |
3299 | goto try_libcall; |
3300 | } |
3301 | |
3302 | if (unoptab == clrsb_optab) |
3303 | { |
3304 | if (is_a <scalar_int_mode> (m: mode, result: &int_mode)) |
3305 | { |
3306 | temp = widen_leading (mode: int_mode, op0, target, unoptab); |
3307 | if (temp) |
3308 | return temp; |
3309 | temp = expand_clrsb_using_clz (mode: int_mode, op0, target); |
3310 | if (temp) |
3311 | return temp; |
3312 | } |
3313 | goto try_libcall; |
3314 | } |
3315 | |
3316 | if (unoptab == popcount_optab |
3317 | && is_a <scalar_int_mode> (m: mode, result: &int_mode) |
3318 | && GET_MODE_SIZE (mode: int_mode) == 2 * UNITS_PER_WORD |
3319 | && optab_handler (op: unoptab, mode: word_mode) != CODE_FOR_nothing |
3320 | && optimize_insn_for_speed_p ()) |
3321 | { |
3322 | temp = expand_doubleword_popcount (mode: int_mode, op0, target); |
3323 | if (temp) |
3324 | return temp; |
3325 | } |
3326 | |
3327 | if (unoptab == parity_optab |
3328 | && is_a <scalar_int_mode> (m: mode, result: &int_mode) |
3329 | && GET_MODE_SIZE (mode: int_mode) == 2 * UNITS_PER_WORD |
3330 | && (optab_handler (op: unoptab, mode: word_mode) != CODE_FOR_nothing |
3331 | || optab_handler (op: popcount_optab, mode: word_mode) != CODE_FOR_nothing) |
3332 | && optimize_insn_for_speed_p ()) |
3333 | { |
3334 | temp = expand_doubleword_parity (mode: int_mode, op0, target); |
3335 | if (temp) |
3336 | return temp; |
3337 | } |
3338 | |
3339 | /* Widening (or narrowing) bswap needs special treatment. */ |
3340 | if (unoptab == bswap_optab) |
3341 | { |
3342 | /* HImode is special because in this mode BSWAP is equivalent to ROTATE |
3343 | or ROTATERT. First try these directly; if this fails, then try the |
3344 | obvious pair of shifts with allowed widening, as this will probably |
3345 | always be more efficient than the other fallback methods. */
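	 /* (For illustration only: bswap of the 16-bit value 0x1234 is
	    0x3412, which is exactly a rotate by 8 bits in either direction;
	    the shift fallback computes (x << 8) | (x >> 8) instead.)  */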
3346 | if (mode == HImode) |
3347 | { |
3348 | rtx_insn *last; |
3349 | rtx temp1, temp2; |
3350 | |
3351 | if (optab_handler (op: rotl_optab, mode) != CODE_FOR_nothing) |
3352 | { |
3353 | temp = expand_binop (mode, binoptab: rotl_optab, op0, |
3354 | op1: gen_int_shift_amount (mode, 8), |
3355 | target, unsignedp, methods: OPTAB_DIRECT); |
3356 | if (temp) |
3357 | return temp; |
3358 | } |
3359 | |
3360 | if (optab_handler (op: rotr_optab, mode) != CODE_FOR_nothing) |
3361 | { |
3362 | temp = expand_binop (mode, binoptab: rotr_optab, op0, |
3363 | op1: gen_int_shift_amount (mode, 8), |
3364 | target, unsignedp, methods: OPTAB_DIRECT); |
3365 | if (temp) |
3366 | return temp; |
3367 | } |
3368 | |
3369 | last = get_last_insn (); |
3370 | |
3371 | temp1 = expand_binop (mode, binoptab: ashl_optab, op0, |
3372 | op1: gen_int_shift_amount (mode, 8), NULL_RTX, |
3373 | unsignedp, methods: OPTAB_WIDEN); |
3374 | temp2 = expand_binop (mode, binoptab: lshr_optab, op0, |
3375 | op1: gen_int_shift_amount (mode, 8), NULL_RTX, |
3376 | unsignedp, methods: OPTAB_WIDEN); |
3377 | if (temp1 && temp2) |
3378 | { |
3379 | temp = expand_binop (mode, binoptab: ior_optab, op0: temp1, op1: temp2, target, |
3380 | unsignedp, methods: OPTAB_WIDEN); |
3381 | if (temp) |
3382 | return temp; |
3383 | } |
3384 | |
3385 | delete_insns_since (last); |
3386 | } |
3387 | |
3388 | if (is_a <scalar_int_mode> (m: mode, result: &int_mode)) |
3389 | { |
3390 | temp = widen_bswap (mode: int_mode, op0, target); |
3391 | if (temp) |
3392 | return temp; |
3393 | |
3394 | /* We do not provide a 128-bit bswap in libgcc so force the use of |
3395 | a double bswap for 64-bit targets. */ |
3396 | if (GET_MODE_SIZE (mode: int_mode) == 2 * UNITS_PER_WORD |
3397 | && (UNITS_PER_WORD == 8 |
3398 | || optab_handler (op: unoptab, mode: word_mode) != CODE_FOR_nothing)) |
3399 | { |
3400 | temp = expand_doubleword_bswap (mode, op: op0, target); |
3401 | if (temp) |
3402 | return temp; |
3403 | } |
3404 | } |
3405 | |
3406 | goto try_libcall; |
3407 | } |
3408 | |
3409 | if (CLASS_HAS_WIDER_MODES_P (mclass)) |
3410 | FOR_EACH_WIDER_MODE (wider_mode, mode) |
3411 | { |
3412 | if (optab_handler (op: unoptab, mode: wider_mode) != CODE_FOR_nothing) |
3413 | { |
3414 | rtx xop0 = op0; |
3415 | rtx_insn *last = get_last_insn (); |
3416 | |
3417 | /* For certain operations, we need not actually extend |
3418 | the narrow operand, as long as we will truncate the |
3419 | results to the same narrowness. */ |
3420 | |
3421 | xop0 = widen_operand (op: xop0, mode: wider_mode, oldmode: mode, unsignedp, |
3422 | no_extend: (unoptab == neg_optab |
3423 | || unoptab == one_cmpl_optab) |
3424 | && mclass == MODE_INT); |
3425 | |
3426 | temp = expand_unop (mode: wider_mode, unoptab, op0: xop0, NULL_RTX, |
3427 | unsignedp); |
3428 | |
3429 | if (temp) |
3430 | { |
3431 | if (mclass != MODE_INT |
3432 | || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode)) |
3433 | { |
3434 | if (target == 0) |
3435 | target = gen_reg_rtx (mode); |
3436 | convert_move (target, temp, 0); |
3437 | return target; |
3438 | } |
3439 | else |
3440 | return gen_lowpart (mode, temp); |
3441 | } |
3442 | else |
3443 | delete_insns_since (last); |
3444 | } |
3445 | } |
3446 | |
3447 | /* These can be done a word at a time. */ |
3448 | if (unoptab == one_cmpl_optab |
3449 | && is_int_mode (mode, int_mode: &int_mode) |
3450 | && GET_MODE_SIZE (mode: int_mode) > UNITS_PER_WORD |
3451 | && optab_handler (op: unoptab, mode: word_mode) != CODE_FOR_nothing) |
3452 | { |
3453 | int i; |
3454 | rtx_insn *insns; |
3455 | |
3456 | if (target == 0 |
3457 | || target == op0 |
3458 | || reg_overlap_mentioned_p (target, op0) |
3459 | || !valid_multiword_target_p (target)) |
3460 | target = gen_reg_rtx (int_mode); |
3461 | |
3462 | start_sequence (); |
3463 | |
3464 | /* Do the actual arithmetic. */ |
3465 | for (i = 0; i < GET_MODE_BITSIZE (mode: int_mode) / BITS_PER_WORD; i++) |
3466 | { |
3467 | rtx target_piece = operand_subword (target, i, 1, int_mode); |
3468 | rtx x = expand_unop (mode: word_mode, unoptab, |
3469 | op0: operand_subword_force (op0, i, int_mode), |
3470 | target: target_piece, unsignedp); |
3471 | |
3472 | if (target_piece != x) |
3473 | emit_move_insn (target_piece, x); |
3474 | } |
3475 | |
3476 | insns = get_insns (); |
3477 | end_sequence (); |
3478 | |
3479 | emit_insn (insns); |
3480 | return target; |
3481 | } |
3482 | |
3483 | /* Emit ~op0 as op0 ^ -1. */ |
3484 | if (unoptab == one_cmpl_optab |
3485 | && (SCALAR_INT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_VECTOR_INT) |
3486 | && optab_handler (op: xor_optab, mode) != CODE_FOR_nothing) |
3487 | { |
3488 | temp = expand_binop (mode, binoptab: xor_optab, op0, CONSTM1_RTX (mode), |
3489 | target, unsignedp, methods: OPTAB_DIRECT); |
3490 | if (temp) |
3491 | return temp; |
3492 | } |
3493 | |
3494 | if (optab_to_code (op: unoptab) == NEG) |
3495 | { |
3496 | /* Try negating floating point values by flipping the sign bit. */ |
3497 | if (is_a <scalar_float_mode> (m: mode, result: &float_mode)) |
3498 | { |
3499 | temp = expand_absneg_bit (code: NEG, mode: float_mode, op0, target); |
3500 | if (temp) |
3501 | return temp; |
3502 | } |
3503 | |
3504 | /* If there is no negation pattern, and we have no negative zero, |
3505 | try subtracting from zero. */ |
3506 | if (!HONOR_SIGNED_ZEROS (mode)) |
3507 | { |
3508 | temp = expand_binop (mode, binoptab: (unoptab == negv_optab |
3509 | ? subv_optab : sub_optab), |
3510 | CONST0_RTX (mode), op1: op0, target, |
3511 | unsignedp, methods: OPTAB_DIRECT); |
3512 | if (temp) |
3513 | return temp; |
3514 | } |
3515 | } |
3516 | |
3517 | /* Try calculating parity (x) as popcount (x) % 2. */ |
3518 | if (unoptab == parity_optab && is_a <scalar_int_mode> (m: mode, result: &int_mode)) |
3519 | { |
3520 | temp = expand_parity (mode: int_mode, op0, target); |
3521 | if (temp) |
3522 | return temp; |
3523 | } |
3524 | |
3525 | /* Try implementing ffs (x) in terms of ctz (x) or clz (x). */
3526 | if (unoptab == ffs_optab && is_a <scalar_int_mode> (m: mode, result: &int_mode)) |
3527 | { |
3528 | temp = expand_ffs (mode: int_mode, op0, target); |
3529 | if (temp) |
3530 | return temp; |
3531 | } |
3532 | |
3533 | /* Try implementing ctz (x) in terms of clz (x). */ |
3534 | if (unoptab == ctz_optab && is_a <scalar_int_mode> (m: mode, result: &int_mode)) |
3535 | { |
3536 | temp = expand_ctz (mode: int_mode, op0, target); |
3537 | if (temp) |
3538 | return temp; |
3539 | } |
3540 | |
3541 | if ((unoptab == ctz_optab || unoptab == ffs_optab) |
3542 | && optimize_insn_for_speed_p () |
3543 | && is_a <scalar_int_mode> (m: mode, result: &int_mode) |
3544 | && GET_MODE_SIZE (mode: int_mode) == 2 * UNITS_PER_WORD |
3545 | && (optab_handler (op: unoptab, mode: word_mode) != CODE_FOR_nothing |
3546 | || optab_handler (op: ctz_optab, mode: word_mode) != CODE_FOR_nothing)) |
3547 | { |
3548 | temp = expand_doubleword_clz_ctz_ffs (mode: int_mode, op0, target, unoptab); |
3549 | if (temp) |
3550 | return temp; |
3551 | } |
3552 | |
3553 | try_libcall: |
3554 | /* Now try a library call in this mode. */ |
3555 | libfunc = optab_libfunc (unoptab, mode); |
3556 | if (libfunc) |
3557 | { |
3558 | rtx_insn *insns; |
3559 | rtx value; |
3560 | rtx eq_value; |
3561 | machine_mode outmode = mode; |
3562 | |
3563 | /* All of these functions return small values. Thus we choose to |
3564 | have them return something that isn't a double-word. */ |
3565 | if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab |
3566 | || unoptab == clrsb_optab || unoptab == popcount_optab |
3567 | || unoptab == parity_optab) |
3568 | outmode |
3569 | = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node), |
3570 | optab_libfunc (unoptab, mode))); |
3571 | |
3572 | start_sequence (); |
3573 | |
3574 | /* Pass 1 for NO_QUEUE so we don't lose any increments |
3575 | if the libcall is cse'd or moved. */ |
3576 | value = emit_library_call_value (fun: libfunc, NULL_RTX, fn_type: LCT_CONST, outmode, |
3577 | arg1: op0, arg1_mode: mode); |
3578 | insns = get_insns (); |
3579 | end_sequence (); |
3580 | |
3581 | target = gen_reg_rtx (outmode); |
3582 | bool trapv = trapv_unoptab_p (unoptab); |
3583 | if (trapv) |
3584 | eq_value = NULL_RTX; |
3585 | else |
3586 | { |
3587 | eq_value = gen_rtx_fmt_e (optab_to_code (unoptab), mode, op0); |
3588 | if (GET_MODE_UNIT_SIZE (outmode) < GET_MODE_UNIT_SIZE (mode)) |
3589 | eq_value = simplify_gen_unary (code: TRUNCATE, mode: outmode, op: eq_value, op_mode: mode); |
3590 | else if (GET_MODE_UNIT_SIZE (outmode) > GET_MODE_UNIT_SIZE (mode)) |
3591 | eq_value = simplify_gen_unary (code: ZERO_EXTEND, |
3592 | mode: outmode, op: eq_value, op_mode: mode); |
3593 | } |
3594 | emit_libcall_block_1 (insns, target, value, eq_value, trapv); |
3595 | |
3596 | return target; |
3597 | } |
3598 | |
3599 | /* It can't be done in this mode. Can we do it in a wider mode? */ |
3600 | |
3601 | if (CLASS_HAS_WIDER_MODES_P (mclass)) |
3602 | { |
3603 | FOR_EACH_WIDER_MODE (wider_mode, mode) |
3604 | { |
3605 | if (optab_handler (op: unoptab, mode: wider_mode) != CODE_FOR_nothing |
3606 | || optab_libfunc (unoptab, wider_mode)) |
3607 | { |
3608 | rtx xop0 = op0; |
3609 | rtx_insn *last = get_last_insn (); |
3610 | |
3611 | /* For certain operations, we need not actually extend |
3612 | the narrow operand, as long as we will truncate the |
3613 | results to the same narrowness. */ |
3614 | xop0 = widen_operand (op: xop0, mode: wider_mode, oldmode: mode, unsignedp, |
3615 | no_extend: (unoptab == neg_optab |
3616 | || unoptab == one_cmpl_optab |
3617 | || unoptab == bswap_optab) |
3618 | && mclass == MODE_INT); |
3619 | |
3620 | temp = expand_unop (mode: wider_mode, unoptab, op0: xop0, NULL_RTX, |
3621 | unsignedp); |
3622 | |
3623 | /* If we are generating clz using wider mode, adjust the |
3624 | result. Similarly for clrsb. */ |
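	    /* (Illustrative note: e.g. a QImode value zero-extended to
	       SImode has 32 - 8 == 24 extra leading zeros, so 24 must be
	       subtracted from the SImode clz result.)  */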
3625 | if ((unoptab == clz_optab || unoptab == clrsb_optab) |
3626 | && temp != 0) |
3627 | { |
3628 | scalar_int_mode wider_int_mode |
3629 | = as_a <scalar_int_mode> (m: wider_mode); |
3630 | int_mode = as_a <scalar_int_mode> (m: mode); |
3631 | temp = expand_binop |
3632 | (mode: wider_mode, binoptab: sub_optab, op0: temp, |
3633 | op1: gen_int_mode (GET_MODE_PRECISION (mode: wider_int_mode) |
3634 | - GET_MODE_PRECISION (mode: int_mode), |
3635 | wider_int_mode), |
3636 | target, unsignedp: true, methods: OPTAB_DIRECT); |
3637 | } |
3638 | |
3639 | /* Likewise for bswap. */ |
3640 | if (unoptab == bswap_optab && temp != 0) |
3641 | { |
3642 | scalar_int_mode wider_int_mode |
3643 | = as_a <scalar_int_mode> (m: wider_mode); |
3644 | int_mode = as_a <scalar_int_mode> (m: mode); |
3645 | gcc_assert (GET_MODE_PRECISION (wider_int_mode) |
3646 | == GET_MODE_BITSIZE (wider_int_mode) |
3647 | && GET_MODE_PRECISION (int_mode) |
3648 | == GET_MODE_BITSIZE (int_mode)); |
3649 | |
3650 | temp = expand_shift (RSHIFT_EXPR, wider_int_mode, temp, |
3651 | GET_MODE_BITSIZE (mode: wider_int_mode) |
3652 | - GET_MODE_BITSIZE (mode: int_mode), |
3653 | NULL_RTX, true); |
3654 | } |
3655 | |
3656 | if (temp) |
3657 | { |
3658 | if (mclass != MODE_INT) |
3659 | { |
3660 | if (target == 0) |
3661 | target = gen_reg_rtx (mode); |
3662 | convert_move (target, temp, 0); |
3663 | return target; |
3664 | } |
3665 | else |
3666 | return gen_lowpart (mode, temp); |
3667 | } |
3668 | else |
3669 | delete_insns_since (last); |
3670 | } |
3671 | } |
3672 | } |
3673 | |
3674 | /* One final attempt at implementing negation via subtraction, |
3675 | this time allowing widening of the operand. */ |
3676 | if (optab_to_code (op: unoptab) == NEG && !HONOR_SIGNED_ZEROS (mode)) |
3677 | { |
3678 | rtx temp; |
3679 | temp = expand_binop (mode, |
3680 | binoptab: unoptab == negv_optab ? subv_optab : sub_optab, |
3681 | CONST0_RTX (mode), op1: op0, |
3682 | target, unsignedp, methods: OPTAB_LIB_WIDEN); |
3683 | if (temp) |
3684 | return temp; |
3685 | } |
3686 | |
3687 | return 0; |
3688 | } |
3689 | |
3690 | /* Emit code to compute the absolute value of OP0, with result to |
3691 | TARGET if convenient. (TARGET may be 0.) The return value says |
3692 | where the result actually is to be found. |
3693 | |
3694 | MODE is the mode of the operand and of the result. */
3698 | |
3699 | rtx |
3700 | expand_abs_nojump (machine_mode mode, rtx op0, rtx target, |
3701 | int result_unsignedp) |
3702 | { |
3703 | rtx temp; |
3704 | |
3705 | if (GET_MODE_CLASS (mode) != MODE_INT |
3706 | || ! flag_trapv) |
3707 | result_unsignedp = 1; |
3708 | |
3709 | /* First try to do it with a special abs instruction. */ |
3710 | temp = expand_unop (mode, unoptab: result_unsignedp ? abs_optab : absv_optab, |
3711 | op0, target, unsignedp: 0); |
3712 | if (temp != 0) |
3713 | return temp; |
3714 | |
3715 | /* For floating point modes, try clearing the sign bit. */ |
3716 | scalar_float_mode float_mode; |
3717 | if (is_a <scalar_float_mode> (m: mode, result: &float_mode)) |
3718 | { |
3719 | temp = expand_absneg_bit (code: ABS, mode: float_mode, op0, target); |
3720 | if (temp) |
3721 | return temp; |
3722 | } |
3723 | |
3724 | /* If we have a MAX insn, we can do this as MAX (x, -x). */ |
3725 | if (optab_handler (op: smax_optab, mode) != CODE_FOR_nothing |
3726 | && !HONOR_SIGNED_ZEROS (mode)) |
3727 | { |
3728 | rtx_insn *last = get_last_insn (); |
3729 | |
3730 | temp = expand_unop (mode, unoptab: result_unsignedp ? neg_optab : negv_optab, |
3731 | op0, NULL_RTX, unsignedp: 0); |
3732 | if (temp != 0) |
3733 | temp = expand_binop (mode, binoptab: smax_optab, op0, op1: temp, target, unsignedp: 0, |
3734 | methods: OPTAB_WIDEN); |
3735 | |
3736 | if (temp != 0) |
3737 | return temp; |
3738 | |
3739 | delete_insns_since (last); |
3740 | } |
3741 | |
3742 | /* If this machine has expensive jumps, we can do integer absolute |
3743 | value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)), |
3744 | where W is the width of MODE. */ |
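  /* (Worked example, for illustration: with W == 32 and x == -5, the
     arithmetic shift gives s == -1, (s ^ x) == 4, and 4 - (-1) == 5.
     For x >= 0, s == 0 and the expression reduces to x.)  */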
3745 | |
3746 | scalar_int_mode int_mode; |
3747 | if (is_int_mode (mode, int_mode: &int_mode) |
3748 | && BRANCH_COST (optimize_insn_for_speed_p (), |
3749 | false) >= 2) |
3750 | { |
3751 | rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0, |
3752 | GET_MODE_PRECISION (mode: int_mode) - 1, |
3753 | NULL_RTX, 0); |
3754 | |
3755 | temp = expand_binop (mode: int_mode, binoptab: xor_optab, op0: extended, op1: op0, target, unsignedp: 0, |
3756 | methods: OPTAB_LIB_WIDEN); |
3757 | if (temp != 0) |
3758 | temp = expand_binop (mode: int_mode, |
3759 | binoptab: result_unsignedp ? sub_optab : subv_optab, |
3760 | op0: temp, op1: extended, target, unsignedp: 0, methods: OPTAB_LIB_WIDEN); |
3761 | |
3762 | if (temp != 0) |
3763 | return temp; |
3764 | } |
3765 | |
3766 | return NULL_RTX; |
3767 | } |
3768 | |
3769 | rtx |
3770 | expand_abs (machine_mode mode, rtx op0, rtx target, |
3771 | int result_unsignedp, int safe) |
3772 | { |
3773 | rtx temp; |
3774 | rtx_code_label *op1; |
3775 | |
3776 | if (GET_MODE_CLASS (mode) != MODE_INT |
3777 | || ! flag_trapv) |
3778 | result_unsignedp = 1; |
3779 | |
3780 | temp = expand_abs_nojump (mode, op0, target, result_unsignedp); |
3781 | if (temp != 0) |
3782 | return temp; |
3783 | |
3784 | /* If that does not win, use conditional jump and negate. */ |
3785 | |
3786 | /* It is safe to use the target if it is the same |
3787 | as the source and it is also a pseudo register. */
3788 | if (op0 == target && REG_P (op0) |
3789 | && REGNO (op0) >= FIRST_PSEUDO_REGISTER) |
3790 | safe = 1; |
3791 | |
3792 | op1 = gen_label_rtx (); |
3793 | if (target == 0 || ! safe |
3794 | || GET_MODE (target) != mode |
3795 | || (MEM_P (target) && MEM_VOLATILE_P (target)) |
3796 | || (REG_P (target) |
3797 | && REGNO (target) < FIRST_PSEUDO_REGISTER)) |
3798 | target = gen_reg_rtx (mode); |
3799 | |
3800 | emit_move_insn (target, op0); |
3801 | NO_DEFER_POP; |
3802 | |
3803 | do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode, |
3804 | NULL_RTX, NULL, op1, |
3805 | profile_probability::uninitialized ()); |
3806 | |
3807 | op0 = expand_unop (mode, unoptab: result_unsignedp ? neg_optab : negv_optab, |
3808 | op0: target, target, unsignedp: 0); |
3809 | if (op0 != target) |
3810 | emit_move_insn (target, op0); |
3811 | emit_label (op1); |
3812 | OK_DEFER_POP; |
3813 | return target; |
3814 | } |
3815 | |
3816 | /* Emit code to compute the one's complement absolute value of OP0 |
3817 | (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient. |
3818 | (TARGET may be NULL_RTX.) The return value says where the result |
3819 | actually is to be found. |
3820 | |
3821 | MODE is the mode of the operand and of the result. */
3823 | |
3824 | rtx |
3825 | expand_one_cmpl_abs_nojump (machine_mode mode, rtx op0, rtx target) |
3826 | { |
3827 | rtx temp; |
3828 | |
3829 | /* Not applicable for floating point modes. */ |
3830 | if (FLOAT_MODE_P (mode)) |
3831 | return NULL_RTX; |
3832 | |
3833 | /* If we have a MAX insn, we can do this as MAX (x, ~x). */ |
3834 | if (optab_handler (op: smax_optab, mode) != CODE_FOR_nothing) |
3835 | { |
3836 | rtx_insn *last = get_last_insn (); |
3837 | |
3838 | temp = expand_unop (mode, unoptab: one_cmpl_optab, op0, NULL_RTX, unsignedp: 0); |
3839 | if (temp != 0) |
3840 | temp = expand_binop (mode, binoptab: smax_optab, op0, op1: temp, target, unsignedp: 0, |
3841 | methods: OPTAB_WIDEN); |
3842 | |
3843 | if (temp != 0) |
3844 | return temp; |
3845 | |
3846 | delete_insns_since (last); |
3847 | } |
3848 | |
3849 | /* If this machine has expensive jumps, we can do one's complement |
3850 | absolute value of X as (((signed) x >> (W-1)) ^ x). */ |
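  /* (Worked example, for illustration: with W == 32 and x == -5, the
     arithmetic shift gives -1 and -1 ^ -5 == 4 == ~(-5); for x >= 0 the
     shift gives 0 and the value is unchanged.)  */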
3851 | |
3852 | scalar_int_mode int_mode; |
3853 | if (is_int_mode (mode, int_mode: &int_mode) |
3854 | && BRANCH_COST (optimize_insn_for_speed_p (), |
3855 | false) >= 2) |
3856 | { |
3857 | rtx extended = expand_shift (RSHIFT_EXPR, int_mode, op0, |
3858 | GET_MODE_PRECISION (mode: int_mode) - 1, |
3859 | NULL_RTX, 0); |
3860 | |
3861 | temp = expand_binop (mode: int_mode, binoptab: xor_optab, op0: extended, op1: op0, target, unsignedp: 0, |
3862 | methods: OPTAB_LIB_WIDEN); |
3863 | |
3864 | if (temp != 0) |
3865 | return temp; |
3866 | } |
3867 | |
3868 | return NULL_RTX; |
3869 | } |
3870 | |
3871 | /* A subroutine of expand_copysign, perform the copysign operation using the |
3872 | abs and neg primitives advertised to exist on the target. The assumption |
3873 | is that we have a split register file, and leaving op0 in fp registers, |
3874 | and not playing with subregs so much, will help the register allocator. */ |
3875 | |
3876 | static rtx |
3877 | expand_copysign_absneg (scalar_float_mode mode, rtx op0, rtx op1, rtx target, |
3878 | int bitpos, bool op0_is_abs) |
3879 | { |
3880 | scalar_int_mode imode; |
3881 | enum insn_code icode; |
3882 | rtx sign; |
3883 | rtx_code_label *label; |
3884 | |
3885 | if (target == op1) |
3886 | target = NULL_RTX; |
3887 | |
3888 | /* Check if the back end provides an insn that handles signbit for the |
3889 | argument's mode. */ |
3890 | icode = optab_handler (op: signbit_optab, mode); |
3891 | if (icode != CODE_FOR_nothing) |
3892 | { |
3893 | imode = as_a <scalar_int_mode> (m: insn_data[(int) icode].operand[0].mode); |
3894 | sign = gen_reg_rtx (imode); |
3895 | emit_unop_insn (icode, sign, op1, UNKNOWN); |
3896 | } |
3897 | else |
3898 | { |
3899 | if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD) |
3900 | { |
3901 | if (!int_mode_for_mode (mode).exists (mode: &imode)) |
3902 | return NULL_RTX; |
3903 | op1 = gen_lowpart (imode, op1); |
3904 | } |
3905 | else |
3906 | { |
3907 | int word; |
3908 | |
3909 | imode = word_mode; |
3910 | if (FLOAT_WORDS_BIG_ENDIAN) |
3911 | word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD; |
3912 | else |
3913 | word = bitpos / BITS_PER_WORD; |
3914 | bitpos = bitpos % BITS_PER_WORD; |
3915 | op1 = operand_subword_force (op1, word, mode); |
3916 | } |
3917 | |
3918 | wide_int mask = wi::set_bit_in_zero (bit: bitpos, precision: GET_MODE_PRECISION (mode: imode)); |
3919 | sign = expand_binop (mode: imode, binoptab: and_optab, op0: op1, |
3920 | op1: immed_wide_int_const (mask, imode), |
3921 | NULL_RTX, unsignedp: 1, methods: OPTAB_LIB_WIDEN); |
3922 | } |
3923 | |
3924 | if (!op0_is_abs) |
3925 | { |
3926 | op0 = expand_unop (mode, unoptab: abs_optab, op0, target, unsignedp: 0); |
3927 | if (op0 == NULL) |
3928 | return NULL_RTX; |
3929 | target = op0; |
3930 | } |
3931 | else |
3932 | { |
3933 | if (target == NULL_RTX) |
3934 | target = copy_to_reg (op0); |
3935 | else |
3936 | emit_move_insn (target, op0); |
3937 | } |
3938 | |
3939 | label = gen_label_rtx (); |
3940 | emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label); |
3941 | |
3942 | if (CONST_DOUBLE_AS_FLOAT_P (op0)) |
3943 | op0 = simplify_unary_operation (code: NEG, mode, op: op0, op_mode: mode); |
3944 | else |
3945 | op0 = expand_unop (mode, unoptab: neg_optab, op0, target, unsignedp: 0); |
3946 | if (op0 != target) |
3947 | emit_move_insn (target, op0); |
3948 | |
3949 | emit_label (label); |
3950 | |
3951 | return target; |
3952 | } |
3953 | |
3954 | |
3955 | /* A subroutine of expand_copysign, perform the entire copysign operation |
3956 | with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS |
3957 | is true if op0 is known to have its sign bit clear. */ |
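/* (For illustration only: with MASK having just the sign bit set, the
   result is (op0 & ~MASK) | (op1 & MASK); e.g. copysign (1.0f, -2.0f)
   becomes (0x3f800000 & 0x7fffffff) | (0xc0000000 & 0x80000000)
   == 0xbf800000 == -1.0f in IEEE single precision.)  */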
3958 | |
3959 | static rtx |
3960 | expand_copysign_bit (scalar_float_mode mode, rtx op0, rtx op1, rtx target, |
3961 | int bitpos, bool op0_is_abs) |
3962 | { |
3963 | scalar_int_mode imode; |
3964 | int word, nwords, i; |
3965 | rtx temp; |
3966 | rtx_insn *insns; |
3967 | |
3968 | if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD) |
3969 | { |
3970 | if (!int_mode_for_mode (mode).exists (mode: &imode)) |
3971 | return NULL_RTX; |
3972 | word = 0; |
3973 | nwords = 1; |
3974 | } |
3975 | else |
3976 | { |
3977 | imode = word_mode; |
3978 | |
3979 | if (FLOAT_WORDS_BIG_ENDIAN) |
3980 | word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD; |
3981 | else |
3982 | word = bitpos / BITS_PER_WORD; |
3983 | bitpos = bitpos % BITS_PER_WORD; |
3984 | nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD; |
3985 | } |
3986 | |
3987 | wide_int mask = wi::set_bit_in_zero (bit: bitpos, precision: GET_MODE_PRECISION (mode: imode)); |
3988 | |
3989 | if (target == 0 |
3990 | || target == op0 |
3991 | || target == op1 |
3992 | || reg_overlap_mentioned_p (target, op0) |
3993 | || reg_overlap_mentioned_p (target, op1) |
3994 | || (nwords > 1 && !valid_multiword_target_p (target))) |
3995 | target = gen_reg_rtx (mode); |
3996 | |
3997 | if (nwords > 1) |
3998 | { |
3999 | start_sequence (); |
4000 | |
4001 | for (i = 0; i < nwords; ++i) |
4002 | { |
4003 | rtx targ_piece = operand_subword (target, i, 1, mode); |
4004 | rtx op0_piece = operand_subword_force (op0, i, mode); |
4005 | |
4006 | if (i == word) |
4007 | { |
4008 | if (!op0_is_abs) |
4009 | op0_piece |
4010 | = expand_binop (mode: imode, binoptab: and_optab, op0: op0_piece, |
4011 | op1: immed_wide_int_const (~mask, imode), |
4012 | NULL_RTX, unsignedp: 1, methods: OPTAB_LIB_WIDEN); |
4013 | op1 = expand_binop (mode: imode, binoptab: and_optab, |
4014 | op0: operand_subword_force (op1, i, mode), |
4015 | op1: immed_wide_int_const (mask, imode), |
4016 | NULL_RTX, unsignedp: 1, methods: OPTAB_LIB_WIDEN); |
4017 | |
4018 | temp = expand_binop (mode: imode, binoptab: ior_optab, op0: op0_piece, op1, |
4019 | target: targ_piece, unsignedp: 1, methods: OPTAB_LIB_WIDEN); |
4020 | if (temp != targ_piece) |
4021 | emit_move_insn (targ_piece, temp); |
4022 | } |
4023 | else |
4024 | emit_move_insn (targ_piece, op0_piece); |
4025 | } |
4026 | |
4027 | insns = get_insns (); |
4028 | end_sequence (); |
4029 | |
4030 | emit_insn (insns); |
4031 | } |
4032 | else |
4033 | { |
4034 | op1 = expand_binop (mode: imode, binoptab: and_optab, gen_lowpart (imode, op1), |
4035 | op1: immed_wide_int_const (mask, imode), |
4036 | NULL_RTX, unsignedp: 1, methods: OPTAB_LIB_WIDEN); |
4037 | |
4038 | op0 = gen_lowpart (imode, op0); |
4039 | if (!op0_is_abs) |
4040 | op0 = expand_binop (mode: imode, binoptab: and_optab, op0, |
4041 | op1: immed_wide_int_const (~mask, imode), |
4042 | NULL_RTX, unsignedp: 1, methods: OPTAB_LIB_WIDEN); |
4043 | |
4044 | temp = expand_binop (mode: imode, binoptab: ior_optab, op0, op1, |
4045 | gen_lowpart (imode, target), unsignedp: 1, methods: OPTAB_LIB_WIDEN); |
4046 | target = lowpart_subreg_maybe_copy (omode: mode, val: temp, imode); |
4047 | } |
4048 | |
4049 | return target; |
4050 | } |
4051 | |
4052 | /* Expand the C99 copysign operation. OP0 and OP1 must be the same |
4053 | scalar floating point mode. Return NULL if we do not know how to |
4054 | expand the operation inline. */ |
4055 | |
4056 | rtx |
4057 | expand_copysign (rtx op0, rtx op1, rtx target) |
4058 | { |
4059 | scalar_float_mode mode; |
4060 | const struct real_format *fmt; |
4061 | bool op0_is_abs; |
4062 | rtx temp; |
4063 | |
4064 | mode = as_a <scalar_float_mode> (GET_MODE (op0)); |
4065 | gcc_assert (GET_MODE (op1) == mode); |
4066 | |
4067 | /* First try to do it with a special instruction. */ |
4068 | temp = expand_binop (mode, binoptab: copysign_optab, op0, op1, |
4069 | target, unsignedp: 0, methods: OPTAB_DIRECT); |
4070 | if (temp) |
4071 | return temp; |
4072 | |
4073 | fmt = REAL_MODE_FORMAT (mode); |
4074 | if (fmt == NULL || !fmt->has_signed_zero) |
4075 | return NULL_RTX; |
4076 | |
4077 | op0_is_abs = false; |
4078 | if (CONST_DOUBLE_AS_FLOAT_P (op0)) |
4079 | { |
4080 | if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0))) |
4081 | op0 = simplify_unary_operation (code: ABS, mode, op: op0, op_mode: mode); |
4082 | op0_is_abs = true; |
4083 | } |
4084 | |
4085 | if (fmt->signbit_ro >= 0 |
4086 | && (CONST_DOUBLE_AS_FLOAT_P (op0) |
4087 | || (optab_handler (op: neg_optab, mode) != CODE_FOR_nothing |
4088 | && optab_handler (op: abs_optab, mode) != CODE_FOR_nothing))) |
4089 | { |
4090 | temp = expand_copysign_absneg (mode, op0, op1, target, |
4091 | bitpos: fmt->signbit_ro, op0_is_abs); |
4092 | if (temp) |
4093 | return temp; |
4094 | } |
4095 | |
4096 | if (fmt->signbit_rw < 0) |
4097 | return NULL_RTX; |
4098 | return expand_copysign_bit (mode, op0, op1, target, |
4099 | bitpos: fmt->signbit_rw, op0_is_abs); |
4100 | } |
4101 | |
4102 | /* Generate an instruction whose insn-code is INSN_CODE, |
4103 | with two operands: an output TARGET and an input OP0. |
4104 | TARGET *must* be nonzero, and the output is always stored there. |
4105 | CODE is an rtx code such that (CODE OP0) is an rtx that describes |
4106 | the value that is stored into TARGET. |
4107 | |
4108 | Return false if expansion failed. */ |
4109 | |
4110 | bool |
4111 | maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0, |
4112 | enum rtx_code code) |
4113 | { |
4114 | class expand_operand ops[2]; |
4115 | rtx_insn *pat; |
4116 | |
4117 | create_output_operand (op: &ops[0], x: target, GET_MODE (target)); |
4118 | create_input_operand (op: &ops[1], value: op0, GET_MODE (op0)); |
4119 | pat = maybe_gen_insn (icode, nops: 2, ops); |
4120 | if (!pat) |
4121 | return false; |
4122 | |
4123 | if (INSN_P (pat) && NEXT_INSN (insn: pat) != NULL_RTX |
4124 | && code != UNKNOWN) |
4125 | add_equal_note (insns: pat, target: ops[0].value, code, op0: ops[1].value, NULL_RTX, |
4126 | GET_MODE (op0)); |
4127 | |
4128 | emit_insn (pat); |
4129 | |
4130 | if (ops[0].value != target) |
4131 | emit_move_insn (target, ops[0].value); |
4132 | return true; |
4133 | } |
4134 | /* Generate an instruction whose insn-code is INSN_CODE, |
4135 | with two operands: an output TARGET and an input OP0. |
4136 | TARGET *must* be nonzero, and the output is always stored there. |
4137 | CODE is an rtx code such that (CODE OP0) is an rtx that describes |
4138 | the value that is stored into TARGET. */ |
4139 | |
4140 | void |
4141 | emit_unop_insn (enum insn_code icode, rtx target, rtx op0, enum rtx_code code) |
4142 | { |
4143 | bool ok = maybe_emit_unop_insn (icode, target, op0, code); |
4144 | gcc_assert (ok); |
4145 | } |
4146 | |
4147 | struct no_conflict_data |
4148 | { |
4149 | rtx target; |
4150 | rtx_insn *first, *insn; |
4151 | bool must_stay; |
4152 | }; |
4153 | |
4154 | /* Called via note_stores by emit_libcall_block. Set P->must_stay if |
4155 | the currently examined clobber / store has to stay in the list of |
4156 | insns that constitute the actual libcall block. */ |
4157 | static void |
4158 | no_conflict_move_test (rtx dest, const_rtx set, void *p0) |
4159 | { |
4160 | struct no_conflict_data *p= (struct no_conflict_data *) p0; |
4161 | |
4162 | /* If this insn directly contributes to setting the target, it must stay. */
4163 | if (reg_overlap_mentioned_p (p->target, dest)) |
4164 | p->must_stay = true; |
4165 | /* If we haven't committed to keeping any other insns in the list yet, |
4166 | there is nothing more to check. */ |
4167 | else if (p->insn == p->first) |
4168 | return; |
4169 | /* If this insn sets / clobbers a register that feeds one of the insns |
4170 | already in the list, this insn has to stay too. */ |
4171 | else if (reg_overlap_mentioned_p (dest, PATTERN (insn: p->first)) |
4172 | || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest))) |
4173 | || reg_used_between_p (dest, p->first, p->insn) |
4174 | /* Likewise if this insn depends on a register set by a previous |
4175 | insn in the list, or if it sets a result (presumably a hard |
4176 | register) that is set or clobbered by a previous insn. |
4177 | N.B. the modified_*_p (SET_DEST...) tests applied to a MEM |
4178 | SET_DEST perform the former check on the address, and the latter |
4179 | check on the MEM. */ |
4180 | || (GET_CODE (set) == SET |
4181 | && (modified_in_p (SET_SRC (set), p->first) |
4182 | || modified_in_p (SET_DEST (set), p->first) |
4183 | || modified_between_p (SET_SRC (set), p->first, p->insn) |
4184 | || modified_between_p (SET_DEST (set), p->first, p->insn)))) |
4185 | p->must_stay = true; |
4186 | } |
4187 | |
4188 | |
4189 | /* Emit code to make a call to a constant function or a library call. |
4190 | |
4191 | INSNS is a list containing all insns emitted in the call. |
4192 | These insns leave the result in RESULT. Our job is to copy RESULT
4193 | to TARGET, which is logically equivalent to EQUIV. |
4194 | |
4195 | We first emit any insns that set a pseudo on the assumption that these are |
4196 | loading constants into registers; doing so allows them to be safely cse'ed |
4197 | between blocks. Then we emit all the other insns in the block, followed by |
4198 | an insn to move RESULT to TARGET. This last insn will have a REG_EQUAL
4199 | note with an operand of EQUIV. */ |
4200 | |
4201 | static void |
4202 | emit_libcall_block_1 (rtx_insn *insns, rtx target, rtx result, rtx equiv, |
4203 | bool equiv_may_trap) |
4204 | { |
4205 | rtx final_dest = target; |
4206 | rtx_insn *next, *last, *insn; |
4207 | |
4208 | /* If this is a reg with REG_USERVAR_P set, then it could possibly turn |
4209 | into a MEM later. Protect the libcall block from this change. */ |
4210 | if (! REG_P (target) || REG_USERVAR_P (target)) |
4211 | target = gen_reg_rtx (GET_MODE (target)); |
4212 | |
4213 | /* If we're using non-call exceptions, a libcall corresponding to an |
4214 | operation that may trap may also trap. */ |
4215 | /* ??? See the comment in front of make_reg_eh_region_note. */ |
4216 | if (cfun->can_throw_non_call_exceptions |
4217 | && (equiv_may_trap || may_trap_p (equiv))) |
4218 | { |
4219 | for (insn = insns; insn; insn = NEXT_INSN (insn)) |
4220 | if (CALL_P (insn)) |
4221 | { |
4222 | rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX); |
4223 | if (note) |
4224 | { |
4225 | int lp_nr = INTVAL (XEXP (note, 0)); |
4226 | if (lp_nr == 0 || lp_nr == INT_MIN) |
4227 | remove_note (insn, note); |
4228 | } |
4229 | } |
4230 | } |
4231 | else |
4232 | { |
4233 | /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION |
4234 | reg note to indicate that this call cannot throw or execute a nonlocal |
4235 | goto (unless there is already a REG_EH_REGION note, in which case |
4236 | we update it). */ |
4237 | for (insn = insns; insn; insn = NEXT_INSN (insn)) |
4238 | if (CALL_P (insn)) |
4239 | make_reg_eh_region_note_nothrow_nononlocal (insn); |
4240 | } |
4241 | |
4242 | /* First emit all insns that set pseudos. Remove them from the list as |
4243 | we go. Avoid insns that set pseudos which were referenced in previous |
4244 | insns. These can be generated by move_by_pieces, for example, |
4245 | to update an address. Similarly, avoid insns that reference things |
4246 | set in previous insns. */ |
4247 | |
4248 | for (insn = insns; insn; insn = next) |
4249 | { |
4250 | rtx set = single_set (insn); |
4251 | |
4252 | next = NEXT_INSN (insn); |
4253 | |
4254 | if (set != 0 && REG_P (SET_DEST (set)) |
4255 | && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER) |
4256 | { |
4257 | struct no_conflict_data data; |
4258 | |
4259 | data.target = const0_rtx; |
4260 | data.first = insns; |
4261 | data.insn = insn; |
4262 | data.must_stay = 0; |
4263 | note_stores (insn, no_conflict_move_test, &data); |
4264 | if (! data.must_stay) |
4265 | { |
4266 | if (PREV_INSN (insn)) |
4267 | SET_NEXT_INSN (PREV_INSN (insn)) = next; |
4268 | else |
4269 | insns = next; |
4270 | |
4271 | if (next) |
4272 | SET_PREV_INSN (next) = PREV_INSN (insn); |
4273 | |
4274 | add_insn (insn); |
4275 | } |
4276 | } |
4277 | |
4278 | /* Some ports use a loop to copy large arguments onto the stack. |
4279 | Don't move anything outside such a loop. */ |
4280 | if (LABEL_P (insn)) |
4281 | break; |
4282 | } |
4283 | |
4284 | /* Write the remaining insns followed by the final copy. */ |
4285 | for (insn = insns; insn; insn = next) |
4286 | { |
4287 | next = NEXT_INSN (insn); |
4288 | |
4289 | add_insn (insn); |
4290 | } |
4291 | |
4292 | last = emit_move_insn (target, result); |
4293 | if (equiv) |
4294 | set_dst_reg_note (last, REG_EQUAL, copy_rtx (equiv), target); |
4295 | |
4296 | if (final_dest != target) |
4297 | emit_move_insn (final_dest, target); |
4298 | } |
4299 | |
4300 | void |
4301 | emit_libcall_block (rtx_insn *insns, rtx target, rtx result, rtx equiv) |
4302 | { |
4303 | emit_libcall_block_1 (insns, target, result, equiv, equiv_may_trap: false); |
4304 | } |
4305 | |
4306 | /* True if we can perform a comparison of mode MODE straightforwardly. |
4307 | PURPOSE describes how this comparison will be used. CODE is the rtx |
4308 | comparison code we will be using. |
4309 | |
4310 | ??? Actually, CODE is slightly weaker than that. A target is still |
4311 | required to implement all of the normal bcc operations, but not |
4312 | required to implement all (or any) of the unordered bcc operations. */ |
4313 | |
4314 | bool |
4315 | can_compare_p (enum rtx_code code, machine_mode mode, |
4316 | enum can_compare_purpose purpose) |
4317 | { |
4318 | rtx test; |
4319 | test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx); |
4320 | do |
4321 | { |
4322 | enum insn_code icode; |
4323 | |
4324 | if (purpose == ccp_jump |
4325 | && (icode = optab_handler (op: cbranch_optab, mode)) != CODE_FOR_nothing |
4326 | && insn_operand_matches (icode, opno: 0, operand: test)) |
4327 | return true; |
4328 | if (purpose == ccp_store_flag |
4329 | && (icode = optab_handler (op: cstore_optab, mode)) != CODE_FOR_nothing |
4330 | && insn_operand_matches (icode, opno: 1, operand: test)) |
4331 | return true; |
4332 | if (purpose == ccp_cmov |
4333 | && optab_handler (op: cmov_optab, mode) != CODE_FOR_nothing) |
4334 | return true; |
4335 | |
4336 | mode = GET_MODE_WIDER_MODE (m: mode).else_void (); |
4337 | PUT_MODE (x: test, mode); |
4338 | } |
4339 | while (mode != VOIDmode); |
4340 | |
4341 | return false; |
4342 | } |
4343 | |
4344 | /* Return whether RTL code CODE corresponds to an unsigned optab. */ |
4345 | |
4346 | static bool |
4347 | unsigned_optab_p (enum rtx_code code) |
4348 | { |
4349 | return code == LTU || code == LEU || code == GTU || code == GEU; |
4350 | } |
4351 | |
4352 | /* Return whether the backend-emitted comparison for code CODE, comparing |
4353 | operands of mode VALUE_MODE and producing a result with MASK_MODE, matches |
4354 | operand OPNO of pattern ICODE. */ |
4355 | |
4356 | static bool |
4357 | insn_predicate_matches_p (enum insn_code icode, unsigned int opno, |
4358 | enum rtx_code code, machine_mode mask_mode, |
4359 | machine_mode value_mode) |
4360 | { |
4361 | rtx reg1 = alloca_raw_REG (value_mode, LAST_VIRTUAL_REGISTER + 1); |
4362 | rtx reg2 = alloca_raw_REG (value_mode, LAST_VIRTUAL_REGISTER + 2); |
4363 | rtx test = alloca_rtx_fmt_ee (code, mask_mode, reg1, reg2); |
4364 | return insn_operand_matches (icode, opno, operand: test); |
4365 | } |
4366 | |
4367 | /* Return whether the backend can emit a vector comparison (vec_cmp/vec_cmpu) |
4368 | for code CODE, comparing operands of mode VALUE_MODE and producing a result |
4369 | with MASK_MODE. */ |
4370 | |
4371 | bool |
4372 | can_vec_cmp_compare_p (enum rtx_code code, machine_mode value_mode, |
4373 | machine_mode mask_mode) |
4374 | { |
4375 | enum insn_code icode |
4376 | = get_vec_cmp_icode (vmode: value_mode, mask_mode, uns: unsigned_optab_p (code)); |
4377 | if (icode == CODE_FOR_nothing) |
4378 | return false; |
4379 | |
4380 | return insn_predicate_matches_p (icode, opno: 1, code, mask_mode, value_mode); |
4381 | } |
4382 | |
4383 | /* Return whether the backend can emit a vector comparison (vcond/vcondu) for |
4384 | code CODE, comparing operands of mode CMP_OP_MODE and producing a result |
4385 | with VALUE_MODE. */ |
4386 | |
4387 | bool |
4388 | can_vcond_compare_p (enum rtx_code code, machine_mode value_mode, |
4389 | machine_mode cmp_op_mode) |
4390 | { |
4391 | enum insn_code icode |
4392 | = get_vcond_icode (vmode: value_mode, cmode: cmp_op_mode, uns: unsigned_optab_p (code)); |
4393 | if (icode == CODE_FOR_nothing) |
4394 | return false; |
4395 | |
4396 | return insn_predicate_matches_p (icode, opno: 3, code, mask_mode: value_mode, value_mode: cmp_op_mode); |
4397 | } |
4398 | |
4399 | /* Return whether the backend can emit vector set instructions for inserting |
4400 | an element into a vector at a variable index position. */
4401 | |
4402 | bool |
4403 | can_vec_set_var_idx_p (machine_mode vec_mode) |
4404 | { |
4405 | if (!VECTOR_MODE_P (vec_mode)) |
4406 | return false; |
4407 | |
4408 | machine_mode inner_mode = GET_MODE_INNER (vec_mode); |
4409 | |
4410 | rtx reg1 = alloca_raw_REG (vec_mode, LAST_VIRTUAL_REGISTER + 1); |
4411 | rtx reg2 = alloca_raw_REG (inner_mode, LAST_VIRTUAL_REGISTER + 2); |
4412 | |
4413 | enum insn_code icode = optab_handler (op: vec_set_optab, mode: vec_mode); |
4414 | |
4415 | const struct insn_data_d *data = &insn_data[icode]; |
4416 | machine_mode idx_mode = data->operand[2].mode; |
4417 | |
4418 | rtx reg3 = alloca_raw_REG (idx_mode, LAST_VIRTUAL_REGISTER + 3); |
4419 | |
4420 | return icode != CODE_FOR_nothing && insn_operand_matches (icode, opno: 0, operand: reg1) |
4421 | && insn_operand_matches (icode, opno: 1, operand: reg2) |
4422 | && insn_operand_matches (icode, opno: 2, operand: reg3); |
4423 | } |
4424 | |
4425 | /* Return whether the backend can emit a vec_extract instruction with |
4426 | a non-constant index. */ |
4427 | bool |
4428 | can_vec_extract_var_idx_p (machine_mode vec_mode, machine_mode extr_mode)
4429 | { |
4430 | if (!VECTOR_MODE_P (vec_mode)) |
4431 | return false; |
4432 | |
4433 | rtx reg1 = alloca_raw_REG (extr_mode, LAST_VIRTUAL_REGISTER + 1); |
4434 | rtx reg2 = alloca_raw_REG (vec_mode, LAST_VIRTUAL_REGISTER + 2); |
4435 | |
4436 | enum insn_code icode = convert_optab_handler (op: vec_extract_optab, |
4437 | to_mode: vec_mode, from_mode: extr_mode); |
4438 | |
4439 | const struct insn_data_d *data = &insn_data[icode]; |
4440 | machine_mode idx_mode = data->operand[2].mode; |
4441 | |
4442 | rtx reg3 = alloca_raw_REG (idx_mode, LAST_VIRTUAL_REGISTER + 3); |
4443 | |
4444 | return icode != CODE_FOR_nothing && insn_operand_matches (icode, opno: 0, operand: reg1) |
4445 | && insn_operand_matches (icode, opno: 1, operand: reg2) |
4446 | && insn_operand_matches (icode, opno: 2, operand: reg3); |
4447 | } |
4448 | |
4449 | /* This function is called when we are going to emit a compare instruction that |
4450 | compares the values found in X and Y, using the rtl operator COMPARISON. |
4451 | |
4452 | If they have mode BLKmode, then SIZE specifies the size of both operands. |
4453 | |
4454 | UNSIGNEDP nonzero says that the operands are unsigned; |
4455 | this matters if they need to be widened (as given by METHODS). |
4456 | |
4457 | *PTEST is where the resulting comparison RTX is returned or NULL_RTX |
4458 | if we failed to produce one. |
4459 | |
4460 | *PMODE is the mode of the inputs (in case they are const_int). |
4461 | |
4462 | This function performs all the setup necessary so that the caller only has |
4463 | to emit a single comparison insn. This setup can involve doing a BLKmode |
4464 | comparison or emitting a library call to perform the comparison if no insn |
4465 | is available to handle it. |
4466 | The values which are passed in through pointers can be modified; the caller |
4467 | should perform the comparison on the modified values. Constant |
4468 | comparisons must have already been folded. */ |
4469 | |
4470 | static void |
4471 | prepare_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size, |
4472 | int unsignedp, enum optab_methods methods, |
4473 | rtx *ptest, machine_mode *pmode) |
4474 | { |
4475 | machine_mode mode = *pmode; |
4476 | rtx libfunc, test; |
4477 | machine_mode cmp_mode; |
4478 | |
4479 | /* The other methods are not needed. */ |
4480 | gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN |
4481 | || methods == OPTAB_LIB_WIDEN); |
4482 | |
4483 | if (CONST_SCALAR_INT_P (y)) |
4484 | canonicalize_comparison (mode, &comparison, &y); |
4485 | |
4486 | /* If we are optimizing, force expensive constants into a register. */ |
4487 | if (CONSTANT_P (x) && optimize |
4488 | && (rtx_cost (x, mode, COMPARE, 0, optimize_insn_for_speed_p ()) |
4489 | > COSTS_N_INSNS (1)) |
4490 | && can_create_pseudo_p ()) |
4491 | x = force_reg (mode, x); |
4492 | |
4493 | if (CONSTANT_P (y) && optimize |
4494 | && (rtx_cost (y, mode, COMPARE, 1, optimize_insn_for_speed_p ()) |
4495 | > COSTS_N_INSNS (1)) |
4496 | && can_create_pseudo_p ()) |
4497 | y = force_reg (mode, y); |
4498 | |
4499 | /* Don't let both operands fail to indicate the mode. */ |
4500 | if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode) |
4501 | x = force_reg (mode, x); |
4502 | if (mode == VOIDmode) |
4503 | mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : GET_MODE (y); |
4504 | |
4505 | /* Handle all BLKmode compares. */ |
4506 | |
4507 | if (mode == BLKmode) |
4508 | { |
4509 | machine_mode result_mode; |
4510 | enum insn_code cmp_code; |
4511 | rtx result; |
4512 | rtx opalign |
4513 | = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT); |
4514 | |
4515 | gcc_assert (size); |
4516 | |
4517 | /* Try to use a memory block compare insn - either cmpstr |
4518 | or cmpmem will do. */ |
4519 | opt_scalar_int_mode cmp_mode_iter; |
4520 | FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT) |
4521 | { |
4522 | scalar_int_mode cmp_mode = cmp_mode_iter.require (); |
4523 | cmp_code = direct_optab_handler (op: cmpmem_optab, mode: cmp_mode); |
4524 | if (cmp_code == CODE_FOR_nothing) |
4525 | cmp_code = direct_optab_handler (op: cmpstr_optab, mode: cmp_mode); |
4526 | if (cmp_code == CODE_FOR_nothing) |
4527 | cmp_code = direct_optab_handler (op: cmpstrn_optab, mode: cmp_mode); |
4528 | if (cmp_code == CODE_FOR_nothing) |
4529 | continue; |
4530 | |
4531 | /* Must make sure the size fits the insn's mode. */ |
4532 | if (CONST_INT_P (size) |
4533 | ? UINTVAL (size) > GET_MODE_MASK (cmp_mode) |
4534 | : (GET_MODE_BITSIZE (mode: as_a <scalar_int_mode> (GET_MODE (size))) |
4535 | > GET_MODE_BITSIZE (mode: cmp_mode))) |
4536 | continue; |
4537 | |
4538 | result_mode = insn_data[cmp_code].operand[0].mode; |
4539 | result = gen_reg_rtx (result_mode); |
4540 | size = convert_to_mode (cmp_mode, size, 1); |
4541 | emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign)); |
4542 | |
4543 | *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, result, const0_rtx); |
4544 | *pmode = result_mode; |
4545 | return; |
4546 | } |
4547 | |
4548 | if (methods != OPTAB_LIB && methods != OPTAB_LIB_WIDEN) |
4549 | goto fail; |
4550 | |
4551 | /* Otherwise call a library function. */ |
4552 | result = emit_block_comp_via_libcall (dst: x, src: y, size); |
4553 | |
4554 | x = result; |
4555 | y = const0_rtx; |
4556 | mode = TYPE_MODE (integer_type_node); |
4557 | methods = OPTAB_LIB_WIDEN; |
4558 | unsignedp = false; |
4559 | } |
4560 | |
4561 | /* Don't allow operands to the compare to trap, as that can put the |
4562 | compare and branch in different basic blocks. */ |
4563 | if (cfun->can_throw_non_call_exceptions) |
4564 | { |
4565 | if (!can_create_pseudo_p () && (may_trap_p (x) || may_trap_p (y))) |
4566 | goto fail; |
4567 | if (may_trap_p (x)) |
4568 | x = copy_to_reg (x); |
4569 | if (may_trap_p (y)) |
4570 | y = copy_to_reg (y); |
4571 | } |
4572 | |
4573 | if (GET_MODE_CLASS (mode) == MODE_CC) |
4574 | { |
4575 | enum insn_code icode = optab_handler (op: cbranch_optab, CCmode); |
4576 | test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y); |
4577 | if (icode != CODE_FOR_nothing |
4578 | && insn_operand_matches (icode, opno: 0, operand: test)) |
4579 | { |
4580 | *ptest = test; |
4581 | return; |
4582 | } |
4583 | else |
4584 | goto fail; |
4585 | } |
4586 | |
4587 | test = gen_rtx_fmt_ee (comparison, VOIDmode, x, y); |
4588 | FOR_EACH_WIDER_MODE_FROM (cmp_mode, mode) |
4589 | { |
4590 | enum insn_code icode; |
4591 | icode = optab_handler (op: cbranch_optab, mode: cmp_mode); |
4592 | if (icode != CODE_FOR_nothing |
4593 | && insn_operand_matches (icode, opno: 0, operand: test)) |
4594 | { |
4595 | rtx_insn *last = get_last_insn (); |
4596 | rtx op0 = prepare_operand (icode, x, 1, mode, cmp_mode, unsignedp); |
4597 | rtx op1 = prepare_operand (icode, y, 2, mode, cmp_mode, unsignedp); |
4598 | if (op0 && op1 |
4599 | && insn_operand_matches (icode, opno: 1, operand: op0) |
4600 | && insn_operand_matches (icode, opno: 2, operand: op1)) |
4601 | { |
4602 | XEXP (test, 0) = op0; |
4603 | XEXP (test, 1) = op1; |
4604 | *ptest = test; |
4605 | *pmode = cmp_mode; |
4606 | return; |
4607 | } |
4608 | delete_insns_since (last); |
4609 | } |
4610 | |
4611 | if (methods == OPTAB_DIRECT) |
4612 | break; |
4613 | } |
4614 | |
4615 | if (methods != OPTAB_LIB_WIDEN) |
4616 | goto fail; |
4617 | |
4618 | if (SCALAR_FLOAT_MODE_P (mode)) |
4619 | { |
      /* Small trick if UNORDERED isn't implemented by the hardware:
	 UNORDERED (x, x) is true only when x is a NaN, and so is
	 UNLT (x, x), so try the latter instead.  */
4621 | if (comparison == UNORDERED && rtx_equal_p (x, y)) |
4622 | { |
4623 | prepare_cmp_insn (x, y, comparison: UNLT, NULL_RTX, unsignedp, methods: OPTAB_WIDEN, |
4624 | ptest, pmode); |
4625 | if (*ptest) |
4626 | return; |
4627 | } |
4628 | |
4629 | prepare_float_lib_cmp (x, y, comparison, ptest, pmode); |
4630 | } |
4631 | else |
4632 | { |
4633 | rtx result; |
4634 | machine_mode ret_mode; |
4635 | |
4636 | /* Handle a libcall just for the mode we are using. */ |
4637 | libfunc = optab_libfunc (cmp_optab, mode); |
4638 | gcc_assert (libfunc); |
4639 | |
4640 | /* If we want unsigned, and this mode has a distinct unsigned |
4641 | comparison routine, use that. */ |
4642 | if (unsignedp) |
4643 | { |
4644 | rtx ulibfunc = optab_libfunc (ucmp_optab, mode); |
4645 | if (ulibfunc) |
4646 | libfunc = ulibfunc; |
4647 | } |
4648 | |
4649 | ret_mode = targetm.libgcc_cmp_return_mode (); |
4650 | result = emit_library_call_value (fun: libfunc, NULL_RTX, fn_type: LCT_CONST, |
4651 | outmode: ret_mode, arg1: x, arg1_mode: mode, arg2: y, arg2_mode: mode); |
4652 | |
4653 | /* There are two kinds of comparison routines. Biased routines |
4654 | return 0/1/2, and unbiased routines return -1/0/1. Other parts |
4655 | of gcc expect that the comparison operation is equivalent |
4656 | to the modified comparison. For signed comparisons compare the |
4657 | result against 1 in the biased case, and zero in the unbiased |
4658 | case. For unsigned comparisons always compare against 1 after |
4659 | biasing the unbiased result by adding 1. This gives us a way to |
4660 | represent LTU. |
4661 | The comparisons in the fixed-point helper library are always |
4662 | biased. */ |
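      /* For illustration (added note): with a biased __cmpdi2-style routine
	 the result is 0/1/2 for "<"/"=="/">", so LT becomes "result < 1"
	 and GT becomes "result > 1".  With an unbiased routine the result
	 is -1/0/1 and signed codes compare against 0 instead; for unsigned
	 codes we first add 1, mapping the result back onto 0/1/2, so that
	 LTU can be expressed as "result + 1 <u 1" (the raw -1 would not
	 compare LTU against 0).  */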
4663 | x = result; |
4664 | y = const1_rtx; |
4665 | |
4666 | if (!TARGET_LIB_INT_CMP_BIASED && !ALL_FIXED_POINT_MODE_P (mode)) |
4667 | { |
4668 | if (unsignedp) |
4669 | x = plus_constant (ret_mode, result, 1); |
4670 | else |
4671 | y = const0_rtx; |
4672 | } |
4673 | |
4674 | *pmode = ret_mode; |
4675 | prepare_cmp_insn (x, y, comparison, NULL_RTX, unsignedp, methods, |
4676 | ptest, pmode); |
4677 | } |
4678 | |
4679 | return; |
4680 | |
4681 | fail: |
4682 | *ptest = NULL_RTX; |
4683 | } |
4684 | |
4685 | /* Before emitting an insn with code ICODE, make sure that X, which is going |
4686 | to be used for operand OPNUM of the insn, is converted from mode MODE to |
4687 | WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and |
4688 | that it is accepted by the operand predicate. Return the new value. */ |
4689 | |
4690 | rtx |
4691 | prepare_operand (enum insn_code icode, rtx x, int opnum, machine_mode mode, |
4692 | machine_mode wider_mode, int unsignedp) |
4693 | { |
4694 | if (mode != wider_mode) |
4695 | x = convert_modes (mode: wider_mode, oldmode: mode, x, unsignedp); |
4696 | |
4697 | if (!insn_operand_matches (icode, opno: opnum, operand: x)) |
4698 | { |
4699 | machine_mode op_mode = insn_data[(int) icode].operand[opnum].mode; |
4700 | if (reload_completed) |
4701 | return NULL_RTX; |
4702 | if (GET_MODE (x) != op_mode && GET_MODE (x) != VOIDmode) |
4703 | return NULL_RTX; |
4704 | x = copy_to_mode_reg (op_mode, x); |
4705 | } |
4706 | |
4707 | return x; |
4708 | } |
4709 | |
4710 | /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know |
4711 | we can do the branch. */ |
4712 | |
4713 | static void |
4714 | emit_cmp_and_jump_insn_1 (rtx test, machine_mode mode, rtx label, |
4715 | direct_optab cmp_optab, profile_probability prob, |
4716 | bool test_branch) |
4717 | { |
4718 | machine_mode optab_mode; |
4719 | enum mode_class mclass; |
4720 | enum insn_code icode; |
4721 | rtx_insn *insn; |
4722 | |
4723 | mclass = GET_MODE_CLASS (mode); |
4724 | optab_mode = (mclass == MODE_CC) ? CCmode : mode; |
4725 | icode = optab_handler (op: cmp_optab, mode: optab_mode); |
4726 | |
4727 | gcc_assert (icode != CODE_FOR_nothing); |
4728 | gcc_assert (test_branch || insn_operand_matches (icode, 0, test)); |
4729 | if (test_branch) |
4730 | insn = emit_jump_insn (GEN_FCN (icode) (XEXP (test, 0), |
4731 | XEXP (test, 1), label)); |
4732 | else |
4733 | insn = emit_jump_insn (GEN_FCN (icode) (test, XEXP (test, 0), |
4734 | XEXP (test, 1), label)); |
4735 | |
4736 | if (prob.initialized_p () |
4737 | && profile_status_for_fn (cfun) != PROFILE_ABSENT |
4738 | && insn |
4739 | && JUMP_P (insn) |
4740 | && any_condjump_p (insn) |
4741 | && !find_reg_note (insn, REG_BR_PROB, 0)) |
4742 | add_reg_br_prob_note (insn, prob); |
4743 | } |
4744 | |
4745 | /* PTEST points to a comparison that compares its first operand with zero. |
4746 | Check to see if it can be performed as a bit-test-and-branch instead. |
4747 | On success, return the instruction that performs the bit-test-and-branch |
4748 | and replace the second operand of *PTEST with the bit number to test. |
4749 | On failure, return CODE_FOR_nothing and leave *PTEST unchanged. |
4750 | |
4751 | Note that the comparison described by *PTEST should not be taken |
4752 | literally after a successful return. *PTEST is just a convenient |
4753 | place to store the two operands of the bit-and-test. |
4754 | |
4755 | VAL must contain the original tree expression for the first operand |
4756 | of *PTEST. */ |
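
/* For example (illustrative values, not from the original comment): if the
   nonzero-bits mask of VAL is 0x8, i.e. only bit 3 can ever be set, then
   "VAL != 0" is the same as testing bit 3, so the second operand of *PTEST
   becomes 3 (or GET_MODE_BITSIZE (mode) - 1 - 3 on BITS_BIG_ENDIAN targets)
   and a tbranch_ne pattern can be used directly.  */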
4757 | |
4758 | static enum insn_code |
4759 | validate_test_and_branch (tree val, rtx *ptest, machine_mode *pmode, optab *res) |
4760 | { |
4761 | if (!val || TREE_CODE (val) != SSA_NAME) |
4762 | return CODE_FOR_nothing; |
4763 | |
4764 | machine_mode mode = TYPE_MODE (TREE_TYPE (val)); |
4765 | rtx test = *ptest; |
4766 | direct_optab optab; |
4767 | |
4768 | if (GET_CODE (test) == EQ) |
4769 | optab = tbranch_eq_optab; |
4770 | else if (GET_CODE (test) == NE) |
4771 | optab = tbranch_ne_optab; |
4772 | else |
4773 | return CODE_FOR_nothing; |
4774 | |
4775 | *res = optab; |
4776 | |
4777 | /* If the target supports the testbit comparison directly, great. */ |
4778 | auto icode = direct_optab_handler (op: optab, mode); |
4779 | if (icode == CODE_FOR_nothing) |
4780 | return icode; |
4781 | |
4782 | if (tree_zero_one_valued_p (val)) |
4783 | { |
4784 | auto pos = BITS_BIG_ENDIAN ? GET_MODE_BITSIZE (mode) - 1 : 0; |
4785 | XEXP (test, 1) = gen_int_mode (pos, mode); |
4786 | *ptest = test; |
4787 | *pmode = mode; |
4788 | return icode; |
4789 | } |
4790 | |
4791 | wide_int wcst = get_nonzero_bits (val); |
4792 | if (wcst == -1) |
4793 | return CODE_FOR_nothing; |
4794 | |
4795 | int bitpos; |
4796 | |
4797 | if ((bitpos = wi::exact_log2 (wcst)) == -1) |
4798 | return CODE_FOR_nothing; |
4799 | |
4800 | auto pos = BITS_BIG_ENDIAN ? GET_MODE_BITSIZE (mode) - 1 - bitpos : bitpos; |
4801 | XEXP (test, 1) = gen_int_mode (pos, mode); |
4802 | *ptest = test; |
4803 | *pmode = mode; |
4804 | return icode; |
4805 | } |
4806 | |
4807 | /* Generate code to compare X with Y so that the condition codes are |
4808 | set and to jump to LABEL if the condition is true. If X is a |
4809 | constant and Y is not a constant, then the comparison is swapped to |
4810 | ensure that the comparison RTL has the canonical form. |
4811 | |
4812 | UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they |
4813 | need to be widened. UNSIGNEDP is also used to select the proper |
4814 | branch condition code. |
4815 | |
4816 | If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y. |
4817 | |
4818 | MODE is the mode of the inputs (in case they are const_int). |
4819 | |
4820 | COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). |
4821 | It will be potentially converted into an unsigned variant based on |
4822 | UNSIGNEDP to select a proper jump instruction. |
4823 | |
4824 | PROB is the probability of jumping to LABEL. If the comparison is against |
4825 | zero then VAL contains the expression from which the non-zero RTL is |
4826 | derived. */ |
4827 | |
4828 | void |
4829 | emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size, |
4830 | machine_mode mode, int unsignedp, tree val, rtx label, |
4831 | profile_probability prob) |
4832 | { |
4833 | rtx op0 = x, op1 = y; |
4834 | rtx test; |
4835 | |
4836 | /* Swap operands and condition to ensure canonical RTL. */ |
4837 | if (swap_commutative_operands_p (x, y) |
4838 | && can_compare_p (code: swap_condition (comparison), mode, purpose: ccp_jump)) |
4839 | { |
4840 | op0 = y, op1 = x; |
4841 | comparison = swap_condition (comparison); |
4842 | } |
4843 | |
4844 | /* If OP0 is still a constant, then both X and Y must be constants |
4845 | or the opposite comparison is not supported. Force X into a register |
4846 | to create canonical RTL. */ |
4847 | if (CONSTANT_P (op0)) |
4848 | op0 = force_reg (mode, op0); |
4849 | |
4850 | if (unsignedp) |
4851 | comparison = unsigned_condition (comparison); |
4852 | |
4853 | prepare_cmp_insn (x: op0, y: op1, comparison, size, unsignedp, methods: OPTAB_LIB_WIDEN, |
4854 | ptest: &test, pmode: &mode); |
4855 | |
4856 | /* Check if we're comparing a truth type with 0, and if so check if |
4857 | the target supports tbranch. */ |
4858 | machine_mode tmode = mode; |
4859 | direct_optab optab; |
4860 | if (op1 == CONST0_RTX (GET_MODE (op1)) |
4861 | && validate_test_and_branch (val, ptest: &test, pmode: &tmode, |
4862 | res: &optab) != CODE_FOR_nothing) |
4863 | { |
4864 | emit_cmp_and_jump_insn_1 (test, mode: tmode, label, cmp_optab: optab, prob, test_branch: true); |
4865 | return; |
4866 | } |
4867 | |
4868 | emit_cmp_and_jump_insn_1 (test, mode, label, cmp_optab: cbranch_optab, prob, test_branch: false); |
4869 | } |
4870 | |
4871 | /* Overloaded version of emit_cmp_and_jump_insns in which VAL is unknown. */ |
4872 | |
4873 | void |
4874 | emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size, |
4875 | machine_mode mode, int unsignedp, rtx label, |
4876 | profile_probability prob) |
4877 | { |
4878 | emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, NULL, |
4879 | label, prob); |
4880 | } |
4881 | |
4882 | |
4883 | /* Emit a library call comparison between floating point X and Y. |
4884 | COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */ |
4885 | |
4886 | static void |
4887 | prepare_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison, |
4888 | rtx *ptest, machine_mode *pmode) |
4889 | { |
4890 | enum rtx_code swapped = swap_condition (comparison); |
4891 | enum rtx_code reversed = reverse_condition_maybe_unordered (comparison); |
4892 | machine_mode orig_mode = GET_MODE (x); |
4893 | machine_mode mode; |
4894 | rtx true_rtx, false_rtx; |
4895 | rtx value, target, equiv; |
4896 | rtx_insn *insns; |
4897 | rtx libfunc = 0; |
4898 | bool reversed_p = false; |
4899 | scalar_int_mode cmp_mode = targetm.libgcc_cmp_return_mode (); |
4900 | |
4901 | FOR_EACH_WIDER_MODE_FROM (mode, orig_mode) |
4902 | { |
4903 | if (code_to_optab (code: comparison) |
4904 | && (libfunc = optab_libfunc (code_to_optab (code: comparison), mode))) |
4905 | break; |
4906 | |
4907 | if (code_to_optab (code: swapped) |
4908 | && (libfunc = optab_libfunc (code_to_optab (code: swapped), mode))) |
4909 | { |
4910 | std::swap (a&: x, b&: y); |
4911 | comparison = swapped; |
4912 | break; |
4913 | } |
4914 | |
4915 | if (code_to_optab (code: reversed) |
4916 | && (libfunc = optab_libfunc (code_to_optab (code: reversed), mode))) |
4917 | { |
4918 | comparison = reversed; |
4919 | reversed_p = true; |
4920 | break; |
4921 | } |
4922 | } |
4923 | |
4924 | gcc_assert (mode != VOIDmode); |
4925 | |
4926 | if (mode != orig_mode) |
4927 | { |
4928 | x = convert_to_mode (mode, x, 0); |
4929 | y = convert_to_mode (mode, y, 0); |
4930 | } |
4931 | |
4932 | /* Attach a REG_EQUAL note describing the semantics of the libcall to |
     the RTL.  This allows the RTL optimizers to delete the libcall if the
4934 | condition can be determined at compile-time. */ |
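  /* For example (an illustration added here, not part of the original
     comment): for an UNORDERED libcall the note built below is effectively

       (if_then_else (ne x x) (const_int 1) (ne y y))

     since a NaN is the only value that compares unequal to itself, so the
     libcall can be folded away whenever neither operand can be a NaN.  */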
4935 | if (comparison == UNORDERED |
4936 | || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)) |
4937 | { |
4938 | true_rtx = const_true_rtx; |
4939 | false_rtx = const0_rtx; |
4940 | } |
4941 | else |
4942 | { |
4943 | switch (comparison) |
4944 | { |
4945 | case EQ: |
4946 | true_rtx = const0_rtx; |
4947 | false_rtx = const_true_rtx; |
4948 | break; |
4949 | |
4950 | case NE: |
4951 | true_rtx = const_true_rtx; |
4952 | false_rtx = const0_rtx; |
4953 | break; |
4954 | |
4955 | case GT: |
4956 | true_rtx = const1_rtx; |
4957 | false_rtx = const0_rtx; |
4958 | break; |
4959 | |
4960 | case GE: |
4961 | true_rtx = const0_rtx; |
4962 | false_rtx = constm1_rtx; |
4963 | break; |
4964 | |
4965 | case LT: |
4966 | true_rtx = constm1_rtx; |
4967 | false_rtx = const0_rtx; |
4968 | break; |
4969 | |
4970 | case LE: |
4971 | true_rtx = const0_rtx; |
4972 | false_rtx = const1_rtx; |
4973 | break; |
4974 | |
4975 | default: |
4976 | gcc_unreachable (); |
4977 | } |
4978 | } |
4979 | |
4980 | if (comparison == UNORDERED) |
4981 | { |
4982 | rtx temp = simplify_gen_relational (code: NE, mode: cmp_mode, op_mode: mode, op0: x, op1: x); |
4983 | equiv = simplify_gen_relational (code: NE, mode: cmp_mode, op_mode: mode, op0: y, op1: y); |
4984 | equiv = simplify_gen_ternary (code: IF_THEN_ELSE, mode: cmp_mode, op0_mode: cmp_mode, |
4985 | op0: temp, op1: const_true_rtx, op2: equiv); |
4986 | } |
4987 | else |
4988 | { |
4989 | equiv = simplify_gen_relational (code: comparison, mode: cmp_mode, op_mode: mode, op0: x, op1: y); |
4990 | if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison)) |
4991 | equiv = simplify_gen_ternary (code: IF_THEN_ELSE, mode: cmp_mode, op0_mode: cmp_mode, |
4992 | op0: equiv, op1: true_rtx, op2: false_rtx); |
4993 | } |
4994 | |
4995 | start_sequence (); |
4996 | value = emit_library_call_value (fun: libfunc, NULL_RTX, fn_type: LCT_CONST, |
4997 | outmode: cmp_mode, arg1: x, arg1_mode: mode, arg2: y, arg2_mode: mode); |
4998 | insns = get_insns (); |
4999 | end_sequence (); |
5000 | |
5001 | target = gen_reg_rtx (cmp_mode); |
5002 | emit_libcall_block (insns, target, result: value, equiv); |
5003 | |
5004 | if (comparison == UNORDERED |
5005 | || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison) |
5006 | || reversed_p) |
5007 | *ptest = gen_rtx_fmt_ee (reversed_p ? EQ : NE, VOIDmode, target, false_rtx); |
5008 | else |
5009 | *ptest = gen_rtx_fmt_ee (comparison, VOIDmode, target, const0_rtx); |
5010 | |
5011 | *pmode = cmp_mode; |
5012 | } |
5013 | |
5014 | /* Generate code to indirectly jump to a location given in the rtx LOC. */ |
5015 | |
5016 | void |
5017 | emit_indirect_jump (rtx loc) |
5018 | { |
5019 | if (!targetm.have_indirect_jump ()) |
5020 | sorry ("indirect jumps are not available on this target" ); |
5021 | else |
5022 | { |
5023 | class expand_operand ops[1]; |
5024 | create_address_operand (op: &ops[0], value: loc); |
5025 | expand_jump_insn (icode: targetm.code_for_indirect_jump, nops: 1, ops); |
5026 | emit_barrier (); |
5027 | } |
5028 | } |
5029 | |
5030 | |
5031 | /* Emit a conditional move instruction if the machine supports one for that |
5032 | condition and machine mode. |
5033 | |
5034 | OP0 and OP1 are the operands that should be compared using CODE. CMODE is |
5035 | the mode to use should they be constants. If it is VOIDmode, they cannot |
5036 | both be constants. |
5037 | |
5038 | OP2 should be stored in TARGET if the comparison is true, otherwise OP3 |
5039 | should be stored there. MODE is the mode to use should they be constants. |
5040 | If it is VOIDmode, they cannot both be constants. |
5041 | |
5042 | The result is either TARGET (perhaps modified) or NULL_RTX if the operation |
5043 | is not supported. */ |
5044 | |
5045 | rtx |
5046 | emit_conditional_move (rtx target, struct rtx_comparison comp, |
5047 | rtx op2, rtx op3, |
5048 | machine_mode mode, int unsignedp) |
5049 | { |
5050 | rtx comparison; |
5051 | rtx_insn *last; |
5052 | enum insn_code icode; |
5053 | enum rtx_code reversed; |
5054 | |
5055 | /* If the two source operands are identical, that's just a move. */ |
5056 | |
5057 | if (rtx_equal_p (op2, op3)) |
5058 | { |
5059 | if (!target) |
5060 | target = gen_reg_rtx (mode); |
5061 | |
5062 | emit_move_insn (target, op3); |
5063 | return target; |
5064 | } |
5065 | |
5066 | /* If one operand is constant, make it the second one. Only do this |
5067 | if the other operand is not constant as well. */ |
5068 | |
5069 | if (swap_commutative_operands_p (comp.op0, comp.op1)) |
5070 | { |
5071 | std::swap (a&: comp.op0, b&: comp.op1); |
5072 | comp.code = swap_condition (comp.code); |
5073 | } |
5074 | |
5075 | /* get_condition will prefer to generate LT and GT even if the old |
5076 | comparison was against zero, so undo that canonicalization here since |
5077 | comparisons against zero are cheaper. */ |
5078 | |
5079 | if (comp.code == LT && comp.op1 == const1_rtx) |
5080 | comp.code = LE, comp.op1 = const0_rtx; |
5081 | else if (comp.code == GT && comp.op1 == constm1_rtx) |
5082 | comp.code = GE, comp.op1 = const0_rtx; |
5083 | |
5084 | if (comp.mode == VOIDmode) |
5085 | comp.mode = GET_MODE (comp.op0); |
5086 | |
5087 | enum rtx_code orig_code = comp.code; |
5088 | bool swapped = false; |
5089 | if (swap_commutative_operands_p (op2, op3) |
5090 | && ((reversed = |
5091 | reversed_comparison_code_parts (comp.code, comp.op0, comp.op1, NULL)) |
5092 | != UNKNOWN)) |
5093 | { |
5094 | std::swap (a&: op2, b&: op3); |
5095 | comp.code = reversed; |
5096 | swapped = true; |
5097 | } |
5098 | |
5099 | if (mode == VOIDmode) |
5100 | mode = GET_MODE (op2); |
5101 | |
5102 | icode = direct_optab_handler (op: movcc_optab, mode); |
5103 | |
5104 | if (icode == CODE_FOR_nothing) |
5105 | return NULL_RTX; |
5106 | |
5107 | if (!target) |
5108 | target = gen_reg_rtx (mode); |
5109 | |
5110 | for (int pass = 0; ; pass++) |
5111 | { |
5112 | comp.code = unsignedp ? unsigned_condition (comp.code) : comp.code; |
5113 | comparison = |
5114 | simplify_gen_relational (code: comp.code, VOIDmode, |
5115 | op_mode: comp.mode, op0: comp.op0, op1: comp.op1); |
5116 | |
5117 | /* We can get const0_rtx or const_true_rtx in some circumstances. Just |
5118 | punt and let the caller figure out how best to deal with this |
5119 | situation. */ |
5120 | if (COMPARISON_P (comparison)) |
5121 | { |
5122 | saved_pending_stack_adjust save; |
5123 | save_pending_stack_adjust (&save); |
5124 | last = get_last_insn (); |
5125 | do_pending_stack_adjust (); |
5126 | machine_mode cmpmode = comp.mode; |
5127 | rtx orig_op0 = XEXP (comparison, 0); |
5128 | rtx orig_op1 = XEXP (comparison, 1); |
5129 | rtx op2p = op2; |
5130 | rtx op3p = op3; |
5131 | /* If we are optimizing, force expensive constants into a register |
5132 | but preserve an eventual equality with op2/op3. */ |
5133 | if (CONSTANT_P (orig_op0) && optimize |
5134 | && (rtx_cost (orig_op0, mode, COMPARE, 0, |
5135 | optimize_insn_for_speed_p ()) |
5136 | > COSTS_N_INSNS (1)) |
5137 | && can_create_pseudo_p ()) |
5138 | { |
5139 | if (rtx_equal_p (orig_op0, op2)) |
5140 | op2p = XEXP (comparison, 0) = force_reg (cmpmode, orig_op0); |
5141 | else if (rtx_equal_p (orig_op0, op3)) |
5142 | op3p = XEXP (comparison, 0) = force_reg (cmpmode, orig_op0); |
5143 | } |
5144 | if (CONSTANT_P (orig_op1) && optimize |
5145 | && (rtx_cost (orig_op1, mode, COMPARE, 0, |
5146 | optimize_insn_for_speed_p ()) |
5147 | > COSTS_N_INSNS (1)) |
5148 | && can_create_pseudo_p ()) |
5149 | { |
5150 | if (rtx_equal_p (orig_op1, op2)) |
5151 | op2p = XEXP (comparison, 1) = force_reg (cmpmode, orig_op1); |
5152 | else if (rtx_equal_p (orig_op1, op3)) |
5153 | op3p = XEXP (comparison, 1) = force_reg (cmpmode, orig_op1); |
5154 | } |
5155 | prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1), |
5156 | GET_CODE (comparison), NULL_RTX, unsignedp, |
5157 | methods: OPTAB_WIDEN, ptest: &comparison, pmode: &cmpmode); |
5158 | if (comparison) |
5159 | { |
5160 | rtx res = emit_conditional_move_1 (target, comparison, |
5161 | op2p, op3p, mode); |
5162 | if (res != NULL_RTX) |
5163 | return res; |
5164 | } |
5165 | delete_insns_since (last); |
5166 | restore_pending_stack_adjust (&save); |
5167 | } |
5168 | |
5169 | if (pass == 1) |
5170 | return NULL_RTX; |
5171 | |
5172 | /* If the preferred op2/op3 order is not usable, retry with other |
5173 | operand order, perhaps it will expand successfully. */ |
5174 | if (swapped) |
5175 | comp.code = orig_code; |
5176 | else if ((reversed = |
5177 | reversed_comparison_code_parts (orig_code, comp.op0, comp.op1, |
5178 | NULL)) |
5179 | != UNKNOWN) |
5180 | comp.code = reversed; |
5181 | else |
5182 | return NULL_RTX; |
5183 | std::swap (a&: op2, b&: op3); |
5184 | } |
5185 | } |
5186 | |
/* Helper function that, in addition to COMPARISON, also tries
   the reversed REV_COMPARISON with swapped OP2 and OP3.  Unlike the
   overload above that takes the specific constituents of a comparison,
   no additional insns are emitted for the comparison itself.  It might
   still be necessary to emit more than one insn for the final
   conditional move, though.  */
5192 | |
5193 | rtx |
5194 | emit_conditional_move (rtx target, rtx comparison, rtx rev_comparison, |
5195 | rtx op2, rtx op3, machine_mode mode) |
5196 | { |
5197 | rtx res = emit_conditional_move_1 (target, comparison, op2, op3, mode); |
5198 | |
5199 | if (res != NULL_RTX) |
5200 | return res; |
5201 | |
5202 | return emit_conditional_move_1 (target, rev_comparison, op3, op2, mode); |
5203 | } |
5204 | |
5205 | /* Helper for emitting a conditional move. */ |
5206 | |
5207 | static rtx |
5208 | emit_conditional_move_1 (rtx target, rtx comparison, |
5209 | rtx op2, rtx op3, machine_mode mode) |
5210 | { |
5211 | enum insn_code icode; |
5212 | |
5213 | if (comparison == NULL_RTX || !COMPARISON_P (comparison)) |
5214 | return NULL_RTX; |
5215 | |
5216 | /* If the two source operands are identical, that's just a move. |
5217 | As the comparison comes in non-canonicalized, we must make |
5218 | sure not to discard any possible side effects. If there are |
5219 | side effects, just let the target handle it. */ |
5220 | if (!side_effects_p (comparison) && rtx_equal_p (op2, op3)) |
5221 | { |
5222 | if (!target) |
5223 | target = gen_reg_rtx (mode); |
5224 | |
5225 | emit_move_insn (target, op3); |
5226 | return target; |
5227 | } |
5228 | |
5229 | if (mode == VOIDmode) |
5230 | mode = GET_MODE (op2); |
5231 | |
5232 | icode = direct_optab_handler (op: movcc_optab, mode); |
5233 | |
5234 | if (icode == CODE_FOR_nothing) |
5235 | return NULL_RTX; |
5236 | |
5237 | if (!target) |
5238 | target = gen_reg_rtx (mode); |
5239 | |
5240 | class expand_operand ops[4]; |
5241 | |
5242 | create_output_operand (op: &ops[0], x: target, mode); |
5243 | create_fixed_operand (op: &ops[1], x: comparison); |
5244 | create_input_operand (op: &ops[2], value: op2, mode); |
5245 | create_input_operand (op: &ops[3], value: op3, mode); |
5246 | |
5247 | if (maybe_expand_insn (icode, nops: 4, ops)) |
5248 | { |
5249 | if (ops[0].value != target) |
5250 | convert_move (target, ops[0].value, false); |
5251 | return target; |
5252 | } |
5253 | |
5254 | return NULL_RTX; |
5255 | } |
5256 | |
5257 | |
5258 | /* Emit a conditional negate or bitwise complement using the |
5259 | negcc or notcc optabs if available. Return NULL_RTX if such operations |
5260 | are not available. Otherwise return the RTX holding the result. |
5261 | TARGET is the desired destination of the result. COMP is the comparison |
5262 | on which to negate. If COND is true move into TARGET the negation |
5263 | or bitwise complement of OP1. Otherwise move OP2 into TARGET. |
5264 | CODE is either NEG or NOT. MODE is the machine mode in which the |
5265 | operation is performed. */ |
5266 | |
5267 | rtx |
5268 | emit_conditional_neg_or_complement (rtx target, rtx_code code, |
5269 | machine_mode mode, rtx cond, rtx op1, |
5270 | rtx op2) |
5271 | { |
5272 | optab op = unknown_optab; |
5273 | if (code == NEG) |
5274 | op = negcc_optab; |
5275 | else if (code == NOT) |
5276 | op = notcc_optab; |
5277 | else |
5278 | gcc_unreachable (); |
5279 | |
5280 | insn_code icode = direct_optab_handler (op, mode); |
5281 | |
5282 | if (icode == CODE_FOR_nothing) |
5283 | return NULL_RTX; |
5284 | |
5285 | if (!target) |
5286 | target = gen_reg_rtx (mode); |
5287 | |
5288 | rtx_insn *last = get_last_insn (); |
5289 | class expand_operand ops[4]; |
5290 | |
5291 | create_output_operand (op: &ops[0], x: target, mode); |
5292 | create_fixed_operand (op: &ops[1], x: cond); |
5293 | create_input_operand (op: &ops[2], value: op1, mode); |
5294 | create_input_operand (op: &ops[3], value: op2, mode); |
5295 | |
5296 | if (maybe_expand_insn (icode, nops: 4, ops)) |
5297 | { |
5298 | if (ops[0].value != target) |
5299 | convert_move (target, ops[0].value, false); |
5300 | |
5301 | return target; |
5302 | } |
5303 | delete_insns_since (last); |
5304 | return NULL_RTX; |
5305 | } |
5306 | |
5307 | /* Emit a conditional addition instruction if the machine supports one for that |
5308 | condition and machine mode. |
5309 | |
5310 | OP0 and OP1 are the operands that should be compared using CODE. CMODE is |
5311 | the mode to use should they be constants. If it is VOIDmode, they cannot |
5312 | both be constants. |
5313 | |
5314 | OP2 should be stored in TARGET if the comparison is false, otherwise OP2+OP3 |
5315 | should be stored there. MODE is the mode to use should they be constants. |
5316 | If it is VOIDmode, they cannot both be constants. |
5317 | |
5318 | The result is either TARGET (perhaps modified) or NULL_RTX if the operation |
5319 | is not supported. */ |
5320 | |
5321 | rtx |
5322 | emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1, |
5323 | machine_mode cmode, rtx op2, rtx op3, |
5324 | machine_mode mode, int unsignedp) |
5325 | { |
5326 | rtx comparison; |
5327 | rtx_insn *last; |
5328 | enum insn_code icode; |
5329 | |
5330 | /* If one operand is constant, make it the second one. Only do this |
5331 | if the other operand is not constant as well. */ |
5332 | |
5333 | if (swap_commutative_operands_p (op0, op1)) |
5334 | { |
5335 | std::swap (a&: op0, b&: op1); |
5336 | code = swap_condition (code); |
5337 | } |
5338 | |
5339 | /* get_condition will prefer to generate LT and GT even if the old |
5340 | comparison was against zero, so undo that canonicalization here since |
5341 | comparisons against zero are cheaper. */ |
5342 | if (code == LT && op1 == const1_rtx) |
5343 | code = LE, op1 = const0_rtx; |
5344 | else if (code == GT && op1 == constm1_rtx) |
5345 | code = GE, op1 = const0_rtx; |
5346 | |
5347 | if (cmode == VOIDmode) |
5348 | cmode = GET_MODE (op0); |
5349 | |
5350 | if (mode == VOIDmode) |
5351 | mode = GET_MODE (op2); |
5352 | |
5353 | icode = optab_handler (op: addcc_optab, mode); |
5354 | |
5355 | if (icode == CODE_FOR_nothing) |
5356 | return 0; |
5357 | |
5358 | if (!target) |
5359 | target = gen_reg_rtx (mode); |
5360 | |
5361 | code = unsignedp ? unsigned_condition (code) : code; |
5362 | comparison = simplify_gen_relational (code, VOIDmode, op_mode: cmode, op0, op1); |
5363 | |
5364 | /* We can get const0_rtx or const_true_rtx in some circumstances. Just |
5365 | return NULL and let the caller figure out how best to deal with this |
5366 | situation. */ |
5367 | if (!COMPARISON_P (comparison)) |
5368 | return NULL_RTX; |
5369 | |
5370 | do_pending_stack_adjust (); |
5371 | last = get_last_insn (); |
5372 | prepare_cmp_insn (XEXP (comparison, 0), XEXP (comparison, 1), |
5373 | GET_CODE (comparison), NULL_RTX, unsignedp, methods: OPTAB_WIDEN, |
5374 | ptest: &comparison, pmode: &cmode); |
5375 | if (comparison) |
5376 | { |
5377 | class expand_operand ops[4]; |
5378 | |
5379 | create_output_operand (op: &ops[0], x: target, mode); |
5380 | create_fixed_operand (op: &ops[1], x: comparison); |
5381 | create_input_operand (op: &ops[2], value: op2, mode); |
5382 | create_input_operand (op: &ops[3], value: op3, mode); |
5383 | if (maybe_expand_insn (icode, nops: 4, ops)) |
5384 | { |
5385 | if (ops[0].value != target) |
5386 | convert_move (target, ops[0].value, false); |
5387 | return target; |
5388 | } |
5389 | } |
5390 | delete_insns_since (last); |
5391 | return NULL_RTX; |
5392 | } |
5393 | |
5394 | /* These functions attempt to generate an insn body, rather than |
5395 | emitting the insn, but if the gen function already emits them, we |
5396 | make no attempt to turn them back into naked patterns. */ |
5397 | |
5398 | /* Generate and return an insn body to add Y to X. */ |
5399 | |
5400 | rtx_insn * |
5401 | gen_add2_insn (rtx x, rtx y) |
5402 | { |
5403 | enum insn_code icode = optab_handler (op: add_optab, GET_MODE (x)); |
5404 | |
5405 | gcc_assert (insn_operand_matches (icode, 0, x)); |
5406 | gcc_assert (insn_operand_matches (icode, 1, x)); |
5407 | gcc_assert (insn_operand_matches (icode, 2, y)); |
5408 | |
5409 | return GEN_FCN (icode) (x, x, y); |
5410 | } |
5411 | |
5412 | /* Generate and return an insn body to add r1 and c, |
5413 | storing the result in r0. */ |
5414 | |
5415 | rtx_insn * |
5416 | gen_add3_insn (rtx r0, rtx r1, rtx c) |
5417 | { |
5418 | enum insn_code icode = optab_handler (op: add_optab, GET_MODE (r0)); |
5419 | |
5420 | if (icode == CODE_FOR_nothing |
5421 | || !insn_operand_matches (icode, opno: 0, operand: r0) |
5422 | || !insn_operand_matches (icode, opno: 1, operand: r1) |
5423 | || !insn_operand_matches (icode, opno: 2, operand: c)) |
5424 | return NULL; |
5425 | |
5426 | return GEN_FCN (icode) (r0, r1, c); |
5427 | } |
5428 | |
5429 | bool |
5430 | have_add2_insn (rtx x, rtx y) |
5431 | { |
5432 | enum insn_code icode; |
5433 | |
5434 | gcc_assert (GET_MODE (x) != VOIDmode); |
5435 | |
5436 | icode = optab_handler (op: add_optab, GET_MODE (x)); |
5437 | |
5438 | if (icode == CODE_FOR_nothing) |
5439 | return false; |
5440 | |
5441 | if (!insn_operand_matches (icode, opno: 0, operand: x) |
5442 | || !insn_operand_matches (icode, opno: 1, operand: x) |
5443 | || !insn_operand_matches (icode, opno: 2, operand: y)) |
5444 | return false; |
5445 | |
5446 | return true; |
5447 | } |
5448 | |
/* Generate and return an insn body to add Y and Z, storing the result
   in X, using the addptr3 pattern.  */
5450 | |
5451 | rtx_insn * |
5452 | gen_addptr3_insn (rtx x, rtx y, rtx z) |
5453 | { |
5454 | enum insn_code icode = optab_handler (op: addptr3_optab, GET_MODE (x)); |
5455 | |
5456 | gcc_assert (insn_operand_matches (icode, 0, x)); |
5457 | gcc_assert (insn_operand_matches (icode, 1, y)); |
5458 | gcc_assert (insn_operand_matches (icode, 2, z)); |
5459 | |
5460 | return GEN_FCN (icode) (x, y, z); |
5461 | } |
5462 | |
5463 | /* Return true if the target implements an addptr pattern and X, Y, |
5464 | and Z are valid for the pattern predicates. */ |
5465 | |
5466 | bool |
5467 | have_addptr3_insn (rtx x, rtx y, rtx z) |
5468 | { |
5469 | enum insn_code icode; |
5470 | |
5471 | gcc_assert (GET_MODE (x) != VOIDmode); |
5472 | |
5473 | icode = optab_handler (op: addptr3_optab, GET_MODE (x)); |
5474 | |
5475 | if (icode == CODE_FOR_nothing) |
5476 | return false; |
5477 | |
5478 | if (!insn_operand_matches (icode, opno: 0, operand: x) |
5479 | || !insn_operand_matches (icode, opno: 1, operand: y) |
5480 | || !insn_operand_matches (icode, opno: 2, operand: z)) |
5481 | return false; |
5482 | |
5483 | return true; |
5484 | } |
5485 | |
5486 | /* Generate and return an insn body to subtract Y from X. */ |
5487 | |
5488 | rtx_insn * |
5489 | gen_sub2_insn (rtx x, rtx y) |
5490 | { |
5491 | enum insn_code icode = optab_handler (op: sub_optab, GET_MODE (x)); |
5492 | |
5493 | gcc_assert (insn_operand_matches (icode, 0, x)); |
5494 | gcc_assert (insn_operand_matches (icode, 1, x)); |
5495 | gcc_assert (insn_operand_matches (icode, 2, y)); |
5496 | |
5497 | return GEN_FCN (icode) (x, x, y); |
5498 | } |
5499 | |
/* Generate and return an insn body to subtract c from r1,
   storing the result in r0.  */
5502 | |
5503 | rtx_insn * |
5504 | gen_sub3_insn (rtx r0, rtx r1, rtx c) |
5505 | { |
5506 | enum insn_code icode = optab_handler (op: sub_optab, GET_MODE (r0)); |
5507 | |
5508 | if (icode == CODE_FOR_nothing |
5509 | || !insn_operand_matches (icode, opno: 0, operand: r0) |
5510 | || !insn_operand_matches (icode, opno: 1, operand: r1) |
5511 | || !insn_operand_matches (icode, opno: 2, operand: c)) |
5512 | return NULL; |
5513 | |
5514 | return GEN_FCN (icode) (r0, r1, c); |
5515 | } |
5516 | |
5517 | bool |
5518 | have_sub2_insn (rtx x, rtx y) |
5519 | { |
5520 | enum insn_code icode; |
5521 | |
5522 | gcc_assert (GET_MODE (x) != VOIDmode); |
5523 | |
5524 | icode = optab_handler (op: sub_optab, GET_MODE (x)); |
5525 | |
5526 | if (icode == CODE_FOR_nothing) |
5527 | return false; |
5528 | |
5529 | if (!insn_operand_matches (icode, opno: 0, operand: x) |
5530 | || !insn_operand_matches (icode, opno: 1, operand: x) |
5531 | || !insn_operand_matches (icode, opno: 2, operand: y)) |
5532 | return false; |
5533 | |
5534 | return true; |
5535 | } |
5536 | |
5537 | /* Generate the body of an insn to extend Y (with mode MFROM) |
5538 | into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */ |
5539 | |
5540 | rtx_insn * |
5541 | gen_extend_insn (rtx x, rtx y, machine_mode mto, |
5542 | machine_mode mfrom, int unsignedp) |
5543 | { |
5544 | enum insn_code icode = can_extend_p (mto, mfrom, unsignedp); |
5545 | return GEN_FCN (icode) (x, y); |
5546 | } |
5547 | |
5548 | /* Generate code to convert FROM to floating point |
5549 | and store in TO. FROM must be fixed point and not VOIDmode. |
5550 | UNSIGNEDP nonzero means regard FROM as unsigned. |
5551 | Normally this is done by correcting the final value |
5552 | if it is negative. */ |
5553 | |
5554 | void |
5555 | expand_float (rtx to, rtx from, int unsignedp) |
5556 | { |
5557 | enum insn_code icode; |
5558 | rtx target = to; |
5559 | scalar_mode from_mode, to_mode; |
5560 | machine_mode fmode, imode; |
5561 | bool can_do_signed = false; |
5562 | |
5563 | /* Crash now, because we won't be able to decide which mode to use. */ |
5564 | gcc_assert (GET_MODE (from) != VOIDmode); |
5565 | |
5566 | /* Look for an insn to do the conversion. Do it in the specified |
5567 | modes if possible; otherwise convert either input, output or both to |
5568 | wider mode. If the integer mode is wider than the mode of FROM, |
5569 | we can do the conversion signed even if the input is unsigned. */ |
5570 | |
5571 | FOR_EACH_MODE_FROM (fmode, GET_MODE (to)) |
5572 | FOR_EACH_MODE_FROM (imode, GET_MODE (from)) |
5573 | { |
5574 | int doing_unsigned = unsignedp; |
5575 | |
5576 | if (fmode != GET_MODE (to) |
5577 | && (significand_size (fmode) |
5578 | < GET_MODE_UNIT_PRECISION (GET_MODE (from)))) |
5579 | continue; |
5580 | |
5581 | icode = can_float_p (fmode, imode, unsignedp); |
5582 | if (icode == CODE_FOR_nothing && unsignedp) |
5583 | { |
5584 | enum insn_code scode = can_float_p (fmode, imode, 0); |
5585 | if (scode != CODE_FOR_nothing) |
5586 | can_do_signed = true; |
5587 | if (imode != GET_MODE (from)) |
5588 | icode = scode, doing_unsigned = 0; |
5589 | } |
5590 | |
5591 | if (icode != CODE_FOR_nothing) |
5592 | { |
5593 | if (imode != GET_MODE (from)) |
5594 | from = convert_to_mode (imode, from, unsignedp); |
5595 | |
5596 | if (fmode != GET_MODE (to)) |
5597 | target = gen_reg_rtx (fmode); |
5598 | |
5599 | emit_unop_insn (icode, target, op0: from, |
5600 | code: doing_unsigned ? UNSIGNED_FLOAT : FLOAT); |
5601 | |
5602 | if (target != to) |
5603 | convert_move (to, target, 0); |
5604 | return; |
5605 | } |
5606 | } |
5607 | |
  /* Unsigned integer, and no way to convert directly.  Convert as signed,
     then conditionally adjust the result.  */
5610 | if (unsignedp |
5611 | && can_do_signed |
5612 | && is_a <scalar_mode> (GET_MODE (to), result: &to_mode) |
5613 | && is_a <scalar_mode> (GET_MODE (from), result: &from_mode)) |
5614 | { |
5615 | opt_scalar_mode fmode_iter; |
5616 | rtx_code_label *label = gen_label_rtx (); |
5617 | rtx temp; |
5618 | REAL_VALUE_TYPE offset; |
5619 | |
5620 | /* Look for a usable floating mode FMODE wider than the source and at |
5621 | least as wide as the target. Using FMODE will avoid rounding woes |
5622 | with unsigned values greater than the signed maximum value. */ |
5623 | |
5624 | FOR_EACH_MODE_FROM (fmode_iter, to_mode) |
5625 | { |
5626 | scalar_mode fmode = fmode_iter.require (); |
5627 | if (GET_MODE_PRECISION (mode: from_mode) < GET_MODE_BITSIZE (mode: fmode) |
5628 | && can_float_p (fmode, from_mode, 0) != CODE_FOR_nothing) |
5629 | break; |
5630 | } |
5631 | |
5632 | if (!fmode_iter.exists (mode: &fmode)) |
5633 | { |
5634 | /* There is no such mode. Pretend the target is wide enough. */ |
5635 | fmode = to_mode; |
5636 | |
5637 | /* Avoid double-rounding when TO is narrower than FROM. */ |
5638 | if ((significand_size (fmode) + 1) |
5639 | < GET_MODE_PRECISION (mode: from_mode)) |
5640 | { |
5641 | rtx temp1; |
5642 | rtx_code_label *neglabel = gen_label_rtx (); |
5643 | |
5644 | /* Don't use TARGET if it isn't a register, is a hard register, |
5645 | or is the wrong mode. */ |
5646 | if (!REG_P (target) |
5647 | || REGNO (target) < FIRST_PSEUDO_REGISTER |
5648 | || GET_MODE (target) != fmode) |
5649 | target = gen_reg_rtx (fmode); |
5650 | |
5651 | imode = from_mode; |
5652 | do_pending_stack_adjust (); |
5653 | |
5654 | /* Test whether the sign bit is set. */ |
5655 | emit_cmp_and_jump_insns (x: from, const0_rtx, comparison: LT, NULL_RTX, mode: imode, |
5656 | unsignedp: 0, label: neglabel); |
5657 | |
5658 | /* The sign bit is not set. Convert as signed. */ |
5659 | expand_float (to: target, from, unsignedp: 0); |
5660 | emit_jump_insn (targetm.gen_jump (label)); |
5661 | emit_barrier (); |
5662 | |
5663 | /* The sign bit is set. |
5664 | Convert to a usable (positive signed) value by shifting right |
5665 | one bit, while remembering if a nonzero bit was shifted |
5666 | out; i.e., compute (from & 1) | (from >> 1). */ |
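	      /* Illustration (added note): halving the operand can lose its
		 low bit, so that bit is ORed back into the low bit of the
		 halved value as a "sticky" bit.  The signed conversion then
		 rounds as if the lost bit were still present, and the
		 doubling below produces the correctly rounded result for
		 the original operand.  */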
5667 | |
5668 | emit_label (neglabel); |
5669 | temp = expand_binop (mode: imode, binoptab: and_optab, op0: from, const1_rtx, |
5670 | NULL_RTX, unsignedp: 1, methods: OPTAB_LIB_WIDEN); |
5671 | temp1 = expand_shift (RSHIFT_EXPR, imode, from, 1, NULL_RTX, 1); |
5672 | temp = expand_binop (mode: imode, binoptab: ior_optab, op0: temp, op1: temp1, target: temp, unsignedp: 1, |
5673 | methods: OPTAB_LIB_WIDEN); |
5674 | expand_float (to: target, from: temp, unsignedp: 0); |
5675 | |
5676 | /* Multiply by 2 to undo the shift above. */ |
5677 | temp = expand_binop (mode: fmode, binoptab: add_optab, op0: target, op1: target, |
5678 | target, unsignedp: 0, methods: OPTAB_LIB_WIDEN); |
5679 | if (temp != target) |
5680 | emit_move_insn (target, temp); |
5681 | |
5682 | do_pending_stack_adjust (); |
5683 | emit_label (label); |
5684 | goto done; |
5685 | } |
5686 | } |
5687 | |
5688 | /* If we are about to do some arithmetic to correct for an |
5689 | unsigned operand, do it in a pseudo-register. */ |
5690 | |
5691 | if (to_mode != fmode |
5692 | || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER) |
5693 | target = gen_reg_rtx (fmode); |
5694 | |
5695 | /* Convert as signed integer to floating. */ |
5696 | expand_float (to: target, from, unsignedp: 0); |
5697 | |
5698 | /* If FROM is negative (and therefore TO is negative), |
5699 | correct its value by 2**bitwidth. */ |
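      /* Worked example (added for illustration): converting the unsigned
	 SImode value 0xFFFFFFFF through the signed path yields -1.0; adding
	 2**32 = 4294967296.0 corrects it to the expected 4294967295.0.  */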
5700 | |
5701 | do_pending_stack_adjust (); |
5702 | emit_cmp_and_jump_insns (x: from, const0_rtx, comparison: GE, NULL_RTX, mode: from_mode, |
5703 | unsignedp: 0, label); |
5704 | |
5705 | |
5706 | real_2expN (&offset, GET_MODE_PRECISION (mode: from_mode), fmode); |
5707 | temp = expand_binop (mode: fmode, binoptab: add_optab, op0: target, |
5708 | op1: const_double_from_real_value (offset, fmode), |
5709 | target, unsignedp: 0, methods: OPTAB_LIB_WIDEN); |
5710 | if (temp != target) |
5711 | emit_move_insn (target, temp); |
5712 | |
5713 | do_pending_stack_adjust (); |
5714 | emit_label (label); |
5715 | goto done; |
5716 | } |
5717 | |
5718 | /* No hardware instruction available; call a library routine. */ |
5719 | { |
5720 | rtx libfunc; |
5721 | rtx_insn *insns; |
5722 | rtx value; |
5723 | convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab; |
5724 | |
5725 | if (is_narrower_int_mode (GET_MODE (from), SImode)) |
5726 | from = convert_to_mode (SImode, from, unsignedp); |
5727 | |
5728 | libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from)); |
5729 | gcc_assert (libfunc); |
5730 | |
5731 | start_sequence (); |
5732 | |
5733 | value = emit_library_call_value (fun: libfunc, NULL_RTX, fn_type: LCT_CONST, |
5734 | GET_MODE (to), arg1: from, GET_MODE (from)); |
5735 | insns = get_insns (); |
5736 | end_sequence (); |
5737 | |
5738 | emit_libcall_block (insns, target, result: value, |
5739 | gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT, |
5740 | GET_MODE (to), from)); |
5741 | } |
5742 | |
5743 | done: |
5744 | |
5745 | /* Copy result to requested destination |
5746 | if we have been computing in a temp location. */ |
5747 | |
5748 | if (target != to) |
5749 | { |
5750 | if (GET_MODE (target) == GET_MODE (to)) |
5751 | emit_move_insn (to, target); |
5752 | else |
5753 | convert_move (to, target, 0); |
5754 | } |
5755 | } |
5756 | |
5757 | /* Generate code to convert FROM to fixed point and store in TO. FROM |
5758 | must be floating point. */ |
5759 | |
5760 | void |
5761 | expand_fix (rtx to, rtx from, int unsignedp) |
5762 | { |
5763 | enum insn_code icode; |
5764 | rtx target = to; |
5765 | machine_mode fmode, imode; |
5766 | opt_scalar_mode fmode_iter; |
5767 | bool must_trunc = false; |
5768 | |
5769 | /* We first try to find a pair of modes, one real and one integer, at |
5770 | least as wide as FROM and TO, respectively, in which we can open-code |
5771 | this conversion. If the integer mode is wider than the mode of TO, |
5772 | we can do the conversion either signed or unsigned. */ |
5773 | |
5774 | FOR_EACH_MODE_FROM (fmode, GET_MODE (from)) |
5775 | FOR_EACH_MODE_FROM (imode, GET_MODE (to)) |
5776 | { |
5777 | int doing_unsigned = unsignedp; |
5778 | |
5779 | icode = can_fix_p (imode, fmode, unsignedp, &must_trunc); |
5780 | if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp) |
5781 | icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0; |
5782 | |
5783 | if (icode != CODE_FOR_nothing) |
5784 | { |
5785 | rtx_insn *last = get_last_insn (); |
5786 | rtx from1 = from; |
5787 | if (fmode != GET_MODE (from)) |
5788 | { |
5789 | if (REAL_MODE_FORMAT (GET_MODE (from)) |
5790 | == &arm_bfloat_half_format |
5791 | && REAL_MODE_FORMAT (fmode) == &ieee_single_format) |
		  /* The BF -> SF conversion can be just a shift, so it
		     doesn't need to handle sNaNs.  */
5794 | { |
5795 | int save_flag_finite_math_only = flag_finite_math_only; |
5796 | flag_finite_math_only = true; |
5797 | from1 = convert_to_mode (fmode, from, 0); |
5798 | flag_finite_math_only = save_flag_finite_math_only; |
5799 | } |
5800 | else |
5801 | from1 = convert_to_mode (fmode, from, 0); |
5802 | } |
5803 | |
5804 | if (must_trunc) |
5805 | { |
5806 | rtx temp = gen_reg_rtx (GET_MODE (from1)); |
5807 | from1 = expand_unop (GET_MODE (from1), unoptab: ftrunc_optab, op0: from1, |
5808 | target: temp, unsignedp: 0); |
5809 | } |
5810 | |
5811 | if (imode != GET_MODE (to)) |
5812 | target = gen_reg_rtx (imode); |
5813 | |
5814 | if (maybe_emit_unop_insn (icode, target, op0: from1, |
5815 | code: doing_unsigned ? UNSIGNED_FIX : FIX)) |
5816 | { |
5817 | if (target != to) |
5818 | convert_move (to, target, unsignedp); |
5819 | return; |
5820 | } |
5821 | delete_insns_since (last); |
5822 | } |
5823 | } |
5824 | |
  /* For an unsigned conversion, there is one more way to do it.
     If we have a signed conversion, we generate code that compares
     the real value to the largest representable positive number.  If it
     is smaller, the conversion is done normally.  Otherwise, subtract
     one plus the highest signed number, convert, and add it back.

     We only need to check all real modes, since we know we didn't find
     anything with a wider integer mode.

     This code used to extend the FP value into a mode wider than the
     destination.  This is needed for decimal float modes which cannot
     accurately represent one plus the highest signed number of the same
     size, but not for binary modes.  Consider, for instance, conversion
     from SFmode into DImode.

     The hot path through the code is dealing with inputs smaller than 2^63
     and doing just the conversion, so there are no bits to lose.

     In the other path we know the value is positive in the range 2^63..2^64-1
     inclusive.  (For any other input, overflow happens and the result is
     undefined.)  So we know that the most significant bit set in the mantissa
     corresponds to 2^63.  The subtraction of 2^63 should not generate any
     rounding as it simply clears out that bit.  The rest is trivial.  */
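
  /* Worked example (added for illustration): to convert the DFmode value
     2^63 + 2048 to unsigned DImode, the code below subtracts 2^63 (exact,
     it merely clears the leading mantissa bit), does a signed fix to get
     2048, and XORs in 0x8000000000000000.  The intermediate result is known
     to be below 2^63, so its top bit is clear and the XOR is equivalent to
     adding 2^63 back, giving 0x8000000000000800.  */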
5848 | |
5849 | scalar_int_mode to_mode; |
5850 | if (unsignedp |
5851 | && is_a <scalar_int_mode> (GET_MODE (to), result: &to_mode) |
5852 | && HWI_COMPUTABLE_MODE_P (mode: to_mode)) |
5853 | FOR_EACH_MODE_FROM (fmode_iter, as_a <scalar_mode> (GET_MODE (from))) |
5854 | { |
5855 | scalar_mode fmode = fmode_iter.require (); |
5856 | if (CODE_FOR_nothing != can_fix_p (to_mode, fmode, |
5857 | 0, &must_trunc) |
5858 | && (!DECIMAL_FLOAT_MODE_P (fmode) |
5859 | || (GET_MODE_BITSIZE (mode: fmode) > GET_MODE_PRECISION (mode: to_mode)))) |
5860 | { |
5861 | int bitsize; |
5862 | REAL_VALUE_TYPE offset; |
5863 | rtx limit; |
5864 | rtx_code_label *lab1, *lab2; |
5865 | rtx_insn *insn; |
5866 | |
5867 | bitsize = GET_MODE_PRECISION (mode: to_mode); |
5868 | real_2expN (&offset, bitsize - 1, fmode); |
5869 | limit = const_double_from_real_value (offset, fmode); |
5870 | lab1 = gen_label_rtx (); |
5871 | lab2 = gen_label_rtx (); |
5872 | |
5873 | if (fmode != GET_MODE (from)) |
5874 | { |
5875 | if (REAL_MODE_FORMAT (GET_MODE (from)) |
5876 | == &arm_bfloat_half_format |
5877 | && REAL_MODE_FORMAT (fmode) == &ieee_single_format) |
		/* The BF -> SF conversion can be just a shift, so it
		   doesn't need to handle sNaNs.  */
5880 | { |
5881 | int save_flag_finite_math_only = flag_finite_math_only; |
5882 | flag_finite_math_only = true; |
5883 | from = convert_to_mode (fmode, from, 0); |
5884 | flag_finite_math_only = save_flag_finite_math_only; |
5885 | } |
5886 | else |
5887 | from = convert_to_mode (fmode, from, 0); |
5888 | } |
5889 | |
5890 | /* See if we need to do the subtraction. */ |
5891 | do_pending_stack_adjust (); |
5892 | emit_cmp_and_jump_insns (x: from, y: limit, comparison: GE, NULL_RTX, |
5893 | GET_MODE (from), unsignedp: 0, label: lab1); |
5894 | |
5895 | /* If not, do the signed "fix" and branch around fixup code. */ |
5896 | expand_fix (to, from, unsignedp: 0); |
5897 | emit_jump_insn (targetm.gen_jump (lab2)); |
5898 | emit_barrier (); |
5899 | |
5900 | /* Otherwise, subtract 2**(N-1), convert to signed number, |
5901 | then add 2**(N-1). Do the addition using XOR since this |
5902 | will often generate better code. */ |
5903 | emit_label (lab1); |
5904 | target = expand_binop (GET_MODE (from), binoptab: sub_optab, op0: from, op1: limit, |
5905 | NULL_RTX, unsignedp: 0, methods: OPTAB_LIB_WIDEN); |
5906 | expand_fix (to, from: target, unsignedp: 0); |
5907 | target = expand_binop (mode: to_mode, binoptab: xor_optab, op0: to, |
5908 | op1: gen_int_mode |
5909 | (HOST_WIDE_INT_1 << (bitsize - 1), |
5910 | to_mode), |
5911 | target: to, unsignedp: 1, methods: OPTAB_LIB_WIDEN); |
5912 | |
5913 | if (target != to) |
5914 | emit_move_insn (to, target); |
5915 | |
5916 | emit_label (lab2); |
5917 | |
5918 | if (optab_handler (op: mov_optab, mode: to_mode) != CODE_FOR_nothing) |
5919 | { |
5920 | /* Make a place for a REG_NOTE and add it. */ |
5921 | insn = emit_move_insn (to, to); |
5922 | set_dst_reg_note (insn, REG_EQUAL, |
5923 | gen_rtx_fmt_e (UNSIGNED_FIX, to_mode, |
5924 | copy_rtx (from)), |
5925 | to); |
5926 | } |
5927 | |
5928 | return; |
5929 | } |
5930 | } |
5931 | |
5932 | #ifdef HAVE_SFmode |
5933 | if (REAL_MODE_FORMAT (GET_MODE (from)) == &arm_bfloat_half_format |
5934 | && REAL_MODE_FORMAT (SFmode) == &ieee_single_format) |
    /* We don't have BF -> TI library functions; use BF -> SF -> TI
       instead.  The BF -> SF conversion can be just a shift, so it
       doesn't need to handle sNaNs.  */
5938 | { |
5939 | int save_flag_finite_math_only = flag_finite_math_only; |
5940 | flag_finite_math_only = true; |
5941 | from = convert_to_mode (SFmode, from, 0); |
5942 | flag_finite_math_only = save_flag_finite_math_only; |
5943 | expand_fix (to, from, unsignedp); |
5944 | return; |
5945 | } |
5946 | #endif |
5947 | |
5948 | /* We can't do it with an insn, so use a library call. But first ensure |
5949 | that the mode of TO is at least as wide as SImode, since those are the |
5950 | only library calls we know about. */ |
5951 | |
5952 | if (is_narrower_int_mode (GET_MODE (to), SImode)) |
5953 | { |
5954 | target = gen_reg_rtx (SImode); |
5955 | |
5956 | expand_fix (to: target, from, unsignedp); |
5957 | } |
5958 | else |
5959 | { |
5960 | rtx_insn *insns; |
5961 | rtx value; |
5962 | rtx libfunc; |
5963 | |
5964 | convert_optab tab = unsignedp ? ufix_optab : sfix_optab; |
5965 | libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from)); |
5966 | gcc_assert (libfunc); |
5967 | |
5968 | start_sequence (); |
5969 | |
5970 | value = emit_library_call_value (fun: libfunc, NULL_RTX, fn_type: LCT_CONST, |
5971 | GET_MODE (to), arg1: from, GET_MODE (from)); |
5972 | insns = get_insns (); |
5973 | end_sequence (); |
5974 | |
5975 | emit_libcall_block (insns, target, result: value, |
5976 | gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX, |
5977 | GET_MODE (to), from)); |
5978 | } |
5979 | |
5980 | if (target != to) |
5981 | { |
5982 | if (GET_MODE (to) == GET_MODE (target)) |
5983 | emit_move_insn (to, target); |
5984 | else |
5985 | convert_move (to, target, 0); |
5986 | } |
5987 | } |
5988 | |
5989 | |
5990 | /* Promote integer arguments for a libcall if necessary. |
5991 | emit_library_call_value cannot do the promotion because it does not |
5992 | know if it should do a signed or unsigned promotion. This is because |
5993 | there are no tree types defined for libcalls. */ |
5994 | |
5995 | static rtx |
5996 | prepare_libcall_arg (rtx arg, int uintp) |
5997 | { |
5998 | scalar_int_mode mode; |
5999 | machine_mode arg_mode; |
6000 | if (is_a <scalar_int_mode> (GET_MODE (arg), result: &mode)) |
6001 | { |
6002 | /* If we need to promote the integer function argument we need to do |
6003 | it here instead of inside emit_library_call_value because in |
6004 | emit_library_call_value we don't know if we should do a signed or |
6005 | unsigned promotion. */ |
6006 | |
6007 | int unsigned_p = 0; |
6008 | arg_mode = promote_function_mode (NULL_TREE, mode, |
6009 | &unsigned_p, NULL_TREE, 0); |
6010 | if (arg_mode != mode) |
6011 | return convert_to_mode (arg_mode, arg, uintp); |
6012 | } |
6013 | return arg; |
6014 | } |
6015 | |
/* Generate code to convert FROM to TO, where at least one of them is a
   fixed-point value.  If UINTP is true, either TO or FROM is an unsigned
   integer.  If SATP is true, we need to saturate the result.  */
6019 | |
6020 | void |
6021 | expand_fixed_convert (rtx to, rtx from, int uintp, int satp) |
6022 | { |
6023 | machine_mode to_mode = GET_MODE (to); |
6024 | machine_mode from_mode = GET_MODE (from); |
6025 | convert_optab tab; |
6026 | enum rtx_code this_code; |
6027 | enum insn_code code; |
6028 | rtx_insn *insns; |
6029 | rtx value; |
6030 | rtx libfunc; |
6031 | |
6032 | if (to_mode == from_mode) |
6033 | { |
6034 | emit_move_insn (to, from); |
6035 | return; |
6036 | } |
6037 | |
6038 | if (uintp) |
6039 | { |
6040 | tab = satp ? satfractuns_optab : fractuns_optab; |
6041 | this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT; |
6042 | } |
6043 | else |
6044 | { |
6045 | tab = satp ? satfract_optab : fract_optab; |
6046 | this_code = satp ? SAT_FRACT : FRACT_CONVERT; |
6047 | } |
6048 | code = convert_optab_handler (op: tab, to_mode, from_mode); |
6049 | if (code != CODE_FOR_nothing) |
6050 | { |
6051 | emit_unop_insn (icode: code, target: to, op0: from, code: this_code); |
6052 | return; |
6053 | } |
6054 | |
6055 | libfunc = convert_optab_libfunc (tab, to_mode, from_mode); |
6056 | gcc_assert (libfunc); |
6057 | |
6058 | from = prepare_libcall_arg (arg: from, uintp); |
6059 | from_mode = GET_MODE (from); |
6060 | |
6061 | start_sequence (); |
6062 | value = emit_library_call_value (fun: libfunc, NULL_RTX, fn_type: LCT_CONST, outmode: to_mode, |
6063 | arg1: from, arg1_mode: from_mode); |
6064 | insns = get_insns (); |
6065 | end_sequence (); |
6066 | |
6067 | emit_libcall_block (insns, target: to, result: value, |
6068 | gen_rtx_fmt_e (optab_to_code (tab), to_mode, from)); |
6069 | } |
6070 | |
6071 | /* Generate code to convert FROM to fixed point and store in TO. FROM |
6072 | must be floating point, TO must be signed. Use the conversion optab |
6073 | TAB to do the conversion. */ |
6074 | |
6075 | bool |
6076 | expand_sfix_optab (rtx to, rtx from, convert_optab tab) |
6077 | { |
6078 | enum insn_code icode; |
6079 | rtx target = to; |
6080 | machine_mode fmode, imode; |
6081 | |
6082 | /* We first try to find a pair of modes, one real and one integer, at |
6083 | least as wide as FROM and TO, respectively, in which we can open-code |
6084 | this conversion. If the integer mode is wider than the mode of TO, |
6085 | we can do the conversion either signed or unsigned. */ |
6086 | |
6087 | FOR_EACH_MODE_FROM (fmode, GET_MODE (from)) |
6088 | FOR_EACH_MODE_FROM (imode, GET_MODE (to)) |
6089 | { |
6090 | icode = convert_optab_handler (tab, imode, fmode, |
6091 | insn_optimization_type ()); |
6092 | if (icode != CODE_FOR_nothing) |
6093 | { |
6094 | rtx_insn *last = get_last_insn (); |
6095 | if (fmode != GET_MODE (from)) |
6096 | from = convert_to_mode (fmode, from, 0); |
6097 | |
6098 | if (imode != GET_MODE (to)) |
6099 | target = gen_reg_rtx (imode); |
6100 | |
6101 | if (!maybe_emit_unop_insn (icode, target, op0: from, code: UNKNOWN)) |
6102 | { |
6103 | delete_insns_since (last); |
6104 | continue; |
6105 | } |
6106 | if (target != to) |
6107 | convert_move (to, target, 0); |
6108 | return true; |
6109 | } |
6110 | } |
6111 | |
6112 | return false; |
6113 | } |
6114 | |
6115 | /* Report whether we have an instruction to perform the operation |
6116 | specified by CODE on operands of mode MODE. */ |
6117 | bool |
6118 | have_insn_for (enum rtx_code code, machine_mode mode) |
6119 | { |
6120 | return (code_to_optab (code) |
6121 | && (optab_handler (op: code_to_optab (code), mode) |
6122 | != CODE_FOR_nothing)); |
6123 | } |
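
/* Example usage (illustrative): a caller can guard an open-coded expansion
   on the availability of a native pattern, e.g.

       if (have_insn_for (NEG, SImode))
         ;  // the target has a negsi2-style pattern
       else
         ;  // fall back to subtracting from zero, a libcall, etc.
*/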
6124 | |
6125 | /* Print information about the current contents of the optabs on |
6126 | STDERR. */ |
6127 | |
6128 | DEBUG_FUNCTION void |
6129 | debug_optab_libfuncs (void) |
6130 | { |
6131 | int i, j, k; |
6132 | |
6133 | /* Dump the arithmetic optabs. */ |
6134 | for (i = FIRST_NORM_OPTAB; i <= LAST_NORMLIB_OPTAB; ++i) |
6135 | for (j = 0; j < NUM_MACHINE_MODES; ++j) |
6136 | { |
6137 | rtx l = optab_libfunc ((optab) i, (machine_mode) j); |
6138 | if (l) |
6139 | { |
6140 | gcc_assert (GET_CODE (l) == SYMBOL_REF); |
6141 | fprintf (stderr, "%s\t%s:\t%s\n", |
6142 | GET_RTX_NAME (optab_to_code ((optab) i)), |
6143 | GET_MODE_NAME (j), |
6144 | XSTR (l, 0)); |
6145 | } |
6146 | } |
6147 | |
6148 | /* Dump the conversion optabs. */ |
6149 | for (i = FIRST_CONV_OPTAB; i <= LAST_CONVLIB_OPTAB; ++i) |
6150 | for (j = 0; j < NUM_MACHINE_MODES; ++j) |
6151 | for (k = 0; k < NUM_MACHINE_MODES; ++k) |
6152 | { |
6153 | rtx l = convert_optab_libfunc ((optab) i, (machine_mode) j, |
6154 | (machine_mode) k); |
6155 | if (l) |
6156 | { |
6157 | gcc_assert (GET_CODE (l) == SYMBOL_REF); |
6158 | fprintf (stderr, "%s\t%s\t%s:\t%s\n", |
6159 | GET_RTX_NAME (optab_to_code ((optab) i)), |
6160 | GET_MODE_NAME (j), |
6161 | GET_MODE_NAME (k), |
6162 | XSTR (l, 0)); |
6163 | } |
6164 | } |
6165 | } |
6166 | |
6167 | /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition |
6168 | CODE. Return 0 on failure. */ |
6169 | |
6170 | rtx_insn * |
6171 | gen_cond_trap (enum rtx_code code, rtx op1, rtx op2, rtx tcode) |
6172 | { |
6173 | machine_mode mode = GET_MODE (op1); |
6174 | enum insn_code icode; |
6175 | rtx_insn *insn; |
6176 | rtx trap_rtx; |
6177 | |
6178 | if (mode == VOIDmode) |
6179 | return 0; |
6180 | |
6181 | icode = optab_handler (op: ctrap_optab, mode); |
6182 | if (icode == CODE_FOR_nothing) |
6183 | return 0; |
6184 | |
6185 | /* Some targets only accept a zero trap code. */ |
6186 | if (!insn_operand_matches (icode, opno: 3, operand: tcode)) |
6187 | return 0; |
6188 | |
6189 | do_pending_stack_adjust (); |
6190 | start_sequence (); |
6191 | prepare_cmp_insn (x: op1, y: op2, comparison: code, NULL_RTX, unsignedp: false, methods: OPTAB_DIRECT, |
6192 | ptest: &trap_rtx, pmode: &mode); |
6193 | if (!trap_rtx) |
6194 | insn = NULL; |
6195 | else |
6196 | insn = GEN_FCN (icode) (trap_rtx, XEXP (trap_rtx, 0), XEXP (trap_rtx, 1), |
6197 | tcode); |
6198 | |
6199 | /* If that failed, then give up. */ |
6200 | if (insn == 0) |
6201 | { |
6202 | end_sequence (); |
6203 | return 0; |
6204 | } |
6205 | |
6206 | emit_insn (insn); |
6207 | insn = get_insns (); |
6208 | end_sequence (); |
6209 | return insn; |
6210 | } |
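
/* Illustrative use (a sketch, not a quote of any caller): to trap when a
   register X is zero, a caller could do

       rtx_insn *seq = gen_cond_trap (EQ, x, const0_rtx, const0_rtx);
       if (seq)
         emit_insn (seq);
       // otherwise emit an explicit compare-and-branch around a trap

   Whether a nonzero trap code is accepted is entirely target-specific.  */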
6211 | |
6212 | /* Return rtx code for TCODE or UNKNOWN. Use UNSIGNEDP to select signed |
6213 | or unsigned operation code. */ |
6214 | |
6215 | enum rtx_code |
6216 | get_rtx_code_1 (enum tree_code tcode, bool unsignedp) |
6217 | { |
6218 | enum rtx_code code; |
6219 | switch (tcode) |
6220 | { |
6221 | case EQ_EXPR: |
6222 | code = EQ; |
6223 | break; |
6224 | case NE_EXPR: |
6225 | code = NE; |
6226 | break; |
6227 | case LT_EXPR: |
6228 | code = unsignedp ? LTU : LT; |
6229 | break; |
6230 | case LE_EXPR: |
6231 | code = unsignedp ? LEU : LE; |
6232 | break; |
6233 | case GT_EXPR: |
6234 | code = unsignedp ? GTU : GT; |
6235 | break; |
6236 | case GE_EXPR: |
6237 | code = unsignedp ? GEU : GE; |
6238 | break; |
6239 | |
6240 | case UNORDERED_EXPR: |
6241 | code = UNORDERED; |
6242 | break; |
6243 | case ORDERED_EXPR: |
6244 | code = ORDERED; |
6245 | break; |
6246 | case UNLT_EXPR: |
6247 | code = UNLT; |
6248 | break; |
6249 | case UNLE_EXPR: |
6250 | code = UNLE; |
6251 | break; |
6252 | case UNGT_EXPR: |
6253 | code = UNGT; |
6254 | break; |
6255 | case UNGE_EXPR: |
6256 | code = UNGE; |
6257 | break; |
6258 | case UNEQ_EXPR: |
6259 | code = UNEQ; |
6260 | break; |
6261 | case LTGT_EXPR: |
6262 | code = LTGT; |
6263 | break; |
6264 | |
6265 | case BIT_AND_EXPR: |
6266 | code = AND; |
6267 | break; |
6268 | |
6269 | case BIT_IOR_EXPR: |
6270 | code = IOR; |
6271 | break; |
6272 | |
6273 | default: |
6274 | code = UNKNOWN; |
6275 | break; |
6276 | } |
6277 | return code; |
6278 | } |
6279 | |
6280 | /* Return rtx code for TCODE. Use UNSIGNEDP to select signed |
6281 | or unsigned operation code. */ |
6282 | |
6283 | enum rtx_code |
6284 | get_rtx_code (enum tree_code tcode, bool unsignedp) |
6285 | { |
6286 | enum rtx_code code = get_rtx_code_1 (tcode, unsignedp); |
6287 | gcc_assert (code != UNKNOWN); |
6288 | return code; |
6289 | } |
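
/* For example, get_rtx_code (LT_EXPR, true) yields LTU while
   get_rtx_code (LT_EXPR, false) yields LT; EQ_EXPR and NE_EXPR map to EQ
   and NE regardless of signedness.  */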
6290 | |
6291 | /* Return a comparison rtx of mode CMP_MODE for COND. Use UNSIGNEDP to |
6292 | select signed or unsigned operators. OPNO holds the index of the |
6293 | first comparison operand for insn ICODE. Do not generate the |
6294 | compare instruction itself. */ |
6295 | |
6296 | rtx |
6297 | vector_compare_rtx (machine_mode cmp_mode, enum tree_code tcode, |
6298 | tree t_op0, tree t_op1, bool unsignedp, |
6299 | enum insn_code icode, unsigned int opno) |
6300 | { |
6301 | class expand_operand ops[2]; |
6302 | rtx rtx_op0, rtx_op1; |
6303 | machine_mode m0, m1; |
6304 | enum rtx_code rcode = get_rtx_code (tcode, unsignedp); |
6305 | |
6306 | gcc_assert (TREE_CODE_CLASS (tcode) == tcc_comparison); |
6307 | |
6308 | /* Expand operands. For vector types with scalar modes, e.g. where int64x1_t |
6309 | has mode DImode, this can produce a constant RTX of mode VOIDmode; in such |
6310 | cases, use the original mode. */ |
6311 | rtx_op0 = expand_expr (exp: t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)), |
6312 | modifier: EXPAND_STACK_PARM); |
6313 | m0 = GET_MODE (rtx_op0); |
6314 | if (m0 == VOIDmode) |
6315 | m0 = TYPE_MODE (TREE_TYPE (t_op0)); |
6316 | |
6317 | rtx_op1 = expand_expr (exp: t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)), |
6318 | modifier: EXPAND_STACK_PARM); |
6319 | m1 = GET_MODE (rtx_op1); |
6320 | if (m1 == VOIDmode) |
6321 | m1 = TYPE_MODE (TREE_TYPE (t_op1)); |
6322 | |
6323 | create_input_operand (op: &ops[0], value: rtx_op0, mode: m0); |
6324 | create_input_operand (op: &ops[1], value: rtx_op1, mode: m1); |
6325 | if (!maybe_legitimize_operands (icode, opno, nops: 2, ops)) |
6326 | gcc_unreachable (); |
6327 | return gen_rtx_fmt_ee (rcode, cmp_mode, ops[0].value, ops[1].value); |
6328 | } |
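
/* For example (illustrative): comparing two V4SImode operands A and B with
   LT_EXPR and UNSIGNEDP false yields the rtx (lt:CMP_MODE a b), where A and
   B have already been legitimized for operands OPNO and OPNO + 1 of
   ICODE.  */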
6329 | |
6330 | /* Check if vec_perm mask SEL is a constant equivalent to a shift of the |
6331 | first vec_perm operand, assuming the other operand (the second for a right |
6332 | shift, the first for a left shift) is a constant vector of zeros. Return |
6333 | the shift distance in bits if so, or NULL_RTX if the vec_perm is not a |
6334 | shift. MODE is the mode of the value being shifted. SHIFT_OPTAB is |
6335 | vec_shr_optab for a right shift or vec_shl_optab for a left shift. */ |
6336 | static rtx |
6337 | shift_amt_for_vec_perm_mask (machine_mode mode, const vec_perm_indices &sel, |
6338 | optab shift_optab) |
6339 | { |
6340 | unsigned int bitsize = GET_MODE_UNIT_BITSIZE (mode); |
6341 | poly_int64 first = sel[0]; |
6342 | if (maybe_ge (sel[0], GET_MODE_NUNITS (mode))) |
6343 | return NULL_RTX; |
6344 | |
6345 | if (shift_optab == vec_shl_optab) |
6346 | { |
6347 | unsigned int nelt; |
6348 | if (!GET_MODE_NUNITS (mode).is_constant (const_value: &nelt)) |
6349 | return NULL_RTX; |
6350 | unsigned firstidx = 0; |
6351 | for (unsigned int i = 0; i < nelt; i++) |
6352 | { |
6353 | if (known_eq (sel[i], nelt)) |
6354 | { |
6355 | if (i == 0 || firstidx) |
6356 | return NULL_RTX; |
6357 | firstidx = i; |
6358 | } |
6359 | else if (firstidx |
6360 | ? maybe_ne (a: sel[i], b: nelt + i - firstidx) |
6361 | : maybe_ge (sel[i], nelt)) |
6362 | return NULL_RTX; |
6363 | } |
6364 | |
6365 | if (firstidx == 0) |
6366 | return NULL_RTX; |
6367 | first = firstidx; |
6368 | } |
6369 | else if (!sel.series_p (0, 1, first, 1)) |
6370 | { |
6371 | unsigned int nelt; |
6372 | if (!GET_MODE_NUNITS (mode).is_constant (const_value: &nelt)) |
6373 | return NULL_RTX; |
6374 | for (unsigned int i = 1; i < nelt; i++) |
6375 | { |
6376 | poly_int64 expected = i + first; |
6377 | /* Indices into the second vector are all equivalent. */ |
6378 | if (maybe_lt (a: sel[i], b: nelt) |
6379 | ? maybe_ne (a: sel[i], b: expected) |
6380 | : maybe_lt (a: expected, b: nelt)) |
6381 | return NULL_RTX; |
6382 | } |
6383 | } |
6384 | |
6385 | return gen_int_shift_amount (mode, first * bitsize); |
6386 | } |
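
/* Worked example (illustrative): for MODE == V4SImode and SHIFT_OPTAB ==
   vec_shr_optab, the selector {1, 2, 3, 4} picks elements 1..3 of the first
   operand followed by one element of the (all-zero) second operand, which
   is exactly a right shift by one 32-bit element; the routine returns 32. */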
6387 | |
6388 | /* A subroutine of expand_vec_perm_var for expanding one vec_perm insn. */ |
6389 | |
6390 | static rtx |
6391 | expand_vec_perm_1 (enum insn_code icode, rtx target, |
6392 | rtx v0, rtx v1, rtx sel) |
6393 | { |
6394 | machine_mode tmode = GET_MODE (target); |
6395 | machine_mode smode = GET_MODE (sel); |
6396 | class expand_operand ops[4]; |
6397 | |
6398 | gcc_assert (GET_MODE_CLASS (smode) == MODE_VECTOR_INT |
6399 | || related_int_vector_mode (tmode).require () == smode); |
6400 | create_output_operand (op: &ops[0], x: target, mode: tmode); |
6401 | create_input_operand (op: &ops[3], value: sel, mode: smode); |
6402 | |
6403 | /* Make an effort to preserve v0 == v1. The target expander is able to |
6404 | rely on this to determine if we're permuting a single input operand. */ |
6405 | if (rtx_equal_p (v0, v1)) |
6406 | { |
6407 | if (!insn_operand_matches (icode, opno: 1, operand: v0)) |
6408 | v0 = force_reg (tmode, v0); |
6409 | gcc_checking_assert (insn_operand_matches (icode, 1, v0)); |
6410 | gcc_checking_assert (insn_operand_matches (icode, 2, v0)); |
6411 | |
6412 | create_fixed_operand (op: &ops[1], x: v0); |
6413 | create_fixed_operand (op: &ops[2], x: v0); |
6414 | } |
6415 | else |
6416 | { |
6417 | create_input_operand (op: &ops[1], value: v0, mode: tmode); |
6418 | create_input_operand (op: &ops[2], value: v1, mode: tmode); |
6419 | } |
6420 | |
6421 | if (maybe_expand_insn (icode, nops: 4, ops)) |
6422 | return ops[0].value; |
6423 | return NULL_RTX; |
6424 | } |
6425 | |
6426 | /* Implement a permutation of vectors v0 and v1 using the permutation |
6427 | vector in SEL and return the result. Use TARGET to hold the result |
6428 | if nonnull and convenient. |
6429 | |
6430 | MODE is the mode of the vectors being permuted (V0 and V1). SEL_MODE |
6431 | is the TYPE_MODE associated with SEL, or BLKmode if SEL isn't known |
6432 | to have a particular mode. */ |
6433 | |
6434 | rtx |
6435 | expand_vec_perm_const (machine_mode mode, rtx v0, rtx v1, |
6436 | const vec_perm_builder &sel, machine_mode sel_mode, |
6437 | rtx target) |
6438 | { |
6439 | if (!target || !register_operand (target, mode)) |
6440 | target = gen_reg_rtx (mode); |
6441 | |
6442 | /* Set QIMODE to a different vector mode with byte elements. |
6443 | If no such mode, or if MODE already has byte elements, use VOIDmode. */ |
6444 | machine_mode qimode; |
6445 | if (!qimode_for_vec_perm (mode).exists (mode: &qimode)) |
6446 | qimode = VOIDmode; |
6447 | |
6448 | rtx_insn *last = get_last_insn (); |
6449 | |
6450 | bool single_arg_p = rtx_equal_p (v0, v1); |
6451 | /* Always specify two input vectors here and leave the target to handle |
6452 | cases in which the inputs are equal. Not all backends can cope with |
6453 | the single-input representation when testing for a double-input |
6454 | target instruction. */ |
6455 | vec_perm_indices indices (sel, 2, GET_MODE_NUNITS (mode)); |
6456 | |
6457 | /* See if this can be handled with a vec_shr or vec_shl. We only do this |
6458 | if the second (for vec_shr) or first (for vec_shl) vector is all |
6459 | zeroes. */ |
6460 | insn_code shift_code = CODE_FOR_nothing; |
6461 | insn_code shift_code_qi = CODE_FOR_nothing; |
6462 | optab shift_optab = unknown_optab; |
6463 | rtx v2 = v0; |
6464 | if (v1 == CONST0_RTX (GET_MODE (v1))) |
6465 | shift_optab = vec_shr_optab; |
6466 | else if (v0 == CONST0_RTX (GET_MODE (v0))) |
6467 | { |
6468 | shift_optab = vec_shl_optab; |
6469 | v2 = v1; |
6470 | } |
6471 | if (shift_optab != unknown_optab) |
6472 | { |
6473 | shift_code = optab_handler (op: shift_optab, mode); |
6474 | shift_code_qi = ((qimode != VOIDmode && qimode != mode) |
6475 | ? optab_handler (op: shift_optab, mode: qimode) |
6476 | : CODE_FOR_nothing); |
6477 | } |
6478 | if (shift_code != CODE_FOR_nothing || shift_code_qi != CODE_FOR_nothing) |
6479 | { |
6480 | rtx shift_amt = shift_amt_for_vec_perm_mask (mode, sel: indices, shift_optab); |
6481 | if (shift_amt) |
6482 | { |
6483 | class expand_operand ops[3]; |
6484 | if (shift_amt == const0_rtx) |
6485 | return v2; |
6486 | if (shift_code != CODE_FOR_nothing) |
6487 | { |
6488 | create_output_operand (op: &ops[0], x: target, mode); |
6489 | create_input_operand (op: &ops[1], value: v2, mode); |
6490 | create_convert_operand_from_type (op: &ops[2], value: shift_amt, sizetype); |
6491 | if (maybe_expand_insn (icode: shift_code, nops: 3, ops)) |
6492 | return ops[0].value; |
6493 | } |
6494 | if (shift_code_qi != CODE_FOR_nothing) |
6495 | { |
6496 | rtx tmp = gen_reg_rtx (qimode); |
6497 | create_output_operand (op: &ops[0], x: tmp, mode: qimode); |
6498 | create_input_operand (op: &ops[1], gen_lowpart (qimode, v2), mode: qimode); |
6499 | create_convert_operand_from_type (op: &ops[2], value: shift_amt, sizetype); |
6500 | if (maybe_expand_insn (icode: shift_code_qi, nops: 3, ops)) |
6501 | return gen_lowpart (mode, ops[0].value); |
6502 | } |
6503 | } |
6504 | } |
6505 | |
6506 | if (targetm.vectorize.vec_perm_const != NULL) |
6507 | { |
6508 | if (single_arg_p) |
6509 | v1 = v0; |
6510 | |
6511 | gcc_checking_assert (GET_MODE (v0) == GET_MODE (v1)); |
6512 | machine_mode op_mode = GET_MODE (v0); |
6513 | if (targetm.vectorize.vec_perm_const (mode, op_mode, target, v0, v1, |
6514 | indices)) |
6515 | return target; |
6516 | } |
6517 | |
6518 | /* Fall back to a constant byte-based permutation. */ |
6519 | vec_perm_indices qimode_indices; |
6520 | rtx target_qi = NULL_RTX, v0_qi = NULL_RTX, v1_qi = NULL_RTX; |
6521 | if (qimode != VOIDmode) |
6522 | { |
6523 | qimode_indices.new_expanded_vector (indices, GET_MODE_UNIT_SIZE (mode)); |
6524 | target_qi = gen_reg_rtx (qimode); |
6525 | v0_qi = gen_lowpart (qimode, v0); |
6526 | v1_qi = gen_lowpart (qimode, v1); |
6527 | if (targetm.vectorize.vec_perm_const != NULL |
6528 | && targetm.vectorize.vec_perm_const (qimode, qimode, target_qi, v0_qi, |
6529 | v1_qi, qimode_indices)) |
6530 | return gen_lowpart (mode, target_qi); |
6531 | } |
6532 | |
6533 | v0 = force_reg (mode, v0); |
6534 | if (single_arg_p) |
6535 | v1 = v0; |
6536 | v1 = force_reg (mode, v1); |
6537 | |
6538 | /* Otherwise expand as a fully variable permutation. */ |
6539 | |
6540 | /* The optabs are only defined for selectors with the same width |
6541 | as the values being permuted. */ |
6542 | machine_mode required_sel_mode; |
6543 | if (!related_int_vector_mode (mode).exists (mode: &required_sel_mode)) |
6544 | { |
6545 | delete_insns_since (last); |
6546 | return NULL_RTX; |
6547 | } |
6548 | |
6549 | /* We know that it is semantically valid to treat SEL as having SEL_MODE. |
6550 | If that isn't the mode we want then we need to prove that using |
6551 | REQUIRED_SEL_MODE is OK. */ |
6552 | if (sel_mode != required_sel_mode) |
6553 | { |
6554 | if (!selector_fits_mode_p (required_sel_mode, indices)) |
6555 | { |
6556 | delete_insns_since (last); |
6557 | return NULL_RTX; |
6558 | } |
6559 | sel_mode = required_sel_mode; |
6560 | } |
6561 | |
6562 | insn_code icode = direct_optab_handler (op: vec_perm_optab, mode); |
6563 | if (icode != CODE_FOR_nothing) |
6564 | { |
6565 | rtx sel_rtx = vec_perm_indices_to_rtx (sel_mode, indices); |
6566 | rtx tmp = expand_vec_perm_1 (icode, target, v0, v1, sel: sel_rtx); |
6567 | if (tmp) |
6568 | return tmp; |
6569 | } |
6570 | |
6571 | if (qimode != VOIDmode |
6572 | && selector_fits_mode_p (qimode, qimode_indices)) |
6573 | { |
6574 | icode = direct_optab_handler (op: vec_perm_optab, mode: qimode); |
6575 | if (icode != CODE_FOR_nothing) |
6576 | { |
6577 | rtx sel_qi = vec_perm_indices_to_rtx (qimode, qimode_indices); |
6578 | rtx tmp = expand_vec_perm_1 (icode, target: target_qi, v0: v0_qi, v1: v1_qi, sel: sel_qi); |
6579 | if (tmp) |
6580 | return gen_lowpart (mode, tmp); |
6581 | } |
6582 | } |
6583 | |
6584 | delete_insns_since (last); |
6585 | return NULL_RTX; |
6586 | } |
6587 | |
6588 | /* Implement a permutation of vectors v0 and v1 using the permutation |
6589 | vector in SEL and return the result. Use TARGET to hold the result |
6590 | if nonnull and convenient. |
6591 | |
6592 | MODE is the mode of the vectors being permuted (V0 and V1). |
6593 | SEL must have the integer equivalent of MODE and is known to be |
6594 | unsuitable for permutes with a constant permutation vector. */ |
6595 | |
6596 | rtx |
6597 | expand_vec_perm_var (machine_mode mode, rtx v0, rtx v1, rtx sel, rtx target) |
6598 | { |
6599 | enum insn_code icode; |
6600 | unsigned int i, u; |
6601 | rtx tmp, sel_qi; |
6602 | |
6603 | u = GET_MODE_UNIT_SIZE (mode); |
6604 | |
6605 | if (!target || GET_MODE (target) != mode) |
6606 | target = gen_reg_rtx (mode); |
6607 | |
6608 | icode = direct_optab_handler (op: vec_perm_optab, mode); |
6609 | if (icode != CODE_FOR_nothing) |
6610 | { |
6611 | tmp = expand_vec_perm_1 (icode, target, v0, v1, sel); |
6612 | if (tmp) |
6613 | return tmp; |
6614 | } |
6615 | |
6616 | /* As a special case to aid several targets, lower the element-based |
6617 | permutation to a byte-based permutation and try again. */ |
6618 | machine_mode qimode; |
6619 | if (!qimode_for_vec_perm (mode).exists (mode: &qimode) |
6620 | || maybe_gt (GET_MODE_NUNITS (qimode), GET_MODE_MASK (QImode) + 1)) |
6621 | return NULL_RTX; |
6622 | icode = direct_optab_handler (op: vec_perm_optab, mode: qimode); |
6623 | if (icode == CODE_FOR_nothing) |
6624 | return NULL_RTX; |
6625 | |
6626 | /* Multiply each element by its byte size. */ |
6627 | machine_mode selmode = GET_MODE (sel); |
6628 | if (u == 2) |
6629 | sel = expand_simple_binop (mode: selmode, code: PLUS, op0: sel, op1: sel, |
6630 | NULL, unsignedp: 0, methods: OPTAB_DIRECT); |
6631 | else |
6632 | sel = expand_simple_binop (mode: selmode, code: ASHIFT, op0: sel, |
6633 | op1: gen_int_shift_amount (selmode, exact_log2 (x: u)), |
6634 | NULL, unsignedp: 0, methods: OPTAB_DIRECT); |
6635 | gcc_assert (sel != NULL); |
6636 | |
6637 | /* Broadcast the low byte of each element into each of its bytes. |
6638 | The encoding has U interleaved stepped patterns, one for each |
6639 | byte of an element. */ |
6640 | vec_perm_builder const_sel (GET_MODE_SIZE (mode), u, 3); |
6641 | unsigned int low_byte_in_u = BYTES_BIG_ENDIAN ? u - 1 : 0; |
6642 | for (i = 0; i < 3; ++i) |
6643 | for (unsigned int j = 0; j < u; ++j) |
6644 | const_sel.quick_push (obj: i * u + low_byte_in_u); |
6645 | sel = gen_lowpart (qimode, sel); |
6646 | sel = expand_vec_perm_const (mode: qimode, v0: sel, v1: sel, sel: const_sel, sel_mode: qimode, NULL); |
6647 | gcc_assert (sel != NULL); |
6648 | |
6649 | /* Add the byte offset to each byte element. */ |
6650 | /* Note that the definition of the indices here is memory ordering, |
6651 | so there should be no difference between big and little endian. */ |
6652 | rtx_vector_builder byte_indices (qimode, u, 1); |
6653 | for (i = 0; i < u; ++i) |
6654 | byte_indices.quick_push (GEN_INT (i)); |
6655 | tmp = byte_indices.build (); |
6656 | sel_qi = expand_simple_binop (mode: qimode, code: PLUS, op0: sel, op1: tmp, |
6657 | target: sel, unsignedp: 0, methods: OPTAB_DIRECT); |
6658 | gcc_assert (sel_qi != NULL); |
6659 | |
6660 | tmp = mode != qimode ? gen_reg_rtx (qimode) : target; |
6661 | tmp = expand_vec_perm_1 (icode, target: tmp, gen_lowpart (qimode, v0), |
6662 | gen_lowpart (qimode, v1), sel: sel_qi); |
6663 | if (tmp) |
6664 | tmp = gen_lowpart (mode, tmp); |
6665 | return tmp; |
6666 | } |
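
/* Worked example of the byte lowering above (little-endian, illustrative):
   for V4SImode (U == 4), an element selector {3, 2, 1, 0} is scaled to
   {12, 8, 4, 0}, the low byte of each element is then broadcast to all of
   its bytes, and the per-byte offsets {0, 1, 2, 3} are added, giving the
   byte selector {12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3}.  */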
6667 | |
6668 | /* Generate VEC_SERIES_EXPR <OP0, OP1>, returning a value of mode VMODE. |
6669 | Use TARGET for the result if nonnull and convenient. */ |
6670 | |
6671 | rtx |
6672 | expand_vec_series_expr (machine_mode vmode, rtx op0, rtx op1, rtx target) |
6673 | { |
6674 | class expand_operand ops[3]; |
6675 | enum insn_code icode; |
6676 | machine_mode emode = GET_MODE_INNER (vmode); |
6677 | |
6678 | icode = direct_optab_handler (op: vec_series_optab, mode: vmode); |
6679 | gcc_assert (icode != CODE_FOR_nothing); |
6680 | |
6681 | create_output_operand (op: &ops[0], x: target, mode: vmode); |
6682 | create_input_operand (op: &ops[1], value: op0, mode: emode); |
6683 | create_input_operand (op: &ops[2], value: op1, mode: emode); |
6684 | |
6685 | expand_insn (icode, nops: 3, ops); |
6686 | return ops[0].value; |
6687 | } |
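
/* For example, VEC_SERIES_EXPR <3, 2> denotes the vector {3, 5, 7, 9, ...}:
   OP0 is the base element and OP1 the step between consecutive elements.  */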
6688 | |
6689 | /* Generate insns for a vector comparison into a mask. */ |
6690 | |
6691 | rtx |
6692 | expand_vec_cmp_expr (tree type, tree exp, rtx target) |
6693 | { |
6694 | class expand_operand ops[4]; |
6695 | enum insn_code icode; |
6696 | rtx comparison; |
6697 | machine_mode mask_mode = TYPE_MODE (type); |
6698 | machine_mode vmode; |
6699 | bool unsignedp; |
6700 | tree op0a, op0b; |
6701 | enum tree_code tcode; |
6702 | |
6703 | op0a = TREE_OPERAND (exp, 0); |
6704 | op0b = TREE_OPERAND (exp, 1); |
6705 | tcode = TREE_CODE (exp); |
6706 | |
6707 | unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a)); |
6708 | vmode = TYPE_MODE (TREE_TYPE (op0a)); |
6709 | |
6710 | icode = get_vec_cmp_icode (vmode, mask_mode, uns: unsignedp); |
6711 | if (icode == CODE_FOR_nothing) |
6712 | { |
6713 | if (tcode == EQ_EXPR || tcode == NE_EXPR) |
6714 | icode = get_vec_cmp_eq_icode (vmode, mask_mode); |
6715 | if (icode == CODE_FOR_nothing) |
6716 | return 0; |
6717 | } |
6718 | |
6719 | comparison = vector_compare_rtx (cmp_mode: mask_mode, tcode, t_op0: op0a, t_op1: op0b, |
6720 | unsignedp, icode, opno: 2); |
6721 | create_output_operand (op: &ops[0], x: target, mode: mask_mode); |
6722 | create_fixed_operand (op: &ops[1], x: comparison); |
6723 | create_fixed_operand (op: &ops[2], XEXP (comparison, 0)); |
6724 | create_fixed_operand (op: &ops[3], XEXP (comparison, 1)); |
6725 | expand_insn (icode, nops: 4, ops); |
6726 | return ops[0].value; |
6727 | } |
6728 | |
6729 | /* Expand a highpart multiply. */ |
6730 | |
6731 | rtx |
6732 | expand_mult_highpart (machine_mode mode, rtx op0, rtx op1, |
6733 | rtx target, bool uns_p) |
6734 | { |
6735 | class expand_operand eops[3]; |
6736 | enum insn_code icode; |
6737 | int method, i; |
6738 | machine_mode wmode; |
6739 | rtx m1, m2; |
6740 | optab tab1, tab2; |
6741 | |
6742 | method = can_mult_highpart_p (mode, uns_p); |
6743 | switch (method) |
6744 | { |
6745 | case 0: |
6746 | return NULL_RTX; |
6747 | case 1: |
6748 | tab1 = uns_p ? umul_highpart_optab : smul_highpart_optab; |
6749 | return expand_binop (mode, binoptab: tab1, op0, op1, target, unsignedp: uns_p, |
6750 | methods: OPTAB_LIB_WIDEN); |
6751 | case 2: |
6752 | tab1 = uns_p ? vec_widen_umult_even_optab : vec_widen_smult_even_optab; |
6753 | tab2 = uns_p ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab; |
6754 | break; |
6755 | case 3: |
6756 | tab1 = uns_p ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab; |
6757 | tab2 = uns_p ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab; |
6758 | if (BYTES_BIG_ENDIAN) |
6759 | std::swap (a&: tab1, b&: tab2); |
6760 | break; |
6761 | default: |
6762 | gcc_unreachable (); |
6763 | } |
6764 | |
6765 | icode = optab_handler (op: tab1, mode); |
6766 | wmode = insn_data[icode].operand[0].mode; |
6767 | gcc_checking_assert (known_eq (2 * GET_MODE_NUNITS (wmode), |
6768 | GET_MODE_NUNITS (mode))); |
6769 | gcc_checking_assert (known_eq (GET_MODE_SIZE (wmode), GET_MODE_SIZE (mode))); |
6770 | |
6771 | create_output_operand (op: &eops[0], x: gen_reg_rtx (wmode), mode: wmode); |
6772 | create_input_operand (op: &eops[1], value: op0, mode); |
6773 | create_input_operand (op: &eops[2], value: op1, mode); |
6774 | expand_insn (icode, nops: 3, ops: eops); |
6775 | m1 = gen_lowpart (mode, eops[0].value); |
6776 | |
6777 | create_output_operand (op: &eops[0], x: gen_reg_rtx (wmode), mode: wmode); |
6778 | create_input_operand (op: &eops[1], value: op0, mode); |
6779 | create_input_operand (op: &eops[2], value: op1, mode); |
6780 | expand_insn (icode: optab_handler (op: tab2, mode), nops: 3, ops: eops); |
6781 | m2 = gen_lowpart (mode, eops[0].value); |
6782 | |
6783 | vec_perm_builder sel; |
6784 | if (method == 2) |
6785 | { |
6786 | /* The encoding has 2 interleaved stepped patterns. */ |
6787 | sel.new_vector (full_nelts: GET_MODE_NUNITS (mode), npatterns: 2, nelts_per_pattern: 3); |
6788 | for (i = 0; i < 6; ++i) |
6789 | sel.quick_push (obj: !BYTES_BIG_ENDIAN + (i & ~1) |
6790 | + ((i & 1) ? GET_MODE_NUNITS (mode) : 0)); |
6791 | } |
6792 | else |
6793 | { |
6794 | /* The encoding has a single interleaved stepped pattern. */ |
6795 | sel.new_vector (full_nelts: GET_MODE_NUNITS (mode), npatterns: 1, nelts_per_pattern: 3); |
6796 | for (i = 0; i < 3; ++i) |
6797 | sel.quick_push (obj: 2 * i + (BYTES_BIG_ENDIAN ? 0 : 1)); |
6798 | } |
6799 | |
6800 | return expand_vec_perm_const (mode, v0: m1, v1: m2, sel, BLKmode, target); |
6801 | } |
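
/* Sketch of method 2 above for V4SImode (illustrative only): the even and
   odd widening multiplies produce two V2DImode vectors,

       m1 = { a0*b0, a2*b2 }   and   m2 = { a1*b1, a3*b3 },

   each reinterpreted as four SImode halves; the final constant permutation
   then picks the high half of every product, restoring the original element
   order.  */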
6802 | |
6803 | /* Helper function to find the MODE_CC set in a sync_compare_and_swap |
6804 | pattern. */ |
6805 | |
6806 | static void |
6807 | find_cc_set (rtx x, const_rtx pat, void *data) |
6808 | { |
6809 | if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC |
6810 | && GET_CODE (pat) == SET) |
6811 | { |
6812 | rtx *p_cc_reg = (rtx *) data; |
6813 | gcc_assert (!*p_cc_reg); |
6814 | *p_cc_reg = x; |
6815 | } |
6816 | } |
6817 | |
6818 | /* This is a helper function for the other atomic operations. This function |
6819 | emits a loop that contains SEQ that iterates until a compare-and-swap |
6820 | operation at the end succeeds. MEM is the memory to be modified. SEQ is |
6821 | a set of instructions that takes a value from OLD_REG as an input and |
6822 | produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be |
6823 | set to the current contents of MEM. After SEQ, a compare-and-swap will |
6824 | attempt to update MEM with NEW_REG. The function returns true when the |
6825 | loop was generated successfully. */ |
6826 | |
6827 | static bool |
6828 | expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq) |
6829 | { |
6830 | machine_mode mode = GET_MODE (mem); |
6831 | rtx_code_label *label; |
6832 | rtx cmp_reg, success, oldval; |
6833 | |
6834 | /* The loop we want to generate looks like |
6835 | |
6836 | cmp_reg = mem; |
6837 | label: |
6838 | old_reg = cmp_reg; |
6839 | seq; |
6840 | (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg) |
6841 | if (!success) |
6842 | goto label; |
6843 | |
6844 | Note that we only do the plain load from memory once. Subsequent |
6845 | iterations use the value loaded by the compare-and-swap pattern. */ |
6846 | |
6847 | label = gen_label_rtx (); |
6848 | cmp_reg = gen_reg_rtx (mode); |
6849 | |
6850 | emit_move_insn (cmp_reg, mem); |
6851 | emit_label (label); |
6852 | emit_move_insn (old_reg, cmp_reg); |
6853 | if (seq) |
6854 | emit_insn (seq); |
6855 | |
6856 | success = NULL_RTX; |
6857 | oldval = cmp_reg; |
6858 | if (!expand_atomic_compare_and_swap (&success, &oldval, mem, old_reg, |
6859 | new_reg, false, MEMMODEL_SYNC_SEQ_CST, |
6860 | MEMMODEL_RELAXED)) |
6861 | return false; |
6862 | |
6863 | if (oldval != cmp_reg) |
6864 | emit_move_insn (cmp_reg, oldval); |
6865 | |
6866 | /* Mark this jump predicted not taken. */ |
6867 | emit_cmp_and_jump_insns (x: success, const0_rtx, comparison: EQ, const0_rtx, |
6868 | GET_MODE (success), unsignedp: 1, label, |
6869 | prob: profile_probability::guessed_never ()); |
6870 | return true; |
6871 | } |
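
/* In C-like terms, using this loop to implement e.g. an atomic fetch-and-add
   on a target that only has compare-and-swap looks roughly like
   (illustrative only):

       oldval = *mem;                      // the single plain load
       do
         newval = oldval + val;            // this is the SEQ passed in
       while (!compare_and_swap (mem, &oldval, newval));

   where a failed compare-and-swap leaves the current contents of *MEM in
   OLDVAL for the next iteration.  */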
6872 | |
6873 | |
6874 | /* This function tries to emit an atomic_exchange instruction. VAL is written |
6875 | to *MEM using memory model MODEL. The previous contents of *MEM are returned, |
6876 | using TARGET if possible. */ |
6877 | |
6878 | static rtx |
6879 | maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model) |
6880 | { |
6881 | machine_mode mode = GET_MODE (mem); |
6882 | enum insn_code icode; |
6883 | |
6884 | /* If the target supports the exchange directly, great. */ |
6885 | icode = direct_optab_handler (op: atomic_exchange_optab, mode); |
6886 | if (icode != CODE_FOR_nothing) |
6887 | { |
6888 | class expand_operand ops[4]; |
6889 | |
6890 | create_output_operand (op: &ops[0], x: target, mode); |
6891 | create_fixed_operand (op: &ops[1], x: mem); |
6892 | create_input_operand (op: &ops[2], value: val, mode); |
6893 | create_integer_operand (&ops[3], model); |
6894 | if (maybe_expand_insn (icode, nops: 4, ops)) |
6895 | return ops[0].value; |
6896 | } |
6897 | |
6898 | return NULL_RTX; |
6899 | } |
6900 | |
6901 | /* This function tries to implement an atomic exchange operation using |
6902 | __sync_lock_test_and_set. VAL is written to *MEM using memory model MODEL. |
6903 | The previous contents of *MEM are returned, using TARGET if possible. |
6904 | Since this instruction is an acquire barrier only, stronger memory |
6905 | models may require additional barriers to be emitted. */ |
6906 | |
6907 | static rtx |
6908 | maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val, |
6909 | enum memmodel model) |
6910 | { |
6911 | machine_mode mode = GET_MODE (mem); |
6912 | enum insn_code icode; |
6913 | rtx_insn *last_insn = get_last_insn (); |
6914 | |
6915 | icode = optab_handler (op: sync_lock_test_and_set_optab, mode); |
6916 | |
6917 | /* Legacy sync_lock_test_and_set is an acquire barrier. If the pattern |
6918 | exists, and the memory model is stronger than acquire, add a release |
6919 | barrier before the instruction. */ |
6920 | |
6921 | if (is_mm_seq_cst (model) || is_mm_release (model) || is_mm_acq_rel (model)) |
6922 | expand_mem_thread_fence (model); |
6923 | |
6924 | if (icode != CODE_FOR_nothing) |
6925 | { |
6926 | class expand_operand ops[3]; |
6927 | create_output_operand (op: &ops[0], x: target, mode); |
6928 | create_fixed_operand (op: &ops[1], x: mem); |
6929 | create_input_operand (op: &ops[2], value: val, mode); |
6930 | if (maybe_expand_insn (icode, nops: 3, ops)) |
6931 | return ops[0].value; |
6932 | } |
6933 | |
6934 | /* If an external test-and-set libcall is provided, use that instead of |
6935 | any external compare-and-swap that we might get from the compare-and- |
6936 | swap-loop expansion later. */ |
6937 | if (!can_compare_and_swap_p (mode, false)) |
6938 | { |
6939 | rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode); |
6940 | if (libfunc != NULL) |
6941 | { |
6942 | rtx addr; |
6943 | |
6944 | addr = convert_memory_address (ptr_mode, XEXP (mem, 0)); |
6945 | return emit_library_call_value (fun: libfunc, NULL_RTX, fn_type: LCT_NORMAL, |
6946 | outmode: mode, arg1: addr, arg1_mode: ptr_mode, |
6947 | arg2: val, arg2_mode: mode); |
6948 | } |
6949 | } |
6950 | |
6951 | /* If the test_and_set can't be emitted, eliminate any barrier that might |
6952 | have been emitted. */ |
6953 | delete_insns_since (last_insn); |
6954 | return NULL_RTX; |
6955 | } |
6956 | |
6957 | /* This function tries to implement an atomic exchange operation using a |
6958 | compare_and_swap loop. VAL is written to *MEM. The previous contents of |
6959 | *MEM are returned, using TARGET if possible. No memory model is required |
6960 | since a compare_and_swap loop is seq-cst. */ |
6961 | |
6962 | static rtx |
6963 | maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val) |
6964 | { |
6965 | machine_mode mode = GET_MODE (mem); |
6966 | |
6967 | if (can_compare_and_swap_p (mode, true)) |
6968 | { |
6969 | if (!target || !register_operand (target, mode)) |
6970 | target = gen_reg_rtx (mode); |
6971 | if (expand_compare_and_swap_loop (mem, old_reg: target, new_reg: val, NULL_RTX)) |
6972 | return target; |
6973 | } |
6974 | |
6975 | return NULL_RTX; |
6976 | } |
6977 | |
6978 | /* This function tries to implement an atomic test-and-set operation |
6979 | using the atomic_test_and_set instruction pattern. A boolean value |
6980 | is returned from the operation, using TARGET if possible. */ |
6981 | |
6982 | static rtx |
6983 | maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model) |
6984 | { |
6985 | machine_mode pat_bool_mode; |
6986 | class expand_operand ops[3]; |
6987 | |
6988 | if (!targetm.have_atomic_test_and_set ()) |
6989 | return NULL_RTX; |
6990 | |
6991 | /* While we always get QImode from __atomic_test_and_set, we get |
6992 | other memory modes from __sync_lock_test_and_set. Note that we |
6993 | use no endian adjustment here. This matches the 4.6 behavior |
6994 | in the Sparc backend. */ |
6995 | enum insn_code icode = targetm.code_for_atomic_test_and_set; |
6996 | gcc_checking_assert (insn_data[icode].operand[1].mode == QImode); |
6997 | if (GET_MODE (mem) != QImode) |
6998 | mem = adjust_address_nv (mem, QImode, 0); |
6999 | |
7000 | pat_bool_mode = insn_data[icode].operand[0].mode; |
7001 | create_output_operand (op: &ops[0], x: target, mode: pat_bool_mode); |
7002 | create_fixed_operand (op: &ops[1], x: mem); |
7003 | create_integer_operand (&ops[2], model); |
7004 | |
7005 | if (maybe_expand_insn (icode, nops: 3, ops)) |
7006 | return ops[0].value; |
7007 | return NULL_RTX; |
7008 | } |
7009 | |
7010 | /* This function expands the legacy __sync_lock_test_and_set operation, which is |
7011 | generally an atomic exchange. Some limited targets only allow the |
7012 | constant 1 to be stored. This is an ACQUIRE operation. |
7013 | |
7014 | TARGET is an optional place to stick the return value. |
7015 | MEM is where VAL is stored. */ |
7016 | |
7017 | rtx |
7018 | expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val) |
7019 | { |
7020 | rtx ret; |
7021 | |
7022 | /* Try an atomic_exchange first. */ |
7023 | ret = maybe_emit_atomic_exchange (target, mem, val, model: MEMMODEL_SYNC_ACQUIRE); |
7024 | if (ret) |
7025 | return ret; |
7026 | |
7027 | ret = maybe_emit_sync_lock_test_and_set (target, mem, val, |
7028 | model: MEMMODEL_SYNC_ACQUIRE); |
7029 | if (ret) |
7030 | return ret; |
7031 | |
7032 | ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val); |
7033 | if (ret) |
7034 | return ret; |
7035 | |
7036 | /* If there are no other options, try atomic_test_and_set if the value |
7037 | being stored is 1. */ |
7038 | if (val == const1_rtx) |
7039 | ret = maybe_emit_atomic_test_and_set (target, mem, model: MEMMODEL_SYNC_ACQUIRE); |
7040 | |
7041 | return ret; |
7042 | } |
7043 | |
7044 | /* This function expands the atomic test_and_set operation: |
7045 | atomically store a boolean TRUE into MEM and return the previous value. |
7046 | |
7047 | MEMMODEL is the memory model variant to use. |
7048 | TARGET is an optional place to stick the return value. */ |
7049 | |
7050 | rtx |
7051 | expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model) |
7052 | { |
7053 | machine_mode mode = GET_MODE (mem); |
7054 | rtx ret, trueval, subtarget; |
7055 | |
7056 | ret = maybe_emit_atomic_test_and_set (target, mem, model); |
7057 | if (ret) |
7058 | return ret; |
7059 | |
7060 | /* Be binary compatible with non-default settings of trueval, and different |
7061 | cpu revisions. E.g. one revision may have atomic-test-and-set, but |
7062 | another only has atomic-exchange. */ |
7063 | if (targetm.atomic_test_and_set_trueval == 1) |
7064 | { |
7065 | trueval = const1_rtx; |
7066 | subtarget = target ? target : gen_reg_rtx (mode); |
7067 | } |
7068 | else |
7069 | { |
7070 | trueval = gen_int_mode (targetm.atomic_test_and_set_trueval, mode); |
7071 | subtarget = gen_reg_rtx (mode); |
7072 | } |
7073 | |
7074 | /* Try the atomic-exchange optab... */ |
7075 | ret = maybe_emit_atomic_exchange (target: subtarget, mem, val: trueval, model); |
7076 | |
7077 | /* ... then an atomic-compare-and-swap loop ... */ |
7078 | if (!ret) |
7079 | ret = maybe_emit_compare_and_swap_exchange_loop (target: subtarget, mem, val: trueval); |
7080 | |
7081 | /* ... before trying the vaguely defined legacy lock_test_and_set. */ |
7082 | if (!ret) |
7083 | ret = maybe_emit_sync_lock_test_and_set (target: subtarget, mem, val: trueval, model); |
7084 | |
7085 | /* Recall that the legacy lock_test_and_set optab was allowed to do magic |
7086 | things with the value 1. Thus we try again, passing 1 instead of TRUEVAL. */ |
7087 | if (!ret && targetm.atomic_test_and_set_trueval != 1) |
7088 | { |
7089 | ret = maybe_emit_sync_lock_test_and_set (target: subtarget, mem, const1_rtx, model); |
7090 | |
7091 | if (ret) |
7092 | { |
7093 | /* Rectify the not-one trueval. */ |
7094 | ret = emit_store_flag_force (target, NE, ret, const0_rtx, mode, 0, 1); |
7095 | gcc_assert (ret); |
7096 | } |
7097 | } |
7098 | |
7099 | return ret; |
7100 | } |
7101 | |
7102 | /* This function expands the atomic exchange operation: |
7103 | atomically store VAL in MEM and return the previous value in MEM. |
7104 | |
7105 | MEMMODEL is the memory model variant to use. |
7106 | TARGET is an optional place to stick the return value. */ |
7107 | |
7108 | rtx |
7109 | expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model) |
7110 | { |
7111 | machine_mode mode = GET_MODE (mem); |
7112 | rtx ret; |
7113 | |
7114 | /* If loads are not atomic for the required size and we are not called to |
7115 | provide a __sync builtin, do not do anything so that we stay consistent |
7116 | with atomic loads of the same size. */ |
7117 | if (!can_atomic_load_p (mode) && !is_mm_sync (model)) |
7118 | return NULL_RTX; |
7119 | |
7120 | ret = maybe_emit_atomic_exchange (target, mem, val, model); |
7121 | |
7122 | /* Next try a compare-and-swap loop for the exchange. */ |
7123 | if (!ret) |
7124 | ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val); |
7125 | |
7126 | return ret; |
7127 | } |
7128 | |
7129 | /* This function expands the atomic compare exchange operation: |
7130 | |
7131 | *PTARGET_BOOL is an optional place to store the boolean success/failure. |
7132 | *PTARGET_OVAL is an optional place to store the old value from memory. |
7133 | Both target parameters may be NULL or const0_rtx to indicate that we do |
7134 | not care about that return value. Both target parameters are updated on |
7135 | success to the actual location of the corresponding result. |
7136 | |
7137 | MEMMODEL is the memory model variant to use. |
7138 | |
7139 | The return value of the function is true for success. */ |
7140 | |
7141 | bool |
7142 | expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval, |
7143 | rtx mem, rtx expected, rtx desired, |
7144 | bool is_weak, enum memmodel succ_model, |
7145 | enum memmodel fail_model) |
7146 | { |
7147 | machine_mode mode = GET_MODE (mem); |
7148 | class expand_operand ops[8]; |
7149 | enum insn_code icode; |
7150 | rtx target_oval, target_bool = NULL_RTX; |
7151 | rtx libfunc; |
7152 | |
7153 | /* If loads are not atomic for the required size and we are not called to |
7154 | provide a __sync builtin, do not do anything so that we stay consistent |
7155 | with atomic loads of the same size. */ |
7156 | if (!can_atomic_load_p (mode) && !is_mm_sync (model: succ_model)) |
7157 | return false; |
7158 | |
7159 | /* Load expected into a register for the compare and swap. */ |
7160 | if (MEM_P (expected)) |
7161 | expected = copy_to_reg (expected); |
7162 | |
7163 | /* Make sure we always have some place to put the return oldval. |
7164 | Further, make sure that place is distinct from the input expected, |
7165 | just in case we need that path down below. */ |
7166 | if (ptarget_oval && *ptarget_oval == const0_rtx) |
7167 | ptarget_oval = NULL; |
7168 | |
7169 | if (ptarget_oval == NULL |
7170 | || (target_oval = *ptarget_oval) == NULL |
7171 | || reg_overlap_mentioned_p (expected, target_oval)) |
7172 | target_oval = gen_reg_rtx (mode); |
7173 | |
7174 | icode = direct_optab_handler (op: atomic_compare_and_swap_optab, mode); |
7175 | if (icode != CODE_FOR_nothing) |
7176 | { |
7177 | machine_mode bool_mode = insn_data[icode].operand[0].mode; |
7178 | |
7179 | if (ptarget_bool && *ptarget_bool == const0_rtx) |
7180 | ptarget_bool = NULL; |
7181 | |
7182 | /* Make sure we always have a place for the bool operand. */ |
7183 | if (ptarget_bool == NULL |
7184 | || (target_bool = *ptarget_bool) == NULL |
7185 | || GET_MODE (target_bool) != bool_mode) |
7186 | target_bool = gen_reg_rtx (bool_mode); |
7187 | |
7188 | /* Emit the compare_and_swap. */ |
7189 | create_output_operand (op: &ops[0], x: target_bool, mode: bool_mode); |
7190 | create_output_operand (op: &ops[1], x: target_oval, mode); |
7191 | create_fixed_operand (op: &ops[2], x: mem); |
7192 | create_input_operand (op: &ops[3], value: expected, mode); |
7193 | create_input_operand (op: &ops[4], value: desired, mode); |
7194 | create_integer_operand (&ops[5], is_weak); |
7195 | create_integer_operand (&ops[6], succ_model); |
7196 | create_integer_operand (&ops[7], fail_model); |
7197 | if (maybe_expand_insn (icode, nops: 8, ops)) |
7198 | { |
7199 | /* Return success/failure. */ |
7200 | target_bool = ops[0].value; |
7201 | target_oval = ops[1].value; |
7202 | goto success; |
7203 | } |
7204 | } |
7205 | |
7206 | /* Otherwise fall back to the original __sync_val_compare_and_swap |
7207 | which is always seq-cst. */ |
7208 | icode = optab_handler (op: sync_compare_and_swap_optab, mode); |
7209 | if (icode != CODE_FOR_nothing) |
7210 | { |
7211 | rtx cc_reg; |
7212 | |
7213 | create_output_operand (op: &ops[0], x: target_oval, mode); |
7214 | create_fixed_operand (op: &ops[1], x: mem); |
7215 | create_input_operand (op: &ops[2], value: expected, mode); |
7216 | create_input_operand (op: &ops[3], value: desired, mode); |
7217 | if (!maybe_expand_insn (icode, nops: 4, ops)) |
7218 | return false; |
7219 | |
7220 | target_oval = ops[0].value; |
7221 | |
7222 | /* If the caller isn't interested in the boolean return value, |
7223 | skip the computation of it. */ |
7224 | if (ptarget_bool == NULL) |
7225 | goto success; |
7226 | |
7227 | /* Otherwise, work out if the compare-and-swap succeeded. */ |
7228 | cc_reg = NULL_RTX; |
7229 | if (have_insn_for (code: COMPARE, CCmode)) |
7230 | note_stores (get_last_insn (), find_cc_set, &cc_reg); |
7231 | if (cc_reg) |
7232 | { |
7233 | target_bool = emit_store_flag_force (target_bool, EQ, cc_reg, |
7234 | const0_rtx, VOIDmode, 0, 1); |
7235 | goto success; |
7236 | } |
7237 | goto success_bool_from_val; |
7238 | } |
7239 | |
7240 | /* Also check for library support for __sync_val_compare_and_swap. */ |
7241 | libfunc = optab_libfunc (sync_compare_and_swap_optab, mode); |
7242 | if (libfunc != NULL) |
7243 | { |
7244 | rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0)); |
7245 | rtx target = emit_library_call_value (fun: libfunc, NULL_RTX, fn_type: LCT_NORMAL, |
7246 | outmode: mode, arg1: addr, arg1_mode: ptr_mode, |
7247 | arg2: expected, arg2_mode: mode, arg3: desired, arg3_mode: mode); |
7248 | emit_move_insn (target_oval, target); |
7249 | |
7250 | /* Compute the boolean return value only if requested. */ |
7251 | if (ptarget_bool) |
7252 | goto success_bool_from_val; |
7253 | else |
7254 | goto success; |
7255 | } |
7256 | |
7257 | /* Failure. */ |
7258 | return false; |
7259 | |
7260 | success_bool_from_val: |
7261 | target_bool = emit_store_flag_force (target_bool, EQ, target_oval, |
7262 | expected, VOIDmode, 1, 1); |
7263 | success: |
7264 | /* Make sure that the oval output winds up where the caller asked. */ |
7265 | if (ptarget_oval) |
7266 | *ptarget_oval = target_oval; |
7267 | if (ptarget_bool) |
7268 | *ptarget_bool = target_bool; |
7269 | return true; |
7270 | } |
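
/* For reference (illustrative): at the source level this corresponds to

       bool ok = __atomic_compare_exchange_n (mem, &expected, desired,
                                              is_weak, succ_model, fail_model);

   *PTARGET_BOOL receives "ok" and *PTARGET_OVAL receives the value that was
   found in *MEM, which equals EXPECTED exactly when the operation
   succeeded.  */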
7271 | |
7272 | /* Generate asm volatile("" : : : "memory") as the memory blockage. */ |
7273 | |
7274 | static void |
7275 | expand_asm_memory_blockage (void) |
7276 | { |
7277 | rtx asm_op, clob; |
7278 | |
7279 | asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, "" , "" , 0, |
7280 | rtvec_alloc (0), rtvec_alloc (0), |
7281 | rtvec_alloc (0), UNKNOWN_LOCATION); |
7282 | MEM_VOLATILE_P (asm_op) = 1; |
7283 | |
7284 | clob = gen_rtx_SCRATCH (VOIDmode); |
7285 | clob = gen_rtx_MEM (BLKmode, clob); |
7286 | clob = gen_rtx_CLOBBER (VOIDmode, clob); |
7287 | |
7288 | emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, asm_op, clob))); |
7289 | } |
7290 | |
7291 | /* Do not propagate memory accesses across this point. */ |
7292 | |
7293 | static void |
7294 | expand_memory_blockage (void) |
7295 | { |
7296 | if (targetm.have_memory_blockage ()) |
7297 | emit_insn (targetm.gen_memory_blockage ()); |
7298 | else |
7299 | expand_asm_memory_blockage (); |
7300 | } |
7301 | |
7302 | /* Generate asm volatile("" : : : "memory") as a memory blockage, at the |
7303 | same time clobbering the register set specified by REGS. */ |
7304 | |
7305 | void |
7306 | expand_asm_reg_clobber_mem_blockage (HARD_REG_SET regs) |
7307 | { |
7308 | rtx asm_op, clob_mem; |
7309 | |
7310 | unsigned int num_of_regs = 0; |
7311 | for (unsigned int i = 0; i < FIRST_PSEUDO_REGISTER; i++) |
7312 | if (TEST_HARD_REG_BIT (set: regs, bit: i)) |
7313 | num_of_regs++; |
7314 | |
7315 | asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, "" , "" , 0, |
7316 | rtvec_alloc (0), rtvec_alloc (0), |
7317 | rtvec_alloc (0), UNKNOWN_LOCATION); |
7318 | MEM_VOLATILE_P (asm_op) = 1; |
7319 | |
7320 | rtvec v = rtvec_alloc (num_of_regs + 2); |
7321 | |
7322 | clob_mem = gen_rtx_SCRATCH (VOIDmode); |
7323 | clob_mem = gen_rtx_MEM (BLKmode, clob_mem); |
7324 | clob_mem = gen_rtx_CLOBBER (VOIDmode, clob_mem); |
7325 | |
7326 | RTVEC_ELT (v, 0) = asm_op; |
7327 | RTVEC_ELT (v, 1) = clob_mem; |
7328 | |
7329 | if (num_of_regs > 0) |
7330 | { |
7331 | unsigned int j = 2; |
7332 | for (unsigned int i = 0; i < FIRST_PSEUDO_REGISTER; i++) |
7333 | if (TEST_HARD_REG_BIT (set: regs, bit: i)) |
7334 | { |
7335 | RTVEC_ELT (v, j) = gen_rtx_CLOBBER (VOIDmode, regno_reg_rtx[i]); |
7336 | j++; |
7337 | } |
7338 | gcc_assert (j == (num_of_regs + 2)); |
7339 | } |
7340 | |
7341 | emit_insn (gen_rtx_PARALLEL (VOIDmode, v)); |
7342 | } |
7343 | |
7344 | /* This routine will either emit the mem_thread_fence pattern or issue a |
7345 | sync_synchronize to generate a fence for memory model MEMMODEL. */ |
7346 | |
7347 | void |
7348 | expand_mem_thread_fence (enum memmodel model) |
7349 | { |
7350 | if (is_mm_relaxed (model)) |
7351 | return; |
7352 | if (targetm.have_mem_thread_fence ()) |
7353 | { |
7354 | emit_insn (targetm.gen_mem_thread_fence (GEN_INT (model))); |
7355 | expand_memory_blockage (); |
7356 | } |
7357 | else if (targetm.have_memory_barrier ()) |
7358 | emit_insn (targetm.gen_memory_barrier ()); |
7359 | else if (synchronize_libfunc != NULL_RTX) |
7360 | emit_library_call (synchronize_libfunc, fn_type: LCT_NORMAL, VOIDmode); |
7361 | else |
7362 | expand_memory_blockage (); |
7363 | } |
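
/* For example, __atomic_thread_fence (__ATOMIC_SEQ_CST) reaches this routine
   with MEMMODEL_SEQ_CST: a relaxed fence expands to nothing at all, and a
   target without any fence or barrier pattern still gets a compiler-level
   memory blockage so that memory accesses are not moved across the fence by
   the optimizers.  */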
7364 | |
7365 | /* Emit a signal fence with given memory model. */ |
7366 | |
7367 | void |
7368 | expand_mem_signal_fence (enum memmodel model) |
7369 | { |
7370 | /* No machine barrier is required to implement a signal fence, but |
7371 | a compiler memory barrier must be issued, except for relaxed MM. */ |
7372 | if (!is_mm_relaxed (model)) |
7373 | expand_memory_blockage (); |
7374 | } |
7375 | |
7376 | /* This function expands the atomic load operation: |
7377 | return the atomically loaded value in MEM. |
7378 | |
7379 | MEMMODEL is the memory model variant to use. |
7380 | TARGET is an optional place to stick the return value. */ |
7381 | |
7382 | rtx |
7383 | expand_atomic_load (rtx target, rtx mem, enum memmodel model) |
7384 | { |
7385 | machine_mode mode = GET_MODE (mem); |
7386 | enum insn_code icode; |
7387 | |
7388 | /* If the target supports the load directly, great. */ |
7389 | icode = direct_optab_handler (op: atomic_load_optab, mode); |
7390 | if (icode != CODE_FOR_nothing) |
7391 | { |
7392 | class expand_operand ops[3]; |
7393 | rtx_insn *last = get_last_insn (); |
7394 | if (is_mm_seq_cst (model)) |
7395 | expand_memory_blockage (); |
7396 | |
7397 | create_output_operand (op: &ops[0], x: target, mode); |
7398 | create_fixed_operand (op: &ops[1], x: mem); |
7399 | create_integer_operand (&ops[2], model); |
7400 | if (maybe_expand_insn (icode, nops: 3, ops)) |
7401 | { |
7402 | if (!is_mm_relaxed (model)) |
7403 | expand_memory_blockage (); |
7404 | return ops[0].value; |
7405 | } |
7406 | delete_insns_since (last); |
7407 | } |
7408 | |
7409 | /* If the size of the object is greater than word size on this target, |
7410 | then we assume that a load will not be atomic. We could try to |
7411 | emulate a load with a compare-and-swap operation, but the store that |
7412 | doing so would entail could be incorrect if this is a volatile atomic |
7413 | load or one that targets read-only-mapped memory. */ |
7414 | if (maybe_gt (GET_MODE_PRECISION (mode), BITS_PER_WORD)) |
7415 | /* If there is no atomic load, leave the library call. */ |
7416 | return NULL_RTX; |
7417 | |
7418 | /* Otherwise assume loads are atomic, and emit the proper barriers. */ |
7419 | if (!target || target == const0_rtx) |
7420 | target = gen_reg_rtx (mode); |
7421 | |
7422 | /* For SEQ_CST, emit a barrier before the load. */ |
7423 | if (is_mm_seq_cst (model)) |
7424 | expand_mem_thread_fence (model); |
7425 | |
7426 | emit_move_insn (target, mem); |
7427 | |
7428 | /* Emit the appropriate barrier after the load. */ |
7429 | expand_mem_thread_fence (model); |
7430 | |
7431 | return target; |
7432 | } |
7433 | |
7434 | /* This function expands the atomic store operation: |
7435 | Atomically store VAL in MEM. |
7436 | MEMMODEL is the memory model variant to use. |
7437 | USE_RELEASE is true if __sync_lock_release can be used as a fallback. |
7438 | The function returns const0_rtx if a pattern was emitted. */ |
7439 | |
7440 | rtx |
7441 | expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release) |
7442 | { |
7443 | machine_mode mode = GET_MODE (mem); |
7444 | enum insn_code icode; |
7445 | class expand_operand ops[3]; |
7446 | |
7447 | /* If the target supports the store directly, great. */ |
7448 | icode = direct_optab_handler (op: atomic_store_optab, mode); |
7449 | if (icode != CODE_FOR_nothing) |
7450 | { |
7451 | rtx_insn *last = get_last_insn (); |
7452 | if (!is_mm_relaxed (model)) |
7453 | expand_memory_blockage (); |
7454 | create_fixed_operand (op: &ops[0], x: mem); |
7455 | create_input_operand (op: &ops[1], value: val, mode); |
7456 | create_integer_operand (&ops[2], model); |
7457 | if (maybe_expand_insn (icode, nops: 3, ops)) |
7458 | { |
7459 | if (is_mm_seq_cst (model)) |
7460 | expand_memory_blockage (); |
7461 | return const0_rtx; |
7462 | } |
7463 | delete_insns_since (last); |
7464 | } |
7465 | |
7466 | /* If using __sync_lock_release is a viable alternative, try it. |
7467 | Note that this will not be set to true if we are expanding a generic |
7468 | __atomic_store_n. */ |
7469 | if (use_release) |
7470 | { |
7471 | icode = direct_optab_handler (op: sync_lock_release_optab, mode); |
7472 | if (icode != CODE_FOR_nothing) |
7473 | { |
7474 | create_fixed_operand (op: &ops[0], x: mem); |
7475 | create_input_operand (op: &ops[1], const0_rtx, mode); |
7476 | if (maybe_expand_insn (icode, nops: 2, ops)) |
7477 | { |
7478 | /* lock_release is only a release barrier. */ |
7479 | if (is_mm_seq_cst (model)) |
7480 | expand_mem_thread_fence (model); |
7481 | return const0_rtx; |
7482 | } |
7483 | } |
7484 | } |
7485 | |
7486 | /* If the size of the object is greater than word size on this target, |
7487 | a default store will not be atomic. */ |
7488 | if (maybe_gt (GET_MODE_PRECISION (mode), BITS_PER_WORD)) |
7489 | { |
7490 | /* If loads are atomic or we are called to provide a __sync builtin, |
7491 | we can try an atomic_exchange and throw away the result. Otherwise, |
7492 | don't do anything so that we do not create an inconsistency between |
7493 | loads and stores. */ |
7494 | if (can_atomic_load_p (mode) || is_mm_sync (model)) |
7495 | { |
7496 | rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model); |
7497 | if (!target) |
7498 | target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem, |
7499 | val); |
7500 | if (target) |
7501 | return const0_rtx; |
7502 | } |
7503 | return NULL_RTX; |
7504 | } |
7505 | |
7506 | /* Otherwise assume stores are atomic, and emit the proper barriers. */ |
7507 | expand_mem_thread_fence (model); |
7508 | |
7509 | emit_move_insn (mem, val); |
7510 | |
7511 | /* For SEQ_CST, also emit a barrier after the store. */ |
7512 | if (is_mm_seq_cst (model)) |
7513 | expand_mem_thread_fence (model); |
7514 | |
7515 | return const0_rtx; |
7516 | } |
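
/* Illustrative shape of the final fallback above for a word-size seq-cst
   store, assuming plain stores of that size are atomic on the target:

       fence            // expand_mem_thread_fence (model)
       *mem = val;      // ordinary move instruction
       fence            // trailing barrier, emitted for seq-cst only
*/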
7517 | |
7518 | |
7519 | /* Structure containing the pointers and values required to process the |
7520 | various forms of the atomic_fetch_op and atomic_op_fetch builtins. */ |
7521 | |
7522 | struct atomic_op_functions |
7523 | { |
7524 | direct_optab mem_fetch_before; |
7525 | direct_optab mem_fetch_after; |
7526 | direct_optab mem_no_result; |
7527 | optab fetch_before; |
7528 | optab fetch_after; |
7529 | direct_optab no_result; |
7530 | enum rtx_code reverse_code; |
7531 | }; |
7532 | |
7533 | |
7534 | /* Fill in structure pointed to by OP with the various optab entries for an |
7535 | operation of type CODE. */ |
7536 | |
7537 | static void |
7538 | get_atomic_op_for_code (struct atomic_op_functions *op, enum rtx_code code) |
7539 | { |
7540 | gcc_assert (op != NULL); |
7541 | |
7542 | /* If SWITCHABLE_TARGET is defined, then subtargets can be switched |
7543 | in the source code during compilation, and the optab entries are not |
7544 | computable until runtime. Fill in the values at runtime. */ |
7545 | switch (code) |
7546 | { |
7547 | case PLUS: |
7548 | op->mem_fetch_before = atomic_fetch_add_optab; |
7549 | op->mem_fetch_after = atomic_add_fetch_optab; |
7550 | op->mem_no_result = atomic_add_optab; |
7551 | op->fetch_before = sync_old_add_optab; |
7552 | op->fetch_after = sync_new_add_optab; |
7553 | op->no_result = sync_add_optab; |
7554 | op->reverse_code = MINUS; |
7555 | break; |
7556 | case MINUS: |
7557 | op->mem_fetch_before = atomic_fetch_sub_optab; |
7558 | op->mem_fetch_after = atomic_sub_fetch_optab; |
7559 | op->mem_no_result = atomic_sub_optab; |
7560 | op->fetch_before = sync_old_sub_optab; |
7561 | op->fetch_after = sync_new_sub_optab; |
7562 | op->no_result = sync_sub_optab; |
7563 | op->reverse_code = PLUS; |
7564 | break; |
7565 | case XOR: |
7566 | op->mem_fetch_before = atomic_fetch_xor_optab; |
7567 | op->mem_fetch_after = atomic_xor_fetch_optab; |
7568 | op->mem_no_result = atomic_xor_optab; |
7569 | op->fetch_before = sync_old_xor_optab; |
7570 | op->fetch_after = sync_new_xor_optab; |
7571 | op->no_result = sync_xor_optab; |
7572 | op->reverse_code = XOR; |
7573 | break; |
7574 | case AND: |
7575 | op->mem_fetch_before = atomic_fetch_and_optab; |
7576 | op->mem_fetch_after = atomic_and_fetch_optab; |
7577 | op->mem_no_result = atomic_and_optab; |
7578 | op->fetch_before = sync_old_and_optab; |
7579 | op->fetch_after = sync_new_and_optab; |
7580 | op->no_result = sync_and_optab; |
7581 | op->reverse_code = UNKNOWN; |
7582 | break; |
7583 | case IOR: |
7584 | op->mem_fetch_before = atomic_fetch_or_optab; |
7585 | op->mem_fetch_after = atomic_or_fetch_optab; |
7586 | op->mem_no_result = atomic_or_optab; |
7587 | op->fetch_before = sync_old_ior_optab; |
7588 | op->fetch_after = sync_new_ior_optab; |
7589 | op->no_result = sync_ior_optab; |
7590 | op->reverse_code = UNKNOWN; |
7591 | break; |
7592 | case NOT: |
7593 | op->mem_fetch_before = atomic_fetch_nand_optab; |
7594 | op->mem_fetch_after = atomic_nand_fetch_optab; |
7595 | op->mem_no_result = atomic_nand_optab; |
7596 | op->fetch_before = sync_old_nand_optab; |
7597 | op->fetch_after = sync_new_nand_optab; |
7598 | op->no_result = sync_nand_optab; |
7599 | op->reverse_code = UNKNOWN; |
7600 | break; |
7601 | default: |
7602 | gcc_unreachable (); |
7603 | } |
7604 | } |
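
/* Note on the table above: the "nand" builtins store ~(*mem & val), which is
   why CODE == NOT selects the nand optabs.  REVERSE_CODE records how the
   "value before" result can be recovered from the "value after" result; as a
   rough guide (OLD is the previous contents of *mem):

     new = old + val    so  old = new - val   (PLUS,  reverse MINUS)
     new = old - val    so  old = new + val   (MINUS, reverse PLUS)
     new = old ^ val    so  old = new ^ val   (XOR,   reverse XOR)
     new = old & val                          (AND,  not recoverable: UNKNOWN)
     new = old | val                          (IOR,  not recoverable: UNKNOWN)
     new = ~(old & val)                       (NAND, not recoverable: UNKNOWN)  */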
7605 | |
/* See if there is a better way to implement the operation "*MEM CODE VAL"
   using memory order MODEL.  If AFTER is true the operation needs to return
   the value of *MEM after the operation; otherwise it needs the previous
   value.  TARGET is an optional place to store the result; the result is
   unused if TARGET is const0_rtx.
   Return the result if there is a better sequence, otherwise NULL_RTX.  */
7612 | |
7613 | static rtx |
7614 | maybe_optimize_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code, |
7615 | enum memmodel model, bool after) |
7616 | { |
  /* If the caller only wants the value from before the operation, or does
     not use the result at all, it may be possible to replace the sequence
     with a native exchange operation.  */
7619 | if (!after || target == const0_rtx) |
7620 | { |
7621 | /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m). */ |
7622 | if (code == AND && val == const0_rtx) |
7623 | { |
7624 | if (target == const0_rtx) |
7625 | target = gen_reg_rtx (GET_MODE (mem)); |
7626 | return maybe_emit_atomic_exchange (target, mem, val, model); |
7627 | } |
7628 | |
7629 | /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m). */ |
7630 | if (code == IOR && val == constm1_rtx) |
7631 | { |
7632 | if (target == const0_rtx) |
7633 | target = gen_reg_rtx (GET_MODE (mem)); |
7634 | return maybe_emit_atomic_exchange (target, mem, val, model); |
7635 | } |
7636 | } |
7637 | |
7638 | return NULL_RTX; |
7639 | } |
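
/* A hedged illustration of the two rewrites above, phrased with the
   user-level builtins rather than the RTL actually produced: when only the
   previous value (or no value at all) is wanted,

     __atomic_fetch_and (&x, 0, model)   behaves like  __atomic_exchange_n (&x, 0, model)
     __atomic_fetch_or (&x, -1, model)   behaves like  __atomic_exchange_n (&x, -1, model)

   since ANDing with 0 always stores 0 and ORing with -1 always stores all
   ones, while both forms return the previous value of x.  */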
7640 | |
/* Try to emit an instruction for a specific operation variation.
7642 | OPTAB contains the OP functions. |
7643 | TARGET is an optional place to return the result. const0_rtx means unused. |
7644 | MEM is the memory location to operate on. |
7645 | VAL is the value to use in the operation. |
7646 | USE_MEMMODEL is TRUE if the variation with a memory model should be tried. |
7647 | MODEL is the memory model, if used. |
7648 | AFTER is true if the returned result is the value after the operation. */ |
7649 | |
7650 | static rtx |
7651 | maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem, |
7652 | rtx val, bool use_memmodel, enum memmodel model, bool after) |
7653 | { |
7654 | machine_mode mode = GET_MODE (mem); |
7655 | class expand_operand ops[4]; |
7656 | enum insn_code icode; |
7657 | int op_counter = 0; |
7658 | int num_ops; |
7659 | |
7660 | /* Check to see if there is a result returned. */ |
7661 | if (target == const0_rtx) |
7662 | { |
7663 | if (use_memmodel) |
7664 | { |
7665 | icode = direct_optab_handler (op: optab->mem_no_result, mode); |
7666 | create_integer_operand (&ops[2], model); |
7667 | num_ops = 3; |
7668 | } |
7669 | else |
7670 | { |
7671 | icode = direct_optab_handler (op: optab->no_result, mode); |
7672 | num_ops = 2; |
7673 | } |
7674 | } |
7675 | /* Otherwise, we need to generate a result. */ |
7676 | else |
7677 | { |
7678 | if (use_memmodel) |
7679 | { |
7680 | icode = direct_optab_handler (op: after ? optab->mem_fetch_after |
7681 | : optab->mem_fetch_before, mode); |
7682 | create_integer_operand (&ops[3], model); |
7683 | num_ops = 4; |
7684 | } |
7685 | else |
7686 | { |
7687 | icode = optab_handler (op: after ? optab->fetch_after |
7688 | : optab->fetch_before, mode); |
7689 | num_ops = 3; |
7690 | } |
7691 | create_output_operand (op: &ops[op_counter++], x: target, mode); |
7692 | } |
7693 | if (icode == CODE_FOR_nothing) |
7694 | return NULL_RTX; |
7695 | |
7696 | create_fixed_operand (op: &ops[op_counter++], x: mem); |
7697 | /* VAL may have been promoted to a wider mode. Shrink it if so. */ |
7698 | create_convert_operand_to (op: &ops[op_counter++], value: val, mode, unsigned_p: true); |
7699 | |
7700 | if (maybe_expand_insn (icode, nops: num_ops, ops)) |
7701 | return (target == const0_rtx ? const0_rtx : ops[0].value); |
7702 | |
7703 | return NULL_RTX; |
7704 | } |
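
/* For reference, the operand layouts set up above correspond to the four
   pattern shapes maybe_emit_op can select (a summary of the code, not an
   additional constraint):

     no result, __sync pattern:      { mem, val }
     no result, __atomic pattern:    { mem, val, model }
     result,    __sync pattern:      { target, mem, val }
     result,    __atomic pattern:    { target, mem, val, model }

   VAL goes through create_convert_operand_to because the caller may have
   promoted it to a mode wider than MEM's.  */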
7705 | |
7706 | |
/* This function expands an atomic fetch_OP or OP_fetch operation:
   atomically fetch *MEM, perform the operation with VAL and store the
   result back to *MEM.
   TARGET is an optional place to stick the return value.  const0_rtx
   indicates the result is unused.
   CODE is the operation being performed (OP).
   MODEL is the memory model variant to use.
   AFTER is true to return the result of the operation (OP_fetch).
   AFTER is false to return the value before the operation (fetch_OP).
7715 | |
7716 | This function will *only* generate instructions if there is a direct |
7717 | optab. No compare and swap loops or libcalls will be generated. */ |
7718 | |
7719 | static rtx |
7720 | expand_atomic_fetch_op_no_fallback (rtx target, rtx mem, rtx val, |
7721 | enum rtx_code code, enum memmodel model, |
7722 | bool after) |
7723 | { |
7724 | machine_mode mode = GET_MODE (mem); |
7725 | struct atomic_op_functions optab; |
7726 | rtx result; |
7727 | bool unused_result = (target == const0_rtx); |
7728 | |
7729 | get_atomic_op_for_code (op: &optab, code); |
7730 | |
7731 | /* Check to see if there are any better instructions. */ |
7732 | result = maybe_optimize_fetch_op (target, mem, val, code, model, after); |
7733 | if (result) |
7734 | return result; |
7735 | |
7736 | /* Check for the case where the result isn't used and try those patterns. */ |
7737 | if (unused_result) |
7738 | { |
7739 | /* Try the memory model variant first. */ |
7740 | result = maybe_emit_op (optab: &optab, target, mem, val, use_memmodel: true, model, after: true); |
7741 | if (result) |
7742 | return result; |
7743 | |
      /* Next try the old-style pattern without a memory model.  */
7745 | result = maybe_emit_op (optab: &optab, target, mem, val, use_memmodel: false, model, after: true); |
7746 | if (result) |
7747 | return result; |
7748 | |
7749 | /* There is no no-result pattern, so try patterns with a result. */ |
7750 | target = NULL_RTX; |
7751 | } |
7752 | |
7753 | /* Try the __atomic version. */ |
7754 | result = maybe_emit_op (optab: &optab, target, mem, val, use_memmodel: true, model, after); |
7755 | if (result) |
7756 | return result; |
7757 | |
7758 | /* Try the older __sync version. */ |
7759 | result = maybe_emit_op (optab: &optab, target, mem, val, use_memmodel: false, model, after); |
7760 | if (result) |
7761 | return result; |
7762 | |
7763 | /* If the fetch value can be calculated from the other variation of fetch, |
7764 | try that operation. */ |
7765 | if (after || unused_result || optab.reverse_code != UNKNOWN) |
7766 | { |
7767 | /* Try the __atomic version, then the older __sync version. */ |
7768 | result = maybe_emit_op (optab: &optab, target, mem, val, use_memmodel: true, model, after: !after); |
7769 | if (!result) |
7770 | result = maybe_emit_op (optab: &optab, target, mem, val, use_memmodel: false, model, after: !after); |
7771 | |
7772 | if (result) |
7773 | { |
7774 | /* If the result isn't used, no need to do compensation code. */ |
7775 | if (unused_result) |
7776 | return result; |
7777 | |
	  /* Issue compensation code.  fetch_after == fetch_before OP val.
	     fetch_before == fetch_after REVERSE_OP val.  */
7780 | if (!after) |
7781 | code = optab.reverse_code; |
7782 | if (code == NOT) |
7783 | { |
7784 | result = expand_simple_binop (mode, code: AND, op0: result, op1: val, NULL_RTX, |
7785 | unsignedp: true, methods: OPTAB_LIB_WIDEN); |
7786 | result = expand_simple_unop (mode, code: NOT, op0: result, target, unsignedp: true); |
7787 | } |
7788 | else |
7789 | result = expand_simple_binop (mode, code, op0: result, op1: val, target, |
7790 | unsignedp: true, methods: OPTAB_LIB_WIDEN); |
7791 | return result; |
7792 | } |
7793 | } |
7794 | |
7795 | /* No direct opcode can be generated. */ |
7796 | return NULL_RTX; |
7797 | } |
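
/* A sketch of the compensation step above, written with the user-level
   builtins rather than the emitted RTL (the function names here are
   illustrative only): when only the "other" fetch variant has a pattern,
   the requested value is recomputed from its result, e.g.

     int
     add_fetch_from_fetch_add (int *p, int v)
     {
       return __atomic_fetch_add (p, v, __ATOMIC_SEQ_CST) + v;
     }

     unsigned int
     nand_fetch_from_fetch_nand (unsigned int *p, unsigned int v)
     {
       return ~(__atomic_fetch_nand (p, v, __ATOMIC_SEQ_CST) & v);
     }

   Going the other way, fetch_add can be recovered from add_fetch by
   subtracting V, which is the REVERSE_CODE direction.  */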
7798 | |
7799 | |
7800 | |
/* This function expands an atomic fetch_OP or OP_fetch operation:
   atomically fetch *MEM, perform the operation with VAL and store the
   result back to *MEM.
   TARGET is an optional place to stick the return value.  const0_rtx
   indicates the result is unused.
   CODE is the operation being performed (OP).
   MODEL is the memory model variant to use.
   AFTER is true to return the result of the operation (OP_fetch).
   AFTER is false to return the value before the operation (fetch_OP).  */
7809 | rtx |
7810 | expand_atomic_fetch_op (rtx target, rtx mem, rtx val, enum rtx_code code, |
7811 | enum memmodel model, bool after) |
7812 | { |
7813 | machine_mode mode = GET_MODE (mem); |
7814 | rtx result; |
7815 | bool unused_result = (target == const0_rtx); |
7816 | |
7817 | /* If loads are not atomic for the required size and we are not called to |
7818 | provide a __sync builtin, do not do anything so that we stay consistent |
7819 | with atomic loads of the same size. */ |
7820 | if (!can_atomic_load_p (mode) && !is_mm_sync (model)) |
7821 | return NULL_RTX; |
7822 | |
7823 | result = expand_atomic_fetch_op_no_fallback (target, mem, val, code, model, |
7824 | after); |
7825 | |
7826 | if (result) |
7827 | return result; |
7828 | |
7829 | /* Add/sub can be implemented by doing the reverse operation with -(val). */ |
7830 | if (code == PLUS || code == MINUS) |
7831 | { |
7832 | rtx tmp; |
7833 | enum rtx_code reverse = (code == PLUS ? MINUS : PLUS); |
7834 | |
7835 | start_sequence (); |
7836 | tmp = expand_simple_unop (mode, code: NEG, op0: val, NULL_RTX, unsignedp: true); |
7837 | result = expand_atomic_fetch_op_no_fallback (target, mem, val: tmp, code: reverse, |
7838 | model, after); |
7839 | if (result) |
7840 | { |
	  /* The reverse operation worked, so emit the insns and return.  */
7842 | tmp = get_insns (); |
7843 | end_sequence (); |
7844 | emit_insn (tmp); |
7845 | return result; |
7846 | } |
7847 | |
      /* The reverse operation did not work either, so throw away the
	 negation code and continue.  */
7849 | end_sequence (); |
7850 | } |
7851 | |
7852 | /* Try the __sync libcalls only if we can't do compare-and-swap inline. */ |
7853 | if (!can_compare_and_swap_p (mode, false)) |
7854 | { |
7855 | rtx libfunc; |
7856 | bool fixup = false; |
7857 | enum rtx_code orig_code = code; |
7858 | struct atomic_op_functions optab; |
7859 | |
7860 | get_atomic_op_for_code (op: &optab, code); |
7861 | libfunc = optab_libfunc (after ? optab.fetch_after |
7862 | : optab.fetch_before, mode); |
7863 | if (libfunc == NULL |
7864 | && (after || unused_result || optab.reverse_code != UNKNOWN)) |
7865 | { |
7866 | fixup = true; |
7867 | if (!after) |
7868 | code = optab.reverse_code; |
7869 | libfunc = optab_libfunc (after ? optab.fetch_before |
7870 | : optab.fetch_after, mode); |
7871 | } |
7872 | if (libfunc != NULL) |
7873 | { |
7874 | rtx addr = convert_memory_address (ptr_mode, XEXP (mem, 0)); |
7875 | result = emit_library_call_value (fun: libfunc, NULL, fn_type: LCT_NORMAL, outmode: mode, |
7876 | arg1: addr, arg1_mode: ptr_mode, arg2: val, arg2_mode: mode); |
7877 | |
7878 | if (!unused_result && fixup) |
7879 | result = expand_simple_binop (mode, code, op0: result, op1: val, target, |
7880 | unsignedp: true, methods: OPTAB_LIB_WIDEN); |
7881 | return result; |
7882 | } |
7883 | |
7884 | /* We need the original code for any further attempts. */ |
7885 | code = orig_code; |
7886 | } |
7887 | |
7888 | /* If nothing else has succeeded, default to a compare and swap loop. */ |
7889 | if (can_compare_and_swap_p (mode, true)) |
7890 | { |
7891 | rtx_insn *insn; |
7892 | rtx t0 = gen_reg_rtx (mode), t1; |
7893 | |
7894 | start_sequence (); |
7895 | |
7896 | /* If the result is used, get a register for it. */ |
7897 | if (!unused_result) |
7898 | { |
7899 | if (!target || !register_operand (target, mode)) |
7900 | target = gen_reg_rtx (mode); |
7901 | /* If fetch_before, copy the value now. */ |
7902 | if (!after) |
7903 | emit_move_insn (target, t0); |
7904 | } |
7905 | else |
7906 | target = const0_rtx; |
7907 | |
7908 | t1 = t0; |
7909 | if (code == NOT) |
7910 | { |
7911 | t1 = expand_simple_binop (mode, code: AND, op0: t1, op1: val, NULL_RTX, |
7912 | unsignedp: true, methods: OPTAB_LIB_WIDEN); |
7913 | t1 = expand_simple_unop (mode, code, op0: t1, NULL_RTX, unsignedp: true); |
7914 | } |
7915 | else |
7916 | t1 = expand_simple_binop (mode, code, op0: t1, op1: val, NULL_RTX, unsignedp: true, |
7917 | methods: OPTAB_LIB_WIDEN); |
7918 | |
7919 | /* For after, copy the value now. */ |
7920 | if (!unused_result && after) |
7921 | emit_move_insn (target, t1); |
7922 | insn = get_insns (); |
7923 | end_sequence (); |
7924 | |
7925 | if (t1 != NULL && expand_compare_and_swap_loop (mem, old_reg: t0, new_reg: t1, seq: insn)) |
7926 | return target; |
7927 | } |
7928 | |
7929 | return NULL_RTX; |
7930 | } |
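
/* The final compare-and-swap fallback above is roughly equivalent to the
   following loop written with the generic builtins (a sketch of the shape of
   the expansion, not the code GCC actually generates):

     int
     fetch_add_via_cas_sketch (int *p, int v)
     {
       int old = *p;
       while (!__atomic_compare_exchange_n (p, &old, old + v, false,
					    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
	 ;
       return old;
     }

   expand_compare_and_swap_loop provides the loop and the CAS; the sequence
   built above (T0 -> T1) is the body that computes the new value and, when
   needed, copies the old or new value into TARGET.  */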
7931 | |
7932 | /* Return true if OPERAND is suitable for operand number OPNO of |
7933 | instruction ICODE. */ |
7934 | |
7935 | bool |
7936 | insn_operand_matches (enum insn_code icode, unsigned int opno, rtx operand) |
7937 | { |
7938 | return (!insn_data[(int) icode].operand[opno].predicate |
7939 | || (insn_data[(int) icode].operand[opno].predicate |
7940 | (operand, insn_data[(int) icode].operand[opno].mode))); |
7941 | } |
7942 | |
7943 | /* TARGET is a target of a multiword operation that we are going to |
7944 | implement as a series of word-mode operations. Return true if |
7945 | TARGET is suitable for this purpose. */ |
7946 | |
7947 | bool |
7948 | valid_multiword_target_p (rtx target) |
7949 | { |
7950 | machine_mode mode; |
7951 | int i, size; |
7952 | |
7953 | mode = GET_MODE (target); |
7954 | if (!GET_MODE_SIZE (mode).is_constant (const_value: &size)) |
7955 | return false; |
7956 | for (i = 0; i < size; i += UNITS_PER_WORD) |
7957 | if (!validate_subreg (word_mode, mode, target, i)) |
7958 | return false; |
7959 | return true; |
7960 | } |
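
/* For example, on a 32-bit target a DImode TARGET is suitable only if it can
   be decomposed into word_mode (SImode) subregs at byte offsets 0 and 4,
   which is what the per-word validate_subreg calls above check.  */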
7961 | |
7962 | /* Make OP describe an input operand that has value INTVAL and that has |
7963 | no inherent mode. This function should only be used for operands that |
7964 | are always expand-time constants. The backend may request that INTVAL |
7965 | be copied into a different kind of rtx, but it must specify the mode |
7966 | of that rtx if so. */ |
7967 | |
7968 | void |
7969 | create_integer_operand (class expand_operand *op, poly_int64 intval) |
7970 | { |
7971 | create_expand_operand (op, type: EXPAND_INTEGER, |
7972 | value: gen_int_mode (intval, MAX_MODE_INT), |
7973 | VOIDmode, unsigned_p: false, int_value: intval); |
7974 | } |
7975 | |
7976 | /* Like maybe_legitimize_operand, but do not change the code of the |
7977 | current rtx value. */ |
7978 | |
7979 | static bool |
7980 | maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno, |
7981 | class expand_operand *op) |
7982 | { |
7983 | /* See if the operand matches in its current form. */ |
7984 | if (insn_operand_matches (icode, opno, operand: op->value)) |
7985 | return true; |
7986 | |
7987 | /* If the operand is a memory whose address has no side effects, |
7988 | try forcing the address into a non-virtual pseudo register. |
7989 | The check for side effects is important because copy_to_mode_reg |
7990 | cannot handle things like auto-modified addresses. */ |
7991 | if (insn_data[(int) icode].operand[opno].allows_mem && MEM_P (op->value)) |
7992 | { |
7993 | rtx addr, mem; |
7994 | |
7995 | mem = op->value; |
7996 | addr = XEXP (mem, 0); |
7997 | if (!(REG_P (addr) && REGNO (addr) > LAST_VIRTUAL_REGISTER) |
7998 | && !side_effects_p (addr)) |
7999 | { |
8000 | rtx_insn *last; |
8001 | machine_mode mode; |
8002 | |
8003 | last = get_last_insn (); |
8004 | mode = get_address_mode (mem); |
8005 | mem = replace_equiv_address (mem, copy_to_mode_reg (mode, addr)); |
8006 | if (insn_operand_matches (icode, opno, operand: mem)) |
8007 | { |
8008 | op->value = mem; |
8009 | return true; |
8010 | } |
8011 | delete_insns_since (last); |
8012 | } |
8013 | } |
8014 | |
8015 | return false; |
8016 | } |
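
/* As an illustration of the address fix-up above (a hedged example, not a
   full description of the predicate machinery): a memory operand such as
   (mem (plus (reg X) (const_int 4))) that a strict predicate rejects can
   often be made acceptable by copying its address into a fresh pseudo,
   giving (mem (reg TMP)), whereas an auto-modified address such as
   (mem (post_inc (reg X))) is left alone because copying the address would
   drop the side effect.  */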
8017 | |
8018 | /* Try to make OP match operand OPNO of instruction ICODE. Return true |
8019 | on success, storing the new operand value back in OP. */ |
8020 | |
8021 | static bool |
8022 | maybe_legitimize_operand (enum insn_code icode, unsigned int opno, |
8023 | class expand_operand *op) |
8024 | { |
8025 | machine_mode mode, imode, tmode; |
8026 | |
8027 | mode = op->mode; |
8028 | switch (op->type) |
8029 | { |
8030 | case EXPAND_FIXED: |
8031 | { |
8032 | temporary_volatile_ok v (true); |
8033 | return maybe_legitimize_operand_same_code (icode, opno, op); |
8034 | } |
8035 | |
8036 | case EXPAND_OUTPUT: |
8037 | gcc_assert (mode != VOIDmode); |
8038 | if (op->value |
8039 | && op->value != const0_rtx |
8040 | && GET_MODE (op->value) == mode |
8041 | && maybe_legitimize_operand_same_code (icode, opno, op)) |
8042 | return true; |
8043 | |
8044 | op->value = gen_reg_rtx (mode); |
8045 | op->target = 0; |
8046 | break; |
8047 | |
8048 | case EXPAND_INPUT: |
8049 | input: |
8050 | gcc_assert (mode != VOIDmode); |
8051 | gcc_assert (GET_MODE (op->value) == VOIDmode |
8052 | || GET_MODE (op->value) == mode); |
8053 | if (maybe_legitimize_operand_same_code (icode, opno, op)) |
8054 | return true; |
8055 | |
8056 | op->value = copy_to_mode_reg (mode, op->value); |
8057 | break; |
8058 | |
8059 | case EXPAND_CONVERT_TO: |
8060 | gcc_assert (mode != VOIDmode); |
8061 | op->value = convert_to_mode (mode, op->value, op->unsigned_p); |
8062 | goto input; |
8063 | |
8064 | case EXPAND_CONVERT_FROM: |
8065 | if (GET_MODE (op->value) != VOIDmode) |
8066 | mode = GET_MODE (op->value); |
8067 | else |
8068 | /* The caller must tell us what mode this value has. */ |
8069 | gcc_assert (mode != VOIDmode); |
8070 | |
8071 | imode = insn_data[(int) icode].operand[opno].mode; |
8072 | tmode = (VECTOR_MODE_P (imode) && !VECTOR_MODE_P (mode) |
8073 | ? GET_MODE_INNER (imode) : imode); |
8074 | if (tmode != VOIDmode && tmode != mode) |
8075 | { |
8076 | op->value = convert_modes (mode: tmode, oldmode: mode, x: op->value, unsignedp: op->unsigned_p); |
8077 | mode = tmode; |
8078 | } |
8079 | if (imode != VOIDmode && imode != mode) |
8080 | { |
8081 | gcc_assert (VECTOR_MODE_P (imode) && !VECTOR_MODE_P (mode)); |
8082 | op->value = expand_vector_broadcast (vmode: imode, op: op->value); |
8083 | mode = imode; |
8084 | } |
8085 | goto input; |
8086 | |
8087 | case EXPAND_ADDRESS: |
8088 | op->value = convert_memory_address (as_a <scalar_int_mode> (mode), |
8089 | op->value); |
8090 | goto input; |
8091 | |
8092 | case EXPAND_INTEGER: |
8093 | mode = insn_data[(int) icode].operand[opno].mode; |
8094 | if (mode != VOIDmode |
8095 | && known_eq (trunc_int_for_mode (op->int_value, mode), |
8096 | op->int_value)) |
8097 | { |
8098 | op->value = gen_int_mode (op->int_value, mode); |
8099 | goto input; |
8100 | } |
8101 | break; |
8102 | |
8103 | case EXPAND_UNDEFINED_INPUT: |
8104 | /* See if the predicate accepts a SCRATCH rtx, which in this context |
8105 | indicates an undefined value. Use an uninitialized register if not. */ |
8106 | if (!insn_operand_matches (icode, opno, operand: op->value)) |
8107 | { |
8108 | op->value = gen_reg_rtx (op->mode); |
8109 | goto input; |
8110 | } |
8111 | return true; |
8112 | } |
8113 | return insn_operand_matches (icode, opno, operand: op->value); |
8114 | } |
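
/* A small illustration of the EXPAND_CONVERT_FROM case above (a sketch of
   the intent): if the instruction wants a V4SImode operand and the caller
   supplies an SImode value X, the value is first converted to the vector's
   element mode (already SImode here) and then duplicated into every lane by
   expand_vector_broadcast, so the operand becomes {X, X, X, X}.  */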
8115 | |
8116 | /* Make OP describe an input operand that should have the same value |
8117 | as VALUE, after any mode conversion that the target might request. |
8118 | TYPE is the type of VALUE. */ |
8119 | |
8120 | void |
8121 | create_convert_operand_from_type (class expand_operand *op, |
8122 | rtx value, tree type) |
8123 | { |
8124 | create_convert_operand_from (op, value, TYPE_MODE (type), |
8125 | TYPE_UNSIGNED (type)); |
8126 | } |
8127 | |
8128 | /* Return true if the requirements on operands OP1 and OP2 of instruction |
8129 | ICODE are similar enough for the result of legitimizing OP1 to be |
8130 | reusable for OP2. OPNO1 and OPNO2 are the operand numbers associated |
8131 | with OP1 and OP2 respectively. */ |
8132 | |
8133 | static inline bool |
8134 | can_reuse_operands_p (enum insn_code icode, |
8135 | unsigned int opno1, unsigned int opno2, |
8136 | const class expand_operand *op1, |
8137 | const class expand_operand *op2) |
8138 | { |
8139 | /* Check requirements that are common to all types. */ |
8140 | if (op1->type != op2->type |
8141 | || op1->mode != op2->mode |
8142 | || (insn_data[(int) icode].operand[opno1].mode |
8143 | != insn_data[(int) icode].operand[opno2].mode)) |
8144 | return false; |
8145 | |
8146 | /* Check the requirements for specific types. */ |
8147 | switch (op1->type) |
8148 | { |
8149 | case EXPAND_OUTPUT: |
8150 | case EXPAND_UNDEFINED_INPUT: |
      /* Outputs and undefined inputs must remain distinct.  */
8152 | return false; |
8153 | |
8154 | case EXPAND_FIXED: |
8155 | case EXPAND_INPUT: |
8156 | case EXPAND_ADDRESS: |
8157 | case EXPAND_INTEGER: |
8158 | return true; |
8159 | |
8160 | case EXPAND_CONVERT_TO: |
8161 | case EXPAND_CONVERT_FROM: |
8162 | return op1->unsigned_p == op2->unsigned_p; |
8163 | } |
8164 | gcc_unreachable (); |
8165 | } |
8166 | |
8167 | /* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS) |
8168 | of instruction ICODE. Return true on success, leaving the new operand |
8169 | values in the OPS themselves. Emit no code on failure. */ |
8170 | |
8171 | bool |
8172 | maybe_legitimize_operands (enum insn_code icode, unsigned int opno, |
8173 | unsigned int nops, class expand_operand *ops) |
8174 | { |
8175 | rtx_insn *last = get_last_insn (); |
8176 | rtx *orig_values = XALLOCAVEC (rtx, nops); |
8177 | for (unsigned int i = 0; i < nops; i++) |
8178 | { |
8179 | orig_values[i] = ops[i].value; |
8180 | |
8181 | /* First try reusing the result of an earlier legitimization. |
8182 | This avoids duplicate rtl and ensures that tied operands |
8183 | remain tied. |
8184 | |
8185 | This search is linear, but NOPS is bounded at compile time |
	 to a small number (currently a single digit).  */
8187 | unsigned int j = 0; |
8188 | for (; j < i; ++j) |
8189 | if (can_reuse_operands_p (icode, opno1: opno + j, opno2: opno + i, op1: &ops[j], op2: &ops[i]) |
8190 | && rtx_equal_p (orig_values[j], orig_values[i]) |
8191 | && ops[j].value |
8192 | && insn_operand_matches (icode, opno: opno + i, operand: ops[j].value)) |
8193 | { |
8194 | ops[i].value = copy_rtx (ops[j].value); |
8195 | break; |
8196 | } |
8197 | |
8198 | /* Otherwise try legitimizing the operand on its own. */ |
8199 | if (j == i && !maybe_legitimize_operand (icode, opno: opno + i, op: &ops[i])) |
8200 | { |
8201 | delete_insns_since (last); |
8202 | return false; |
8203 | } |
8204 | } |
8205 | return true; |
8206 | } |
8207 | |
8208 | /* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS) |
8209 | as its operands. Return the instruction pattern on success, |
8210 | and emit any necessary set-up code. Return null and emit no |
8211 | code on failure. */ |
8212 | |
8213 | rtx_insn * |
8214 | maybe_gen_insn (enum insn_code icode, unsigned int nops, |
8215 | class expand_operand *ops) |
8216 | { |
8217 | gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args); |
8218 | if (!maybe_legitimize_operands (icode, opno: 0, nops, ops)) |
8219 | return NULL; |
8220 | |
8221 | switch (nops) |
8222 | { |
8223 | case 0: |
8224 | return GEN_FCN (icode) (); |
8225 | case 1: |
8226 | return GEN_FCN (icode) (ops[0].value); |
8227 | case 2: |
8228 | return GEN_FCN (icode) (ops[0].value, ops[1].value); |
8229 | case 3: |
8230 | return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value); |
8231 | case 4: |
8232 | return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value, |
8233 | ops[3].value); |
8234 | case 5: |
8235 | return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value, |
8236 | ops[3].value, ops[4].value); |
8237 | case 6: |
8238 | return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value, |
8239 | ops[3].value, ops[4].value, ops[5].value); |
8240 | case 7: |
8241 | return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value, |
8242 | ops[3].value, ops[4].value, ops[5].value, |
8243 | ops[6].value); |
8244 | case 8: |
8245 | return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value, |
8246 | ops[3].value, ops[4].value, ops[5].value, |
8247 | ops[6].value, ops[7].value); |
8248 | case 9: |
8249 | return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value, |
8250 | ops[3].value, ops[4].value, ops[5].value, |
8251 | ops[6].value, ops[7].value, ops[8].value); |
8252 | case 10: |
8253 | return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value, |
8254 | ops[3].value, ops[4].value, ops[5].value, |
8255 | ops[6].value, ops[7].value, ops[8].value, |
8256 | ops[9].value); |
8257 | case 11: |
8258 | return GEN_FCN (icode) (ops[0].value, ops[1].value, ops[2].value, |
8259 | ops[3].value, ops[4].value, ops[5].value, |
8260 | ops[6].value, ops[7].value, ops[8].value, |
8261 | ops[9].value, ops[10].value); |
8262 | } |
8263 | gcc_unreachable (); |
8264 | } |
8265 | |
8266 | /* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS) |
8267 | as its operands. Return true on success and emit no code on failure. */ |
8268 | |
8269 | bool |
8270 | maybe_expand_insn (enum insn_code icode, unsigned int nops, |
8271 | class expand_operand *ops) |
8272 | { |
8273 | rtx_insn *pat = maybe_gen_insn (icode, nops, ops); |
8274 | if (pat) |
8275 | { |
8276 | emit_insn (pat); |
8277 | return true; |
8278 | } |
8279 | return false; |
8280 | } |
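
/* A minimal usage sketch for the expand_operand interface (illustrative
   only; ICODE, X, Y and RESMODE stand for whatever the caller has at hand):

     class expand_operand ops[3];
     create_output_operand (&ops[0], NULL_RTX, resmode);
     create_input_operand (&ops[1], x, GET_MODE (x));
     create_input_operand (&ops[2], y, GET_MODE (y));
     if (maybe_expand_insn (icode, 3, ops))
       ... use ops[0].value ...

   maybe_expand_insn legitimizes the operands, generates the pattern and
   emits it; on failure nothing is emitted and false is returned.  */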
8281 | |
8282 | /* Like maybe_expand_insn, but for jumps. */ |
8283 | |
8284 | bool |
8285 | maybe_expand_jump_insn (enum insn_code icode, unsigned int nops, |
8286 | class expand_operand *ops) |
8287 | { |
8288 | rtx_insn *pat = maybe_gen_insn (icode, nops, ops); |
8289 | if (pat) |
8290 | { |
8291 | emit_jump_insn (pat); |
8292 | return true; |
8293 | } |
8294 | return false; |
8295 | } |
8296 | |
8297 | /* Emit instruction ICODE, using operands [OPS, OPS + NOPS) |
8298 | as its operands. */ |
8299 | |
8300 | void |
8301 | expand_insn (enum insn_code icode, unsigned int nops, |
8302 | class expand_operand *ops) |
8303 | { |
8304 | if (!maybe_expand_insn (icode, nops, ops)) |
8305 | gcc_unreachable (); |
8306 | } |
8307 | |
8308 | /* Like expand_insn, but for jumps. */ |
8309 | |
8310 | void |
8311 | expand_jump_insn (enum insn_code icode, unsigned int nops, |
8312 | class expand_operand *ops) |
8313 | { |
8314 | if (!maybe_expand_jump_insn (icode, nops, ops)) |
8315 | gcc_unreachable (); |
8316 | } |
8317 | |