/* Lower _BitInt(N) operations to scalar operations.
   Copyright (C) 2023-2024 Free Software Foundation, Inc.
   Contributed by Jakub Jelinek <jakub@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "fold-const.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "cfgloop.h"
#include "cfganal.h"
#include "target.h"
#include "tree-ssa-live.h"
#include "tree-ssa-coalesce.h"
#include "domwalk.h"
#include "memmodel.h"
#include "optabs.h"
#include "varasm.h"
#include "gimple-range.h"
#include "value-range.h"
#include "langhooks.h"
#include "gimplify-me.h"
#include "diagnostic-core.h"
#include "tree-eh.h"
#include "tree-pretty-print.h"
#include "alloc-pool.h"
#include "tree-into-ssa.h"
#include "tree-cfgcleanup.h"
#include "tree-switch-conversion.h"
#include "ubsan.h"
#include "stor-layout.h"
#include "gimple-lower-bitint.h"

/* Split BITINT_TYPE precisions in 4 categories.  Small _BitInt, where
   target hook says it is a single limb, middle _BitInt which per ABI
   does not, but there is some INTEGER_TYPE in which arithmetics can be
   performed (operations on such _BitInt are lowered to casts to that
   arithmetic type and cast back; e.g. on x86_64 limb is DImode, but
   target supports TImode, so _BitInt(65) to _BitInt(128) are middle
   ones), large _BitInt which should be handled by straight line code
   and finally huge _BitInt which should be handled by loops over the
   limbs.  */

enum bitint_prec_kind {
  bitint_prec_small,
  bitint_prec_middle,
  bitint_prec_large,
  bitint_prec_huge
};
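
/* For instance, assuming an x86_64-like target with 64-bit limbs and
   128-bit MAX_FIXED_MODE_SIZE, _BitInt(63) is small, _BitInt(100) is
   middle, _BitInt(200) is large and _BitInt(256) or wider are huge.  */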

/* Caches to speed up bitint_precision_kind.  */

static int small_max_prec, mid_min_prec, large_min_prec, huge_min_prec;
static int limb_prec;

/* Categorize _BitInt(PREC) as small, middle, large or huge.  */

static bitint_prec_kind
bitint_precision_kind (int prec)
{
  if (prec <= small_max_prec)
    return bitint_prec_small;
  if (huge_min_prec && prec >= huge_min_prec)
    return bitint_prec_huge;
  if (large_min_prec && prec >= large_min_prec)
    return bitint_prec_large;
  if (mid_min_prec && prec >= mid_min_prec)
    return bitint_prec_middle;

  struct bitint_info info;
  bool ok = targetm.c.bitint_type_info (prec, &info);
  gcc_assert (ok);
  scalar_int_mode limb_mode = as_a <scalar_int_mode> (info.limb_mode);
  if (prec <= GET_MODE_PRECISION (limb_mode))
    {
      small_max_prec = prec;
      return bitint_prec_small;
    }
  if (!large_min_prec
      && GET_MODE_PRECISION (limb_mode) < MAX_FIXED_MODE_SIZE)
    large_min_prec = MAX_FIXED_MODE_SIZE + 1;
  if (!limb_prec)
    limb_prec = GET_MODE_PRECISION (limb_mode);
  if (!huge_min_prec)
    {
      if (4 * limb_prec >= MAX_FIXED_MODE_SIZE)
        huge_min_prec = 4 * limb_prec;
      else
        huge_min_prec = MAX_FIXED_MODE_SIZE + 1;
    }
  if (prec <= MAX_FIXED_MODE_SIZE)
    {
      if (!mid_min_prec || prec < mid_min_prec)
        mid_min_prec = prec;
      return bitint_prec_middle;
    }
  if (large_min_prec && prec <= large_min_prec)
    return bitint_prec_large;
  return bitint_prec_huge;
}

/* Same for a TYPE.  */

static bitint_prec_kind
bitint_precision_kind (tree type)
{
  return bitint_precision_kind (TYPE_PRECISION (type));
}

/* Return minimum precision needed to describe INTEGER_CST
   CST.  All bits above that precision up to precision of
   TREE_TYPE (CST) are cleared if EXT is set to 0, or set
   if EXT is set to -1.  */

static unsigned
bitint_min_cst_precision (tree cst, int &ext)
{
  ext = tree_int_cst_sgn (cst) < 0 ? -1 : 0;
  wide_int w = wi::to_wide (cst);
  unsigned min_prec = wi::min_precision (w, TYPE_SIGN (TREE_TYPE (cst)));
  /* For signed values, we don't need to count the sign bit,
     we'll use constant 0 or -1 for the upper bits.  */
  if (!TYPE_UNSIGNED (TREE_TYPE (cst)))
    --min_prec;
  else
    {
      /* For unsigned values, also try signed min_precision
         in case the constant has lots of most significant bits set.  */
      unsigned min_prec2 = wi::min_precision (w, SIGNED) - 1;
      if (min_prec2 < min_prec)
        {
          ext = -1;
          return min_prec2;
        }
    }
  return min_prec;
}
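
/* E.g. for -5 in a signed _BitInt type the signed minimum precision
   of ...11011 is 4 bits, so this returns 3 with EXT set to -1: the low
   3 bits describe the value and every limb above them can be filled
   with the -1 constant instead of being loaded from .rodata.  */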

namespace {

/* If OP is middle _BitInt, cast it to corresponding INTEGER_TYPE
   cached in TYPE and return it.  */

tree
maybe_cast_middle_bitint (gimple_stmt_iterator *gsi, tree op, tree &type)
{
  if (op == NULL_TREE
      || TREE_CODE (TREE_TYPE (op)) != BITINT_TYPE
      || bitint_precision_kind (TREE_TYPE (op)) != bitint_prec_middle)
    return op;

  int prec = TYPE_PRECISION (TREE_TYPE (op));
  int uns = TYPE_UNSIGNED (TREE_TYPE (op));
  if (type == NULL_TREE
      || TYPE_PRECISION (type) != prec
      || TYPE_UNSIGNED (type) != uns)
    type = build_nonstandard_integer_type (prec, uns);

  if (TREE_CODE (op) != SSA_NAME)
    {
      tree nop = fold_convert (type, op);
      if (is_gimple_val (nop))
        return nop;
    }

  tree nop = make_ssa_name (type);
  gimple *g = gimple_build_assign (nop, NOP_EXPR, op);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);
  return nop;
}
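
/* E.g. on x86_64 an addition of two middle _BitInt(100) operands is
   performed by casting them to the corresponding 100-bit INTEGER_TYPE
   (which fits into TImode), adding those and casting the result back
   to the _BitInt type.  */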

/* Return true if STMT can be handled in a loop from least to most
   significant limb together with its dependencies.  */

bool
mergeable_op (gimple *stmt)
{
  if (!is_gimple_assign (stmt))
    return false;
  switch (gimple_assign_rhs_code (stmt))
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
    case NEGATE_EXPR:
    case BIT_AND_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case BIT_NOT_EXPR:
    case SSA_NAME:
    case INTEGER_CST:
    case BIT_FIELD_REF:
      return true;
    case LSHIFT_EXPR:
      {
        tree cnt = gimple_assign_rhs2 (stmt);
        if (tree_fits_uhwi_p (cnt)
            && tree_to_uhwi (cnt) < (unsigned HOST_WIDE_INT) limb_prec)
          return true;
      }
      break;
    CASE_CONVERT:
    case VIEW_CONVERT_EXPR:
      {
        tree lhs_type = TREE_TYPE (gimple_assign_lhs (stmt));
        tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
        if (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
            && TREE_CODE (lhs_type) == BITINT_TYPE
            && TREE_CODE (rhs_type) == BITINT_TYPE
            && bitint_precision_kind (lhs_type) >= bitint_prec_large
            && bitint_precision_kind (rhs_type) >= bitint_prec_large
            && (CEIL (TYPE_PRECISION (lhs_type), limb_prec)
                == CEIL (TYPE_PRECISION (rhs_type), limb_prec)))
          {
            if (TYPE_PRECISION (rhs_type) >= TYPE_PRECISION (lhs_type))
              return true;
            if ((unsigned) TYPE_PRECISION (lhs_type) % (2 * limb_prec) != 0)
              return true;
            if (bitint_precision_kind (lhs_type) == bitint_prec_large)
              return true;
          }
        break;
      }
    default:
      break;
    }
  return false;
}
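
/* E.g. x = y + z or x = y << 3 on _BitInt(512) operands is mergeable
   and can be evaluated limb by limb together with its dependencies in
   a single loop, while e.g. multiplication, division or shifts by
   limb_prec or more bits are not and are lowered separately.  */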

/* Return non-zero if stmt is .{ADD,SUB,MUL}_OVERFLOW call with
   _Complex large/huge _BitInt lhs which has at most two immediate uses,
   at most one use in REALPART_EXPR stmt in the same bb and exactly one
   IMAGPART_EXPR use in the same bb with a single use which casts it to
   non-BITINT_TYPE integral type.  If there is a REALPART_EXPR use,
   return 2.  Such cases (most common uses of those builtins) can be
   optimized by marking their lhs and lhs of IMAGPART_EXPR and maybe lhs
   of REALPART_EXPR as not needed to be backed up by a stack variable.
   For .UBSAN_CHECK_{ADD,SUB,MUL} return 3.  */

int
optimizable_arith_overflow (gimple *stmt)
{
  bool is_ubsan = false;
  if (!is_gimple_call (stmt) || !gimple_call_internal_p (stmt))
    return 0;
  switch (gimple_call_internal_fn (stmt))
    {
    case IFN_ADD_OVERFLOW:
    case IFN_SUB_OVERFLOW:
    case IFN_MUL_OVERFLOW:
      break;
    case IFN_UBSAN_CHECK_ADD:
    case IFN_UBSAN_CHECK_SUB:
    case IFN_UBSAN_CHECK_MUL:
      is_ubsan = true;
      break;
    default:
      return 0;
    }
  tree lhs = gimple_call_lhs (stmt);
  if (!lhs)
    return 0;
  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
    return 0;
  tree type = is_ubsan ? TREE_TYPE (lhs) : TREE_TYPE (TREE_TYPE (lhs));
  if (TREE_CODE (type) != BITINT_TYPE
      || bitint_precision_kind (type) < bitint_prec_large)
    return 0;

  if (is_ubsan)
    {
      use_operand_p use_p;
      gimple *use_stmt;
      if (!single_imm_use (lhs, &use_p, &use_stmt)
          || gimple_bb (use_stmt) != gimple_bb (stmt)
          || !gimple_store_p (use_stmt)
          || !is_gimple_assign (use_stmt)
          || gimple_has_volatile_ops (use_stmt)
          || stmt_ends_bb_p (use_stmt))
        return 0;
      return 3;
    }

  imm_use_iterator ui;
  use_operand_p use_p;
  int seen = 0;
  gimple *realpart = NULL, *cast = NULL;
  FOR_EACH_IMM_USE_FAST (use_p, ui, lhs)
    {
      gimple *g = USE_STMT (use_p);
      if (is_gimple_debug (g))
        continue;
      if (!is_gimple_assign (g) || gimple_bb (g) != gimple_bb (stmt))
        return 0;
      if (gimple_assign_rhs_code (g) == REALPART_EXPR)
        {
          if ((seen & 1) != 0)
            return 0;
          seen |= 1;
          realpart = g;
        }
      else if (gimple_assign_rhs_code (g) == IMAGPART_EXPR)
        {
          if ((seen & 2) != 0)
            return 0;
          seen |= 2;

          use_operand_p use2_p;
          gimple *use_stmt;
          tree lhs2 = gimple_assign_lhs (g);
          if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs2))
            return 0;
          if (!single_imm_use (lhs2, &use2_p, &use_stmt)
              || gimple_bb (use_stmt) != gimple_bb (stmt)
              || !gimple_assign_cast_p (use_stmt))
            return 0;

          lhs2 = gimple_assign_lhs (use_stmt);
          if (!INTEGRAL_TYPE_P (TREE_TYPE (lhs2))
              || TREE_CODE (TREE_TYPE (lhs2)) == BITINT_TYPE)
            return 0;
          cast = use_stmt;
        }
      else
        return 0;
    }
  if ((seen & 2) == 0)
    return 0;
  if (seen == 3)
    {
      /* Punt if the cast stmt appears before realpart stmt, because
         if both appear, the lowering wants to emit all the code
         at the location of realpart stmt.  */
      gimple_stmt_iterator gsi = gsi_for_stmt (realpart);
      unsigned int cnt = 0;
      do
        {
          gsi_prev_nondebug (&gsi);
          if (gsi_end_p (gsi) || gsi_stmt (gsi) == cast)
            return 0;
          if (gsi_stmt (gsi) == stmt)
            return 2;
          /* If realpart is too far from stmt, punt as well.
             Usually it will appear right after it.  */
          if (++cnt == 32)
            return 0;
        }
      while (1);
    }
  return 1;
}
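
/* The pattern this recognizes looks e.g. like
     _1 = .ADD_OVERFLOW (x_2(D), y_3(D));
     _4 = REALPART_EXPR <_1>;
     _5 = IMAGPART_EXPR <_1>;
     _6 = (int) _5;
   for which 2 is returned, so that neither _1 nor the lhs of the
   REALPART_EXPR/IMAGPART_EXPR uses needs to be backed by a stack
   variable.  */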

/* If STMT is some kind of comparison (GIMPLE_COND, comparison assignment)
   comparing large/huge _BitInt types, return the comparison code and if
   non-NULL fill in the comparison operands to *POP1 and *POP2.  */

tree_code
comparison_op (gimple *stmt, tree *pop1, tree *pop2)
{
  tree op1 = NULL_TREE, op2 = NULL_TREE;
  tree_code code = ERROR_MARK;
  if (gimple_code (stmt) == GIMPLE_COND)
    {
      code = gimple_cond_code (stmt);
      op1 = gimple_cond_lhs (stmt);
      op2 = gimple_cond_rhs (stmt);
    }
  else if (is_gimple_assign (stmt))
    {
      code = gimple_assign_rhs_code (stmt);
      op1 = gimple_assign_rhs1 (stmt);
      if (TREE_CODE_CLASS (code) == tcc_comparison
          || TREE_CODE_CLASS (code) == tcc_binary)
        op2 = gimple_assign_rhs2 (stmt);
    }
  if (TREE_CODE_CLASS (code) != tcc_comparison)
    return ERROR_MARK;
  tree type = TREE_TYPE (op1);
  if (TREE_CODE (type) != BITINT_TYPE
      || bitint_precision_kind (type) < bitint_prec_large)
    return ERROR_MARK;
  if (pop1)
    {
      *pop1 = op1;
      *pop2 = op2;
    }
  return code;
}

/* Class used during large/huge _BitInt lowering containing all the
   state for the methods.  */

struct bitint_large_huge
{
  bitint_large_huge ()
    : m_names (NULL), m_loads (NULL), m_preserved (NULL),
      m_single_use_names (NULL), m_map (NULL), m_vars (NULL),
      m_limb_type (NULL_TREE), m_data (vNULL),
      m_returns_twice_calls (vNULL) {}

  ~bitint_large_huge ();

  void insert_before (gimple *);
  tree limb_access_type (tree, tree);
  tree limb_access (tree, tree, tree, bool);
  tree build_bit_field_ref (tree, tree, unsigned HOST_WIDE_INT,
                            unsigned HOST_WIDE_INT);
  void if_then (gimple *, profile_probability, edge &, edge &);
  void if_then_else (gimple *, profile_probability, edge &, edge &);
  void if_then_if_then_else (gimple *g, gimple *,
                             profile_probability, profile_probability,
                             edge &, edge &, edge &);
  tree handle_operand (tree, tree);
  tree prepare_data_in_out (tree, tree, tree *, tree = NULL_TREE);
  tree add_cast (tree, tree);
  tree handle_plus_minus (tree_code, tree, tree, tree);
  tree handle_lshift (tree, tree, tree);
  tree handle_cast (tree, tree, tree);
  tree handle_bit_field_ref (tree, tree);
  tree handle_load (gimple *, tree);
  tree handle_stmt (gimple *, tree);
  tree handle_operand_addr (tree, gimple *, int *, int *);
  tree create_loop (tree, tree *);
  tree lower_mergeable_stmt (gimple *, tree_code &, tree, tree);
  tree lower_comparison_stmt (gimple *, tree_code &, tree, tree);
  void lower_shift_stmt (tree, gimple *);
  void lower_muldiv_stmt (tree, gimple *);
  void lower_float_conv_stmt (tree, gimple *);
  tree arith_overflow_extract_bits (unsigned int, unsigned int, tree,
                                    unsigned int, bool);
  void finish_arith_overflow (tree, tree, tree, tree, tree, tree, gimple *,
                              tree_code);
  void lower_addsub_overflow (tree, gimple *);
  void lower_mul_overflow (tree, gimple *);
  void lower_cplxpart_stmt (tree, gimple *);
  void lower_complexexpr_stmt (gimple *);
  void lower_bit_query (gimple *);
  void lower_call (tree, gimple *);
  void lower_asm (gimple *);
  void lower_stmt (gimple *);

  /* Bitmap of large/huge _BitInt SSA_NAMEs except those that can be
     merged with their uses.  */
  bitmap m_names;
  /* Subset of those for lhs of load statements.  These will be
     cleared in m_names if the loads will be mergeable with all
     their uses.  */
  bitmap m_loads;
  /* Bitmap of large/huge _BitInt SSA_NAMEs that should survive
     to later passes (arguments or return values of calls).  */
  bitmap m_preserved;
  /* Subset of m_names which have a single use.  As the lowering
     can replace various original statements with their lowered
     form even before it is done iterating over all basic blocks,
     testing has_single_use for the purpose of emitting clobbers
     doesn't work properly.  */
  bitmap m_single_use_names;
  /* Used for coalescing/partitioning of large/huge _BitInt SSA_NAMEs
     set in m_names.  */
  var_map m_map;
  /* Mapping of the partitions to corresponding decls.  */
  tree *m_vars;
  /* Unsigned integer type with limb precision.  */
  tree m_limb_type;
  /* Its TYPE_SIZE_UNIT.  */
  unsigned HOST_WIDE_INT m_limb_size;
  /* Location of a gimple stmt which is being currently lowered.  */
  location_t m_loc;
  /* Current stmt iterator where code is being lowered currently.  */
  gimple_stmt_iterator m_gsi;
  /* Statement after which any clobbers should be added if non-NULL.  */
  gimple *m_after_stmt;
  /* Set when creating loops to the loop header bb and its preheader.  */
  basic_block m_bb, m_preheader_bb;
  /* Stmt iterator after which initialization statements should be emitted.  */
  gimple_stmt_iterator m_init_gsi;
  /* Decl into which a mergeable statement stores result.  */
  tree m_lhs;
  /* handle_operand/handle_stmt can be invoked in various ways.

     lower_mergeable_stmt for large _BitInt calls those with constant
     idx only, expanding to straight line code, for huge _BitInt
     emits a loop from least significant limb upwards, where each loop
     iteration handles 2 limbs, plus there can be up to one full limb
     and one partial limb processed after the loop, where handle_operand
     and/or handle_stmt are called with constant idx.  m_upwards_2limb
     is set for this case, false otherwise.  m_upwards is true if it
     is either large or huge _BitInt handled by lower_mergeable_stmt,
     i.e. indexes always increase.

     Another way is used by lower_comparison_stmt, which walks limbs
     from most significant to least significant, partial limb if any
     processed first with constant idx and then loop processing a single
     limb per iteration with non-constant idx.

     Another way is used in lower_shift_stmt, where for LSHIFT_EXPR
     destination limbs are processed from most significant to least
     significant or for RSHIFT_EXPR the other way around, in loops or
     straight line code, but idx usually is non-constant (so from
     handle_operand/handle_stmt POV random access).  The LSHIFT_EXPR
     handling there can access even partial limbs using non-constant
     idx (then m_var_msb should be true); for all the other cases,
     including lower_mergeable_stmt/lower_comparison_stmt, that is
     not the case and so m_var_msb should be false.

     m_first should be set the first time handle_operand/handle_stmt
     is called and clear when it is called for some other limb with
     the same argument.  If the lowering of an operand (e.g. INTEGER_CST)
     or statement (e.g. +/-/<< with < limb_prec constant) needs some
     state between the different calls, when m_first is true it should
     push some trees to m_data vector and also make sure m_data_cnt is
     incremented by how many trees were pushed, and when m_first is
     false, it can use the m_data[m_data_cnt] etc. data or update them,
     just needs to bump m_data_cnt by the same amount as when it was
     called with m_first set.  The toplevel calls to
     handle_operand/handle_stmt should set m_data_cnt to 0 and truncate
     m_data vector when setting m_first to true.

     m_cast_conditional and m_bitfld_load are used when handling a
     bit-field load inside of a widening cast.  handle_cast sometimes
     needs to do runtime comparisons and handle_operand only conditionally
     or even in two separate conditional blocks for one idx (once with
     constant index after comparing the runtime one for equality with the
     constant).  In these cases, m_cast_conditional is set to true and
     the bit-field load then communicates its m_data_cnt to handle_cast
     using m_bitfld_load.  */
  bool m_first;
  bool m_var_msb;
  unsigned m_upwards_2limb;
  bool m_upwards;
  bool m_cast_conditional;
  unsigned m_bitfld_load;
  vec<tree> m_data;
  unsigned int m_data_cnt;
  vec<gimple *> m_returns_twice_calls;
};

bitint_large_huge::~bitint_large_huge ()
{
  BITMAP_FREE (m_names);
  BITMAP_FREE (m_loads);
  BITMAP_FREE (m_preserved);
  BITMAP_FREE (m_single_use_names);
  if (m_map)
    delete_var_map (m_map);
  XDELETEVEC (m_vars);
  m_data.release ();
  m_returns_twice_calls.release ();
}

/* Insert gimple statement G before current location
   and set its gimple_location.  */

void
bitint_large_huge::insert_before (gimple *g)
{
  gimple_set_location (g, m_loc);
  gsi_insert_before (&m_gsi, g, GSI_SAME_STMT);
}

/* Return type for accessing limb IDX of BITINT_TYPE TYPE.
   This is normally m_limb_type, except for a partial most
   significant limb if any.  */

tree
bitint_large_huge::limb_access_type (tree type, tree idx)
{
  if (type == NULL_TREE)
    return m_limb_type;
  unsigned HOST_WIDE_INT i = tree_to_uhwi (idx);
  unsigned int prec = TYPE_PRECISION (type);
  gcc_assert (i * limb_prec < prec);
  if ((i + 1) * limb_prec <= prec)
    return m_limb_type;
  else
    return build_nonstandard_integer_type (prec % limb_prec,
                                           TYPE_UNSIGNED (type));
}
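
/* E.g. for _BitInt(135) with 64-bit limbs, limbs 0 and 1 are accessed
   as the full m_limb_type while limb 2 uses a 7-bit (135 % 64)
   nonstandard integer type.  */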

/* Return a tree how to access limb IDX of VAR corresponding to BITINT_TYPE
   TYPE.  If WRITE_P is true, it will be a store, otherwise a read.  */

tree
bitint_large_huge::limb_access (tree type, tree var, tree idx, bool write_p)
{
  tree atype = (tree_fits_uhwi_p (idx)
                ? limb_access_type (type, idx) : m_limb_type);
  tree ltype = m_limb_type;
  addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (var));
  if (as != TYPE_ADDR_SPACE (ltype))
    ltype = build_qualified_type (ltype, TYPE_QUALS (ltype)
                                  | ENCODE_QUAL_ADDR_SPACE (as));
  tree ret;
  if (DECL_P (var) && tree_fits_uhwi_p (idx))
    {
      tree ptype = build_pointer_type (strip_array_types (TREE_TYPE (var)));
      unsigned HOST_WIDE_INT off = tree_to_uhwi (idx) * m_limb_size;
      ret = build2 (MEM_REF, ltype,
                    build_fold_addr_expr (var),
                    build_int_cst (ptype, off));
      TREE_THIS_VOLATILE (ret) = TREE_THIS_VOLATILE (var);
      TREE_SIDE_EFFECTS (ret) = TREE_SIDE_EFFECTS (var);
    }
  else if (TREE_CODE (var) == MEM_REF && tree_fits_uhwi_p (idx))
    {
      ret
        = build2 (MEM_REF, ltype, unshare_expr (TREE_OPERAND (var, 0)),
                  size_binop (PLUS_EXPR, TREE_OPERAND (var, 1),
                              build_int_cst (TREE_TYPE (TREE_OPERAND (var, 1)),
                                             tree_to_uhwi (idx)
                                             * m_limb_size)));
      TREE_THIS_VOLATILE (ret) = TREE_THIS_VOLATILE (var);
      TREE_SIDE_EFFECTS (ret) = TREE_SIDE_EFFECTS (var);
      TREE_THIS_NOTRAP (ret) = TREE_THIS_NOTRAP (var);
    }
  else
    {
      var = unshare_expr (var);
      if (TREE_CODE (TREE_TYPE (var)) != ARRAY_TYPE
          || !useless_type_conversion_p (m_limb_type,
                                         TREE_TYPE (TREE_TYPE (var))))
        {
          unsigned HOST_WIDE_INT nelts
            = CEIL (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (var))), limb_prec);
          tree atype = build_array_type_nelts (ltype, nelts);
          var = build1 (VIEW_CONVERT_EXPR, atype, var);
        }
      ret = build4 (ARRAY_REF, ltype, var, idx, NULL_TREE, NULL_TREE);
    }
  if (!write_p && !useless_type_conversion_p (atype, m_limb_type))
    {
      gimple *g = gimple_build_assign (make_ssa_name (m_limb_type), ret);
      insert_before (g);
      ret = gimple_assign_lhs (g);
      ret = build1 (NOP_EXPR, atype, ret);
    }
  return ret;
}
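
/* E.g. for a VAR_DECL v and constant idx 2 with 64-bit limbs this
   emits the equivalent of MEM <limb_type> [&v + 16B], while for
   non-constant idx it emits an ARRAY_REF into v viewed as an array
   of limbs.  */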

/* Build a BIT_FIELD_REF to access BITSIZE bits with FTYPE type at
   offset BITPOS inside of OBJ.  */

tree
bitint_large_huge::build_bit_field_ref (tree ftype, tree obj,
                                        unsigned HOST_WIDE_INT bitsize,
                                        unsigned HOST_WIDE_INT bitpos)
{
  if (INTEGRAL_TYPE_P (TREE_TYPE (obj))
      && !type_has_mode_precision_p (TREE_TYPE (obj)))
    {
      unsigned HOST_WIDE_INT nelts
        = CEIL (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (obj))), limb_prec);
      tree ltype = m_limb_type;
      addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (obj));
      if (as != TYPE_ADDR_SPACE (ltype))
        ltype = build_qualified_type (ltype, TYPE_QUALS (ltype)
                                      | ENCODE_QUAL_ADDR_SPACE (as));
      tree atype = build_array_type_nelts (ltype, nelts);
      obj = build1 (VIEW_CONVERT_EXPR, atype, obj);
    }
  return build3 (BIT_FIELD_REF, ftype, obj, bitsize_int (bitsize),
                 bitsize_int (bitpos));
}

/* Emit a half diamond,
   if (COND)
   |\
   | \
   |  \
   | new_bb1
   |  /
   | /
   |/
   or if (COND) new_bb1;
   PROB is the probability that the condition is true.
   Updates m_gsi to start of new_bb1.
   Sets EDGE_TRUE to edge from new_bb1 to successor and
   EDGE_FALSE to the EDGE_FALSE_VALUE edge from if (COND) bb.  */

void
bitint_large_huge::if_then (gimple *cond, profile_probability prob,
                            edge &edge_true, edge &edge_false)
{
  insert_before (cond);
  edge e1 = split_block (gsi_bb (m_gsi), cond);
  edge e2 = split_block (e1->dest, (gimple *) NULL);
  edge e3 = make_edge (e1->src, e2->dest, EDGE_FALSE_VALUE);
  e1->flags = EDGE_TRUE_VALUE;
  e1->probability = prob;
  e3->probability = prob.invert ();
  set_immediate_dominator (CDI_DOMINATORS, e2->dest, e1->src);
  edge_true = e2;
  edge_false = e3;
  m_gsi = gsi_after_labels (e1->dest);
}

/* Emit a full diamond,
   if (COND)
      /\
     /  \
    /    \
   new_bb1 new_bb2
    \    /
     \  /
      \/
   or if (COND) new_bb2; else new_bb1;
   PROB is the probability that the condition is true.
   Updates m_gsi to start of new_bb2.
   Sets EDGE_TRUE to edge from new_bb1 to successor and
   EDGE_FALSE to the EDGE_FALSE_VALUE edge from if (COND) bb.  */

void
bitint_large_huge::if_then_else (gimple *cond, profile_probability prob,
                                 edge &edge_true, edge &edge_false)
{
  insert_before (cond);
  edge e1 = split_block (gsi_bb (m_gsi), cond);
  edge e2 = split_block (e1->dest, (gimple *) NULL);
  basic_block bb = create_empty_bb (e1->dest);
  add_bb_to_loop (bb, e1->dest->loop_father);
  edge e3 = make_edge (e1->src, bb, EDGE_TRUE_VALUE);
  e1->flags = EDGE_FALSE_VALUE;
  e3->probability = prob;
  e1->probability = prob.invert ();
  bb->count = e1->src->count.apply_probability (prob);
  set_immediate_dominator (CDI_DOMINATORS, bb, e1->src);
  set_immediate_dominator (CDI_DOMINATORS, e2->dest, e1->src);
  edge_true = make_single_succ_edge (bb, e2->dest, EDGE_FALLTHRU);
  edge_false = e2;
  m_gsi = gsi_after_labels (bb);
}

/* Emit a half diamond with full diamond in it
   if (COND1)
   |\
   | \
   |  \
   | if (COND2)
   |    /  \
   |   /    \
   |new_bb1 new_bb2
   |   |    /
   \   |   /
    \  |  /
     \ | /
      \|/
   or if (COND1) { if (COND2) new_bb2; else new_bb1; }
   PROB1 is the probability that the condition 1 is true.
   PROB2 is the probability that the condition 2 is true.
   Updates m_gsi to start of new_bb1.
   Sets EDGE_TRUE_TRUE to edge from new_bb2 to successor,
   EDGE_TRUE_FALSE to edge from new_bb1 to successor and
   EDGE_FALSE to the EDGE_FALSE_VALUE edge from if (COND1) bb.
   If COND2 is NULL, this is equivalent to
   if_then (COND1, PROB1, EDGE_TRUE_FALSE, EDGE_FALSE);
   EDGE_TRUE_TRUE = NULL;  */

void
bitint_large_huge::if_then_if_then_else (gimple *cond1, gimple *cond2,
                                         profile_probability prob1,
                                         profile_probability prob2,
                                         edge &edge_true_true,
                                         edge &edge_true_false,
                                         edge &edge_false)
{
  edge e2, e3, e4 = NULL;
  if_then (cond1, prob1, e2, e3);
  if (cond2 == NULL)
    {
      edge_true_true = NULL;
      edge_true_false = e2;
      edge_false = e3;
      return;
    }
  insert_before (cond2);
  e2 = split_block (gsi_bb (m_gsi), cond2);
  basic_block bb = create_empty_bb (e2->dest);
  add_bb_to_loop (bb, e2->dest->loop_father);
  e4 = make_edge (e2->src, bb, EDGE_TRUE_VALUE);
  set_immediate_dominator (CDI_DOMINATORS, bb, e2->src);
  e4->probability = prob2;
  e2->flags = EDGE_FALSE_VALUE;
  e2->probability = prob2.invert ();
  bb->count = e2->src->count.apply_probability (prob2);
  e4 = make_single_succ_edge (bb, e3->dest, EDGE_FALLTHRU);
  e2 = find_edge (e2->dest, e3->dest);
  edge_true_true = e4;
  edge_true_false = e2;
  edge_false = e3;
  m_gsi = gsi_after_labels (e2->src);
}

/* Emit code to access limb IDX from OP.  */

tree
bitint_large_huge::handle_operand (tree op, tree idx)
{
  switch (TREE_CODE (op))
    {
    case SSA_NAME:
      if (m_names == NULL
          || !bitmap_bit_p (m_names, SSA_NAME_VERSION (op)))
        {
          if (SSA_NAME_IS_DEFAULT_DEF (op))
            {
              if (m_first)
                {
                  tree v = create_tmp_reg (m_limb_type);
                  if (SSA_NAME_VAR (op) && VAR_P (SSA_NAME_VAR (op)))
                    {
                      DECL_NAME (v) = DECL_NAME (SSA_NAME_VAR (op));
                      DECL_SOURCE_LOCATION (v)
                        = DECL_SOURCE_LOCATION (SSA_NAME_VAR (op));
                    }
                  v = get_or_create_ssa_default_def (cfun, v);
                  m_data.safe_push (v);
                }
              tree ret = m_data[m_data_cnt];
              m_data_cnt++;
              if (tree_fits_uhwi_p (idx))
                {
                  tree type = limb_access_type (TREE_TYPE (op), idx);
                  ret = add_cast (type, ret);
                }
              return ret;
            }
          location_t loc_save = m_loc;
          m_loc = gimple_location (SSA_NAME_DEF_STMT (op));
          tree ret = handle_stmt (SSA_NAME_DEF_STMT (op), idx);
          m_loc = loc_save;
          return ret;
        }
      int p;
      gimple *g;
      tree t;
      p = var_to_partition (m_map, op);
      gcc_assert (m_vars[p] != NULL_TREE);
      t = limb_access (TREE_TYPE (op), m_vars[p], idx, false);
      g = gimple_build_assign (make_ssa_name (TREE_TYPE (t)), t);
      insert_before (g);
      t = gimple_assign_lhs (g);
      if (m_first
          && m_single_use_names
          && m_vars[p] != m_lhs
          && m_after_stmt
          && bitmap_bit_p (m_single_use_names, SSA_NAME_VERSION (op)))
        {
          tree clobber = build_clobber (TREE_TYPE (m_vars[p]),
                                        CLOBBER_STORAGE_END);
          g = gimple_build_assign (m_vars[p], clobber);
          gimple_stmt_iterator gsi = gsi_for_stmt (m_after_stmt);
          gsi_insert_after (&gsi, g, GSI_SAME_STMT);
        }
      return t;
    case INTEGER_CST:
      if (tree_fits_uhwi_p (idx))
        {
          tree c, type = limb_access_type (TREE_TYPE (op), idx);
          unsigned HOST_WIDE_INT i = tree_to_uhwi (idx);
          if (m_first)
            {
              m_data.safe_push (NULL_TREE);
              m_data.safe_push (NULL_TREE);
            }
          if (limb_prec != HOST_BITS_PER_WIDE_INT)
            {
              wide_int w = wi::rshift (wi::to_wide (op), i * limb_prec,
                                       TYPE_SIGN (TREE_TYPE (op)));
              c = wide_int_to_tree (type,
                                    wide_int::from (w, TYPE_PRECISION (type),
                                                    UNSIGNED));
            }
          else if (i >= TREE_INT_CST_EXT_NUNITS (op))
            c = build_int_cst (type,
                               tree_int_cst_sgn (op) < 0 ? -1 : 0);
          else
            c = build_int_cst (type, TREE_INT_CST_ELT (op, i));
          m_data_cnt += 2;
          return c;
        }
      if (m_first
          || (m_data[m_data_cnt] == NULL_TREE
              && m_data[m_data_cnt + 1] == NULL_TREE))
        {
          unsigned int prec = TYPE_PRECISION (TREE_TYPE (op));
          unsigned int rem = prec % ((m_upwards_2limb ? 2 : 1) * limb_prec);
          int ext;
          unsigned min_prec = bitint_min_cst_precision (op, ext);
          if (m_first)
            {
              m_data.safe_push (NULL_TREE);
              m_data.safe_push (NULL_TREE);
            }
          if (integer_zerop (op))
            {
              tree c = build_zero_cst (m_limb_type);
              m_data[m_data_cnt] = c;
              m_data[m_data_cnt + 1] = c;
            }
          else if (integer_all_onesp (op))
            {
              tree c = build_all_ones_cst (m_limb_type);
              m_data[m_data_cnt] = c;
              m_data[m_data_cnt + 1] = c;
            }
          else if (m_upwards_2limb && min_prec <= (unsigned) limb_prec)
            {
              /* Single limb constant.  Use a phi with that limb from
                 the preheader edge and 0 or -1 constant from the other edge
                 and for the second limb in the loop.  */
              tree out;
              gcc_assert (m_first);
              m_data.pop ();
              m_data.pop ();
              prepare_data_in_out (fold_convert (m_limb_type, op), idx, &out,
                                   build_int_cst (m_limb_type, ext));
            }
          else if (min_prec > prec - rem - 2 * limb_prec)
            {
              /* Constant which has enough significant bits that it isn't
                 worth trying to save .rodata space by extending from smaller
                 number.  */
              tree type;
              if (m_var_msb)
                type = TREE_TYPE (op);
              else
                /* If we have a guarantee the most significant partial limb
                   (if any) will be only accessed through handle_operand
                   with INTEGER_CST idx, we don't need to include the partial
                   limb in .rodata.  */
                type = build_bitint_type (prec - rem, 1);
              tree c = tree_output_constant_def (fold_convert (type, op));
              m_data[m_data_cnt] = c;
              m_data[m_data_cnt + 1] = NULL_TREE;
            }
          else if (m_upwards_2limb)
            {
              /* Constant with smaller number of bits.  Trade conditional
                 code for .rodata space by extending from smaller number.  */
              min_prec = CEIL (min_prec, 2 * limb_prec) * (2 * limb_prec);
              tree type = build_bitint_type (min_prec, 1);
              tree c = tree_output_constant_def (fold_convert (type, op));
              tree idx2 = make_ssa_name (sizetype);
              g = gimple_build_assign (idx2, PLUS_EXPR, idx, size_one_node);
              insert_before (g);
              g = gimple_build_cond (LT_EXPR, idx,
                                     size_int (min_prec / limb_prec),
                                     NULL_TREE, NULL_TREE);
              edge edge_true, edge_false;
              if_then (g, (min_prec >= (prec - rem) / 2
                           ? profile_probability::likely ()
                           : profile_probability::unlikely ()),
                       edge_true, edge_false);
              tree c1 = limb_access (TREE_TYPE (op), c, idx, false);
              g = gimple_build_assign (make_ssa_name (TREE_TYPE (c1)), c1);
              insert_before (g);
              c1 = gimple_assign_lhs (g);
              tree c2 = limb_access (TREE_TYPE (op), c, idx2, false);
              g = gimple_build_assign (make_ssa_name (TREE_TYPE (c2)), c2);
              insert_before (g);
              c2 = gimple_assign_lhs (g);
              tree c3 = build_int_cst (m_limb_type, ext);
              m_gsi = gsi_after_labels (edge_true->dest);
              m_data[m_data_cnt] = make_ssa_name (m_limb_type);
              m_data[m_data_cnt + 1] = make_ssa_name (m_limb_type);
              gphi *phi = create_phi_node (m_data[m_data_cnt],
                                           edge_true->dest);
              add_phi_arg (phi, c1, edge_true, UNKNOWN_LOCATION);
              add_phi_arg (phi, c3, edge_false, UNKNOWN_LOCATION);
              phi = create_phi_node (m_data[m_data_cnt + 1], edge_true->dest);
              add_phi_arg (phi, c2, edge_true, UNKNOWN_LOCATION);
              add_phi_arg (phi, c3, edge_false, UNKNOWN_LOCATION);
            }
          else
            {
              /* Constant with smaller number of bits.  Trade conditional
                 code for .rodata space by extending from smaller number.
                 Version for loops with random access to the limbs or
                 downwards loops.  */
              min_prec = CEIL (min_prec, limb_prec) * limb_prec;
              tree c;
              if (min_prec <= (unsigned) limb_prec)
                c = fold_convert (m_limb_type, op);
              else
                {
                  tree type = build_bitint_type (min_prec, 1);
                  c = tree_output_constant_def (fold_convert (type, op));
                }
              m_data[m_data_cnt] = c;
              m_data[m_data_cnt + 1] = integer_type_node;
            }
          t = m_data[m_data_cnt];
          if (m_data[m_data_cnt + 1] == NULL_TREE)
            {
              t = limb_access (TREE_TYPE (op), t, idx, false);
              g = gimple_build_assign (make_ssa_name (TREE_TYPE (t)), t);
              insert_before (g);
              t = gimple_assign_lhs (g);
            }
        }
      else if (m_data[m_data_cnt + 1] == NULL_TREE)
        {
          t = limb_access (TREE_TYPE (op), m_data[m_data_cnt], idx, false);
          g = gimple_build_assign (make_ssa_name (TREE_TYPE (t)), t);
          insert_before (g);
          t = gimple_assign_lhs (g);
        }
      else
        t = m_data[m_data_cnt + 1];
      if (m_data[m_data_cnt + 1] == integer_type_node)
        {
          unsigned int prec = TYPE_PRECISION (TREE_TYPE (op));
          unsigned rem = prec % ((m_upwards_2limb ? 2 : 1) * limb_prec);
          int ext = wi::neg_p (wi::to_wide (op)) ? -1 : 0;
          tree c = m_data[m_data_cnt];
          unsigned min_prec = TYPE_PRECISION (TREE_TYPE (c));
          g = gimple_build_cond (LT_EXPR, idx,
                                 size_int (min_prec / limb_prec),
                                 NULL_TREE, NULL_TREE);
          edge edge_true, edge_false;
          if_then (g, (min_prec >= (prec - rem) / 2
                       ? profile_probability::likely ()
                       : profile_probability::unlikely ()),
                   edge_true, edge_false);
          if (min_prec > (unsigned) limb_prec)
            {
              c = limb_access (TREE_TYPE (op), c, idx, false);
              g = gimple_build_assign (make_ssa_name (TREE_TYPE (c)), c);
              insert_before (g);
              c = gimple_assign_lhs (g);
            }
          tree c2 = build_int_cst (m_limb_type, ext);
          m_gsi = gsi_after_labels (edge_true->dest);
          t = make_ssa_name (m_limb_type);
          gphi *phi = create_phi_node (t, edge_true->dest);
          add_phi_arg (phi, c, edge_true, UNKNOWN_LOCATION);
          add_phi_arg (phi, c2, edge_false, UNKNOWN_LOCATION);
        }
      m_data_cnt += 2;
      return t;
    default:
      gcc_unreachable ();
    }
}

/* Helper method, add a PHI node with VAL from preheader edge if
   inside of a loop and m_first.  Keep state in a pair of m_data
   elements.  If VAL_OUT is non-NULL, use that as PHI argument from
   the latch edge, otherwise create a new SSA_NAME for it and let
   caller initialize it.  */

tree
bitint_large_huge::prepare_data_in_out (tree val, tree idx, tree *data_out,
                                        tree val_out)
{
  if (!m_first)
    {
      *data_out = tree_fits_uhwi_p (idx) ? NULL_TREE : m_data[m_data_cnt + 1];
      return m_data[m_data_cnt];
    }

  *data_out = NULL_TREE;
  if (tree_fits_uhwi_p (idx))
    {
      m_data.safe_push (val);
      m_data.safe_push (NULL_TREE);
      return val;
    }

  tree in = make_ssa_name (TREE_TYPE (val));
  gphi *phi = create_phi_node (in, m_bb);
  edge e1 = find_edge (m_preheader_bb, m_bb);
  edge e2 = EDGE_PRED (m_bb, 0);
  if (e1 == e2)
    e2 = EDGE_PRED (m_bb, 1);
  add_phi_arg (phi, val, e1, UNKNOWN_LOCATION);
  tree out = val_out ? val_out : make_ssa_name (TREE_TYPE (val));
  add_phi_arg (phi, out, e2, UNKNOWN_LOCATION);
  m_data.safe_push (in);
  m_data.safe_push (out);
  return in;
}
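
/* E.g. the addition carry inside the merged loop is propagated this
   way: data_in = PHI <0 (preheader), data_out (latch)> and the caller
   then defines data_out from data_in in the loop body.  */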

/* Return VAL cast to TYPE.  If VAL is INTEGER_CST, just
   convert it without emitting any code, otherwise emit
   the conversion statement before the current location.  */

tree
bitint_large_huge::add_cast (tree type, tree val)
{
  if (TREE_CODE (val) == INTEGER_CST)
    return fold_convert (type, val);

  tree lhs = make_ssa_name (type);
  gimple *g = gimple_build_assign (lhs, NOP_EXPR, val);
  insert_before (g);
  return lhs;
}

/* Helper of handle_stmt method, handle PLUS_EXPR or MINUS_EXPR.  */

tree
bitint_large_huge::handle_plus_minus (tree_code code, tree rhs1, tree rhs2,
                                      tree idx)
{
  tree lhs, data_out, ctype;
  tree rhs1_type = TREE_TYPE (rhs1);
  gimple *g;
  tree data_in = prepare_data_in_out (build_zero_cst (m_limb_type), idx,
                                      &data_out);

  if (optab_handler (code == PLUS_EXPR ? uaddc5_optab : usubc5_optab,
                     TYPE_MODE (m_limb_type)) != CODE_FOR_nothing)
    {
      ctype = build_complex_type (m_limb_type);
      if (!types_compatible_p (rhs1_type, m_limb_type))
        {
          if (!TYPE_UNSIGNED (rhs1_type))
            {
              tree type = unsigned_type_for (rhs1_type);
              rhs1 = add_cast (type, rhs1);
              rhs2 = add_cast (type, rhs2);
            }
          rhs1 = add_cast (m_limb_type, rhs1);
          rhs2 = add_cast (m_limb_type, rhs2);
        }
      lhs = make_ssa_name (ctype);
      g = gimple_build_call_internal (code == PLUS_EXPR
                                      ? IFN_UADDC : IFN_USUBC,
                                      3, rhs1, rhs2, data_in);
      gimple_call_set_lhs (g, lhs);
      insert_before (g);
      if (data_out == NULL_TREE)
        data_out = make_ssa_name (m_limb_type);
      g = gimple_build_assign (data_out, IMAGPART_EXPR,
                               build1 (IMAGPART_EXPR, m_limb_type, lhs));
      insert_before (g);
    }
  else if (types_compatible_p (rhs1_type, m_limb_type))
    {
      ctype = build_complex_type (m_limb_type);
      lhs = make_ssa_name (ctype);
      g = gimple_build_call_internal (code == PLUS_EXPR
                                      ? IFN_ADD_OVERFLOW : IFN_SUB_OVERFLOW,
                                      2, rhs1, rhs2);
      gimple_call_set_lhs (g, lhs);
      insert_before (g);
      if (data_out == NULL_TREE)
        data_out = make_ssa_name (m_limb_type);
      if (!integer_zerop (data_in))
        {
          rhs1 = make_ssa_name (m_limb_type);
          g = gimple_build_assign (rhs1, REALPART_EXPR,
                                   build1 (REALPART_EXPR, m_limb_type, lhs));
          insert_before (g);
          rhs2 = make_ssa_name (m_limb_type);
          g = gimple_build_assign (rhs2, IMAGPART_EXPR,
                                   build1 (IMAGPART_EXPR, m_limb_type, lhs));
          insert_before (g);
          lhs = make_ssa_name (ctype);
          g = gimple_build_call_internal (code == PLUS_EXPR
                                          ? IFN_ADD_OVERFLOW
                                          : IFN_SUB_OVERFLOW,
                                          2, rhs1, data_in);
          gimple_call_set_lhs (g, lhs);
          insert_before (g);
          data_in = make_ssa_name (m_limb_type);
          g = gimple_build_assign (data_in, IMAGPART_EXPR,
                                   build1 (IMAGPART_EXPR, m_limb_type, lhs));
          insert_before (g);
          g = gimple_build_assign (data_out, PLUS_EXPR, rhs2, data_in);
          insert_before (g);
        }
      else
        {
          g = gimple_build_assign (data_out, IMAGPART_EXPR,
                                   build1 (IMAGPART_EXPR, m_limb_type, lhs));
          insert_before (g);
        }
    }
  else
    {
      tree in = add_cast (rhs1_type, data_in);
      lhs = make_ssa_name (rhs1_type);
      g = gimple_build_assign (lhs, code, rhs1, rhs2);
      insert_before (g);
      rhs1 = make_ssa_name (rhs1_type);
      g = gimple_build_assign (rhs1, code, lhs, in);
      insert_before (g);
      m_data[m_data_cnt] = NULL_TREE;
      m_data_cnt += 2;
      return rhs1;
    }
  rhs1 = make_ssa_name (m_limb_type);
  g = gimple_build_assign (rhs1, REALPART_EXPR,
                           build1 (REALPART_EXPR, m_limb_type, lhs));
  insert_before (g);
  if (!types_compatible_p (rhs1_type, m_limb_type))
    rhs1 = add_cast (rhs1_type, rhs1);
  m_data[m_data_cnt] = data_out;
  m_data_cnt += 2;
  return rhs1;
}
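
/* So, e.g. when the uaddc5 optab is available, one limb of x = y + z
   inside the merged loop is lowered roughly as
     _1 = .UADDC (y_limb, z_limb, carry_in);
     sum_limb = REALPART_EXPR <_1>;
     carry_out = IMAGPART_EXPR <_1>;
   with carry_in/carry_out flowing through the PHI created by
   prepare_data_in_out above.  */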

/* Helper function for handle_stmt method, handle LSHIFT_EXPR by
   count in [0, limb_prec - 1] range.  */

tree
bitint_large_huge::handle_lshift (tree rhs1, tree rhs2, tree idx)
{
  unsigned HOST_WIDE_INT cnt = tree_to_uhwi (rhs2);
  gcc_checking_assert (cnt < (unsigned) limb_prec);
  if (cnt == 0)
    return rhs1;

  tree lhs, data_out, rhs1_type = TREE_TYPE (rhs1);
  gimple *g;
  tree data_in = prepare_data_in_out (build_zero_cst (m_limb_type), idx,
                                      &data_out);

  if (!integer_zerop (data_in))
    {
      lhs = make_ssa_name (m_limb_type);
      g = gimple_build_assign (lhs, RSHIFT_EXPR, data_in,
                               build_int_cst (unsigned_type_node,
                                              limb_prec - cnt));
      insert_before (g);
      if (!types_compatible_p (rhs1_type, m_limb_type))
        lhs = add_cast (rhs1_type, lhs);
      data_in = lhs;
    }
  if (types_compatible_p (rhs1_type, m_limb_type))
    {
      if (data_out == NULL_TREE)
        data_out = make_ssa_name (m_limb_type);
      g = gimple_build_assign (data_out, rhs1);
      insert_before (g);
    }
  if (cnt < (unsigned) TYPE_PRECISION (rhs1_type))
    {
      lhs = make_ssa_name (rhs1_type);
      g = gimple_build_assign (lhs, LSHIFT_EXPR, rhs1, rhs2);
      insert_before (g);
      if (!integer_zerop (data_in))
        {
          rhs1 = lhs;
          lhs = make_ssa_name (rhs1_type);
          g = gimple_build_assign (lhs, BIT_IOR_EXPR, rhs1, data_in);
          insert_before (g);
        }
    }
  else
    lhs = data_in;
  m_data[m_data_cnt] = data_out;
  m_data_cnt += 2;
  return lhs;
}
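
/* E.g. for y << 5 with 64-bit limbs each limb is computed roughly as
     lhs = (y_limb << 5) | (below_y_limb >> 59);
   where below_y_limb is the previous (less significant) limb of y
   remembered in m_data.  */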

/* Helper function for handle_stmt method, handle an integral
   to integral conversion.  */

tree
bitint_large_huge::handle_cast (tree lhs_type, tree rhs1, tree idx)
{
  tree rhs_type = TREE_TYPE (rhs1);
  gimple *g;
  if ((TREE_CODE (rhs1) == SSA_NAME || TREE_CODE (rhs1) == INTEGER_CST)
      && TREE_CODE (lhs_type) == BITINT_TYPE
      && TREE_CODE (rhs_type) == BITINT_TYPE
      && bitint_precision_kind (lhs_type) >= bitint_prec_large
      && bitint_precision_kind (rhs_type) >= bitint_prec_large)
    {
      if (TYPE_PRECISION (rhs_type) >= TYPE_PRECISION (lhs_type)
          /* If lhs has bigger precision than rhs, we can use
             the simple case only if there is a guarantee that
             the most significant limb is handled in straight
             line code.  If m_var_msb (on left shifts), or if
             m_upwards_2limb * limb_prec is equal to lhs precision,
             or if not m_upwards_2limb and lhs_type has precision
             which is a multiple of limb_prec, that is not the case.  */
          || (!m_var_msb
              && (CEIL (TYPE_PRECISION (lhs_type), limb_prec)
                  == CEIL (TYPE_PRECISION (rhs_type), limb_prec))
              && ((!m_upwards_2limb
                   && (TYPE_PRECISION (lhs_type) % limb_prec != 0))
                  || (m_upwards_2limb
                      && (m_upwards_2limb * limb_prec
                          < TYPE_PRECISION (lhs_type))))))
        {
          rhs1 = handle_operand (rhs1, idx);
          if (tree_fits_uhwi_p (idx))
            {
              tree type = limb_access_type (lhs_type, idx);
              if (!types_compatible_p (type, TREE_TYPE (rhs1)))
                rhs1 = add_cast (type, rhs1);
            }
          return rhs1;
        }
      tree t;
      /* Indexes lower than this don't need any special processing.  */
      unsigned low = ((unsigned) TYPE_PRECISION (rhs_type)
                      - !TYPE_UNSIGNED (rhs_type)) / limb_prec;
      /* Indexes >= than this always contain an extension.  */
      unsigned high = CEIL ((unsigned) TYPE_PRECISION (rhs_type), limb_prec);
      bool save_first = m_first;
      if (m_first)
        {
          m_data.safe_push (NULL_TREE);
          m_data.safe_push (NULL_TREE);
          m_data.safe_push (NULL_TREE);
          if (TYPE_UNSIGNED (rhs_type))
            /* No need to keep state between iterations.  */
            ;
          else if (m_upwards && !m_upwards_2limb)
            /* We need to keep state between iterations, but
               not within any loop, everything is straight line
               code with only increasing indexes.  */
            ;
          else if (!m_upwards_2limb)
            {
              unsigned save_data_cnt = m_data_cnt;
              gimple_stmt_iterator save_gsi = m_gsi;
              m_gsi = m_init_gsi;
              if (gsi_end_p (m_gsi))
                m_gsi = gsi_after_labels (gsi_bb (m_gsi));
              else
                gsi_next (&m_gsi);
              m_data_cnt = save_data_cnt + 3;
              t = handle_operand (rhs1, size_int (low));
              m_first = false;
              m_data[save_data_cnt + 2]
                = build_int_cst (NULL_TREE, m_data_cnt);
              m_data_cnt = save_data_cnt;
              t = add_cast (signed_type_for (m_limb_type), t);
              tree lpm1 = build_int_cst (unsigned_type_node, limb_prec - 1);
              tree n = make_ssa_name (TREE_TYPE (t));
              g = gimple_build_assign (n, RSHIFT_EXPR, t, lpm1);
              insert_before (g);
              m_data[save_data_cnt + 1] = add_cast (m_limb_type, n);
              m_init_gsi = m_gsi;
              if (gsi_end_p (m_init_gsi))
                m_init_gsi = gsi_last_bb (gsi_bb (m_init_gsi));
              else
                gsi_prev (&m_init_gsi);
              m_gsi = save_gsi;
            }
          else if (m_upwards_2limb * limb_prec < TYPE_PRECISION (rhs_type))
            /* We need to keep state between iterations, but
               fortunately not within the loop, only afterwards.  */
            ;
          else
            {
              tree out;
              m_data.truncate (m_data_cnt);
              prepare_data_in_out (build_zero_cst (m_limb_type), idx, &out);
              m_data.safe_push (NULL_TREE);
            }
        }

      unsigned save_data_cnt = m_data_cnt;
      m_data_cnt += 3;
      if (!tree_fits_uhwi_p (idx))
        {
          if (m_upwards_2limb
              && low >= m_upwards_2limb - m_first)
            {
              rhs1 = handle_operand (rhs1, idx);
              if (m_first)
                m_data[save_data_cnt + 2]
                  = build_int_cst (NULL_TREE, m_data_cnt);
              m_first = save_first;
              return rhs1;
            }
          bool single_comparison
            = low == high || (m_upwards_2limb && (low & 1) == m_first);
          tree idxc = idx;
          if (!single_comparison
              && m_upwards_2limb
              && !m_first
              && low + 1 == m_upwards_2limb)
            /* In this case we know that idx <= low always,
               so effectively we just need a single comparison,
               idx < low or idx == low, but we'd need to emit different
               code for the 2 branches than single_comparison normally
               emits.  So, instead of special-casing that, emit a
               low <= low comparison which cfg cleanup will clean up
               at the end of the pass.  */
            idxc = size_int (low);
          g = gimple_build_cond (single_comparison ? LT_EXPR : LE_EXPR,
                                 idxc, size_int (low), NULL_TREE, NULL_TREE);
          edge edge_true_true, edge_true_false, edge_false;
          if_then_if_then_else (g, (single_comparison ? NULL
                                    : gimple_build_cond (EQ_EXPR, idx,
                                                         size_int (low),
                                                         NULL_TREE,
                                                         NULL_TREE)),
                                profile_probability::likely (),
                                profile_probability::unlikely (),
                                edge_true_true, edge_true_false, edge_false);
          bool save_cast_conditional = m_cast_conditional;
          m_cast_conditional = true;
          m_bitfld_load = 0;
          tree t1 = handle_operand (rhs1, idx), t2 = NULL_TREE;
          if (m_first)
            m_data[save_data_cnt + 2]
              = build_int_cst (NULL_TREE, m_data_cnt);
          tree ext = NULL_TREE;
          tree bitfld = NULL_TREE;
          if (!single_comparison)
            {
              m_gsi = gsi_after_labels (edge_true_true->src);
              m_first = false;
              m_data_cnt = save_data_cnt + 3;
              if (m_bitfld_load)
                {
                  bitfld = m_data[m_bitfld_load];
                  m_data[m_bitfld_load] = m_data[m_bitfld_load + 2];
                  m_bitfld_load = 0;
                }
              t2 = handle_operand (rhs1, size_int (low));
              if (!useless_type_conversion_p (m_limb_type, TREE_TYPE (t2)))
                t2 = add_cast (m_limb_type, t2);
              if (!TYPE_UNSIGNED (rhs_type) && m_upwards_2limb)
                {
                  ext = add_cast (signed_type_for (m_limb_type), t2);
                  tree lpm1 = build_int_cst (unsigned_type_node,
                                             limb_prec - 1);
                  tree n = make_ssa_name (TREE_TYPE (ext));
                  g = gimple_build_assign (n, RSHIFT_EXPR, ext, lpm1);
                  insert_before (g);
                  ext = add_cast (m_limb_type, n);
                }
            }
          tree t3;
          if (TYPE_UNSIGNED (rhs_type))
            t3 = build_zero_cst (m_limb_type);
          else if (m_upwards_2limb && (save_first || ext != NULL_TREE))
            t3 = m_data[save_data_cnt];
          else
            t3 = m_data[save_data_cnt + 1];
          m_gsi = gsi_after_labels (edge_true_false->dest);
          t = make_ssa_name (m_limb_type);
          gphi *phi = create_phi_node (t, edge_true_false->dest);
          add_phi_arg (phi, t1, edge_true_false, UNKNOWN_LOCATION);
          add_phi_arg (phi, t3, edge_false, UNKNOWN_LOCATION);
          if (edge_true_true)
            add_phi_arg (phi, t2, edge_true_true, UNKNOWN_LOCATION);
          if (ext)
            {
              tree t4 = make_ssa_name (m_limb_type);
              phi = create_phi_node (t4, edge_true_false->dest);
              add_phi_arg (phi, build_zero_cst (m_limb_type), edge_true_false,
                           UNKNOWN_LOCATION);
              add_phi_arg (phi, m_data[save_data_cnt], edge_false,
                           UNKNOWN_LOCATION);
              add_phi_arg (phi, ext, edge_true_true, UNKNOWN_LOCATION);
              if (!save_cast_conditional)
                {
                  g = gimple_build_assign (m_data[save_data_cnt + 1], t4);
                  insert_before (g);
                }
              else
                for (basic_block bb = gsi_bb (m_gsi);;)
                  {
                    edge e1 = single_succ_edge (bb);
                    edge e2 = find_edge (e1->dest, m_bb), e3;
                    tree t5 = (e2 ? m_data[save_data_cnt + 1]
                               : make_ssa_name (m_limb_type));
                    phi = create_phi_node (t5, e1->dest);
                    edge_iterator ei;
                    FOR_EACH_EDGE (e3, ei, e1->dest->preds)
                      add_phi_arg (phi, (e3 == e1 ? t4
                                         : build_zero_cst (m_limb_type)),
                                   e3, UNKNOWN_LOCATION);
                    if (e2)
                      break;
                    t4 = t5;
                    bb = e1->dest;
                  }
            }
          if (m_bitfld_load)
            {
              tree t4;
              if (!save_first && !save_cast_conditional)
                t4 = m_data[m_bitfld_load + 1];
              else
                t4 = make_ssa_name (m_limb_type);
              phi = create_phi_node (t4, edge_true_false->dest);
              add_phi_arg (phi,
                           edge_true_true ? bitfld : m_data[m_bitfld_load],
                           edge_true_false, UNKNOWN_LOCATION);
              add_phi_arg (phi, m_data[m_bitfld_load + 2],
                           edge_false, UNKNOWN_LOCATION);
              if (edge_true_true)
                add_phi_arg (phi, m_data[m_bitfld_load], edge_true_true,
                             UNKNOWN_LOCATION);
              if (save_cast_conditional)
                for (basic_block bb = gsi_bb (m_gsi);;)
                  {
                    edge e1 = single_succ_edge (bb);
                    edge e2 = find_edge (e1->dest, m_bb), e3;
                    tree t5 = ((e2 && !save_first) ? m_data[m_bitfld_load + 1]
                               : make_ssa_name (m_limb_type));
                    phi = create_phi_node (t5, e1->dest);
                    edge_iterator ei;
                    FOR_EACH_EDGE (e3, ei, e1->dest->preds)
                      add_phi_arg (phi, (e3 == e1 ? t4
                                         : build_zero_cst (m_limb_type)),
                                   e3, UNKNOWN_LOCATION);
                    t4 = t5;
                    if (e2)
                      break;
                    bb = e1->dest;
                  }
              m_data[m_bitfld_load] = t4;
              m_data[m_bitfld_load + 2] = t4;
              m_bitfld_load = 0;
            }
          m_cast_conditional = save_cast_conditional;
          m_first = save_first;
          return t;
        }
      else
        {
          if (tree_to_uhwi (idx) < low)
            {
              t = handle_operand (rhs1, idx);
              if (m_first)
                m_data[save_data_cnt + 2]
                  = build_int_cst (NULL_TREE, m_data_cnt);
            }
          else if (tree_to_uhwi (idx) < high)
            {
              t = handle_operand (rhs1, size_int (low));
              if (m_first)
                m_data[save_data_cnt + 2]
                  = build_int_cst (NULL_TREE, m_data_cnt);
              if (!useless_type_conversion_p (m_limb_type, TREE_TYPE (t)))
                t = add_cast (m_limb_type, t);
              tree ext = NULL_TREE;
              if (!TYPE_UNSIGNED (rhs_type) && m_upwards)
                {
                  ext = add_cast (signed_type_for (m_limb_type), t);
                  tree lpm1 = build_int_cst (unsigned_type_node,
                                             limb_prec - 1);
                  tree n = make_ssa_name (TREE_TYPE (ext));
                  g = gimple_build_assign (n, RSHIFT_EXPR, ext, lpm1);
                  insert_before (g);
                  ext = add_cast (m_limb_type, n);
                  m_data[save_data_cnt + 1] = ext;
                }
            }
          else
            {
              if (TYPE_UNSIGNED (rhs_type) && m_first)
                {
                  handle_operand (rhs1, size_zero_node);
                  m_data[save_data_cnt + 2]
                    = build_int_cst (NULL_TREE, m_data_cnt);
                }
              else
                m_data_cnt = tree_to_uhwi (m_data[save_data_cnt + 2]);
              if (TYPE_UNSIGNED (rhs_type))
                t = build_zero_cst (m_limb_type);
              else if (m_bb && m_data[save_data_cnt])
                t = m_data[save_data_cnt];
              else
                t = m_data[save_data_cnt + 1];
            }
          tree type = limb_access_type (lhs_type, idx);
          if (!useless_type_conversion_p (type, m_limb_type))
            t = add_cast (type, t);
          m_first = save_first;
          return t;
        }
    }
  else if (TREE_CODE (lhs_type) == BITINT_TYPE
           && bitint_precision_kind (lhs_type) >= bitint_prec_large
           && INTEGRAL_TYPE_P (rhs_type))
    {
      /* Add support for 3 or more limbs filled in from normal integral
         type if this assert fails.  If no target chooses limb mode smaller
         than half of largest supported normal integral type, this will not
         be needed.  */
      gcc_assert (TYPE_PRECISION (rhs_type) <= 2 * limb_prec);
      tree r1 = NULL_TREE, r2 = NULL_TREE, rext = NULL_TREE;
      if (m_first)
        {
          gimple_stmt_iterator save_gsi = m_gsi;
          m_gsi = m_init_gsi;
          if (gsi_end_p (m_gsi))
            m_gsi = gsi_after_labels (gsi_bb (m_gsi));
          else
            gsi_next (&m_gsi);
          if (TREE_CODE (rhs_type) == BITINT_TYPE
              && bitint_precision_kind (rhs_type) == bitint_prec_middle)
            {
              tree type = NULL_TREE;
              rhs1 = maybe_cast_middle_bitint (&m_gsi, rhs1, type);
              rhs_type = TREE_TYPE (rhs1);
            }
          r1 = rhs1;
          if (!useless_type_conversion_p (m_limb_type, TREE_TYPE (rhs1)))
            r1 = add_cast (m_limb_type, rhs1);
          if (TYPE_PRECISION (rhs_type) > limb_prec)
            {
              g = gimple_build_assign (make_ssa_name (rhs_type),
                                       RSHIFT_EXPR, rhs1,
                                       build_int_cst (unsigned_type_node,
                                                      limb_prec));
              insert_before (g);
              r2 = add_cast (m_limb_type, gimple_assign_lhs (g));
            }
          if (TYPE_UNSIGNED (rhs_type))
            rext = build_zero_cst (m_limb_type);
          else
            {
              rext = add_cast (signed_type_for (m_limb_type), r2 ? r2 : r1);
              g = gimple_build_assign (make_ssa_name (TREE_TYPE (rext)),
                                       RSHIFT_EXPR, rext,
                                       build_int_cst (unsigned_type_node,
                                                      limb_prec - 1));
              insert_before (g);
              rext = add_cast (m_limb_type, gimple_assign_lhs (g));
            }
          m_init_gsi = m_gsi;
          if (gsi_end_p (m_init_gsi))
            m_init_gsi = gsi_last_bb (gsi_bb (m_init_gsi));
          else
            gsi_prev (&m_init_gsi);
          m_gsi = save_gsi;
        }
      tree t;
      if (m_upwards_2limb)
        {
          if (m_first)
            {
              tree out1, out2;
              prepare_data_in_out (r1, idx, &out1, rext);
              if (TYPE_PRECISION (rhs_type) > limb_prec)
                {
                  prepare_data_in_out (r2, idx, &out2, rext);
                  m_data.pop ();
                  t = m_data.pop ();
                  m_data[m_data_cnt + 1] = t;
                }
              else
                m_data[m_data_cnt + 1] = rext;
              m_data.safe_push (rext);
              t = m_data[m_data_cnt];
            }
          else if (!tree_fits_uhwi_p (idx))
            t = m_data[m_data_cnt + 1];
          else
            {
              tree type = limb_access_type (lhs_type, idx);
              t = m_data[m_data_cnt + 2];
              if (!useless_type_conversion_p (type, m_limb_type))
                t = add_cast (type, t);
            }
          m_data_cnt += 3;
          return t;
        }
      else if (m_first)
        {
          m_data.safe_push (r1);
          m_data.safe_push (r2);
          m_data.safe_push (rext);
        }
      if (tree_fits_uhwi_p (idx))
        {
          tree type = limb_access_type (lhs_type, idx);
          if (integer_zerop (idx))
            t = m_data[m_data_cnt];
          else if (TYPE_PRECISION (rhs_type) > limb_prec
                   && integer_onep (idx))
            t = m_data[m_data_cnt + 1];
          else
            t = m_data[m_data_cnt + 2];
          if (!useless_type_conversion_p (type, m_limb_type))
            t = add_cast (type, t);
          m_data_cnt += 3;
          return t;
        }
      g = gimple_build_cond (NE_EXPR, idx, size_zero_node,
                             NULL_TREE, NULL_TREE);
      edge e2, e3, e4 = NULL;
      if_then (g, profile_probability::likely (), e2, e3);
      if (m_data[m_data_cnt + 1])
        {
          g = gimple_build_cond (EQ_EXPR, idx, size_one_node,
                                 NULL_TREE, NULL_TREE);
          insert_before (g);
          edge e5 = split_block (gsi_bb (m_gsi), g);
          e4 = make_edge (e5->src, e2->dest, EDGE_TRUE_VALUE);
          e2 = find_edge (e5->dest, e2->dest);
          e4->probability = profile_probability::unlikely ();
          e5->flags = EDGE_FALSE_VALUE;
          e5->probability = e4->probability.invert ();
        }
      m_gsi = gsi_after_labels (e2->dest);
      t = make_ssa_name (m_limb_type);
      gphi *phi = create_phi_node (t, e2->dest);
      add_phi_arg (phi, m_data[m_data_cnt + 2], e2, UNKNOWN_LOCATION);
      add_phi_arg (phi, m_data[m_data_cnt], e3, UNKNOWN_LOCATION);
      if (e4)
        add_phi_arg (phi, m_data[m_data_cnt + 1], e4, UNKNOWN_LOCATION);
      m_data_cnt += 3;
      return t;
    }
  return NULL_TREE;
}
1738
1739/* Helper function for handle_stmt method, handle a BIT_FIELD_REF. */
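/* E.g. (illustrative, assuming a 64-bit limb): for constant IDX 2, a
   BIT_FIELD_REF <s, 256, 16> is lowered below to the limb-sized
   BIT_FIELD_REF <s, 64, 16 + 2 * 64>, i.e. a single bit-field load at
   the original bit position plus the scaled limb index.  */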

tree
bitint_large_huge::handle_bit_field_ref (tree op, tree idx)
{
  if (tree_fits_uhwi_p (idx))
    {
      if (m_first)
        m_data.safe_push (NULL);
      ++m_data_cnt;
      unsigned HOST_WIDE_INT sz = tree_to_uhwi (TYPE_SIZE (m_limb_type));
      tree bfr = build3 (BIT_FIELD_REF, m_limb_type,
                         TREE_OPERAND (op, 0),
                         TYPE_SIZE (m_limb_type),
                         size_binop (PLUS_EXPR, TREE_OPERAND (op, 2),
                                     bitsize_int (tree_to_uhwi (idx) * sz)));
      tree r = make_ssa_name (m_limb_type);
      gimple *g = gimple_build_assign (r, bfr);
      insert_before (g);
      tree type = limb_access_type (TREE_TYPE (op), idx);
      if (!useless_type_conversion_p (type, m_limb_type))
        r = add_cast (type, r);
      return r;
    }
  tree var;
  if (m_first)
    {
      unsigned HOST_WIDE_INT sz = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (op)));
      machine_mode mode;
      tree type, bfr;
      if (bitwise_mode_for_size (sz).exists (&mode)
          && known_eq (GET_MODE_BITSIZE (mode), sz))
        type = bitwise_type_for_mode (mode);
      else
        {
          mode = VOIDmode;
          type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (op, 0)));
        }
      if (TYPE_ALIGN (type) < TYPE_ALIGN (TREE_TYPE (op)))
        type = build_aligned_type (type, TYPE_ALIGN (TREE_TYPE (op)));
      var = create_tmp_var (type);
      TREE_ADDRESSABLE (var) = 1;
      gimple *g;
      if (mode != VOIDmode)
        {
          bfr = build3 (BIT_FIELD_REF, type, TREE_OPERAND (op, 0),
                        TYPE_SIZE (type), TREE_OPERAND (op, 2));
          g = gimple_build_assign (make_ssa_name (type),
                                   BIT_FIELD_REF, bfr);
          gimple_set_location (g, m_loc);
          gsi_insert_after (&m_init_gsi, g, GSI_NEW_STMT);
          bfr = gimple_assign_lhs (g);
        }
      else
        bfr = TREE_OPERAND (op, 0);
      g = gimple_build_assign (var, bfr);
      gimple_set_location (g, m_loc);
      gsi_insert_after (&m_init_gsi, g, GSI_NEW_STMT);
      if (mode == VOIDmode)
        {
          unsigned HOST_WIDE_INT nelts
            = CEIL (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (op))), limb_prec);
          tree atype = build_array_type_nelts (m_limb_type, nelts);
          var = build2 (MEM_REF, atype, build_fold_addr_expr (var),
                        build_int_cst (build_pointer_type (type),
                                       tree_to_uhwi (TREE_OPERAND (op, 2))
                                       / BITS_PER_UNIT));
        }
      m_data.safe_push (var);
    }
  else
    var = unshare_expr (m_data[m_data_cnt]);
  ++m_data_cnt;
  var = limb_access (TREE_TYPE (op), var, idx, false);
  tree r = make_ssa_name (m_limb_type);
  gimple *g = gimple_build_assign (r, var);
  insert_before (g);
  return r;
}

/* Add a new EH edge from SRC to EH_EDGE->dest, where EH_EDGE
   is an older EH edge, and except for virtual PHIs duplicate the
   PHI argument from the EH_EDGE to the new EH edge.  */

static void
add_eh_edge (basic_block src, edge eh_edge)
{
  edge e = make_edge (src, eh_edge->dest, EDGE_EH);
  e->probability = profile_probability::very_unlikely ();
  for (gphi_iterator gsi = gsi_start_phis (eh_edge->dest);
       !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree lhs = gimple_phi_result (phi);
      if (virtual_operand_p (lhs))
        continue;
      const phi_arg_d *arg = gimple_phi_arg (phi, eh_edge->dest_idx);
      add_phi_arg (phi, arg->def, e, arg->locus);
    }
}

/* Helper function for handle_stmt method, handle a load from memory.  */
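/* E.g. (illustrative, assuming a 64-bit limb): for a bit-field starting
   BO_BIT bits into limb BO_IDX of its representative REP, each result
   limb below is assembled as
     (rep[bo_idx + idx] >> bo_bit) | (rep[bo_idx + idx + 1] << (64 - bo_bit)),
   which is the RSHIFT/LSHIFT/BIT_IOR sequence emitted near the end of
   the bit-field path.  */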

tree
bitint_large_huge::handle_load (gimple *stmt, tree idx)
{
  tree rhs1 = gimple_assign_rhs1 (stmt);
  tree rhs_type = TREE_TYPE (rhs1);
  bool eh = stmt_ends_bb_p (stmt);
  edge eh_edge = NULL;
  gimple *g;

  if (eh)
    {
      edge_iterator ei;
      basic_block bb = gimple_bb (stmt);

      FOR_EACH_EDGE (eh_edge, ei, bb->succs)
        if (eh_edge->flags & EDGE_EH)
          break;
    }

  if (TREE_CODE (rhs1) == COMPONENT_REF
      && DECL_BIT_FIELD_TYPE (TREE_OPERAND (rhs1, 1)))
    {
      tree fld = TREE_OPERAND (rhs1, 1);
      /* For little-endian, we can allow as inputs bit-fields
         which start at a limb boundary.  */
      gcc_assert (tree_fits_uhwi_p (DECL_FIELD_BIT_OFFSET (fld)));
      if (DECL_OFFSET_ALIGN (fld) >= TYPE_ALIGN (TREE_TYPE (rhs1))
          && (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (fld)) % limb_prec) == 0)
        goto normal_load;
      /* Even if DECL_FIELD_BIT_OFFSET (fld) is a multiple of BITS_PER_UNIT,
         handle it normally for now.  */
      if ((tree_to_uhwi (DECL_FIELD_BIT_OFFSET (fld)) % BITS_PER_UNIT) == 0)
        goto normal_load;
      tree repr = DECL_BIT_FIELD_REPRESENTATIVE (fld);
      poly_int64 bitoffset;
      poly_uint64 field_offset, repr_offset;
      bool var_field_off = false;
      if (poly_int_tree_p (DECL_FIELD_OFFSET (fld), &field_offset)
          && poly_int_tree_p (DECL_FIELD_OFFSET (repr), &repr_offset))
        bitoffset = (field_offset - repr_offset) * BITS_PER_UNIT;
      else
        {
          bitoffset = 0;
          var_field_off = true;
        }
      bitoffset += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (fld))
                    - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
      tree nrhs1 = build3 (COMPONENT_REF, TREE_TYPE (repr),
                           TREE_OPERAND (rhs1, 0), repr,
                           var_field_off ? TREE_OPERAND (rhs1, 2) : NULL_TREE);
      HOST_WIDE_INT bo = bitoffset.to_constant ();
      unsigned bo_idx = (unsigned HOST_WIDE_INT) bo / limb_prec;
      unsigned bo_bit = (unsigned HOST_WIDE_INT) bo % limb_prec;
      if (m_first)
        {
          if (m_upwards)
            {
              gimple_stmt_iterator save_gsi = m_gsi;
              m_gsi = m_init_gsi;
              if (gsi_end_p (m_gsi))
                m_gsi = gsi_after_labels (gsi_bb (m_gsi));
              else
                gsi_next (&m_gsi);
              tree t = limb_access (NULL_TREE, nrhs1, size_int (bo_idx), true);
              tree iv = make_ssa_name (m_limb_type);
              g = gimple_build_assign (iv, t);
              insert_before (g);
              if (eh)
                {
                  maybe_duplicate_eh_stmt (g, stmt);
                  if (eh_edge)
                    {
                      edge e = split_block (gsi_bb (m_gsi), g);
                      add_eh_edge (e->src, eh_edge);
                      m_gsi = gsi_after_labels (e->dest);
                      if (gsi_bb (save_gsi) == e->src)
                        {
                          if (gsi_end_p (save_gsi))
                            save_gsi = gsi_end_bb (e->dest);
                          else
                            save_gsi = gsi_for_stmt (gsi_stmt (save_gsi));
                        }
                      if (m_preheader_bb == e->src)
                        m_preheader_bb = e->dest;
                    }
                }
              m_init_gsi = m_gsi;
              if (gsi_end_p (m_init_gsi))
                m_init_gsi = gsi_last_bb (gsi_bb (m_init_gsi));
              else
                gsi_prev (&m_init_gsi);
              m_gsi = save_gsi;
              tree out;
              prepare_data_in_out (iv, idx, &out);
              out = m_data[m_data_cnt];
              m_data.safe_push (out);
            }
          else
            {
              m_data.safe_push (NULL_TREE);
              m_data.safe_push (NULL_TREE);
              m_data.safe_push (NULL_TREE);
            }
        }

      tree nidx0 = NULL_TREE, nidx1;
      tree iv = m_data[m_data_cnt];
      if (m_cast_conditional && iv)
        {
          gcc_assert (!m_bitfld_load);
          m_bitfld_load = m_data_cnt;
        }
      if (tree_fits_uhwi_p (idx))
        {
          unsigned prec = TYPE_PRECISION (rhs_type);
          unsigned HOST_WIDE_INT i = tree_to_uhwi (idx);
          gcc_assert (i * limb_prec < prec);
          nidx1 = size_int (i + bo_idx + 1);
          if ((i + 1) * limb_prec > prec)
            {
              prec %= limb_prec;
              if (prec + bo_bit <= (unsigned) limb_prec)
                nidx1 = NULL_TREE;
            }
          if (!iv)
            nidx0 = size_int (i + bo_idx);
        }
      else
        {
          if (!iv)
            {
              if (bo_idx == 0)
                nidx0 = idx;
              else
                {
                  nidx0 = make_ssa_name (sizetype);
                  g = gimple_build_assign (nidx0, PLUS_EXPR, idx,
                                           size_int (bo_idx));
                  insert_before (g);
                }
            }
          nidx1 = make_ssa_name (sizetype);
          g = gimple_build_assign (nidx1, PLUS_EXPR, idx,
                                   size_int (bo_idx + 1));
          insert_before (g);
        }

      tree iv2 = NULL_TREE;
      if (nidx0)
        {
          tree t = limb_access (NULL_TREE, nrhs1, nidx0, true);
          iv = make_ssa_name (m_limb_type);
          g = gimple_build_assign (iv, t);
          insert_before (g);
          gcc_assert (!eh);
        }
      if (nidx1)
        {
          bool conditional = m_var_msb && !tree_fits_uhwi_p (idx);
          unsigned prec = TYPE_PRECISION (rhs_type);
          if (conditional)
            {
              if ((prec % limb_prec) == 0
                  || ((prec % limb_prec) + bo_bit > (unsigned) limb_prec))
                conditional = false;
            }
          edge edge_true = NULL, edge_false = NULL;
          if (conditional)
            {
              g = gimple_build_cond (NE_EXPR, idx,
                                     size_int (prec / limb_prec),
                                     NULL_TREE, NULL_TREE);
              if_then (g, profile_probability::likely (),
                       edge_true, edge_false);
            }
          tree t = limb_access (NULL_TREE, nrhs1, nidx1, true);
          if (m_upwards_2limb
              && !m_first
              && !m_bitfld_load
              && !tree_fits_uhwi_p (idx))
            iv2 = m_data[m_data_cnt + 1];
          else
            iv2 = make_ssa_name (m_limb_type);
          g = gimple_build_assign (iv2, t);
          insert_before (g);
          if (eh)
            {
              maybe_duplicate_eh_stmt (g, stmt);
              if (eh_edge)
                {
                  edge e = split_block (gsi_bb (m_gsi), g);
                  m_gsi = gsi_after_labels (e->dest);
                  add_eh_edge (e->src, eh_edge);
                }
            }
          if (conditional)
            {
              tree iv3 = make_ssa_name (m_limb_type);
              if (eh)
                edge_true = find_edge (gsi_bb (m_gsi), edge_false->dest);
              gphi *phi = create_phi_node (iv3, edge_true->dest);
              add_phi_arg (phi, iv2, edge_true, UNKNOWN_LOCATION);
              add_phi_arg (phi, build_zero_cst (m_limb_type),
                           edge_false, UNKNOWN_LOCATION);
              m_gsi = gsi_after_labels (edge_true->dest);
              iv2 = iv3;
            }
        }
      g = gimple_build_assign (make_ssa_name (m_limb_type), RSHIFT_EXPR,
                               iv, build_int_cst (unsigned_type_node, bo_bit));
      insert_before (g);
      iv = gimple_assign_lhs (g);
      if (iv2)
        {
          g = gimple_build_assign (make_ssa_name (m_limb_type), LSHIFT_EXPR,
                                   iv2, build_int_cst (unsigned_type_node,
                                                       limb_prec - bo_bit));
          insert_before (g);
          g = gimple_build_assign (make_ssa_name (m_limb_type), BIT_IOR_EXPR,
                                   gimple_assign_lhs (g), iv);
          insert_before (g);
          iv = gimple_assign_lhs (g);
          if (m_data[m_data_cnt])
            m_data[m_data_cnt] = iv2;
        }
      if (tree_fits_uhwi_p (idx))
        {
          tree atype = limb_access_type (rhs_type, idx);
          if (!useless_type_conversion_p (atype, TREE_TYPE (iv)))
            iv = add_cast (atype, iv);
        }
      m_data_cnt += 3;
      return iv;
    }

 normal_load:
  /* Use write_p = true for loads with EH edges to make
     sure limb_access doesn't add a cast as a separate
     statement after it.  */
  rhs1 = limb_access (rhs_type, rhs1, idx, eh);
  tree ret = make_ssa_name (TREE_TYPE (rhs1));
  g = gimple_build_assign (ret, rhs1);
  insert_before (g);
  if (eh)
    {
      maybe_duplicate_eh_stmt (g, stmt);
      if (eh_edge)
        {
          edge e = split_block (gsi_bb (m_gsi), g);
          m_gsi = gsi_after_labels (e->dest);
          add_eh_edge (e->src, eh_edge);
        }
      if (tree_fits_uhwi_p (idx))
        {
          tree atype = limb_access_type (rhs_type, idx);
          if (!useless_type_conversion_p (atype, TREE_TYPE (rhs1)))
            ret = add_cast (atype, ret);
        }
    }
  return ret;
}

/* Return limb IDX of a mergeable statement STMT.  */

tree
bitint_large_huge::handle_stmt (gimple *stmt, tree idx)
{
  tree lhs, rhs1, rhs2 = NULL_TREE;
  gimple *g;
  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      if (gimple_assign_load_p (stmt))
        return handle_load (stmt, idx);
      switch (gimple_assign_rhs_code (stmt))
        {
        case BIT_AND_EXPR:
        case BIT_IOR_EXPR:
        case BIT_XOR_EXPR:
          rhs2 = handle_operand (gimple_assign_rhs2 (stmt), idx);
          /* FALLTHRU */
        case BIT_NOT_EXPR:
          rhs1 = handle_operand (gimple_assign_rhs1 (stmt), idx);
          lhs = make_ssa_name (TREE_TYPE (rhs1));
          g = gimple_build_assign (lhs, gimple_assign_rhs_code (stmt),
                                   rhs1, rhs2);
          insert_before (g);
          return lhs;
        case PLUS_EXPR:
        case MINUS_EXPR:
          rhs1 = handle_operand (gimple_assign_rhs1 (stmt), idx);
          rhs2 = handle_operand (gimple_assign_rhs2 (stmt), idx);
          return handle_plus_minus (gimple_assign_rhs_code (stmt),
                                    rhs1, rhs2, idx);
        case NEGATE_EXPR:
          rhs2 = handle_operand (gimple_assign_rhs1 (stmt), idx);
          rhs1 = build_zero_cst (TREE_TYPE (rhs2));
          return handle_plus_minus (MINUS_EXPR, rhs1, rhs2, idx);
        case LSHIFT_EXPR:
          return handle_lshift (handle_operand (gimple_assign_rhs1 (stmt),
                                                idx),
                                gimple_assign_rhs2 (stmt), idx);
        case SSA_NAME:
        case INTEGER_CST:
          return handle_operand (gimple_assign_rhs1 (stmt), idx);
        CASE_CONVERT:
          return handle_cast (TREE_TYPE (gimple_assign_lhs (stmt)),
                              gimple_assign_rhs1 (stmt), idx);
        case VIEW_CONVERT_EXPR:
          return handle_cast (TREE_TYPE (gimple_assign_lhs (stmt)),
                              TREE_OPERAND (gimple_assign_rhs1 (stmt), 0),
                              idx);
        case BIT_FIELD_REF:
          return handle_bit_field_ref (gimple_assign_rhs1 (stmt), idx);
        default:
          break;
        }
      break;
    default:
      break;
    }
  gcc_unreachable ();
}

/* Return the minimum precision of OP at STMT.
   A positive value is the minimum precision above which all bits
   are zero; a negative value means all bits above the negation of
   the value are copies of the sign bit.  */
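/* E.g. (illustrative): an unsigned OP whose range is known to be
   [0, 1000] yields 10, since 1000 fits in 10 bits; a signed OP with
   range [-5, 5] yields -4, since both bounds fit in a 4-bit signed
   value.  */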

static int
range_to_prec (tree op, gimple *stmt)
{
  int_range_max r;
  wide_int w;
  tree type = TREE_TYPE (op);
  unsigned int prec = TYPE_PRECISION (type);

  if (!optimize
      || !get_range_query (cfun)->range_of_expr (r, op, stmt)
      || r.undefined_p ())
    {
      if (TYPE_UNSIGNED (type))
        return prec;
      else
        return MIN ((int) -prec, -2);
    }

  if (!TYPE_UNSIGNED (TREE_TYPE (op)))
    {
      w = r.lower_bound ();
      if (wi::neg_p (w))
        {
          int min_prec1 = wi::min_precision (w, SIGNED);
          w = r.upper_bound ();
          int min_prec2 = wi::min_precision (w, SIGNED);
          int min_prec = MAX (min_prec1, min_prec2);
          return MIN (-min_prec, -2);
        }
    }

  w = r.upper_bound ();
  int min_prec = wi::min_precision (w, UNSIGNED);
  return MAX (min_prec, 1);
}

/* Return the address of the first limb of OP and write its precision
   into *PREC.  If positive, the operand is zero-extended from that
   precision; if negative, the operand is sign-extended from -*PREC.
   If PREC_STORED is NULL, this is the toplevel call; otherwise
   *PREC_STORED is set to the precision from the innermost call
   without range optimizations.  */
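/* E.g. (illustrative): for INTEGER_CST 5 this returns the address of a
   constant pool entry holding 5 widened to a whole number of limbs,
   with *PREC set to 3, since 5 fits into 3 unsigned bits.  */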

tree
bitint_large_huge::handle_operand_addr (tree op, gimple *stmt,
                                        int *prec_stored, int *prec)
{
  wide_int w;
  location_t loc_save = m_loc;
  if ((TREE_CODE (TREE_TYPE (op)) != BITINT_TYPE
       || bitint_precision_kind (TREE_TYPE (op)) < bitint_prec_large)
      && TREE_CODE (op) != INTEGER_CST)
    {
    do_int:
      *prec = range_to_prec (op, stmt);
      bitint_prec_kind kind = bitint_prec_small;
      gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (op)));
      if (TREE_CODE (TREE_TYPE (op)) == BITINT_TYPE)
        kind = bitint_precision_kind (TREE_TYPE (op));
      if (kind == bitint_prec_middle)
        {
          tree type = NULL_TREE;
          op = maybe_cast_middle_bitint (&m_gsi, op, type);
        }
      tree op_type = TREE_TYPE (op);
      unsigned HOST_WIDE_INT nelts
        = CEIL (TYPE_PRECISION (op_type), limb_prec);
      /* Add support for 3 or more limbs filled in from normal
         integral type if this assert fails.  If no target chooses
         a limb mode smaller than half of the largest supported normal
         integral type, this will not be needed.  */
      gcc_assert (nelts <= 2);
      if (prec_stored)
        *prec_stored = (TYPE_UNSIGNED (op_type)
                        ? TYPE_PRECISION (op_type)
                        : -TYPE_PRECISION (op_type));
      if (*prec <= limb_prec && *prec >= -limb_prec)
        {
          nelts = 1;
          if (prec_stored)
            {
              if (TYPE_UNSIGNED (op_type))
                {
                  if (*prec_stored > limb_prec)
                    *prec_stored = limb_prec;
                }
              else if (*prec_stored < -limb_prec)
                *prec_stored = -limb_prec;
            }
        }
      tree atype = build_array_type_nelts (m_limb_type, nelts);
      tree var = create_tmp_var (atype);
      tree t1 = op;
      if (!useless_type_conversion_p (m_limb_type, op_type))
        t1 = add_cast (m_limb_type, t1);
      tree v = build4 (ARRAY_REF, m_limb_type, var, size_zero_node,
                       NULL_TREE, NULL_TREE);
      gimple *g = gimple_build_assign (v, t1);
      insert_before (g);
      if (nelts > 1)
        {
          tree lp = build_int_cst (unsigned_type_node, limb_prec);
          g = gimple_build_assign (make_ssa_name (op_type),
                                   RSHIFT_EXPR, op, lp);
          insert_before (g);
          tree t2 = gimple_assign_lhs (g);
          t2 = add_cast (m_limb_type, t2);
          v = build4 (ARRAY_REF, m_limb_type, var, size_one_node,
                      NULL_TREE, NULL_TREE);
          g = gimple_build_assign (v, t2);
          insert_before (g);
        }
      tree ret = build_fold_addr_expr (var);
      if (!stmt_ends_bb_p (gsi_stmt (m_gsi)))
        {
          tree clobber = build_clobber (atype, CLOBBER_STORAGE_END);
          g = gimple_build_assign (var, clobber);
          gsi_insert_after (&m_gsi, g, GSI_SAME_STMT);
        }
      m_loc = loc_save;
      return ret;
    }
  switch (TREE_CODE (op))
    {
    case SSA_NAME:
      if (m_names == NULL
          || !bitmap_bit_p (m_names, SSA_NAME_VERSION (op)))
        {
          gimple *g = SSA_NAME_DEF_STMT (op);
          tree ret;
          m_loc = gimple_location (g);
          if (gimple_assign_load_p (g))
            {
              *prec = range_to_prec (op, NULL);
              if (prec_stored)
                *prec_stored = (TYPE_UNSIGNED (TREE_TYPE (op))
                                ? TYPE_PRECISION (TREE_TYPE (op))
                                : -TYPE_PRECISION (TREE_TYPE (op)));
              ret = build_fold_addr_expr (gimple_assign_rhs1 (g));
              ret = force_gimple_operand_gsi (&m_gsi, ret, true,
                                              NULL_TREE, true, GSI_SAME_STMT);
            }
          else if (gimple_code (g) == GIMPLE_NOP)
            {
              *prec = TYPE_UNSIGNED (TREE_TYPE (op)) ? limb_prec : -limb_prec;
              if (prec_stored)
                *prec_stored = *prec;
              tree var = create_tmp_var (m_limb_type);
              TREE_ADDRESSABLE (var) = 1;
              ret = build_fold_addr_expr (var);
              if (!stmt_ends_bb_p (gsi_stmt (m_gsi)))
                {
                  tree clobber = build_clobber (m_limb_type,
                                                CLOBBER_STORAGE_END);
                  g = gimple_build_assign (var, clobber);
                  gsi_insert_after (&m_gsi, g, GSI_SAME_STMT);
                }
            }
          else
            {
              gcc_assert (gimple_assign_cast_p (g));
              tree rhs1 = gimple_assign_rhs1 (g);
              bitint_prec_kind kind = bitint_prec_small;
              if (TREE_CODE (rhs1) == VIEW_CONVERT_EXPR)
                rhs1 = TREE_OPERAND (rhs1, 0);
              gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)));
              if (TREE_CODE (TREE_TYPE (rhs1)) == BITINT_TYPE)
                kind = bitint_precision_kind (TREE_TYPE (rhs1));
              if (kind >= bitint_prec_large)
                {
                  tree lhs_type = TREE_TYPE (op);
                  tree rhs_type = TREE_TYPE (rhs1);
                  int prec_stored_val = 0;
                  ret = handle_operand_addr (rhs1, g, &prec_stored_val, prec);
                  if (TYPE_PRECISION (lhs_type) > TYPE_PRECISION (rhs_type))
                    {
                      if (TYPE_UNSIGNED (lhs_type)
                          && !TYPE_UNSIGNED (rhs_type))
                        gcc_assert (*prec >= 0 || prec_stored == NULL);
                    }
                  else
                    {
                      if (*prec > 0 && *prec < TYPE_PRECISION (lhs_type))
                        ;
                      else if (TYPE_UNSIGNED (lhs_type))
                        {
                          gcc_assert (*prec > 0
                                      || prec_stored_val > 0
                                      || (-prec_stored_val
                                          >= TYPE_PRECISION (lhs_type)));
                          *prec = TYPE_PRECISION (lhs_type);
                        }
                      else if (*prec < 0 && -*prec < TYPE_PRECISION (lhs_type))
                        ;
                      else
                        *prec = -TYPE_PRECISION (lhs_type);
                    }
                }
              else
                {
                  op = rhs1;
                  stmt = g;
                  goto do_int;
                }
            }
          m_loc = loc_save;
          return ret;
        }
      else
        {
          int p = var_to_partition (m_map, op);
          gcc_assert (m_vars[p] != NULL_TREE);
          *prec = range_to_prec (op, stmt);
          if (prec_stored)
            *prec_stored = (TYPE_UNSIGNED (TREE_TYPE (op))
                            ? TYPE_PRECISION (TREE_TYPE (op))
                            : -TYPE_PRECISION (TREE_TYPE (op)));
          return build_fold_addr_expr (m_vars[p]);
        }
    case INTEGER_CST:
      unsigned int min_prec, mp;
      tree type;
      w = wi::to_wide (op);
      if (tree_int_cst_sgn (op) >= 0)
        {
          min_prec = wi::min_precision (w, UNSIGNED);
          *prec = MAX (min_prec, 1);
        }
      else
        {
          min_prec = wi::min_precision (w, SIGNED);
          *prec = MIN ((int) -min_prec, -2);
        }
      mp = CEIL (min_prec, limb_prec) * limb_prec;
      if (mp == 0)
        mp = 1;
      if (mp >= (unsigned) TYPE_PRECISION (TREE_TYPE (op))
          && (TREE_CODE (TREE_TYPE (op)) == BITINT_TYPE
              || TYPE_PRECISION (TREE_TYPE (op)) <= limb_prec))
        type = TREE_TYPE (op);
      else
        type = build_bitint_type (mp, 1);
      if (TREE_CODE (type) != BITINT_TYPE
          || bitint_precision_kind (type) == bitint_prec_small)
        {
          if (TYPE_PRECISION (type) <= limb_prec)
            type = m_limb_type;
          else
            {
              while (bitint_precision_kind (mp) == bitint_prec_small)
                mp += limb_prec;
              /* This case is for targets which e.g. have a 64-bit
                 limb but categorize up to 128-bit _BitInts as
                 small.  We could use type of m_limb_type[2] and
                 similar instead to save space.  */
              type = build_bitint_type (mp, 1);
            }
        }
      if (prec_stored)
        {
          if (tree_int_cst_sgn (op) >= 0)
            *prec_stored = MAX (TYPE_PRECISION (type), 1);
          else
            *prec_stored = MIN ((int) -TYPE_PRECISION (type), -2);
        }
      op = tree_output_constant_def (fold_convert (type, op));
      return build_fold_addr_expr (op);
    default:
      gcc_unreachable ();
    }
}

/* Helper function: create a loop before the current location,
   starting with sizetype INIT value from the preheader edge.  Return
   the PHI result and set *IDX_NEXT to the SSA_NAME it creates and
   uses from the latch edge.  */
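/* Shape created (illustrative; the caller later fills the body and
   appends the latch condition):

       preheader --> header --> continuation
                      ^  |
                      +--+  backedge carrying *IDX_NEXT

   with idx = PHI <INIT (preheader), *IDX_NEXT (backedge)> in the
   header.  */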

tree
bitint_large_huge::create_loop (tree init, tree *idx_next)
{
  if (!gsi_end_p (m_gsi))
    gsi_prev (&m_gsi);
  else
    m_gsi = gsi_last_bb (gsi_bb (m_gsi));
  edge e1 = split_block (gsi_bb (m_gsi), gsi_stmt (m_gsi));
  edge e2 = split_block (e1->dest, (gimple *) NULL);
  edge e3 = make_edge (e1->dest, e1->dest, EDGE_TRUE_VALUE);
  e3->probability = profile_probability::very_unlikely ();
  e2->flags = EDGE_FALSE_VALUE;
  e2->probability = e3->probability.invert ();
  tree idx = make_ssa_name (sizetype);
  gphi *phi = create_phi_node (idx, e1->dest);
  add_phi_arg (phi, init, e1, UNKNOWN_LOCATION);
  *idx_next = make_ssa_name (sizetype);
  add_phi_arg (phi, *idx_next, e3, UNKNOWN_LOCATION);
  m_gsi = gsi_after_labels (e1->dest);
  m_bb = e1->dest;
  m_preheader_bb = e1->src;
  class loop *loop = alloc_loop ();
  loop->header = e1->dest;
  add_loop (loop, e1->src->loop_father);
  return idx;
}

/* Lower large/huge _BitInt statement mergeable or similar STMT which can be
   lowered using iteration from the least significant limb up to the most
   significant limb.  For large _BitInt it is emitted as straight line code
   before the current location, for huge _BitInt as a loop handling two
   limbs at once, followed by handling the remaining limbs in straight line
   code (at most one full and one partial limb).  It can also handle
   EQ_EXPR/NE_EXPR comparisons, in that case CMP_CODE should be the
   comparison code and CMP_OP1/CMP_OP2 the comparison operands.  */
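/* Worked example (illustrative, assuming 64-bit limbs and that the
   precision is categorized as huge): for _BitInt(255), rem == 127 and
   end == 2, so the loop below processes limbs 0 and 1 (two per
   iteration, here a single iteration) and the straight line tail
   processes limb 2 and the 63-bit partial limb 3.  */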

tree
bitint_large_huge::lower_mergeable_stmt (gimple *stmt, tree_code &cmp_code,
                                         tree cmp_op1, tree cmp_op2)
{
  bool eq_p = cmp_code != ERROR_MARK;
  tree type;
  if (eq_p)
    type = TREE_TYPE (cmp_op1);
  else
    type = TREE_TYPE (gimple_assign_lhs (stmt));
  gcc_assert (TREE_CODE (type) == BITINT_TYPE);
  bitint_prec_kind kind = bitint_precision_kind (type);
  gcc_assert (kind >= bitint_prec_large);
  gimple *g;
  tree lhs = gimple_get_lhs (stmt);
  tree rhs1, lhs_type = lhs ? TREE_TYPE (lhs) : NULL_TREE;
  if (lhs
      && TREE_CODE (lhs) == SSA_NAME
      && TREE_CODE (TREE_TYPE (lhs)) == BITINT_TYPE
      && bitint_precision_kind (TREE_TYPE (lhs)) >= bitint_prec_large)
    {
      int p = var_to_partition (m_map, lhs);
      gcc_assert (m_vars[p] != NULL_TREE);
      m_lhs = lhs = m_vars[p];
    }
  unsigned cnt, rem = 0, end = 0, prec = TYPE_PRECISION (type);
  bool sext = false;
  tree ext = NULL_TREE, store_operand = NULL_TREE;
  bool eh = false;
  basic_block eh_pad = NULL;
  tree nlhs = NULL_TREE;
  unsigned HOST_WIDE_INT bo_idx = 0;
  unsigned HOST_WIDE_INT bo_bit = 0;
  tree bf_cur = NULL_TREE, bf_next = NULL_TREE;
  if (gimple_store_p (stmt))
    {
      store_operand = gimple_assign_rhs1 (stmt);
      eh = stmt_ends_bb_p (stmt);
      if (eh)
        {
          edge e;
          edge_iterator ei;
          basic_block bb = gimple_bb (stmt);

          FOR_EACH_EDGE (e, ei, bb->succs)
            if (e->flags & EDGE_EH)
              {
                eh_pad = e->dest;
                break;
              }
        }
      if (TREE_CODE (lhs) == COMPONENT_REF
          && DECL_BIT_FIELD_TYPE (TREE_OPERAND (lhs, 1)))
        {
          tree fld = TREE_OPERAND (lhs, 1);
          gcc_assert (tree_fits_uhwi_p (DECL_FIELD_BIT_OFFSET (fld)));
          tree repr = DECL_BIT_FIELD_REPRESENTATIVE (fld);
          poly_int64 bitoffset;
          poly_uint64 field_offset, repr_offset;
          if ((tree_to_uhwi (DECL_FIELD_BIT_OFFSET (fld)) % BITS_PER_UNIT) == 0)
            nlhs = lhs;
          else
            {
              bool var_field_off = false;
              if (poly_int_tree_p (DECL_FIELD_OFFSET (fld), &field_offset)
                  && poly_int_tree_p (DECL_FIELD_OFFSET (repr), &repr_offset))
                bitoffset = (field_offset - repr_offset) * BITS_PER_UNIT;
              else
                {
                  bitoffset = 0;
                  var_field_off = true;
                }
              bitoffset += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (fld))
                            - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
              nlhs = build3 (COMPONENT_REF, TREE_TYPE (repr),
                             TREE_OPERAND (lhs, 0), repr,
                             var_field_off
                             ? TREE_OPERAND (lhs, 2) : NULL_TREE);
              HOST_WIDE_INT bo = bitoffset.to_constant ();
              bo_idx = (unsigned HOST_WIDE_INT) bo / limb_prec;
              bo_bit = (unsigned HOST_WIDE_INT) bo % limb_prec;
            }
        }
    }
  if ((store_operand
       && TREE_CODE (store_operand) == SSA_NAME
       && (m_names == NULL
           || !bitmap_bit_p (m_names, SSA_NAME_VERSION (store_operand)))
       && gimple_assign_cast_p (SSA_NAME_DEF_STMT (store_operand)))
      || gimple_assign_cast_p (stmt))
    {
      rhs1 = gimple_assign_rhs1 (store_operand
                                 ? SSA_NAME_DEF_STMT (store_operand)
                                 : stmt);
      if (TREE_CODE (rhs1) == VIEW_CONVERT_EXPR)
        rhs1 = TREE_OPERAND (rhs1, 0);
      /* Optimize mergeable ops ending with widening cast to _BitInt
         (or followed by store).  We can lower just the limbs of the
         cast operand and widen afterwards.  */
      if (TREE_CODE (rhs1) == SSA_NAME
          && (m_names == NULL
              || !bitmap_bit_p (m_names, SSA_NAME_VERSION (rhs1)))
          && TREE_CODE (TREE_TYPE (rhs1)) == BITINT_TYPE
          && bitint_precision_kind (TREE_TYPE (rhs1)) >= bitint_prec_large
          && (CEIL ((unsigned) TYPE_PRECISION (TREE_TYPE (rhs1)),
                    limb_prec) < CEIL (prec, limb_prec)
              || (kind == bitint_prec_huge
                  && TYPE_PRECISION (TREE_TYPE (rhs1)) < prec)))
        {
          store_operand = rhs1;
          prec = TYPE_PRECISION (TREE_TYPE (rhs1));
          kind = bitint_precision_kind (TREE_TYPE (rhs1));
          if (!TYPE_UNSIGNED (TREE_TYPE (rhs1)))
            sext = true;
        }
    }
  tree idx = NULL_TREE, idx_first = NULL_TREE, idx_next = NULL_TREE;
  if (kind == bitint_prec_large)
    cnt = CEIL (prec, limb_prec);
  else
    {
      rem = (prec % (2 * limb_prec));
      end = (prec - rem) / limb_prec;
      cnt = 2 + CEIL (rem, limb_prec);
      idx = idx_first = create_loop (size_zero_node, &idx_next);
    }

  basic_block edge_bb = NULL;
  if (eq_p)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
      gsi_prev (&gsi);
      edge e = split_block (gsi_bb (gsi), gsi_stmt (gsi));
      edge_bb = e->src;
      if (kind == bitint_prec_large)
        m_gsi = gsi_end_bb (edge_bb);
    }
  else
    m_after_stmt = stmt;
  if (kind != bitint_prec_large)
    m_upwards_2limb = end;
  m_upwards = true;

  bool separate_ext
    = (prec != (unsigned) TYPE_PRECISION (type)
       && (CEIL ((unsigned) TYPE_PRECISION (type), limb_prec)
           > CEIL (prec, limb_prec)));

  for (unsigned i = 0; i < cnt; i++)
    {
      m_data_cnt = 0;
      if (kind == bitint_prec_large)
        idx = size_int (i);
      else if (i >= 2)
        idx = size_int (end + (i > 2));
      if (eq_p)
        {
          rhs1 = handle_operand (cmp_op1, idx);
          tree rhs2 = handle_operand (cmp_op2, idx);
          g = gimple_build_cond (NE_EXPR, rhs1, rhs2, NULL_TREE, NULL_TREE);
          insert_before (g);
          edge e1 = split_block (gsi_bb (m_gsi), g);
          e1->flags = EDGE_FALSE_VALUE;
          edge e2 = make_edge (e1->src, gimple_bb (stmt), EDGE_TRUE_VALUE);
          e1->probability = profile_probability::unlikely ();
          e2->probability = e1->probability.invert ();
          if (i == 0)
            set_immediate_dominator (CDI_DOMINATORS, e2->dest, e2->src);
          m_gsi = gsi_after_labels (e1->dest);
        }
      else
        {
          if (store_operand)
            rhs1 = handle_operand (store_operand, idx);
          else
            rhs1 = handle_stmt (stmt, idx);
          if (!useless_type_conversion_p (m_limb_type, TREE_TYPE (rhs1)))
            rhs1 = add_cast (m_limb_type, rhs1);
          if (sext && i == cnt - 1)
            ext = rhs1;
          tree nidx = idx;
          if (bo_idx)
            {
              if (tree_fits_uhwi_p (idx))
                nidx = size_int (tree_to_uhwi (idx) + bo_idx);
              else
                {
                  nidx = make_ssa_name (sizetype);
                  g = gimple_build_assign (nidx, PLUS_EXPR, idx,
                                           size_int (bo_idx));
                  insert_before (g);
                }
            }
          bool done = false;
          basic_block new_bb = NULL;
          /* Handle stores into bit-fields.  */
          if (bo_bit)
            {
              if (i == 0)
                {
                  edge e2 = NULL;
                  if (kind != bitint_prec_large)
                    {
                      prepare_data_in_out (build_zero_cst (m_limb_type),
                                           idx, &bf_next);
                      bf_next = m_data.pop ();
                      bf_cur = m_data.pop ();
                      g = gimple_build_cond (EQ_EXPR, idx, size_zero_node,
                                             NULL_TREE, NULL_TREE);
                      edge edge_true;
                      if_then_else (g, profile_probability::unlikely (),
                                    edge_true, e2);
                      new_bb = e2->dest;
                    }
                  tree ftype
                    = build_nonstandard_integer_type (limb_prec - bo_bit, 1);
                  tree bfr = build_bit_field_ref (ftype, unshare_expr (nlhs),
                                                  limb_prec - bo_bit,
                                                  bo_idx * limb_prec + bo_bit);
                  tree t = add_cast (ftype, rhs1);
                  g = gimple_build_assign (bfr, t);
                  insert_before (g);
                  if (eh)
                    {
                      maybe_duplicate_eh_stmt (g, stmt);
                      if (eh_pad)
                        {
                          edge e = split_block (gsi_bb (m_gsi), g);
                          m_gsi = gsi_after_labels (e->dest);
                          add_eh_edge (e->src,
                                       find_edge (gimple_bb (stmt), eh_pad));
                        }
                    }
                  if (kind == bitint_prec_large)
                    {
                      bf_cur = rhs1;
                      done = true;
                    }
                  else if (e2)
                    m_gsi = gsi_after_labels (e2->src);
                }
              if (!done)
                {
                  tree t1 = make_ssa_name (m_limb_type);
                  tree t2 = make_ssa_name (m_limb_type);
                  tree t3 = make_ssa_name (m_limb_type);
                  g = gimple_build_assign (t1, RSHIFT_EXPR, bf_cur,
                                           build_int_cst (unsigned_type_node,
                                                          limb_prec - bo_bit));
                  insert_before (g);
                  g = gimple_build_assign (t2, LSHIFT_EXPR, rhs1,
                                           build_int_cst (unsigned_type_node,
                                                          bo_bit));
                  insert_before (g);
                  bf_cur = rhs1;
                  g = gimple_build_assign (t3, BIT_IOR_EXPR, t1, t2);
                  insert_before (g);
                  rhs1 = t3;
                  if (bf_next && i == 1)
                    {
                      g = gimple_build_assign (bf_next, bf_cur);
                      insert_before (g);
                    }
                }
            }
          if (!done)
            {
              /* Handle bit-field access to partial last limb if needed.  */
              if (nlhs
                  && i == cnt - 1
                  && !separate_ext
                  && tree_fits_uhwi_p (idx))
                {
                  unsigned int tprec = TYPE_PRECISION (type);
                  unsigned int rprec = (tprec - 1) % limb_prec + 1;
                  if (rprec + bo_bit < (unsigned) limb_prec)
                    {
                      tree ftype
                        = build_nonstandard_integer_type (rprec + bo_bit, 1);
                      tree bfr
                        = build_bit_field_ref (ftype, unshare_expr (nlhs),
                                               rprec + bo_bit,
                                               (bo_idx + tprec / limb_prec)
                                               * limb_prec);
                      tree t = add_cast (ftype, rhs1);
                      g = gimple_build_assign (bfr, t);
                      done = true;
                      bf_cur = NULL_TREE;
                    }
                  else if (rprec + bo_bit == (unsigned) limb_prec)
                    bf_cur = NULL_TREE;
                }
              /* Otherwise, stores to any other lhs.  */
              if (!done)
                {
                  tree l = limb_access (nlhs ? NULL_TREE : lhs_type,
                                        nlhs ? nlhs : lhs, nidx, true);
                  g = gimple_build_assign (l, rhs1);
                }
              insert_before (g);
              if (eh)
                {
                  maybe_duplicate_eh_stmt (g, stmt);
                  if (eh_pad)
                    {
                      edge e = split_block (gsi_bb (m_gsi), g);
                      m_gsi = gsi_after_labels (e->dest);
                      add_eh_edge (e->src,
                                   find_edge (gimple_bb (stmt), eh_pad));
                    }
                }
              if (new_bb)
                m_gsi = gsi_after_labels (new_bb);
            }
        }
      m_first = false;
      if (kind == bitint_prec_huge && i <= 1)
        {
          if (i == 0)
            {
              idx = make_ssa_name (sizetype);
              g = gimple_build_assign (idx, PLUS_EXPR, idx_first,
                                       size_one_node);
              insert_before (g);
            }
          else
            {
              g = gimple_build_assign (idx_next, PLUS_EXPR, idx_first,
                                       size_int (2));
              insert_before (g);
              g = gimple_build_cond (NE_EXPR, idx_next, size_int (end),
                                     NULL_TREE, NULL_TREE);
              insert_before (g);
              if (eq_p)
                m_gsi = gsi_after_labels (edge_bb);
              else
                m_gsi = gsi_for_stmt (stmt);
              m_bb = NULL;
            }
        }
    }

  if (separate_ext)
    {
      if (sext)
        {
          ext = add_cast (signed_type_for (m_limb_type), ext);
          tree lpm1 = build_int_cst (unsigned_type_node,
                                     limb_prec - 1);
          tree n = make_ssa_name (TREE_TYPE (ext));
          g = gimple_build_assign (n, RSHIFT_EXPR, ext, lpm1);
          insert_before (g);
          ext = add_cast (m_limb_type, n);
        }
      else
        ext = build_zero_cst (m_limb_type);
      kind = bitint_precision_kind (type);
      unsigned start = CEIL (prec, limb_prec);
      prec = TYPE_PRECISION (type);
      idx = idx_first = idx_next = NULL_TREE;
      if (prec <= (start + 2 + (bo_bit != 0)) * limb_prec)
        kind = bitint_prec_large;
      if (kind == bitint_prec_large)
        cnt = CEIL (prec, limb_prec) - start;
      else
        {
          rem = prec % limb_prec;
          end = (prec - rem) / limb_prec;
          cnt = (bo_bit != 0) + 1 + (rem != 0);
        }
      for (unsigned i = 0; i < cnt; i++)
        {
          if (kind == bitint_prec_large || (i == 0 && bo_bit != 0))
            idx = size_int (start + i);
          else if (i == cnt - 1 && (rem != 0))
            idx = size_int (end);
          else if (i == (bo_bit != 0))
            idx = create_loop (size_int (start + i), &idx_next);
          rhs1 = ext;
          if (bf_cur != NULL_TREE && bf_cur != ext)
            {
              tree t1 = make_ssa_name (m_limb_type);
              g = gimple_build_assign (t1, RSHIFT_EXPR, bf_cur,
                                       build_int_cst (unsigned_type_node,
                                                      limb_prec - bo_bit));
              insert_before (g);
              if (integer_zerop (ext))
                rhs1 = t1;
              else
                {
                  tree t2 = make_ssa_name (m_limb_type);
                  rhs1 = make_ssa_name (m_limb_type);
                  g = gimple_build_assign (t2, LSHIFT_EXPR, ext,
                                           build_int_cst (unsigned_type_node,
                                                          bo_bit));
                  insert_before (g);
                  g = gimple_build_assign (rhs1, BIT_IOR_EXPR, t1, t2);
                  insert_before (g);
                }
              bf_cur = ext;
            }
          tree nidx = idx;
          if (bo_idx)
            {
              if (tree_fits_uhwi_p (idx))
                nidx = size_int (tree_to_uhwi (idx) + bo_idx);
              else
                {
                  nidx = make_ssa_name (sizetype);
                  g = gimple_build_assign (nidx, PLUS_EXPR, idx,
                                           size_int (bo_idx));
                  insert_before (g);
                }
            }
          bool done = false;
          /* Handle bit-field access to partial last limb if needed.  */
          if (nlhs && i == cnt - 1)
            {
              unsigned int tprec = TYPE_PRECISION (type);
              unsigned int rprec = (tprec - 1) % limb_prec + 1;
              if (rprec + bo_bit < (unsigned) limb_prec)
                {
                  tree ftype
                    = build_nonstandard_integer_type (rprec + bo_bit, 1);
                  tree bfr
                    = build_bit_field_ref (ftype, unshare_expr (nlhs),
                                           rprec + bo_bit,
                                           (bo_idx + tprec / limb_prec)
                                           * limb_prec);
                  tree t = add_cast (ftype, rhs1);
                  g = gimple_build_assign (bfr, t);
                  done = true;
                  bf_cur = NULL_TREE;
                }
              else if (rprec + bo_bit == (unsigned) limb_prec)
                bf_cur = NULL_TREE;
            }
          /* Otherwise, stores to any other lhs.  */
          if (!done)
            {
              tree l = limb_access (nlhs ? NULL_TREE : lhs_type,
                                    nlhs ? nlhs : lhs, nidx, true);
              g = gimple_build_assign (l, rhs1);
            }
          insert_before (g);
          if (eh)
            {
              maybe_duplicate_eh_stmt (g, stmt);
              if (eh_pad)
                {
                  edge e = split_block (gsi_bb (m_gsi), g);
                  m_gsi = gsi_after_labels (e->dest);
                  add_eh_edge (e->src, find_edge (gimple_bb (stmt), eh_pad));
                }
            }
          if (kind == bitint_prec_huge && i == (bo_bit != 0))
            {
              g = gimple_build_assign (idx_next, PLUS_EXPR, idx,
                                       size_one_node);
              insert_before (g);
              g = gimple_build_cond (NE_EXPR, idx_next, size_int (end),
                                     NULL_TREE, NULL_TREE);
              insert_before (g);
              m_gsi = gsi_for_stmt (stmt);
              m_bb = NULL;
            }
        }
    }
  if (bf_cur != NULL_TREE)
    {
      unsigned int tprec = TYPE_PRECISION (type);
      unsigned int rprec = (tprec + bo_bit) % limb_prec;
      tree ftype = build_nonstandard_integer_type (rprec, 1);
      tree bfr = build_bit_field_ref (ftype, unshare_expr (nlhs),
                                      rprec,
                                      (bo_idx + (tprec + bo_bit) / limb_prec)
                                      * limb_prec);
      rhs1 = bf_cur;
      if (bf_cur != ext)
        {
          rhs1 = make_ssa_name (TREE_TYPE (rhs1));
          g = gimple_build_assign (rhs1, RSHIFT_EXPR, bf_cur,
                                   build_int_cst (unsigned_type_node,
                                                  limb_prec - bo_bit));
          insert_before (g);
        }
      rhs1 = add_cast (ftype, rhs1);
      g = gimple_build_assign (bfr, rhs1);
      insert_before (g);
      if (eh)
        {
          maybe_duplicate_eh_stmt (g, stmt);
          if (eh_pad)
            {
              edge e = split_block (gsi_bb (m_gsi), g);
              m_gsi = gsi_after_labels (e->dest);
              add_eh_edge (e->src, find_edge (gimple_bb (stmt), eh_pad));
            }
        }
    }

  if (gimple_store_p (stmt))
    {
      unlink_stmt_vdef (stmt);
      release_ssa_name (gimple_vdef (stmt));
      gsi_remove (&m_gsi, true);
    }
  if (eq_p)
    {
      lhs = make_ssa_name (boolean_type_node);
      basic_block bb = gimple_bb (stmt);
      gphi *phi = create_phi_node (lhs, bb);
      edge e = find_edge (gsi_bb (m_gsi), bb);
      unsigned int n = EDGE_COUNT (bb->preds);
      for (unsigned int i = 0; i < n; i++)
        {
          edge e2 = EDGE_PRED (bb, i);
          add_phi_arg (phi, e == e2 ? boolean_true_node : boolean_false_node,
                       e2, UNKNOWN_LOCATION);
        }
      cmp_code = cmp_code == EQ_EXPR ? NE_EXPR : EQ_EXPR;
      return lhs;
    }
  else
    return NULL_TREE;
}

/* Handle a large/huge _BitInt comparison statement STMT other than
   EQ_EXPR/NE_EXPR.  CMP_CODE, CMP_OP1 and CMP_OP2 have the same meaning
   as in lower_mergeable_stmt.  The {GT,GE,LT,LE}_EXPR comparisons are
   lowered by iteration from the most significant limb downwards to
   the least significant one, for large _BitInt in straight line code,
   otherwise with the most significant limb handled in straight line
   code, followed by a loop handling one limb at a time.  Comparisons
   with unsigned huge _BitInt with precisions which are multiples of
   limb precision can use just the loop and don't need to handle the
   most significant limb before the loop.  The loop or straight line
   code jumps to the final basic block if a particular pair of limbs
   is not equal.  */
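/* E.g. (illustrative, assuming 64-bit limbs): lowering a < b for
   unsigned _BitInt(256) emits a loop walking limbs 3, 2, 1, 0; at each
   step it branches to the final block if the limbs differ (the GT or
   LT test decides the result there), and only if all limb pairs
   compare equal does the final PHI select the GE/LE default.  */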

tree
bitint_large_huge::lower_comparison_stmt (gimple *stmt, tree_code &cmp_code,
                                          tree cmp_op1, tree cmp_op2)
{
  tree type = TREE_TYPE (cmp_op1);
  gcc_assert (TREE_CODE (type) == BITINT_TYPE);
  bitint_prec_kind kind = bitint_precision_kind (type);
  gcc_assert (kind >= bitint_prec_large);
  gimple *g;
  if (!TYPE_UNSIGNED (type)
      && integer_zerop (cmp_op2)
      && (cmp_code == GE_EXPR || cmp_code == LT_EXPR))
    {
      unsigned end = CEIL ((unsigned) TYPE_PRECISION (type), limb_prec) - 1;
      tree idx = size_int (end);
      m_data_cnt = 0;
      tree rhs1 = handle_operand (cmp_op1, idx);
      if (TYPE_UNSIGNED (TREE_TYPE (rhs1)))
        {
          tree stype = signed_type_for (TREE_TYPE (rhs1));
          rhs1 = add_cast (stype, rhs1);
        }
      tree lhs = make_ssa_name (boolean_type_node);
      g = gimple_build_assign (lhs, cmp_code, rhs1,
                               build_zero_cst (TREE_TYPE (rhs1)));
      insert_before (g);
      cmp_code = NE_EXPR;
      return lhs;
    }

  unsigned cnt, rem = 0, end = 0;
  tree idx = NULL_TREE, idx_next = NULL_TREE;
  if (kind == bitint_prec_large)
    cnt = CEIL ((unsigned) TYPE_PRECISION (type), limb_prec);
  else
    {
      rem = ((unsigned) TYPE_PRECISION (type) % limb_prec);
      if (rem == 0 && !TYPE_UNSIGNED (type))
        rem = limb_prec;
      end = ((unsigned) TYPE_PRECISION (type) - rem) / limb_prec;
      cnt = 1 + (rem != 0);
    }

  basic_block edge_bb = NULL;
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  gsi_prev (&gsi);
  edge e = split_block (gsi_bb (gsi), gsi_stmt (gsi));
  edge_bb = e->src;
  m_gsi = gsi_end_bb (edge_bb);

  edge *edges = XALLOCAVEC (edge, cnt * 2);
  for (unsigned i = 0; i < cnt; i++)
    {
      m_data_cnt = 0;
      if (kind == bitint_prec_large)
        idx = size_int (cnt - i - 1);
      else if (i == cnt - 1)
        idx = create_loop (size_int (end - 1), &idx_next);
      else
        idx = size_int (end);
      tree rhs1 = handle_operand (cmp_op1, idx);
      tree rhs2 = handle_operand (cmp_op2, idx);
      if (i == 0
          && !TYPE_UNSIGNED (type)
          && TYPE_UNSIGNED (TREE_TYPE (rhs1)))
        {
          tree stype = signed_type_for (TREE_TYPE (rhs1));
          rhs1 = add_cast (stype, rhs1);
          rhs2 = add_cast (stype, rhs2);
        }
      g = gimple_build_cond (GT_EXPR, rhs1, rhs2, NULL_TREE, NULL_TREE);
      insert_before (g);
      edge e1 = split_block (gsi_bb (m_gsi), g);
      e1->flags = EDGE_FALSE_VALUE;
      edge e2 = make_edge (e1->src, gimple_bb (stmt), EDGE_TRUE_VALUE);
      e1->probability = profile_probability::likely ();
      e2->probability = e1->probability.invert ();
      if (i == 0)
        set_immediate_dominator (CDI_DOMINATORS, e2->dest, e2->src);
      m_gsi = gsi_after_labels (e1->dest);
      edges[2 * i] = e2;
      g = gimple_build_cond (LT_EXPR, rhs1, rhs2, NULL_TREE, NULL_TREE);
      insert_before (g);
      e1 = split_block (gsi_bb (m_gsi), g);
      e1->flags = EDGE_FALSE_VALUE;
      e2 = make_edge (e1->src, gimple_bb (stmt), EDGE_TRUE_VALUE);
      e1->probability = profile_probability::unlikely ();
      e2->probability = e1->probability.invert ();
      m_gsi = gsi_after_labels (e1->dest);
      edges[2 * i + 1] = e2;
      m_first = false;
      if (kind == bitint_prec_huge && i == cnt - 1)
        {
          g = gimple_build_assign (idx_next, PLUS_EXPR, idx, size_int (-1));
          insert_before (g);
          g = gimple_build_cond (NE_EXPR, idx, size_zero_node,
                                 NULL_TREE, NULL_TREE);
          insert_before (g);
          edge true_edge, false_edge;
          extract_true_false_edges_from_block (gsi_bb (m_gsi),
                                               &true_edge, &false_edge);
          m_gsi = gsi_after_labels (false_edge->dest);
          m_bb = NULL;
        }
    }

  tree lhs = make_ssa_name (boolean_type_node);
  basic_block bb = gimple_bb (stmt);
  gphi *phi = create_phi_node (lhs, bb);
  for (unsigned int i = 0; i < cnt * 2; i++)
    {
      tree val = ((cmp_code == GT_EXPR || cmp_code == GE_EXPR)
                  ^ (i & 1)) ? boolean_true_node : boolean_false_node;
      add_phi_arg (phi, val, edges[i], UNKNOWN_LOCATION);
    }
  add_phi_arg (phi, (cmp_code == GE_EXPR || cmp_code == LE_EXPR)
                    ? boolean_true_node : boolean_false_node,
               find_edge (gsi_bb (m_gsi), bb), UNKNOWN_LOCATION);
  cmp_code = NE_EXPR;
  return lhs;
}

/* Lower large/huge _BitInt left and right shift except for left
   shift by a constant smaller than limb_prec.  */

void
bitint_large_huge::lower_shift_stmt (tree obj, gimple *stmt)
{
  tree rhs1 = gimple_assign_rhs1 (stmt);
  tree lhs = gimple_assign_lhs (stmt);
  tree_code rhs_code = gimple_assign_rhs_code (stmt);
  tree type = TREE_TYPE (rhs1);
  gimple *final_stmt = gsi_stmt (m_gsi);
  gcc_assert (TREE_CODE (type) == BITINT_TYPE
              && bitint_precision_kind (type) >= bitint_prec_large);
  int prec = TYPE_PRECISION (type);
  tree n = gimple_assign_rhs2 (stmt), n1, n2, n3, n4;
  gimple *g;
  if (obj == NULL_TREE)
    {
      int part = var_to_partition (m_map, lhs);
      gcc_assert (m_vars[part] != NULL_TREE);
      obj = m_vars[part];
    }
  /* Preparation code common for both left and right shifts.
     unsigned n1 = n % limb_prec;
     size_t n2 = n / limb_prec;
     size_t n3 = n1 != 0;
     unsigned n4 = (limb_prec - n1) % limb_prec;
     (for power of 2 limb_prec n4 can be -n1 & (limb_prec - 1)).  */
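  /* Worked example (illustrative, assuming limb_prec == 64): for
     n == 133, n1 == 5, n2 == 2, n3 == 1 and n4 == 59, i.e. each
     destination limb combines bits from two source limbs two limb
     positions apart.  */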
  if (TREE_CODE (n) == INTEGER_CST)
    {
      tree lp = build_int_cst (TREE_TYPE (n), limb_prec);
      n1 = int_const_binop (TRUNC_MOD_EXPR, n, lp);
      n2 = fold_convert (sizetype, int_const_binop (TRUNC_DIV_EXPR, n, lp));
      n3 = size_int (!integer_zerop (n1));
      n4 = int_const_binop (TRUNC_MOD_EXPR,
                            int_const_binop (MINUS_EXPR, lp, n1), lp);
    }
  else
    {
      n1 = make_ssa_name (TREE_TYPE (n));
      n2 = make_ssa_name (sizetype);
      n3 = make_ssa_name (sizetype);
      n4 = make_ssa_name (TREE_TYPE (n));
      if (pow2p_hwi (limb_prec))
        {
          tree lpm1 = build_int_cst (TREE_TYPE (n), limb_prec - 1);
          g = gimple_build_assign (n1, BIT_AND_EXPR, n, lpm1);
          insert_before (g);
          g = gimple_build_assign (useless_type_conversion_p (sizetype,
                                                              TREE_TYPE (n))
                                   ? n2 : make_ssa_name (TREE_TYPE (n)),
                                   RSHIFT_EXPR, n,
                                   build_int_cst (TREE_TYPE (n),
                                                  exact_log2 (limb_prec)));
          insert_before (g);
          if (gimple_assign_lhs (g) != n2)
            {
              g = gimple_build_assign (n2, NOP_EXPR, gimple_assign_lhs (g));
              insert_before (g);
            }
          g = gimple_build_assign (make_ssa_name (TREE_TYPE (n)),
                                   NEGATE_EXPR, n1);
          insert_before (g);
          g = gimple_build_assign (n4, BIT_AND_EXPR, gimple_assign_lhs (g),
                                   lpm1);
          insert_before (g);
        }
      else
        {
          tree lp = build_int_cst (TREE_TYPE (n), limb_prec);
          g = gimple_build_assign (n1, TRUNC_MOD_EXPR, n, lp);
          insert_before (g);
          g = gimple_build_assign (useless_type_conversion_p (sizetype,
                                                              TREE_TYPE (n))
                                   ? n2 : make_ssa_name (TREE_TYPE (n)),
                                   TRUNC_DIV_EXPR, n, lp);
          insert_before (g);
          if (gimple_assign_lhs (g) != n2)
            {
              g = gimple_build_assign (n2, NOP_EXPR, gimple_assign_lhs (g));
              insert_before (g);
            }
          g = gimple_build_assign (make_ssa_name (TREE_TYPE (n)),
                                   MINUS_EXPR, lp, n1);
          insert_before (g);
          g = gimple_build_assign (n4, TRUNC_MOD_EXPR, gimple_assign_lhs (g),
                                   lp);
          insert_before (g);
        }
      g = gimple_build_assign (make_ssa_name (boolean_type_node), NE_EXPR, n1,
                               build_zero_cst (TREE_TYPE (n)));
      insert_before (g);
      g = gimple_build_assign (n3, NOP_EXPR, gimple_assign_lhs (g));
      insert_before (g);
    }
  tree p = build_int_cst (sizetype,
                          prec / limb_prec - (prec % limb_prec == 0));
  if (rhs_code == RSHIFT_EXPR)
    {
      /* Lower
	   dst = src >> n;
	 as
	   unsigned n1 = n % limb_prec;
	   size_t n2 = n / limb_prec;
	   size_t n3 = n1 != 0;
	   unsigned n4 = (limb_prec - n1) % limb_prec;
	   size_t idx;
	   size_t p = prec / limb_prec - (prec % limb_prec == 0);
	   int signed_p = (typeof (src) -1) < 0;
	   for (idx = n2; idx < ((!signed_p && (prec % limb_prec == 0))
				 ? p : p - n3); ++idx)
	     dst[idx - n2] = (src[idx] >> n1) | (src[idx + n3] << n4);
	   limb_type ext;
	   if (prec % limb_prec == 0)
	     ext = src[p];
	   else if (signed_p)
	     ext = ((signed limb_type) (src[p] << (limb_prec
						   - (prec % limb_prec))))
		   >> (limb_prec - (prec % limb_prec));
	   else
	     ext = src[p] & (((limb_type) 1 << (prec % limb_prec)) - 1);
	   if (!signed_p && (prec % limb_prec == 0))
	     ;
	   else if (idx < prec / limb_prec)
	     {
	       dst[idx - n2] = (src[idx] >> n1) | (ext << n4);
	       ++idx;
	     }
	   idx -= n2;
	   if (signed_p)
	     {
	       dst[idx] = ((signed limb_type) ext) >> n1;
	       ext = ((signed limb_type) ext) >> (limb_prec - 1);
	     }
	   else
	     {
	       dst[idx] = ext >> n1;
	       ext = 0;
	     }
	   for (++idx; idx <= p; ++idx)
	     dst[idx] = ext;  */
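      /* E.g. (illustrative, assuming 64-bit limbs): for signed
	 _BitInt(256) src >> 5, n1 == 5, n2 == 0, n3 == 1, n4 == 59 and
	 p == 3: the loop emits dst[0] and dst[1] as
	 (src[idx] >> 5) | (src[idx + 1] << 59), the step after it emits
	 dst[2] = (src[2] >> 5) | (ext << 59) with ext = src[3], and
	 finally dst[3] = ((signed limb_type) ext) >> 5.  */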
      tree pmn3;
      if (TYPE_UNSIGNED (type) && prec % limb_prec == 0)
        pmn3 = p;
      else if (TREE_CODE (n3) == INTEGER_CST)
        pmn3 = int_const_binop (MINUS_EXPR, p, n3);
      else
        {
          pmn3 = make_ssa_name (sizetype);
          g = gimple_build_assign (pmn3, MINUS_EXPR, p, n3);
          insert_before (g);
        }
      g = gimple_build_cond (LT_EXPR, n2, pmn3, NULL_TREE, NULL_TREE);
      edge edge_true, edge_false;
      if_then (g, profile_probability::likely (), edge_true, edge_false);
      tree idx_next;
      tree idx = create_loop (n2, &idx_next);
      tree idxmn2 = make_ssa_name (sizetype);
      tree idxpn3 = make_ssa_name (sizetype);
      g = gimple_build_assign (idxmn2, MINUS_EXPR, idx, n2);
      insert_before (g);
      g = gimple_build_assign (idxpn3, PLUS_EXPR, idx, n3);
      insert_before (g);
      m_data_cnt = 0;
      tree t1 = handle_operand (rhs1, idx);
      m_first = false;
      g = gimple_build_assign (make_ssa_name (m_limb_type),
                               RSHIFT_EXPR, t1, n1);
      insert_before (g);
      t1 = gimple_assign_lhs (g);
      if (!integer_zerop (n3))
        {
          m_data_cnt = 0;
          tree t2 = handle_operand (rhs1, idxpn3);
          g = gimple_build_assign (make_ssa_name (m_limb_type),
                                   LSHIFT_EXPR, t2, n4);
          insert_before (g);
          t2 = gimple_assign_lhs (g);
          g = gimple_build_assign (make_ssa_name (m_limb_type),
                                   BIT_IOR_EXPR, t1, t2);
          insert_before (g);
          t1 = gimple_assign_lhs (g);
        }
      tree l = limb_access (TREE_TYPE (lhs), obj, idxmn2, true);
      g = gimple_build_assign (l, t1);
      insert_before (g);
      g = gimple_build_assign (idx_next, PLUS_EXPR, idx, size_one_node);
      insert_before (g);
      g = gimple_build_cond (LT_EXPR, idx_next, pmn3, NULL_TREE, NULL_TREE);
      insert_before (g);
      idx = make_ssa_name (sizetype);
      m_gsi = gsi_for_stmt (final_stmt);
      gphi *phi = create_phi_node (idx, gsi_bb (m_gsi));
      edge_false = find_edge (edge_false->src, gsi_bb (m_gsi));
      edge_true = EDGE_PRED (gsi_bb (m_gsi),
                             EDGE_PRED (gsi_bb (m_gsi), 0) == edge_false);
      add_phi_arg (phi, n2, edge_false, UNKNOWN_LOCATION);
      add_phi_arg (phi, idx_next, edge_true, UNKNOWN_LOCATION);
      m_data_cnt = 0;
      tree ms = handle_operand (rhs1, p);
      tree ext = ms;
      if (!types_compatible_p (TREE_TYPE (ms), m_limb_type))
        ext = add_cast (m_limb_type, ms);
      if (!(TYPE_UNSIGNED (type) && prec % limb_prec == 0)
          && !integer_zerop (n3))
        {
          g = gimple_build_cond (LT_EXPR, idx, p, NULL_TREE, NULL_TREE);
          if_then (g, profile_probability::likely (), edge_true, edge_false);
          m_data_cnt = 0;
          t1 = handle_operand (rhs1, idx);
          g = gimple_build_assign (make_ssa_name (m_limb_type),
                                   RSHIFT_EXPR, t1, n1);
          insert_before (g);
          t1 = gimple_assign_lhs (g);
          g = gimple_build_assign (make_ssa_name (m_limb_type),
                                   LSHIFT_EXPR, ext, n4);
          insert_before (g);
          tree t2 = gimple_assign_lhs (g);
          g = gimple_build_assign (make_ssa_name (m_limb_type),
                                   BIT_IOR_EXPR, t1, t2);
          insert_before (g);
          t1 = gimple_assign_lhs (g);
          idxmn2 = make_ssa_name (sizetype);
          g = gimple_build_assign (idxmn2, MINUS_EXPR, idx, n2);
          insert_before (g);
          l = limb_access (TREE_TYPE (lhs), obj, idxmn2, true);
          g = gimple_build_assign (l, t1);
          insert_before (g);
          idx_next = make_ssa_name (sizetype);
          g = gimple_build_assign (idx_next, PLUS_EXPR, idx, size_one_node);
          insert_before (g);
          m_gsi = gsi_for_stmt (final_stmt);
          tree nidx = make_ssa_name (sizetype);
          phi = create_phi_node (nidx, gsi_bb (m_gsi));
          edge_false = find_edge (edge_false->src, gsi_bb (m_gsi));
          edge_true = EDGE_PRED (gsi_bb (m_gsi),
                                 EDGE_PRED (gsi_bb (m_gsi), 0) == edge_false);
          add_phi_arg (phi, idx, edge_false, UNKNOWN_LOCATION);
          add_phi_arg (phi, idx_next, edge_true, UNKNOWN_LOCATION);
          idx = nidx;
        }
      g = gimple_build_assign (make_ssa_name (sizetype), MINUS_EXPR, idx, n2);
      insert_before (g);
      idx = gimple_assign_lhs (g);
      tree sext = ext;
      if (!TYPE_UNSIGNED (type))
        sext = add_cast (signed_type_for (m_limb_type), ext);
      g = gimple_build_assign (make_ssa_name (TREE_TYPE (sext)),
                               RSHIFT_EXPR, sext, n1);
      insert_before (g);
      t1 = gimple_assign_lhs (g);
      if (!TYPE_UNSIGNED (type))
        {
          t1 = add_cast (m_limb_type, t1);
          g = gimple_build_assign (make_ssa_name (TREE_TYPE (sext)),
                                   RSHIFT_EXPR, sext,
                                   build_int_cst (TREE_TYPE (n),
                                                  limb_prec - 1));
          insert_before (g);
          ext = add_cast (m_limb_type, gimple_assign_lhs (g));
        }
      else
        ext = build_zero_cst (m_limb_type);
      l = limb_access (TREE_TYPE (lhs), obj, idx, true);
      g = gimple_build_assign (l, t1);
      insert_before (g);
      g = gimple_build_assign (make_ssa_name (sizetype), PLUS_EXPR, idx,
                               size_one_node);
      insert_before (g);
      idx = gimple_assign_lhs (g);
      g = gimple_build_cond (LE_EXPR, idx, p, NULL_TREE, NULL_TREE);
      if_then (g, profile_probability::likely (), edge_true, edge_false);
      idx = create_loop (idx, &idx_next);
      l = limb_access (TREE_TYPE (lhs), obj, idx, true);
      g = gimple_build_assign (l, ext);
      insert_before (g);
      g = gimple_build_assign (idx_next, PLUS_EXPR, idx, size_one_node);
      insert_before (g);
      g = gimple_build_cond (LE_EXPR, idx_next, p, NULL_TREE, NULL_TREE);
      insert_before (g);
    }
  else
    {
      /* Lower
	   dst = src << n;
	 as
	   unsigned n1 = n % limb_prec;
	   size_t n2 = n / limb_prec;
	   size_t n3 = n1 != 0;
	   unsigned n4 = (limb_prec - n1) % limb_prec;
	   size_t idx;
	   size_t p = prec / limb_prec - (prec % limb_prec == 0);
	   for (idx = p; (ssize_t) idx >= (ssize_t) (n2 + n3); --idx)
	     dst[idx] = (src[idx - n2] << n1) | (src[idx - n2 - n3] >> n4);
	   if (n1)
	     {
	       dst[idx] = src[idx - n2] << n1;
	       --idx;
	     }
	   for (; (ssize_t) idx >= 0; --idx)
	     dst[idx] = 0;  */
3447 tree n2pn3;
3448 if (TREE_CODE (n2) == INTEGER_CST && TREE_CODE (n3) == INTEGER_CST)
3449 n2pn3 = int_const_binop (PLUS_EXPR, n2, n3);
3450 else
3451 {
3452 n2pn3 = make_ssa_name (sizetype);
3453 g = gimple_build_assign (n2pn3, PLUS_EXPR, n2, n3);
3454 insert_before (g);
3455 }
3456 /* For LSHIFT_EXPR, we can use handle_operand with non-INTEGER_CST
3457 idx even to access the most significant partial limb. */
3458 m_var_msb = true;
3459 if (integer_zerop (n3))
3460 /* For n3 == 0 p >= n2 + n3 is always true for all valid shift
3461 counts. Emit if (true) condition that can be optimized later. */
3462 g = gimple_build_cond (NE_EXPR, boolean_true_node, boolean_false_node,
3463 NULL_TREE, NULL_TREE);
3464 else
3465 g = gimple_build_cond (LE_EXPR, n2pn3, p, NULL_TREE, NULL_TREE);
3466 edge edge_true, edge_false;
3467 if_then (cond: g, prob: profile_probability::likely (), edge_true, edge_false);
3468 tree idx_next;
3469 tree idx = create_loop (init: p, idx_next: &idx_next);
3470 tree idxmn2 = make_ssa_name (sizetype);
3471 tree idxmn2mn3 = make_ssa_name (sizetype);
3472 g = gimple_build_assign (idxmn2, MINUS_EXPR, idx, n2);
3473 insert_before (g);
3474 g = gimple_build_assign (idxmn2mn3, MINUS_EXPR, idxmn2, n3);
3475 insert_before (g);
3476 m_data_cnt = 0;
3477 tree t1 = handle_operand (op: rhs1, idx: idxmn2);
3478 m_first = false;
3479 g = gimple_build_assign (make_ssa_name (var: m_limb_type),
3480 LSHIFT_EXPR, t1, n1);
3481 insert_before (g);
3482 t1 = gimple_assign_lhs (gs: g);
3483 if (!integer_zerop (n3))
3484 {
3485 m_data_cnt = 0;
3486 tree t2 = handle_operand (op: rhs1, idx: idxmn2mn3);
3487 g = gimple_build_assign (make_ssa_name (var: m_limb_type),
3488 RSHIFT_EXPR, t2, n4);
3489 insert_before (g);
3490 t2 = gimple_assign_lhs (gs: g);
3491 g = gimple_build_assign (make_ssa_name (var: m_limb_type),
3492 BIT_IOR_EXPR, t1, t2);
3493 insert_before (g);
3494 t1 = gimple_assign_lhs (gs: g);
3495 }
3496 tree l = limb_access (TREE_TYPE (lhs), var: obj, idx, write_p: true);
3497 g = gimple_build_assign (l, t1);
3498 insert_before (g);
3499 g = gimple_build_assign (idx_next, PLUS_EXPR, idx, size_int (-1));
3500 insert_before (g);
3501 tree sn2pn3 = add_cast (ssizetype, val: n2pn3);
3502 g = gimple_build_cond (GE_EXPR, add_cast (ssizetype, val: idx_next), sn2pn3,
3503 NULL_TREE, NULL_TREE);
3504 insert_before (g);
3505 idx = make_ssa_name (sizetype);
3506 m_gsi = gsi_for_stmt (final_stmt);
3507 gphi *phi = create_phi_node (idx, gsi_bb (i: m_gsi));
3508 edge_false = find_edge (edge_false->src, gsi_bb (i: m_gsi));
3509 edge_true = EDGE_PRED (gsi_bb (m_gsi),
3510 EDGE_PRED (gsi_bb (m_gsi), 0) == edge_false);
3511 add_phi_arg (phi, p, edge_false, UNKNOWN_LOCATION);
3512 add_phi_arg (phi, idx_next, edge_true, UNKNOWN_LOCATION);
3513 m_data_cnt = 0;
3514 if (!integer_zerop (n3))
3515 {
3516 g = gimple_build_cond (NE_EXPR, n3, size_zero_node,
3517 NULL_TREE, NULL_TREE);
3518 if_then (cond: g, prob: profile_probability::likely (), edge_true, edge_false);
3519 idxmn2 = make_ssa_name (sizetype);
3520 g = gimple_build_assign (idxmn2, MINUS_EXPR, idx, n2);
3521 insert_before (g);
3522 m_data_cnt = 0;
3523 t1 = handle_operand (op: rhs1, idx: idxmn2);
3524 g = gimple_build_assign (make_ssa_name (var: m_limb_type),
3525 LSHIFT_EXPR, t1, n1);
3526 insert_before (g);
3527 t1 = gimple_assign_lhs (gs: g);
3528 l = limb_access (TREE_TYPE (lhs), var: obj, idx, write_p: true);
3529 g = gimple_build_assign (l, t1);
3530 insert_before (g);
3531 idx_next = make_ssa_name (sizetype);
3532 g = gimple_build_assign (idx_next, PLUS_EXPR, idx, size_int (-1));
3533 insert_before (g);
3534 m_gsi = gsi_for_stmt (final_stmt);
3535 tree nidx = make_ssa_name (sizetype);
3536 phi = create_phi_node (nidx, gsi_bb (i: m_gsi));
3537 edge_false = find_edge (edge_false->src, gsi_bb (i: m_gsi));
3538 edge_true = EDGE_PRED (gsi_bb (m_gsi),
3539 EDGE_PRED (gsi_bb (m_gsi), 0) == edge_false);
3540 add_phi_arg (phi, idx, edge_false, UNKNOWN_LOCATION);
3541 add_phi_arg (phi, idx_next, edge_true, UNKNOWN_LOCATION);
3542 idx = nidx;
3543 }
3544 g = gimple_build_cond (GE_EXPR, add_cast (ssizetype, val: idx),
3545 ssize_int (0), NULL_TREE, NULL_TREE);
3546 if_then (cond: g, prob: profile_probability::likely (), edge_true, edge_false);
3547 idx = create_loop (init: idx, idx_next: &idx_next);
3548 l = limb_access (TREE_TYPE (lhs), var: obj, idx, write_p: true);
3549 g = gimple_build_assign (l, build_zero_cst (m_limb_type));
3550 insert_before (g);
3551 g = gimple_build_assign (idx_next, PLUS_EXPR, idx, size_int (-1));
3552 insert_before (g);
3553 g = gimple_build_cond (GE_EXPR, add_cast (ssizetype, val: idx_next),
3554 ssize_int (0), NULL_TREE, NULL_TREE);
3555 insert_before (g);
3556 }
3557}
3558
3559/* Lower large/huge _BitInt multiplication or division. */
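/* E.g. (illustrative only, assuming 64-bit limbs and no range information
   narrowing the operand precisions), for _BitInt(512) operands
     q = x / y;
   is lowered to a call to the libgcc helper
     .DIVMODBITINT (&q, 512, 0B, 0, &x, 512, &y, 512);
   where the second pointer/precision pair is the remainder (null here
   because only the quotient is needed); TRUNC_MOD_EXPR instead passes a
   null quotient pointer, and MULT_EXPR uses .MULBITINT.  */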

void
bitint_large_huge::lower_muldiv_stmt (tree obj, gimple *stmt)
{
  tree rhs1 = gimple_assign_rhs1 (stmt);
  tree rhs2 = gimple_assign_rhs2 (stmt);
  tree lhs = gimple_assign_lhs (stmt);
  tree_code rhs_code = gimple_assign_rhs_code (stmt);
  tree type = TREE_TYPE (rhs1);
  gcc_assert (TREE_CODE (type) == BITINT_TYPE
	      && bitint_precision_kind (type) >= bitint_prec_large);
  int prec = TYPE_PRECISION (type), prec1, prec2;
  rhs1 = handle_operand_addr (rhs1, stmt, NULL, &prec1);
  rhs2 = handle_operand_addr (rhs2, stmt, NULL, &prec2);
  if (obj == NULL_TREE)
    {
      int part = var_to_partition (m_map, lhs);
      gcc_assert (m_vars[part] != NULL_TREE);
      obj = m_vars[part];
      lhs = build_fold_addr_expr (obj);
    }
  else
    {
      lhs = build_fold_addr_expr (obj);
      lhs = force_gimple_operand_gsi (&m_gsi, lhs, true,
				      NULL_TREE, true, GSI_SAME_STMT);
    }
  tree sitype = lang_hooks.types.type_for_mode (SImode, 0);
  gimple *g;
  switch (rhs_code)
    {
    case MULT_EXPR:
      g = gimple_build_call_internal (IFN_MULBITINT, 6,
				      lhs, build_int_cst (sitype, prec),
				      rhs1, build_int_cst (sitype, prec1),
				      rhs2, build_int_cst (sitype, prec2));
      insert_before (g);
      break;
    case TRUNC_DIV_EXPR:
      g = gimple_build_call_internal (IFN_DIVMODBITINT, 8,
				      lhs, build_int_cst (sitype, prec),
				      null_pointer_node,
				      build_int_cst (sitype, 0),
				      rhs1, build_int_cst (sitype, prec1),
				      rhs2, build_int_cst (sitype, prec2));
      if (!stmt_ends_bb_p (stmt))
	gimple_call_set_nothrow (as_a <gcall *> (g), true);
      insert_before (g);
      break;
    case TRUNC_MOD_EXPR:
      g = gimple_build_call_internal (IFN_DIVMODBITINT, 8, null_pointer_node,
				      build_int_cst (sitype, 0),
				      lhs, build_int_cst (sitype, prec),
				      rhs1, build_int_cst (sitype, prec1),
				      rhs2, build_int_cst (sitype, prec2));
      if (!stmt_ends_bb_p (stmt))
	gimple_call_set_nothrow (as_a <gcall *> (g), true);
      insert_before (g);
      break;
    default:
      gcc_unreachable ();
    }
  if (stmt_ends_bb_p (stmt))
    {
      maybe_duplicate_eh_stmt (g, stmt);
      edge e1;
      edge_iterator ei;
      basic_block bb = gimple_bb (stmt);

      FOR_EACH_EDGE (e1, ei, bb->succs)
	if (e1->flags & EDGE_EH)
	  break;
      if (e1)
	{
	  edge e2 = split_block (gsi_bb (m_gsi), g);
	  m_gsi = gsi_after_labels (e2->dest);
	  add_eh_edge (e2->src, e1);
	}
    }
}

/* Lower large/huge _BitInt conversion to/from floating point.  */
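/* E.g. (illustrative only) with x being a signed _BitInt(256),
     d = (double) x;	    is lowered to   d = .BITINTTOFLOAT (&x, -256);
     x = (__typeof (x)) d;  is lowered to   .FLOATTOBITINT (&x, -256, d);
   where the negative precision denotes a signed _BitInt.  */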

void
bitint_large_huge::lower_float_conv_stmt (tree obj, gimple *stmt)
{
  tree rhs1 = gimple_assign_rhs1 (stmt);
  tree lhs = gimple_assign_lhs (stmt);
  tree_code rhs_code = gimple_assign_rhs_code (stmt);
  tree sitype = lang_hooks.types.type_for_mode (SImode, 0);
  gimple *g;
  if (rhs_code == FIX_TRUNC_EXPR)
    {
      int prec = TYPE_PRECISION (TREE_TYPE (lhs));
      if (!TYPE_UNSIGNED (TREE_TYPE (lhs)))
	prec = -prec;
      if (obj == NULL_TREE)
	{
	  int part = var_to_partition (m_map, lhs);
	  gcc_assert (m_vars[part] != NULL_TREE);
	  obj = m_vars[part];
	  lhs = build_fold_addr_expr (obj);
	}
      else
	{
	  lhs = build_fold_addr_expr (obj);
	  lhs = force_gimple_operand_gsi (&m_gsi, lhs, true,
					  NULL_TREE, true, GSI_SAME_STMT);
	}
      scalar_mode from_mode
	= as_a <scalar_mode> (TYPE_MODE (TREE_TYPE (rhs1)));
#ifdef HAVE_SFmode
      /* IEEE single is a full superset of both IEEE half and
	 bfloat formats, so convert to float first and then to _BitInt
	 to avoid the need for another two library routines.  */
      if ((REAL_MODE_FORMAT (from_mode) == &arm_bfloat_half_format
	   || REAL_MODE_FORMAT (from_mode) == &ieee_half_format)
	  && REAL_MODE_FORMAT (SFmode) == &ieee_single_format)
	{
	  tree type = lang_hooks.types.type_for_mode (SFmode, 0);
	  if (type)
	    rhs1 = add_cast (type, rhs1);
	}
#endif
      g = gimple_build_call_internal (IFN_FLOATTOBITINT, 3,
				      lhs, build_int_cst (sitype, prec),
				      rhs1);
      insert_before (g);
    }
  else
    {
      int prec;
      rhs1 = handle_operand_addr (rhs1, stmt, NULL, &prec);
      g = gimple_build_call_internal (IFN_BITINTTOFLOAT, 2,
				      rhs1, build_int_cst (sitype, prec));
      gimple_call_set_lhs (g, lhs);
      if (!stmt_ends_bb_p (stmt))
	gimple_call_set_nothrow (as_a <gcall *> (g), true);
      gsi_replace (&m_gsi, g, true);
    }
}

/* Helper method for lower_addsub_overflow and lower_mul_overflow.
   If CHECK_ZERO is true, the caller wants to check whether all bits
   in [START, END) are zero, otherwise whether those bits are either
   all zero or all ones.  L is the limb with index LIMB, START and END
   are measured in bits.  */
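/* E.g. (illustrative, limb_prec == 64) for START 100 and END 130 the
   bits of interest live in limbs 1 and 2.  For LIMB 1 the low 36 bits
   are shifted away (arithmetically for !CHECK_ZERO, so the result can
   be compared against 0 or all ones), while for LIMB 2 only bits
   [0, 2) are kept, either by masking with 3 (CHECK_ZERO) or by a
   left/right shift pair which sign extends bit 1 across the limb.  */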

tree
bitint_large_huge::arith_overflow_extract_bits (unsigned int start,
						unsigned int end, tree l,
						unsigned int limb,
						bool check_zero)
{
  unsigned startlimb = start / limb_prec;
  unsigned endlimb = (end - 1) / limb_prec;
  gimple *g;

  if ((start % limb_prec) == 0 && (end % limb_prec) == 0)
    return l;
  if (startlimb == endlimb && limb == startlimb)
    {
      if (check_zero)
	{
	  wide_int w = wi::shifted_mask (start % limb_prec,
					 end - start, false, limb_prec);
	  g = gimple_build_assign (make_ssa_name (m_limb_type),
				   BIT_AND_EXPR, l,
				   wide_int_to_tree (m_limb_type, w));
	  insert_before (g);
	  return gimple_assign_lhs (g);
	}
      unsigned int shift = start % limb_prec;
      if ((end % limb_prec) != 0)
	{
	  unsigned int lshift = (-end) % limb_prec;
	  shift += lshift;
	  g = gimple_build_assign (make_ssa_name (m_limb_type),
				   LSHIFT_EXPR, l,
				   build_int_cst (unsigned_type_node,
						  lshift));
	  insert_before (g);
	  l = gimple_assign_lhs (g);
	}
      l = add_cast (signed_type_for (m_limb_type), l);
      g = gimple_build_assign (make_ssa_name (TREE_TYPE (l)),
			       RSHIFT_EXPR, l,
			       build_int_cst (unsigned_type_node, shift));
      insert_before (g);
      return add_cast (m_limb_type, gimple_assign_lhs (g));
    }
  else if (limb == startlimb)
    {
      if ((start % limb_prec) == 0)
	return l;
      if (!check_zero)
	l = add_cast (signed_type_for (m_limb_type), l);
      g = gimple_build_assign (make_ssa_name (TREE_TYPE (l)),
			       RSHIFT_EXPR, l,
			       build_int_cst (unsigned_type_node,
					      start % limb_prec));
      insert_before (g);
      l = gimple_assign_lhs (g);
      if (!check_zero)
	l = add_cast (m_limb_type, l);
      return l;
    }
  else if (limb == endlimb)
    {
      if ((end % limb_prec) == 0)
	return l;
      if (check_zero)
	{
	  wide_int w = wi::mask (end % limb_prec, false, limb_prec);
	  g = gimple_build_assign (make_ssa_name (m_limb_type),
				   BIT_AND_EXPR, l,
				   wide_int_to_tree (m_limb_type, w));
	  insert_before (g);
	  return gimple_assign_lhs (g);
	}
      unsigned int shift = (-end) % limb_prec;
      g = gimple_build_assign (make_ssa_name (m_limb_type),
			       LSHIFT_EXPR, l,
			       build_int_cst (unsigned_type_node, shift));
      insert_before (g);
      l = add_cast (signed_type_for (m_limb_type), gimple_assign_lhs (g));
      g = gimple_build_assign (make_ssa_name (TREE_TYPE (l)),
			       RSHIFT_EXPR, l,
			       build_int_cst (unsigned_type_node, shift));
      insert_before (g);
      return add_cast (m_limb_type, gimple_assign_lhs (g));
    }
  return l;
}

/* Helper method for lower_addsub_overflow and lower_mul_overflow.  Store
   result including overflow flag into the right locations.  */
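/* E.g. (illustrative) for a lowered .ADD_OVERFLOW with a two limb
   _Complex _BitInt result the value ends up in the first two limbs of
   OBJ, OVF cast to a limb is stored right after them and any limbs left
   are cleared, whereas for .UBSAN_CHECK_{ADD,SUB,MUL} nothing is stored
   and OVF just guards a call to the ubsan overflow diagnostic
   routine.  */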

void
bitint_large_huge::finish_arith_overflow (tree var, tree obj, tree type,
					  tree ovf, tree lhs, tree orig_obj,
					  gimple *stmt, tree_code code)
{
  gimple *g;

  if (obj == NULL_TREE
      && (TREE_CODE (type) != BITINT_TYPE
	  || bitint_precision_kind (type) < bitint_prec_large))
    {
      /* Add support for 3 or more limbs filled in from normal integral
	 type if this assert fails.  If no target chooses limb mode smaller
	 than half of largest supported normal integral type, this will not
	 be needed.  */
      gcc_assert (TYPE_PRECISION (type) <= 2 * limb_prec);
      tree lhs_type = type;
      if (TREE_CODE (type) == BITINT_TYPE
	  && bitint_precision_kind (type) == bitint_prec_middle)
	lhs_type = build_nonstandard_integer_type (TYPE_PRECISION (type),
						   TYPE_UNSIGNED (type));
      tree r1 = limb_access (NULL_TREE, var, size_int (0), true);
      g = gimple_build_assign (make_ssa_name (m_limb_type), r1);
      insert_before (g);
      r1 = gimple_assign_lhs (g);
      if (!useless_type_conversion_p (lhs_type, TREE_TYPE (r1)))
	r1 = add_cast (lhs_type, r1);
      if (TYPE_PRECISION (lhs_type) > limb_prec)
	{
	  tree r2 = limb_access (NULL_TREE, var, size_int (1), true);
	  g = gimple_build_assign (make_ssa_name (m_limb_type), r2);
	  insert_before (g);
	  r2 = gimple_assign_lhs (g);
	  r2 = add_cast (lhs_type, r2);
	  g = gimple_build_assign (make_ssa_name (lhs_type), LSHIFT_EXPR, r2,
				   build_int_cst (unsigned_type_node,
						  limb_prec));
	  insert_before (g);
	  g = gimple_build_assign (make_ssa_name (lhs_type), BIT_IOR_EXPR, r1,
				   gimple_assign_lhs (g));
	  insert_before (g);
	  r1 = gimple_assign_lhs (g);
	}
      if (lhs_type != type)
	r1 = add_cast (type, r1);
      ovf = add_cast (lhs_type, ovf);
      if (lhs_type != type)
	ovf = add_cast (type, ovf);
      g = gimple_build_assign (lhs, COMPLEX_EXPR, r1, ovf);
      m_gsi = gsi_for_stmt (stmt);
      gsi_replace (&m_gsi, g, true);
    }
  else
    {
      unsigned HOST_WIDE_INT nelts = 0;
      tree atype = NULL_TREE;
      if (obj)
	{
	  nelts = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (obj))) / limb_prec;
	  if (orig_obj == NULL_TREE)
	    nelts >>= 1;
	  atype = build_array_type_nelts (m_limb_type, nelts);
	}
      if (var && obj)
	{
	  tree v1, v2;
	  tree zero;
	  if (orig_obj == NULL_TREE)
	    {
	      zero = build_zero_cst (build_pointer_type (TREE_TYPE (obj)));
	      v1 = build2 (MEM_REF, atype,
			   build_fold_addr_expr (unshare_expr (obj)), zero);
	    }
	  else if (!useless_type_conversion_p (atype, TREE_TYPE (obj)))
	    v1 = build1 (VIEW_CONVERT_EXPR, atype, unshare_expr (obj));
	  else
	    v1 = unshare_expr (obj);
	  zero = build_zero_cst (build_pointer_type (TREE_TYPE (var)));
	  v2 = build2 (MEM_REF, atype, build_fold_addr_expr (var), zero);
	  g = gimple_build_assign (v1, v2);
	  insert_before (g);
	}
      if (orig_obj == NULL_TREE && obj)
	{
	  ovf = add_cast (m_limb_type, ovf);
	  tree l = limb_access (NULL_TREE, obj, size_int (nelts), true);
	  g = gimple_build_assign (l, ovf);
	  insert_before (g);
	  if (nelts > 1)
	    {
	      atype = build_array_type_nelts (m_limb_type, nelts - 1);
	      tree off = build_int_cst (build_pointer_type (TREE_TYPE (obj)),
					(nelts + 1) * m_limb_size);
	      tree v1 = build2 (MEM_REF, atype,
				build_fold_addr_expr (unshare_expr (obj)),
				off);
	      g = gimple_build_assign (v1, build_zero_cst (atype));
	      insert_before (g);
	    }
	}
      else if (TREE_CODE (TREE_TYPE (lhs)) == COMPLEX_TYPE)
	{
	  imm_use_iterator ui;
	  use_operand_p use_p;
	  FOR_EACH_IMM_USE_FAST (use_p, ui, lhs)
	    {
	      g = USE_STMT (use_p);
	      if (!is_gimple_assign (g)
		  || gimple_assign_rhs_code (g) != IMAGPART_EXPR)
		continue;
	      tree lhs2 = gimple_assign_lhs (g);
	      gimple *use_stmt;
	      single_imm_use (lhs2, &use_p, &use_stmt);
	      lhs2 = gimple_assign_lhs (use_stmt);
	      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
	      if (useless_type_conversion_p (TREE_TYPE (lhs2),
					     TREE_TYPE (ovf)))
		g = gimple_build_assign (lhs2, ovf);
	      else
		g = gimple_build_assign (lhs2, NOP_EXPR, ovf);
	      gsi_replace (&gsi, g, true);
	      if (gsi_stmt (m_gsi) == use_stmt)
		m_gsi = gsi_for_stmt (g);
	      break;
	    }
	}
      else if (ovf != boolean_false_node)
	{
	  g = gimple_build_cond (NE_EXPR, ovf, boolean_false_node,
				 NULL_TREE, NULL_TREE);
	  edge edge_true, edge_false;
	  if_then (g, profile_probability::very_unlikely (),
		   edge_true, edge_false);
	  tree zero = build_zero_cst (TREE_TYPE (lhs));
	  tree fn = ubsan_build_overflow_builtin (code, m_loc,
						  TREE_TYPE (lhs),
						  zero, zero, NULL);
	  force_gimple_operand_gsi (&m_gsi, fn, true, NULL_TREE,
				    true, GSI_SAME_STMT);
	  m_gsi = gsi_after_labels (edge_true->dest);
	}
    }
  if (var)
    {
      tree clobber = build_clobber (TREE_TYPE (var), CLOBBER_STORAGE_END);
      g = gimple_build_assign (var, clobber);
      gsi_insert_after (&m_gsi, g, GSI_SAME_STMT);
    }
}

/* Helper function for lower_addsub_overflow and lower_mul_overflow.
   Given precisions of result TYPE (PREC), argument 0 precision PREC0,
   argument 1 precision PREC1 and minimum precision for the result
   PREC2, compute *START, *END, *CHECK_ZERO and return OVF.  */
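/* E.g. (illustrative) for a signed 8-bit result of adding two values
   known to fit into signed 8 bits (PREC 8, PREC0 -8, PREC1 -8, with the
   caller computing PREC2 9), overflow occurs unless bits [7, 9) of the
   infinite precision result are either all zeros or all ones, so
   *START 7, *END 9, *CHECK_ZERO false and NULL_TREE is returned to tell
   the caller to test those bits.  */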

static tree
arith_overflow (tree_code code, tree type, int prec, int prec0, int prec1,
		int prec2, unsigned *start, unsigned *end, bool *check_zero)
{
  *start = 0;
  *end = 0;
  *check_zero = true;
  /* Ignore this special rule for subtraction: even if both
     prec0 >= 0 and prec1 >= 0, their difference can be negative
     in infinite precision.  */
  if (code != MINUS_EXPR && prec0 >= 0 && prec1 >= 0)
    {
      /* Result in [0, prec2) is unsigned, if prec > prec2,
	 all bits above it will be zero.  */
      if ((prec - !TYPE_UNSIGNED (type)) >= prec2)
	return boolean_false_node;
      else
	{
	  /* ovf if any of the bits in [start, end) is non-zero.  */
	  *start = prec - !TYPE_UNSIGNED (type);
	  *end = prec2;
	}
    }
  else if (TYPE_UNSIGNED (type))
    {
      /* If result in [0, prec2) is signed and if prec > prec2,
	 all bits above it will be sign bit copies.  */
      if (prec >= prec2)
	{
	  /* ovf if bit prec - 1 is non-zero.  */
	  *start = prec - 1;
	  *end = prec;
	}
      else
	{
	  /* ovf if any of the bits in [start, end) is non-zero.  */
	  *start = prec;
	  *end = prec2;
	}
    }
  else if (prec >= prec2)
    return boolean_false_node;
  else
    {
      /* ovf if [start, end) bits aren't all zeros or all ones.  */
      *start = prec - 1;
      *end = prec2;
      *check_zero = false;
    }
  return NULL_TREE;
}

/* Lower a .{ADD,SUB}_OVERFLOW call with at least one large/huge _BitInt
   argument or return type _Complex large/huge _BitInt.  */
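/* E.g. (illustrative, 64-bit limbs) for
     _Complex _BitInt(255) r = .ADD_OVERFLOW (a, b);
   the addition is performed two limbs per iteration of a loop, with
   straight line code for the most significant partial limb; the bits
   above bit 254 of the infinite precision result are tested using
   arith_overflow_extract_bits and the value plus overflow flag are
   stored through finish_arith_overflow.  */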

void
bitint_large_huge::lower_addsub_overflow (tree obj, gimple *stmt)
{
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  tree lhs = gimple_call_lhs (stmt);
  gimple *g;

  if (!lhs)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
      gsi_remove (&gsi, true);
      return;
    }
  gimple *final_stmt = gsi_stmt (m_gsi);
  tree type = TREE_TYPE (lhs);
  if (TREE_CODE (type) == COMPLEX_TYPE)
    type = TREE_TYPE (type);
  int prec = TYPE_PRECISION (type);
  int prec0 = range_to_prec (arg0, stmt);
  int prec1 = range_to_prec (arg1, stmt);
  /* If PREC0 >= 0 && PREC1 >= 0 and CODE is not MINUS_EXPR, PREC2 is
     the minimum unsigned precision of any possible operation's
     result, otherwise it is the minimum signed precision.
     Some examples:
     If PREC0 or PREC1 is 8, it means that argument is [0, 0xff],
     if PREC0 or PREC1 is 10, it means that argument is [0, 0x3ff],
     if PREC0 or PREC1 is -8, it means that argument is [-0x80, 0x7f],
     if PREC0 or PREC1 is -10, it means that argument is [-0x200, 0x1ff].
     PREC0 CODE PREC1  RESULT           PREC2  SIGNED vs. UNSIGNED
     8     +    8      [0, 0x1fe]       9      UNSIGNED
     8     +    10     [0, 0x4fe]       11     UNSIGNED
     -8    +    -8     [-0x100, 0xfe]   9      SIGNED
     -8    +    -10    [-0x280, 0x27e]  11     SIGNED
     8     +    -8     [-0x80, 0x17e]   10     SIGNED
     8     +    -10    [-0x200, 0x2fe]  11     SIGNED
     10    +    -8     [-0x80, 0x47e]   12     SIGNED
     8     -    8      [-0xff, 0xff]    9      SIGNED
     8     -    10     [-0x3ff, 0xff]   11     SIGNED
     10    -    8      [-0xff, 0x3ff]   11     SIGNED
     -8    -    -8     [-0xff, 0xff]    9      SIGNED
     -8    -    -10    [-0x27f, 0x27f]  11     SIGNED
     -10   -    -8     [-0x27f, 0x27f]  11     SIGNED
     8     -    -8     [-0x7f, 0x17f]   10     SIGNED
     8     -    -10    [-0x1ff, 0x2ff]  11     SIGNED
     10    -    -8     [-0x7f, 0x47f]   12     SIGNED
     -8    -    8      [-0x17f, 0x7f]   10     SIGNED
     -8    -    10     [-0x47f, 0x7f]   12     SIGNED
     -10   -    8      [-0x2ff, 0x1ff]  11     SIGNED  */
  int prec2 = MAX (prec0 < 0 ? -prec0 : prec0,
		   prec1 < 0 ? -prec1 : prec1);
  /* If operands are either both signed or both unsigned,
     we need just one additional bit.  */
  prec2 = (((prec0 < 0) == (prec1 < 0)
	    /* If one operand is signed and one unsigned and
	       the signed one has larger precision, we need
	       just one extra bit, otherwise two.  */
	    || (prec0 < 0 ? (prec2 == -prec0 && prec2 != prec1)
			  : (prec2 == -prec1 && prec2 != prec0)))
	   ? prec2 + 1 : prec2 + 2);
  int prec3 = MAX (prec0 < 0 ? -prec0 : prec0,
		   prec1 < 0 ? -prec1 : prec1);
  prec3 = MAX (prec3, prec);
  tree var = NULL_TREE;
  tree orig_obj = obj;
  if (obj == NULL_TREE
      && TREE_CODE (type) == BITINT_TYPE
      && bitint_precision_kind (type) >= bitint_prec_large
      && m_names
      && bitmap_bit_p (m_names, SSA_NAME_VERSION (lhs)))
    {
      int part = var_to_partition (m_map, lhs);
      gcc_assert (m_vars[part] != NULL_TREE);
      obj = m_vars[part];
      if (TREE_TYPE (lhs) == type)
	orig_obj = obj;
    }
  if (TREE_CODE (type) != BITINT_TYPE
      || bitint_precision_kind (type) < bitint_prec_large)
    {
      unsigned HOST_WIDE_INT nelts = CEIL (prec, limb_prec);
      tree atype = build_array_type_nelts (m_limb_type, nelts);
      var = create_tmp_var (atype);
    }

  enum tree_code code;
  switch (gimple_call_internal_fn (stmt))
    {
    case IFN_ADD_OVERFLOW:
    case IFN_UBSAN_CHECK_ADD:
      code = PLUS_EXPR;
      break;
    case IFN_SUB_OVERFLOW:
    case IFN_UBSAN_CHECK_SUB:
      code = MINUS_EXPR;
      break;
    default:
      gcc_unreachable ();
    }
  unsigned start, end;
  bool check_zero;
  tree ovf = arith_overflow (code, type, prec, prec0, prec1, prec2,
			     &start, &end, &check_zero);

  unsigned startlimb, endlimb;
  if (ovf)
    {
      startlimb = ~0U;
      endlimb = ~0U;
    }
  else
    {
      startlimb = start / limb_prec;
      endlimb = (end - 1) / limb_prec;
    }

  int prec4 = ovf != NULL_TREE ? prec : prec3;
  bitint_prec_kind kind = bitint_precision_kind (prec4);
  unsigned cnt, rem = 0, fin = 0;
  tree idx = NULL_TREE, idx_first = NULL_TREE, idx_next = NULL_TREE;
  bool last_ovf = (ovf == NULL_TREE
		   && CEIL (prec2, limb_prec) > CEIL (prec3, limb_prec));
  if (kind != bitint_prec_huge)
    cnt = CEIL (prec4, limb_prec) + last_ovf;
  else
    {
      rem = (prec4 % (2 * limb_prec));
      fin = (prec4 - rem) / limb_prec;
      cnt = 2 + CEIL (rem, limb_prec) + last_ovf;
      idx = idx_first = create_loop (size_zero_node, &idx_next);
    }

  if (kind == bitint_prec_huge)
    m_upwards_2limb = fin;
  m_upwards = true;

  tree type0 = TREE_TYPE (arg0);
  tree type1 = TREE_TYPE (arg1);
  int prec5 = prec3;
  if (bitint_precision_kind (prec5) < bitint_prec_large)
    prec5 = MAX (TYPE_PRECISION (type0), TYPE_PRECISION (type1));
  if (TYPE_PRECISION (type0) < prec5)
    {
      type0 = build_bitint_type (prec5, TYPE_UNSIGNED (type0));
      if (TREE_CODE (arg0) == INTEGER_CST)
	arg0 = fold_convert (type0, arg0);
    }
  if (TYPE_PRECISION (type1) < prec5)
    {
      type1 = build_bitint_type (prec5, TYPE_UNSIGNED (type1));
      if (TREE_CODE (arg1) == INTEGER_CST)
	arg1 = fold_convert (type1, arg1);
    }
  unsigned int data_cnt = 0;
  tree last_rhs1 = NULL_TREE, last_rhs2 = NULL_TREE;
  tree cmp = build_zero_cst (m_limb_type);
  unsigned prec_limbs = CEIL ((unsigned) prec, limb_prec);
  tree ovf_out = NULL_TREE, cmp_out = NULL_TREE;
  for (unsigned i = 0; i < cnt; i++)
    {
      m_data_cnt = 0;
      tree rhs1, rhs2;
      if (kind != bitint_prec_huge)
	idx = size_int (i);
      else if (i >= 2)
	idx = size_int (fin + i - 2);
      if (!last_ovf || i < cnt - 1)
	{
	  if (type0 != TREE_TYPE (arg0))
	    rhs1 = handle_cast (type0, arg0, idx);
	  else
	    rhs1 = handle_operand (arg0, idx);
	  if (type1 != TREE_TYPE (arg1))
	    rhs2 = handle_cast (type1, arg1, idx);
	  else
	    rhs2 = handle_operand (arg1, idx);
	  if (i == 0)
	    data_cnt = m_data_cnt;
	  if (!useless_type_conversion_p (m_limb_type, TREE_TYPE (rhs1)))
	    rhs1 = add_cast (m_limb_type, rhs1);
	  if (!useless_type_conversion_p (m_limb_type, TREE_TYPE (rhs2)))
	    rhs2 = add_cast (m_limb_type, rhs2);
	  last_rhs1 = rhs1;
	  last_rhs2 = rhs2;
	}
      else
	{
	  m_data_cnt = data_cnt;
	  if (TYPE_UNSIGNED (type0))
	    rhs1 = build_zero_cst (m_limb_type);
	  else
	    {
	      rhs1 = add_cast (signed_type_for (m_limb_type), last_rhs1);
	      if (TREE_CODE (rhs1) == INTEGER_CST)
		rhs1 = build_int_cst (m_limb_type,
				      tree_int_cst_sgn (rhs1) < 0 ? -1 : 0);
	      else
		{
		  tree lpm1 = build_int_cst (unsigned_type_node,
					     limb_prec - 1);
		  g = gimple_build_assign (make_ssa_name (TREE_TYPE (rhs1)),
					   RSHIFT_EXPR, rhs1, lpm1);
		  insert_before (g);
		  rhs1 = add_cast (m_limb_type, gimple_assign_lhs (g));
		}
	    }
	  if (TYPE_UNSIGNED (type1))
	    rhs2 = build_zero_cst (m_limb_type);
	  else
	    {
	      rhs2 = add_cast (signed_type_for (m_limb_type), last_rhs2);
	      if (TREE_CODE (rhs2) == INTEGER_CST)
		rhs2 = build_int_cst (m_limb_type,
				      tree_int_cst_sgn (rhs2) < 0 ? -1 : 0);
	      else
		{
		  tree lpm1 = build_int_cst (unsigned_type_node,
					     limb_prec - 1);
		  g = gimple_build_assign (make_ssa_name (TREE_TYPE (rhs2)),
					   RSHIFT_EXPR, rhs2, lpm1);
		  insert_before (g);
		  rhs2 = add_cast (m_limb_type, gimple_assign_lhs (g));
		}
	    }
	}
      tree rhs = handle_plus_minus (code, rhs1, rhs2, idx);
      if (ovf != boolean_false_node)
	{
	  if (tree_fits_uhwi_p (idx))
	    {
	      unsigned limb = tree_to_uhwi (idx);
	      if (limb >= startlimb && limb <= endlimb)
		{
		  tree l = arith_overflow_extract_bits (start, end, rhs,
							limb, check_zero);
		  tree this_ovf = make_ssa_name (boolean_type_node);
		  if (ovf == NULL_TREE && !check_zero)
		    {
		      cmp = l;
		      g = gimple_build_assign (make_ssa_name (m_limb_type),
					       PLUS_EXPR, l,
					       build_int_cst (m_limb_type,
							      1));
		      insert_before (g);
		      g = gimple_build_assign (this_ovf, GT_EXPR,
					       gimple_assign_lhs (g),
					       build_int_cst (m_limb_type,
							      1));
		    }
		  else
		    g = gimple_build_assign (this_ovf, NE_EXPR, l, cmp);
		  insert_before (g);
		  if (ovf == NULL_TREE)
		    ovf = this_ovf;
		  else
		    {
		      tree b = make_ssa_name (boolean_type_node);
		      g = gimple_build_assign (b, BIT_IOR_EXPR, ovf,
					       this_ovf);
		      insert_before (g);
		      ovf = b;
		    }
		}
	    }
	  else if (startlimb < fin)
	    {
	      if (m_first && startlimb + 2 < fin)
		{
		  tree data_out;
		  ovf = prepare_data_in_out (boolean_false_node, idx,
					     &data_out);
		  ovf_out = m_data.pop ();
		  m_data.pop ();
		  if (!check_zero)
		    {
		      cmp = prepare_data_in_out (cmp, idx, &data_out);
		      cmp_out = m_data.pop ();
		      m_data.pop ();
		    }
		}
	      if (i != 0 || startlimb != fin - 1)
		{
		  tree_code cmp_code;
		  bool single_comparison
		    = (startlimb + 2 >= fin || (startlimb & 1) != (i & 1));
		  if (!single_comparison)
		    {
		      cmp_code = GE_EXPR;
		      if (!check_zero && (start % limb_prec) == 0)
			single_comparison = true;
		    }
		  else if ((startlimb & 1) == (i & 1))
		    cmp_code = EQ_EXPR;
		  else
		    cmp_code = GT_EXPR;
		  g = gimple_build_cond (cmp_code, idx, size_int (startlimb),
					 NULL_TREE, NULL_TREE);
		  edge edge_true_true, edge_true_false, edge_false;
		  gimple *g2 = NULL;
		  if (!single_comparison)
		    g2 = gimple_build_cond (NE_EXPR, idx,
					    size_int (startlimb), NULL_TREE,
					    NULL_TREE);
		  if_then_if_then_else (g, g2, profile_probability::likely (),
					profile_probability::likely (),
					edge_true_true, edge_true_false,
					edge_false);
		  unsigned tidx = startlimb + (cmp_code == GT_EXPR);
		  tree l = arith_overflow_extract_bits (start, end, rhs,
							tidx, check_zero);
		  tree this_ovf = make_ssa_name (boolean_type_node);
		  if (cmp_code != GT_EXPR && !check_zero)
		    {
		      g = gimple_build_assign (make_ssa_name (m_limb_type),
					       PLUS_EXPR, l,
					       build_int_cst (m_limb_type,
							      1));
		      insert_before (g);
		      g = gimple_build_assign (this_ovf, GT_EXPR,
					       gimple_assign_lhs (g),
					       build_int_cst (m_limb_type,
							      1));
		    }
		  else
		    g = gimple_build_assign (this_ovf, NE_EXPR, l, cmp);
		  insert_before (g);
		  if (cmp_code == GT_EXPR)
		    {
		      tree t = make_ssa_name (boolean_type_node);
		      g = gimple_build_assign (t, BIT_IOR_EXPR, ovf,
					       this_ovf);
		      insert_before (g);
		      this_ovf = t;
		    }
		  tree this_ovf2 = NULL_TREE;
		  if (!single_comparison)
		    {
		      m_gsi = gsi_after_labels (edge_true_true->src);
		      tree t = make_ssa_name (boolean_type_node);
		      g = gimple_build_assign (t, NE_EXPR, rhs, cmp);
		      insert_before (g);
		      this_ovf2 = make_ssa_name (boolean_type_node);
		      g = gimple_build_assign (this_ovf2, BIT_IOR_EXPR,
					       ovf, t);
		      insert_before (g);
		    }
		  m_gsi = gsi_after_labels (edge_true_false->dest);
		  tree t;
		  if (i == 1 && ovf_out)
		    t = ovf_out;
		  else
		    t = make_ssa_name (boolean_type_node);
		  gphi *phi = create_phi_node (t, edge_true_false->dest);
		  add_phi_arg (phi, this_ovf, edge_true_false,
			       UNKNOWN_LOCATION);
		  add_phi_arg (phi, ovf ? ovf
				    : boolean_false_node, edge_false,
			       UNKNOWN_LOCATION);
		  if (edge_true_true)
		    add_phi_arg (phi, this_ovf2, edge_true_true,
				 UNKNOWN_LOCATION);
		  ovf = t;
		  if (!check_zero && cmp_code != GT_EXPR)
		    {
		      t = cmp_out ? cmp_out : make_ssa_name (m_limb_type);
		      phi = create_phi_node (t, edge_true_false->dest);
		      add_phi_arg (phi, l, edge_true_false,
				   UNKNOWN_LOCATION);
		      add_phi_arg (phi, cmp, edge_false, UNKNOWN_LOCATION);
		      if (edge_true_true)
			add_phi_arg (phi, cmp, edge_true_true,
				     UNKNOWN_LOCATION);
		      cmp = t;
		    }
		}
	    }
	}

      if (var || obj)
	{
	  if (tree_fits_uhwi_p (idx) && tree_to_uhwi (idx) >= prec_limbs)
	    ;
	  else if (!tree_fits_uhwi_p (idx)
		   && (unsigned) prec < (fin - (i == 0)) * limb_prec)
	    {
	      bool single_comparison
		= (((unsigned) prec % limb_prec) == 0
		   || prec_limbs + 1 >= fin
		   || (prec_limbs & 1) == (i & 1));
	      g = gimple_build_cond (LE_EXPR, idx, size_int (prec_limbs - 1),
				     NULL_TREE, NULL_TREE);
	      gimple *g2 = NULL;
	      if (!single_comparison)
		g2 = gimple_build_cond (EQ_EXPR, idx,
					size_int (prec_limbs - 1),
					NULL_TREE, NULL_TREE);
	      edge edge_true_true, edge_true_false, edge_false;
	      if_then_if_then_else (g, g2, profile_probability::likely (),
				    profile_probability::unlikely (),
				    edge_true_true, edge_true_false,
				    edge_false);
	      tree l = limb_access (type, var ? var : obj, idx, true);
	      g = gimple_build_assign (l, rhs);
	      insert_before (g);
	      if (!single_comparison)
		{
		  m_gsi = gsi_after_labels (edge_true_true->src);
		  tree plm1idx = size_int (prec_limbs - 1);
		  tree plm1type = limb_access_type (type, plm1idx);
		  l = limb_access (type, var ? var : obj, plm1idx, true);
		  if (!useless_type_conversion_p (plm1type,
						  TREE_TYPE (rhs)))
		    rhs = add_cast (plm1type, rhs);
		  if (!useless_type_conversion_p (TREE_TYPE (l),
						  TREE_TYPE (rhs)))
		    rhs = add_cast (TREE_TYPE (l), rhs);
		  g = gimple_build_assign (l, rhs);
		  insert_before (g);
		}
	      m_gsi = gsi_after_labels (edge_true_false->dest);
	    }
	  else
	    {
	      tree l = limb_access (type, var ? var : obj, idx, true);
	      if (!useless_type_conversion_p (TREE_TYPE (l),
					      TREE_TYPE (rhs)))
		rhs = add_cast (TREE_TYPE (l), rhs);
	      g = gimple_build_assign (l, rhs);
	      insert_before (g);
	    }
	}
      m_first = false;
      if (kind == bitint_prec_huge && i <= 1)
	{
	  if (i == 0)
	    {
	      idx = make_ssa_name (sizetype);
	      g = gimple_build_assign (idx, PLUS_EXPR, idx_first,
				       size_one_node);
	      insert_before (g);
	    }
	  else
	    {
	      g = gimple_build_assign (idx_next, PLUS_EXPR, idx_first,
				       size_int (2));
	      insert_before (g);
	      g = gimple_build_cond (NE_EXPR, idx_next, size_int (fin),
				     NULL_TREE, NULL_TREE);
	      insert_before (g);
	      m_gsi = gsi_for_stmt (final_stmt);
	      m_bb = NULL;
	    }
	}
    }

  finish_arith_overflow (var, obj, type, ovf, lhs, orig_obj, stmt, code);
}

/* Lower a .MUL_OVERFLOW call with at least one large/huge _BitInt
   argument or return type _Complex large/huge _BitInt.  */
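/* E.g. (illustrative, 64-bit limbs) for
     _Complex unsigned _BitInt(128) r = .MUL_OVERFLOW (a, b);
   with 128-bit unsigned arguments PREC2 is 256, the full product is
   computed into a 256-bit temporary by
     .MULBITINT (&buf, 256, &a, 128, &b, 128);
   and overflow is signalled unless bits [128, 256) of the buffer are
   all zero.  */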

void
bitint_large_huge::lower_mul_overflow (tree obj, gimple *stmt)
{
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  tree lhs = gimple_call_lhs (stmt);
  if (!lhs)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
      gsi_remove (&gsi, true);
      return;
    }
  gimple *final_stmt = gsi_stmt (m_gsi);
  tree type = TREE_TYPE (lhs);
  if (TREE_CODE (type) == COMPLEX_TYPE)
    type = TREE_TYPE (type);
  int prec = TYPE_PRECISION (type), prec0, prec1;
  arg0 = handle_operand_addr (arg0, stmt, NULL, &prec0);
  arg1 = handle_operand_addr (arg1, stmt, NULL, &prec1);
  int prec2 = ((prec0 < 0 ? -prec0 : prec0)
	       + (prec1 < 0 ? -prec1 : prec1));
  if (prec0 == 1 || prec1 == 1)
    --prec2;
  tree var = NULL_TREE;
  tree orig_obj = obj;
  bool force_var = false;
  if (obj == NULL_TREE
      && TREE_CODE (type) == BITINT_TYPE
      && bitint_precision_kind (type) >= bitint_prec_large
      && m_names
      && bitmap_bit_p (m_names, SSA_NAME_VERSION (lhs)))
    {
      int part = var_to_partition (m_map, lhs);
      gcc_assert (m_vars[part] != NULL_TREE);
      obj = m_vars[part];
      if (TREE_TYPE (lhs) == type)
	orig_obj = obj;
    }
  else if (obj != NULL_TREE && DECL_P (obj))
    {
      for (int i = 0; i < 2; ++i)
	{
	  tree arg = i ? arg1 : arg0;
	  if (TREE_CODE (arg) == ADDR_EXPR)
	    arg = TREE_OPERAND (arg, 0);
	  if (get_base_address (arg) == obj)
	    {
	      force_var = true;
	      break;
	    }
	}
    }
  if (obj == NULL_TREE
      || force_var
      || TREE_CODE (type) != BITINT_TYPE
      || bitint_precision_kind (type) < bitint_prec_large
      || prec2 > (CEIL (prec, limb_prec) * limb_prec * (orig_obj ? 1 : 2)))
    {
      unsigned HOST_WIDE_INT nelts = CEIL (MAX (prec, prec2), limb_prec);
      tree atype = build_array_type_nelts (m_limb_type, nelts);
      var = create_tmp_var (atype);
    }
  tree addr = build_fold_addr_expr (var ? var : obj);
  addr = force_gimple_operand_gsi (&m_gsi, addr, true,
				   NULL_TREE, true, GSI_SAME_STMT);
  tree sitype = lang_hooks.types.type_for_mode (SImode, 0);
  gimple *g
    = gimple_build_call_internal (IFN_MULBITINT, 6,
				  addr, build_int_cst (sitype,
						       MAX (prec2, prec)),
				  arg0, build_int_cst (sitype, prec0),
				  arg1, build_int_cst (sitype, prec1));
  insert_before (g);

  unsigned start, end;
  bool check_zero;
  tree ovf = arith_overflow (MULT_EXPR, type, prec, prec0, prec1, prec2,
			     &start, &end, &check_zero);
  if (ovf == NULL_TREE)
    {
      unsigned startlimb = start / limb_prec;
      unsigned endlimb = (end - 1) / limb_prec;
      unsigned cnt;
      bool use_loop = false;
      if (startlimb == endlimb)
	cnt = 1;
      else if (startlimb + 1 == endlimb)
	cnt = 2;
      else if ((end % limb_prec) == 0)
	{
	  cnt = 2;
	  use_loop = true;
	}
      else
	{
	  cnt = 3;
	  use_loop = startlimb + 2 < endlimb;
	}
      if (cnt == 1)
	{
	  tree l = limb_access (NULL_TREE, var ? var : obj,
				size_int (startlimb), true);
	  g = gimple_build_assign (make_ssa_name (m_limb_type), l);
	  insert_before (g);
	  l = arith_overflow_extract_bits (start, end,
					   gimple_assign_lhs (g),
					   startlimb, check_zero);
	  ovf = make_ssa_name (boolean_type_node);
	  if (check_zero)
	    g = gimple_build_assign (ovf, NE_EXPR, l,
				     build_zero_cst (m_limb_type));
	  else
	    {
	      g = gimple_build_assign (make_ssa_name (m_limb_type),
				       PLUS_EXPR, l,
				       build_int_cst (m_limb_type, 1));
	      insert_before (g);
	      g = gimple_build_assign (ovf, GT_EXPR, gimple_assign_lhs (g),
				       build_int_cst (m_limb_type, 1));
	    }
	  insert_before (g);
	}
      else
	{
	  basic_block edge_bb = NULL;
	  gimple_stmt_iterator gsi = m_gsi;
	  gsi_prev (&gsi);
	  edge e = split_block (gsi_bb (gsi), gsi_stmt (gsi));
	  edge_bb = e->src;
	  m_gsi = gsi_end_bb (edge_bb);

	  tree cmp = build_zero_cst (m_limb_type);
	  for (unsigned i = 0; i < cnt; i++)
	    {
	      tree idx, idx_next = NULL_TREE;
	      if (i == 0)
		idx = size_int (startlimb);
	      else if (i == 2)
		idx = size_int (endlimb);
	      else if (use_loop)
		idx = create_loop (size_int (startlimb + 1), &idx_next);
	      else
		idx = size_int (startlimb + 1);
	      tree l = limb_access (NULL_TREE, var ? var : obj, idx, true);
	      g = gimple_build_assign (make_ssa_name (m_limb_type), l);
	      insert_before (g);
	      l = gimple_assign_lhs (g);
	      if (i == 0 || i == 2)
		l = arith_overflow_extract_bits (start, end, l,
						 tree_to_uhwi (idx),
						 check_zero);
	      if (i == 0 && !check_zero)
		{
		  cmp = l;
		  g = gimple_build_assign (make_ssa_name (m_limb_type),
					   PLUS_EXPR, l,
					   build_int_cst (m_limb_type, 1));
		  insert_before (g);
		  g = gimple_build_cond (GT_EXPR, gimple_assign_lhs (g),
					 build_int_cst (m_limb_type, 1),
					 NULL_TREE, NULL_TREE);
		}
	      else
		g = gimple_build_cond (NE_EXPR, l, cmp, NULL_TREE,
				       NULL_TREE);
	      insert_before (g);
	      edge e1 = split_block (gsi_bb (m_gsi), g);
	      e1->flags = EDGE_FALSE_VALUE;
	      edge e2 = make_edge (e1->src, gimple_bb (final_stmt),
				   EDGE_TRUE_VALUE);
	      e1->probability = profile_probability::likely ();
	      e2->probability = e1->probability.invert ();
	      if (i == 0)
		set_immediate_dominator (CDI_DOMINATORS, e2->dest, e2->src);
	      m_gsi = gsi_after_labels (e1->dest);
	      if (i == 1 && use_loop)
		{
		  g = gimple_build_assign (idx_next, PLUS_EXPR, idx,
					   size_one_node);
		  insert_before (g);
		  g = gimple_build_cond (NE_EXPR, idx_next,
					 size_int (endlimb + (cnt == 2)),
					 NULL_TREE, NULL_TREE);
		  insert_before (g);
		  edge true_edge, false_edge;
		  extract_true_false_edges_from_block (gsi_bb (m_gsi),
						       &true_edge,
						       &false_edge);
		  m_gsi = gsi_after_labels (false_edge->dest);
		  m_bb = NULL;
		}
	    }

	  ovf = make_ssa_name (boolean_type_node);
	  basic_block bb = gimple_bb (final_stmt);
	  gphi *phi = create_phi_node (ovf, bb);
	  edge e1 = find_edge (gsi_bb (m_gsi), bb);
	  edge_iterator ei;
	  FOR_EACH_EDGE (e, ei, bb->preds)
	    {
	      tree val = e == e1 ? boolean_false_node : boolean_true_node;
	      add_phi_arg (phi, val, e, UNKNOWN_LOCATION);
	    }
	  m_gsi = gsi_for_stmt (final_stmt);
	}
    }

  finish_arith_overflow (var, obj, type, ovf, lhs, orig_obj, stmt,
			 MULT_EXPR);
}

/* Lower REALPART_EXPR or IMAGPART_EXPR stmt extracting part of result from
   .{ADD,SUB,MUL}_OVERFLOW call.  */
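/* E.g. (illustrative) for
     r = __imag__ c;
   where c is the result of an earlier lowered .MUL_OVERFLOW, this copies
   the second half of the limb array underlying c's partition variable
   into OBJ (__real__ would copy the first half).  */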

void
bitint_large_huge::lower_cplxpart_stmt (tree obj, gimple *stmt)
{
  tree rhs1 = gimple_assign_rhs1 (stmt);
  rhs1 = TREE_OPERAND (rhs1, 0);
  if (obj == NULL_TREE)
    {
      int part = var_to_partition (m_map, gimple_assign_lhs (stmt));
      gcc_assert (m_vars[part] != NULL_TREE);
      obj = m_vars[part];
    }
  if (TREE_CODE (rhs1) == SSA_NAME
      && (m_names == NULL
	  || !bitmap_bit_p (m_names, SSA_NAME_VERSION (rhs1))))
    {
      lower_call (obj, SSA_NAME_DEF_STMT (rhs1));
      return;
    }
  int part = var_to_partition (m_map, rhs1);
  gcc_assert (m_vars[part] != NULL_TREE);
  tree var = m_vars[part];
  unsigned HOST_WIDE_INT nelts
    = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (obj))) / limb_prec;
  tree atype = build_array_type_nelts (m_limb_type, nelts);
  if (!useless_type_conversion_p (atype, TREE_TYPE (obj)))
    obj = build1 (VIEW_CONVERT_EXPR, atype, obj);
  tree off = build_int_cst (build_pointer_type (TREE_TYPE (var)),
			    gimple_assign_rhs_code (stmt) == REALPART_EXPR
			    ? 0 : nelts * m_limb_size);
  tree v2 = build2 (MEM_REF, atype, build_fold_addr_expr (var), off);
  gimple *g = gimple_build_assign (obj, v2);
  insert_before (g);
}

/* Lower COMPLEX_EXPR stmt.  */
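/* E.g. (illustrative) c = COMPLEX_EXPR <v, o> is lowered into two limb
   array copies, one storing v into the first half of c's partition
   variable and one storing o into the second half.  */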

void
bitint_large_huge::lower_complexexpr_stmt (gimple *stmt)
{
  tree lhs = gimple_assign_lhs (stmt);
  tree rhs1 = gimple_assign_rhs1 (stmt);
  tree rhs2 = gimple_assign_rhs2 (stmt);
  int part = var_to_partition (m_map, lhs);
  gcc_assert (m_vars[part] != NULL_TREE);
  lhs = m_vars[part];
  unsigned HOST_WIDE_INT nelts
    = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (rhs1))) / limb_prec;
  tree atype = build_array_type_nelts (m_limb_type, nelts);
  tree zero = build_zero_cst (build_pointer_type (TREE_TYPE (lhs)));
  tree v1 = build2 (MEM_REF, atype, build_fold_addr_expr (lhs), zero);
  tree v2;
  if (TREE_CODE (rhs1) == SSA_NAME)
    {
      part = var_to_partition (m_map, rhs1);
      gcc_assert (m_vars[part] != NULL_TREE);
      v2 = m_vars[part];
    }
  else if (integer_zerop (rhs1))
    v2 = build_zero_cst (atype);
  else
    v2 = tree_output_constant_def (rhs1);
  if (!useless_type_conversion_p (atype, TREE_TYPE (v2)))
    v2 = build1 (VIEW_CONVERT_EXPR, atype, v2);
  gimple *g = gimple_build_assign (v1, v2);
  insert_before (g);
  tree off = fold_convert (build_pointer_type (TREE_TYPE (lhs)),
			   TYPE_SIZE_UNIT (atype));
  v1 = build2 (MEM_REF, atype, build_fold_addr_expr (lhs), off);
  if (TREE_CODE (rhs2) == SSA_NAME)
    {
      part = var_to_partition (m_map, rhs2);
      gcc_assert (m_vars[part] != NULL_TREE);
      v2 = m_vars[part];
    }
  else if (integer_zerop (rhs2))
    v2 = build_zero_cst (atype);
  else
    v2 = tree_output_constant_def (rhs2);
  if (!useless_type_conversion_p (atype, TREE_TYPE (v2)))
    v2 = build1 (VIEW_CONVERT_EXPR, atype, v2);
  g = gimple_build_assign (v1, v2);
  insert_before (g);
}

/* Lower a .{CLZ,CTZ,CLRSB,FFS,PARITY,POPCOUNT} call with one large/huge
   _BitInt argument.  */
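/* E.g. (illustrative, 64-bit limbs) .POPCOUNT on a _BitInt(512)
   argument becomes a loop summing __builtin_popcountll over the 8
   limbs, while .CLZ scans from the most significant limb down for the
   first non-zero limb and adds __builtin_clzll of that limb to the
   number of bits skipped above it.  */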

void
bitint_large_huge::lower_bit_query (gimple *stmt)
{
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = (gimple_call_num_args (stmt) == 2
	       ? gimple_call_arg (stmt, 1) : NULL_TREE);
  tree lhs = gimple_call_lhs (stmt);
  gimple *g;

  if (!lhs)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
      gsi_remove (&gsi, true);
      return;
    }
  tree type = TREE_TYPE (arg0);
  gcc_assert (TREE_CODE (type) == BITINT_TYPE);
  bitint_prec_kind kind = bitint_precision_kind (type);
  gcc_assert (kind >= bitint_prec_large);
  enum internal_fn ifn = gimple_call_internal_fn (stmt);
  enum built_in_function fcode = END_BUILTINS;
  gcc_assert (TYPE_PRECISION (unsigned_type_node) == limb_prec
	      || TYPE_PRECISION (long_unsigned_type_node) == limb_prec
	      || TYPE_PRECISION (long_long_unsigned_type_node) == limb_prec);
  switch (ifn)
    {
    case IFN_CLZ:
      if (TYPE_PRECISION (unsigned_type_node) == limb_prec)
	fcode = BUILT_IN_CLZ;
      else if (TYPE_PRECISION (long_unsigned_type_node) == limb_prec)
	fcode = BUILT_IN_CLZL;
      else
	fcode = BUILT_IN_CLZLL;
      break;
    case IFN_FFS:
      /* .FFS (X) is .CTZ (X, -1) + 1, though under the hood
	 we don't add the addend at the end.  */
      arg1 = integer_zero_node;
      /* FALLTHRU */
    case IFN_CTZ:
      if (TYPE_PRECISION (unsigned_type_node) == limb_prec)
	fcode = BUILT_IN_CTZ;
      else if (TYPE_PRECISION (long_unsigned_type_node) == limb_prec)
	fcode = BUILT_IN_CTZL;
      else
	fcode = BUILT_IN_CTZLL;
      m_upwards = true;
      break;
    case IFN_CLRSB:
      if (TYPE_PRECISION (unsigned_type_node) == limb_prec)
	fcode = BUILT_IN_CLRSB;
      else if (TYPE_PRECISION (long_unsigned_type_node) == limb_prec)
	fcode = BUILT_IN_CLRSBL;
      else
	fcode = BUILT_IN_CLRSBLL;
      break;
    case IFN_PARITY:
      if (TYPE_PRECISION (unsigned_type_node) == limb_prec)
	fcode = BUILT_IN_PARITY;
      else if (TYPE_PRECISION (long_unsigned_type_node) == limb_prec)
	fcode = BUILT_IN_PARITYL;
      else
	fcode = BUILT_IN_PARITYLL;
      m_upwards = true;
      break;
    case IFN_POPCOUNT:
      if (TYPE_PRECISION (unsigned_type_node) == limb_prec)
	fcode = BUILT_IN_POPCOUNT;
      else if (TYPE_PRECISION (long_unsigned_type_node) == limb_prec)
	fcode = BUILT_IN_POPCOUNTL;
      else
	fcode = BUILT_IN_POPCOUNTLL;
      m_upwards = true;
      break;
    default:
      gcc_unreachable ();
    }
  tree fndecl = builtin_decl_explicit (fcode), res = NULL_TREE;
  unsigned cnt = 0, rem = 0, end = 0, prec = TYPE_PRECISION (type);
  struct bq_details { edge e; tree val, addend; } *bqp = NULL;
  basic_block edge_bb = NULL;
  if (m_upwards)
    {
      tree idx = NULL_TREE, idx_first = NULL_TREE, idx_next = NULL_TREE;
      if (kind == bitint_prec_large)
	cnt = CEIL (prec, limb_prec);
      else
	{
	  rem = (prec % (2 * limb_prec));
	  end = (prec - rem) / limb_prec;
	  cnt = 2 + CEIL (rem, limb_prec);
	  idx = idx_first = create_loop (size_zero_node, &idx_next);
	}

      if (ifn == IFN_CTZ || ifn == IFN_FFS)
	{
	  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
	  gsi_prev (&gsi);
	  edge e = split_block (gsi_bb (gsi), gsi_stmt (gsi));
	  edge_bb = e->src;
	  if (kind == bitint_prec_large)
	    m_gsi = gsi_end_bb (edge_bb);
	  bqp = XALLOCAVEC (struct bq_details, cnt);
	}
      else
	m_after_stmt = stmt;
      if (kind != bitint_prec_large)
	m_upwards_2limb = end;

      for (unsigned i = 0; i < cnt; i++)
	{
	  m_data_cnt = 0;
	  if (kind == bitint_prec_large)
	    idx = size_int (i);
	  else if (i >= 2)
	    idx = size_int (end + (i > 2));

	  tree rhs1 = handle_operand (arg0, idx);
	  if (!useless_type_conversion_p (m_limb_type, TREE_TYPE (rhs1)))
	    {
	      if (!TYPE_UNSIGNED (TREE_TYPE (rhs1)))
		rhs1 = add_cast (unsigned_type_for (TREE_TYPE (rhs1)),
				 rhs1);
	      rhs1 = add_cast (m_limb_type, rhs1);
	    }

	  tree in, out, tem;
	  if (ifn == IFN_PARITY)
	    in = prepare_data_in_out (build_zero_cst (m_limb_type), idx,
				      &out);
	  else if (ifn == IFN_FFS)
	    in = prepare_data_in_out (integer_one_node, idx, &out);
	  else
	    in = prepare_data_in_out (integer_zero_node, idx, &out);

	  switch (ifn)
	    {
	    case IFN_CTZ:
	    case IFN_FFS:
	      g = gimple_build_cond (NE_EXPR, rhs1,
				     build_zero_cst (m_limb_type),
				     NULL_TREE, NULL_TREE);
	      insert_before (g);
	      edge e1, e2;
	      e1 = split_block (gsi_bb (m_gsi), g);
	      e1->flags = EDGE_FALSE_VALUE;
	      e2 = make_edge (e1->src, gimple_bb (stmt), EDGE_TRUE_VALUE);
	      e1->probability = profile_probability::unlikely ();
	      e2->probability = e1->probability.invert ();
	      if (i == 0)
		set_immediate_dominator (CDI_DOMINATORS, e2->dest, e2->src);
	      m_gsi = gsi_after_labels (e1->dest);
	      bqp[i].e = e2;
	      bqp[i].val = rhs1;
	      if (tree_fits_uhwi_p (idx))
		bqp[i].addend
		  = build_int_cst (integer_type_node,
				   tree_to_uhwi (idx) * limb_prec
				   + (ifn == IFN_FFS));
	      else
		{
		  bqp[i].addend = in;
		  if (i == 1)
		    res = out;
		  else
		    res = make_ssa_name (integer_type_node);
		  g = gimple_build_assign (res, PLUS_EXPR, in,
					   build_int_cst (integer_type_node,
							  limb_prec));
		  insert_before (g);
		  m_data[m_data_cnt] = res;
		}
	      break;
	    case IFN_PARITY:
	      if (!integer_zerop (in))
		{
		  if (kind == bitint_prec_huge && i == 1)
		    res = out;
		  else
		    res = make_ssa_name (m_limb_type);
		  g = gimple_build_assign (res, BIT_XOR_EXPR, in, rhs1);
		  insert_before (g);
		}
	      else
		res = rhs1;
	      m_data[m_data_cnt] = res;
	      break;
	    case IFN_POPCOUNT:
	      g = gimple_build_call (fndecl, 1, rhs1);
	      tem = make_ssa_name (integer_type_node);
	      gimple_call_set_lhs (g, tem);
	      insert_before (g);
	      if (!integer_zerop (in))
		{
		  if (kind == bitint_prec_huge && i == 1)
		    res = out;
		  else
		    res = make_ssa_name (integer_type_node);
		  g = gimple_build_assign (res, PLUS_EXPR, in, tem);
		  insert_before (g);
		}
	      else
		res = tem;
	      m_data[m_data_cnt] = res;
	      break;
	    default:
	      gcc_unreachable ();
	    }

	  m_first = false;
	  if (kind == bitint_prec_huge && i <= 1)
	    {
	      if (i == 0)
		{
		  idx = make_ssa_name (sizetype);
		  g = gimple_build_assign (idx, PLUS_EXPR, idx_first,
					   size_one_node);
		  insert_before (g);
		}
	      else
		{
		  g = gimple_build_assign (idx_next, PLUS_EXPR, idx_first,
					   size_int (2));
		  insert_before (g);
		  g = gimple_build_cond (NE_EXPR, idx_next, size_int (end),
					 NULL_TREE, NULL_TREE);
		  insert_before (g);
		  if (ifn == IFN_CTZ || ifn == IFN_FFS)
		    m_gsi = gsi_after_labels (edge_bb);
		  else
		    m_gsi = gsi_for_stmt (stmt);
		  m_bb = NULL;
		}
	    }
	}
    }
  else
    {
      tree idx = NULL_TREE, idx_next = NULL_TREE, first = NULL_TREE;
      int sub_one = 0;
      if (kind == bitint_prec_large)
	cnt = CEIL (prec, limb_prec);
      else
	{
	  rem = prec % limb_prec;
	  if (rem == 0 && (!TYPE_UNSIGNED (type) || ifn == IFN_CLRSB))
	    rem = limb_prec;
	  end = (prec - rem) / limb_prec;
	  cnt = 1 + (rem != 0);
	  if (ifn == IFN_CLRSB)
	    sub_one = 1;
	}

      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
      gsi_prev (&gsi);
      edge e = split_block (gsi_bb (gsi), gsi_stmt (gsi));
      edge_bb = e->src;
      m_gsi = gsi_end_bb (edge_bb);

      if (ifn == IFN_CLZ)
	bqp = XALLOCAVEC (struct bq_details, cnt);
      else
	{
	  gsi = gsi_for_stmt (stmt);
	  gsi_prev (&gsi);
	  e = split_block (gsi_bb (gsi), gsi_stmt (gsi));
	  edge_bb = e->src;
	  bqp = XALLOCAVEC (struct bq_details, 2 * cnt);
	}

      for (unsigned i = 0; i < cnt; i++)
	{
	  m_data_cnt = 0;
	  if (kind == bitint_prec_large)
	    idx = size_int (cnt - i - 1);
	  else if (i == cnt - 1)
	    idx = create_loop (size_int (end - 1), &idx_next);
	  else
	    idx = size_int (end);

	  tree rhs1 = handle_operand (arg0, idx);
	  if (!useless_type_conversion_p (m_limb_type, TREE_TYPE (rhs1)))
	    {
	      if (ifn == IFN_CLZ && !TYPE_UNSIGNED (TREE_TYPE (rhs1)))
		rhs1 = add_cast (unsigned_type_for (TREE_TYPE (rhs1)),
				 rhs1);
	      else if (ifn == IFN_CLRSB && TYPE_UNSIGNED (TREE_TYPE (rhs1)))
		rhs1 = add_cast (signed_type_for (TREE_TYPE (rhs1)), rhs1);
	      rhs1 = add_cast (m_limb_type, rhs1);
	    }

	  if (ifn == IFN_CLZ)
	    {
	      g = gimple_build_cond (NE_EXPR, rhs1,
				     build_zero_cst (m_limb_type),
				     NULL_TREE, NULL_TREE);
	      insert_before (g);
	      edge e1 = split_block (gsi_bb (m_gsi), g);
	      e1->flags = EDGE_FALSE_VALUE;
	      edge e2 = make_edge (e1->src, gimple_bb (stmt),
				   EDGE_TRUE_VALUE);
	      e1->probability = profile_probability::unlikely ();
	      e2->probability = e1->probability.invert ();
	      if (i == 0)
		set_immediate_dominator (CDI_DOMINATORS, e2->dest, e2->src);
	      m_gsi = gsi_after_labels (e1->dest);
	      bqp[i].e = e2;
	      bqp[i].val = rhs1;
	    }
	  else
	    {
	      if (i == 0)
		{
		  first = rhs1;
		  g = gimple_build_assign (make_ssa_name (m_limb_type),
					   PLUS_EXPR, rhs1,
					   build_int_cst (m_limb_type, 1));
		  insert_before (g);
		  g = gimple_build_cond (GT_EXPR, gimple_assign_lhs (g),
					 build_int_cst (m_limb_type, 1),
					 NULL_TREE, NULL_TREE);
		  insert_before (g);
		}
	      else
		{
		  g = gimple_build_assign (make_ssa_name (m_limb_type),
					   BIT_XOR_EXPR, rhs1, first);
		  insert_before (g);
		  tree stype = signed_type_for (m_limb_type);
		  g = gimple_build_cond (LT_EXPR,
					 add_cast (stype,
						   gimple_assign_lhs (g)),
					 build_zero_cst (stype),
					 NULL_TREE, NULL_TREE);
		  insert_before (g);
		  edge e1 = split_block (gsi_bb (m_gsi), g);
		  e1->flags = EDGE_FALSE_VALUE;
		  edge e2 = make_edge (e1->src, gimple_bb (stmt),
				       EDGE_TRUE_VALUE);
		  e1->probability = profile_probability::unlikely ();
		  e2->probability = e1->probability.invert ();
		  if (i == 1)
		    set_immediate_dominator (CDI_DOMINATORS, e2->dest,
					     e2->src);
		  m_gsi = gsi_after_labels (e1->dest);
		  bqp[2 * i].e = e2;
		  g = gimple_build_cond (NE_EXPR, rhs1, first,
					 NULL_TREE, NULL_TREE);
		  insert_before (g);
		}
	      edge e1 = split_block (gsi_bb (m_gsi), g);
	      e1->flags = EDGE_FALSE_VALUE;
	      edge e2 = make_edge (e1->src, edge_bb, EDGE_TRUE_VALUE);
	      e1->probability = profile_probability::unlikely ();
	      e2->probability = e1->probability.invert ();
	      if (i == 0)
		set_immediate_dominator (CDI_DOMINATORS, e2->dest, e2->src);
	      m_gsi = gsi_after_labels (e1->dest);
	      bqp[2 * i + 1].e = e2;
	      bqp[i].val = rhs1;
	    }
	  if (tree_fits_uhwi_p (idx))
	    bqp[i].addend
	      = build_int_cst (integer_type_node,
			       (int) prec
			       - (((int) tree_to_uhwi (idx) + 1)
				  * limb_prec) - sub_one);
	  else
	    {
	      tree in, out;
	      in = build_int_cst (integer_type_node, rem - sub_one);
	      m_first = true;
	      in = prepare_data_in_out (in, idx, &out);
	      out = m_data[m_data_cnt + 1];
	      bqp[i].addend = in;
	      g = gimple_build_assign (out, PLUS_EXPR, in,
				       build_int_cst (integer_type_node,
						      limb_prec));
	      insert_before (g);
	      m_data[m_data_cnt] = out;
	    }

	  m_first = false;
	  if (kind == bitint_prec_huge && i == cnt - 1)
	    {
	      g = gimple_build_assign (idx_next, PLUS_EXPR, idx,
				       size_int (-1));
	      insert_before (g);
	      g = gimple_build_cond (NE_EXPR, idx, size_zero_node,
				     NULL_TREE, NULL_TREE);
	      insert_before (g);
	      edge true_edge, false_edge;
	      extract_true_false_edges_from_block (gsi_bb (m_gsi),
						   &true_edge, &false_edge);
	      m_gsi = gsi_after_labels (false_edge->dest);
	      m_bb = NULL;
	    }
	}
    }
  switch (ifn)
    {
    case IFN_CLZ:
    case IFN_CTZ:
    case IFN_FFS:
      gphi *phi1, *phi2, *phi3;
      basic_block bb;
      bb = gsi_bb (m_gsi);
      remove_edge (find_edge (bb, gimple_bb (stmt)));
      phi1 = create_phi_node (make_ssa_name (m_limb_type),
			      gimple_bb (stmt));
      phi2 = create_phi_node (make_ssa_name (integer_type_node),
			      gimple_bb (stmt));
      for (unsigned i = 0; i < cnt; i++)
	{
	  add_phi_arg (phi1, bqp[i].val, bqp[i].e, UNKNOWN_LOCATION);
	  add_phi_arg (phi2, bqp[i].addend, bqp[i].e, UNKNOWN_LOCATION);
	}
      if (arg1 == NULL_TREE)
	{
	  g = gimple_build_builtin_unreachable (m_loc);
	  insert_before (g);
	}
      m_gsi = gsi_for_stmt (stmt);
      g = gimple_build_call (fndecl, 1, gimple_phi_result (phi1));
      gimple_call_set_lhs (g, make_ssa_name (integer_type_node));
      insert_before (g);
      if (arg1 == NULL_TREE)
	g = gimple_build_assign (lhs, PLUS_EXPR,
				 gimple_phi_result (phi2),
				 gimple_call_lhs (g));
      else
	{
	  g = gimple_build_assign (make_ssa_name (integer_type_node),
				   PLUS_EXPR, gimple_phi_result (phi2),
				   gimple_call_lhs (g));
	  insert_before (g);
	  edge e1 = split_block (gimple_bb (stmt), g);
	  edge e2 = make_edge (bb, e1->dest, EDGE_FALLTHRU);
	  e2->probability = profile_probability::always ();
	  set_immediate_dominator (CDI_DOMINATORS, e1->dest,
				   get_immediate_dominator (CDI_DOMINATORS,
							    e1->src));
	  phi3 = create_phi_node (make_ssa_name (integer_type_node),
				  e1->dest);
	  add_phi_arg (phi3, gimple_assign_lhs (g), e1, UNKNOWN_LOCATION);
5196 add_phi_arg (phi3, arg1, e2, UNKNOWN_LOCATION);
5197 m_gsi = gsi_for_stmt (stmt);
5198 g = gimple_build_assign (lhs, gimple_phi_result (gs: phi3));
5199 }
5200 gsi_replace (&m_gsi, g, true);
5201 break;
5202 case IFN_CLRSB:
5203 bb = gsi_bb (i: m_gsi);
5204 remove_edge (find_edge (bb, edge_bb));
5205 edge e;
5206 e = make_edge (bb, gimple_bb (g: stmt), EDGE_FALLTHRU);
5207 e->probability = profile_probability::always ();
5208 set_immediate_dominator (CDI_DOMINATORS, gimple_bb (g: stmt),
5209 get_immediate_dominator (CDI_DOMINATORS,
5210 edge_bb));
5211 phi1 = create_phi_node (make_ssa_name (var: m_limb_type),
5212 edge_bb);
5213 phi2 = create_phi_node (make_ssa_name (integer_type_node),
5214 edge_bb);
5215 phi3 = create_phi_node (make_ssa_name (integer_type_node),
5216 gimple_bb (g: stmt));
5217 for (unsigned i = 0; i < cnt; i++)
5218 {
5219 add_phi_arg (phi1, bqp[i].val, bqp[2 * i + 1].e, UNKNOWN_LOCATION);
5220 add_phi_arg (phi2, bqp[i].addend, bqp[2 * i + 1].e,
5221 UNKNOWN_LOCATION);
5222 tree a = bqp[i].addend;
5223 if (i && kind == bitint_prec_large)
5224 a = int_const_binop (PLUS_EXPR, a, integer_minus_one_node);
5225 if (i)
5226 add_phi_arg (phi3, a, bqp[2 * i].e, UNKNOWN_LOCATION);
5227 }
5228 add_phi_arg (phi3, build_int_cst (integer_type_node, prec - 1), e,
5229 UNKNOWN_LOCATION);
5230 m_gsi = gsi_after_labels (bb: edge_bb);
5231 g = gimple_build_call (fndecl, 1,
5232 add_cast (type: signed_type_for (m_limb_type),
5233 val: gimple_phi_result (gs: phi1)));
5234 gimple_call_set_lhs (gs: g, lhs: make_ssa_name (integer_type_node));
5235 insert_before (g);
5236 g = gimple_build_assign (make_ssa_name (integer_type_node),
5237 PLUS_EXPR, gimple_call_lhs (gs: g),
5238 gimple_phi_result (gs: phi2));
5239 insert_before (g);
5240 if (kind != bitint_prec_large)
5241 {
5242 g = gimple_build_assign (make_ssa_name (integer_type_node),
5243 PLUS_EXPR, gimple_assign_lhs (gs: g),
5244 integer_one_node);
5245 insert_before (g);
5246 }
5247 add_phi_arg (phi3, gimple_assign_lhs (gs: g),
5248 find_edge (edge_bb, gimple_bb (g: stmt)), UNKNOWN_LOCATION);
5249 m_gsi = gsi_for_stmt (stmt);
5250 g = gimple_build_assign (lhs, gimple_phi_result (gs: phi3));
5251 gsi_replace (&m_gsi, g, true);
5252 break;
5253 case IFN_PARITY:
5254 g = gimple_build_call (fndecl, 1, res);
5255 gimple_call_set_lhs (gs: g, lhs);
5256 gsi_replace (&m_gsi, g, true);
5257 break;
5258 case IFN_POPCOUNT:
5259 g = gimple_build_assign (lhs, res);
5260 gsi_replace (&m_gsi, g, true);
5261 break;
5262 default:
5263 gcc_unreachable ();
5264 }
5265}
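
/* An illustrative (non-normative) C sketch of the IFN_CLZ lowering
   above for a huge unsigned _BitInt: limbs are scanned from the most
   significant end and the first non-zero limb terminates the search.
   LIMBS, LIMB_PREC and PREC are hypothetical placeholders for the
   limb count, limb precision and _BitInt precision, and the limb is
   assumed to be unsigned long here:

     int
     bitint_clz (const unsigned long limbs[LIMBS])
     {
       for (int i = LIMBS - 1; i >= 0; --i)
	 if (limbs[i])
	   return PREC - (i + 1) * LIMB_PREC + __builtin_clzl (limbs[i]);
       __builtin_unreachable ();
     }

   which mirrors the bqp[i].addend computation above; when the
   internal call has a second argument, that value is used for zero
   input instead of the __builtin_unreachable ().  */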
5266
5267/* Lower a call statement with one or more large/huge _BitInt
5268 arguments or large/huge _BitInt return value. */
5269
5270void
5271bitint_large_huge::lower_call (tree obj, gimple *stmt)
5272{
5273 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
5274 unsigned int nargs = gimple_call_num_args (gs: stmt);
5275 if (gimple_call_internal_p (gs: stmt))
5276 switch (gimple_call_internal_fn (gs: stmt))
5277 {
5278 case IFN_ADD_OVERFLOW:
5279 case IFN_SUB_OVERFLOW:
5280 case IFN_UBSAN_CHECK_ADD:
5281 case IFN_UBSAN_CHECK_SUB:
5282 lower_addsub_overflow (obj, stmt);
5283 return;
5284 case IFN_MUL_OVERFLOW:
5285 case IFN_UBSAN_CHECK_MUL:
5286 lower_mul_overflow (obj, stmt);
5287 return;
5288 case IFN_CLZ:
5289 case IFN_CTZ:
5290 case IFN_CLRSB:
5291 case IFN_FFS:
5292 case IFN_PARITY:
5293 case IFN_POPCOUNT:
5294 lower_bit_query (stmt);
5295 return;
5296 default:
5297 break;
5298 }
5299 bool returns_twice = (gimple_call_flags (stmt) & ECF_RETURNS_TWICE) != 0;
5300 for (unsigned int i = 0; i < nargs; ++i)
5301 {
5302 tree arg = gimple_call_arg (gs: stmt, index: i);
5303 if (TREE_CODE (arg) != SSA_NAME
5304 || TREE_CODE (TREE_TYPE (arg)) != BITINT_TYPE
5305 || bitint_precision_kind (TREE_TYPE (arg)) <= bitint_prec_middle)
5306 continue;
5307 if (SSA_NAME_IS_DEFAULT_DEF (arg)
5308 && (!SSA_NAME_VAR (arg) || VAR_P (SSA_NAME_VAR (arg))))
5309 {
5310 tree var = create_tmp_reg (TREE_TYPE (arg));
5311 arg = get_or_create_ssa_default_def (cfun, var);
5312 }
5313 else
5314 {
5315 int p = var_to_partition (map: m_map, var: arg);
5316 tree v = m_vars[p];
5317 gcc_assert (v != NULL_TREE);
5318 if (!types_compatible_p (TREE_TYPE (arg), TREE_TYPE (v)))
5319 v = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (arg), v);
5320 arg = make_ssa_name (TREE_TYPE (arg));
5321 gimple *g = gimple_build_assign (arg, v);
5322 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
5323 if (returns_twice && bb_has_abnormal_pred (bb: gimple_bb (g: stmt)))
5324 {
5325 m_returns_twice_calls.safe_push (obj: stmt);
5326 returns_twice = false;
5327 }
5328 }
5329 gimple_call_set_arg (gs: stmt, index: i, arg);
5330 if (m_preserved == NULL)
5331 m_preserved = BITMAP_ALLOC (NULL);
5332 bitmap_set_bit (m_preserved, SSA_NAME_VERSION (arg));
5333 }
5334 tree lhs = gimple_call_lhs (gs: stmt);
5335 if (lhs
5336 && TREE_CODE (lhs) == SSA_NAME
5337 && TREE_CODE (TREE_TYPE (lhs)) == BITINT_TYPE
5338 && bitint_precision_kind (TREE_TYPE (lhs)) >= bitint_prec_large)
5339 {
5340 int p = var_to_partition (map: m_map, var: lhs);
5341 tree v = m_vars[p];
5342 gcc_assert (v != NULL_TREE);
5343 if (!types_compatible_p (TREE_TYPE (lhs), TREE_TYPE (v)))
5344 v = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), v);
5345 gimple_call_set_lhs (gs: stmt, lhs: v);
5346 SSA_NAME_DEF_STMT (lhs) = gimple_build_nop ();
5347 }
5348 update_stmt (s: stmt);
5349}
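
/* E.g. (illustrative), with large _BitInt x_1 whose partition is
   backed by a variable here called bitint.N, a call

     foo (x_1);

   becomes roughly

     _2 = VIEW_CONVERT_EXPR<_BitInt(256)>(bitint.N);
     foo (_2);

   and a large/huge _BitInt call result is redirected to store
   straight into the backing variable,

     x_1 = bar ();  ->  VIEW_CONVERT_EXPR<_BitInt(256)>(bitint.N) = bar ();

   with x_1's def stmt turned into a nop; bitint.N and the SSA names
   here are placeholders, not names the pass guarantees.  */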
5350
5351/* Lower __asm STMT which involves large/huge _BitInt values. */
5352
5353void
5354bitint_large_huge::lower_asm (gimple *stmt)
5355{
5356 gasm *g = as_a <gasm *> (p: stmt);
5357 unsigned noutputs = gimple_asm_noutputs (asm_stmt: g);
5358 unsigned ninputs = gimple_asm_ninputs (asm_stmt: g);
5359
5360 for (unsigned i = 0; i < noutputs; ++i)
5361 {
5362 tree t = gimple_asm_output_op (asm_stmt: g, index: i);
5363 tree s = TREE_VALUE (t);
5364 if (TREE_CODE (s) == SSA_NAME
5365 && TREE_CODE (TREE_TYPE (s)) == BITINT_TYPE
5366 && bitint_precision_kind (TREE_TYPE (s)) >= bitint_prec_large)
5367 {
5368 int part = var_to_partition (map: m_map, var: s);
5369 gcc_assert (m_vars[part] != NULL_TREE);
5370 TREE_VALUE (t) = m_vars[part];
5371 }
5372 }
5373 for (unsigned i = 0; i < ninputs; ++i)
5374 {
5375 tree t = gimple_asm_input_op (asm_stmt: g, index: i);
5376 tree s = TREE_VALUE (t);
5377 if (TREE_CODE (s) == SSA_NAME
5378 && TREE_CODE (TREE_TYPE (s)) == BITINT_TYPE
5379 && bitint_precision_kind (TREE_TYPE (s)) >= bitint_prec_large)
5380 {
5381 if (SSA_NAME_IS_DEFAULT_DEF (s)
5382 && (!SSA_NAME_VAR (s) || VAR_P (SSA_NAME_VAR (s))))
5383 {
5384 TREE_VALUE (t) = create_tmp_var (TREE_TYPE (s), "bitint");
5385 mark_addressable (TREE_VALUE (t));
5386 }
5387 else
5388 {
5389 int part = var_to_partition (map: m_map, var: s);
5390 gcc_assert (m_vars[part] != NULL_TREE);
5391 TREE_VALUE (t) = m_vars[part];
5392 }
5393 }
5394 }
5395 update_stmt (s: stmt);
5396}
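
/* E.g. (illustrative), for _BitInt(512) o_1 and i_2 in

     __asm__ ("..." : "=m" (o_1) : "m" (i_2));

   both operands are rewritten to refer to the limb arrays backing
   their partitions, while an uninitialized input (a default
   definition) gets a fresh addressable "bitint" temporary instead.  */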
5397
5398/* Lower statement STMT which involves large/huge _BitInt values
5399 into code accessing individual limbs. */
5400
5401void
5402bitint_large_huge::lower_stmt (gimple *stmt)
5403{
5404 m_first = true;
5405 m_lhs = NULL_TREE;
5406 m_data.truncate (size: 0);
5407 m_data_cnt = 0;
5408 m_gsi = gsi_for_stmt (stmt);
5409 m_after_stmt = NULL;
5410 m_bb = NULL;
5411 m_init_gsi = m_gsi;
5412 gsi_prev (i: &m_init_gsi);
5413 m_preheader_bb = NULL;
5414 m_upwards_2limb = 0;
5415 m_upwards = false;
5416 m_var_msb = false;
5417 m_cast_conditional = false;
5418 m_bitfld_load = 0;
5419 m_loc = gimple_location (g: stmt);
5420 if (is_gimple_call (gs: stmt))
5421 {
5422 lower_call (NULL_TREE, stmt);
5423 return;
5424 }
5425 if (gimple_code (g: stmt) == GIMPLE_ASM)
5426 {
5427 lower_asm (stmt);
5428 return;
5429 }
5430 tree lhs = NULL_TREE, cmp_op1 = NULL_TREE, cmp_op2 = NULL_TREE;
5431 tree_code cmp_code = comparison_op (stmt, pop1: &cmp_op1, pop2: &cmp_op2);
5432 bool eq_p = (cmp_code == EQ_EXPR || cmp_code == NE_EXPR);
5433 bool mergeable_cast_p = false;
5434 bool final_cast_p = false;
5435 if (gimple_assign_cast_p (s: stmt))
5436 {
5437 lhs = gimple_assign_lhs (gs: stmt);
5438 tree rhs1 = gimple_assign_rhs1 (gs: stmt);
5439 if (TREE_CODE (rhs1) == VIEW_CONVERT_EXPR)
5440 rhs1 = TREE_OPERAND (rhs1, 0);
5441 if (TREE_CODE (TREE_TYPE (lhs)) == BITINT_TYPE
5442 && bitint_precision_kind (TREE_TYPE (lhs)) >= bitint_prec_large
5443 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
5444 mergeable_cast_p = true;
5445 else if (TREE_CODE (TREE_TYPE (rhs1)) == BITINT_TYPE
5446 && bitint_precision_kind (TREE_TYPE (rhs1)) >= bitint_prec_large
5447 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5448 || POINTER_TYPE_P (TREE_TYPE (lhs))
5449 || gimple_assign_rhs_code (gs: stmt) == VIEW_CONVERT_EXPR))
5450 {
5451 final_cast_p = true;
5452 if (((TREE_CODE (TREE_TYPE (lhs)) == INTEGER_TYPE
5453 && TYPE_PRECISION (TREE_TYPE (lhs)) > MAX_FIXED_MODE_SIZE)
5454 || (!INTEGRAL_TYPE_P (TREE_TYPE (lhs))
5455 && !POINTER_TYPE_P (TREE_TYPE (lhs))))
5456 && gimple_assign_rhs_code (gs: stmt) == VIEW_CONVERT_EXPR)
5457 {
5458 /* Handle VIEW_CONVERT_EXPRs to huge INTEGER_TYPEs like
5459 uint256_t or uint512_t, which aren't supported in general.
5460 These are usually emitted from memcpy folding; backends
5461 support moves in such modes, but usually little else.
5462 Similarly handle VCEs to vector/complex types etc. */
5463 gcc_assert (TREE_CODE (rhs1) == SSA_NAME);
5464 if (SSA_NAME_IS_DEFAULT_DEF (rhs1)
5465 && (!SSA_NAME_VAR (rhs1) || VAR_P (SSA_NAME_VAR (rhs1))))
5466 {
5467 tree var = create_tmp_reg (TREE_TYPE (lhs));
5468 rhs1 = get_or_create_ssa_default_def (cfun, var);
5469 gimple_assign_set_rhs1 (gs: stmt, rhs: rhs1);
5470 gimple_assign_set_rhs_code (s: stmt, code: SSA_NAME);
5471 }
5472 else if (m_names == NULL
5473 || !bitmap_bit_p (m_names, SSA_NAME_VERSION (rhs1)))
5474 {
5475 gimple *g = SSA_NAME_DEF_STMT (rhs1);
5476 gcc_assert (gimple_assign_load_p (g));
5477 tree mem = gimple_assign_rhs1 (gs: g);
5478 tree ltype = TREE_TYPE (lhs);
5479 addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (mem));
5480 if (as != TYPE_ADDR_SPACE (ltype))
5481 ltype
5482 = build_qualified_type (ltype,
5483 TYPE_QUALS (ltype)
5484 | ENCODE_QUAL_ADDR_SPACE (as));
5485 rhs1 = build1 (VIEW_CONVERT_EXPR, ltype, unshare_expr (mem));
5486 gimple_assign_set_rhs1 (gs: stmt, rhs: rhs1);
5487 }
5488 else
5489 {
5490 int part = var_to_partition (map: m_map, var: rhs1);
5491 gcc_assert (m_vars[part] != NULL_TREE);
5492 rhs1 = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs),
5493 m_vars[part]);
5494 gimple_assign_set_rhs1 (gs: stmt, rhs: rhs1);
5495 }
5496 update_stmt (s: stmt);
5497 return;
5498 }
5499 if (TREE_CODE (rhs1) == SSA_NAME
5500 && (m_names == NULL
5501 || !bitmap_bit_p (m_names, SSA_NAME_VERSION (rhs1))))
5502 {
5503 gimple *g = SSA_NAME_DEF_STMT (rhs1);
5504 if (is_gimple_assign (gs: g)
5505 && gimple_assign_rhs_code (gs: g) == IMAGPART_EXPR)
5506 {
5507 tree rhs2 = TREE_OPERAND (gimple_assign_rhs1 (g), 0);
5508 if (TREE_CODE (rhs2) == SSA_NAME
5509 && (m_names == NULL
5510 || !bitmap_bit_p (m_names, SSA_NAME_VERSION (rhs2))))
5511 {
5512 g = SSA_NAME_DEF_STMT (rhs2);
5513 int ovf = optimizable_arith_overflow (stmt: g);
5514 if (ovf == 2)
5515 /* If .{ADD,SUB,MUL}_OVERFLOW has both REALPART_EXPR
5516 and IMAGPART_EXPR uses, where the latter is cast to
5517 non-_BitInt, it will be optimized when handling
5518 the REALPART_EXPR. */
5519 return;
5520 if (ovf == 1)
5521 {
5522 lower_call (NULL_TREE, stmt: g);
5523 return;
5524 }
5525 }
5526 }
5527 }
5528 }
5529 else if (TREE_CODE (TREE_TYPE (lhs)) == BITINT_TYPE
5530 && bitint_precision_kind (TREE_TYPE (lhs)) >= bitint_prec_large
5531 && !INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
5532 && !POINTER_TYPE_P (TREE_TYPE (rhs1))
5533 && gimple_assign_rhs_code (gs: stmt) == VIEW_CONVERT_EXPR)
5534 {
5535 int part = var_to_partition (map: m_map, var: lhs);
5536 gcc_assert (m_vars[part] != NULL_TREE);
5537 lhs = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (rhs1), m_vars[part]);
5538 insert_before (g: gimple_build_assign (lhs, rhs1));
5539 return;
5540 }
5541 }
5542 if (gimple_store_p (gs: stmt))
5543 {
5544 tree rhs1 = gimple_assign_rhs1 (gs: stmt);
5545 if (TREE_CODE (rhs1) == SSA_NAME
5546 && (m_names == NULL
5547 || !bitmap_bit_p (m_names, SSA_NAME_VERSION (rhs1))))
5548 {
5549 gimple *g = SSA_NAME_DEF_STMT (rhs1);
5550 m_loc = gimple_location (g);
5551 lhs = gimple_assign_lhs (gs: stmt);
5552 if (is_gimple_assign (gs: g) && !mergeable_op (stmt: g))
5553 switch (gimple_assign_rhs_code (gs: g))
5554 {
5555 case LSHIFT_EXPR:
5556 case RSHIFT_EXPR:
5557 lower_shift_stmt (obj: lhs, stmt: g);
5558 handled:
5559 m_gsi = gsi_for_stmt (stmt);
5560 unlink_stmt_vdef (stmt);
5561 release_ssa_name (name: gimple_vdef (g: stmt));
5562 gsi_remove (&m_gsi, true);
5563 return;
5564 case MULT_EXPR:
5565 case TRUNC_DIV_EXPR:
5566 case TRUNC_MOD_EXPR:
5567 lower_muldiv_stmt (obj: lhs, stmt: g);
5568 goto handled;
5569 case FIX_TRUNC_EXPR:
5570 lower_float_conv_stmt (obj: lhs, stmt: g);
5571 goto handled;
5572 case REALPART_EXPR:
5573 case IMAGPART_EXPR:
5574 lower_cplxpart_stmt (obj: lhs, stmt: g);
5575 goto handled;
5576 case VIEW_CONVERT_EXPR:
5577 {
5578 tree rhs1 = gimple_assign_rhs1 (gs: g);
5579 rhs1 = TREE_OPERAND (rhs1, 0);
5580 if (!INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
5581 && !POINTER_TYPE_P (TREE_TYPE (rhs1)))
5582 {
5583 tree ltype = TREE_TYPE (rhs1);
5584 addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (lhs));
5585 ltype
5586 = build_qualified_type (ltype,
5587 TYPE_QUALS (TREE_TYPE (lhs))
5588 | ENCODE_QUAL_ADDR_SPACE (as));
5589 lhs = build1 (VIEW_CONVERT_EXPR, ltype, lhs);
5590 gimple_assign_set_lhs (gs: stmt, lhs);
5591 gimple_assign_set_rhs1 (gs: stmt, rhs: rhs1);
5592 gimple_assign_set_rhs_code (s: stmt, TREE_CODE (rhs1));
5593 update_stmt (s: stmt);
5594 return;
5595 }
5596 }
5597 break;
5598 default:
5599 break;
5600 }
5601 else if (optimizable_arith_overflow (stmt: g) == 3)
5602 {
5603 lower_call (obj: lhs, stmt: g);
5604 goto handled;
5605 }
5606 m_loc = gimple_location (g: stmt);
5607 }
5608 }
5609 if (mergeable_op (stmt)
5610 || gimple_store_p (gs: stmt)
5611 || gimple_assign_load_p (stmt)
5612 || eq_p
5613 || mergeable_cast_p)
5614 {
5615 lhs = lower_mergeable_stmt (stmt, cmp_code, cmp_op1, cmp_op2);
5616 if (!eq_p)
5617 return;
5618 }
5619 else if (cmp_code != ERROR_MARK)
5620 lhs = lower_comparison_stmt (stmt, cmp_code, cmp_op1, cmp_op2);
5621 if (cmp_code != ERROR_MARK)
5622 {
5623 if (gimple_code (g: stmt) == GIMPLE_COND)
5624 {
5625 gcond *cstmt = as_a <gcond *> (p: stmt);
5626 gimple_cond_set_lhs (gs: cstmt, lhs);
5627 gimple_cond_set_rhs (gs: cstmt, boolean_false_node);
5628 gimple_cond_set_code (gs: cstmt, code: cmp_code);
5629 update_stmt (s: stmt);
5630 return;
5631 }
5632 if (gimple_assign_rhs_code (gs: stmt) == COND_EXPR)
5633 {
5634 tree cond = build2 (cmp_code, boolean_type_node, lhs,
5635 boolean_false_node);
5636 gimple_assign_set_rhs1 (gs: stmt, rhs: cond);
5637 lhs = gimple_assign_lhs (gs: stmt);
5638 gcc_assert (TREE_CODE (TREE_TYPE (lhs)) != BITINT_TYPE
5639 || (bitint_precision_kind (TREE_TYPE (lhs))
5640 <= bitint_prec_middle));
5641 update_stmt (s: stmt);
5642 return;
5643 }
5644 gimple_assign_set_rhs1 (gs: stmt, rhs: lhs);
5645 gimple_assign_set_rhs2 (gs: stmt, boolean_false_node);
5646 gimple_assign_set_rhs_code (s: stmt, code: cmp_code);
5647 update_stmt (s: stmt);
5648 return;
5649 }
5650 if (final_cast_p)
5651 {
5652 tree lhs_type = TREE_TYPE (lhs);
5653 /* Add support for 3 or more limbs filled in from normal integral
5654 type if this assert fails. If no target chooses limb mode smaller
5655 than half of largest supported normal integral type, this will not
5656 be needed. */
5657 gcc_assert (TYPE_PRECISION (lhs_type) <= 2 * limb_prec);
5658 gimple *g;
5659 if ((TREE_CODE (lhs_type) == BITINT_TYPE
5660 && bitint_precision_kind (type: lhs_type) == bitint_prec_middle)
5661 || POINTER_TYPE_P (lhs_type))
5662 lhs_type = build_nonstandard_integer_type (TYPE_PRECISION (lhs_type),
5663 TYPE_UNSIGNED (lhs_type));
5664 m_data_cnt = 0;
5665 tree rhs1 = gimple_assign_rhs1 (gs: stmt);
5666 tree r1 = handle_operand (op: rhs1, size_int (0));
5667 if (!useless_type_conversion_p (lhs_type, TREE_TYPE (r1)))
5668 r1 = add_cast (type: lhs_type, val: r1);
5669 if (TYPE_PRECISION (lhs_type) > limb_prec)
5670 {
5671 m_data_cnt = 0;
5672 m_first = false;
5673 tree r2 = handle_operand (op: rhs1, size_int (1));
5674 r2 = add_cast (type: lhs_type, val: r2);
5675 g = gimple_build_assign (make_ssa_name (var: lhs_type), LSHIFT_EXPR, r2,
5676 build_int_cst (unsigned_type_node,
5677 limb_prec));
5678 insert_before (g);
5679 g = gimple_build_assign (make_ssa_name (var: lhs_type), BIT_IOR_EXPR, r1,
5680 gimple_assign_lhs (gs: g));
5681 insert_before (g);
5682 r1 = gimple_assign_lhs (gs: g);
5683 }
5684 if (lhs_type != TREE_TYPE (lhs))
5685 g = gimple_build_assign (lhs, NOP_EXPR, r1);
5686 else
5687 g = gimple_build_assign (lhs, r1);
5688 gsi_replace (&m_gsi, g, true);
5689 return;
5690 }
5691 if (is_gimple_assign (gs: stmt))
5692 switch (gimple_assign_rhs_code (gs: stmt))
5693 {
5694 case LSHIFT_EXPR:
5695 case RSHIFT_EXPR:
5696 lower_shift_stmt (NULL_TREE, stmt);
5697 return;
5698 case MULT_EXPR:
5699 case TRUNC_DIV_EXPR:
5700 case TRUNC_MOD_EXPR:
5701 lower_muldiv_stmt (NULL_TREE, stmt);
5702 return;
5703 case FIX_TRUNC_EXPR:
5704 case FLOAT_EXPR:
5705 lower_float_conv_stmt (NULL_TREE, stmt);
5706 return;
5707 case REALPART_EXPR:
5708 case IMAGPART_EXPR:
5709 lower_cplxpart_stmt (NULL_TREE, stmt);
5710 return;
5711 case COMPLEX_EXPR:
5712 lower_complexexpr_stmt (stmt);
5713 return;
5714 default:
5715 break;
5716 }
5717 gcc_unreachable ();
5718}
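
/* A sketch of the final_cast_p handling above: for

     unsigned _BitInt(256) x;
     unsigned long long y = (unsigned long long) x;

   at most the two least significant limbs are read and, when the
   destination is wider than one limb, combined as

     y = (T) r1 | ((T) r2 << LIMB_PREC);

   which is why the gcc_assert above limits the destination precision
   to two limbs.  */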
5719
5720/* Helper for walk_non_aliased_vuses. Determine if we arrived at
5721 the desired memory state. */
5722
5723void *
5724vuse_eq (ao_ref *, tree vuse1, void *data)
5725{
5726 tree vuse2 = (tree) data;
5727 if (vuse1 == vuse2)
5728 return data;
5729
5730 return NULL;
5731}
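
/* Usage sketch (see before_dom_children below): with REF describing
   the load and LVOP its VUSE,

     walk_non_aliased_vuses (&ref, vuse, false, vuse_eq, NULL, NULL,
			     limit, lvop)

   returns non-NULL iff the walk from VUSE reaches LVOP without
   hitting a store that might clobber REF, i.e. iff the load still
   sees the same memory state at the later point.  */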
5732
5733/* Return true if STMT uses a library function and so needs to take
5734 the address of its inputs. We need to avoid bit-fields in those
5735 cases, and similarly any overlap between the destination and
5736 source limb arrays. */
5737
5738bool
5739stmt_needs_operand_addr (gimple *stmt)
5740{
5741 if (is_gimple_assign (gs: stmt))
5742 switch (gimple_assign_rhs_code (gs: stmt))
5743 {
5744 case MULT_EXPR:
5745 case TRUNC_DIV_EXPR:
5746 case TRUNC_MOD_EXPR:
5747 case FLOAT_EXPR:
5748 return true;
5749 default:
5750 break;
5751 }
5752 else if (gimple_call_internal_p (gs: stmt, fn: IFN_MUL_OVERFLOW)
5753 || gimple_call_internal_p (gs: stmt, fn: IFN_UBSAN_CHECK_MUL))
5754 return true;
5755 return false;
5756}
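
/* E.g. a large/huge _BitInt multiplication or division is not
   expanded inline but lowered to a call to a libgcc routine
   (__mulbitint3, __divmodbitint4) which accesses the operand and
   result limbs through pointers; a bit-field has no addressable
   location of its own, and overlapping source and destination limb
   arrays could be clobbered while the routine is still reading the
   sources, hence the checks above.  */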
5757
5758/* Dominator walker used to discover which large/huge _BitInt
5759 loads could be sunk into all their uses. */
5760
5761class bitint_dom_walker : public dom_walker
5762{
5763public:
5764 bitint_dom_walker (bitmap names, bitmap loads)
5765 : dom_walker (CDI_DOMINATORS), m_names (names), m_loads (loads) {}
5766
5767 edge before_dom_children (basic_block) final override;
5768
5769private:
5770 bitmap m_names, m_loads;
5771};
5772
5773edge
5774bitint_dom_walker::before_dom_children (basic_block bb)
5775{
5776 gphi *phi = get_virtual_phi (bb);
5777 tree vop;
5778 if (phi)
5779 vop = gimple_phi_result (gs: phi);
5780 else if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
5781 vop = NULL_TREE;
5782 else
5783 vop = (tree) get_immediate_dominator (CDI_DOMINATORS, bb)->aux;
5784
5785 auto_vec<tree, 16> worklist;
5786 for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
5787 !gsi_end_p (i: gsi); gsi_next (i: &gsi))
5788 {
5789 gimple *stmt = gsi_stmt (i: gsi);
5790 if (is_gimple_debug (gs: stmt))
5791 continue;
5792
5793 if (!vop && gimple_vuse (g: stmt))
5794 vop = gimple_vuse (g: stmt);
5795
5796 tree cvop = vop;
5797 if (gimple_vdef (g: stmt))
5798 vop = gimple_vdef (g: stmt);
5799
5800 tree lhs = gimple_get_lhs (stmt);
5801 if (lhs
5802 && TREE_CODE (lhs) == SSA_NAME
5803 && TREE_CODE (TREE_TYPE (lhs)) == BITINT_TYPE
5804 && bitint_precision_kind (TREE_TYPE (lhs)) >= bitint_prec_large
5805 && !bitmap_bit_p (m_names, SSA_NAME_VERSION (lhs)))
5806 /* If lhs of stmt is large/huge _BitInt SSA_NAME not in m_names,
5807 it means it will be handled in a loop or straight line code
5808 at the location of its (ultimate) immediate use, so for
5809 vop checking purposes check these only at the ultimate
5810 immediate use. */
5811 continue;
5812
5813 ssa_op_iter oi;
5814 use_operand_p use_p;
5815 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, oi, SSA_OP_USE)
5816 {
5817 tree s = USE_FROM_PTR (use_p);
5818 if (TREE_CODE (TREE_TYPE (s)) == BITINT_TYPE
5819 && bitint_precision_kind (TREE_TYPE (s)) >= bitint_prec_large)
5820 worklist.safe_push (obj: s);
5821 }
5822
5823 bool needs_operand_addr = stmt_needs_operand_addr (stmt);
5824 while (worklist.length () > 0)
5825 {
5826 tree s = worklist.pop ();
5827
5828 if (!bitmap_bit_p (m_names, SSA_NAME_VERSION (s)))
5829 {
5830 gimple *g = SSA_NAME_DEF_STMT (s);
5831 needs_operand_addr |= stmt_needs_operand_addr (stmt: g);
5832 FOR_EACH_SSA_USE_OPERAND (use_p, g, oi, SSA_OP_USE)
5833 {
5834 tree s2 = USE_FROM_PTR (use_p);
5835 if (TREE_CODE (TREE_TYPE (s2)) == BITINT_TYPE
5836 && (bitint_precision_kind (TREE_TYPE (s2))
5837 >= bitint_prec_large))
5838 worklist.safe_push (obj: s2);
5839 }
5840 continue;
5841 }
5842 if (!SSA_NAME_OCCURS_IN_ABNORMAL_PHI (s)
5843 && gimple_assign_cast_p (SSA_NAME_DEF_STMT (s)))
5844 {
5845 tree rhs = gimple_assign_rhs1 (SSA_NAME_DEF_STMT (s));
5846 if (TREE_CODE (rhs) == SSA_NAME
5847 && bitmap_bit_p (m_loads, SSA_NAME_VERSION (rhs)))
5848 s = rhs;
5849 else
5850 continue;
5851 }
5852 else if (!bitmap_bit_p (m_loads, SSA_NAME_VERSION (s)))
5853 continue;
5854
5855 gimple *g = SSA_NAME_DEF_STMT (s);
5856 tree rhs1 = gimple_assign_rhs1 (gs: g);
5857 if (needs_operand_addr
5858 && TREE_CODE (rhs1) == COMPONENT_REF
5859 && DECL_BIT_FIELD_TYPE (TREE_OPERAND (rhs1, 1)))
5860 {
5861 tree fld = TREE_OPERAND (rhs1, 1);
5862 /* For little-endian, we can allow as inputs bit-fields
5863 which start at a limb boundary. */
5864 if (DECL_OFFSET_ALIGN (fld) >= TYPE_ALIGN (TREE_TYPE (rhs1))
5865 && tree_fits_uhwi_p (DECL_FIELD_BIT_OFFSET (fld))
5866 && (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (fld))
5867 % limb_prec) == 0)
5868 ;
5869 else
5870 {
5871 bitmap_clear_bit (m_loads, SSA_NAME_VERSION (s));
5872 continue;
5873 }
5874 }
5875
5876 ao_ref ref;
5877 ao_ref_init (&ref, rhs1);
5878 tree lvop = gimple_vuse (g);
5879 unsigned limit = 64;
5880 tree vuse = cvop;
5881 if (vop != cvop
5882 && is_gimple_assign (gs: stmt)
5883 && gimple_store_p (gs: stmt)
5884 && (needs_operand_addr
5885 || !operand_equal_p (lhs, gimple_assign_rhs1 (gs: g), flags: 0)))
5886 vuse = vop;
5887 if (vuse != lvop
5888 && walk_non_aliased_vuses (&ref, vuse, false, vuse_eq,
5889 NULL, NULL, limit, lvop) == NULL)
5890 bitmap_clear_bit (m_loads, SSA_NAME_VERSION (s));
5891 }
5892 }
5893
5894 bb->aux = (void *) vop;
5895 return NULL;
5896}
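
/* Illustrative example of the sinking decision above: given

     _1 = x;			large/huge _BitInt load
     y = z;			store that might alias x
     use of _1;

   the walk from the use's virtual operand back to the load's VUSE
   fails, _1 is removed from m_loads and the load stays materialized;
   without the possibly aliasing store the load could instead be
   replayed limb by limb at its use.  */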
5897
5898}
5899
5900/* Replacement for normal processing of STMT in tree-ssa-coalesce.cc
5901 build_ssa_conflict_graph.
5902 The differences are:
5903 1) don't process assignments with large/huge _BitInt lhs not in NAMES
5904 2) for large/huge _BitInt multiplication/division/modulo process def
5905 only after processing uses rather than before to make uses conflict
5906 with the definition
5907 3) for large/huge _BitInt uses not in NAMES mark the uses of their
5908 SSA_NAME_DEF_STMT (recursively), because those uses will be sunk into
5909 the final statement. */
5910
5911void
5912build_bitint_stmt_ssa_conflicts (gimple *stmt, live_track *live,
5913 ssa_conflicts *graph, bitmap names,
5914 void (*def) (live_track *, tree,
5915 ssa_conflicts *),
5916 void (*use) (live_track *, tree))
5917{
5918 bool muldiv_p = false;
5919 tree lhs = NULL_TREE;
5920 if (is_gimple_assign (gs: stmt))
5921 {
5922 lhs = gimple_assign_lhs (gs: stmt);
5923 if (TREE_CODE (lhs) == SSA_NAME)
5924 {
5925 tree type = TREE_TYPE (lhs);
5926 if (TREE_CODE (type) == COMPLEX_TYPE)
5927 type = TREE_TYPE (type);
5928 if (TREE_CODE (type) == BITINT_TYPE
5929 && bitint_precision_kind (type) >= bitint_prec_large)
5930 {
5931 if (!bitmap_bit_p (names, SSA_NAME_VERSION (lhs)))
5932 return;
5933 switch (gimple_assign_rhs_code (gs: stmt))
5934 {
5935 case MULT_EXPR:
5936 case TRUNC_DIV_EXPR:
5937 case TRUNC_MOD_EXPR:
5938 muldiv_p = true;
5939 default:
5940 break;
5941 }
5942 }
5943 }
5944 }
5945
5946 ssa_op_iter iter;
5947 tree var;
5948 if (!muldiv_p)
5949 {
5950 /* For stmts with more than one SSA_NAME definition pretend all the
5951 SSA_NAME outputs but the first one are live at this point, so
5952 that conflicts are added between all of them even when they are
5953 not really live after the asm, because expansion might copy
5954 those into pseudos after the asm and if multiple outputs share
5955 the same partition, it might overwrite those that should be
5956 live. E.g.
5957 asm volatile (".." : "=r" (a), "=r" (b) : "0" (a), "1" (a));
5958 return a;
5959 See PR70593. */
5960 bool first = true;
5961 FOR_EACH_SSA_TREE_OPERAND (var, stmt, iter, SSA_OP_DEF)
5962 if (first)
5963 first = false;
5964 else
5965 use (live, var);
5966
5967 FOR_EACH_SSA_TREE_OPERAND (var, stmt, iter, SSA_OP_DEF)
5968 def (live, var, graph);
5969 }
5970
5971 auto_vec<tree, 16> worklist;
5972 FOR_EACH_SSA_TREE_OPERAND (var, stmt, iter, SSA_OP_USE)
5973 {
5974 tree type = TREE_TYPE (var);
5975 if (TREE_CODE (type) == COMPLEX_TYPE)
5976 type = TREE_TYPE (type);
5977 if (TREE_CODE (type) == BITINT_TYPE
5978 && bitint_precision_kind (type) >= bitint_prec_large)
5979 {
5980 if (bitmap_bit_p (names, SSA_NAME_VERSION (var)))
5981 use (live, var);
5982 else
5983 worklist.safe_push (obj: var);
5984 }
5985 }
5986
5987 while (worklist.length () > 0)
5988 {
5989 tree s = worklist.pop ();
5990 FOR_EACH_SSA_TREE_OPERAND (var, SSA_NAME_DEF_STMT (s), iter, SSA_OP_USE)
5991 {
5992 tree type = TREE_TYPE (var);
5993 if (TREE_CODE (type) == COMPLEX_TYPE)
5994 type = TREE_TYPE (type);
5995 if (TREE_CODE (type) == BITINT_TYPE
5996 && bitint_precision_kind (type) >= bitint_prec_large)
5997 {
5998 if (bitmap_bit_p (names, SSA_NAME_VERSION (var)))
5999 use (live, var);
6000 else
6001 worklist.safe_push (obj: var);
6002 }
6003 }
6004 }
6005
6006 if (muldiv_p)
6007 def (live, lhs, graph);
6008}
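
/* Illustration of difference 2) above: for

     x_3 = x_1 * y_2;

   the uses x_1 and y_2 are processed before the definition x_3, so
   x_3 conflicts with both and coalescing can't assign it to the same
   limb array as either operand; the multiplication is done by a
   library routine which writes result limbs while source limbs may
   still be read, so the destination must not overlap the sources.  */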
6009
6010/* If STMT is .{ADD,SUB,MUL}_OVERFLOW with INTEGER_CST arguments,
6011 return the largest bitint_prec_kind of them, otherwise return
6012 bitint_prec_small. */
6013
6014static bitint_prec_kind
6015arith_overflow_arg_kind (gimple *stmt)
6016{
6017 bitint_prec_kind ret = bitint_prec_small;
6018 if (is_gimple_call (gs: stmt) && gimple_call_internal_p (gs: stmt))
6019 switch (gimple_call_internal_fn (gs: stmt))
6020 {
6021 case IFN_ADD_OVERFLOW:
6022 case IFN_SUB_OVERFLOW:
6023 case IFN_MUL_OVERFLOW:
6024 for (int i = 0; i < 2; ++i)
6025 {
6026 tree a = gimple_call_arg (gs: stmt, index: i);
6027 if (TREE_CODE (a) == INTEGER_CST
6028 && TREE_CODE (TREE_TYPE (a)) == BITINT_TYPE)
6029 {
6030 bitint_prec_kind kind = bitint_precision_kind (TREE_TYPE (a));
6031 ret = MAX (ret, kind);
6032 }
6033 }
6034 break;
6035 default:
6036 break;
6037 }
6038 return ret;
6039}
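
/* E.g. (illustrative) for

     unsigned long r;
     ovf = __builtin_mul_overflow ((unsigned _BitInt(300)) a, b, &r);

   constant folding may leave a large _BitInt INTEGER_CST as an
   argument of .MUL_OVERFLOW while the complex lhs itself isn't a
   large/huge _BitInt; the scans below use this helper so such calls
   are still noticed and lowered.  */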
6040
6041/* Entry point for _BitInt(N) operation lowering during optimization. */
6042
6043static unsigned int
6044gimple_lower_bitint (void)
6045{
6046 small_max_prec = mid_min_prec = large_min_prec = huge_min_prec = 0;
6047 limb_prec = 0;
6048
6049 unsigned int i;
6050 for (i = 0; i < num_ssa_names; ++i)
6051 {
6052 tree s = ssa_name (i);
6053 if (s == NULL)
6054 continue;
6055 tree type = TREE_TYPE (s);
6056 if (TREE_CODE (type) == COMPLEX_TYPE)
6057 {
6058 if (arith_overflow_arg_kind (SSA_NAME_DEF_STMT (s))
6059 != bitint_prec_small)
6060 break;
6061 type = TREE_TYPE (type);
6062 }
6063 if (TREE_CODE (type) == BITINT_TYPE
6064 && bitint_precision_kind (type) != bitint_prec_small)
6065 break;
6066 /* We need to also rewrite stores of large/huge _BitInt INTEGER_CSTs
6067 into memory; such a function may have no large/huge SSA_NAMEs at all. */
6068 if (SSA_NAME_IS_VIRTUAL_OPERAND (s))
6069 {
6070 gimple *g = SSA_NAME_DEF_STMT (s);
6071 if (is_gimple_assign (gs: g) && gimple_store_p (gs: g))
6072 {
6073 tree t = gimple_assign_rhs1 (gs: g);
6074 if (TREE_CODE (TREE_TYPE (t)) == BITINT_TYPE
6075 && (bitint_precision_kind (TREE_TYPE (t))
6076 >= bitint_prec_large))
6077 break;
6078 }
6079 }
6080 /* Similarly, e.g. with -frounding-math casts from _BitInt INTEGER_CSTs
6081 to floating point types need to be rewritten. */
6082 else if (SCALAR_FLOAT_TYPE_P (type))
6083 {
6084 gimple *g = SSA_NAME_DEF_STMT (s);
6085 if (is_gimple_assign (gs: g) && gimple_assign_rhs_code (gs: g) == FLOAT_EXPR)
6086 {
6087 tree t = gimple_assign_rhs1 (gs: g);
6088 if (TREE_CODE (t) == INTEGER_CST
6089 && TREE_CODE (TREE_TYPE (t)) == BITINT_TYPE
6090 && (bitint_precision_kind (TREE_TYPE (t))
6091 != bitint_prec_small))
6092 break;
6093 }
6094 }
6095 }
6096 if (i == num_ssa_names)
6097 return 0;
6098
6099 basic_block bb;
6100 auto_vec<gimple *, 4> switch_statements;
6101 FOR_EACH_BB_FN (bb, cfun)
6102 {
6103 if (gswitch *swtch = safe_dyn_cast <gswitch *> (p: *gsi_last_bb (bb)))
6104 {
6105 tree idx = gimple_switch_index (gs: swtch);
6106 if (TREE_CODE (TREE_TYPE (idx)) != BITINT_TYPE
6107 || bitint_precision_kind (TREE_TYPE (idx)) < bitint_prec_large)
6108 continue;
6109
6110 if (optimize)
6111 group_case_labels_stmt (swtch);
6112 if (gimple_switch_num_labels (gs: swtch) == 1)
6113 {
6114 single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
6115 gimple_stmt_iterator gsi = gsi_for_stmt (swtch);
6116 gsi_remove (&gsi, true);
6117 }
6118 else
6119 switch_statements.safe_push (obj: swtch);
6120 }
6121 }
6122
6123 if (!switch_statements.is_empty ())
6124 {
6125 bool expanded = false;
6126 gimple *stmt;
6127 unsigned int j;
6128 i = 0;
6129 FOR_EACH_VEC_ELT (switch_statements, j, stmt)
6130 {
6131 gswitch *swtch = as_a<gswitch *> (p: stmt);
6132 tree_switch_conversion::switch_decision_tree dt (swtch);
6133 expanded |= dt.analyze_switch_statement ();
6134 }
6135
6136 if (expanded)
6137 {
6138 free_dominance_info (CDI_DOMINATORS);
6139 free_dominance_info (CDI_POST_DOMINATORS);
6140 mark_virtual_operands_for_renaming (cfun);
6141 cleanup_tree_cfg (TODO_update_ssa);
6142 }
6143 }
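
  /* E.g. (illustrative)

       switch (x)		with large/huge _BitInt x
	 {
	 case 0wb: ...
	 case 5wb ... 10wb: ...
	 default: ...
	 }

     is expanded by the decision tree code above into a sequence of
     GIMPLE comparisons and branches which the rest of the pass can
     lower limb-wise; a switch with just the default label is instead
     replaced by a fallthru edge.  */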
6144
6145 struct bitint_large_huge large_huge;
6146 bool has_large_huge_parm_result = false;
6147 bool has_large_huge = false;
6148 unsigned int ret = 0, first_large_huge = ~0U;
6149 bool edge_insertions = false;
6150 for (; i < num_ssa_names; ++i)
6151 {
6152 tree s = ssa_name (i);
6153 if (s == NULL)
6154 continue;
6155 tree type = TREE_TYPE (s);
6156 if (TREE_CODE (type) == COMPLEX_TYPE)
6157 {
6158 if (arith_overflow_arg_kind (SSA_NAME_DEF_STMT (s))
6159 >= bitint_prec_large)
6160 has_large_huge = true;
6161 type = TREE_TYPE (type);
6162 }
6163 if (TREE_CODE (type) == BITINT_TYPE
6164 && bitint_precision_kind (type) >= bitint_prec_large)
6165 {
6166 if (first_large_huge == ~0U)
6167 first_large_huge = i;
6168 gimple *stmt = SSA_NAME_DEF_STMT (s), *g;
6169 gimple_stmt_iterator gsi;
6170 tree_code rhs_code;
6171 /* Unoptimize certain constructs to simpler alternatives to
6172 avoid having to lower all of them. */
6173 if (is_gimple_assign (gs: stmt) && gimple_bb (g: stmt))
6174 switch (rhs_code = gimple_assign_rhs_code (gs: stmt))
6175 {
6176 default:
6177 break;
6178 case MULT_EXPR:
6179 case TRUNC_DIV_EXPR:
6180 case TRUNC_MOD_EXPR:
6181 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (s))
6182 {
6183 location_t loc = gimple_location (g: stmt);
6184 gsi = gsi_for_stmt (stmt);
6185 tree rhs1 = gimple_assign_rhs1 (gs: stmt);
6186 tree rhs2 = gimple_assign_rhs2 (gs: stmt);
6187 /* For multiplication and division whose lhs and one or
6188 both operands occur in abnormal PHIs ((ab)), force the
6189 operands into new SSA_NAMEs to avoid coalescing failures. */
6190 if (TREE_CODE (rhs1) == SSA_NAME
6191 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs1))
6192 {
6193 first_large_huge = 0;
6194 tree t = make_ssa_name (TREE_TYPE (rhs1));
6195 g = gimple_build_assign (t, SSA_NAME, rhs1);
6196 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
6197 gimple_set_location (g, location: loc);
6198 gimple_assign_set_rhs1 (gs: stmt, rhs: t);
6199 if (rhs1 == rhs2)
6200 {
6201 gimple_assign_set_rhs2 (gs: stmt, rhs: t);
6202 rhs2 = t;
6203 }
6204 update_stmt (s: stmt);
6205 }
6206 if (TREE_CODE (rhs2) == SSA_NAME
6207 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs2))
6208 {
6209 first_large_huge = 0;
6210 tree t = make_ssa_name (TREE_TYPE (rhs2));
6211 g = gimple_build_assign (t, SSA_NAME, rhs2);
6212 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
6213 gimple_set_location (g, location: loc);
6214 gimple_assign_set_rhs2 (gs: stmt, rhs: t);
6215 update_stmt (s: stmt);
6216 }
6217 }
6218 break;
6219 case LROTATE_EXPR:
6220 case RROTATE_EXPR:
6221 {
6222 first_large_huge = 0;
6223 location_t loc = gimple_location (g: stmt);
6224 gsi = gsi_for_stmt (stmt);
6225 tree rhs1 = gimple_assign_rhs1 (gs: stmt);
6226 tree type = TREE_TYPE (rhs1);
6227 tree n = gimple_assign_rhs2 (gs: stmt), m;
6228 tree p = build_int_cst (TREE_TYPE (n),
6229 TYPE_PRECISION (type));
6230 if (TREE_CODE (n) == INTEGER_CST)
6231 m = fold_build2 (MINUS_EXPR, TREE_TYPE (n), p, n);
6232 else
6233 {
6234 m = make_ssa_name (TREE_TYPE (n));
6235 g = gimple_build_assign (m, MINUS_EXPR, p, n);
6236 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
6237 gimple_set_location (g, location: loc);
6238 }
6239 if (!TYPE_UNSIGNED (type))
6240 {
6241 tree utype = build_bitint_type (TYPE_PRECISION (type),
6242 1);
6243 if (TREE_CODE (rhs1) == INTEGER_CST)
6244 rhs1 = fold_convert (utype, rhs1);
6245 else
6246 {
6247 tree t = make_ssa_name (var: utype);
6248 g = gimple_build_assign (t, NOP_EXPR, rhs1);
6249 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
6250 gimple_set_location (g, location: loc);
 rhs1 = t;
6251 }
6252 }
6253 g = gimple_build_assign (make_ssa_name (TREE_TYPE (rhs1)),
6254 rhs_code == LROTATE_EXPR
6255 ? LSHIFT_EXPR : RSHIFT_EXPR,
6256 rhs1, n);
6257 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
6258 gimple_set_location (g, location: loc);
6259 tree op1 = gimple_assign_lhs (gs: g);
6260 g = gimple_build_assign (make_ssa_name (TREE_TYPE (rhs1)),
6261 rhs_code == LROTATE_EXPR
6262 ? RSHIFT_EXPR : LSHIFT_EXPR,
6263 rhs1, m);
6264 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
6265 gimple_set_location (g, location: loc);
6266 tree op2 = gimple_assign_lhs (gs: g);
6267 tree lhs = gimple_assign_lhs (gs: stmt);
6268 if (!TYPE_UNSIGNED (type))
6269 {
6270 g = gimple_build_assign (make_ssa_name (TREE_TYPE (op1)),
6271 BIT_IOR_EXPR, op1, op2);
6272 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
6273 gimple_set_location (g, location: loc);
6274 g = gimple_build_assign (lhs, NOP_EXPR,
6275 gimple_assign_lhs (gs: g));
6276 }
6277 else
6278 g = gimple_build_assign (lhs, BIT_IOR_EXPR, op1, op2);
6279 gsi_replace (&gsi, g, true);
6280 gimple_set_location (g, location: loc);
6281 }
6282 break;
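	    /* I.e. (sketch) the rotate above is unoptimized into shifts,

		 x <<rot n  ->  ((utype) x << n) | ((utype) x >> (PREC - n))

	       using the unsigned same-precision _BitInt type so that the
	       right shift is logical, with a final cast back to the
	       signed type when needed.  */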
6283 case ABS_EXPR:
6284 case ABSU_EXPR:
6285 case MIN_EXPR:
6286 case MAX_EXPR:
6287 case COND_EXPR:
6288 first_large_huge = 0;
6289 gsi = gsi_for_stmt (stmt);
6290 tree lhs = gimple_assign_lhs (gs: stmt);
6291 tree rhs1 = gimple_assign_rhs1 (gs: stmt), rhs2 = NULL_TREE;
6292 location_t loc = gimple_location (g: stmt);
6293 if (rhs_code == ABS_EXPR)
6294 g = gimple_build_cond (LT_EXPR, rhs1,
6295 build_zero_cst (TREE_TYPE (rhs1)),
6296 NULL_TREE, NULL_TREE);
6297 else if (rhs_code == ABSU_EXPR)
6298 {
6299 rhs2 = make_ssa_name (TREE_TYPE (lhs));
6300 g = gimple_build_assign (rhs2, NOP_EXPR, rhs1);
6301 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
6302 gimple_set_location (g, location: loc);
6303 g = gimple_build_cond (LT_EXPR, rhs1,
6304 build_zero_cst (TREE_TYPE (rhs1)),
6305 NULL_TREE, NULL_TREE);
6306 rhs1 = rhs2;
6307 }
6308 else if (rhs_code == MIN_EXPR || rhs_code == MAX_EXPR)
6309 {
6310 rhs2 = gimple_assign_rhs2 (gs: stmt);
6311 if (TREE_CODE (rhs1) == INTEGER_CST)
6312 std::swap (a&: rhs1, b&: rhs2);
6313 g = gimple_build_cond (LT_EXPR, rhs1, rhs2,
6314 NULL_TREE, NULL_TREE);
6315 if (rhs_code == MAX_EXPR)
6316 std::swap (a&: rhs1, b&: rhs2);
6317 }
6318 else
6319 {
6320 g = gimple_build_cond (NE_EXPR, rhs1,
6321 build_zero_cst (TREE_TYPE (rhs1)),
6322 NULL_TREE, NULL_TREE);
6323 rhs1 = gimple_assign_rhs2 (gs: stmt);
6324 rhs2 = gimple_assign_rhs3 (gs: stmt);
6325 }
6326 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
6327 gimple_set_location (g, location: loc);
6328 edge e1 = split_block (gsi_bb (i: gsi), g);
6329 edge e2 = split_block (e1->dest, (gimple *) NULL);
6330 edge e3 = make_edge (e1->src, e2->dest, EDGE_FALSE_VALUE);
6331 e3->probability = profile_probability::even ();
6332 e1->flags = EDGE_TRUE_VALUE;
6333 e1->probability = e3->probability.invert ();
6334 if (dom_info_available_p (CDI_DOMINATORS))
6335 set_immediate_dominator (CDI_DOMINATORS, e2->dest, e1->src);
6336 if (rhs_code == ABS_EXPR || rhs_code == ABSU_EXPR)
6337 {
6338 gsi = gsi_after_labels (bb: e1->dest);
6339 g = gimple_build_assign (make_ssa_name (TREE_TYPE (rhs1)),
6340 NEGATE_EXPR, rhs1);
6341 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
6342 gimple_set_location (g, location: loc);
6343 rhs2 = gimple_assign_lhs (gs: g);
6344 std::swap (a&: rhs1, b&: rhs2);
6345 }
6346 gsi = gsi_for_stmt (stmt);
6347 gsi_remove (&gsi, true);
6348 gphi *phi = create_phi_node (lhs, e2->dest);
6349 add_phi_arg (phi, rhs1, e2, UNKNOWN_LOCATION);
6350 add_phi_arg (phi, rhs2, e3, UNKNOWN_LOCATION);
6351 break;
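	    /* The rewrites above replace e.g.

		 x = ABS_EXPR <y>;

	       by a branch, a negation and a PHI, roughly

		 if (y < 0) tmp_1 = -y;
		 x = PHI <y, tmp_1>;

	       and similarly MIN/MAX by a comparison selecting an operand
	       and COND_EXPR by an explicit branch, so only comparisons,
	       negation and PHIs remain to be lowered limb-wise.  */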
6352 }
6353 }
6354 /* We need to also rewrite stores of large/huge _BitInt INTEGER_CSTs
6355 into memory; such a function may have no large/huge SSA_NAMEs at all. */
6356 else if (SSA_NAME_IS_VIRTUAL_OPERAND (s))
6357 {
6358 gimple *g = SSA_NAME_DEF_STMT (s);
6359 if (is_gimple_assign (gs: g) && gimple_store_p (gs: g))
6360 {
6361 tree t = gimple_assign_rhs1 (gs: g);
6362 if (TREE_CODE (TREE_TYPE (t)) == BITINT_TYPE
6363 && (bitint_precision_kind (TREE_TYPE (t))
6364 >= bitint_prec_large))
6365 has_large_huge = true;
6366 }
6367 }
6368 /* Similarly, e.g. with -frounding-math casts from _BitInt INTEGER_CSTs
6369 to floating point types need to be rewritten. */
6370 else if (SCALAR_FLOAT_TYPE_P (type))
6371 {
6372 gimple *g = SSA_NAME_DEF_STMT (s);
6373 if (is_gimple_assign (gs: g) && gimple_assign_rhs_code (gs: g) == FLOAT_EXPR)
6374 {
6375 tree t = gimple_assign_rhs1 (gs: g);
6376 if (TREE_CODE (t) == INTEGER_CST
6377 && TREE_CODE (TREE_TYPE (t)) == BITINT_TYPE
6378 && (bitint_precision_kind (TREE_TYPE (t))
6379 >= bitint_prec_large))
6380 has_large_huge = true;
6381 }
6382 }
6383 }
6384 for (i = first_large_huge; i < num_ssa_names; ++i)
6385 {
6386 tree s = ssa_name (i);
6387 if (s == NULL)
6388 continue;
6389 tree type = TREE_TYPE (s);
6390 if (TREE_CODE (type) == COMPLEX_TYPE)
6391 type = TREE_TYPE (type);
6392 if (TREE_CODE (type) == BITINT_TYPE
6393 && bitint_precision_kind (type) >= bitint_prec_large)
6394 {
6395 use_operand_p use_p;
6396 gimple *use_stmt;
6397 has_large_huge = true;
6398 if (optimize
6399 && optimizable_arith_overflow (SSA_NAME_DEF_STMT (s)))
6400 continue;
6401 /* Ignore large/huge _BitInt SSA_NAMEs which have a single use in
6402 the same bb and could be handled in the same loop as the
6403 immediate use. */
6404 if (optimize
6405 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (s)
6406 && single_imm_use (var: s, use_p: &use_p, stmt: &use_stmt)
6407 && gimple_bb (SSA_NAME_DEF_STMT (s)) == gimple_bb (g: use_stmt))
6408 {
6409 if (mergeable_op (SSA_NAME_DEF_STMT (s)))
6410 {
6411 if (mergeable_op (stmt: use_stmt))
6412 continue;
6413 tree_code cmp_code = comparison_op (stmt: use_stmt, NULL, NULL);
6414 if (cmp_code == EQ_EXPR || cmp_code == NE_EXPR)
6415 continue;
6416 if (gimple_assign_cast_p (s: use_stmt))
6417 {
6418 tree lhs = gimple_assign_lhs (gs: use_stmt);
6419 if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
6420 /* Don't merge with VIEW_CONVERT_EXPRs to
6421 huge INTEGER_TYPEs used sometimes in memcpy
6422 expansion. */
6423 && (TREE_CODE (TREE_TYPE (lhs)) != INTEGER_TYPE
6424 || (TYPE_PRECISION (TREE_TYPE (lhs))
6425 <= MAX_FIXED_MODE_SIZE)))
6426 continue;
6427 }
6428 else if (gimple_store_p (gs: use_stmt)
6429 && is_gimple_assign (gs: use_stmt)
6430 && !gimple_has_volatile_ops (stmt: use_stmt)
6431 && !stmt_ends_bb_p (use_stmt))
6432 continue;
6433 }
6434 if (gimple_assign_cast_p (SSA_NAME_DEF_STMT (s)))
6435 {
6436 tree rhs1 = gimple_assign_rhs1 (SSA_NAME_DEF_STMT (s));
6437 if (TREE_CODE (rhs1) == VIEW_CONVERT_EXPR)
6438 {
6439 rhs1 = TREE_OPERAND (rhs1, 0);
6440 if (!INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
6441 && !POINTER_TYPE_P (TREE_TYPE (rhs1))
6442 && gimple_store_p (gs: use_stmt))
6443 continue;
6444 }
6445 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
6446 && ((is_gimple_assign (gs: use_stmt)
6447 && (gimple_assign_rhs_code (gs: use_stmt)
6448 != COMPLEX_EXPR))
6449 || gimple_code (g: use_stmt) == GIMPLE_COND)
6450 && (!gimple_store_p (gs: use_stmt)
6451 || (is_gimple_assign (gs: use_stmt)
6452 && !gimple_has_volatile_ops (stmt: use_stmt)
6453 && !stmt_ends_bb_p (use_stmt)))
6454 && (TREE_CODE (rhs1) != SSA_NAME
6455 || !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs1)))
6456 {
6457 if (is_gimple_assign (gs: use_stmt))
6458 switch (gimple_assign_rhs_code (gs: use_stmt))
6459 {
6460 case TRUNC_DIV_EXPR:
6461 case TRUNC_MOD_EXPR:
6462 case FLOAT_EXPR:
6463 /* For division, modulo and casts to floating
6464 point, avoid representing unsigned operands
6465 using negative prec if they were sign-extended
6466 from narrower precision. */
6467 if (TYPE_UNSIGNED (TREE_TYPE (s))
6468 && !TYPE_UNSIGNED (TREE_TYPE (rhs1))
6469 && (TYPE_PRECISION (TREE_TYPE (s))
6470 > TYPE_PRECISION (TREE_TYPE (rhs1))))
6471 goto force_name;
6472 /* FALLTHRU */
6473 case MULT_EXPR:
6474 if (TREE_CODE (TREE_TYPE (rhs1)) != BITINT_TYPE
6475 || (bitint_precision_kind (TREE_TYPE (rhs1))
6476 < bitint_prec_large))
6477 continue;
6478 /* Operations that use handle_operand_addr can't
6479 deal with nested casts. */
6480 if (TREE_CODE (rhs1) == SSA_NAME
6481 && gimple_assign_cast_p
6482 (SSA_NAME_DEF_STMT (rhs1))
6483 && has_single_use (var: rhs1)
6484 && (gimple_bb (SSA_NAME_DEF_STMT (rhs1))
6485 == gimple_bb (SSA_NAME_DEF_STMT (s))))
6486 goto force_name;
6487 break;
6488 case VIEW_CONVERT_EXPR:
6489 {
6490 tree lhs = gimple_assign_lhs (gs: use_stmt);
6491 /* Don't merge with VIEW_CONVERT_EXPRs to
6492 non-integral types. */
6493 if (!INTEGRAL_TYPE_P (TREE_TYPE (lhs)))
6494 goto force_name;
6495 /* Don't merge with VIEW_CONVERT_EXPRs to
6496 huge INTEGER_TYPEs used sometimes in memcpy
6497 expansion. */
6498 if (TREE_CODE (TREE_TYPE (lhs)) == INTEGER_TYPE
6499 && (TYPE_PRECISION (TREE_TYPE (lhs))
6500 > MAX_FIXED_MODE_SIZE))
6501 goto force_name;
6502 }
6503 break;
6504 default:
6505 break;
6506 }
6507 if (TREE_CODE (TREE_TYPE (rhs1)) != BITINT_TYPE
6508 || (bitint_precision_kind (TREE_TYPE (rhs1))
6509 < bitint_prec_large))
6510 continue;
6511 if ((TYPE_PRECISION (TREE_TYPE (rhs1))
6512 >= TYPE_PRECISION (TREE_TYPE (s)))
6513 && mergeable_op (stmt: use_stmt))
6514 continue;
6515 /* Prevent merging a widening non-mergeable cast
6516 on result of some narrower mergeable op
6517 together with later mergeable operations. E.g.
6518 result of _BitInt(223) addition shouldn't be
6519 sign-extended to _BitInt(513) and have another
6520 _BitInt(513) added to it, as handle_plus_minus
6521 with its PHI node handling inside of handle_cast
6522 will not work correctly. An exception is when
6523 use_stmt is a store; that case is handled directly
6524 in lower_mergeable_stmt. */
6525 if (TREE_CODE (rhs1) != SSA_NAME
6526 || !has_single_use (var: rhs1)
6527 || (gimple_bb (SSA_NAME_DEF_STMT (rhs1))
6528 != gimple_bb (SSA_NAME_DEF_STMT (s)))
6529 || !mergeable_op (SSA_NAME_DEF_STMT (rhs1))
6530 || gimple_store_p (gs: use_stmt))
6531 continue;
6532 if ((TYPE_PRECISION (TREE_TYPE (rhs1))
6533 < TYPE_PRECISION (TREE_TYPE (s)))
6534 && gimple_assign_cast_p (SSA_NAME_DEF_STMT (rhs1)))
6535 {
6536 /* Another exception is when the widening cast is
6537 of a mergeable same-precision cast of something
6538 not mergeable. */
6539 tree rhs2
6540 = gimple_assign_rhs1 (SSA_NAME_DEF_STMT (rhs1));
6541 if (TREE_CODE (TREE_TYPE (rhs2)) == BITINT_TYPE
6542 && (TYPE_PRECISION (TREE_TYPE (rhs1))
6543 == TYPE_PRECISION (TREE_TYPE (rhs2))))
6544 {
6545 if (TREE_CODE (rhs2) != SSA_NAME
6546 || !has_single_use (var: rhs2)
6547 || (gimple_bb (SSA_NAME_DEF_STMT (rhs2))
6548 != gimple_bb (SSA_NAME_DEF_STMT (s)))
6549 || !mergeable_op (SSA_NAME_DEF_STMT (rhs2)))
6550 continue;
6551 }
6552 }
6553 }
6554 }
6555 if (is_gimple_assign (SSA_NAME_DEF_STMT (s)))
6556 switch (gimple_assign_rhs_code (SSA_NAME_DEF_STMT (s)))
6557 {
6558 case IMAGPART_EXPR:
6559 {
6560 tree rhs1 = gimple_assign_rhs1 (SSA_NAME_DEF_STMT (s));
6561 rhs1 = TREE_OPERAND (rhs1, 0);
6562 if (TREE_CODE (rhs1) == SSA_NAME)
6563 {
6564 gimple *g = SSA_NAME_DEF_STMT (rhs1);
6565 if (optimizable_arith_overflow (stmt: g))
6566 continue;
6567 }
6568 }
6569 /* FALLTHRU */
6570 case LSHIFT_EXPR:
6571 case RSHIFT_EXPR:
6572 case MULT_EXPR:
6573 case TRUNC_DIV_EXPR:
6574 case TRUNC_MOD_EXPR:
6575 case FIX_TRUNC_EXPR:
6576 case REALPART_EXPR:
6577 if (gimple_store_p (gs: use_stmt)
6578 && is_gimple_assign (gs: use_stmt)
6579 && !gimple_has_volatile_ops (stmt: use_stmt)
6580 && !stmt_ends_bb_p (use_stmt))
6581 {
6582 tree lhs = gimple_assign_lhs (gs: use_stmt);
6583 /* As multiplication/division passes the address of the
6584 lhs to a library function which assumes it can extend
6585 it to a whole number of limbs, avoid merging those
6586 with bit-field stores. Don't allow it for
6587 shifts etc. either, so that the bit-field store
6588 handling doesn't have to be done everywhere. */
6589 if (TREE_CODE (lhs) == COMPONENT_REF
6590 && DECL_BIT_FIELD_TYPE (TREE_OPERAND (lhs, 1)))
6591 break;
6592 continue;
6593 }
6594 break;
6595 default:
6596 break;
6597 }
6598 }
6599
6600 /* Also ignore uninitialized uses. */
6601 if (SSA_NAME_IS_DEFAULT_DEF (s)
6602 && (!SSA_NAME_VAR (s) || VAR_P (SSA_NAME_VAR (s))))
6603 continue;
6604
6605 force_name:
6606 if (!large_huge.m_names)
6607 large_huge.m_names = BITMAP_ALLOC (NULL);
6608 bitmap_set_bit (large_huge.m_names, SSA_NAME_VERSION (s));
6609 if (has_single_use (var: s))
6610 {
6611 if (!large_huge.m_single_use_names)
6612 large_huge.m_single_use_names = BITMAP_ALLOC (NULL);
6613 bitmap_set_bit (large_huge.m_single_use_names,
6614 SSA_NAME_VERSION (s));
6615 }
6616 if (SSA_NAME_VAR (s)
6617 && ((TREE_CODE (SSA_NAME_VAR (s)) == PARM_DECL
6618 && SSA_NAME_IS_DEFAULT_DEF (s))
6619 || TREE_CODE (SSA_NAME_VAR (s)) == RESULT_DECL))
6620 has_large_huge_parm_result = true;
6621 if (optimize
6622 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (s)
6623 && gimple_assign_load_p (SSA_NAME_DEF_STMT (s))
6624 && !gimple_has_volatile_ops (SSA_NAME_DEF_STMT (s))
6625 && !stmt_ends_bb_p (SSA_NAME_DEF_STMT (s)))
6626 {
6627 use_operand_p use_p;
6628 imm_use_iterator iter;
6629 bool optimizable_load = true;
6630 FOR_EACH_IMM_USE_FAST (use_p, iter, s)
6631 {
6632 gimple *use_stmt = USE_STMT (use_p);
6633 if (is_gimple_debug (gs: use_stmt))
6634 continue;
6635 if (gimple_code (g: use_stmt) == GIMPLE_PHI
6636 || is_gimple_call (gs: use_stmt)
6637 || gimple_code (g: use_stmt) == GIMPLE_ASM)
6638 {
6639 optimizable_load = false;
6640 break;
6641 }
6642 }
6643
6644 ssa_op_iter oi;
6645 FOR_EACH_SSA_USE_OPERAND (use_p, SSA_NAME_DEF_STMT (s),
6646 oi, SSA_OP_USE)
6647 {
6648 tree s2 = USE_FROM_PTR (use_p);
6649 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (s2))
6650 {
6651 optimizable_load = false;
6652 break;
6653 }
6654 }
6655
6656 if (optimizable_load && !stmt_ends_bb_p (SSA_NAME_DEF_STMT (s)))
6657 {
6658 if (!large_huge.m_loads)
6659 large_huge.m_loads = BITMAP_ALLOC (NULL);
6660 bitmap_set_bit (large_huge.m_loads, SSA_NAME_VERSION (s));
6661 }
6662 }
6663 }
6664 /* We need to also rewrite stores of large/huge _BitInt INTEGER_CSTs
6665 into memory; such a function may have no large/huge SSA_NAMEs at all. */
6666 else if (SSA_NAME_IS_VIRTUAL_OPERAND (s))
6667 {
6668 gimple *g = SSA_NAME_DEF_STMT (s);
6669 if (is_gimple_assign (gs: g) && gimple_store_p (gs: g))
6670 {
6671 tree t = gimple_assign_rhs1 (gs: g);
6672 if (TREE_CODE (TREE_TYPE (t)) == BITINT_TYPE
6673 && bitint_precision_kind (TREE_TYPE (t)) >= bitint_prec_large)
6674 has_large_huge = true;
6675 }
6676 }
6677 }
6678
6679 if (large_huge.m_names || has_large_huge)
6680 {
6681 ret = TODO_update_ssa_only_virtuals | TODO_cleanup_cfg;
6682 calculate_dominance_info (CDI_DOMINATORS);
6683 if (optimize)
6684 enable_ranger (cfun);
6685 if (large_huge.m_loads)
6686 {
6687 basic_block entry = ENTRY_BLOCK_PTR_FOR_FN (cfun);
6688 entry->aux = NULL;
6689 bitint_dom_walker (large_huge.m_names,
6690 large_huge.m_loads).walk (entry);
6691 bitmap_and_compl_into (large_huge.m_names, large_huge.m_loads);
6692 clear_aux_for_blocks ();
6693 BITMAP_FREE (large_huge.m_loads);
6694 }
6695 large_huge.m_limb_type = build_nonstandard_integer_type (limb_prec, 1);
6696 large_huge.m_limb_size
6697 = tree_to_uhwi (TYPE_SIZE_UNIT (large_huge.m_limb_type));
6698 }
6699 if (large_huge.m_names)
6700 {
6701 large_huge.m_map
6702 = init_var_map (num_ssa_names, NULL, large_huge.m_names);
6703 coalesce_ssa_name (large_huge.m_map);
6704 partition_view_normal (large_huge.m_map);
6705 if (dump_file && (dump_flags & TDF_DETAILS))
6706 {
6707 fprintf (stream: dump_file, format: "After Coalescing:\n");
6708 dump_var_map (dump_file, large_huge.m_map);
6709 }
6710 large_huge.m_vars
6711 = XCNEWVEC (tree, num_var_partitions (large_huge.m_map));
6712 bitmap_iterator bi;
6713 if (has_large_huge_parm_result)
6714 EXECUTE_IF_SET_IN_BITMAP (large_huge.m_names, 0, i, bi)
6715 {
6716 tree s = ssa_name (i);
6717 if (SSA_NAME_VAR (s)
6718 && ((TREE_CODE (SSA_NAME_VAR (s)) == PARM_DECL
6719 && SSA_NAME_IS_DEFAULT_DEF (s))
6720 || TREE_CODE (SSA_NAME_VAR (s)) == RESULT_DECL))
6721 {
6722 int p = var_to_partition (map: large_huge.m_map, var: s);
6723 if (large_huge.m_vars[p] == NULL_TREE)
6724 {
6725 large_huge.m_vars[p] = SSA_NAME_VAR (s);
6726 mark_addressable (SSA_NAME_VAR (s));
6727 }
6728 }
6729 }
6730 tree atype = NULL_TREE;
6731 if (dump_file && (dump_flags & TDF_DETAILS))
6732 fprintf (stream: dump_file, format: "Mapping SSA_NAMEs to decls:\n");
6733 EXECUTE_IF_SET_IN_BITMAP (large_huge.m_names, 0, i, bi)
6734 {
6735 tree s = ssa_name (i);
6736 int p = var_to_partition (map: large_huge.m_map, var: s);
6737 if (large_huge.m_vars[p] == NULL_TREE)
6738 {
6739 if (atype == NULL_TREE
6740 || !tree_int_cst_equal (TYPE_SIZE (atype),
6741 TYPE_SIZE (TREE_TYPE (s))))
6742 {
6743 unsigned HOST_WIDE_INT nelts
6744 = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (s))) / limb_prec;
6745 atype = build_array_type_nelts (large_huge.m_limb_type,
6746 nelts);
6747 }
6748 large_huge.m_vars[p] = create_tmp_var (atype, "bitint");
6749 mark_addressable (large_huge.m_vars[p]);
6750 }
6751 if (dump_file && (dump_flags & TDF_DETAILS))
6752 {
6753 print_generic_expr (dump_file, s, TDF_SLIM);
6754 fprintf (stream: dump_file, format: " -> ");
6755 print_generic_expr (dump_file, large_huge.m_vars[p], TDF_SLIM);
6756 fprintf (stream: dump_file, format: "\n");
6757 }
6758 }
6759 }
6760
6761 FOR_EACH_BB_REVERSE_FN (bb, cfun)
6762 {
6763 gimple_stmt_iterator prev;
6764 for (gimple_stmt_iterator gsi = gsi_last_bb (bb); !gsi_end_p (i: gsi);
6765 gsi = prev)
6766 {
6767 prev = gsi;
6768 gsi_prev (i: &prev);
6769 ssa_op_iter iter;
6770 gimple *stmt = gsi_stmt (i: gsi);
6771 if (is_gimple_debug (gs: stmt))
6772 continue;
6773 bitint_prec_kind kind = bitint_prec_small;
6774 tree t;
6775 FOR_EACH_SSA_TREE_OPERAND (t, stmt, iter, SSA_OP_ALL_OPERANDS)
6776 if (TREE_CODE (TREE_TYPE (t)) == BITINT_TYPE)
6777 {
6778 bitint_prec_kind this_kind
6779 = bitint_precision_kind (TREE_TYPE (t));
6780 kind = MAX (kind, this_kind);
6781 }
6782 if (is_gimple_assign (gs: stmt) && gimple_store_p (gs: stmt))
6783 {
6784 t = gimple_assign_rhs1 (gs: stmt);
6785 if (TREE_CODE (TREE_TYPE (t)) == BITINT_TYPE)
6786 {
6787 bitint_prec_kind this_kind
6788 = bitint_precision_kind (TREE_TYPE (t));
6789 kind = MAX (kind, this_kind);
6790 }
6791 }
6792 if (is_gimple_assign (gs: stmt)
6793 && gimple_assign_rhs_code (gs: stmt) == FLOAT_EXPR)
6794 {
6795 t = gimple_assign_rhs1 (gs: stmt);
6796 if (TREE_CODE (TREE_TYPE (t)) == BITINT_TYPE
6797 && TREE_CODE (t) == INTEGER_CST)
6798 {
6799 bitint_prec_kind this_kind
6800 = bitint_precision_kind (TREE_TYPE (t));
6801 kind = MAX (kind, this_kind);
6802 }
6803 }
          if (is_gimple_call (stmt))
            {
              t = gimple_call_lhs (stmt);
              if (t && TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE)
                {
                  bitint_prec_kind this_kind = arith_overflow_arg_kind (stmt);
                  kind = MAX (kind, this_kind);
                  if (TREE_CODE (TREE_TYPE (TREE_TYPE (t))) == BITINT_TYPE)
                    {
                      this_kind
                        = bitint_precision_kind (TREE_TYPE (TREE_TYPE (t)));
                      kind = MAX (kind, this_kind);
                    }
                }
            }
          if (kind == bitint_prec_small)
            continue;
          switch (gimple_code (stmt))
            {
            case GIMPLE_CALL:
              /* For now.  We'll need to handle some internal functions and
                 perhaps some builtins.  */
              if (kind == bitint_prec_middle)
                continue;
              break;
            case GIMPLE_ASM:
              if (kind == bitint_prec_middle)
                continue;
              break;
            case GIMPLE_RETURN:
              continue;
            case GIMPLE_ASSIGN:
              if (gimple_clobber_p (stmt))
                continue;
              if (kind >= bitint_prec_large)
                break;
              if (gimple_assign_single_p (stmt))
                /* No need to lower copies, loads or stores.  */
                continue;
              if (gimple_assign_cast_p (stmt))
                {
                  tree lhs = gimple_assign_lhs (stmt);
                  tree rhs = gimple_assign_rhs1 (stmt);
                  if (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
                      && INTEGRAL_TYPE_P (TREE_TYPE (rhs))
                      && (TYPE_PRECISION (TREE_TYPE (lhs))
                          == TYPE_PRECISION (TREE_TYPE (rhs))))
                    /* No need to lower casts to same precision.  */
                    continue;
                }
              break;
            default:
              break;
            }

          if (kind == bitint_prec_middle)
            {
              tree type = NULL_TREE;
              /* Middle _BitInt(N) is rewritten to casts to INTEGER_TYPEs
                 with the same precision and back.  */
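              /* E.g. on a target where _BitInt(65) is a middle precision,
                 an addition in it becomes casts of both operands to a
                 65-bit INTEGER_TYPE, an ordinary PLUS_EXPR in that type and
                 a NOP_EXPR of the result back to the _BitInt type.  */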
              unsigned int nops = gimple_num_ops (stmt);
              for (unsigned int i = is_gimple_assign (stmt) ? 1 : 0;
                   i < nops; ++i)
                if (tree op = gimple_op (stmt, i))
                  {
                    tree nop = maybe_cast_middle_bitint (&gsi, op, type);
                    if (nop != op)
                      gimple_set_op (stmt, i, nop);
                    else if (COMPARISON_CLASS_P (op))
                      {
                        TREE_OPERAND (op, 0)
                          = maybe_cast_middle_bitint (&gsi,
                                                      TREE_OPERAND (op, 0),
                                                      type);
                        TREE_OPERAND (op, 1)
                          = maybe_cast_middle_bitint (&gsi,
                                                      TREE_OPERAND (op, 1),
                                                      type);
                      }
                    else if (TREE_CODE (op) == CASE_LABEL_EXPR)
                      {
                        CASE_LOW (op)
                          = maybe_cast_middle_bitint (&gsi, CASE_LOW (op),
                                                      type);
                        CASE_HIGH (op)
                          = maybe_cast_middle_bitint (&gsi, CASE_HIGH (op),
                                                      type);
                      }
                  }
              if (tree lhs = gimple_get_lhs (stmt))
                if (TREE_CODE (TREE_TYPE (lhs)) == BITINT_TYPE
                    && (bitint_precision_kind (TREE_TYPE (lhs))
                        == bitint_prec_middle))
                  {
                    int prec = TYPE_PRECISION (TREE_TYPE (lhs));
                    int uns = TYPE_UNSIGNED (TREE_TYPE (lhs));
                    type = build_nonstandard_integer_type (prec, uns);
                    tree lhs2 = make_ssa_name (type);
                    gimple_set_lhs (stmt, lhs2);
                    gimple *g = gimple_build_assign (lhs, NOP_EXPR, lhs2);
                    if (stmt_ends_bb_p (stmt))
                      {
                        edge e = find_fallthru_edge (gsi_bb (gsi)->succs);
                        gsi_insert_on_edge_immediate (e, g);
                      }
                    else
                      gsi_insert_after (&gsi, g, GSI_SAME_STMT);
                  }
              update_stmt (stmt);
              continue;
            }

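          /* Defs of large/huge SSA_NAMEs not tracked in M_NAMES are
             expected to be handled together with their use when the
             consuming statement is lowered, so they are skipped here.  */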
          if (tree lhs = gimple_get_lhs (stmt))
            if (TREE_CODE (lhs) == SSA_NAME)
              {
                tree type = TREE_TYPE (lhs);
                if (TREE_CODE (type) == COMPLEX_TYPE)
                  type = TREE_TYPE (type);
                if (TREE_CODE (type) == BITINT_TYPE
                    && bitint_precision_kind (type) >= bitint_prec_large
                    && (large_huge.m_names == NULL
                        || !bitmap_bit_p (large_huge.m_names,
                                          SSA_NAME_VERSION (lhs))))
                  continue;
              }

          large_huge.lower_stmt (stmt);
        }

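      /* PHIs of large/huge _BitInt can't stay in SSA form; lower them to
         copies between the partitions' backing variables inserted on the
         incoming edges.  */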
      tree atype = NULL_TREE;
      for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
           gsi_next (&gsi))
        {
          gphi *phi = gsi.phi ();
          tree lhs = gimple_phi_result (phi);
          if (TREE_CODE (TREE_TYPE (lhs)) != BITINT_TYPE
              || bitint_precision_kind (TREE_TYPE (lhs)) < bitint_prec_large)
            continue;
          int p1 = var_to_partition (large_huge.m_map, lhs);
          gcc_assert (large_huge.m_vars[p1] != NULL_TREE);
          tree v1 = large_huge.m_vars[p1];
          for (unsigned i = 0; i < gimple_phi_num_args (phi); ++i)
            {
              tree arg = gimple_phi_arg_def (phi, i);
              edge e = gimple_phi_arg_edge (phi, i);
              gimple *g;
              switch (TREE_CODE (arg))
                {
                case INTEGER_CST:
                  if (integer_zerop (arg) && VAR_P (v1))
                    {
                      tree zero = build_zero_cst (TREE_TYPE (v1));
                      g = gimple_build_assign (v1, zero);
                      gsi_insert_on_edge (e, g);
                      edge_insertions = true;
                      break;
                    }
                  int ext;
                  unsigned int min_prec, prec, rem;
                  tree c;
                  prec = TYPE_PRECISION (TREE_TYPE (arg));
                  rem = prec % (2 * limb_prec);
                  min_prec = bitint_min_cst_precision (arg, ext);
                  if (min_prec > prec - rem - 2 * limb_prec
                      && min_prec > (unsigned) limb_prec)
                    /* A constant with enough significant bits that it
                       isn't worth trying to save .rodata space by
                       extending it from a smaller number.  */
                    min_prec = prec;
                  else
                    min_prec = CEIL (min_prec, limb_prec) * limb_prec;
                  if (min_prec == 0)
                    c = NULL_TREE;
                  else if (min_prec == prec)
                    c = tree_output_constant_def (arg);
                  else if (min_prec == (unsigned) limb_prec)
                    c = fold_convert (large_huge.m_limb_type, arg);
                  else
                    {
                      tree ctype = build_bitint_type (min_prec, 1);
                      c = tree_output_constant_def (fold_convert (ctype, arg));
                    }
                  if (c)
                    {
                      if (VAR_P (v1) && min_prec == prec)
                        {
                          tree v2 = build1 (VIEW_CONVERT_EXPR,
                                            TREE_TYPE (v1), c);
                          g = gimple_build_assign (v1, v2);
                          gsi_insert_on_edge (e, g);
                          edge_insertions = true;
                          break;
                        }
                      if (TREE_CODE (TREE_TYPE (c)) == INTEGER_TYPE)
                        g = gimple_build_assign (build1 (VIEW_CONVERT_EXPR,
                                                         TREE_TYPE (c), v1),
                                                 c);
                      else
                        {
                          unsigned HOST_WIDE_INT nelts
                            = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (c)))
                              / limb_prec;
                          tree vtype
                            = build_array_type_nelts (large_huge.m_limb_type,
                                                      nelts);
                          g = gimple_build_assign (build1 (VIEW_CONVERT_EXPR,
                                                           vtype, v1),
                                                   build1 (VIEW_CONVERT_EXPR,
                                                           vtype, c));
                        }
                      gsi_insert_on_edge (e, g);
                    }
                  if (ext == 0)
                    {
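                      /* Zero extension: the constant C (if any) supplies
                         the low MIN_PREC bits; clear the remaining limbs
                         through a MEM_REF just past it, or clear the whole
                         variable when there is no constant.  */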
                      unsigned HOST_WIDE_INT nelts
                        = (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (v1)))
                           - min_prec) / limb_prec;
                      tree vtype
                        = build_array_type_nelts (large_huge.m_limb_type,
                                                  nelts);
                      tree ptype = build_pointer_type (TREE_TYPE (v1));
                      tree off;
                      if (c)
                        off = fold_convert (ptype,
                                            TYPE_SIZE_UNIT (TREE_TYPE (c)));
                      else
                        off = build_zero_cst (ptype);
                      tree vd = build2 (MEM_REF, vtype,
                                        build_fold_addr_expr (v1), off);
                      g = gimple_build_assign (vd, build_zero_cst (vtype));
                    }
                  else
                    {
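                      /* Negative constant: materialize the sign extension
                         by filling the bytes above the emitted constant
                         with 0xff using memset.  */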
                      tree vd = v1;
                      if (c)
                        {
                          tree ptype = build_pointer_type (TREE_TYPE (v1));
                          tree off
                            = fold_convert (ptype,
                                            TYPE_SIZE_UNIT (TREE_TYPE (c)));
                          vd = build2 (MEM_REF, large_huge.m_limb_type,
                                       build_fold_addr_expr (v1), off);
                        }
                      vd = build_fold_addr_expr (vd);
                      unsigned HOST_WIDE_INT nbytes
                        = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (v1)));
                      if (c)
                        nbytes
                          -= tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (c)));
                      tree fn = builtin_decl_implicit (BUILT_IN_MEMSET);
                      g = gimple_build_call (fn, 3, vd,
                                             integer_minus_one_node,
                                             build_int_cst (sizetype,
                                                            nbytes));
                    }
                  gsi_insert_on_edge (e, g);
                  edge_insertions = true;
                  break;
                default:
                  gcc_unreachable ();
                case SSA_NAME:
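                  /* Copy between the variables backing the two partitions,
                     adding VIEW_CONVERT_EXPRs (or going through an array of
                     limbs type) when the representatives have different
                     types.  */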
                  if (gimple_code (SSA_NAME_DEF_STMT (arg)) == GIMPLE_NOP)
                    {
                      if (large_huge.m_names == NULL
                          || !bitmap_bit_p (large_huge.m_names,
                                            SSA_NAME_VERSION (arg)))
                        continue;
                    }
                  int p2 = var_to_partition (large_huge.m_map, arg);
                  if (p1 == p2)
                    continue;
                  gcc_assert (large_huge.m_vars[p2] != NULL_TREE);
                  tree v2 = large_huge.m_vars[p2];
                  if (VAR_P (v1) && VAR_P (v2))
                    g = gimple_build_assign (v1, v2);
                  else if (VAR_P (v1))
                    g = gimple_build_assign (v1, build1 (VIEW_CONVERT_EXPR,
                                                         TREE_TYPE (v1), v2));
                  else if (VAR_P (v2))
                    g = gimple_build_assign (build1 (VIEW_CONVERT_EXPR,
                                                     TREE_TYPE (v2), v1), v2);
                  else
                    {
                      if (atype == NULL_TREE
                          || !tree_int_cst_equal (TYPE_SIZE (atype),
                                                  TYPE_SIZE (TREE_TYPE (lhs))))
                        {
                          unsigned HOST_WIDE_INT nelts
                            = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (lhs)))
                              / limb_prec;
                          atype
                            = build_array_type_nelts (large_huge.m_limb_type,
                                                      nelts);
                        }
                      g = gimple_build_assign (build1 (VIEW_CONVERT_EXPR,
                                                       atype, v1),
                                               build1 (VIEW_CONVERT_EXPR,
                                                       atype, v2));
                    }
                  gsi_insert_on_edge (e, g);
                  edge_insertions = true;
                  break;
                }
            }
        }
    }

  if (large_huge.m_names || has_large_huge)
    {
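      /* The large/huge _BitInt SSA_NAMEs (except those recorded in
         M_PRESERVED) have all been replaced by memory; release the stale
         names and detach them from their defining statements.  */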
      gimple *nop = NULL;
      for (i = 0; i < num_ssa_names; ++i)
        {
          tree s = ssa_name (i);
          if (s == NULL_TREE)
            continue;
          tree type = TREE_TYPE (s);
          if (TREE_CODE (type) == COMPLEX_TYPE)
            type = TREE_TYPE (type);
          if (TREE_CODE (type) == BITINT_TYPE
              && bitint_precision_kind (type) >= bitint_prec_large)
            {
              if (large_huge.m_preserved
                  && bitmap_bit_p (large_huge.m_preserved,
                                   SSA_NAME_VERSION (s)))
                continue;
              gimple *g = SSA_NAME_DEF_STMT (s);
              if (gimple_code (g) == GIMPLE_NOP)
                {
                  if (SSA_NAME_VAR (s))
                    set_ssa_default_def (cfun, SSA_NAME_VAR (s), NULL_TREE);
                  release_ssa_name (s);
                  continue;
                }
              if (gimple_bb (g) == NULL)
                {
                  release_ssa_name (s);
                  continue;
                }
              if (gimple_code (g) != GIMPLE_ASM)
                {
                  gimple_stmt_iterator gsi = gsi_for_stmt (g);
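                  /* Temporarily disable -fvar-tracking-assignments,
                     presumably so that removing the def doesn't emit debug
                     temps referencing the released name.  */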
                  bool save_vta = flag_var_tracking_assignments;
                  flag_var_tracking_assignments = false;
                  gsi_remove (&gsi, true);
                  flag_var_tracking_assignments = save_vta;
                }
              if (nop == NULL)
                nop = gimple_build_nop ();
              SSA_NAME_DEF_STMT (s) = nop;
              release_ssa_name (s);
            }
        }
      if (optimize)
        disable_ranger (cfun);
    }

  if (edge_insertions)
    gsi_commit_edge_inserts ();

  /* Fix up arguments of ECF_RETURNS_TWICE calls.  Those were temporarily
     inserted before the call, but that is invalid IL, so move them to the
     right place and add corresponding PHIs.  */
  if (!large_huge.m_returns_twice_calls.is_empty ())
    {
      auto_vec<gimple *, 16> arg_stmts;
      while (!large_huge.m_returns_twice_calls.is_empty ())
        {
          gimple *stmt = large_huge.m_returns_twice_calls.pop ();
          gimple_stmt_iterator gsi = gsi_after_labels (gimple_bb (stmt));
          while (gsi_stmt (gsi) != stmt)
            {
              if (is_gimple_debug (gsi_stmt (gsi)))
                gsi_next (&gsi);
              else
                {
                  arg_stmts.safe_push (gsi_stmt (gsi));
                  gsi_remove (&gsi, false);
                }
            }
          gimple *g;
          basic_block bb = NULL;
          edge e = NULL, ead = NULL;
          FOR_EACH_VEC_ELT (arg_stmts, i, g)
            {
              gsi_safe_insert_before (&gsi, g);
              if (i == 0)
                {
                  bb = gimple_bb (stmt);
                  gcc_checking_assert (EDGE_COUNT (bb->preds) == 2);
                  e = EDGE_PRED (bb, 0);
                  ead = EDGE_PRED (bb, 1);
                  if ((ead->flags & EDGE_ABNORMAL) == 0)
                    std::swap (e, ead);
                  gcc_checking_assert ((e->flags & EDGE_ABNORMAL) == 0
                                       && (ead->flags & EDGE_ABNORMAL));
                }
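              /* The value flows into BB on the normal edge from the moved
                 setup statement; on the abnormal edge feed the PHI from a
                 fresh anonymous default def (marked as occurring in an
                 abnormal PHI) so no real value has to live across the
                 abnormal return.  */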
              tree lhs = gimple_assign_lhs (g);
              tree arg = lhs;
              gphi *phi = create_phi_node (copy_ssa_name (arg), bb);
              add_phi_arg (phi, arg, e, UNKNOWN_LOCATION);
              tree var = create_tmp_reg (TREE_TYPE (arg));
              suppress_warning (var, OPT_Wuninitialized);
              arg = get_or_create_ssa_default_def (cfun, var);
              SSA_NAME_OCCURS_IN_ABNORMAL_PHI (arg) = 1;
              add_phi_arg (phi, arg, ead, UNKNOWN_LOCATION);
              arg = gimple_phi_result (phi);
              SSA_NAME_OCCURS_IN_ABNORMAL_PHI (arg) = 1;
              imm_use_iterator iter;
              gimple *use_stmt;
              FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
                {
                  if (use_stmt == phi)
                    continue;
                  gcc_checking_assert (use_stmt == stmt);
                  use_operand_p use_p;
                  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
                    SET_USE (use_p, arg);
                }
            }
          update_stmt (stmt);
          arg_stmts.truncate (0);
        }
    }

  return ret;
}

namespace {

const pass_data pass_data_lower_bitint =
{
  GIMPLE_PASS, /* type */
  "bitintlower", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  PROP_gimple_lbitint, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_bitint : public gimple_opt_pass
{
public:
  pass_lower_bitint (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_bitint, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () final override { return new pass_lower_bitint (m_ctxt); }
  unsigned int execute (function *) final override
  {
    return gimple_lower_bitint ();
  }

}; // class pass_lower_bitint

} // anon namespace

gimple_opt_pass *
make_pass_lower_bitint (gcc::context *ctxt)
{
  return new pass_lower_bitint (ctxt);
}


namespace {

const pass_data pass_data_lower_bitint_O0 =
{
  GIMPLE_PASS, /* type */
  "bitintlower0", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_cfg, /* properties_required */
  PROP_gimple_lbitint, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_bitint_O0 : public gimple_opt_pass
{
public:
  pass_lower_bitint_O0 (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_bitint_O0, ctxt)
  {}

  /* opt_pass methods: */
  bool gate (function *fun) final override
  {
    /* With errors, normal optimization passes are not run.  If we don't
       lower bitint operations at all, rtl expansion will abort.  */
    return !(fun->curr_properties & PROP_gimple_lbitint);
  }

  unsigned int execute (function *) final override
  {
    return gimple_lower_bitint ();
  }

}; // class pass_lower_bitint_O0

} // anon namespace

gimple_opt_pass *
make_pass_lower_bitint_O0 (gcc::context *ctxt)
{
  return new pass_lower_bitint_O0 (ctxt);
}