1/* Support routines for value ranges.
2 Copyright (C) 2019-2023 Free Software Foundation, Inc.
3 Major hacks by Aldy Hernandez <aldyh@redhat.com> and
4 Andrew MacLeod <amacleod@redhat.com>.
5
6This file is part of GCC.
7
8GCC is free software; you can redistribute it and/or modify
9it under the terms of the GNU General Public License as published by
10the Free Software Foundation; either version 3, or (at your option)
11any later version.
12
13GCC is distributed in the hope that it will be useful,
14but WITHOUT ANY WARRANTY; without even the implied warranty of
15MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16GNU General Public License for more details.
17
18You should have received a copy of the GNU General Public License
19along with GCC; see the file COPYING3. If not see
20<http://www.gnu.org/licenses/>. */
21
22#include "config.h"
23#include "system.h"
24#include "coretypes.h"
25#include "backend.h"
26#include "tree.h"
27#include "gimple.h"
28#include "ssa.h"
29#include "tree-pretty-print.h"
30#include "value-range-pretty-print.h"
31#include "fold-const.h"
32#include "gimple-range.h"
33
34void
35irange::accept (const vrange_visitor &v) const
36{
37 v.visit (*this);
38}
39
40void
41unsupported_range::accept (const vrange_visitor &v) const
42{
43 v.visit (*this);
44}
45
46// Convenience function only available for integers and pointers.
47
48wide_int
49Value_Range::lower_bound () const
50{
51 if (is_a <irange> (v&: *m_vrange))
52 return as_a <irange> (v&: *m_vrange).lower_bound ();
53 gcc_unreachable ();
54}
55
56// Convenience function only available for integers and pointers.
57
58wide_int
59Value_Range::upper_bound () const
60{
61 if (is_a <irange> (v&: *m_vrange))
62 return as_a <irange> (v&: *m_vrange).upper_bound ();
63 gcc_unreachable ();
64}
65
66void
67Value_Range::dump (FILE *out) const
68{
69 if (m_vrange)
70 m_vrange->dump (out);
71 else
72 fprintf (stream: out, format: "NULL");
73}
74
75DEBUG_FUNCTION void
76debug (const Value_Range &r)
77{
78 r.dump (stderr);
79 fprintf (stderr, format: "\n");
80}
81
82DEBUG_FUNCTION void
83debug (const irange_bitmask &bm)
84{
85 bm.dump (stderr);
86 fprintf (stderr, format: "\n");
87}
88
89// Default vrange definitions.
90
91bool
92vrange::contains_p (tree) const
93{
94 return varying_p ();
95}
96
97bool
98vrange::singleton_p (tree *) const
99{
100 return false;
101}
102
103void
104vrange::set (tree min, tree, value_range_kind)
105{
106 set_varying (TREE_TYPE (min));
107}
108
109tree
110vrange::type () const
111{
112 return void_type_node;
113}
114
115bool
116vrange::supports_type_p (const_tree) const
117{
118 return false;
119}
120
121void
122vrange::set_undefined ()
123{
124 m_kind = VR_UNDEFINED;
125}
126
127void
128vrange::set_varying (tree)
129{
130 m_kind = VR_VARYING;
131}
132
133bool
134vrange::union_ (const vrange &r)
135{
136 if (r.undefined_p () || varying_p ())
137 return false;
138 if (undefined_p () || r.varying_p ())
139 {
140 operator= (r);
141 return true;
142 }
143 gcc_unreachable ();
144 return false;
145}
146
147bool
148vrange::intersect (const vrange &r)
149{
150 if (undefined_p () || r.varying_p ())
151 return false;
152 if (r.undefined_p ())
153 {
154 set_undefined ();
155 return true;
156 }
157 if (varying_p ())
158 {
159 operator= (r);
160 return true;
161 }
162 gcc_unreachable ();
163 return false;
164}
165
166bool
167vrange::zero_p () const
168{
169 return false;
170}
171
172bool
173vrange::nonzero_p () const
174{
175 return false;
176}
177
178void
179vrange::set_nonzero (tree type)
180{
181 set_varying (type);
182}
183
184void
185vrange::set_zero (tree type)
186{
187 set_varying (type);
188}
189
190void
191vrange::set_nonnegative (tree type)
192{
193 set_varying (type);
194}
195
196bool
197vrange::fits_p (const vrange &) const
198{
199 return true;
200}
201
202// Assignment operator for generic ranges. Copying incompatible types
203// is not allowed.
204
205vrange &
206vrange::operator= (const vrange &src)
207{
208 if (is_a <irange> (v: src))
209 as_a <irange> (v&: *this) = as_a <irange> (v: src);
210 else if (is_a <frange> (v: src))
211 as_a <frange> (v&: *this) = as_a <frange> (v: src);
212 else
213 {
214 gcc_checking_assert (is_a <unsupported_range> (src));
215 m_kind = src.m_kind;
216 }
217 return *this;
218}
219
220// Equality operator for generic ranges.
221
222bool
223vrange::operator== (const vrange &src) const
224{
225 if (is_a <irange> (v: src))
226 return as_a <irange> (v: *this) == as_a <irange> (v: src);
227 if (is_a <frange> (v: src))
228 return as_a <frange> (v: *this) == as_a <frange> (v: src);
229 gcc_unreachable ();
230}
231
232// Wrapper for vrange_printer to dump a range to a file.
233
234void
235vrange::dump (FILE *file) const
236{
237 pretty_printer buffer;
238 pp_needs_newline (&buffer) = true;
239 buffer.buffer->stream = file;
240 vrange_printer vrange_pp (&buffer);
241 this->accept (v: vrange_pp);
242 pp_flush (&buffer);
243}
244
245void
246irange_bitmask::dump (FILE *file) const
247{
248 char buf[WIDE_INT_PRINT_BUFFER_SIZE], *p;
249 pretty_printer buffer;
250
251 pp_needs_newline (&buffer) = true;
252 buffer.buffer->stream = file;
253 pp_string (&buffer, "MASK ");
254 unsigned len_mask, len_val;
255 if (print_hex_buf_size (wi: m_mask, len: &len_mask)
256 | print_hex_buf_size (wi: m_value, len: &len_val))
257 p = XALLOCAVEC (char, MAX (len_mask, len_val));
258 else
259 p = buf;
260 print_hex (wi: m_mask, buf: p);
261 pp_string (&buffer, p);
262 pp_string (&buffer, " VALUE ");
263 print_hex (wi: m_value, buf: p);
264 pp_string (&buffer, p);
265 pp_flush (&buffer);
266}
267
268namespace inchash
269{
270
271void
272add_vrange (const vrange &v, inchash::hash &hstate,
273 unsigned int)
274{
275 if (v.undefined_p ())
276 {
277 hstate.add_int (v: VR_UNDEFINED);
278 return;
279 }
280 // Types are ignored throughout to inhibit two ranges being equal
281 // but having different hash values. This can happen when two
282 // ranges are equal and their types are different (but
283 // types_compatible_p is true).
284 if (is_a <irange> (v))
285 {
286 const irange &r = as_a <irange> (v);
287 if (r.varying_p ())
288 hstate.add_int (v: VR_VARYING);
289 else
290 hstate.add_int (v: VR_RANGE);
291 for (unsigned i = 0; i < r.num_pairs (); ++i)
292 {
293 hstate.add_wide_int (x: r.lower_bound (pair: i));
294 hstate.add_wide_int (x: r.upper_bound (pair: i));
295 }
296 irange_bitmask bm = r.get_bitmask ();
297 hstate.add_wide_int (x: bm.value ());
298 hstate.add_wide_int (x: bm.mask ());
299 return;
300 }
301 if (is_a <frange> (v))
302 {
303 const frange &r = as_a <frange> (v);
304 if (r.known_isnan ())
305 hstate.add_int (v: VR_NAN);
306 else
307 {
308 hstate.add_int (v: r.varying_p () ? VR_VARYING : VR_RANGE);
309 hstate.add_real_value (v: r.lower_bound ());
310 hstate.add_real_value (v: r.upper_bound ());
311 }
312 nan_state nan = r.get_nan_state ();
313 hstate.add_int (v: nan.pos_p ());
314 hstate.add_int (v: nan.neg_p ());
315 return;
316 }
317 gcc_unreachable ();
318}
319
320} //namespace inchash
321
322bool
323irange::nonnegative_p () const
324{
325 return wi::ge_p (x: lower_bound (), y: 0, TYPE_SIGN (type ()));
326}
327
328bool
329irange::nonpositive_p () const
330{
331 return wi::le_p (x: upper_bound (), y: 0, TYPE_SIGN (type ()));
332}
333
334bool
335irange::supports_type_p (const_tree type) const
336{
337 return supports_p (type);
338}
339
340// Return TRUE if R fits in THIS.
341
342bool
343irange::fits_p (const vrange &r) const
344{
345 return m_max_ranges >= as_a <irange> (v: r).num_pairs ();
346}
347
348void
349irange::set_nonnegative (tree type)
350{
351 set (type,
352 wi::zero (TYPE_PRECISION (type)),
353 wi::to_wide (TYPE_MAX_VALUE (type)));
354}
355
356void
357frange::accept (const vrange_visitor &v) const
358{
359 v.visit (*this);
360}
361
362// Flush denormal endpoints to the appropriate 0.0.
363
364void
365frange::flush_denormals_to_zero ()
366{
367 if (undefined_p () || known_isnan ())
368 return;
369
370 machine_mode mode = TYPE_MODE (type ());
371 // Flush [x, -DENORMAL] to [x, -0.0].
372 if (real_isdenormal (r: &m_max, mode) && real_isneg (&m_max))
373 {
374 if (HONOR_SIGNED_ZEROS (m_type))
375 m_max = dconstm0;
376 else
377 m_max = dconst0;
378 }
379 // Flush [+DENORMAL, x] to [+0.0, x].
380 if (real_isdenormal (r: &m_min, mode) && !real_isneg (&m_min))
381 m_min = dconst0;
382}
383
384// Setter for franges.
385
386void
387frange::set (tree type,
388 const REAL_VALUE_TYPE &min, const REAL_VALUE_TYPE &max,
389 const nan_state &nan, value_range_kind kind)
390{
391 switch (kind)
392 {
393 case VR_UNDEFINED:
394 set_undefined ();
395 return;
396 case VR_VARYING:
397 case VR_ANTI_RANGE:
398 set_varying (type);
399 return;
400 case VR_RANGE:
401 break;
402 default:
403 gcc_unreachable ();
404 }
405
406 gcc_checking_assert (!real_isnan (&min) && !real_isnan (&max));
407
408 m_kind = kind;
409 m_type = type;
410 m_min = min;
411 m_max = max;
412 if (HONOR_NANS (m_type))
413 {
414 m_pos_nan = nan.pos_p ();
415 m_neg_nan = nan.neg_p ();
416 }
417 else
418 {
419 m_pos_nan = false;
420 m_neg_nan = false;
421 }
422
423 if (!MODE_HAS_SIGNED_ZEROS (TYPE_MODE (m_type)))
424 {
425 if (real_iszero (&m_min, sign: 1))
426 m_min.sign = 0;
427 if (real_iszero (&m_max, sign: 1))
428 m_max.sign = 0;
429 }
430 else if (!HONOR_SIGNED_ZEROS (m_type))
431 {
432 if (real_iszero (&m_max, sign: 1))
433 m_max.sign = 0;
434 if (real_iszero (&m_min, sign: 0))
435 m_min.sign = 1;
436 }
437
438 // For -ffinite-math-only we can drop ranges outside the
439 // representable numbers to min/max for the type.
440 if (!HONOR_INFINITIES (m_type))
441 {
442 REAL_VALUE_TYPE min_repr = frange_val_min (type: m_type);
443 REAL_VALUE_TYPE max_repr = frange_val_max (type: m_type);
444 if (real_less (&m_min, &min_repr))
445 m_min = min_repr;
446 else if (real_less (&max_repr, &m_min))
447 m_min = max_repr;
448 if (real_less (&max_repr, &m_max))
449 m_max = max_repr;
450 else if (real_less (&m_max, &min_repr))
451 m_max = min_repr;
452 }
453
454 // Check for swapped ranges.
455 gcc_checking_assert (real_compare (LE_EXPR, &min, &max));
456
457 normalize_kind ();
458}
459
460// Setter for an frange defaulting the NAN possibility to +-NAN when
461// HONOR_NANS.
462
463void
464frange::set (tree type,
465 const REAL_VALUE_TYPE &min, const REAL_VALUE_TYPE &max,
466 value_range_kind kind)
467{
468 set (type, min, max, nan: nan_state (true), kind);
469}
470
471void
472frange::set (tree min, tree max, value_range_kind kind)
473{
474 set (TREE_TYPE (min),
475 min: *TREE_REAL_CST_PTR (min), max: *TREE_REAL_CST_PTR (max), kind);
476}
477
478// Normalize range to VARYING or UNDEFINED, or vice versa. Return
479// TRUE if anything changed.
480//
481// A range with no known properties can be dropped to VARYING.
482// Similarly, a VARYING with any properties should be dropped to a
483// VR_RANGE. Normalizing ranges upon changing them ensures there is
484// only one representation for a given range.
485
486bool
487frange::normalize_kind ()
488{
489 if (m_kind == VR_RANGE
490 && frange_val_is_min (r: m_min, type: m_type)
491 && frange_val_is_max (r: m_max, type: m_type))
492 {
493 if (!HONOR_NANS (m_type) || (m_pos_nan && m_neg_nan))
494 {
495 set_varying (m_type);
496 return true;
497 }
498 }
499 else if (m_kind == VR_VARYING)
500 {
501 if (HONOR_NANS (m_type) && (!m_pos_nan || !m_neg_nan))
502 {
503 m_kind = VR_RANGE;
504 m_min = frange_val_min (type: m_type);
505 m_max = frange_val_max (type: m_type);
506 if (flag_checking)
507 verify_range ();
508 return true;
509 }
510 }
511 else if (m_kind == VR_NAN && !m_pos_nan && !m_neg_nan)
512 set_undefined ();
513 return false;
514}
515
516// Union or intersect the zero endpoints of two ranges. For example:
517// [-0, x] U [+0, x] => [-0, x]
518// [ x, -0] U [ x, +0] => [ x, +0]
519// [-0, x] ^ [+0, x] => [+0, x]
520// [ x, -0] ^ [ x, +0] => [ x, -0]
521//
522// UNION_P is true when performing a union, or false when intersecting.
523
524bool
525frange::combine_zeros (const frange &r, bool union_p)
526{
527 gcc_checking_assert (!undefined_p () && !known_isnan ());
528
529 bool changed = false;
530 if (real_iszero (&m_min) && real_iszero (&r.m_min)
531 && real_isneg (&m_min) != real_isneg (&r.m_min))
532 {
533 m_min.sign = union_p;
534 changed = true;
535 }
536 if (real_iszero (&m_max) && real_iszero (&r.m_max)
537 && real_isneg (&m_max) != real_isneg (&r.m_max))
538 {
539 m_max.sign = !union_p;
540 changed = true;
541 }
542 // If the signs are swapped, the resulting range is empty.
543 if (m_min.sign == 0 && m_max.sign == 1)
544 {
545 if (maybe_isnan ())
546 m_kind = VR_NAN;
547 else
548 set_undefined ();
549 changed = true;
550 }
551 return changed;
552}
553
554// Union two ranges when one is known to be a NAN.
555
556bool
557frange::union_nans (const frange &r)
558{
559 gcc_checking_assert (known_isnan () || r.known_isnan ());
560
561 bool changed = false;
562 if (known_isnan () && m_kind != r.m_kind)
563 {
564 m_kind = r.m_kind;
565 m_min = r.m_min;
566 m_max = r.m_max;
567 changed = true;
568 }
569 if (m_pos_nan != r.m_pos_nan || m_neg_nan != r.m_neg_nan)
570 {
571 m_pos_nan |= r.m_pos_nan;
572 m_neg_nan |= r.m_neg_nan;
573 changed = true;
574 }
575 if (changed)
576 {
577 normalize_kind ();
578 return true;
579 }
580 return false;
581}
582
583bool
584frange::union_ (const vrange &v)
585{
586 const frange &r = as_a <frange> (v);
587
588 if (r.undefined_p () || varying_p ())
589 return false;
590 if (undefined_p () || r.varying_p ())
591 {
592 *this = r;
593 return true;
594 }
595
596 // Combine NAN info.
597 if (known_isnan () || r.known_isnan ())
598 return union_nans (r);
599 bool changed = false;
600 if (m_pos_nan != r.m_pos_nan || m_neg_nan != r.m_neg_nan)
601 {
602 m_pos_nan |= r.m_pos_nan;
603 m_neg_nan |= r.m_neg_nan;
604 changed = true;
605 }
606
607 // Combine endpoints.
608 if (real_less (&r.m_min, &m_min))
609 {
610 m_min = r.m_min;
611 changed = true;
612 }
613 if (real_less (&m_max, &r.m_max))
614 {
615 m_max = r.m_max;
616 changed = true;
617 }
618
619 if (HONOR_SIGNED_ZEROS (m_type))
620 changed |= combine_zeros (r, union_p: true);
621
622 changed |= normalize_kind ();
623 return changed;
624}
625
626// Intersect two ranges when one is known to be a NAN.
627
628bool
629frange::intersect_nans (const frange &r)
630{
631 gcc_checking_assert (known_isnan () || r.known_isnan ());
632
633 m_pos_nan &= r.m_pos_nan;
634 m_neg_nan &= r.m_neg_nan;
635 if (maybe_isnan ())
636 m_kind = VR_NAN;
637 else
638 set_undefined ();
639 if (flag_checking)
640 verify_range ();
641 return true;
642}
643
644bool
645frange::intersect (const vrange &v)
646{
647 const frange &r = as_a <frange> (v);
648
649 if (undefined_p () || r.varying_p ())
650 return false;
651 if (r.undefined_p ())
652 {
653 set_undefined ();
654 return true;
655 }
656 if (varying_p ())
657 {
658 *this = r;
659 return true;
660 }
661
662 // Combine NAN info.
663 if (known_isnan () || r.known_isnan ())
664 return intersect_nans (r);
665 bool changed = false;
666 if (m_pos_nan != r.m_pos_nan || m_neg_nan != r.m_neg_nan)
667 {
668 m_pos_nan &= r.m_pos_nan;
669 m_neg_nan &= r.m_neg_nan;
670 changed = true;
671 }
672
673 // Combine endpoints.
674 if (real_less (&m_min, &r.m_min))
675 {
676 m_min = r.m_min;
677 changed = true;
678 }
679 if (real_less (&r.m_max, &m_max))
680 {
681 m_max = r.m_max;
682 changed = true;
683 }
684 // If the endpoints are swapped, the resulting range is empty.
685 if (real_less (&m_max, &m_min))
686 {
687 if (maybe_isnan ())
688 m_kind = VR_NAN;
689 else
690 set_undefined ();
691 if (flag_checking)
692 verify_range ();
693 return true;
694 }
695
696 if (HONOR_SIGNED_ZEROS (m_type))
697 changed |= combine_zeros (r, union_p: false);
698
699 changed |= normalize_kind ();
700 return changed;
701}
702
703frange &
704frange::operator= (const frange &src)
705{
706 m_kind = src.m_kind;
707 m_type = src.m_type;
708 m_min = src.m_min;
709 m_max = src.m_max;
710 m_pos_nan = src.m_pos_nan;
711 m_neg_nan = src.m_neg_nan;
712
713 if (flag_checking)
714 verify_range ();
715 return *this;
716}
717
718bool
719frange::operator== (const frange &src) const
720{
721 if (m_kind == src.m_kind)
722 {
723 if (undefined_p ())
724 return true;
725
726 if (varying_p ())
727 return types_compatible_p (type1: m_type, type2: src.m_type);
728
729 bool nan1 = known_isnan ();
730 bool nan2 = src.known_isnan ();
731 if (nan1 || nan2)
732 {
733 if (nan1 && nan2)
734 return (m_pos_nan == src.m_pos_nan
735 && m_neg_nan == src.m_neg_nan);
736 return false;
737 }
738
739 return (real_identical (&m_min, &src.m_min)
740 && real_identical (&m_max, &src.m_max)
741 && m_pos_nan == src.m_pos_nan
742 && m_neg_nan == src.m_neg_nan
743 && types_compatible_p (type1: m_type, type2: src.m_type));
744 }
745 return false;
746}
747
748// Return TRUE if range contains R.
749
750bool
751frange::contains_p (const REAL_VALUE_TYPE &r) const
752{
753 gcc_checking_assert (m_kind != VR_ANTI_RANGE);
754
755 if (undefined_p ())
756 return false;
757
758 if (varying_p ())
759 return true;
760
761 if (real_isnan (&r))
762 {
763 // No NAN in range.
764 if (!m_pos_nan && !m_neg_nan)
765 return false;
766 // Both +NAN and -NAN are present.
767 if (m_pos_nan && m_neg_nan)
768 return true;
769 return m_neg_nan == r.sign;
770 }
771 if (known_isnan ())
772 return false;
773
774 if (real_compare (GE_EXPR, &r, &m_min) && real_compare (LE_EXPR, &r, &m_max))
775 {
776 // Make sure the signs are equal for signed zeros.
777 if (HONOR_SIGNED_ZEROS (m_type) && real_iszero (&r))
778 return r.sign == m_min.sign || r.sign == m_max.sign;
779 return true;
780 }
781 return false;
782}
783
784// If range is a singleton, place it in RESULT and return TRUE. If
785// RESULT is NULL, just return TRUE.
786//
787// A NAN can never be a singleton.
788
789bool
790frange::internal_singleton_p (REAL_VALUE_TYPE *result) const
791{
792 if (m_kind == VR_RANGE && real_identical (&m_min, &m_max))
793 {
794 // Return false for any singleton that may be a NAN.
795 if (HONOR_NANS (m_type) && maybe_isnan ())
796 return false;
797
798 if (MODE_COMPOSITE_P (TYPE_MODE (m_type)))
799 {
800 // For IBM long doubles, if the value is +-Inf or is exactly
801 // representable in double, the other double could be +0.0
802 // or -0.0. Since this means there is more than one way to
803 // represent a value, return false to avoid propagating it.
804 // See libgcc/config/rs6000/ibm-ldouble-format for details.
805 if (real_isinf (&m_min))
806 return false;
807 REAL_VALUE_TYPE r;
808 real_convert (&r, DFmode, &m_min);
809 if (real_identical (&r, &m_min))
810 return false;
811 }
812
813 if (result)
814 *result = m_min;
815 return true;
816 }
817 return false;
818}
819
820bool
821frange::singleton_p (tree *result) const
822{
823 if (internal_singleton_p ())
824 {
825 if (result)
826 *result = build_real (m_type, m_min);
827 return true;
828 }
829 return false;
830}
831
832bool
833frange::singleton_p (REAL_VALUE_TYPE &r) const
834{
835 return internal_singleton_p (result: &r);
836}
837
838bool
839frange::supports_type_p (const_tree type) const
840{
841 return supports_p (type);
842}
843
844void
845frange::verify_range ()
846{
847 if (!undefined_p ())
848 gcc_checking_assert (HONOR_NANS (m_type) || !maybe_isnan ());
849 switch (m_kind)
850 {
851 case VR_UNDEFINED:
852 gcc_checking_assert (!m_type);
853 return;
854 case VR_VARYING:
855 gcc_checking_assert (m_type);
856 gcc_checking_assert (frange_val_is_min (m_min, m_type));
857 gcc_checking_assert (frange_val_is_max (m_max, m_type));
858 if (HONOR_NANS (m_type))
859 gcc_checking_assert (m_pos_nan && m_neg_nan);
860 else
861 gcc_checking_assert (!m_pos_nan && !m_neg_nan);
862 return;
863 case VR_RANGE:
864 gcc_checking_assert (m_type);
865 break;
866 case VR_NAN:
867 gcc_checking_assert (m_type);
868 gcc_checking_assert (m_pos_nan || m_neg_nan);
869 return;
870 default:
871 gcc_unreachable ();
872 }
873
874 // NANs cannot appear in the endpoints of a range.
875 gcc_checking_assert (!real_isnan (&m_min) && !real_isnan (&m_max));
876
877 // Make sure we don't have swapped ranges.
878 gcc_checking_assert (!real_less (&m_max, &m_min));
879
880 // [ +0.0, -0.0 ] is nonsensical.
881 gcc_checking_assert (!(real_iszero (&m_min, 0) && real_iszero (&m_max, 1)));
882
883 // If all the properties are clear, we better not span the entire
884 // domain, because that would make us varying.
885 if (m_pos_nan && m_neg_nan)
886 gcc_checking_assert (!frange_val_is_min (m_min, m_type)
887 || !frange_val_is_max (m_max, m_type));
888}
889
890// We can't do much with nonzeros yet.
891void
892frange::set_nonzero (tree type)
893{
894 set_varying (type);
895}
896
897// We can't do much with nonzeros yet.
898bool
899frange::nonzero_p () const
900{
901 return false;
902}
903
904// Set range to [+0.0, +0.0] if honoring signed zeros, or [0.0, 0.0]
905// otherwise.
906
907void
908frange::set_zero (tree type)
909{
910 if (HONOR_SIGNED_ZEROS (type))
911 {
912 set (type, min: dconstm0, max: dconst0);
913 clear_nan ();
914 }
915 else
916 set (type, min: dconst0, max: dconst0);
917}
918
919// Return TRUE for any zero regardless of sign.
920
921bool
922frange::zero_p () const
923{
924 return (m_kind == VR_RANGE
925 && real_iszero (&m_min)
926 && real_iszero (&m_max));
927}
928
929// Set the range to non-negative numbers, that is [+0.0, +INF].
930//
931// The NAN in the resulting range (if HONOR_NANS) has a varying sign
932// as there are no guarantees in IEEE 754 wrt to the sign of a NAN,
933// except for copy, abs, and copysign. It is the responsibility of
934// the caller to set the NAN's sign if desired.
935
936void
937frange::set_nonnegative (tree type)
938{
939 set (type, min: dconst0, max: frange_val_max (type));
940}
941
942// Here we copy between any two irange's.
943
944irange &
945irange::operator= (const irange &src)
946{
947 int needed = src.num_pairs ();
948 maybe_resize (needed);
949
950 unsigned x;
951 unsigned lim = src.m_num_ranges;
952 if (lim > m_max_ranges)
953 lim = m_max_ranges;
954
955 for (x = 0; x < lim * 2; ++x)
956 m_base[x] = src.m_base[x];
957
958 // If the range didn't fit, the last range should cover the rest.
959 if (lim != src.m_num_ranges)
960 m_base[x - 1] = src.m_base[src.m_num_ranges * 2 - 1];
961
962 m_num_ranges = lim;
963 m_type = src.m_type;
964 m_kind = src.m_kind;
965 m_bitmask = src.m_bitmask;
966 if (m_max_ranges == 1)
967 normalize_kind ();
968 if (flag_checking)
969 verify_range ();
970 return *this;
971}
972
973value_range_kind
974get_legacy_range (const irange &r, tree &min, tree &max)
975{
976 if (r.undefined_p ())
977 {
978 min = NULL_TREE;
979 max = NULL_TREE;
980 return VR_UNDEFINED;
981 }
982
983 tree type = r.type ();
984 if (r.varying_p ())
985 {
986 min = wide_int_to_tree (type, cst: r.lower_bound ());
987 max = wide_int_to_tree (type, cst: r.upper_bound ());
988 return VR_VARYING;
989 }
990
991 unsigned int precision = TYPE_PRECISION (type);
992 signop sign = TYPE_SIGN (type);
993 if (r.num_pairs () > 1
994 && precision > 1
995 && r.lower_bound () == wi::min_value (precision, sign)
996 && r.upper_bound () == wi::max_value (precision, sign))
997 {
998 int_range<3> inv (r);
999 inv.invert ();
1000 min = wide_int_to_tree (type, cst: inv.lower_bound (pair: 0));
1001 max = wide_int_to_tree (type, cst: inv.upper_bound (pair: 0));
1002 return VR_ANTI_RANGE;
1003 }
1004
1005 min = wide_int_to_tree (type, cst: r.lower_bound ());
1006 max = wide_int_to_tree (type, cst: r.upper_bound ());
1007 return VR_RANGE;
1008}
1009
1010/* Set value range to the canonical form of {VRTYPE, MIN, MAX, EQUIV}.
1011 This means adjusting VRTYPE, MIN and MAX representing the case of a
1012 wrapping range with MAX < MIN covering [MIN, type_max] U [type_min, MAX]
1013 as anti-rage ~[MAX+1, MIN-1]. Likewise for wrapping anti-ranges.
1014 In corner cases where MAX+1 or MIN-1 wraps this will fall back
1015 to varying.
1016 This routine exists to ease canonicalization in the case where we
1017 extract ranges from var + CST op limit. */
1018
1019void
1020irange::set (tree type, const wide_int &min, const wide_int &max,
1021 value_range_kind kind)
1022{
1023 unsigned prec = TYPE_PRECISION (type);
1024 signop sign = TYPE_SIGN (type);
1025 wide_int min_value = wi::min_value (prec, sign);
1026 wide_int max_value = wi::max_value (prec, sign);
1027
1028 m_type = type;
1029 m_bitmask.set_unknown (prec);
1030
1031 if (kind == VR_RANGE)
1032 {
1033 m_base[0] = min;
1034 m_base[1] = max;
1035 m_num_ranges = 1;
1036 if (min == min_value && max == max_value)
1037 m_kind = VR_VARYING;
1038 else
1039 m_kind = VR_RANGE;
1040 }
1041 else
1042 {
1043 gcc_checking_assert (kind == VR_ANTI_RANGE);
1044 gcc_checking_assert (m_max_ranges > 1);
1045
1046 m_kind = VR_UNDEFINED;
1047 m_num_ranges = 0;
1048 wi::overflow_type ovf;
1049 wide_int lim;
1050 if (sign == SIGNED)
1051 lim = wi::add (x: min, y: -1, sgn: sign, overflow: &ovf);
1052 else
1053 lim = wi::sub (x: min, y: 1, sgn: sign, overflow: &ovf);
1054
1055 if (!ovf)
1056 {
1057 m_kind = VR_RANGE;
1058 m_base[0] = min_value;
1059 m_base[1] = lim;
1060 ++m_num_ranges;
1061 }
1062 if (sign == SIGNED)
1063 lim = wi::sub (x: max, y: -1, sgn: sign, overflow: &ovf);
1064 else
1065 lim = wi::add (x: max, y: 1, sgn: sign, overflow: &ovf);
1066 if (!ovf)
1067 {
1068 m_kind = VR_RANGE;
1069 m_base[m_num_ranges * 2] = lim;
1070 m_base[m_num_ranges * 2 + 1] = max_value;
1071 ++m_num_ranges;
1072 }
1073 }
1074
1075 if (flag_checking)
1076 verify_range ();
1077}
1078
1079void
1080irange::set (tree min, tree max, value_range_kind kind)
1081{
1082 if (POLY_INT_CST_P (min) || POLY_INT_CST_P (max))
1083 {
1084 set_varying (TREE_TYPE (min));
1085 return;
1086 }
1087
1088 gcc_checking_assert (TREE_CODE (min) == INTEGER_CST);
1089 gcc_checking_assert (TREE_CODE (max) == INTEGER_CST);
1090
1091 return set (TREE_TYPE (min), min: wi::to_wide (t: min), max: wi::to_wide (t: max), kind);
1092}
1093
1094// Check the validity of the range.
1095
1096void
1097irange::verify_range ()
1098{
1099 gcc_checking_assert (m_discriminator == VR_IRANGE);
1100 if (m_kind == VR_UNDEFINED)
1101 {
1102 gcc_checking_assert (m_num_ranges == 0);
1103 return;
1104 }
1105 gcc_checking_assert (m_num_ranges <= m_max_ranges);
1106
1107 // Legacy allowed these to represent VARYING for unknown types.
1108 // Leave this in for now, until all users are converted. Eventually
1109 // we should abort in set_varying.
1110 if (m_kind == VR_VARYING && m_type == error_mark_node)
1111 return;
1112
1113 unsigned prec = TYPE_PRECISION (m_type);
1114 if (m_kind == VR_VARYING)
1115 {
1116 gcc_checking_assert (m_bitmask.unknown_p ());
1117 gcc_checking_assert (m_num_ranges == 1);
1118 gcc_checking_assert (varying_compatible_p ());
1119 gcc_checking_assert (lower_bound ().get_precision () == prec);
1120 gcc_checking_assert (upper_bound ().get_precision () == prec);
1121 return;
1122 }
1123 gcc_checking_assert (m_num_ranges != 0);
1124 gcc_checking_assert (!varying_compatible_p ());
1125 for (unsigned i = 0; i < m_num_ranges; ++i)
1126 {
1127 wide_int lb = lower_bound (pair: i);
1128 wide_int ub = upper_bound (pair: i);
1129 gcc_checking_assert (lb.get_precision () == prec);
1130 gcc_checking_assert (ub.get_precision () == prec);
1131 int c = wi::cmp (x: lb, y: ub, TYPE_SIGN (m_type));
1132 gcc_checking_assert (c == 0 || c == -1);
1133 }
1134 m_bitmask.verify_mask ();
1135}
1136
1137bool
1138irange::operator== (const irange &other) const
1139{
1140 if (m_num_ranges != other.m_num_ranges)
1141 return false;
1142
1143 if (m_num_ranges == 0)
1144 return true;
1145
1146 signop sign1 = TYPE_SIGN (type ());
1147 signop sign2 = TYPE_SIGN (other.type ());
1148
1149 for (unsigned i = 0; i < m_num_ranges; ++i)
1150 {
1151 widest_int lb = widest_int::from (x: lower_bound (pair: i), sgn: sign1);
1152 widest_int ub = widest_int::from (x: upper_bound (pair: i), sgn: sign1);
1153 widest_int lb_other = widest_int::from (x: other.lower_bound (pair: i), sgn: sign2);
1154 widest_int ub_other = widest_int::from (x: other.upper_bound (pair: i), sgn: sign2);
1155 if (lb != lb_other || ub != ub_other)
1156 return false;
1157 }
1158
1159 irange_bitmask bm1 = get_bitmask ();
1160 irange_bitmask bm2 = other.get_bitmask ();
1161 widest_int tmp1 = widest_int::from (x: bm1.mask (), sgn: sign1);
1162 widest_int tmp2 = widest_int::from (x: bm2.mask (), sgn: sign2);
1163 if (tmp1 != tmp2)
1164 return false;
1165 if (bm1.unknown_p ())
1166 return true;
1167 tmp1 = widest_int::from (x: bm1.value (), sgn: sign1);
1168 tmp2 = widest_int::from (x: bm2.value (), sgn: sign2);
1169 return tmp1 == tmp2;
1170}
1171
1172/* If range is a singleton, place it in RESULT and return TRUE. */
1173
1174bool
1175irange::singleton_p (tree *result) const
1176{
1177 if (num_pairs () == 1 && lower_bound () == upper_bound ())
1178 {
1179 if (result)
1180 *result = wide_int_to_tree (type: type (), cst: lower_bound ());
1181 return true;
1182 }
1183 return false;
1184}
1185
1186bool
1187irange::singleton_p (wide_int &w) const
1188{
1189 if (num_pairs () == 1 && lower_bound () == upper_bound ())
1190 {
1191 w = lower_bound ();
1192 return true;
1193 }
1194 return false;
1195}
1196
1197/* Return 1 if CST is inside value range.
1198 0 if CST is not inside value range.
1199
1200 Benchmark compile/20001226-1.c compilation time after changing this
1201 function. */
1202
1203bool
1204irange::contains_p (const wide_int &cst) const
1205{
1206 if (undefined_p ())
1207 return false;
1208
1209 // See if we can exclude CST based on the known 0 bits.
1210 if (!m_bitmask.unknown_p ()
1211 && cst != 0
1212 && wi::bit_and (x: m_bitmask.get_nonzero_bits (), y: cst) == 0)
1213 return false;
1214
1215 signop sign = TYPE_SIGN (type ());
1216 for (unsigned r = 0; r < m_num_ranges; ++r)
1217 {
1218 if (wi::lt_p (x: cst, y: lower_bound (pair: r), sgn: sign))
1219 return false;
1220 if (wi::le_p (x: cst, y: upper_bound (pair: r), sgn: sign))
1221 return true;
1222 }
1223
1224 return false;
1225}
1226
1227// Perform an efficient union with R when both ranges have only a single pair.
1228// Excluded are VARYING and UNDEFINED ranges.
1229
1230bool
1231irange::irange_single_pair_union (const irange &r)
1232{
1233 gcc_checking_assert (!undefined_p () && !varying_p ());
1234 gcc_checking_assert (!r.undefined_p () && !varying_p ());
1235
1236 signop sign = TYPE_SIGN (m_type);
1237 // Check if current lower bound is also the new lower bound.
1238 if (wi::le_p (x: m_base[0], y: r.m_base[0], sgn: sign))
1239 {
1240 // If current upper bound is new upper bound, we're done.
1241 if (wi::le_p (x: r.m_base[1], y: m_base[1], sgn: sign))
1242 return union_bitmask (r);
1243 // Otherwise R has the new upper bound.
1244 // Check for overlap/touching ranges, or single target range.
1245 if (m_max_ranges == 1
1246 || (widest_int::from (x: m_base[1], sgn: sign) + 1
1247 >= widest_int::from (x: r.m_base[0], TYPE_SIGN (r.m_type))))
1248 m_base[1] = r.m_base[1];
1249 else
1250 {
1251 // This is a dual range result.
1252 m_base[2] = r.m_base[0];
1253 m_base[3] = r.m_base[1];
1254 m_num_ranges = 2;
1255 }
1256 // The range has been altered, so normalize it even if nothing
1257 // changed in the mask.
1258 if (!union_bitmask (r))
1259 normalize_kind ();
1260 if (flag_checking)
1261 verify_range ();
1262 return true;
1263 }
1264
1265 // Set the new lower bound to R's lower bound.
1266 wide_int lb = m_base[0];
1267 m_base[0] = r.m_base[0];
1268
1269 // If R fully contains THIS range, just set the upper bound.
1270 if (wi::ge_p (x: r.m_base[1], y: m_base[1], sgn: sign))
1271 m_base[1] = r.m_base[1];
1272 // Check for overlapping ranges, or target limited to a single range.
1273 else if (m_max_ranges == 1
1274 || (widest_int::from (x: r.m_base[1], TYPE_SIGN (r.m_type)) + 1
1275 >= widest_int::from (x: lb, sgn: sign)))
1276 ;
1277 else
1278 {
1279 // Left with 2 pairs.
1280 m_num_ranges = 2;
1281 m_base[2] = lb;
1282 m_base[3] = m_base[1];
1283 m_base[1] = r.m_base[1];
1284 }
1285 // The range has been altered, so normalize it even if nothing
1286 // changed in the mask.
1287 if (!union_bitmask (r))
1288 normalize_kind ();
1289 if (flag_checking)
1290 verify_range ();
1291 return true;
1292}
1293
1294// Append R to this range, knowing that R occurs after all of these subranges.
1295// Return TRUE as something must have changed.
1296
1297bool
1298irange::union_append (const irange &r)
1299{
1300 // Check if the first range in R is an immmediate successor to the last
1301 // range, ths requiring a merge.
1302 signop sign = TYPE_SIGN (m_type);
1303 wide_int lb = r.lower_bound ();
1304 wide_int ub = upper_bound ();
1305 unsigned start = 0;
1306 if (widest_int::from (x: ub, sgn: sign) + 1
1307 == widest_int::from (x: lb, sgn: sign))
1308 {
1309 m_base[m_num_ranges * 2 - 1] = r.m_base[1];
1310 start = 1;
1311 }
1312 maybe_resize (needed: m_num_ranges + r.m_num_ranges - start);
1313 for ( ; start < r.m_num_ranges; start++)
1314 {
1315 // Merge the last ranges if it exceeds the maximum size.
1316 if (m_num_ranges + 1 > m_max_ranges)
1317 {
1318 m_base[m_max_ranges * 2 - 1] = r.m_base[r.m_num_ranges * 2 - 1];
1319 break;
1320 }
1321 m_base[m_num_ranges * 2] = r.m_base[start * 2];
1322 m_base[m_num_ranges * 2 + 1] = r.m_base[start * 2 + 1];
1323 m_num_ranges++;
1324 }
1325
1326 if (!union_bitmask (r))
1327 normalize_kind ();
1328 if (flag_checking)
1329 verify_range ();
1330 return true;
1331}
1332
1333// Return TRUE if anything changes.
1334
1335bool
1336irange::union_ (const vrange &v)
1337{
1338 const irange &r = as_a <irange> (v);
1339
1340 if (r.undefined_p ())
1341 return false;
1342
1343 if (undefined_p ())
1344 {
1345 operator= (src: r);
1346 if (flag_checking)
1347 verify_range ();
1348 return true;
1349 }
1350
1351 if (varying_p ())
1352 return false;
1353
1354 if (r.varying_p ())
1355 {
1356 set_varying (type ());
1357 return true;
1358 }
1359
1360 // Special case one range union one range.
1361 if (m_num_ranges == 1 && r.m_num_ranges == 1)
1362 return irange_single_pair_union (r);
1363
1364 signop sign = TYPE_SIGN (m_type);
1365 // Check for an append to the end.
1366 if (m_kind == VR_RANGE && wi::gt_p (x: r.lower_bound (), y: upper_bound (), sgn: sign))
1367 return union_append (r);
1368
1369 // If this ranges fully contains R, then we need do nothing.
1370 if (irange_contains_p (r))
1371 return union_bitmask (r);
1372
1373 // Do not worry about merging and such by reserving twice as many
1374 // pairs as needed, and then simply sort the 2 ranges into this
1375 // intermediate form.
1376 //
1377 // The intermediate result will have the property that the beginning
1378 // of each range is <= the beginning of the next range. There may
1379 // be overlapping ranges at this point. I.e. this would be valid
1380 // [-20, 10], [-10, 0], [0, 20], [40, 90] as it satisfies this
1381 // constraint : -20 < -10 < 0 < 40. When the range is rebuilt into r,
1382 // the merge is performed.
1383 //
1384 // [Xi,Yi]..[Xn,Yn] U [Xj,Yj]..[Xm,Ym] --> [Xk,Yk]..[Xp,Yp]
1385 auto_vec<wide_int, 20> res (m_num_ranges * 2 + r.m_num_ranges * 2);
1386 unsigned i = 0, j = 0, k = 0;
1387
1388 while (i < m_num_ranges * 2 && j < r.m_num_ranges * 2)
1389 {
1390 // lower of Xi and Xj is the lowest point.
1391 if (widest_int::from (x: m_base[i], sgn: sign)
1392 <= widest_int::from (x: r.m_base[j], sgn: sign))
1393 {
1394 res.quick_push (obj: m_base[i]);
1395 res.quick_push (obj: m_base[i + 1]);
1396 k += 2;
1397 i += 2;
1398 }
1399 else
1400 {
1401 res.quick_push (obj: r.m_base[j]);
1402 res.quick_push (obj: r.m_base[j + 1]);
1403 k += 2;
1404 j += 2;
1405 }
1406 }
1407 for ( ; i < m_num_ranges * 2; i += 2)
1408 {
1409 res.quick_push (obj: m_base[i]);
1410 res.quick_push (obj: m_base[i + 1]);
1411 k += 2;
1412 }
1413 for ( ; j < r.m_num_ranges * 2; j += 2)
1414 {
1415 res.quick_push (obj: r.m_base[j]);
1416 res.quick_push (obj: r.m_base[j + 1]);
1417 k += 2;
1418 }
1419
1420 // Now normalize the vector removing any overlaps.
1421 i = 2;
1422 for (j = 2; j < k ; j += 2)
1423 {
1424 // Current upper+1 is >= lower bound next pair, then we merge ranges.
1425 if (widest_int::from (x: res[i - 1], sgn: sign) + 1
1426 >= widest_int::from (x: res[j], sgn: sign))
1427 {
1428 // New upper bounds is greater of current or the next one.
1429 if (widest_int::from (x: res[j + 1], sgn: sign)
1430 > widest_int::from (x: res[i - 1], sgn: sign))
1431 res[i - 1] = res[j + 1];
1432 }
1433 else
1434 {
1435 // This is a new distinct range, but no point in copying it
1436 // if it is already in the right place.
1437 if (i != j)
1438 {
1439 res[i++] = res[j];
1440 res[i++] = res[j + 1];
1441 }
1442 else
1443 i += 2;
1444 }
1445 }
1446
1447 // At this point, the vector should have i ranges, none overlapping.
1448 // Now it simply needs to be copied, and if there are too many
1449 // ranges, merge some. We wont do any analysis as to what the
1450 // "best" merges are, simply combine the final ranges into one.
1451 maybe_resize (needed: i / 2);
1452 if (i > m_max_ranges * 2)
1453 {
1454 res[m_max_ranges * 2 - 1] = res[i - 1];
1455 i = m_max_ranges * 2;
1456 }
1457
1458 for (j = 0; j < i ; j++)
1459 m_base[j] = res [j];
1460 m_num_ranges = i / 2;
1461
1462 m_kind = VR_RANGE;
1463 // The range has been altered, so normalize it even if nothing
1464 // changed in the mask.
1465 if (!union_bitmask (r))
1466 normalize_kind ();
1467 if (flag_checking)
1468 verify_range ();
1469 return true;
1470}
1471
1472// Return TRUE if THIS fully contains R. No undefined or varying cases.
1473
1474bool
1475irange::irange_contains_p (const irange &r) const
1476{
1477 gcc_checking_assert (!undefined_p () && !varying_p ());
1478 gcc_checking_assert (!r.undefined_p () && !varying_p ());
1479
1480 // In order for THIS to fully contain R, all of the pairs within R must
1481 // be fully contained by the pairs in this object.
1482 signop sign = TYPE_SIGN (m_type);
1483 unsigned ri = 0;
1484 unsigned i = 0;
1485 wide_int rl = r.m_base[0];
1486 wide_int ru = r.m_base[1];
1487 wide_int l = m_base[0];
1488 wide_int u = m_base[1];
1489 while (1)
1490 {
1491 // If r is contained within this range, move to the next R
1492 if (wi::ge_p (x: rl, y: l, sgn: sign)
1493 && wi::le_p (x: ru, y: u, sgn: sign))
1494 {
1495 // This pair is OK, Either done, or bump to the next.
1496 if (++ri >= r.num_pairs ())
1497 return true;
1498 rl = r.m_base[ri * 2];
1499 ru = r.m_base[ri * 2 + 1];
1500 continue;
1501 }
1502 // Otherwise, check if this's pair occurs before R's.
1503 if (wi::lt_p (x: u, y: rl, sgn: sign))
1504 {
1505 // There's still at least one pair of R left.
1506 if (++i >= num_pairs ())
1507 return false;
1508 l = m_base[i * 2];
1509 u = m_base[i * 2 + 1];
1510 continue;
1511 }
1512 return false;
1513 }
1514 return false;
1515}
1516
1517
1518// Return TRUE if anything changes.
1519
1520bool
1521irange::intersect (const vrange &v)
1522{
1523 const irange &r = as_a <irange> (v);
1524 gcc_checking_assert (undefined_p () || r.undefined_p ()
1525 || range_compatible_p (type (), r.type ()));
1526
1527 if (undefined_p ())
1528 return false;
1529 if (r.undefined_p ())
1530 {
1531 set_undefined ();
1532 return true;
1533 }
1534 if (r.varying_p ())
1535 return false;
1536 if (varying_p ())
1537 {
1538 operator= (src: r);
1539 return true;
1540 }
1541
1542 if (r.num_pairs () == 1)
1543 {
1544 bool res = intersect (lb: r.lower_bound (), ub: r.upper_bound ());
1545 if (undefined_p ())
1546 return true;
1547
1548 res |= intersect_bitmask (r);
1549 if (res)
1550 normalize_kind ();
1551 return res;
1552 }
1553
1554 // If R fully contains this, then intersection will change nothing.
1555 if (r.irange_contains_p (r: *this))
1556 return intersect_bitmask (r);
1557
1558 // ?? We could probably come up with something smarter than the
1559 // worst case scenario here.
1560 int needed = num_pairs () + r.num_pairs ();
1561 maybe_resize (needed);
1562
1563 signop sign = TYPE_SIGN (m_type);
1564 unsigned bld_pair = 0;
1565 unsigned bld_lim = m_max_ranges;
1566 int_range_max r2 (*this);
1567 unsigned r2_lim = r2.num_pairs ();
1568 unsigned i2 = 0;
1569 for (unsigned i = 0; i < r.num_pairs (); )
1570 {
1571 // If r1's upper is < r2's lower, we can skip r1's pair.
1572 wide_int ru = r.m_base[i * 2 + 1];
1573 wide_int r2l = r2.m_base[i2 * 2];
1574 if (wi::lt_p (x: ru, y: r2l, sgn: sign))
1575 {
1576 i++;
1577 continue;
1578 }
1579 // Likewise, skip r2's pair if its excluded.
1580 wide_int r2u = r2.m_base[i2 * 2 + 1];
1581 wide_int rl = r.m_base[i * 2];
1582 if (wi::lt_p (x: r2u, y: rl, sgn: sign))
1583 {
1584 i2++;
1585 if (i2 < r2_lim)
1586 continue;
1587 // No more r2, break.
1588 break;
1589 }
1590
1591 // Must be some overlap. Find the highest of the lower bounds,
1592 // and set it, unless the build limits lower bounds is already
1593 // set.
1594 if (bld_pair < bld_lim)
1595 {
1596 if (wi::ge_p (x: rl, y: r2l, sgn: sign))
1597 m_base[bld_pair * 2] = rl;
1598 else
1599 m_base[bld_pair * 2] = r2l;
1600 }
1601 else
1602 // Decrease and set a new upper.
1603 bld_pair--;
1604
1605 // ...and choose the lower of the upper bounds.
1606 if (wi::le_p (x: ru, y: r2u, sgn: sign))
1607 {
1608 m_base[bld_pair * 2 + 1] = ru;
1609 bld_pair++;
1610 // Move past the r1 pair and keep trying.
1611 i++;
1612 continue;
1613 }
1614 else
1615 {
1616 m_base[bld_pair * 2 + 1] = r2u;
1617 bld_pair++;
1618 i2++;
1619 if (i2 < r2_lim)
1620 continue;
1621 // No more r2, break.
1622 break;
1623 }
1624 // r2 has the higher lower bound.
1625 }
1626
1627 // At the exit of this loop, it is one of 2 things:
1628 // ran out of r1, or r2, but either means we are done.
1629 m_num_ranges = bld_pair;
1630 if (m_num_ranges == 0)
1631 {
1632 set_undefined ();
1633 return true;
1634 }
1635
1636 m_kind = VR_RANGE;
1637 // The range has been altered, so normalize it even if nothing
1638 // changed in the mask.
1639 if (!intersect_bitmask (r))
1640 normalize_kind ();
1641 if (flag_checking)
1642 verify_range ();
1643 return true;
1644}
1645
1646
1647// Multirange intersect for a specified wide_int [lb, ub] range.
1648// Return TRUE if intersect changed anything.
1649//
1650// NOTE: It is the caller's responsibility to intersect the mask.
1651
1652bool
1653irange::intersect (const wide_int& lb, const wide_int& ub)
1654{
1655 // Undefined remains undefined.
1656 if (undefined_p ())
1657 return false;
1658
1659 tree range_type = type();
1660 signop sign = TYPE_SIGN (range_type);
1661
1662 gcc_checking_assert (TYPE_PRECISION (range_type) == wi::get_precision (lb));
1663 gcc_checking_assert (TYPE_PRECISION (range_type) == wi::get_precision (ub));
1664
1665 // If this range is fully contained, then intersection will do nothing.
1666 if (wi::ge_p (x: lower_bound (), y: lb, sgn: sign)
1667 && wi::le_p (x: upper_bound (), y: ub, sgn: sign))
1668 return false;
1669
1670 unsigned bld_index = 0;
1671 unsigned pair_lim = num_pairs ();
1672 for (unsigned i = 0; i < pair_lim; i++)
1673 {
1674 wide_int pairl = m_base[i * 2];
1675 wide_int pairu = m_base[i * 2 + 1];
1676 // Once UB is less than a pairs lower bound, we're done.
1677 if (wi::lt_p (x: ub, y: pairl, sgn: sign))
1678 break;
1679 // if LB is greater than this pairs upper, this pair is excluded.
1680 if (wi::lt_p (x: pairu, y: lb, sgn: sign))
1681 continue;
1682
1683 // Must be some overlap. Find the highest of the lower bounds,
1684 // and set it
1685 if (wi::gt_p (x: lb, y: pairl, sgn: sign))
1686 m_base[bld_index * 2] = lb;
1687 else
1688 m_base[bld_index * 2] = pairl;
1689
1690 // ...and choose the lower of the upper bounds and if the base pair
1691 // has the lower upper bound, need to check next pair too.
1692 if (wi::lt_p (x: ub, y: pairu, sgn: sign))
1693 {
1694 m_base[bld_index++ * 2 + 1] = ub;
1695 break;
1696 }
1697 else
1698 m_base[bld_index++ * 2 + 1] = pairu;
1699 }
1700
1701 m_num_ranges = bld_index;
1702 if (m_num_ranges == 0)
1703 {
1704 set_undefined ();
1705 return true;
1706 }
1707
1708 m_kind = VR_RANGE;
1709 // The caller must normalize and verify the range, as the bitmask
1710 // still needs to be handled.
1711 return true;
1712}
1713
1714
1715// Signed 1-bits are strange. You can't subtract 1, because you can't
1716// represent the number 1. This works around that for the invert routine.
1717
1718static wide_int inline
1719subtract_one (const wide_int &x, tree type, wi::overflow_type &overflow)
1720{
1721 if (TYPE_SIGN (type) == SIGNED)
1722 return wi::add (x, y: -1, sgn: SIGNED, overflow: &overflow);
1723 else
1724 return wi::sub (x, y: 1, sgn: UNSIGNED, overflow: &overflow);
1725}
1726
1727// The analogous function for adding 1.
1728
1729static wide_int inline
1730add_one (const wide_int &x, tree type, wi::overflow_type &overflow)
1731{
1732 if (TYPE_SIGN (type) == SIGNED)
1733 return wi::sub (x, y: -1, sgn: SIGNED, overflow: &overflow);
1734 else
1735 return wi::add (x, y: 1, sgn: UNSIGNED, overflow: &overflow);
1736}
1737
1738// Return the inverse of a range.
1739
1740void
1741irange::invert ()
1742{
1743 gcc_checking_assert (!undefined_p () && !varying_p ());
1744
1745 // We always need one more set of bounds to represent an inverse, so
1746 // if we're at the limit, we can't properly represent things.
1747 //
1748 // For instance, to represent the inverse of a 2 sub-range set
1749 // [5, 10][20, 30], we would need a 3 sub-range set
1750 // [-MIN, 4][11, 19][31, MAX].
1751 //
1752 // In this case, return the most conservative thing.
1753 //
1754 // However, if any of the extremes of the range are -MIN/+MAX, we
1755 // know we will not need an extra bound. For example:
1756 //
1757 // INVERT([-MIN,20][30,40]) => [21,29][41,+MAX]
1758 // INVERT([-MIN,20][30,MAX]) => [21,29]
1759 tree ttype = type ();
1760 unsigned prec = TYPE_PRECISION (ttype);
1761 signop sign = TYPE_SIGN (ttype);
1762 wide_int type_min = wi::min_value (prec, sign);
1763 wide_int type_max = wi::max_value (prec, sign);
1764 m_bitmask.set_unknown (prec);
1765
1766 // At this point, we need one extra sub-range to represent the
1767 // inverse.
1768 maybe_resize (needed: m_num_ranges + 1);
1769
1770 // The algorithm is as follows. To calculate INVERT ([a,b][c,d]), we
1771 // generate [-MIN, a-1][b+1, c-1][d+1, MAX].
1772 //
1773 // If there is an over/underflow in the calculation for any
1774 // sub-range, we eliminate that subrange. This allows us to easily
1775 // calculate INVERT([-MIN, 5]) with: [-MIN, -MIN-1][6, MAX]. And since
1776 // we eliminate the underflow, only [6, MAX] remains.
1777 unsigned i = 0;
1778 wi::overflow_type ovf;
1779 // Construct leftmost range.
1780 int_range_max orig_range (*this);
1781 unsigned nitems = 0;
1782 wide_int tmp;
1783 // If this is going to underflow on the MINUS 1, don't even bother
1784 // checking. This also handles subtracting one from an unsigned 0,
1785 // which doesn't set the underflow bit.
1786 if (type_min != orig_range.lower_bound ())
1787 {
1788 m_base[nitems++] = type_min;
1789 tmp = subtract_one (x: orig_range.lower_bound (), type: ttype, overflow&: ovf);
1790 m_base[nitems++] = tmp;
1791 if (ovf)
1792 nitems = 0;
1793 }
1794 i++;
1795 // Construct middle ranges if applicable.
1796 if (orig_range.num_pairs () > 1)
1797 {
1798 unsigned j = i;
1799 for (; j < (orig_range.num_pairs () * 2) - 1; j += 2)
1800 {
1801 // The middle ranges cannot have MAX/MIN, so there's no need
1802 // to check for unsigned overflow on the +1 and -1 here.
1803 tmp = wi::add (x: orig_range.m_base[j], y: 1, sgn: sign, overflow: &ovf);
1804 m_base[nitems++] = tmp;
1805 tmp = subtract_one (x: orig_range.m_base[j + 1], type: ttype, overflow&: ovf);
1806 m_base[nitems++] = tmp;
1807 if (ovf)
1808 nitems -= 2;
1809 }
1810 i = j;
1811 }
1812 // Construct rightmost range.
1813 //
1814 // However, if this will overflow on the PLUS 1, don't even bother.
1815 // This also handles adding one to an unsigned MAX, which doesn't
1816 // set the overflow bit.
1817 if (type_max != orig_range.m_base[i])
1818 {
1819 tmp = add_one (x: orig_range.m_base[i], type: ttype, overflow&: ovf);
1820 m_base[nitems++] = tmp;
1821 m_base[nitems++] = type_max;
1822 if (ovf)
1823 nitems -= 2;
1824 }
1825 m_num_ranges = nitems / 2;
1826
1827 // We disallow undefined or varying coming in, so the result can
1828 // only be a VR_RANGE.
1829 gcc_checking_assert (m_kind == VR_RANGE);
1830
1831 if (flag_checking)
1832 verify_range ();
1833}
1834
1835// Return the bitmask inherent in the range.
1836
1837irange_bitmask
1838irange::get_bitmask_from_range () const
1839{
1840 unsigned prec = TYPE_PRECISION (type ());
1841 wide_int min = lower_bound ();
1842 wide_int max = upper_bound ();
1843
1844 // All the bits of a singleton are known.
1845 if (min == max)
1846 {
1847 wide_int mask = wi::zero (precision: prec);
1848 wide_int value = lower_bound ();
1849 return irange_bitmask (value, mask);
1850 }
1851
1852 wide_int xorv = min ^ max;
1853
1854 if (xorv != 0)
1855 xorv = wi::mask (width: prec - wi::clz (xorv), negate_p: false, precision: prec);
1856
1857 return irange_bitmask (wi::zero (precision: prec), min | xorv);
1858}
1859
1860// Remove trailing ranges that this bitmask indicates can't exist.
1861
1862void
1863irange_bitmask::adjust_range (irange &r) const
1864{
1865 if (unknown_p () || r.undefined_p ())
1866 return;
1867
1868 int_range_max range;
1869 tree type = r.type ();
1870 int prec = TYPE_PRECISION (type);
1871 // If there are trailing zeros, create a range representing those bits.
1872 gcc_checking_assert (m_mask != 0);
1873 int z = wi::ctz (m_mask);
1874 if (z)
1875 {
1876 wide_int ub = (wi::one (precision: prec) << z) - 1;
1877 range = int_range<5> (type, wi::zero (precision: prec), ub);
1878 // Then remove the specific value these bits contain from the range.
1879 wide_int value = m_value & ub;
1880 range.intersect (v: int_range<2> (type, value, value, VR_ANTI_RANGE));
1881 // Inverting produces a list of ranges which can be valid.
1882 range.invert ();
1883 // And finally select R from only those valid values.
1884 r.intersect (v: range);
1885 return;
1886 }
1887}
1888
1889// If the the mask can be trivially converted to a range, do so and
1890// return TRUE.
1891
1892bool
1893irange::set_range_from_bitmask ()
1894{
1895 gcc_checking_assert (!undefined_p ());
1896 if (m_bitmask.unknown_p ())
1897 return false;
1898
1899 // If all the bits are known, this is a singleton.
1900 if (m_bitmask.mask () == 0)
1901 {
1902 set (type: m_type, min: m_bitmask.value (), max: m_bitmask.value ());
1903 return true;
1904 }
1905
1906 unsigned popcount = wi::popcount (m_bitmask.get_nonzero_bits ());
1907
1908 // If we have only one bit set in the mask, we can figure out the
1909 // range immediately.
1910 if (popcount == 1)
1911 {
1912 // Make sure we don't pessimize the range.
1913 if (!contains_p (cst: m_bitmask.get_nonzero_bits ()))
1914 return false;
1915
1916 bool has_zero = contains_zero_p (r: *this);
1917 wide_int nz = m_bitmask.get_nonzero_bits ();
1918 set (type: m_type, min: nz, max: nz);
1919 m_bitmask.set_nonzero_bits (nz);
1920 if (has_zero)
1921 {
1922 int_range<2> zero;
1923 zero.set_zero (type ());
1924 union_ (v: zero);
1925 }
1926 if (flag_checking)
1927 verify_range ();
1928 return true;
1929 }
1930 else if (popcount == 0)
1931 {
1932 set_zero (type ());
1933 return true;
1934 }
1935 return false;
1936}
1937
1938void
1939irange::update_bitmask (const irange_bitmask &bm)
1940{
1941 gcc_checking_assert (!undefined_p ());
1942
1943 // Drop VARYINGs with known bits to a plain range.
1944 if (m_kind == VR_VARYING && !bm.unknown_p ())
1945 m_kind = VR_RANGE;
1946
1947 m_bitmask = bm;
1948 if (!set_range_from_bitmask ())
1949 normalize_kind ();
1950 if (flag_checking)
1951 verify_range ();
1952}
1953
1954// Return the bitmask of known bits that includes the bitmask inherent
1955// in the range.
1956
1957irange_bitmask
1958irange::get_bitmask () const
1959{
1960 gcc_checking_assert (!undefined_p ());
1961
1962 // The mask inherent in the range is calculated on-demand. For
1963 // example, [0,255] does not have known bits set by default. This
1964 // saves us considerable time, because setting it at creation incurs
1965 // a large penalty for irange::set. At the time of writing there
1966 // was a 5% slowdown in VRP if we kept the mask precisely up to date
1967 // at all times. Instead, we default to -1 and set it when
1968 // explicitly requested. However, this function will always return
1969 // the correct mask.
1970 //
1971 // This also means that the mask may have a finer granularity than
1972 // the range and thus contradict it. Think of the mask as an
1973 // enhancement to the range. For example:
1974 //
1975 // [3, 1000] MASK 0xfffffffe VALUE 0x0
1976 //
1977 // 3 is in the range endpoints, but is excluded per the known 0 bits
1978 // in the mask.
1979 //
1980 // See also the note in irange_bitmask::intersect.
1981 irange_bitmask bm = get_bitmask_from_range ();
1982 if (!m_bitmask.unknown_p ())
1983 bm.intersect (orig_src: m_bitmask);
1984 return bm;
1985}
1986
1987// Set the nonzero bits in R into THIS. Return TRUE and
1988// normalize the range if anything changed.
1989
1990void
1991irange::set_nonzero_bits (const wide_int &bits)
1992{
1993 gcc_checking_assert (!undefined_p ());
1994 irange_bitmask bm (wi::zero (TYPE_PRECISION (type ())), bits);
1995 update_bitmask (bm);
1996}
1997
1998// Return the nonzero bits in R.
1999
2000wide_int
2001irange::get_nonzero_bits () const
2002{
2003 gcc_checking_assert (!undefined_p ());
2004 irange_bitmask bm = get_bitmask ();
2005 return bm.value () | bm.mask ();
2006}
2007
2008// Intersect the bitmask in R into THIS and normalize the range.
2009// Return TRUE if the intersection changed anything.
2010
2011bool
2012irange::intersect_bitmask (const irange &r)
2013{
2014 gcc_checking_assert (!undefined_p () && !r.undefined_p ());
2015
2016 if (m_bitmask == r.m_bitmask)
2017 return false;
2018
2019 irange_bitmask bm = get_bitmask ();
2020 irange_bitmask save = bm;
2021 if (!bm.intersect (orig_src: r.get_bitmask ()))
2022 return false;
2023
2024 m_bitmask = bm;
2025
2026 // Updating m_bitmask may still yield a semantic bitmask (as
2027 // returned by get_bitmask) which is functionally equivalent to what
2028 // we originally had. In which case, there's still no change.
2029 if (save == get_bitmask ())
2030 return false;
2031
2032 if (!set_range_from_bitmask ())
2033 normalize_kind ();
2034 m_bitmask.adjust_range (r&: *this);
2035 if (flag_checking)
2036 verify_range ();
2037 return true;
2038}
2039
2040// Union the bitmask in R into THIS. Return TRUE and normalize the
2041// range if anything changed.
2042
2043bool
2044irange::union_bitmask (const irange &r)
2045{
2046 gcc_checking_assert (!undefined_p () && !r.undefined_p ());
2047
2048 if (m_bitmask == r.m_bitmask)
2049 return false;
2050
2051 irange_bitmask bm = get_bitmask ();
2052 irange_bitmask save = bm;
2053 if (!bm.union_ (orig_src: r.get_bitmask ()))
2054 return false;
2055
2056 m_bitmask = bm;
2057
2058 // Updating m_bitmask may still yield a semantic bitmask (as
2059 // returned by get_bitmask) which is functionally equivalent to what
2060 // we originally had. In which case, there's still no change.
2061 if (save == get_bitmask ())
2062 return false;
2063
2064 // No need to call set_range_from_mask, because we'll never
2065 // narrow the range. Besides, it would cause endless recursion
2066 // because of the union_ in set_range_from_mask.
2067 normalize_kind ();
2068 return true;
2069}
2070
2071void
2072irange_bitmask::verify_mask () const
2073{
2074 gcc_assert (m_value.get_precision () == m_mask.get_precision ());
2075}
2076
2077void
2078dump_value_range (FILE *file, const vrange *vr)
2079{
2080 vr->dump (file);
2081}
2082
2083DEBUG_FUNCTION void
2084debug (const vrange *vr)
2085{
2086 dump_value_range (stderr, vr);
2087 fprintf (stderr, format: "\n");
2088}
2089
2090DEBUG_FUNCTION void
2091debug (const vrange &vr)
2092{
2093 debug (vr: &vr);
2094}
2095
2096DEBUG_FUNCTION void
2097debug (const value_range *vr)
2098{
2099 dump_value_range (stderr, vr);
2100 fprintf (stderr, format: "\n");
2101}
2102
2103DEBUG_FUNCTION void
2104debug (const value_range &vr)
2105{
2106 dump_value_range (stderr, vr: &vr);
2107 fprintf (stderr, format: "\n");
2108}
2109
2110/* Return true, if VAL1 and VAL2 are equal values for VRP purposes. */
2111
2112bool
2113vrp_operand_equal_p (const_tree val1, const_tree val2)
2114{
2115 if (val1 == val2)
2116 return true;
2117 if (!val1 || !val2 || !operand_equal_p (val1, val2, flags: 0))
2118 return false;
2119 return true;
2120}

void
gt_ggc_mx (irange *x)
{
  if (!x->undefined_p ())
    gt_ggc_mx (x->m_type);
}

void
gt_pch_nx (irange *x)
{
  if (!x->undefined_p ())
    gt_pch_nx (x->m_type);
}

void
gt_pch_nx (irange *x, gt_pointer_operator op, void *cookie)
{
  for (unsigned i = 0; i < x->m_num_ranges; ++i)
    {
      op (&x->m_base[i * 2], NULL, cookie);
      op (&x->m_base[i * 2 + 1], NULL, cookie);
    }
}

void
gt_ggc_mx (frange *x)
{
  gt_ggc_mx (x->m_type);
}

void
gt_pch_nx (frange *x)
{
  gt_pch_nx (x->m_type);
}

void
gt_pch_nx (frange *x, gt_pointer_operator op, void *cookie)
{
  op (&x->m_type, NULL, cookie);
}

void
gt_ggc_mx (vrange *x)
{
  if (is_a <irange> (*x))
    return gt_ggc_mx ((irange *) x);
  if (is_a <frange> (*x))
    return gt_ggc_mx ((frange *) x);
  gcc_unreachable ();
}

void
gt_pch_nx (vrange *x)
{
  if (is_a <irange> (*x))
    return gt_pch_nx ((irange *) x);
  if (is_a <frange> (*x))
    return gt_pch_nx ((frange *) x);
  gcc_unreachable ();
}

void
gt_pch_nx (vrange *x, gt_pointer_operator op, void *cookie)
{
  if (is_a <irange> (*x))
    gt_pch_nx ((irange *) x, op, cookie);
  else if (is_a <frange> (*x))
    gt_pch_nx ((frange *) x, op, cookie);
  else
    gcc_unreachable ();
}

#define DEFINE_INT_RANGE_INSTANCE(N)                                    \
  template int_range<N>::int_range(tree_node *,                         \
                                   const wide_int &,                    \
                                   const wide_int &,                    \
                                   value_range_kind);                   \
  template int_range<N>::int_range(tree);                               \
  template int_range<N>::int_range(const irange &);                     \
  template int_range<N>::int_range(const int_range &);                  \
  template int_range<N>& int_range<N>::operator= (const int_range &);

DEFINE_INT_RANGE_INSTANCE(1)
DEFINE_INT_RANGE_INSTANCE(2)
DEFINE_INT_RANGE_INSTANCE(3)
DEFINE_INT_RANGE_INSTANCE(255)
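
// These explicit instantiations provide the int_range<N> widths used
// elsewhere in the compiler.  An illustrative declaration:
//
//   int_range<2> r (integer_type_node,
//                   wi::shwi (10, TYPE_PRECISION (integer_type_node)),
//                   wi::shwi (20, TYPE_PRECISION (integer_type_node))); // [10, 20]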

#if CHECKING_P
#include "selftest.h"

#define INT(x) wi::shwi ((x), TYPE_PRECISION (integer_type_node))
#define UINT(x) wi::uhwi ((x), TYPE_PRECISION (unsigned_type_node))
#define SCHAR(x) wi::shwi ((x), TYPE_PRECISION (signed_char_type_node))

namespace selftest
{

static int_range<2>
range (tree type, int a, int b, value_range_kind kind = VR_RANGE)
{
  wide_int w1, w2;
  if (TYPE_UNSIGNED (type))
    {
      w1 = wi::uhwi (a, TYPE_PRECISION (type));
      w2 = wi::uhwi (b, TYPE_PRECISION (type));
    }
  else
    {
      w1 = wi::shwi (a, TYPE_PRECISION (type));
      w2 = wi::shwi (b, TYPE_PRECISION (type));
    }
  return int_range<2> (type, w1, w2, kind);
}
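
// For example, range (integer_type_node, 10, 20) builds the range [10, 20],
// and range (integer_type_node, 5, 5, VR_ANTI_RANGE) builds ~[5, 5].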

static int_range<2>
range_int (int a, int b, value_range_kind kind = VR_RANGE)
{
  return range (integer_type_node, a, b, kind);
}

static int_range<2>
range_uint (int a, int b, value_range_kind kind = VR_RANGE)
{
  return range (unsigned_type_node, a, b, kind);
}

static int_range<2>
range_uint128 (int a, int b, value_range_kind kind = VR_RANGE)
{
  tree u128_type_node = build_nonstandard_integer_type (128, 1);
  return range (u128_type_node, a, b, kind);
}

static int_range<2>
range_uchar (int a, int b, value_range_kind kind = VR_RANGE)
{
  return range (unsigned_char_type_node, a, b, kind);
}

static int_range<2>
range_char (int a, int b, value_range_kind kind = VR_RANGE)
{
  return range (signed_char_type_node, a, b, kind);
}

static int_range<3>
build_range3 (int a, int b, int c, int d, int e, int f)
{
  int_range<3> i1 = range_int (a, b);
  int_range<3> i2 = range_int (c, d);
  int_range<3> i3 = range_int (e, f);
  i1.union_ (i2);
  i1.union_ (i3);
  return i1;
}

static void
range_tests_irange3 ()
{
  int_range<3> r0, r1, r2;
  int_range<3> i1, i2, i3;

  // ([10,20] U [5,8]) U [1,3] ==> [1,3][5,8][10,20].
  r0 = range_int (10, 20);
  r1 = range_int (5, 8);
  r0.union_ (r1);
  r1 = range_int (1, 3);
  r0.union_ (r1);
  ASSERT_TRUE (r0 == build_range3 (1, 3, 5, 8, 10, 20));

  // [1,3][5,8][10,20] U [-5,0] => [-5,3][5,8][10,20].
  r1 = range_int (-5, 0);
  r0.union_ (r1);
  ASSERT_TRUE (r0 == build_range3 (-5, 3, 5, 8, 10, 20));

  // [10,20][30,40] U [50,60] ==> [10,20][30,40][50,60].
  r1 = range_int (50, 60);
  r0 = range_int (10, 20);
  r0.union_ (range_int (30, 40));
  r0.union_ (r1);
  ASSERT_TRUE (r0 == build_range3 (10, 20, 30, 40, 50, 60));
  // [10,20][30,40][50,60] U [70, 80] ==> [10,20][30,40][50,60][70,80].
  r1 = range_int (70, 80);
  r0.union_ (r1);

  r2 = build_range3 (10, 20, 30, 40, 50, 60);
  r2.union_ (range_int (70, 80));
  ASSERT_TRUE (r0 == r2);

  // [10,20][30,40][50,60] U [6,35] => [6,40][50,60].
  r0 = build_range3 (10, 20, 30, 40, 50, 60);
  r1 = range_int (6, 35);
  r0.union_ (r1);
  r1 = range_int (6, 40);
  r1.union_ (range_int (50, 60));
  ASSERT_TRUE (r0 == r1);

  // [10,20][30,40][50,60] U [6,60] => [6,60].
  r0 = build_range3 (10, 20, 30, 40, 50, 60);
  r1 = range_int (6, 60);
  r0.union_ (r1);
  ASSERT_TRUE (r0 == range_int (6, 60));

  // [10,20][30,40][50,60] U [6,70] => [6,70].
  r0 = build_range3 (10, 20, 30, 40, 50, 60);
  r1 = range_int (6, 70);
  r0.union_ (r1);
  ASSERT_TRUE (r0 == range_int (6, 70));

  // [10,20][30,40][50,60] U [35,70] => [10,20][30,70].
  r0 = build_range3 (10, 20, 30, 40, 50, 60);
  r1 = range_int (35, 70);
  r0.union_ (r1);
  r1 = range_int (10, 20);
  r1.union_ (range_int (30, 70));
  ASSERT_TRUE (r0 == r1);

  // [10,20][30,40][50,60] U [15,35] => [10,40][50,60].
  r0 = build_range3 (10, 20, 30, 40, 50, 60);
  r1 = range_int (15, 35);
  r0.union_ (r1);
  r1 = range_int (10, 40);
  r1.union_ (range_int (50, 60));
  ASSERT_TRUE (r0 == r1);

  // [10,20][30,40][50,60] U [35,35] => [10,20][30,40][50,60].
  r0 = build_range3 (10, 20, 30, 40, 50, 60);
  r1 = range_int (35, 35);
  r0.union_ (r1);
  ASSERT_TRUE (r0 == build_range3 (10, 20, 30, 40, 50, 60));
}

static void
range_tests_int_range_max ()
{
  int_range_max big;
  unsigned int nrange;

  // Build a huge multi-range range.
  for (nrange = 0; nrange < 50; ++nrange)
    {
      int_range<1> tmp = range_int (nrange * 10, nrange * 10 + 5);
      big.union_ (tmp);
    }
  ASSERT_TRUE (big.num_pairs () == nrange);

  // Verify that we can copy it without losing precision.
  int_range_max copy (big);
  ASSERT_TRUE (copy.num_pairs () == nrange);

  // Inverting it should produce one more sub-range.
  big.invert ();
  ASSERT_TRUE (big.num_pairs () == nrange + 1);

  int_range<1> tmp = range_int (5, 37);
  big.intersect (tmp);
  ASSERT_TRUE (big.num_pairs () == 4);

  // Test that [10,10][20,20] does NOT contain 15.
  {
    int_range_max i1 = range_int (10, 10);
    int_range_max i2 = range_int (20, 20);
    i1.union_ (i2);
    ASSERT_FALSE (i1.contains_p (INT (15)));
  }
}

// Simulate -fstrict-enums, where the domain of a type is narrower than
// that of the underlying type.

static void
range_tests_strict_enum ()
{
  // The enum can only hold [0, 3].
  tree rtype = copy_node (unsigned_type_node);
  TYPE_MIN_VALUE (rtype) = build_int_cstu (rtype, 0);
  TYPE_MAX_VALUE (rtype) = build_int_cstu (rtype, 3);

  // Test that even though vr1 covers the strict enum domain ([0, 3]),
  // it does not cover the domain of the underlying type.
  int_range<1> vr1 = range (rtype, 0, 1);
  int_range<1> vr2 = range (rtype, 2, 3);
  vr1.union_ (vr2);
  ASSERT_TRUE (vr1 == range (rtype, 0, 3));
  ASSERT_FALSE (vr1.varying_p ());

  // Test that copying to a multi-range does not change things.
  int_range<2> ir1 (vr1);
  ASSERT_TRUE (ir1 == vr1);
  ASSERT_FALSE (ir1.varying_p ());

  // The same test as above, but using TYPE_{MIN,MAX}_VALUE instead of [0,3].
  vr1 = int_range<2> (rtype,
                      wi::to_wide (TYPE_MIN_VALUE (rtype)),
                      wi::to_wide (TYPE_MAX_VALUE (rtype)));
  ir1 = vr1;
  ASSERT_TRUE (ir1 == vr1);
  ASSERT_FALSE (ir1.varying_p ());
}
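
// For reference, the shape simulated above corresponds roughly to a
// source-level enum such as (illustrative only):
//
//   enum e { a, b, c, d };
//
// where -fstrict-enums lets the compiler assume values of type "enum e"
// stay within [0, 3] even though the underlying type can represent more.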

static void
range_tests_misc ()
{
  tree u128_type = build_nonstandard_integer_type (128, /*unsigned=*/1);
  int_range<2> i1, i2, i3;
  int_range<2> r0, r1, rold;

  // Test 1-bit signed integer union.
  // [-1,-1] U [0,0] = VARYING.
  tree one_bit_type = build_nonstandard_integer_type (1, 0);
  wide_int one_bit_min = irange_val_min (one_bit_type);
  wide_int one_bit_max = irange_val_max (one_bit_type);
  {
    int_range<2> min = int_range<2> (one_bit_type, one_bit_min, one_bit_min);
    int_range<2> max = int_range<2> (one_bit_type, one_bit_max, one_bit_max);
    max.union_ (min);
    ASSERT_TRUE (max.varying_p ());
  }
  // Test that we can set a range of true+false for a 1-bit signed int.
  r0 = range_true_and_false (one_bit_type);

  // Test inversion of 1-bit signed integers.
  {
    int_range<2> min = int_range<2> (one_bit_type, one_bit_min, one_bit_min);
    int_range<2> max = int_range<2> (one_bit_type, one_bit_max, one_bit_max);
    int_range<2> t;
    t = min;
    t.invert ();
    ASSERT_TRUE (t == max);
    t = max;
    t.invert ();
    ASSERT_TRUE (t == min);
  }

  // Test that NOT(255) is [0..254] in 8-bit land.
  int_range<1> not_255 = range_uchar (255, 255, VR_ANTI_RANGE);
  ASSERT_TRUE (not_255 == range_uchar (0, 254));

  // Test that NOT(0) is [1..255] in 8-bit land.
  int_range<2> not_zero = range_nonzero (unsigned_char_type_node);
  ASSERT_TRUE (not_zero == range_uchar (1, 255));

  // Check that [0,127][0x..ffffff80,0x..ffffff]
  // => ~[128, 0x..ffffff7f].
  r0 = range_uint128 (0, 127);
  wide_int high = wi::minus_one (128);
  // low = -1 - 127 => 0x..ffffff80.
  wide_int low = wi::sub (high, wi::uhwi (127, 128));
  r1 = int_range<1> (u128_type, low, high); // [0x..ffffff80, 0x..ffffffff]
  // r0 = [0,127][0x..ffffff80,0x..fffffff].
  r0.union_ (r1);
  // r1 = [128, 0x..ffffff7f].
  r1 = int_range<1> (u128_type,
                     wi::uhwi (128, 128),
                     wi::sub (wi::minus_one (128), wi::uhwi (128, 128)));
  r0.invert ();
  ASSERT_TRUE (r0 == r1);

  r0.set_varying (integer_type_node);
  wide_int minint = r0.lower_bound ();
  wide_int maxint = r0.upper_bound ();

  r0.set_varying (short_integer_type_node);

  r0.set_varying (unsigned_type_node);
  wide_int maxuint = r0.upper_bound ();

  // Check that ~[0,5] => [6,MAX] for unsigned int.
  r0 = range_uint (0, 5);
  r0.invert ();
  ASSERT_TRUE (r0 == int_range<1> (unsigned_type_node,
                                   wi::uhwi (6, TYPE_PRECISION (unsigned_type_node)),
                                   maxuint));

  // Check that ~[10,MAX] => [0,9] for unsigned int.
  r0 = int_range<1> (unsigned_type_node,
                     wi::uhwi (10, TYPE_PRECISION (unsigned_type_node)),
                     maxuint);
  r0.invert ();
  ASSERT_TRUE (r0 == range_uint (0, 9));

  // Check that ~[0,5] => [6,MAX] for unsigned 128-bit numbers.
  r0 = range_uint128 (0, 5, VR_ANTI_RANGE);
  r1 = int_range<1> (u128_type, wi::uhwi (6, 128), wi::minus_one (128));
  ASSERT_TRUE (r0 == r1);

  // Check that [~5] is really [-MIN,4][6,MAX].
  r0 = range_int (5, 5, VR_ANTI_RANGE);
  r1 = int_range<1> (integer_type_node, minint, INT (4));
  r1.union_ (int_range<1> (integer_type_node, INT (6), maxint));
  ASSERT_FALSE (r1.undefined_p ());
  ASSERT_TRUE (r0 == r1);

  r1 = range_int (5, 5);
  int_range<2> r2 (r1);
  ASSERT_TRUE (r1 == r2);

  r1 = range_int (5, 10);

  r1 = range_int (5, 10);
  ASSERT_TRUE (r1.contains_p (INT (7)));

  r1 = range_char (0, 20);
  ASSERT_TRUE (r1.contains_p (SCHAR(15)));
  ASSERT_FALSE (r1.contains_p (SCHAR(300)));

  // NOT([10,20]) ==> [-MIN,9][21,MAX].
  r0 = r1 = range_int (10, 20);
  r2 = int_range<1> (integer_type_node, minint, INT(9));
  r2.union_ (int_range<1> (integer_type_node, INT(21), maxint));
  ASSERT_FALSE (r2.undefined_p ());
  r1.invert ();
  ASSERT_TRUE (r1 == r2);
  // Test that NOT(NOT(x)) == x.
  r2.invert ();
  ASSERT_TRUE (r0 == r2);

  // Test that booleans and their inverse work as expected.
  r0 = range_zero (boolean_type_node);
  ASSERT_TRUE (r0 == range_false ());
  r0.invert ();
  ASSERT_TRUE (r0 == range_true ());

  // Make sure NULL and non-NULL of pointer types work, and that
  // inverses of them are consistent.
  tree voidp = build_pointer_type (void_type_node);
  r0 = range_zero (voidp);
  r1 = r0;
  r0.invert ();
  r0.invert ();
  ASSERT_TRUE (r0 == r1);

  // [10,20] U [15, 30] => [10, 30].
  r0 = range_int (10, 20);
  r1 = range_int (15, 30);
  r0.union_ (r1);
  ASSERT_TRUE (r0 == range_int (10, 30));

  // [15,40] U [] => [15,40].
  r0 = range_int (15, 40);
  r1.set_undefined ();
  r0.union_ (r1);
  ASSERT_TRUE (r0 == range_int (15, 40));

  // [10,20] U [10,10] => [10,20].
  r0 = range_int (10, 20);
  r1 = range_int (10, 10);
  r0.union_ (r1);
  ASSERT_TRUE (r0 == range_int (10, 20));

  // [10,20] U [9,9] => [9,20].
  r0 = range_int (10, 20);
  r1 = range_int (9, 9);
  r0.union_ (r1);
  ASSERT_TRUE (r0 == range_int (9, 20));

  // [10,20] ^ [15,30] => [15,20].
  r0 = range_int (10, 20);
  r1 = range_int (15, 30);
  r0.intersect (r1);
  ASSERT_TRUE (r0 == range_int (15, 20));

  // Test the internal sanity of wide_int's wrt HWIs.
  ASSERT_TRUE (wi::max_value (TYPE_PRECISION (boolean_type_node),
                              TYPE_SIGN (boolean_type_node))
               == wi::uhwi (1, TYPE_PRECISION (boolean_type_node)));

  // Test zero_p().
  r0 = range_int (0, 0);
  ASSERT_TRUE (r0.zero_p ());

  // Test nonzero_p().
  r0 = range_int (0, 0);
  r0.invert ();
  ASSERT_TRUE (r0.nonzero_p ());

  // r0 = ~[1,1]
  r0 = range_int (1, 1, VR_ANTI_RANGE);
  // r1 = ~[3,3]
  r1 = range_int (3, 3, VR_ANTI_RANGE);

  // vv = [0,0][2,2][4, MAX]
  int_range<3> vv = r0;
  vv.intersect (r1);

  ASSERT_TRUE (vv.contains_p (UINT (2)));
  ASSERT_TRUE (vv.num_pairs () == 3);

  r0 = range_int (1, 1);
  // And union it with the [0,0][2,2][4,MAX] multi-range.
  r0.union_ (vv);
  // The result should be [0,2][4,MAX], or ~[3,3], but it must contain 2.
  ASSERT_TRUE (r0.contains_p (INT (2)));
}

static void
range_tests_nonzero_bits ()
{
  int_range<2> r0, r1;

  // Adding nonzero bits to a varying drops the varying.
  r0.set_varying (integer_type_node);
  r0.set_nonzero_bits (INT (255));
  ASSERT_TRUE (!r0.varying_p ());
  // Dropping the nonzero bits brings us back to varying.
  r0.set_nonzero_bits (INT (-1));
  ASSERT_TRUE (r0.varying_p ());

  // Test contains_p with nonzero bits.
  r0.set_zero (integer_type_node);
  ASSERT_TRUE (r0.contains_p (INT (0)));
  ASSERT_FALSE (r0.contains_p (INT (1)));
  r0.set_nonzero_bits (INT (0xfe));
  ASSERT_FALSE (r0.contains_p (INT (0x100)));
  ASSERT_FALSE (r0.contains_p (INT (0x3)));

  // Union of nonzero bits.
  r0.set_varying (integer_type_node);
  r0.set_nonzero_bits (INT (0xf0));
  r1.set_varying (integer_type_node);
  r1.set_nonzero_bits (INT (0xf));
  r0.union_ (r1);
  ASSERT_TRUE (r0.get_nonzero_bits () == 0xff);

  // Intersect of nonzero bits.
  r0 = range_int (0, 255);
  r0.set_nonzero_bits (INT (0xfe));
  r1.set_varying (integer_type_node);
  r1.set_nonzero_bits (INT (0xf0));
  r0.intersect (r1);
  ASSERT_TRUE (r0.get_nonzero_bits () == 0xf0);

  // Intersect where the mask of nonzero bits is implicit from the range.
  r0.set_varying (integer_type_node);
  r1 = range_int (0, 255);
  r0.intersect (r1);
  ASSERT_TRUE (r0.get_nonzero_bits () == 0xff);

  // The union of a mask of 0xff..ffff00 with a mask of 0xff spans the
  // entire domain, and makes the range a varying.
  r0.set_varying (integer_type_node);
  wide_int x = wi::shwi (0xff, TYPE_PRECISION (integer_type_node));
  x = wi::bit_not (x);
  r0.set_nonzero_bits (x); // 0xff..ff00
  r1.set_varying (integer_type_node);
  r1.set_nonzero_bits (INT (0xff));
  r0.union_ (r1);
  ASSERT_TRUE (r0.varying_p ());

  // Test that setting a nonzero bit of 1 does not pessimize the range.
  r0.set_zero (integer_type_node);
  r0.set_nonzero_bits (INT (1));
  ASSERT_TRUE (r0.zero_p ());
}

// Build an frange from string endpoints.

static inline frange
frange_float (const char *lb, const char *ub, tree type = float_type_node)
{
  REAL_VALUE_TYPE min, max;
  gcc_assert (real_from_string (&min, lb) == 0);
  gcc_assert (real_from_string (&max, ub) == 0);
  return frange (type, min, max);
}
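
// For example, frange_float ("-1", "1") builds the float range [-1.0, 1.0],
// and frange_float ("0", "10", double_type_node) builds [0.0, 10.0] in
// double.  The asserts above rely on real_from_string returning 0, i.e. a
// conversion with neither overflow nor underflow.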

static void
range_tests_nan ()
{
  frange r0, r1;
  REAL_VALUE_TYPE q, r;
  bool signbit;

  // Equal ranges but with differing NAN bits are not equal.
  if (HONOR_NANS (float_type_node))
    {
      r1 = frange_float ("10", "12");
      r0 = r1;
      ASSERT_EQ (r0, r1);
      r0.clear_nan ();
      ASSERT_NE (r0, r1);
      r0.update_nan ();
      ASSERT_EQ (r0, r1);

      // [10, 20] NAN ^ [30, 40] NAN = NAN.
      r0 = frange_float ("10", "20");
      r1 = frange_float ("30", "40");
      r0.intersect (r1);
      ASSERT_TRUE (r0.known_isnan ());

      // [3,5] U [5,10] NAN = ... NAN
      r0 = frange_float ("3", "5");
      r0.clear_nan ();
      r1 = frange_float ("5", "10");
      r0.union_ (r1);
      ASSERT_TRUE (r0.maybe_isnan ());
    }

  // [5,6] U NAN = [5,6] NAN.
  r0 = frange_float ("5", "6");
  r0.clear_nan ();
  r1.set_nan (float_type_node);
  r0.union_ (r1);
  real_from_string (&q, "5");
  real_from_string (&r, "6");
  ASSERT_TRUE (real_identical (&q, &r0.lower_bound ()));
  ASSERT_TRUE (real_identical (&r, &r0.upper_bound ()));
  ASSERT_TRUE (r0.maybe_isnan ());

  // NAN U NAN = NAN
  r0.set_nan (float_type_node);
  r1.set_nan (float_type_node);
  r0.union_ (r1);
  ASSERT_TRUE (r0.known_isnan ());

  // [INF, INF] NAN ^ NAN = NAN
  r0.set_nan (float_type_node);
  r1 = frange_float ("+Inf", "+Inf");
  if (!HONOR_NANS (float_type_node))
    r1.update_nan ();
  r0.intersect (r1);
  ASSERT_TRUE (r0.known_isnan ());

  // NAN ^ NAN = NAN
  r0.set_nan (float_type_node);
  r1.set_nan (float_type_node);
  r0.intersect (r1);
  ASSERT_TRUE (r0.known_isnan ());

  // +NAN ^ -NAN = UNDEFINED
  r0.set_nan (float_type_node, false);
  r1.set_nan (float_type_node, true);
  r0.intersect (r1);
  ASSERT_TRUE (r0.undefined_p ());

  // VARYING ^ NAN = NAN.
  r0.set_nan (float_type_node);
  r1.set_varying (float_type_node);
  r0.intersect (r1);
  ASSERT_TRUE (r0.known_isnan ());

  // [3,4] ^ NAN = UNDEFINED.
  r0 = frange_float ("3", "4");
  r0.clear_nan ();
  r1.set_nan (float_type_node);
  r0.intersect (r1);
  ASSERT_TRUE (r0.undefined_p ());

  // [-3, 5] ^ NAN = UNDEFINED
  r0 = frange_float ("-3", "5");
  r0.clear_nan ();
  r1.set_nan (float_type_node);
  r0.intersect (r1);
  ASSERT_TRUE (r0.undefined_p ());

  // Setting the NAN bit to yes does not make us a known NAN.
  r0.set_varying (float_type_node);
  r0.update_nan ();
  ASSERT_FALSE (r0.known_isnan ());

  // NAN is in a VARYING.
  r0.set_varying (float_type_node);
  real_nan (&r, "", 1, TYPE_MODE (float_type_node));
  REAL_VALUE_TYPE nan = r;
  ASSERT_TRUE (r0.contains_p (nan));

  // -NAN is in a VARYING.
  r0.set_varying (float_type_node);
  q = real_value_negate (&r);
  REAL_VALUE_TYPE neg_nan = q;
  ASSERT_TRUE (r0.contains_p (neg_nan));

  // Clearing the NAN on a [] NAN is the empty set.
  r0.set_nan (float_type_node);
  r0.clear_nan ();
  ASSERT_TRUE (r0.undefined_p ());

  // [10,20] NAN ^ [21,25] NAN = [NAN]
  r0 = frange_float ("10", "20");
  r0.update_nan ();
  r1 = frange_float ("21", "25");
  r1.update_nan ();
  r0.intersect (r1);
  ASSERT_TRUE (r0.known_isnan ());

  // NAN U [5,6] should be [5,6] +-NAN.
  r0.set_nan (float_type_node);
  r1 = frange_float ("5", "6");
  r1.clear_nan ();
  r0.union_ (r1);
  real_from_string (&q, "5");
  real_from_string (&r, "6");
  ASSERT_TRUE (real_identical (&q, &r0.lower_bound ()));
  ASSERT_TRUE (real_identical (&r, &r0.upper_bound ()));
  ASSERT_TRUE (!r0.signbit_p (signbit));
  ASSERT_TRUE (r0.maybe_isnan ());

  // NAN U NAN shouldn't change anything.
  r0.set_nan (float_type_node);
  r1.set_nan (float_type_node);
  ASSERT_FALSE (r0.union_ (r1));

  // [3,5] NAN U NAN shouldn't change anything.
  r0 = frange_float ("3", "5");
  r1.set_nan (float_type_node);
  ASSERT_FALSE (r0.union_ (r1));

  // [3,5] U NAN *does* trigger a change.
  r0 = frange_float ("3", "5");
  r0.clear_nan ();
  r1.set_nan (float_type_node);
  ASSERT_TRUE (r0.union_ (r1));
}

static void
range_tests_signed_zeros ()
{
  REAL_VALUE_TYPE zero = dconst0;
  REAL_VALUE_TYPE neg_zero = zero;
  neg_zero.sign = 1;
  frange r0, r1;
  bool signbit;

  // [0,0] contains [0,0] but not [-0,-0] and vice versa.
  r0 = frange_float ("0.0", "0.0");
  r1 = frange_float ("-0.0", "-0.0");
  ASSERT_TRUE (r0.contains_p (zero));
  ASSERT_TRUE (!r0.contains_p (neg_zero));
  ASSERT_TRUE (r1.contains_p (neg_zero));
  ASSERT_TRUE (!r1.contains_p (zero));

  // Test contains_p() when we know the sign of the zero.
  r0 = frange_float ("0.0", "0.0");
  ASSERT_TRUE (r0.contains_p (zero));
  ASSERT_FALSE (r0.contains_p (neg_zero));
  r0 = frange_float ("-0.0", "-0.0");
  ASSERT_TRUE (r0.contains_p (neg_zero));
  ASSERT_FALSE (r0.contains_p (zero));

  r0 = frange_float ("-0.0", "0.0");
  ASSERT_TRUE (r0.contains_p (neg_zero));
  ASSERT_TRUE (r0.contains_p (zero));

  r0 = frange_float ("-3", "5");
  ASSERT_TRUE (r0.contains_p (neg_zero));
  ASSERT_TRUE (r0.contains_p (zero));

  // The intersection of zeros that differ in sign is a NAN (or
  // undefined if not honoring NANs).
  r0 = frange_float ("-0.0", "-0.0");
  r1 = frange_float ("0.0", "0.0");
  r0.intersect (r1);
  if (HONOR_NANS (float_type_node))
    ASSERT_TRUE (r0.known_isnan ());
  else
    ASSERT_TRUE (r0.undefined_p ());

  // The union of zeros that differ in sign is a zero with unknown sign.
  r0 = frange_float ("0.0", "0.0");
  r1 = frange_float ("-0.0", "-0.0");
  r0.union_ (r1);
  ASSERT_TRUE (r0.zero_p () && !r0.signbit_p (signbit));

  // [-0, +0] has an unknown sign.
  r0 = frange_float ("-0.0", "0.0");
  ASSERT_TRUE (r0.zero_p () && !r0.signbit_p (signbit));

  // [-0, +0] ^ [0, 0] is [0, 0]
  r0 = frange_float ("-0.0", "0.0");
  r1 = frange_float ("0.0", "0.0");
  r0.intersect (r1);
  ASSERT_TRUE (r0.zero_p ());

  r0 = frange_float ("+0", "5");
  r0.clear_nan ();
  ASSERT_TRUE (r0.signbit_p (signbit) && !signbit);

  r0 = frange_float ("-0", "5");
  r0.clear_nan ();
  ASSERT_TRUE (!r0.signbit_p (signbit));

  r0 = frange_float ("-0", "10");
  r1 = frange_float ("0", "5");
  r0.intersect (r1);
  ASSERT_TRUE (real_iszero (&r0.lower_bound (), false));

  r0 = frange_float ("-0", "5");
  r1 = frange_float ("0", "5");
  r0.union_ (r1);
  ASSERT_TRUE (real_iszero (&r0.lower_bound (), true));

  r0 = frange_float ("-5", "-0");
  r0.update_nan ();
  r1 = frange_float ("0", "0");
  r1.update_nan ();
  r0.intersect (r1);
  if (HONOR_NANS (float_type_node))
    ASSERT_TRUE (r0.known_isnan ());
  else
    ASSERT_TRUE (r0.undefined_p ());

  r0.set_nonnegative (float_type_node);
  if (HONOR_NANS (float_type_node))
    ASSERT_TRUE (r0.maybe_isnan ());

  // A range starting at +0, which excludes -0, has a known, clear SIGNBIT.
  r0 = frange_float ("0", "10");
  r0.clear_nan ();
  ASSERT_TRUE (r0.signbit_p (signbit) && !signbit);
}

static void
range_tests_signbit ()
{
  frange r0, r1;
  bool signbit;

  // Negative numbers should have the SIGNBIT set.
  r0 = frange_float ("-5", "-1");
  r0.clear_nan ();
  ASSERT_TRUE (r0.signbit_p (signbit) && signbit);
  // Positive numbers should have the SIGNBIT clear.
  r0 = frange_float ("1", "10");
  r0.clear_nan ();
  ASSERT_TRUE (r0.signbit_p (signbit) && !signbit);
  // Numbers spanning both positive and negative should have an
  // unknown SIGNBIT.
  r0 = frange_float ("-10", "10");
  r0.clear_nan ();
  ASSERT_TRUE (!r0.signbit_p (signbit));
  r0.set_varying (float_type_node);
  ASSERT_TRUE (!r0.signbit_p (signbit));
}

static void
range_tests_floats ()
{
  frange r0, r1;

  if (HONOR_NANS (float_type_node))
    range_tests_nan ();
  range_tests_signbit ();

  if (HONOR_SIGNED_ZEROS (float_type_node))
    range_tests_signed_zeros ();

  // A range of [-INF,+INF] is actually VARYING if no other properties
  // are set.
  r0 = frange_float ("-Inf", "+Inf");
  ASSERT_TRUE (r0.varying_p ());
  // ...unless it has some special property...
  if (HONOR_NANS (r0.type ()))
    {
      r0.clear_nan ();
      ASSERT_FALSE (r0.varying_p ());
    }

  // For most architectures, where float and double are different
  // sizes, having the same endpoints does not necessarily mean the
  // ranges are equal.
  if (!types_compatible_p (float_type_node, double_type_node))
    {
      r0 = frange_float ("3.0", "3.0", float_type_node);
      r1 = frange_float ("3.0", "3.0", double_type_node);
      ASSERT_NE (r0, r1);
    }

  // [3,5] U [10,12] = [3,12].
  r0 = frange_float ("3", "5");
  r1 = frange_float ("10", "12");
  r0.union_ (r1);
  ASSERT_EQ (r0, frange_float ("3", "12"));

  // [5,10] U [4,8] = [4,10]
  r0 = frange_float ("5", "10");
  r1 = frange_float ("4", "8");
  r0.union_ (r1);
  ASSERT_EQ (r0, frange_float ("4", "10"));

  // [3,5] U [4,10] = [3,10]
  r0 = frange_float ("3", "5");
  r1 = frange_float ("4", "10");
  r0.union_ (r1);
  ASSERT_EQ (r0, frange_float ("3", "10"));

  // [4,10] U [5,11] = [4,11]
  r0 = frange_float ("4", "10");
  r1 = frange_float ("5", "11");
  r0.union_ (r1);
  ASSERT_EQ (r0, frange_float ("4", "11"));

  // [3,12] ^ [10,12] = [10,12].
  r0 = frange_float ("3", "12");
  r1 = frange_float ("10", "12");
  r0.intersect (r1);
  ASSERT_EQ (r0, frange_float ("10", "12"));

  // [10,12] ^ [11,11] = [11,11]
  r0 = frange_float ("10", "12");
  r1 = frange_float ("11", "11");
  r0.intersect (r1);
  ASSERT_EQ (r0, frange_float ("11", "11"));

  // [10,20] ^ [5,15] = [10,15]
  r0 = frange_float ("10", "20");
  r1 = frange_float ("5", "15");
  r0.intersect (r1);
  ASSERT_EQ (r0, frange_float ("10", "15"));

  // [10,20] ^ [15,25] = [15,20]
  r0 = frange_float ("10", "20");
  r1 = frange_float ("15", "25");
  r0.intersect (r1);
  ASSERT_EQ (r0, frange_float ("15", "20"));

  // [10,20] ^ [21,25] = []
  r0 = frange_float ("10", "20");
  r0.clear_nan ();
  r1 = frange_float ("21", "25");
  r1.clear_nan ();
  r0.intersect (r1);
  ASSERT_TRUE (r0.undefined_p ());

  if (HONOR_INFINITIES (float_type_node))
    {
      // Make sure [-Inf, -Inf] doesn't get normalized.
      r0 = frange_float ("-Inf", "-Inf");
      ASSERT_TRUE (real_isinf (&r0.lower_bound (), true));
      ASSERT_TRUE (real_isinf (&r0.upper_bound (), true));
    }

  // Test that reading back a global range yields the same result as
  // what we wrote into it.
  tree ssa = make_temp_ssa_name (float_type_node, NULL, "blah");
  r0.set_varying (float_type_node);
  r0.clear_nan ();
  set_range_info (ssa, r0);
  get_global_range_query ()->range_of_expr (r1, ssa);
  ASSERT_EQ (r0, r1);
}

// Run floating range tests for various combinations of NAN and INF
// support.

static void
range_tests_floats_various ()
{
  int save_finite_math_only = flag_finite_math_only;

  // Test -ffinite-math-only.
  flag_finite_math_only = 1;
  range_tests_floats ();
  // Test -fno-finite-math-only.
  flag_finite_math_only = 0;
  range_tests_floats ();

  flag_finite_math_only = save_finite_math_only;
}

void
range_tests ()
{
  range_tests_irange3 ();
  range_tests_int_range_max ();
  range_tests_strict_enum ();
  range_tests_nonzero_bits ();
  range_tests_floats_various ();
  range_tests_misc ();
}

} // namespace selftest

#endif // CHECKING_P

