1 | /* Operations with very long integers. -*- C++ -*- |
2 | Copyright (C) 2012-2023 Free Software Foundation, Inc. |
3 | |
4 | This file is part of GCC. |
5 | |
6 | GCC is free software; you can redistribute it and/or modify it |
7 | under the terms of the GNU General Public License as published by the |
8 | Free Software Foundation; either version 3, or (at your option) any |
9 | later version. |
10 | |
11 | GCC is distributed in the hope that it will be useful, but WITHOUT |
12 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
14 | for more details. |
15 | |
16 | You should have received a copy of the GNU General Public License |
17 | along with GCC; see the file COPYING3. If not see |
18 | <http://www.gnu.org/licenses/>. */ |
19 | |
20 | #ifndef WIDE_INT_H |
21 | #define WIDE_INT_H |
22 | |
23 | /* wide-int.[cc|h] implements a class that efficiently performs |
24 | mathematical operations on finite precision integers. wide_ints |
25 | are designed to be transient - they are not for long term storage |
26 | of values. There is tight integration between wide_ints and the |
27 | other longer storage GCC representations (rtl and tree). |
28 | |
29 | The actual precision of a wide_int depends on the flavor. There |
30 | are three predefined flavors: |
31 | |
32 | 1) wide_int (the default). This flavor does the math in the |
33 | precision of its input arguments. It is assumed (and checked) |
34 | that the precisions of the operands and results are consistent. |
35 | This is the most efficient flavor. It is not possible to examine |
36 | bits above the precision that has been specified. Because of |
37 | this, the default flavor has semantics that are simple to |
38 | understand and in general model the underlying hardware that the |
   compiler is targeted for.
40 | |
41 | This flavor must be used at the RTL level of gcc because there |
42 | is, in general, not enough information in the RTL representation |
43 | to extend a value beyond the precision specified in the mode. |
44 | |
45 | This flavor should also be used at the TREE and GIMPLE levels of |
46 | the compiler except for the circumstances described in the |
47 | descriptions of the other two flavors. |
48 | |
49 | The default wide_int representation does not contain any |
50 | information inherent about signedness of the represented value, |
51 | so it can be used to represent both signed and unsigned numbers. |
52 | For operations where the results depend on signedness (full width |
53 | multiply, division, shifts, comparisons, and operations that need |
54 | overflow detected), the signedness must be specified separately. |
55 | |
56 | For precisions up to WIDE_INT_MAX_INL_PRECISION, it uses an inline |
57 | buffer in the type, for larger precisions up to WIDEST_INT_MAX_PRECISION |
58 | it uses a pointer to heap allocated buffer. |
59 | |
60 | 2) offset_int. This is a fixed-precision integer that can hold |
61 | any address offset, measured in either bits or bytes, with at |
62 | least one extra sign bit. At the moment the maximum address |
63 | size GCC supports is 64 bits. With 8-bit bytes and an extra |
64 | sign bit, offset_int therefore needs to have at least 68 bits |
65 | of precision. We round this up to 128 bits for efficiency. |
66 | Values of type T are converted to this precision by sign- or |
67 | zero-extending them based on the signedness of T. |
68 | |
69 | The extra sign bit means that offset_int is effectively a signed |
70 | 128-bit integer, i.e. it behaves like int128_t. |
71 | |
72 | Since the values are logically signed, there is no need to |
73 | distinguish between signed and unsigned operations. Sign-sensitive |
74 | comparison operators <, <=, > and >= are therefore supported. |
75 | Shift operators << and >> are also supported, with >> being |
76 | an _arithmetic_ right shift. |
77 | |
78 | [ Note that, even though offset_int is effectively int128_t, |
79 | it can still be useful to use unsigned comparisons like |
80 | wi::leu_p (a, b) as a more efficient short-hand for |
81 | "a >= 0 && a <= b". ] |
82 | |
83 | 3) widest_int. This representation is an approximation of |
84 | infinite precision math. However, it is not really infinite |
85 | precision math as in the GMP library. It is really finite |
86 | precision math where the precision is WIDEST_INT_MAX_PRECISION. |
87 | |
88 | Like offset_int, widest_int is wider than all the values that |
89 | it needs to represent, so the integers are logically signed. |
90 | Sign-sensitive comparison operators <, <=, > and >= are supported, |
91 | as are << and >>. |
92 | |
93 | There are several places in the GCC where this should/must be used: |
94 | |
95 | * Code that does induction variable optimizations. This code |
96 | works with induction variables of many different types at the |
97 | same time. Because of this, it ends up doing many different |
98 | calculations where the operands are not compatible types. The |
99 | widest_int makes this easy, because it provides a field where |
     nothing is lost when converting from any variable.
101 | |
102 | * There are a small number of passes that currently use the |
103 | widest_int that should use the default. These should be |
104 | changed. |
105 | |
106 | There are surprising features of offset_int and widest_int |
107 | that the users should be careful about: |
108 | |
109 | 1) Shifts and rotations are just weird. You have to specify a |
110 | precision in which the shift or rotate is to happen in. The bits |
111 | above this precision are zeroed. While this is what you |
   want, it is clearly non-obvious.
113 | |
114 | 2) Larger precision math sometimes does not produce the same |
115 | answer as would be expected for doing the math at the proper |
116 | precision. In particular, a multiply followed by a divide will |
117 | produce a different answer if the first product is larger than |
118 | what can be represented in the input precision. |
119 | |
120 | The offset_int and the widest_int flavors are more expensive |
121 | than the default wide int, so in addition to the caveats with these |
   two, the default is the preferred representation.
123 | |
124 | All three flavors of wide_int are represented as a vector of |
125 | HOST_WIDE_INTs. The default and widest_int vectors contain enough elements |
126 | to hold a value of MAX_BITSIZE_MODE_ANY_INT bits. offset_int contains only |
127 | enough elements to hold ADDR_MAX_PRECISION bits. The values are stored |
128 | in the vector with the least significant HOST_BITS_PER_WIDE_INT bits |
129 | in element 0. |
130 | |
131 | The default wide_int contains three fields: the vector (VAL), |
132 | the precision and a length (LEN). The length is the number of HWIs |
133 | needed to represent the value. widest_int and offset_int have a |
134 | constant precision that cannot be changed, so they only store the |
135 | VAL and LEN fields. |
136 | |
137 | Since most integers used in a compiler are small values, it is |
138 | generally profitable to use a representation of the value that is |
139 | as small as possible. LEN is used to indicate the number of |
140 | elements of the vector that are in use. The numbers are stored as |
141 | sign extended numbers as a means of compression. Leading |
142 | HOST_WIDE_INTs that contain strings of either -1 or 0 are removed |
143 | as long as they can be reconstructed from the top bit that is being |
144 | represented. |
145 | |
146 | The precision and length of a wide_int are always greater than 0. |
147 | Any bits in a wide_int above the precision are sign-extended from the |
148 | most significant bit. For example, a 4-bit value 0x8 is represented as |
149 | VAL = { 0xf...fff8 }. However, as an optimization, we allow other integer |
150 | constants to be represented with undefined bits above the precision. |
151 | This allows INTEGER_CSTs to be pre-extended according to TYPE_SIGN, |
152 | so that the INTEGER_CST representation can be used both in TYPE_PRECISION |
153 | and in wider precisions. |
154 | |
155 | There are constructors to create the various forms of wide_int from |
156 | trees, rtl and constants. For trees the options are: |
157 | |
158 | tree t = ...; |
159 | wi::to_wide (t) // Treat T as a wide_int |
160 | wi::to_offset (t) // Treat T as an offset_int |
161 | wi::to_widest (t) // Treat T as a widest_int |
162 | |
163 | All three are light-weight accessors that should have no overhead |
164 | in release builds. If it is useful for readability reasons to |
165 | store the result in a temporary variable, the preferred method is: |
166 | |
167 | wi::tree_to_wide_ref twide = wi::to_wide (t); |
168 | wi::tree_to_offset_ref toffset = wi::to_offset (t); |
169 | wi::tree_to_widest_ref twidest = wi::to_widest (t); |
170 | |
171 | To make an rtx into a wide_int, you have to pair it with a mode. |
172 | The canonical way to do this is with rtx_mode_t as in: |
173 | |
174 | rtx r = ... |
175 | wide_int x = rtx_mode_t (r, mode); |
176 | |
177 | Similarly, a wide_int can only be constructed from a host value if |
178 | the target precision is given explicitly, such as in: |
179 | |
180 | wide_int x = wi::shwi (c, prec); // sign-extend C if necessary |
181 | wide_int y = wi::uhwi (c, prec); // zero-extend C if necessary |
182 | |
183 | However, offset_int and widest_int have an inherent precision and so |
184 | can be initialized directly from a host value: |
185 | |
186 | offset_int x = (int) c; // sign-extend C |
187 | widest_int x = (unsigned int) c; // zero-extend C |
188 | |
189 | It is also possible to do arithmetic directly on rtx_mode_ts and |
190 | constants. For example: |
191 | |
192 | wi::add (r1, r2); // add equal-sized rtx_mode_ts r1 and r2 |
193 | wi::add (r1, 1); // add 1 to rtx_mode_t r1 |
194 | wi::lshift (1, 100); // 1 << 100 as a widest_int |
195 | |
196 | Many binary operations place restrictions on the combinations of inputs, |
197 | using the following rules: |
198 | |
199 | - {rtx, wide_int} op {rtx, wide_int} -> wide_int |
200 | The inputs must be the same precision. The result is a wide_int |
201 | of the same precision |
202 | |
203 | - {rtx, wide_int} op (un)signed HOST_WIDE_INT -> wide_int |
204 | (un)signed HOST_WIDE_INT op {rtx, wide_int} -> wide_int |
205 | The HOST_WIDE_INT is extended or truncated to the precision of |
206 | the other input. The result is a wide_int of the same precision |
207 | as that input. |
208 | |
209 | - (un)signed HOST_WIDE_INT op (un)signed HOST_WIDE_INT -> widest_int |
210 | The inputs are extended to widest_int precision and produce a |
211 | widest_int result. |
212 | |
213 | - offset_int op offset_int -> offset_int |
214 | offset_int op (un)signed HOST_WIDE_INT -> offset_int |
215 | (un)signed HOST_WIDE_INT op offset_int -> offset_int |
216 | |
217 | - widest_int op widest_int -> widest_int |
218 | widest_int op (un)signed HOST_WIDE_INT -> widest_int |
219 | (un)signed HOST_WIDE_INT op widest_int -> widest_int |
220 | |
221 | Other combinations like: |
222 | |
223 | - widest_int op offset_int and |
224 | - wide_int op offset_int |
225 | |
226 | are not allowed. The inputs should instead be extended or truncated |
227 | so that they match. |
228 | |
229 | The inputs to comparison functions like wi::eq_p and wi::lts_p |
230 | follow the same compatibility rules, although their return types |
231 | are different. Unary functions on X produce the same result as |
232 | a binary operation X + X. Shift functions X op Y also produce |
233 | the same result as X + X; the precision of the shift amount Y |
234 | can be arbitrarily different from X. */ |
235 | |
/* The MAX_BITSIZE_MODE_ANY_INT is automatically generated by a very
   early examination of the target's mode file.  The WIDE_INT_MAX_INL_ELTS
   can accommodate at least 1 more bit so that unsigned numbers of that
   mode can be represented as a signed value.  Note that it is still
   possible to create fixed_wide_ints that have precisions greater than
   MAX_BITSIZE_MODE_ANY_INT.  This can be useful when representing a
   double-width multiplication result, for example.  */
#define WIDE_INT_MAX_INL_ELTS \
  ((MAX_BITSIZE_MODE_ANY_INT + HOST_BITS_PER_WIDE_INT) \
   / HOST_BITS_PER_WIDE_INT)

/* The number of bits the inline HWI buffer can hold.  Precisions up to
   this limit avoid any heap allocation (see the flavor description at
   the top of the file).  */
#define WIDE_INT_MAX_INL_PRECISION \
  (WIDE_INT_MAX_INL_ELTS * HOST_BITS_PER_WIDE_INT)

/* Precision of wide_int and largest _BitInt precision + 1 we can
   support.  */
#define WIDE_INT_MAX_ELTS 1024
#define WIDE_INT_MAX_PRECISION (WIDE_INT_MAX_ELTS * HOST_BITS_PER_WIDE_INT)

/* Precision of widest_int.  */
#define WIDEST_INT_MAX_ELTS 2048
#define WIDEST_INT_MAX_PRECISION (WIDEST_INT_MAX_ELTS * HOST_BITS_PER_WIDE_INT)
258 | |
/* The inline buffer must be strictly smaller than the overall maximum,
   since the heap-allocated representation is only used for lengths that
   do not fit inline.  */
STATIC_ASSERT (WIDE_INT_MAX_INL_ELTS < WIDE_INT_MAX_ELTS);

/* This is the max size of any pointer on any machine.  It does not
   seem to be as easy to sniff this out of the machine description as
   it is for MAX_BITSIZE_MODE_ANY_INT since targets may support
   multiple address sizes and may have different address sizes for
   different address spaces.  However, currently the largest pointer
   on any platform is 64 bits.  When that changes, then it is likely
   that a target hook should be defined so that targets can make this
   value larger for those targets.  */
#define ADDR_MAX_BITSIZE 64

/* This is the internal precision used when doing any address
   arithmetic.  The '4' is really 3 + 1.  Three of the bits are for
   the number of extra bits needed to do bit addresses and the other bit
   is to allow everything to be signed without losing any precision.
   Then everything is rounded up to the next HWI for efficiency.  */
#define ADDR_MAX_PRECISION \
  ((ADDR_MAX_BITSIZE + 4 + HOST_BITS_PER_WIDE_INT - 1) \
   & ~(HOST_BITS_PER_WIDE_INT - 1))

/* The number of HWIs needed to store an offset_int.  */
#define OFFSET_INT_ELTS (ADDR_MAX_PRECISION / HOST_BITS_PER_WIDE_INT)

/* The max number of HWIs needed to store a wide_int of PRECISION.  */
#define WIDE_INT_MAX_HWIS(PRECISION) \
  ((PRECISION + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
286 | |
/* The type of result produced by a binary operation on types T1 and T2.
   Defined purely for brevity.  */
#define WI_BINARY_RESULT(T1, T2) \
  typename wi::binary_traits <T1, T2>::result_type

/* Likewise for binary operators, which excludes the case in which neither
   T1 nor T2 is a wide-int-based type.  */
#define WI_BINARY_OPERATOR_RESULT(T1, T2) \
  typename wi::binary_traits <T1, T2>::operator_result

/* The type of result produced by T1 << T2.  Leads to substitution failure
   if the operation isn't supported.  Defined purely for brevity.  */
#define WI_SIGNED_SHIFT_RESULT(T1, T2) \
  typename wi::binary_traits <T1, T2>::signed_shift_result_type

/* The type of result produced by a sign-agnostic binary predicate on
   types T1 and T2.  This is bool if wide-int operations make sense for
   T1 and T2 and leads to substitution failure otherwise.  */
#define WI_BINARY_PREDICATE_RESULT(T1, T2) \
  typename wi::binary_traits <T1, T2>::predicate_result

/* The type of result produced by a signed binary predicate on types T1 and T2.
   This is bool if signed comparisons make sense for T1 and T2 and leads to
   substitution failure otherwise.  */
#define WI_SIGNED_BINARY_PREDICATE_RESULT(T1, T2) \
  typename wi::binary_traits <T1, T2>::signed_predicate_result

/* The type of result produced by a unary operation on type T.
   Implemented via the binary traits of T with itself.  */
#define WI_UNARY_RESULT(T) \
  typename wi::binary_traits <T, T>::result_type

/* Define a variable RESULT to hold the result of a binary operation on
   X and Y, which have types T1 and T2 respectively.  Define VAL to
   point to the blocks (HOST_WIDE_INT elements) of RESULT.  Once the
   user of the macro has filled in VAL, it should call RESULT.set_len
   to set the number of initialized blocks.  */
#define WI_BINARY_RESULT_VAR(RESULT, VAL, T1, X, T2, Y) \
  WI_BINARY_RESULT (T1, T2) RESULT = \
    wi::int_traits <WI_BINARY_RESULT (T1, T2)>::get_binary_result (X, Y); \
  HOST_WIDE_INT *VAL = RESULT.write_val (0)

/* Similar for the result of a unary operation on X, which has type T.  */
#define WI_UNARY_RESULT_VAR(RESULT, VAL, T, X) \
  WI_UNARY_RESULT (T) RESULT = \
    wi::int_traits <WI_UNARY_RESULT (T)>::get_binary_result (X, X); \
  HOST_WIDE_INT *VAL = RESULT.write_val (0)
333 | |
/* Forward declarations of the generic integer wrapper and the storage
   classes that back the three flavors described at the top of the file.  */
template <typename T> class generic_wide_int;
template <int N> class fixed_wide_int_storage;
class wide_int_storage;
template <int N> class widest_int_storage;

/* An N-bit integer.  Until we can use typedef templates, use this instead.  */
#define FIXED_WIDE_INT(N) \
  generic_wide_int < fixed_wide_int_storage <N> >

/* Flavor 1: run-time precision (see the overview comment above).  */
typedef generic_wide_int <wide_int_storage> wide_int;
/* Flavor 2: fixed-precision address offsets.  */
typedef FIXED_WIDE_INT (ADDR_MAX_PRECISION) offset_int;
/* Flavor 3: approximation of infinite precision.  */
typedef generic_wide_int <widest_int_storage <WIDEST_INT_MAX_PRECISION> > widest_int;
/* Double-width variant of widest_int, e.g. for holding a full
   multiplication result.  */
typedef generic_wide_int <widest_int_storage <WIDEST_INT_MAX_PRECISION * 2> > widest2_int;
347 | |
/* wi::storage_ref can be a reference to a primitive type,
   so this is the conservatively-correct setting.
   SE = is_sign_extended, HDP = host_dependent_precision.  */
template <bool SE, bool HDP = true>
class wide_int_ref_storage;

/* A generic non-owning reference to a wide-int value.  */
typedef generic_wide_int <wide_int_ref_storage <false> > wide_int_ref;

/* This can be used instead of wide_int_ref if the referenced value is
   known to have type T.  It carries across properties of T's representation,
   such as whether excess upper bits in a HWI are defined, and can therefore
   help avoid redundant work.

   The macro could be replaced with a template typedef, once we're able
   to use those.  */
#define WIDE_INT_REF_FOR(T) \
  generic_wide_int \
    <wide_int_ref_storage <wi::int_traits <T>::is_sign_extended, \
			   wi::int_traits <T>::host_dependent_precision> >
366 | |
namespace wi
{
  /* Operations that calculate overflow do so even for
     TYPE_OVERFLOW_WRAPS types.  For example, adding 1 to +MAX_INT in
     an unsigned int is 0 and does not overflow in C/C++, but wi::add
     will set the overflow argument in case it's needed for further
     analysis.

     For operations that require overflow, these are the different
     types of overflow.  */
  enum overflow_type {
    OVF_NONE = 0,
    OVF_UNDERFLOW = -1,
    OVF_OVERFLOW = 1,
    /* There was an overflow, but we are unsure whether it was an
       overflow or an underflow.  */
    OVF_UNKNOWN = 2
  };

  /* Classifies an integer based on its precision.  */
  enum precision_type {
    /* The integer has both a precision and defined signedness.  This allows
       the integer to be converted to any width, since we know whether to fill
       any extra bits with zeros or signs.  */
    FLEXIBLE_PRECISION,

    /* The integer has a variable precision but no defined signedness.  */
    VAR_PRECISION,

    /* The integer has a constant precision (known at GCC compile time),
       is signed and all elements are in inline buffer.  */
    INL_CONST_PRECISION,

    /* Like INL_CONST_PRECISION, but elements can be heap allocated for
       larger lengths.  */
    CONST_PRECISION
  };

  /* This class, which has no default implementation, is expected to
     provide the following members:

     static const enum precision_type precision_type;
       Classifies the type of T.

     static const unsigned int precision;
       Only defined if precision_type == INL_CONST_PRECISION or
       precision_type == CONST_PRECISION.  Specifies the
       precision of all integers of type T.

     static const bool host_dependent_precision;
       True if the precision of T depends (or can depend) on the host.

     static unsigned int get_precision (const T &x)
       Return the number of bits in X.

     static wi::storage_ref *decompose (HOST_WIDE_INT *scratch,
					unsigned int precision, const T &x)
       Decompose X as a PRECISION-bit integer, returning the associated
       wi::storage_ref.  SCRATCH is available as scratch space if needed.
       The routine should assert that PRECISION is acceptable.  */
  template <typename T> struct int_traits;

  /* This class provides a single type, result_type, which specifies the
     type of integer produced by a binary operation whose inputs have
     types T1 and T2.  The definition should be symmetric.  */
  template <typename T1, typename T2,
	    enum precision_type P1 = int_traits <T1>::precision_type,
	    enum precision_type P2 = int_traits <T2>::precision_type>
  struct binary_traits;

  /* Specify the result type for each supported combination of binary
     inputs.  Note that INL_CONST_PRECISION, CONST_PRECISION and
     VAR_PRECISION cannot be mixed, in order to give stronger type
     checking.  When both inputs are INL_CONST_PRECISION or both are
     CONST_PRECISION, they must have the same precision.  */

  /* Host value op host value: do the math in widest_int precision.  */
  template <typename T1, typename T2>
  struct binary_traits <T1, T2, FLEXIBLE_PRECISION, FLEXIBLE_PRECISION>
  {
    typedef widest_int result_type;
    /* Don't define operators for this combination.  */
  };

  /* Host value op wide_int: the host value adopts the wide_int's
     precision and the result is a wide_int.  */
  template <typename T1, typename T2>
  struct binary_traits <T1, T2, FLEXIBLE_PRECISION, VAR_PRECISION>
  {
    typedef wide_int result_type;
    typedef result_type operator_result;
    typedef bool predicate_result;
  };

  /* Host value op fixed-precision integer: the result has T2's
     compile-time precision.  */
  template <typename T1, typename T2>
  struct binary_traits <T1, T2, FLEXIBLE_PRECISION, INL_CONST_PRECISION>
  {
    /* Spelled out explicitly (rather than through FIXED_WIDE_INT)
       so as not to confuse gengtype.  */
    typedef generic_wide_int < fixed_wide_int_storage
			       <int_traits <T2>::precision> > result_type;
    typedef result_type operator_result;
    typedef bool predicate_result;
    typedef result_type signed_shift_result_type;
    typedef bool signed_predicate_result;
  };

  /* Host value op widest-style integer: the result has T2's
     compile-time precision.  */
  template <typename T1, typename T2>
  struct binary_traits <T1, T2, FLEXIBLE_PRECISION, CONST_PRECISION>
  {
    typedef generic_wide_int < widest_int_storage
			       <int_traits <T2>::precision> > result_type;
    typedef result_type operator_result;
    typedef bool predicate_result;
    typedef result_type signed_shift_result_type;
    typedef bool signed_predicate_result;
  };

  /* Mirror of the FLEXIBLE/VAR case above, with the operands swapped.  */
  template <typename T1, typename T2>
  struct binary_traits <T1, T2, VAR_PRECISION, FLEXIBLE_PRECISION>
  {
    typedef wide_int result_type;
    typedef result_type operator_result;
    typedef bool predicate_result;
  };

  /* Mirror of the FLEXIBLE/INL_CONST case above, with the operands
     swapped.  */
  template <typename T1, typename T2>
  struct binary_traits <T1, T2, INL_CONST_PRECISION, FLEXIBLE_PRECISION>
  {
    /* Spelled out explicitly (rather than through FIXED_WIDE_INT)
       so as not to confuse gengtype.  */
    typedef generic_wide_int < fixed_wide_int_storage
			       <int_traits <T1>::precision> > result_type;
    typedef result_type operator_result;
    typedef bool predicate_result;
    typedef result_type signed_shift_result_type;
    typedef bool signed_predicate_result;
  };

  /* Mirror of the FLEXIBLE/CONST case above, with the operands
     swapped.  */
  template <typename T1, typename T2>
  struct binary_traits <T1, T2, CONST_PRECISION, FLEXIBLE_PRECISION>
  {
    typedef generic_wide_int < widest_int_storage
			       <int_traits <T1>::precision> > result_type;
    typedef result_type operator_result;
    typedef bool predicate_result;
    typedef result_type signed_shift_result_type;
    typedef bool signed_predicate_result;
  };

  /* Two fixed-precision integers: the precisions must match.  */
  template <typename T1, typename T2>
  struct binary_traits <T1, T2, INL_CONST_PRECISION, INL_CONST_PRECISION>
  {
    STATIC_ASSERT (int_traits <T1>::precision == int_traits <T2>::precision);
    /* Spelled out explicitly (rather than through FIXED_WIDE_INT)
       so as not to confuse gengtype.  */
    typedef generic_wide_int < fixed_wide_int_storage
			       <int_traits <T1>::precision> > result_type;
    typedef result_type operator_result;
    typedef bool predicate_result;
    typedef result_type signed_shift_result_type;
    typedef bool signed_predicate_result;
  };

  /* Two widest-style integers: the precisions must match.  */
  template <typename T1, typename T2>
  struct binary_traits <T1, T2, CONST_PRECISION, CONST_PRECISION>
  {
    STATIC_ASSERT (int_traits <T1>::precision == int_traits <T2>::precision);
    typedef generic_wide_int < widest_int_storage
			       <int_traits <T1>::precision> > result_type;
    typedef result_type operator_result;
    typedef bool predicate_result;
    typedef result_type signed_shift_result_type;
    typedef bool signed_predicate_result;
  };

  /* Two variable-precision integers: the result is a wide_int.  The
     operands are expected to have the same (run-time) precision.  */
  template <typename T1, typename T2>
  struct binary_traits <T1, T2, VAR_PRECISION, VAR_PRECISION>
  {
    typedef wide_int result_type;
    typedef result_type operator_result;
    typedef bool predicate_result;
  };
}
547 | |
/* Public functions for querying and operating on integers.  */
namespace wi
{
  /* Return the number of bits in X.  */
  template <typename T>
  unsigned int get_precision (const T &);

  /* Return the precision that a binary operation on these two operands
     would be performed in.  */
  template <typename T1, typename T2>
  unsigned int get_binary_precision (const T1 &, const T2 &);

  template <typename T1, typename T2>
  void copy (T1 &, const T2 &);

/* Helper macros for declaring the templated families below;
   #undef'd once the declarations are complete.  */
#define UNARY_PREDICATE \
  template <typename T> bool
#define UNARY_FUNCTION \
  template <typename T> WI_UNARY_RESULT (T)
#define BINARY_PREDICATE \
  template <typename T1, typename T2> bool
#define BINARY_FUNCTION \
  template <typename T1, typename T2> WI_BINARY_RESULT (T1, T2)
#define SHIFT_FUNCTION \
  template <typename T1, typename T2> WI_UNARY_RESULT (T1)

  /* Range and sign queries.  */
  UNARY_PREDICATE fits_shwi_p (const T &);
  UNARY_PREDICATE fits_uhwi_p (const T &);
  UNARY_PREDICATE neg_p (const T &, signop = SIGNED);

  template <typename T>
  HOST_WIDE_INT sign_mask (const T &);

  /* Comparisons.  The "s"/"u" suffixed forms interpret the operands as
     signed/unsigned respectively; the others take the signedness as a
     signop argument.  */
  BINARY_PREDICATE eq_p (const T1 &, const T2 &);
  BINARY_PREDICATE ne_p (const T1 &, const T2 &);
  BINARY_PREDICATE lt_p (const T1 &, const T2 &, signop);
  BINARY_PREDICATE lts_p (const T1 &, const T2 &);
  BINARY_PREDICATE ltu_p (const T1 &, const T2 &);
  BINARY_PREDICATE le_p (const T1 &, const T2 &, signop);
  BINARY_PREDICATE les_p (const T1 &, const T2 &);
  BINARY_PREDICATE leu_p (const T1 &, const T2 &);
  BINARY_PREDICATE gt_p (const T1 &, const T2 &, signop);
  BINARY_PREDICATE gts_p (const T1 &, const T2 &);
  BINARY_PREDICATE gtu_p (const T1 &, const T2 &);
  BINARY_PREDICATE ge_p (const T1 &, const T2 &, signop);
  BINARY_PREDICATE ges_p (const T1 &, const T2 &);
  BINARY_PREDICATE geu_p (const T1 &, const T2 &);

  /* Three-way comparisons, following the same suffix convention.  */
  template <typename T1, typename T2>
  int cmp (const T1 &, const T2 &, signop);

  template <typename T1, typename T2>
  int cmps (const T1 &, const T2 &);

  template <typename T1, typename T2>
  int cmpu (const T1 &, const T2 &);

  /* Unary operations.  The overflow_type * overloads also report
     overflow.  */
  UNARY_FUNCTION bit_not (const T &);
  UNARY_FUNCTION neg (const T &);
  UNARY_FUNCTION neg (const T &, overflow_type *);
  UNARY_FUNCTION abs (const T &);
  UNARY_FUNCTION ext (const T &, unsigned int, signop);
  UNARY_FUNCTION sext (const T &, unsigned int);
  UNARY_FUNCTION zext (const T &, unsigned int);
  UNARY_FUNCTION set_bit (const T &, unsigned int);
  UNARY_FUNCTION bswap (const T &);
  UNARY_FUNCTION bitreverse (const T &);

  /* Minimum and maximum.  */
  BINARY_FUNCTION min (const T1 &, const T2 &, signop);
  BINARY_FUNCTION smin (const T1 &, const T2 &);
  BINARY_FUNCTION umin (const T1 &, const T2 &);
  BINARY_FUNCTION max (const T1 &, const T2 &, signop);
  BINARY_FUNCTION smax (const T1 &, const T2 &);
  BINARY_FUNCTION umax (const T1 &, const T2 &);

  /* Bitwise and arithmetic binary operations.  Overloads taking an
     overflow_type * also report overflow.  */
  BINARY_FUNCTION bit_and (const T1 &, const T2 &);
  BINARY_FUNCTION bit_and_not (const T1 &, const T2 &);
  BINARY_FUNCTION bit_or (const T1 &, const T2 &);
  BINARY_FUNCTION bit_or_not (const T1 &, const T2 &);
  BINARY_FUNCTION bit_xor (const T1 &, const T2 &);
  BINARY_FUNCTION add (const T1 &, const T2 &);
  BINARY_FUNCTION add (const T1 &, const T2 &, signop, overflow_type *);
  BINARY_FUNCTION sub (const T1 &, const T2 &);
  BINARY_FUNCTION sub (const T1 &, const T2 &, signop, overflow_type *);
  BINARY_FUNCTION mul (const T1 &, const T2 &);
  BINARY_FUNCTION mul (const T1 &, const T2 &, signop, overflow_type *);
  BINARY_FUNCTION smul (const T1 &, const T2 &, overflow_type *);
  BINARY_FUNCTION umul (const T1 &, const T2 &, overflow_type *);
  BINARY_FUNCTION mul_high (const T1 &, const T2 &, signop);

  /* Division and modulo in the various rounding modes (trunc, floor,
     ceil, round), again with "s"/"u" shorthands for the signedness.  */
  BINARY_FUNCTION div_trunc (const T1 &, const T2 &, signop,
			     overflow_type * = 0);
  BINARY_FUNCTION sdiv_trunc (const T1 &, const T2 &);
  BINARY_FUNCTION udiv_trunc (const T1 &, const T2 &);
  BINARY_FUNCTION div_floor (const T1 &, const T2 &, signop,
			     overflow_type * = 0);
  BINARY_FUNCTION udiv_floor (const T1 &, const T2 &);
  BINARY_FUNCTION sdiv_floor (const T1 &, const T2 &);
  BINARY_FUNCTION div_ceil (const T1 &, const T2 &, signop,
			    overflow_type * = 0);
  BINARY_FUNCTION udiv_ceil (const T1 &, const T2 &);
  BINARY_FUNCTION div_round (const T1 &, const T2 &, signop,
			     overflow_type * = 0);
  BINARY_FUNCTION divmod_trunc (const T1 &, const T2 &, signop,
				WI_BINARY_RESULT (T1, T2) *);
  BINARY_FUNCTION gcd (const T1 &, const T2 &, signop = UNSIGNED);
  BINARY_FUNCTION mod_trunc (const T1 &, const T2 &, signop,
			     overflow_type * = 0);
  BINARY_FUNCTION smod_trunc (const T1 &, const T2 &);
  BINARY_FUNCTION umod_trunc (const T1 &, const T2 &);
  BINARY_FUNCTION mod_floor (const T1 &, const T2 &, signop,
			     overflow_type * = 0);
  BINARY_FUNCTION umod_floor (const T1 &, const T2 &);
  BINARY_FUNCTION mod_ceil (const T1 &, const T2 &, signop,
			    overflow_type * = 0);
  BINARY_FUNCTION mod_round (const T1 &, const T2 &, signop,
			     overflow_type * = 0);

  template <typename T1, typename T2>
  bool multiple_of_p (const T1 &, const T2 &, signop);

  /* As above, but also return the quotient through the final pointer.  */
  template <typename T1, typename T2>
  bool multiple_of_p (const T1 &, const T2 &, signop,
		      WI_BINARY_RESULT (T1, T2) *);

  /* Shifts and rotates.  The shift amount's precision may differ
     arbitrarily from the first operand's (see the overview comment).  */
  SHIFT_FUNCTION lshift (const T1 &, const T2 &);
  SHIFT_FUNCTION lrshift (const T1 &, const T2 &);
  SHIFT_FUNCTION arshift (const T1 &, const T2 &);
  SHIFT_FUNCTION rshift (const T1 &, const T2 &, signop sgn);
  SHIFT_FUNCTION lrotate (const T1 &, const T2 &, unsigned int = 0);
  SHIFT_FUNCTION rrotate (const T1 &, const T2 &, unsigned int = 0);

#undef SHIFT_FUNCTION
#undef BINARY_PREDICATE
#undef BINARY_FUNCTION
#undef UNARY_PREDICATE
#undef UNARY_FUNCTION

  /* Bit-level queries.  */
  bool only_sign_bit_p (const wide_int_ref &, unsigned int);
  bool only_sign_bit_p (const wide_int_ref &);
  int clz (const wide_int_ref &);
  int clrsb (const wide_int_ref &);
  int ctz (const wide_int_ref &);
  int exact_log2 (const wide_int_ref &);
  int floor_log2 (const wide_int_ref &);
  int ffs (const wide_int_ref &);
  int popcount (const wide_int_ref &);
  int parity (const wide_int_ref &);

  template <typename T>
  unsigned HOST_WIDE_INT extract_uhwi (const T &, unsigned int, unsigned int);

  /* Return the minimum precision needed to represent the value with the
     given signedness.  */
  template <typename T>
  unsigned int min_precision (const T &, signop);

  static inline void accumulate_overflow (overflow_type &, overflow_type);
}
701 | |
namespace wi
{
  /* Contains the components of a decomposed integer for easy, direct
     access.  */
  class storage_ref
  {
  public:
    storage_ref () {}
    storage_ref (const HOST_WIDE_INT *, unsigned int, unsigned int);

    /* Pointer to the array of blocks, least-significant block first.  */
    const HOST_WIDE_INT *val;
    /* Number of significant blocks in VAL.  */
    unsigned int len;
    /* Precision in bits of the integer that VAL encodes.  */
    unsigned int precision;

    /* Provide enough trappings for this class to act as storage for
       generic_wide_int.  */
    unsigned int get_len () const;
    unsigned int get_precision () const;
    const HOST_WIDE_INT *get_val () const;
  };
}
723 | |
/* Refer to the LEN_IN least-significant blocks of VAL_IN, which encode
   an integer of PRECISION_IN bits.  The blocks are not copied.  */
inline::wi::storage_ref::storage_ref (const HOST_WIDE_INT *val_in,
				      unsigned int len_in,
				      unsigned int precision_in)
  : val (val_in), len (len_in), precision (precision_in)
{
}
730 | |
/* Return the number of significant blocks in VAL.  */
inline unsigned int
wi::storage_ref::get_len () const
{
  return len;
}
736 | |
/* Return the precision in bits of the referenced integer.  */
inline unsigned int
wi::storage_ref::get_precision () const
{
  return precision;
}
742 | |
/* Return a pointer to the referenced block array; no copy is made.  */
inline const HOST_WIDE_INT *
wi::storage_ref::get_val () const
{
  return val;
}
748 | |
749 | /* This class defines an integer type using the storage provided by the |
750 | template argument. The storage class must provide the following |
751 | functions: |
752 | |
753 | unsigned int get_precision () const |
754 | Return the number of bits in the integer. |
755 | |
756 | HOST_WIDE_INT *get_val () const |
757 | Return a pointer to the array of blocks that encodes the integer. |
758 | |
759 | unsigned int get_len () const |
760 | Return the number of blocks in get_val (). If this is smaller |
761 | than the number of blocks implied by get_precision (), the |
762 | remaining blocks are sign extensions of block get_len () - 1. |
763 | |
764 | Although not required by generic_wide_int itself, writable storage |
765 | classes can also provide the following functions: |
766 | |
767 | HOST_WIDE_INT *write_val (unsigned int) |
768 | Get a modifiable version of get_val (). The argument should be |
769 | upper estimation for LEN (ignored by all storages but |
770 | widest_int_storage). |
771 | |
772 | unsigned int set_len (unsigned int len) |
773 | Set the value returned by get_len () to LEN. */ |
/* An integer whose value is supplied by the STORAGE base class; see the
   commentary above for the interface that STORAGE must provide.  */
template <typename storage>
class GTY(()) generic_wide_int : public storage
{
public:
  generic_wide_int ();

  template <typename T>
  generic_wide_int (const T &);

  template <typename T>
  generic_wide_int (const T &, unsigned int);

  /* Conversions.  */
  HOST_WIDE_INT to_shwi (unsigned int) const;
  HOST_WIDE_INT to_shwi () const;
  unsigned HOST_WIDE_INT to_uhwi (unsigned int) const;
  unsigned HOST_WIDE_INT to_uhwi () const;
  HOST_WIDE_INT to_short_addr () const;

  /* Public accessors for the interior of a wide int.  */
  HOST_WIDE_INT sign_mask () const;
  HOST_WIDE_INT elt (unsigned int) const;
  HOST_WIDE_INT sext_elt (unsigned int) const;
  unsigned HOST_WIDE_INT ulow () const;
  unsigned HOST_WIDE_INT uhigh () const;
  HOST_WIDE_INT slow () const;
  HOST_WIDE_INT shigh () const;

  template <typename T>
  generic_wide_int &operator = (const T &);

  /* Define a compound assignment OP in terms of the wi:: operation F.  */
#define ASSIGNMENT_OPERATOR(OP, F) \
  template <typename T> \
  generic_wide_int &OP (const T &c) { return (*this = wi::F (*this, c)); }

/* Restrict these to cases where the shift operator is defined.  */
#define SHIFT_ASSIGNMENT_OPERATOR(OP, OP2) \
  template <typename T> \
  generic_wide_int &OP (const T &c) { return (*this = *this OP2 c); }

  /* Pre-increment/decrement by DELTA.  */
#define INCDEC_OPERATOR(OP, DELTA) \
  generic_wide_int &OP () { *this += DELTA; return *this; }

  ASSIGNMENT_OPERATOR (operator &=, bit_and)
  ASSIGNMENT_OPERATOR (operator |=, bit_or)
  ASSIGNMENT_OPERATOR (operator ^=, bit_xor)
  ASSIGNMENT_OPERATOR (operator +=, add)
  ASSIGNMENT_OPERATOR (operator -=, sub)
  ASSIGNMENT_OPERATOR (operator *=, mul)
  ASSIGNMENT_OPERATOR (operator <<=, lshift)
  SHIFT_ASSIGNMENT_OPERATOR (operator >>=, >>)
  INCDEC_OPERATOR (operator ++, 1)
  INCDEC_OPERATOR (operator --, -1)

#undef SHIFT_ASSIGNMENT_OPERATOR
#undef ASSIGNMENT_OPERATOR
#undef INCDEC_OPERATOR

  /* Debugging functions.  */
  void dump () const;

  static const bool is_sign_extended
    = wi::int_traits <generic_wide_int <storage> >::is_sign_extended;
  static const bool needs_write_val_arg
    = wi::int_traits <generic_wide_int <storage> >::needs_write_val_arg;
};
840 | |
/* Leave the value uninitialized.  */
template <typename storage>
inline generic_wide_int <storage>::generic_wide_int () {}
843 | |
/* Initialize from X, forwarding to the storage constructor.  */
template <typename storage>
template <typename T>
inline generic_wide_int <storage>::generic_wide_int (const T &x)
  : storage (x)
{
}
850 | |
/* Initialize from X with the given PRECISION, forwarding both to the
   storage constructor.  */
template <typename storage>
template <typename T>
inline generic_wide_int <storage>::generic_wide_int (const T &x,
						     unsigned int precision)
  : storage (x, precision)
{
}
858 | |
859 | /* Return THIS as a signed HOST_WIDE_INT, sign-extending from PRECISION. |
860 | If THIS does not fit in PRECISION, the information is lost. */ |
861 | template <typename storage> |
862 | inline HOST_WIDE_INT |
863 | generic_wide_int <storage>::to_shwi (unsigned int precision) const |
864 | { |
865 | if (precision < HOST_BITS_PER_WIDE_INT) |
866 | return sext_hwi (this->get_val ()[0], precision); |
867 | else |
868 | return this->get_val ()[0]; |
869 | } |
870 | |
871 | /* Return THIS as a signed HOST_WIDE_INT, in its natural precision. */ |
872 | template <typename storage> |
873 | inline HOST_WIDE_INT |
874 | generic_wide_int <storage>::to_shwi () const |
875 | { |
876 | if (is_sign_extended) |
877 | return this->get_val ()[0]; |
878 | else |
879 | return to_shwi (this->get_precision ()); |
880 | } |
881 | |
882 | /* Return THIS as an unsigned HOST_WIDE_INT, zero-extending from |
883 | PRECISION. If THIS does not fit in PRECISION, the information |
884 | is lost. */ |
885 | template <typename storage> |
886 | inline unsigned HOST_WIDE_INT |
887 | generic_wide_int <storage>::to_uhwi (unsigned int precision) const |
888 | { |
889 | if (precision < HOST_BITS_PER_WIDE_INT) |
890 | return zext_hwi (this->get_val ()[0], precision); |
891 | else |
892 | return this->get_val ()[0]; |
893 | } |
894 | |
/* Return THIS as an unsigned HOST_WIDE_INT, in its natural precision.  */
template <typename storage>
inline unsigned HOST_WIDE_INT
generic_wide_int <storage>::to_uhwi () const
{
  return to_uhwi (this->get_precision ());
}
902 | |
/* TODO: The compiler is half converted from using HOST_WIDE_INT to
   represent addresses to using offset_int to represent addresses.
   We use to_short_addr at the interface from new code to old,
   unconverted code.  */
template <typename storage>
inline HOST_WIDE_INT
generic_wide_int <storage>::to_short_addr () const
{
  /* Truncate to the least-significant block.  */
  return this->get_val ()[0];
}
913 | |
914 | /* Return the implicit value of blocks above get_len (). */ |
915 | template <typename storage> |
916 | inline HOST_WIDE_INT |
917 | generic_wide_int <storage>::sign_mask () const |
918 | { |
919 | unsigned int len = this->get_len (); |
920 | gcc_assert (len > 0); |
921 | |
922 | unsigned HOST_WIDE_INT high = this->get_val ()[len - 1]; |
923 | if (!is_sign_extended) |
924 | { |
925 | unsigned int precision = this->get_precision (); |
926 | int excess = len * HOST_BITS_PER_WIDE_INT - precision; |
927 | if (excess > 0) |
928 | high <<= excess; |
929 | } |
930 | return (HOST_WIDE_INT) (high) < 0 ? -1 : 0; |
931 | } |
932 | |
/* Return the signed value of the least-significant explicitly-encoded
   block.  */
template <typename storage>
inline HOST_WIDE_INT
generic_wide_int <storage>::slow () const
{
  /* Block 0 is always explicitly encoded.  */
  return this->get_val ()[0];
}
941 | |
/* Return the signed value of the most-significant explicitly-encoded
   block.  */
template <typename storage>
inline HOST_WIDE_INT
generic_wide_int <storage>::shigh () const
{
  /* get_len () is at least 1, so this index is always valid.  */
  return this->get_val ()[this->get_len () - 1];
}
950 | |
/* Return the unsigned value of the least-significant
   explicitly-encoded block.  */
template <typename storage>
inline unsigned HOST_WIDE_INT
generic_wide_int <storage>::ulow () const
{
  /* Block 0 is always explicitly encoded.  */
  return this->get_val ()[0];
}
959 | |
/* Return the unsigned value of the most-significant
   explicitly-encoded block.  */
template <typename storage>
inline unsigned HOST_WIDE_INT
generic_wide_int <storage>::uhigh () const
{
  /* get_len () is at least 1, so this index is always valid.  */
  return this->get_val ()[this->get_len () - 1];
}
968 | |
969 | /* Return block I, which might be implicitly or explicit encoded. */ |
970 | template <typename storage> |
971 | inline HOST_WIDE_INT |
972 | generic_wide_int <storage>::elt (unsigned int i) const |
973 | { |
974 | if (i >= this->get_len ()) |
975 | return sign_mask (); |
976 | else |
977 | return this->get_val ()[i]; |
978 | } |
979 | |
980 | /* Like elt, but sign-extend beyond the upper bit, instead of returning |
981 | the raw encoding. */ |
982 | template <typename storage> |
983 | inline HOST_WIDE_INT |
984 | generic_wide_int <storage>::sext_elt (unsigned int i) const |
985 | { |
986 | HOST_WIDE_INT elt_i = elt (i); |
987 | if (!is_sign_extended) |
988 | { |
989 | unsigned int precision = this->get_precision (); |
990 | unsigned int lsb = i * HOST_BITS_PER_WIDE_INT; |
991 | if (precision - lsb < HOST_BITS_PER_WIDE_INT) |
992 | elt_i = sext_hwi (src: elt_i, prec: precision - lsb); |
993 | } |
994 | return elt_i; |
995 | } |
996 | |
/* Assign X via the storage class's assignment operator and return
   THIS.  */
template <typename storage>
template <typename T>
inline generic_wide_int <storage> &
generic_wide_int <storage>::operator = (const T &x)
{
  storage::operator = (x);
  return *this;
}
1005 | |
1006 | /* Dump the contents of the integer to stderr, for debugging. */ |
1007 | template <typename storage> |
1008 | void |
1009 | generic_wide_int <storage>::dump () const |
1010 | { |
1011 | unsigned int len = this->get_len (); |
1012 | const HOST_WIDE_INT *val = this->get_val (); |
1013 | unsigned int precision = this->get_precision (); |
1014 | fprintf (stderr, format: "[" ); |
1015 | if (len * HOST_BITS_PER_WIDE_INT < precision) |
1016 | fprintf (stderr, format: "...," ); |
1017 | for (unsigned int i = 0; i < len - 1; ++i) |
1018 | fprintf (stderr, HOST_WIDE_INT_PRINT_HEX "," , val[len - 1 - i]); |
1019 | fprintf (stderr, HOST_WIDE_INT_PRINT_HEX "], precision = %d\n" , |
1020 | val[0], precision); |
1021 | } |
1022 | |
namespace wi
{
  /* A generic_wide_int inherits the traits of its storage class.  */
  template <typename storage>
  struct int_traits < generic_wide_int <storage> >
    : public wi::int_traits <storage>
  {
    static unsigned int get_precision (const generic_wide_int <storage> &);
    static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
				      const generic_wide_int <storage> &);
  };
}
1034 | |
/* Return the precision in bits of X.  */
template <typename storage>
inline unsigned int
wi::int_traits < generic_wide_int <storage> >::
get_precision (const generic_wide_int <storage> &x)
{
  return x.get_precision ();
}
1042 | |
/* Decompose X into a storage_ref.  X already exposes a block array, so
   the scratch buffer (the unnamed first argument) is never used.  */
template <typename storage>
inline wi::storage_ref
wi::int_traits < generic_wide_int <storage> >::
decompose (HOST_WIDE_INT *, unsigned int precision,
	   const generic_wide_int <storage> &x)
{
  gcc_checking_assert (precision == x.get_precision ());
  return wi::storage_ref (x.get_val (), x.get_len (), precision);
}
1052 | |
/* Provide the storage for a wide_int_ref.  This acts like a read-only
   wide_int, with the optimization that VAL is normally a pointer to
   another integer's storage, so that no array copy is needed.  */
template <bool SE, bool HDP>
class wide_int_ref_storage : public wi::storage_ref
{
private:
  /* Scratch space that can be used when decomposing the original integer.
     It must live as long as this object.  */
  HOST_WIDE_INT scratch[2];

public:
  wide_int_ref_storage () {}

  wide_int_ref_storage (const wi::storage_ref &);

  /* Reference X in its natural precision.  */
  template <typename T>
  wide_int_ref_storage (const T &);

  /* Reference X in an explicitly-given precision.  */
  template <typename T>
  wide_int_ref_storage (const T &, unsigned int);
};
1075 | |
/* Create a reference from an existing reference.  The scratch buffer
   is left untouched.  */
template <bool SE, bool HDP>
inline wide_int_ref_storage <SE, HDP>::
wide_int_ref_storage (const wi::storage_ref &x)
  : storage_ref (x)
{}
1082 | |
/* Create a reference to integer X in its natural precision.  Note
   that the natural precision is host-dependent for primitive
   types.  The decompose hook may write into SCRATCH, which is why it
   must outlive this object.  */
template <bool SE, bool HDP>
template <typename T>
inline wide_int_ref_storage <SE, HDP>::wide_int_ref_storage (const T &x)
  : storage_ref (wi::int_traits <T>::decompose (scratch,
						wi::get_precision (x), x))
{
}
1093 | |
/* Create a reference to integer X in precision PRECISION.  The
   decompose hook may write into SCRATCH, which is why it must outlive
   this object.  */
template <bool SE, bool HDP>
template <typename T>
inline wide_int_ref_storage <SE, HDP>::
wide_int_ref_storage (const T &x, unsigned int precision)
  : storage_ref (wi::int_traits <T>::decompose (scratch, precision, x))
{
}
1102 | |
namespace wi
{
  /* SE says whether the referenced value is known to be sign-extended,
     HDP whether its precision depends on the host.  */
  template <bool SE, bool HDP>
  struct int_traits <wide_int_ref_storage <SE, HDP> >
  {
    static const enum precision_type precision_type = VAR_PRECISION;
    static const bool host_dependent_precision = HDP;
    static const bool is_sign_extended = SE;
    static const bool needs_write_val_arg = false;
  };
}
1114 | |
namespace wi
{
  /* Out-of-line helpers used below to build canonical block encodings;
     both return the resulting LEN.  */
  unsigned int force_to_size (HOST_WIDE_INT *, const HOST_WIDE_INT *,
			      unsigned int, unsigned int, unsigned int,
			      signop sgn);
  unsigned int from_array (HOST_WIDE_INT *, const HOST_WIDE_INT *,
			   unsigned int, unsigned int, bool = true);
}
1123 | |
/* The storage used by wide_int.  */
class GTY(()) wide_int_storage
{
private:
  union
  {
    /* Inline blocks, used while PRECISION <= WIDE_INT_MAX_INL_PRECISION.  */
    HOST_WIDE_INT val[WIDE_INT_MAX_INL_ELTS];
    /* Heap-allocated blocks, used for larger precisions.  */
    HOST_WIDE_INT *valp;
  } GTY((skip)) u;
  /* Number of significant blocks in the active member of U.  */
  unsigned int len;
  /* Precision in bits; selects which member of U is active.  */
  unsigned int precision;

public:
  wide_int_storage ();
  template <typename T>
  wide_int_storage (const T &);
  wide_int_storage (const wide_int_storage &);
  ~wide_int_storage ();

  /* The standard generic_wide_int storage methods.  */
  unsigned int get_precision () const;
  const HOST_WIDE_INT *get_val () const;
  unsigned int get_len () const;
  HOST_WIDE_INT *write_val (unsigned int);
  void set_len (unsigned int, bool = false);

  wide_int_storage &operator = (const wide_int_storage &);
  template <typename T>
  wide_int_storage &operator = (const T &);

  static wide_int from (const wide_int_ref &, unsigned int, signop);
  static wide_int from_array (const HOST_WIDE_INT *, unsigned int,
			      unsigned int, bool = true);
  static wide_int create (unsigned int);
};
1159 | |
namespace wi
{
  /* Traits for the default, variable-precision wide_int flavor.  */
  template <>
  struct int_traits <wide_int_storage>
  {
    static const enum precision_type precision_type = VAR_PRECISION;
    /* Guaranteed by a static assert in the wide_int_storage constructor.  */
    static const bool host_dependent_precision = false;
    static const bool is_sign_extended = true;
    static const bool needs_write_val_arg = false;
    template <typename T1, typename T2>
    static wide_int get_binary_result (const T1 &, const T2 &);
    template <typename T1, typename T2>
    static unsigned int get_binary_precision (const T1 &, const T2 &);
  };
}
1176 | |
/* Construct a wide_int with zero precision; the value is left unset
   and no out-of-line buffer is allocated.  */
inline wide_int_storage::wide_int_storage () : precision (0) {}
1178 | |
/* Initialize the storage from integer X, in its natural precision.
   Note that we do not allow integers with host-dependent precision
   to become wide_ints; wide_ints must always be logically independent
   of the host.  */
template <typename T>
inline wide_int_storage::wide_int_storage (const T &x)
{
  STATIC_ASSERT (!wi::int_traits<T>::host_dependent_precision);
  STATIC_ASSERT (wi::int_traits<T>::precision_type != wi::CONST_PRECISION);
  STATIC_ASSERT (wi::int_traits<T>::precision_type != wi::INL_CONST_PRECISION);
  WIDE_INT_REF_FOR (T) xi (x);
  precision = xi.precision;
  /* Values wider than the inline buffer live in a heap allocation.  */
  if (UNLIKELY (precision > WIDE_INT_MAX_INL_PRECISION))
    u.valp = XNEWVEC (HOST_WIDE_INT, CEIL (precision, HOST_BITS_PER_WIDE_INT));
  wi::copy (*this, xi);
}
1195 | |
1196 | inline wide_int_storage::wide_int_storage (const wide_int_storage &x) |
1197 | { |
1198 | memcpy (dest: this, src: &x, n: sizeof (wide_int_storage)); |
1199 | if (UNLIKELY (precision > WIDE_INT_MAX_INL_PRECISION)) |
1200 | { |
1201 | u.valp = XNEWVEC (HOST_WIDE_INT, CEIL (precision, HOST_BITS_PER_WIDE_INT)); |
1202 | memcpy (dest: u.valp, src: x.u.valp, n: len * sizeof (HOST_WIDE_INT)); |
1203 | } |
1204 | } |
1205 | |
/* Free the out-of-line buffer, if one was allocated.  */
inline wide_int_storage::~wide_int_storage ()
{
  if (UNLIKELY (precision > WIDE_INT_MAX_INL_PRECISION))
    XDELETEVEC (u.valp);
}
1211 | |
1212 | inline wide_int_storage& |
1213 | wide_int_storage::operator = (const wide_int_storage &x) |
1214 | { |
1215 | if (UNLIKELY (precision > WIDE_INT_MAX_INL_PRECISION)) |
1216 | { |
1217 | if (this == &x) |
1218 | return *this; |
1219 | XDELETEVEC (u.valp); |
1220 | } |
1221 | memcpy (dest: this, src: &x, n: sizeof (wide_int_storage)); |
1222 | if (UNLIKELY (precision > WIDE_INT_MAX_INL_PRECISION)) |
1223 | { |
1224 | u.valp = XNEWVEC (HOST_WIDE_INT, CEIL (precision, HOST_BITS_PER_WIDE_INT)); |
1225 | memcpy (dest: u.valp, src: x.u.valp, n: len * sizeof (HOST_WIDE_INT)); |
1226 | } |
1227 | return *this; |
1228 | } |
1229 | |
/* Assign integer X, adopting X's precision and re-allocating the value
   buffer if the precision change crosses the inline/out-of-line
   boundary.  */
template <typename T>
inline wide_int_storage&
wide_int_storage::operator = (const T &x)
{
  STATIC_ASSERT (!wi::int_traits<T>::host_dependent_precision);
  STATIC_ASSERT (wi::int_traits<T>::precision_type != wi::CONST_PRECISION);
  STATIC_ASSERT (wi::int_traits<T>::precision_type != wi::INL_CONST_PRECISION);
  WIDE_INT_REF_FOR (T) xi (x);
  if (UNLIKELY (precision != xi.precision))
    {
      if (UNLIKELY (precision > WIDE_INT_MAX_INL_PRECISION))
	XDELETEVEC (u.valp);
      precision = xi.precision;
      if (UNLIKELY (precision > WIDE_INT_MAX_INL_PRECISION))
	u.valp = XNEWVEC (HOST_WIDE_INT,
			  CEIL (precision, HOST_BITS_PER_WIDE_INT));
    }
  wi::copy (*this, xi);
  return *this;
}
1250 | |
/* Return the precision in bits.  */
inline unsigned int
wide_int_storage::get_precision () const
{
  return precision;
}
1256 | |
/* Return the active block array: heap-allocated for large precisions,
   inline otherwise.  */
inline const HOST_WIDE_INT *
wide_int_storage::get_val () const
{
  return UNLIKELY (precision > WIDE_INT_MAX_INL_PRECISION) ? u.valp : u.val;
}
1262 | |
/* Return the number of significant blocks.  */
inline unsigned int
wide_int_storage::get_len () const
{
  return len;
}
1268 | |
/* Return a modifiable pointer to the block array.  The length estimate
   argument is ignored; the buffer size is fixed by PRECISION.  */
inline HOST_WIDE_INT *
wide_int_storage::write_val (unsigned int)
{
  return UNLIKELY (precision > WIDE_INT_MAX_INL_PRECISION) ? u.valp : u.val;
}
1274 | |
1275 | inline void |
1276 | wide_int_storage::set_len (unsigned int l, bool is_sign_extended) |
1277 | { |
1278 | len = l; |
1279 | if (!is_sign_extended && len * HOST_BITS_PER_WIDE_INT > precision) |
1280 | { |
1281 | HOST_WIDE_INT &v = write_val (len)[len - 1]; |
1282 | v = sext_hwi (src: v, prec: precision % HOST_BITS_PER_WIDE_INT); |
1283 | } |
1284 | } |
1285 | |
1286 | /* Treat X as having signedness SGN and convert it to a PRECISION-bit |
1287 | number. */ |
1288 | inline wide_int |
1289 | wide_int_storage::from (const wide_int_ref &x, unsigned int precision, |
1290 | signop sgn) |
1291 | { |
1292 | wide_int result = wide_int::create (precision); |
1293 | result.set_len (l: wi::force_to_size (result.write_val (x.len), x.val, x.len, |
1294 | x.precision, precision, sgn)); |
1295 | return result; |
1296 | } |
1297 | |
1298 | /* Create a wide_int from the explicit block encoding given by VAL and |
1299 | LEN. PRECISION is the precision of the integer. NEED_CANON_P is |
1300 | true if the encoding may have redundant trailing blocks. */ |
1301 | inline wide_int |
1302 | wide_int_storage::from_array (const HOST_WIDE_INT *val, unsigned int len, |
1303 | unsigned int precision, bool need_canon_p) |
1304 | { |
1305 | wide_int result = wide_int::create (precision); |
1306 | result.set_len (l: wi::from_array (result.write_val (len), val, len, precision, |
1307 | need_canon_p)); |
1308 | return result; |
1309 | } |
1310 | |
/* Return an uninitialized wide_int with precision PRECISION.  A large
   precision gets its out-of-line buffer here; the blocks themselves
   are left unset.  */
inline wide_int
wide_int_storage::create (unsigned int precision)
{
  wide_int x;
  x.precision = precision;
  if (UNLIKELY (precision > WIDE_INT_MAX_INL_PRECISION))
    x.u.valp = XNEWVEC (HOST_WIDE_INT,
			CEIL (precision, HOST_BITS_PER_WIDE_INT));
  return x;
}
1322 | |
1323 | template <typename T1, typename T2> |
1324 | inline wide_int |
1325 | wi::int_traits <wide_int_storage>::get_binary_result (const T1 &x, const T2 &y) |
1326 | { |
1327 | /* This shouldn't be used for two flexible-precision inputs. */ |
1328 | STATIC_ASSERT (wi::int_traits <T1>::precision_type != FLEXIBLE_PRECISION |
1329 | || wi::int_traits <T2>::precision_type != FLEXIBLE_PRECISION); |
1330 | if (wi::int_traits <T1>::precision_type == FLEXIBLE_PRECISION) |
1331 | return wide_int::create (precision: wi::get_precision (y)); |
1332 | else |
1333 | return wide_int::create (precision: wi::get_precision (x)); |
1334 | } |
1335 | |
1336 | template <typename T1, typename T2> |
1337 | inline unsigned int |
1338 | wi::int_traits <wide_int_storage>::get_binary_precision (const T1 &x, |
1339 | const T2 &y) |
1340 | { |
1341 | /* This shouldn't be used for two flexible-precision inputs. */ |
1342 | STATIC_ASSERT (wi::int_traits <T1>::precision_type != FLEXIBLE_PRECISION |
1343 | || wi::int_traits <T2>::precision_type != FLEXIBLE_PRECISION); |
1344 | if (wi::int_traits <T1>::precision_type == FLEXIBLE_PRECISION) |
1345 | return wi::get_precision (y); |
1346 | else |
1347 | return wi::get_precision (x); |
1348 | } |
1349 | |
/* The storage used by FIXED_WIDE_INT (N).  */
template <int N>
class GTY(()) fixed_wide_int_storage
{
private:
  /* The blocks of the value; always stored inline, since N is a
     compile-time constant.  */
  HOST_WIDE_INT val[WIDE_INT_MAX_HWIS (N)];
  /* Number of significant blocks in VAL.  */
  unsigned int len;

public:
  fixed_wide_int_storage () = default;
  template <typename T>
  fixed_wide_int_storage (const T &);

  /* The standard generic_wide_int storage methods.  */
  unsigned int get_precision () const;
  const HOST_WIDE_INT *get_val () const;
  unsigned int get_len () const;
  HOST_WIDE_INT *write_val (unsigned int);
  void set_len (unsigned int, bool = false);

  static FIXED_WIDE_INT (N) from (const wide_int_ref &, signop);
  static FIXED_WIDE_INT (N) from_array (const HOST_WIDE_INT *, unsigned int,
					bool = true);
};
1374 | |
namespace wi
{
  /* Traits for the fixed, inline-only N-bit flavor.  */
  template <int N>
  struct int_traits < fixed_wide_int_storage <N> >
  {
    static const enum precision_type precision_type = INL_CONST_PRECISION;
    static const bool host_dependent_precision = false;
    static const bool is_sign_extended = true;
    static const bool needs_write_val_arg = false;
    static const unsigned int precision = N;
    template <typename T1, typename T2>
    static FIXED_WIDE_INT (N) get_binary_result (const T1 &, const T2 &);
    template <typename T1, typename T2>
    static unsigned int get_binary_precision (const T1 &, const T2 &);
  };
}
1391 | |
/* Initialize the storage from integer X, in precision N.  */
template <int N>
template <typename T>
inline fixed_wide_int_storage <N>::fixed_wide_int_storage (const T &x)
{
  /* Check for type compatibility.  We don't want to initialize a
     fixed-width integer from something like a wide_int.  */
  WI_BINARY_RESULT (T, FIXED_WIDE_INT (N)) *assertion ATTRIBUTE_UNUSED;
  wi::copy (*this, WIDE_INT_REF_FOR (T) (x, N));
}
1402 | |
/* Return the precision, which is always the template parameter N.  */
template <int N>
inline unsigned int
fixed_wide_int_storage <N>::get_precision () const
{
  return N;
}
1409 | |
/* Return a pointer to the inline block array.  */
template <int N>
inline const HOST_WIDE_INT *
fixed_wide_int_storage <N>::get_val () const
{
  return val;
}
1416 | |
/* Return the number of significant blocks.  */
template <int N>
inline unsigned int
fixed_wide_int_storage <N>::get_len () const
{
  return len;
}
1423 | |
/* Return a modifiable pointer to the block array; the length estimate
   argument is ignored, since the array size is fixed by N.  */
template <int N>
inline HOST_WIDE_INT *
fixed_wide_int_storage <N>::write_val (unsigned int)
{
  return val;
}
1430 | |
/* Record that the value occupies L blocks.  The is_sign_extended flag
   is ignored; see the assertion below for why no canonicalization is
   needed.  */
template <int N>
inline void
fixed_wide_int_storage <N>::set_len (unsigned int l, bool)
{
  len = l;
  /* There are no excess bits in val[len - 1].  */
  STATIC_ASSERT (N % HOST_BITS_PER_WIDE_INT == 0);
}
1439 | |
/* Treat X as having signedness SGN and convert it to an N-bit number.  */
template <int N>
inline FIXED_WIDE_INT (N)
fixed_wide_int_storage <N>::from (const wide_int_ref &x, signop sgn)
{
  FIXED_WIDE_INT (N) result;
  /* force_to_size fills the result's blocks and returns the new LEN.  */
  result.set_len (wi::force_to_size (result.write_val (x.len), x.val, x.len,
				     x.precision, N, sgn));
  return result;
}
1450 | |
/* Create a FIXED_WIDE_INT (N) from the explicit block encoding given by
   VAL and LEN.  NEED_CANON_P is true if the encoding may have redundant
   trailing blocks.  */
template <int N>
inline FIXED_WIDE_INT (N)
fixed_wide_int_storage <N>::from_array (const HOST_WIDE_INT *val,
					unsigned int len,
					bool need_canon_p)
{
  FIXED_WIDE_INT (N) result;
  /* wi::from_array fills the result's blocks and returns the new LEN.  */
  result.set_len (wi::from_array (result.write_val (len), val, len,
				  N, need_canon_p));
  return result;
}
1465 | |
/* Return an uninitialized N-bit result for a binary operation; the
   operands are irrelevant because the precision is fixed.  */
template <int N>
template <typename T1, typename T2>
inline FIXED_WIDE_INT (N)
wi::int_traits < fixed_wide_int_storage <N> >::
get_binary_result (const T1 &, const T2 &)
{
  return FIXED_WIDE_INT (N) ();
}
1474 | |
/* Return the precision of a binary result, which is always N.  */
template <int N>
template <typename T1, typename T2>
inline unsigned int
wi::int_traits < fixed_wide_int_storage <N> >::
get_binary_precision (const T1 &, const T2 &)
{
  return N;
}
1483 | |
1484 | #define WIDEST_INT(N) generic_wide_int < widest_int_storage <N> > |
1485 | |
/* The storage used by widest_int.  */
template <int N>
class GTY(()) widest_int_storage
{
private:
  union
  {
    /* Inline blocks, used while LEN <= WIDE_INT_MAX_INL_ELTS.  */
    HOST_WIDE_INT val[WIDE_INT_MAX_INL_ELTS];
    /* Heap-allocated blocks, used for longer values.  */
    HOST_WIDE_INT *valp;
  } GTY((skip)) u;
  /* Number of significant blocks; selects which member of U is active.  */
  unsigned int len;

public:
  widest_int_storage ();
  widest_int_storage (const widest_int_storage &);
  template <typename T>
  widest_int_storage (const T &);
  ~widest_int_storage ();
  widest_int_storage &operator = (const widest_int_storage &);
  template <typename T>
  inline widest_int_storage& operator = (const T &);

  /* The standard generic_wide_int storage methods.  */
  unsigned int get_precision () const;
  const HOST_WIDE_INT *get_val () const;
  unsigned int get_len () const;
  HOST_WIDE_INT *write_val (unsigned int);
  void set_len (unsigned int, bool = false);

  static WIDEST_INT (N) from (const wide_int_ref &, signop);
  static WIDEST_INT (N) from_array (const HOST_WIDE_INT *, unsigned int,
				    bool = true);
};
1519 | |
namespace wi
{
  /* Traits for widest_int; note that write_val needs a length estimate
     so the storage can decide between inline and heap blocks.  */
  template <int N>
  struct int_traits < widest_int_storage <N> >
  {
    static const enum precision_type precision_type = CONST_PRECISION;
    static const bool host_dependent_precision = false;
    static const bool is_sign_extended = true;
    static const bool needs_write_val_arg = true;
    static const unsigned int precision = N;
    template <typename T1, typename T2>
    static WIDEST_INT (N) get_binary_result (const T1 &, const T2 &);
    template <typename T1, typename T2>
    static unsigned int get_binary_precision (const T1 &, const T2 &);
  };
}
1536 | |
/* Start with LEN 0 so that no out-of-line buffer is owned until a
   value is stored.  */
template <int N>
inline widest_int_storage <N>::widest_int_storage () : len (0) {}
1539 | |
/* Initialize the storage from integer X, in precision N.  */
template <int N>
template <typename T>
inline widest_int_storage <N>::widest_int_storage (const T &x) : len (0)
{
  /* Check for type compatibility.  We don't want to initialize a
     widest integer from something like a wide_int.  */
  WI_BINARY_RESULT (T, WIDEST_INT (N)) *assertion ATTRIBUTE_UNUSED;
  wi::copy (*this, WIDE_INT_REF_FOR (T) (x, N));
}
1550 | |
/* Copy-construct from X.  The shallow copy captures LEN and the inline
   value (or stale pointer); out-of-line values then get a buffer of
   their own.  */
template <int N>
inline
widest_int_storage <N>::widest_int_storage (const widest_int_storage &x)
{
  memcpy (this, &x, sizeof (widest_int_storage));
  if (UNLIKELY (len > WIDE_INT_MAX_INL_ELTS))
    {
      u.valp = XNEWVEC (HOST_WIDE_INT, len);
      memcpy (u.valp, x.u.valp, len * sizeof (HOST_WIDE_INT));
    }
}
1562 | |
/* Destructor: free the heap buffer, if the value needed one.  */
template <int N>
inline widest_int_storage <N>::~widest_int_storage ()
{
  if (UNLIKELY (len > WIDE_INT_MAX_INL_ELTS))
    XDELETEVEC (u.valp);
}
1569 | |
/* Copy assignment.  Free any existing heap buffer, bitwise-copy X,
   then deep-copy X's heap buffer if it has one.  */
template <int N>
inline widest_int_storage <N>&
widest_int_storage <N>::operator = (const widest_int_storage &x)
{
  if (UNLIKELY (len > WIDE_INT_MAX_INL_ELTS))
    {
      /* On self-assignment we must not free the buffer we are about
	 to copy from.  */
      if (this == &x)
	return *this;
      XDELETEVEC (u.valp);
    }
  memcpy (this, &x, sizeof (widest_int_storage));
  if (UNLIKELY (len > WIDE_INT_MAX_INL_ELTS))
    {
      u.valp = XNEWVEC (HOST_WIDE_INT, len);
      memcpy (u.valp, x.u.valp, len * sizeof (HOST_WIDE_INT));
    }
  return *this;
}
1588 | |
/* Assign integer X, extended to N bits, to this value.  */
template <int N>
template <typename T>
inline widest_int_storage <N>&
widest_int_storage <N>::operator = (const T &x)
{
  /* Check for type compatibility.  We don't want to assign a
     widest integer from something like a wide_int.  */
  WI_BINARY_RESULT (T, WIDEST_INT (N)) *assertion ATTRIBUTE_UNUSED;
  /* Release any heap buffer before copying in the new value.  */
  if (UNLIKELY (len > WIDE_INT_MAX_INL_ELTS))
    XDELETEVEC (u.valp);
  len = 0;
  wi::copy (*this, WIDE_INT_REF_FOR (T) (x, N));
  return *this;
}
1603 | |
/* Return the precision, which is always the compile-time constant N.  */
template <int N>
inline unsigned int
widest_int_storage <N>::get_precision () const
{
  return N;
}
1610 | |
1611 | template <int N> |
1612 | inline const HOST_WIDE_INT * |
1613 | widest_int_storage <N>::get_val () const |
1614 | { |
1615 | return UNLIKELY (len > WIDE_INT_MAX_INL_ELTS) ? u.valp : u.val; |
1616 | } |
1617 | |
/* Return the number of HWI blocks currently used by the value.  */
template <int N>
inline unsigned int
widest_int_storage <N>::get_len () const
{
  return len;
}
1624 | |
1625 | template <int N> |
1626 | inline HOST_WIDE_INT * |
1627 | widest_int_storage <N>::write_val (unsigned int l) |
1628 | { |
1629 | if (UNLIKELY (len > WIDE_INT_MAX_INL_ELTS)) |
1630 | XDELETEVEC (u.valp); |
1631 | len = l; |
1632 | if (UNLIKELY (l > WIDE_INT_MAX_INL_ELTS)) |
1633 | { |
1634 | u.valp = XNEWVEC (HOST_WIDE_INT, l); |
1635 | return u.valp; |
1636 | } |
1637 | else if (CHECKING_P && l < WIDE_INT_MAX_INL_ELTS) |
1638 | u.val[l] = HOST_WIDE_INT_UC (0xbaaaaaaddeadbeef); |
1639 | return u.val; |
1640 | } |
1641 | |
/* Record that only L blocks of the value are significant.  L must not
   exceed the length passed to the preceding write_val call.  */
template <int N>
inline void
widest_int_storage <N>::set_len (unsigned int l, bool)
{
  gcc_checking_assert (l <= len);
  /* If the value shrank enough to fit inline, move it out of the heap
     buffer and release the buffer.  */
  if (UNLIKELY (len > WIDE_INT_MAX_INL_ELTS)
      && l <= WIDE_INT_MAX_INL_ELTS)
    {
      HOST_WIDE_INT *valp = u.valp;
      memcpy (u.val, valp, l * sizeof (u.val[0]));
      XDELETEVEC (valp);
    }
  /* Otherwise, when checking, verify the sentinel that write_val placed
     just past the requested length is still intact.  */
  else if (len && len < WIDE_INT_MAX_INL_ELTS)
    gcc_checking_assert ((unsigned HOST_WIDE_INT) u.val[len]
			 == HOST_WIDE_INT_UC (0xbaaaaaaddeadbeef));
  len = l;
  /* There are no excess bits in val[len - 1].  */
  STATIC_ASSERT (N % HOST_BITS_PER_WIDE_INT == 0);
}
1661 | |
/* Treat X as having signedness SGN and convert it to an N-bit number.  */
template <int N>
inline WIDEST_INT (N)
widest_int_storage <N>::from (const wide_int_ref &x, signop sgn)
{
  WIDEST_INT (N) result;
  unsigned int exp_len = x.len;
  unsigned int prec = result.get_precision ();
  /* Zero-extending a value whose top block is negative requires one
     extra block to hold the zero extension.  */
  if (sgn == UNSIGNED && prec > x.precision && x.val[x.len - 1] < 0)
    exp_len = CEIL (x.precision, HOST_BITS_PER_WIDE_INT) + 1;
  result.set_len (wi::force_to_size (result.write_val (exp_len), x.val, x.len,
				     x.precision, prec, sgn));
  return result;
}
1676 | |
/* Create a WIDEST_INT (N) from the explicit block encoding given by
   VAL and LEN.  NEED_CANON_P is true if the encoding may have redundant
   trailing blocks.  */
template <int N>
inline WIDEST_INT (N)
widest_int_storage <N>::from_array (const HOST_WIDE_INT *val,
				    unsigned int len,
				    bool need_canon_p)
{
  WIDEST_INT (N) result;
  /* wi::from_array canonicalizes the encoding if requested and returns
     the final block count.  */
  result.set_len (wi::from_array (result.write_val (len), val, len,
				  result.get_precision (), need_canon_p));
  return result;
}
1691 | |
/* The result of a binary operation on widest_ints is another
   widest_int of the same fixed precision; the operands' values do not
   affect the result type.  */
template <int N>
template <typename T1, typename T2>
inline WIDEST_INT (N)
wi::int_traits < widest_int_storage <N> >::
get_binary_result (const T1 &, const T2 &)
{
  return WIDEST_INT (N) ();
}
1700 | |
/* Binary operations on widest_ints always have the fixed precision N,
   regardless of the operands.  */
template <int N>
template <typename T1, typename T2>
inline unsigned int
wi::int_traits < widest_int_storage <N> >::
get_binary_precision (const T1 &, const T2 &)
{
  return N;
}
1709 | |
/* A reference to one element of a trailing_wide_ints structure.
   It does not own any of the data it points to.  */
class trailing_wide_int_storage
{
private:
  /* The precision of the integer, which is a fixed property of the
     parent trailing_wide_ints.  */
  unsigned int m_precision;

  /* A pointer to the length field.  */
  unsigned short *m_len;

  /* A pointer to the HWI array.  There are enough elements to hold all
     values of precision M_PRECISION.  */
  HOST_WIDE_INT *m_val;

public:
  trailing_wide_int_storage (unsigned int, unsigned short *, HOST_WIDE_INT *);

  /* The standard generic_wide_int storage methods.  */
  unsigned int get_len () const;
  unsigned int get_precision () const;
  const HOST_WIDE_INT *get_val () const;
  HOST_WIDE_INT *write_val (unsigned int);
  void set_len (unsigned int, bool = false);

  /* Assign a new value to the referenced element.  */
  template <typename T>
  trailing_wide_int_storage &operator = (const T &);
};
1738 | |
/* The generic_wide_int view of one trailing element.  */
typedef generic_wide_int <trailing_wide_int_storage> trailing_wide_int;

/* trailing_wide_int behaves like a wide_int.  */
namespace wi
{
  template <>
  struct int_traits <trailing_wide_int_storage>
    : public int_traits <wide_int_storage> {};
}
1748 | |
1749 | /* A variable-length array of wide_int-like objects that can be put |
1750 | at the end of a variable-sized structure. The number of objects is |
1751 | at most N and can be set at runtime by using set_precision(). |
1752 | |
1753 | Use extra_size to calculate how many bytes beyond the |
1754 | sizeof need to be allocated. Use set_precision to initialize the |
1755 | structure. */ |
1756 | template <int N> |
1757 | struct GTY((user)) trailing_wide_ints |
1758 | { |
1759 | private: |
1760 | /* The shared precision of each number. */ |
1761 | unsigned short m_precision; |
1762 | |
1763 | /* The shared maximum length of each number. */ |
1764 | unsigned short m_max_len; |
1765 | |
1766 | /* The number of elements. */ |
1767 | unsigned char m_num_elements; |
1768 | |
1769 | /* The current length of each number. */ |
1770 | unsigned short m_len[N]; |
1771 | |
1772 | /* The variable-length part of the structure, which always contains |
1773 | at least one HWI. Element I starts at index I * M_MAX_LEN. */ |
1774 | HOST_WIDE_INT m_val[1]; |
1775 | |
1776 | public: |
1777 | typedef WIDE_INT_REF_FOR (trailing_wide_int_storage) const_reference; |
1778 | |
1779 | void set_precision (unsigned int precision, unsigned int num_elements = N); |
1780 | unsigned int get_precision () const { return m_precision; } |
1781 | unsigned int num_elements () const { return m_num_elements; } |
1782 | trailing_wide_int operator [] (unsigned int); |
1783 | const_reference operator [] (unsigned int) const; |
1784 | static size_t extra_size (unsigned int precision, |
1785 | unsigned int num_elements = N); |
1786 | size_t () const { return extra_size (m_precision, |
1787 | m_num_elements); } |
1788 | }; |
1789 | |
/* Construct a reference with precision PRECISION whose length lives at
   *LEN and whose blocks live at VAL; no data is copied or owned.  */
inline trailing_wide_int_storage::
trailing_wide_int_storage (unsigned int precision, unsigned short *len,
			   HOST_WIDE_INT *val)
  : m_precision (precision), m_len (len), m_val (val)
{
}
1796 | |
/* Return the current block count of the referenced element.  */
inline unsigned int
trailing_wide_int_storage::get_len () const
{
  return *m_len;
}
1802 | |
/* Return the precision shared by all elements of the parent structure.  */
inline unsigned int
trailing_wide_int_storage::get_precision () const
{
  return m_precision;
}
1808 | |
/* Return a read-only pointer to the element's HWI blocks.  */
inline const HOST_WIDE_INT *
trailing_wide_int_storage::get_val () const
{
  return m_val;
}
1814 | |
/* Return a writable pointer to the element's HWI blocks; the buffer is
   preallocated by the parent, so the requested length is ignored.  */
inline HOST_WIDE_INT *
trailing_wide_int_storage::write_val (unsigned int)
{
  return m_val;
}
1820 | |
1821 | inline void |
1822 | trailing_wide_int_storage::set_len (unsigned int len, bool is_sign_extended) |
1823 | { |
1824 | *m_len = len; |
1825 | if (!is_sign_extended && len * HOST_BITS_PER_WIDE_INT > m_precision) |
1826 | m_val[len - 1] = sext_hwi (src: m_val[len - 1], |
1827 | prec: m_precision % HOST_BITS_PER_WIDE_INT); |
1828 | } |
1829 | |
/* Assign X to the referenced element, converting it to the element's
   precision.  */
template <typename T>
inline trailing_wide_int_storage &
trailing_wide_int_storage::operator = (const T &x)
{
  WIDE_INT_REF_FOR (T) xi (x, m_precision);
  wi::copy (*this, xi);
  return *this;
}
1838 | |
1839 | /* Initialize the structure and record that all elements have precision |
1840 | PRECISION. NUM_ELEMENTS can be no more than N. */ |
1841 | template <int N> |
1842 | inline void |
1843 | trailing_wide_ints <N>::set_precision (unsigned int precision, |
1844 | unsigned int num_elements) |
1845 | { |
1846 | gcc_checking_assert (num_elements <= N); |
1847 | m_num_elements = num_elements; |
1848 | m_precision = precision; |
1849 | m_max_len = WIDE_INT_MAX_HWIS (precision); |
1850 | } |
1851 | |
/* Return a reference to element INDEX.  */
template <int N>
inline trailing_wide_int
trailing_wide_ints <N>::operator [] (unsigned int index)
{
  /* Element I's blocks start at index I * M_MAX_LEN of m_val.  */
  return trailing_wide_int_storage (m_precision, &m_len[index],
				    &m_val[index * m_max_len]);
}
1860 | |
/* Return a read-only reference to element INDEX.  */
template <int N>
inline typename trailing_wide_ints <N>::const_reference
trailing_wide_ints <N>::operator [] (unsigned int index) const
{
  return wi::storage_ref (&m_val[index * m_max_len],
			  m_len[index], m_precision);
}
1868 | |
1869 | /* Return how many extra bytes need to be added to the end of the |
1870 | structure in order to handle NUM_ELEMENTS wide_ints of precision |
1871 | PRECISION. NUM_ELEMENTS is the number of elements, and defaults |
1872 | to N. */ |
1873 | template <int N> |
1874 | inline size_t |
1875 | trailing_wide_ints <N>:: (unsigned int precision, |
1876 | unsigned int num_elements) |
1877 | { |
1878 | unsigned int max_len = WIDE_INT_MAX_HWIS (precision); |
1879 | gcc_checking_assert (num_elements <= N); |
1880 | return (num_elements * max_len - 1) * sizeof (HOST_WIDE_INT); |
1881 | } |
1882 | |
/* This macro is used in structures that end with a trailing_wide_ints field
   called FIELD.  It declares get_NAME() and set_NAME() methods to access
   element I of FIELD.  The setter accepts anything that can be assigned
   to a trailing_wide_int.  */
#define TRAILING_WIDE_INT_ACCESSOR(NAME, FIELD, I) \
  trailing_wide_int get_##NAME () { return FIELD[I]; } \
  template <typename T> void set_##NAME (const T &x) { FIELD[I] = x; }
1889 | |
namespace wi
{
  /* Implementation of int_traits for primitive integer types like "int".
     SIGNED_P says whether T is a signed type.  */
  template <typename T, bool signed_p>
  struct primitive_int_traits
  {
    static const enum precision_type precision_type = FLEXIBLE_PRECISION;
    static const bool host_dependent_precision = true;
    static const bool is_sign_extended = true;
    static const bool needs_write_val_arg = false;
    static unsigned int get_precision (T);
    static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int, T);
  };
}
1904 | |
/* The precision of a primitive type is simply its width in bits.  */
template <typename T, bool signed_p>
inline unsigned int
wi::primitive_int_traits <T, signed_p>::get_precision (T)
{
  return sizeof (T) * CHAR_BIT;
}
1911 | |
/* Store X in SCRATCH and return a reference to the encoding with
   precision PRECISION.  */
template <typename T, bool signed_p>
inline wi::storage_ref
wi::primitive_int_traits <T, signed_p>::decompose (HOST_WIDE_INT *scratch,
						   unsigned int precision, T x)
{
  scratch[0] = x;
  /* One block suffices unless an unsigned value with the top bit set
     needs zero-extending beyond a HWI.  */
  if (signed_p || scratch[0] >= 0 || precision <= HOST_BITS_PER_WIDE_INT)
    return wi::storage_ref (scratch, 1, precision);
  scratch[1] = 0;
  return wi::storage_ref (scratch, 2, precision);
}
1923 | |
/* Allow primitive C types to be used in wi:: routines.  Each
   specialization records whether the type is signed.  */
namespace wi
{
  template <>
  struct int_traits <unsigned char>
    : public primitive_int_traits <unsigned char, false> {};

  template <>
  struct int_traits <unsigned short>
    : public primitive_int_traits <unsigned short, false> {};

  template <>
  struct int_traits <int>
    : public primitive_int_traits <int, true> {};

  template <>
  struct int_traits <unsigned int>
    : public primitive_int_traits <unsigned int, false> {};

  template <>
  struct int_traits <long>
    : public primitive_int_traits <long, true> {};

  template <>
  struct int_traits <unsigned long>
    : public primitive_int_traits <unsigned long, false> {};

#if defined HAVE_LONG_LONG
  template <>
  struct int_traits <long long>
    : public primitive_int_traits <long long, true> {};

  template <>
  struct int_traits <unsigned long long>
    : public primitive_int_traits <unsigned long long, false> {};
#endif
}
1961 | |
namespace wi
{
  /* Stores HWI-sized integer VAL, treating it as having signedness SGN
     and precision PRECISION.  */
  class hwi_with_prec
  {
  public:
    hwi_with_prec () {}
    hwi_with_prec (HOST_WIDE_INT, unsigned int, signop);
    HOST_WIDE_INT val;
    unsigned int precision;
    signop sgn;
  };

  /* Factory helpers for signed and unsigned HWIs.  */
  hwi_with_prec shwi (HOST_WIDE_INT, unsigned int);
  hwi_with_prec uhwi (unsigned HOST_WIDE_INT, unsigned int);

  /* Common small constants at a given precision.  */
  hwi_with_prec minus_one (unsigned int);
  hwi_with_prec zero (unsigned int);
  hwi_with_prec one (unsigned int);
  hwi_with_prec two (unsigned int);
}
1984 | |
1985 | inline wi::hwi_with_prec::hwi_with_prec (HOST_WIDE_INT v, unsigned int p, |
1986 | signop s) |
1987 | : precision (p), sgn (s) |
1988 | { |
1989 | if (precision < HOST_BITS_PER_WIDE_INT) |
1990 | val = sext_hwi (src: v, prec: precision); |
1991 | else |
1992 | val = v; |
1993 | } |
1994 | |
/* Return a signed integer that has value VAL and precision PRECISION.  */
inline wi::hwi_with_prec
wi::shwi (HOST_WIDE_INT val, unsigned int precision)
{
  return hwi_with_prec (val, precision, SIGNED);
}
2001 | |
/* Return an unsigned integer that has value VAL and precision PRECISION.  */
inline wi::hwi_with_prec
wi::uhwi (unsigned HOST_WIDE_INT val, unsigned int precision)
{
  /* The conversion to HOST_WIDE_INT is implementation-defined for
     values with the top bit set; the UNSIGNED tag preserves intent.  */
  return hwi_with_prec (val, precision, UNSIGNED);
}
2008 | |
2009 | /* Return a wide int of -1 with precision PRECISION. */ |
2010 | inline wi::hwi_with_prec |
2011 | wi::minus_one (unsigned int precision) |
2012 | { |
2013 | return wi::shwi (val: -1, precision); |
2014 | } |
2015 | |
2016 | /* Return a wide int of 0 with precision PRECISION. */ |
2017 | inline wi::hwi_with_prec |
2018 | wi::zero (unsigned int precision) |
2019 | { |
2020 | return wi::shwi (val: 0, precision); |
2021 | } |
2022 | |
2023 | /* Return a wide int of 1 with precision PRECISION. */ |
2024 | inline wi::hwi_with_prec |
2025 | wi::one (unsigned int precision) |
2026 | { |
2027 | return wi::shwi (val: 1, precision); |
2028 | } |
2029 | |
2030 | /* Return a wide int of 2 with precision PRECISION. */ |
2031 | inline wi::hwi_with_prec |
2032 | wi::two (unsigned int precision) |
2033 | { |
2034 | return wi::shwi (val: 2, precision); |
2035 | } |
2036 | |
namespace wi
{
  /* ints_for<T>::zero (X) returns a zero that, when assigned to a T,
     gives that T the same precision as X.  */
  template<typename T, precision_type = int_traits<T>::precision_type>
  struct ints_for
  {
    /* For fixed-precision types a plain int 0 is enough.  */
    static int zero (const T &) { return 0; }
  };

  /* Variable-precision types need a zero that carries X's precision.  */
  template<typename T>
  struct ints_for<T, VAR_PRECISION>
  {
    static hwi_with_prec zero (const T &);
  };
}
2053 | |
2054 | template<typename T> |
2055 | inline wi::hwi_with_prec |
2056 | wi::ints_for<T, wi::VAR_PRECISION>::zero (const T &x) |
2057 | { |
2058 | return wi::zero (precision: wi::get_precision (x)); |
2059 | } |
2060 | |
namespace wi
{
  /* Traits for hwi_with_prec, a HWI paired with an explicit precision
     and signedness.  */
  template <>
  struct int_traits <wi::hwi_with_prec>
  {
    static const enum precision_type precision_type = VAR_PRECISION;
    /* hwi_with_prec has an explicitly-given precision, rather than the
       precision of HOST_WIDE_INT.  */
    static const bool host_dependent_precision = false;
    static const bool is_sign_extended = true;
    static const bool needs_write_val_arg = false;
    static unsigned int get_precision (const wi::hwi_with_prec &);
    static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
				      const wi::hwi_with_prec &);
  };
}
2077 | |
/* Return the explicitly-stored precision of X.  */
inline unsigned int
wi::int_traits <wi::hwi_with_prec>::get_precision (const wi::hwi_with_prec &x)
{
  return x.precision;
}
2083 | |
/* Store X's value in SCRATCH and return a reference to the encoding.
   PRECISION must match X's own precision.  */
inline wi::storage_ref
wi::int_traits <wi::hwi_with_prec>::
decompose (HOST_WIDE_INT *scratch, unsigned int precision,
	   const wi::hwi_with_prec &x)
{
  gcc_checking_assert (precision == x.precision);
  scratch[0] = x.val;
  /* One block suffices unless an unsigned value with the top bit set
     needs zero-extending beyond a HWI.  */
  if (x.sgn == SIGNED || x.val >= 0 || precision <= HOST_BITS_PER_WIDE_INT)
    return wi::storage_ref (scratch, 1, precision);
  scratch[1] = 0;
  return wi::storage_ref (scratch, 2, precision);
}
2096 | |
/* Private functions for handling large cases out of line.  They take
   individual length and array parameters because that is cheaper for
   the inline caller than constructing an object on the stack and
   passing a reference to it.  (Although many callers use wide_int_refs,
   we generally want those to be removed by SRA.)  */
namespace wi
{
  /* Comparisons.  */
  bool eq_p_large (const HOST_WIDE_INT *, unsigned int,
		   const HOST_WIDE_INT *, unsigned int, unsigned int);
  bool lts_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
		    const HOST_WIDE_INT *, unsigned int);
  bool ltu_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
		    const HOST_WIDE_INT *, unsigned int);
  int cmps_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
		  const HOST_WIDE_INT *, unsigned int);
  int cmpu_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
		  const HOST_WIDE_INT *, unsigned int);
  /* Extensions and single-bit/byte manipulations.  */
  unsigned int sext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
			   unsigned int, unsigned int, unsigned int);
  unsigned int zext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
			   unsigned int, unsigned int, unsigned int);
  unsigned int set_bit_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
			      unsigned int, unsigned int, unsigned int);
  unsigned int bswap_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
			    unsigned int, unsigned int);
  unsigned int bitreverse_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
				 unsigned int, unsigned int);

  /* Shifts.  */
  unsigned int lshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
			     unsigned int, unsigned int, unsigned int);
  unsigned int lrshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
			      unsigned int, unsigned int, unsigned int,
			      unsigned int);
  unsigned int arshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
			      unsigned int, unsigned int, unsigned int,
			      unsigned int);
  /* Bitwise logical operations.  */
  unsigned int and_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
			  const HOST_WIDE_INT *, unsigned int, unsigned int);
  unsigned int and_not_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
			      unsigned int, const HOST_WIDE_INT *,
			      unsigned int, unsigned int);
  unsigned int or_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
			 const HOST_WIDE_INT *, unsigned int, unsigned int);
  unsigned int or_not_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
			     unsigned int, const HOST_WIDE_INT *,
			     unsigned int, unsigned int);
  unsigned int xor_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
			  const HOST_WIDE_INT *, unsigned int, unsigned int);
  /* Arithmetic.  */
  unsigned int add_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
			  const HOST_WIDE_INT *, unsigned int, unsigned int,
			  signop, overflow_type *);
  unsigned int sub_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
			  const HOST_WIDE_INT *, unsigned int, unsigned int,
			  signop, overflow_type *);
  unsigned int mul_internal (HOST_WIDE_INT *, const HOST_WIDE_INT *,
			     unsigned int, const HOST_WIDE_INT *,
			     unsigned int, unsigned int, signop,
			     overflow_type *, bool);
  unsigned int divmod_internal (HOST_WIDE_INT *, unsigned int *,
				HOST_WIDE_INT *, const HOST_WIDE_INT *,
				unsigned int, unsigned int,
				const HOST_WIDE_INT *,
				unsigned int, unsigned int,
				signop, overflow_type *);
}
2162 | |
/* Return the number of bits that integer X can hold.  */
template <typename T>
inline unsigned int
wi::get_precision (const T &x)
{
  return wi::int_traits <T>::get_precision (x);
}
2170 | |
/* Return the number of bits that the result of a binary operation can
   hold when the input operands are X and Y.  */
template <typename T1, typename T2>
inline unsigned int
wi::get_binary_precision (const T1 &x, const T2 &y)
{
  /* Delegate to the traits of the result type of X op Y.  */
  using res_traits = wi::int_traits <WI_BINARY_RESULT (T1, T2)>;
  return res_traits::get_binary_precision (x, y);
}
2180 | |
/* Copy the contents of Y to X, but keeping X's current precision.  */
template <typename T1, typename T2>
inline void
wi::copy (T1 &x, const T2 &y)
{
  unsigned int len = y.get_len ();
  HOST_WIDE_INT *xval = x.write_val (len);
  const HOST_WIDE_INT *yval = y.get_val ();
  /* A do/while is safe because an encoding always has at least one
     block (len >= 1).  */
  unsigned int i = 0;
  do
    xval[i] = yval[i];
  while (++i < len);
  /* For widest_int write_val is called with an exact value, not
     upper bound for len, so nothing is needed further.  */
  if (!wi::int_traits <T1>::needs_write_val_arg)
    x.set_len (len, y.is_sign_extended);
}
2198 | |
/* Return true if X fits in a HOST_WIDE_INT with no loss of precision.  */
template <typename T>
inline bool
wi::fits_shwi_p (const T &x)
{
  WIDE_INT_REF_FOR (T) xi (x);
  /* A canonical one-block encoding is exactly a sign-extended HWI.  */
  return xi.len == 1;
}
2207 | |
/* Return true if X fits in an unsigned HOST_WIDE_INT with no loss of
   precision.  */
template <typename T>
inline bool
wi::fits_uhwi_p (const T &x)
{
  WIDE_INT_REF_FOR (T) xi (x);
  /* Narrow values always fit.  */
  if (xi.precision <= HOST_BITS_PER_WIDE_INT)
    return true;
  /* A single block fits iff it is non-negative.  */
  if (xi.len == 1)
    return xi.slow () >= 0;
  /* Two blocks fit iff the upper block is just a zero extension.  */
  return xi.len == 2 && xi.uhigh () == 0;
}
2221 | |
2222 | /* Return true if X is negative based on the interpretation of SGN. |
2223 | For UNSIGNED, this is always false. */ |
2224 | template <typename T> |
2225 | inline bool |
2226 | wi::neg_p (const T &x, signop sgn) |
2227 | { |
2228 | WIDE_INT_REF_FOR (T) xi (x); |
2229 | if (sgn == UNSIGNED) |
2230 | return false; |
2231 | return xi.sign_mask () < 0; |
2232 | } |
2233 | |
/* Return -1 if the top bit of X is set and 0 if the top bit is clear.  */
template <typename T>
inline HOST_WIDE_INT
wi::sign_mask (const T &x)
{
  WIDE_INT_REF_FOR (T) xi (x);
  return xi.sign_mask ();
}
2242 | |
/* Return true if X == Y.  X and Y must be binary-compatible.  */
template <typename T1, typename T2>
inline bool
wi::eq_p (const T1 &x, const T2 &y)
{
  unsigned int precision = get_binary_precision (x, y);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  if (xi.is_sign_extended && yi.is_sign_extended)
    {
      /* This case reduces to array equality.  */
      if (xi.len != yi.len)
	return false;
      unsigned int i = 0;
      do
	if (xi.val[i] != yi.val[i])
	  return false;
      while (++i != xi.len);
      return true;
    }
  if (LIKELY (yi.len == 1))
    {
      /* XI is only equal to YI if it too has a single HWI.  */
      if (xi.len != 1)
	return false;
      /* Excess bits in xi.val[0] will be signs or zeros, so comparisons
	 with 0 are simple.  */
      if (STATIC_CONSTANT_P (yi.val[0] == 0))
	return xi.val[0] == 0;
      /* Otherwise flush out any excess bits first.  */
      unsigned HOST_WIDE_INT diff = xi.val[0] ^ yi.val[0];
      int excess = HOST_BITS_PER_WIDE_INT - precision;
      if (excess > 0)
	diff <<= excess;
      return diff == 0;
    }
  /* Fall back to the out-of-line comparison for multi-block values.  */
  return eq_p_large (xi.val, xi.len, yi.val, yi.len, precision);
}
2281 | |
/* Return true if X != Y.  X and Y must be binary-compatible.  */
template <typename T1, typename T2>
inline bool
wi::ne_p (const T1 &x, const T2 &y)
{
  return !eq_p (x, y);
}
2289 | |
/* Return true if X < Y when both are treated as signed values.  */
template <typename T1, typename T2>
inline bool
wi::lts_p (const T1 &x, const T2 &y)
{
  unsigned int precision = get_binary_precision (x, y);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  /* We optimize x < y, where y is 64 or fewer bits.  */
  if (wi::fits_shwi_p (yi))
    {
      /* Make lts_p (x, 0) as efficient as wi::neg_p (x).  */
      if (STATIC_CONSTANT_P (yi.val[0] == 0))
	return neg_p (xi);
      /* If x fits directly into a shwi, we can compare directly.  */
      if (wi::fits_shwi_p (xi))
	return xi.to_shwi () < yi.to_shwi ();
      /* If x doesn't fit and is negative, then it must be more
	 negative than any value in y, and hence smaller than y.  */
      if (neg_p (xi))
	return true;
      /* If x is positive, then it must be larger than any value in y,
	 and hence greater than y.  */
      return false;
    }
  /* Optimize the opposite case, if it can be detected at compile time.  */
  if (STATIC_CONSTANT_P (xi.len == 1))
    /* If YI is negative it is lower than the least HWI.
       If YI is positive it is greater than the greatest HWI.  */
    return !neg_p (yi);
  /* Fall back to the out-of-line comparison for multi-block values.  */
  return lts_p_large (xi.val, xi.len, precision, yi.val, yi.len);
}
2322 | |
/* Return true if X < Y when both are treated as unsigned values.  */
template <typename T1, typename T2>
inline bool
wi::ltu_p (const T1 &x, const T2 &y)
{
  unsigned int precision = get_binary_precision (x, y);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  /* Optimize comparisons with constants.  */
  if (STATIC_CONSTANT_P (yi.len == 1 && yi.val[0] >= 0))
    return xi.len == 1 && xi.to_uhwi () < (unsigned HOST_WIDE_INT) yi.val[0];
  if (STATIC_CONSTANT_P (xi.len == 1 && xi.val[0] >= 0))
    return yi.len != 1 || yi.to_uhwi () > (unsigned HOST_WIDE_INT) xi.val[0];
  /* Optimize the case of two HWIs.  The HWIs are implicitly sign-extended
     for precisions greater than HOST_BITS_WIDE_INT, but sign-extending both
     values does not change the result.  */
  if (LIKELY (xi.len + yi.len == 2))
    {
      unsigned HOST_WIDE_INT xl = xi.to_uhwi ();
      unsigned HOST_WIDE_INT yl = yi.to_uhwi ();
      return xl < yl;
    }
  /* Fall back to the out-of-line comparison for multi-block values.  */
  return ltu_p_large (xi.val, xi.len, precision, yi.val, yi.len);
}
2347 | |
2348 | /* Return true if X < Y. Signedness of X and Y is indicated by SGN. */ |
2349 | template <typename T1, typename T2> |
2350 | inline bool |
2351 | wi::lt_p (const T1 &x, const T2 &y, signop sgn) |
2352 | { |
2353 | if (sgn == SIGNED) |
2354 | return lts_p (x, y); |
2355 | else |
2356 | return ltu_p (x, y); |
2357 | } |
2358 | |
/* Return true if X <= Y when both are treated as signed values.
   Implemented as !(Y < X).  */
template <typename T1, typename T2>
inline bool
wi::les_p (const T1 &x, const T2 &y)
{
  return !lts_p (y, x);
}
2366 | |
/* Return true if X <= Y when both are treated as unsigned values.
   Implemented as !(Y < X).  */
template <typename T1, typename T2>
inline bool
wi::leu_p (const T1 &x, const T2 &y)
{
  return !ltu_p (y, x);
}
2374 | |
2375 | /* Return true if X <= Y. Signedness of X and Y is indicated by SGN. */ |
2376 | template <typename T1, typename T2> |
2377 | inline bool |
2378 | wi::le_p (const T1 &x, const T2 &y, signop sgn) |
2379 | { |
2380 | if (sgn == SIGNED) |
2381 | return les_p (x, y); |
2382 | else |
2383 | return leu_p (x, y); |
2384 | } |
2385 | |
/* Return true if X > Y when both are treated as signed values.
   Implemented as Y < X.  */
template <typename T1, typename T2>
inline bool
wi::gts_p (const T1 &x, const T2 &y)
{
  return lts_p (y, x);
}
2393 | |
/* Return true if X > Y when both are treated as unsigned values.
   Implemented as Y < X.  */
template <typename T1, typename T2>
inline bool
wi::gtu_p (const T1 &x, const T2 &y)
{
  return ltu_p (y, x);
}
2401 | |
2402 | /* Return true if X > Y. Signedness of X and Y is indicated by SGN. */ |
2403 | template <typename T1, typename T2> |
2404 | inline bool |
2405 | wi::gt_p (const T1 &x, const T2 &y, signop sgn) |
2406 | { |
2407 | if (sgn == SIGNED) |
2408 | return gts_p (x, y); |
2409 | else |
2410 | return gtu_p (x, y); |
2411 | } |
2412 | |
/* Return true if X >= Y when both are treated as signed values.
   Implemented as !(X < Y).  */
template <typename T1, typename T2>
inline bool
wi::ges_p (const T1 &x, const T2 &y)
{
  return !lts_p (x, y);
}
2420 | |
2421 | /* Return true if X >= Y when both are treated as unsigned values. */ |
2422 | template <typename T1, typename T2> |
2423 | inline bool |
2424 | wi::geu_p (const T1 &x, const T2 &y) |
2425 | { |
2426 | return !ltu_p (x, y); |
2427 | } |
2428 | |
2429 | /* Return true if X >= Y. Signedness of X and Y is indicated by SGN. */ |
2430 | template <typename T1, typename T2> |
2431 | inline bool |
2432 | wi::ge_p (const T1 &x, const T2 &y, signop sgn) |
2433 | { |
2434 | if (sgn == SIGNED) |
2435 | return ges_p (x, y); |
2436 | else |
2437 | return geu_p (x, y); |
2438 | } |
2439 | |
/* Return -1 if X < Y, 0 if X == Y and 1 if X > Y.  Treat both X and Y
   as signed values.  */
template <typename T1, typename T2>
inline int
wi::cmps (const T1 &x, const T2 &y)
{
  unsigned int precision = get_binary_precision (x, y);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  if (wi::fits_shwi_p (yi))
    {
      /* Special case for comparisons with 0: X is negative, zero, or
	 positive, giving -1, 0 or 1 respectively.  */
      if (STATIC_CONSTANT_P (yi.val[0] == 0))
	return neg_p (xi) ? -1 : !(xi.len == 1 && xi.val[0] == 0);
      /* If x fits into a signed HWI, we can compare directly.  */
      if (wi::fits_shwi_p (xi))
	{
	  HOST_WIDE_INT xl = xi.to_shwi ();
	  HOST_WIDE_INT yl = yi.to_shwi ();
	  return xl < yl ? -1 : xl > yl;
	}
      /* If x doesn't fit and is negative, then it must be more
	 negative than any signed HWI, and hence smaller than y.  */
      if (neg_p (xi))
	return -1;
      /* If x is positive, then it must be larger than any signed HWI,
	 and hence greater than y.  */
      return 1;
    }
  /* Optimize the opposite case, if it can be detected at compile time.  */
  if (STATIC_CONSTANT_P (xi.len == 1))
    /* If YI is negative it is lower than the least HWI.
       If YI is positive it is greater than the greatest HWI.  */
    return neg_p (yi) ? 1 : -1;
  /* General multiword comparison, handled out of line.  */
  return cmps_large (xi.val, xi.len, precision, yi.val, yi.len);
}
2476 | |
/* Return -1 if X < Y, 0 if X == Y and 1 if X > Y.  Treat both X and Y
   as unsigned values.  */
template <typename T1, typename T2>
inline int
wi::cmpu (const T1 &x, const T2 &y)
{
  unsigned int precision = get_binary_precision (x, y);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  /* Optimize comparisons with constants.  */
  if (STATIC_CONSTANT_P (yi.len == 1 && yi.val[0] >= 0))
    {
      /* If XI doesn't fit in a HWI then it must be larger than YI.  */
      if (xi.len != 1)
	return 1;
      /* Otherwise compare directly.  */
      unsigned HOST_WIDE_INT xl = xi.to_uhwi ();
      unsigned HOST_WIDE_INT yl = yi.val[0];
      return xl < yl ? -1 : xl > yl;
    }
  /* Same optimization with the operands swapped.  */
  if (STATIC_CONSTANT_P (xi.len == 1 && xi.val[0] >= 0))
    {
      /* If YI doesn't fit in a HWI then it must be larger than XI.  */
      if (yi.len != 1)
	return -1;
      /* Otherwise compare directly.  */
      unsigned HOST_WIDE_INT xl = xi.val[0];
      unsigned HOST_WIDE_INT yl = yi.to_uhwi ();
      return xl < yl ? -1 : xl > yl;
    }
  /* Optimize the case of two HWIs.  The HWIs are implicitly sign-extended
     for precisions greater than HOST_BITS_PER_WIDE_INT, but sign-extending
     both values does not change the result.  */
  if (LIKELY (xi.len + yi.len == 2))
    {
      unsigned HOST_WIDE_INT xl = xi.to_uhwi ();
      unsigned HOST_WIDE_INT yl = yi.to_uhwi ();
      return xl < yl ? -1 : xl > yl;
    }
  /* General multiword comparison, handled out of line.  */
  return cmpu_large (xi.val, xi.len, precision, yi.val, yi.len);
}
2518 | |
2519 | /* Return -1 if X < Y, 0 if X == Y and 1 if X > Y. Signedness of |
2520 | X and Y indicated by SGN. */ |
2521 | template <typename T1, typename T2> |
2522 | inline int |
2523 | wi::cmp (const T1 &x, const T2 &y, signop sgn) |
2524 | { |
2525 | if (sgn == SIGNED) |
2526 | return cmps (x, y); |
2527 | else |
2528 | return cmpu (x, y); |
2529 | } |
2530 | |
/* Return ~x.  */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::bit_not (const T &x)
{
  WI_UNARY_RESULT_VAR (result, val, T, x);
  WIDE_INT_REF_FOR (T) xi (x, get_precision (result));
  /* Result types with needs_write_val_arg set require their storage
     to be allocated explicitly; ~X needs no more HWIs than X.  */
  if (result.needs_write_val_arg)
    val = result.write_val (xi.len);
  /* Complementing every significant HWI complements the whole value,
     and the number of significant HWIs is unchanged.  */
  for (unsigned int i = 0; i < xi.len; ++i)
    val[i] = ~xi.val[i];
  result.set_len (xi.len);
  return result;
}
2545 | |
/* Return -x, computed as 0 - X.  Overflow (negating the minimum
   value) is not reported; use the two-argument overload for that.  */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::neg (const T &x)
{
  return sub (0, x);
}
2553 | |
/* Return -x.  Indicate in *OVERFLOW if performing the negation would
   cause an overflow.  */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::neg (const T &x, overflow_type *overflow)
{
  /* Negation overflows only for the value whose sign bit is the sole
     set bit (the most negative value), which has no positive
     counterpart in the same precision.  */
  *overflow = only_sign_bit_p (x) ? OVF_OVERFLOW : OVF_NONE;
  return sub (0, x);
}
2563 | |
2564 | /* Return the absolute value of x. */ |
2565 | template <typename T> |
2566 | inline WI_UNARY_RESULT (T) |
2567 | wi::abs (const T &x) |
2568 | { |
2569 | return neg_p (x) ? neg (x) : WI_UNARY_RESULT (T) (x); |
2570 | } |
2571 | |
/* Return the result of sign-extending the low OFFSET bits of X.  */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::sext (const T &x, unsigned int offset)
{
  WI_UNARY_RESULT_VAR (result, val, T, x);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T) xi (x, precision);

  /* Reserve enough HWIs for either the input or the extended field,
     whichever is longer.  */
  if (result.needs_write_val_arg)
    val = result.write_val (MAX (xi.len,
				 CEIL (offset, HOST_BITS_PER_WIDE_INT)));
  /* OFFSET == HOST_BITS_PER_WIDE_INT is safe in this fast path because
     sign-extending a full HWI is a no-op (contrast zext, where a set
     top bit would need an extra zero HWI).  */
  if (offset <= HOST_BITS_PER_WIDE_INT)
    {
      val[0] = sext_hwi (xi.ulow (), offset);
      result.set_len (1, true);
    }
  else
    result.set_len (sext_large (val, xi.val, xi.len, precision, offset));
  return result;
}
2593 | |
/* Return the result of zero-extending the low OFFSET bits of X.  */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::zext (const T &x, unsigned int offset)
{
  WI_UNARY_RESULT_VAR (result, val, T, x);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T) xi (x, precision);

  /* This is not just an optimization, it is actually required to
     maintain canonicalization: extending by the full precision or
     more must leave the value (and its representation) unchanged.  */
  if (offset >= precision)
    {
      wi::copy (result, xi);
      return result;
    }

  /* Reserve enough HWIs for the input or for the zero-extended field
     plus the extra HWI that may be needed to hold the zero top bit.  */
  if (result.needs_write_val_arg)
    val = result.write_val (MAX (xi.len,
				 offset / HOST_BITS_PER_WIDE_INT + 1));
  /* In these cases we know that at least the top bit will be clear,
     so no sign extension is necessary.  */
  if (offset < HOST_BITS_PER_WIDE_INT)
    {
      val[0] = zext_hwi (xi.ulow (), offset);
      result.set_len (1, true);
    }
  else
    result.set_len (zext_large (val, xi.val, xi.len, precision, offset), true);
  return result;
}
2625 | |
2626 | /* Return the result of extending the low OFFSET bits of X according to |
2627 | signedness SGN. */ |
2628 | template <typename T> |
2629 | inline WI_UNARY_RESULT (T) |
2630 | wi::ext (const T &x, unsigned int offset, signop sgn) |
2631 | { |
2632 | return sgn == SIGNED ? sext (x, offset) : zext (x, offset); |
2633 | } |
2634 | |
/* Return an integer that represents X | (1 << bit).  */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::set_bit (const T &x, unsigned int bit)
{
  WI_UNARY_RESULT_VAR (result, val, T, x);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T) xi (x, precision);
  /* Reserve enough HWIs for the input or for the HWI containing BIT,
     whichever is longer.  */
  if (result.needs_write_val_arg)
    val = result.write_val (MAX (xi.len,
				 bit / HOST_BITS_PER_WIDE_INT + 1));
  /* Fast path: the whole value fits in one HWI, so BIT indexes into
     the low HWI.  */
  if (precision <= HOST_BITS_PER_WIDE_INT)
    {
      val[0] = xi.ulow () | (HOST_WIDE_INT_1U << bit);
      result.set_len (1);
    }
  else
    result.set_len (set_bit_large (val, xi.val, xi.len, precision, bit));
  return result;
}
2655 | |
/* Byte swap the integer X.
   ??? This always swaps 8-bit octets, regardless of BITS_PER_UNIT.
   This function requires X's precision to be a multiple of 16 bits,
   so care needs to be taken for targets where BITS_PER_UNIT != 8.  */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::bswap (const T &x)
{
  WI_UNARY_RESULT_VAR (result, val, T, x);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T) xi (x, precision);
  /* Byte swapping is only meaningful for a fixed precision, so reject
     result types that would need explicit storage allocation.  */
  static_assert (!result.needs_write_val_arg,
		 "bswap on widest_int makes no sense" );
  result.set_len (bswap_large (val, xi.val, xi.len, precision));
  return result;
}
2672 | |
/* Bitreverse the integer X, i.e. reverse the order of its PRECISION bits.  */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::bitreverse (const T &x)
{
  WI_UNARY_RESULT_VAR (result, val, T, x);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T) xi (x, precision);
  /* Bit reversal is only meaningful for a fixed precision, so reject
     result types that would need explicit storage allocation.  */
  static_assert (!result.needs_write_val_arg,
		 "bitreverse on widest_int makes no sense" );
  result.set_len (bitreverse_large (val, xi.val, xi.len, precision));
  return result;
}
2686 | |
2687 | /* Return the mininum of X and Y, treating them both as having |
2688 | signedness SGN. */ |
2689 | template <typename T1, typename T2> |
2690 | inline WI_BINARY_RESULT (T1, T2) |
2691 | wi::min (const T1 &x, const T2 &y, signop sgn) |
2692 | { |
2693 | WI_BINARY_RESULT_VAR (result, val ATTRIBUTE_UNUSED, T1, x, T2, y); |
2694 | unsigned int precision = get_precision (result); |
2695 | if (wi::le_p (x, y, sgn)) |
2696 | wi::copy (result, WIDE_INT_REF_FOR (T1) (x, precision)); |
2697 | else |
2698 | wi::copy (result, WIDE_INT_REF_FOR (T2) (y, precision)); |
2699 | return result; |
2700 | } |
2701 | |
/* Return the minimum of X and Y, treating both as signed values.
   Convenience wrapper for wi::min with SGN == SIGNED.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::smin (const T1 &x, const T2 &y)
{
  return wi::min (x, y, SIGNED);
}
2709 | |
/* Return the minimum of X and Y, treating both as unsigned values.
   Convenience wrapper for wi::min with SGN == UNSIGNED.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::umin (const T1 &x, const T2 &y)
{
  return wi::min (x, y, UNSIGNED);
}
2717 | |
2718 | /* Return the maxinum of X and Y, treating them both as having |
2719 | signedness SGN. */ |
2720 | template <typename T1, typename T2> |
2721 | inline WI_BINARY_RESULT (T1, T2) |
2722 | wi::max (const T1 &x, const T2 &y, signop sgn) |
2723 | { |
2724 | WI_BINARY_RESULT_VAR (result, val ATTRIBUTE_UNUSED, T1, x, T2, y); |
2725 | unsigned int precision = get_precision (result); |
2726 | if (wi::ge_p (x, y, sgn)) |
2727 | wi::copy (result, WIDE_INT_REF_FOR (T1) (x, precision)); |
2728 | else |
2729 | wi::copy (result, WIDE_INT_REF_FOR (T2) (y, precision)); |
2730 | return result; |
2731 | } |
2732 | |
/* Return the maximum of X and Y, treating both as signed values.
   Convenience wrapper for wi::max with SGN == SIGNED.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::smax (const T1 &x, const T2 &y)
{
  return wi::max (x, y, SIGNED);
}
2740 | |
/* Return the maximum of X and Y, treating both as unsigned values.
   Convenience wrapper for wi::max with SGN == UNSIGNED.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::umax (const T1 &x, const T2 &y)
{
  return wi::max (x, y, UNSIGNED);
}
2748 | |
/* Return X & Y.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::bit_and (const T1 &x, const T2 &y)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  /* The result is known to be in sign-extended form only if both
     inputs are.  */
  bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
  /* The AND needs no more HWIs than the longer operand.  */
  if (result.needs_write_val_arg)
    val = result.write_val (MAX (xi.len, yi.len));
  /* Fast path: both operands are single-HWI values.  */
  if (LIKELY (xi.len + yi.len == 2))
    {
      val[0] = xi.ulow () & yi.ulow ();
      result.set_len (1, is_sign_extended);
    }
  else
    result.set_len (and_large (val, xi.val, xi.len, yi.val, yi.len,
			       precision), is_sign_extended);
  return result;
}
2771 | |
/* Return X & ~Y.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::bit_and_not (const T1 &x, const T2 &y)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  /* The result is known to be in sign-extended form only if both
     inputs are.  */
  bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
  /* The result needs no more HWIs than the longer operand.  */
  if (result.needs_write_val_arg)
    val = result.write_val (MAX (xi.len, yi.len));
  /* Fast path: both operands are single-HWI values.  */
  if (LIKELY (xi.len + yi.len == 2))
    {
      val[0] = xi.ulow () & ~yi.ulow ();
      result.set_len (1, is_sign_extended);
    }
  else
    result.set_len (and_not_large (val, xi.val, xi.len, yi.val, yi.len,
				   precision), is_sign_extended);
  return result;
}
2794 | |
/* Return X | Y.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::bit_or (const T1 &x, const T2 &y)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  /* The result is known to be in sign-extended form only if both
     inputs are.  */
  bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
  /* The OR needs no more HWIs than the longer operand.  */
  if (result.needs_write_val_arg)
    val = result.write_val (MAX (xi.len, yi.len));
  /* Fast path: both operands are single-HWI values.  */
  if (LIKELY (xi.len + yi.len == 2))
    {
      val[0] = xi.ulow () | yi.ulow ();
      result.set_len (1, is_sign_extended);
    }
  else
    result.set_len (or_large (val, xi.val, xi.len,
			      yi.val, yi.len, precision), is_sign_extended);
  return result;
}
2817 | |
/* Return X | ~Y.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::bit_or_not (const T1 &x, const T2 &y)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  /* The result is known to be in sign-extended form only if both
     inputs are.  */
  bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
  /* The result needs no more HWIs than the longer operand.  */
  if (result.needs_write_val_arg)
    val = result.write_val (MAX (xi.len, yi.len));
  /* Fast path: both operands are single-HWI values.  */
  if (LIKELY (xi.len + yi.len == 2))
    {
      val[0] = xi.ulow () | ~yi.ulow ();
      result.set_len (1, is_sign_extended);
    }
  else
    result.set_len (or_not_large (val, xi.val, xi.len, yi.val, yi.len,
				  precision), is_sign_extended);
  return result;
}
2840 | |
/* Return X ^ Y.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::bit_xor (const T1 &x, const T2 &y)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  /* The result is known to be in sign-extended form only if both
     inputs are.  */
  bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
  /* The XOR needs no more HWIs than the longer operand.  */
  if (result.needs_write_val_arg)
    val = result.write_val (MAX (xi.len, yi.len));
  /* Fast path: both operands are single-HWI values.  */
  if (LIKELY (xi.len + yi.len == 2))
    {
      val[0] = xi.ulow () ^ yi.ulow ();
      result.set_len (1, is_sign_extended);
    }
  else
    result.set_len (xor_large (val, xi.val, xi.len,
			       yi.val, yi.len, precision), is_sign_extended);
  return result;
}
2863 | |
/* Return X + Y.  Overflow is not reported; use the four-argument
   overload for that.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::add (const T1 &x, const T2 &y)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  /* A sum can need one more HWI than its longer operand (the carry).  */
  if (result.needs_write_val_arg)
    val = result.write_val (MAX (xi.len, yi.len) + 1);
  if (precision <= HOST_BITS_PER_WIDE_INT)
    {
      val[0] = xi.ulow () + yi.ulow ();
      result.set_len (1);
    }
  /* If the precision is known at compile time to be greater than
     HOST_BITS_PER_WIDE_INT, we can optimize the single-HWI case
     knowing that (a) all bits in those HWIs are significant and
     (b) the result has room for at least two HWIs.  This provides
     a fast path for things like offset_int and widest_int.

     The STATIC_CONSTANT_P test prevents this path from being
     used for wide_ints.  wide_ints with precisions greater than
     HOST_BITS_PER_WIDE_INT are relatively rare and there's not much
     point handling them inline.  */
  else if (STATIC_CONSTANT_P (precision > HOST_BITS_PER_WIDE_INT)
	   && LIKELY (xi.len + yi.len == 2))
    {
      unsigned HOST_WIDE_INT xl = xi.ulow ();
      unsigned HOST_WIDE_INT yl = yi.ulow ();
      unsigned HOST_WIDE_INT resultl = xl + yl;
      val[0] = resultl;
      /* VAL[1] carries the true sign, which is the opposite of
	 RESULTL's sign bit; it is only significant when the len of 2
	 below is used, i.e. when signed overflow occurred.  */
      val[1] = (HOST_WIDE_INT) resultl < 0 ? 0 : -1;
      /* Len is 2 iff the result's sign bit differs from the sign bits
	 of both operands, the standard signed-overflow condition.  */
      result.set_len (1 + (((resultl ^ xl) & (resultl ^ yl))
			   >> (HOST_BITS_PER_WIDE_INT - 1)));
    }
  else
    result.set_len (add_large (val, xi.val, xi.len,
			       yi.val, yi.len, precision,
			       UNSIGNED, 0));
  return result;
}
2907 | |
/* Return X + Y.  Treat X and Y as having the signedness given by SGN
   and indicate in *OVERFLOW whether the operation overflowed.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::add (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  /* A sum can need one more HWI than its longer operand (the carry).  */
  if (result.needs_write_val_arg)
    val = result.write_val (MAX (xi.len, yi.len) + 1);
  if (precision <= HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT xl = xi.ulow ();
      unsigned HOST_WIDE_INT yl = yi.ulow ();
      unsigned HOST_WIDE_INT resultl = xl + yl;
      if (sgn == SIGNED)
	{
	  /* Signed overflow: the result's sign bit (at PRECISION)
	     differs from the sign bits of both operands.  */
	  if ((((resultl ^ xl) & (resultl ^ yl))
	       >> (precision - 1)) & 1)
	    {
	      /* Distinguish the direction by comparing the wrapped
		 result against one operand.  */
	      if (xl > resultl)
		*overflow = OVF_UNDERFLOW;
	      else if (xl < resultl)
		*overflow = OVF_OVERFLOW;
	      else
		*overflow = OVF_NONE;
	    }
	  else
	    *overflow = OVF_NONE;
	}
      else
	/* Unsigned overflow: compare the low PRECISION bits (shifted
	   to the top of the HWI); a sum smaller than an operand means
	   the addition wrapped.  */
	*overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision))
		     < (xl << (HOST_BITS_PER_WIDE_INT - precision)))
	  ? OVF_OVERFLOW : OVF_NONE;
      val[0] = resultl;
      result.set_len (1);
    }
  else
    result.set_len (add_large (val, xi.val, xi.len,
			       yi.val, yi.len, precision,
			       sgn, overflow));
  return result;
}
2953 | |
/* Return X - Y.  Overflow is not reported; use the four-argument
   overload for that.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::sub (const T1 &x, const T2 &y)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  /* A difference can need one more HWI than its longer operand.  */
  if (result.needs_write_val_arg)
    val = result.write_val (MAX (xi.len, yi.len) + 1);
  if (precision <= HOST_BITS_PER_WIDE_INT)
    {
      val[0] = xi.ulow () - yi.ulow ();
      result.set_len (1);
    }
  /* If the precision is known at compile time to be greater than
     HOST_BITS_PER_WIDE_INT, we can optimize the single-HWI case
     knowing that (a) all bits in those HWIs are significant and
     (b) the result has room for at least two HWIs.  This provides
     a fast path for things like offset_int and widest_int.

     The STATIC_CONSTANT_P test prevents this path from being
     used for wide_ints.  wide_ints with precisions greater than
     HOST_BITS_PER_WIDE_INT are relatively rare and there's not much
     point handling them inline.  */
  else if (STATIC_CONSTANT_P (precision > HOST_BITS_PER_WIDE_INT)
	   && LIKELY (xi.len + yi.len == 2))
    {
      unsigned HOST_WIDE_INT xl = xi.ulow ();
      unsigned HOST_WIDE_INT yl = yi.ulow ();
      unsigned HOST_WIDE_INT resultl = xl - yl;
      val[0] = resultl;
      /* VAL[1] carries the true sign, which is the opposite of
	 RESULTL's sign bit; it is only significant when the len of 2
	 below is used, i.e. when signed overflow occurred.  */
      val[1] = (HOST_WIDE_INT) resultl < 0 ? 0 : -1;
      /* Len is 2 iff the operands' sign bits differ and the result's
	 sign bit differs from X's, the signed-overflow condition for
	 subtraction.  */
      result.set_len (1 + (((resultl ^ xl) & (xl ^ yl))
			   >> (HOST_BITS_PER_WIDE_INT - 1)));
    }
  else
    result.set_len (sub_large (val, xi.val, xi.len,
			       yi.val, yi.len, precision,
			       UNSIGNED, 0));
  return result;
}
2997 | |
/* Return X - Y.  Treat X and Y as having the signedness given by SGN
   and indicate in *OVERFLOW whether the operation overflowed.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::sub (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  /* A difference can need one more HWI than its longer operand.  */
  if (result.needs_write_val_arg)
    val = result.write_val (MAX (xi.len, yi.len) + 1);
  if (precision <= HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT xl = xi.ulow ();
      unsigned HOST_WIDE_INT yl = yi.ulow ();
      unsigned HOST_WIDE_INT resultl = xl - yl;
      if (sgn == SIGNED)
	{
	  /* Signed overflow: the operands' sign bits (at PRECISION)
	     differ and the result's sign bit differs from X's.  */
	  if ((((xl ^ yl) & (resultl ^ xl)) >> (precision - 1)) & 1)
	    {
	      /* Distinguish the direction by comparing the operands.  */
	      if (xl > yl)
		*overflow = OVF_UNDERFLOW;
	      else if (xl < yl)
		*overflow = OVF_OVERFLOW;
	      else
		*overflow = OVF_NONE;
	    }
	  else
	    *overflow = OVF_NONE;
	}
      else
	/* Unsigned underflow: compare the low PRECISION bits (shifted
	   to the top of the HWI); a difference larger than X means
	   the subtraction wrapped.  */
	*overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision))
		     > (xl << (HOST_BITS_PER_WIDE_INT - precision)))
	  ? OVF_UNDERFLOW : OVF_NONE;
      val[0] = resultl;
      result.set_len (1);
    }
  else
    result.set_len (sub_large (val, xi.val, xi.len,
			       yi.val, yi.len, precision,
			       sgn, overflow));
  return result;
}
3042 | |
/* Return X * Y.  Overflow is not reported; use the four-argument
   overload for that.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::mul (const T1 &x, const T2 &y)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  /* A product can need as many HWIs as both operands combined, plus
     slack for the internal representation.  */
  if (result.needs_write_val_arg)
    val = result.write_val (xi.len + yi.len + 2);
  /* Fast path: only the low HWI of the product is significant, and
     unsigned multiplication gives its low PRECISION bits directly.  */
  if (precision <= HOST_BITS_PER_WIDE_INT)
    {
      val[0] = xi.ulow () * yi.ulow ();
      result.set_len (1);
    }
  else
    result.set_len (mul_internal (val, xi.val, xi.len, yi.val, yi.len,
				  precision, UNSIGNED, 0, false));
  return result;
}
3064 | |
/* Return X * Y.  Treat X and Y as having the signedness given by SGN
   and indicate in *OVERFLOW whether the operation overflowed.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::mul (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  /* A product can need as many HWIs as both operands combined, plus
     slack for the internal representation.  */
  if (result.needs_write_val_arg)
    val = result.write_val (xi.len + yi.len + 2);
  /* Overflow detection requires the full product, so always go through
     the out-of-line worker.  */
  result.set_len (mul_internal (val, xi.val, xi.len,
				yi.val, yi.len, precision,
				sgn, overflow, false));
  return result;
}
3082 | |
/* Return X * Y, treating both X and Y as signed values.  Indicate in
   *OVERFLOW whether the operation overflowed.  Convenience wrapper
   for wi::mul with SGN == SIGNED.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::smul (const T1 &x, const T2 &y, overflow_type *overflow)
{
  return mul (x, y, SIGNED, overflow);
}
3091 | |
/* Return X * Y, treating both X and Y as unsigned values.  Indicate in
   *OVERFLOW if the result overflows.  Convenience wrapper for wi::mul
   with SGN == UNSIGNED.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::umul (const T1 &x, const T2 &y, overflow_type *overflow)
{
  return mul (x, y, UNSIGNED, overflow);
}
3100 | |
/* Perform a widening multiplication of X and Y, extending the values
   according to SGN, and return the high part of the result.  */
template <typename T>
inline WI_BINARY_RESULT (T1, T2)
wi::mul_high (const T1 &x, const T2 &y, signop sgn)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  /* The "high part" is only well defined for a fixed precision, so
     reject result types that would need explicit storage allocation.  */
  static_assert (!result.needs_write_val_arg,
		 "mul_high on widest_int doesn't make sense" );
  result.set_len (mul_internal (val, xi.val, xi.len,
				yi.val, yi.len, precision,
				sgn, 0, true));
  return result;
}
3118 | |
/* Return X / Y, rounding towards 0.  Treat X and Y as having the
   signedness given by SGN.  Indicate in *OVERFLOW if the result
   overflows.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::div_trunc (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
{
  WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
  unsigned int precision = get_precision (quotient);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y);

  /* Size the result buffer: an input whose top HWI is negative but
     which is treated as unsigned expands to the full precision, so
     allow for that; otherwise the quotient needs at most one HWI more
     than the dividend.  */
  if (quotient.needs_write_val_arg)
    quotient_val = quotient.write_val ((sgn == UNSIGNED
					&& xi.val[xi.len - 1] < 0)
				       ? CEIL (precision,
					       HOST_BITS_PER_WIDE_INT) + 1
				       : xi.len + 1);
  /* The remainder is not needed, so pass null remainder arguments.  */
  quotient.set_len (divmod_internal (quotient_val, 0, 0, xi.val, xi.len,
				     precision,
				     yi.val, yi.len, yi.precision,
				     sgn, overflow));
  return quotient;
}
3143 | |
/* Return X / Y, rounding towards 0.  Treat X and Y as signed values.
   Convenience wrapper for wi::div_trunc with SGN == SIGNED.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::sdiv_trunc (const T1 &x, const T2 &y)
{
  return div_trunc (x, y, SIGNED);
}
3151 | |
/* Return X / Y, rounding towards 0.  Treat X and Y as unsigned values.
   Convenience wrapper for wi::div_trunc with SGN == UNSIGNED.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::udiv_trunc (const T1 &x, const T2 &y)
{
  return div_trunc (x, y, UNSIGNED);
}
3159 | |
/* Return X / Y, rounding towards -inf.  Treat X and Y as having the
   signedness given by SGN.  Indicate in *OVERFLOW if the result
   overflows.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::div_floor (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
{
  WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
  WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
  unsigned int precision = get_precision (quotient);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y);

  unsigned int remainder_len;
  /* Size the result buffers: an input whose top HWI is negative but
     which is treated as unsigned expands to the full precision;
     otherwise at most one HWI more than the dividend is needed.  */
  if (quotient.needs_write_val_arg)
    {
      unsigned int est_len;
      if (sgn == UNSIGNED && xi.val[xi.len - 1] < 0)
	est_len = CEIL (precision, HOST_BITS_PER_WIDE_INT) + 1;
      else
	est_len = xi.len + 1;
      quotient_val = quotient.write_val (est_len);
      remainder_val = remainder.write_val (est_len);
    }
  quotient.set_len (divmod_internal (quotient_val,
				     &remainder_len, remainder_val,
				     xi.val, xi.len, precision,
				     yi.val, yi.len, yi.precision, sgn,
				     overflow));
  remainder.set_len (remainder_len);
  /* divmod_internal truncates towards zero; when the operands' signs
     differ and the division is inexact, the truncated quotient is one
     greater than the floor, so adjust downwards.  */
  if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn) && remainder != 0)
    return quotient - 1;
  return quotient;
}
3194 | |
/* Return X / Y, rounding towards -inf.  Treat X and Y as signed values.
   Convenience wrapper for wi::div_floor with SGN == SIGNED.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::sdiv_floor (const T1 &x, const T2 &y)
{
  return div_floor (x, y, SIGNED);
}
3202 | |
/* Return X / Y, rounding towards -inf.  Treat X and Y as unsigned
   values.  For unsigned division the quotient is never negative, so
   floor and truncation coincide.  */
/* ??? Why do we have both this and udiv_trunc.  Aren't they the same?  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::udiv_floor (const T1 &x, const T2 &y)
{
  return div_floor (x, y, UNSIGNED);
}
3211 | |
/* Return X / Y, rounding towards +inf.  Treat X and Y as having the
   signedness given by SGN.  Indicate in *OVERFLOW if the result
   overflows.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::div_ceil (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
{
  WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
  WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
  unsigned int precision = get_precision (quotient);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y);

  unsigned int remainder_len;
  /* Size the result buffers: an input whose top HWI is negative but
     which is treated as unsigned expands to the full precision;
     otherwise at most one HWI more than the dividend is needed.  */
  if (quotient.needs_write_val_arg)
    {
      unsigned int est_len;
      if (sgn == UNSIGNED && xi.val[xi.len - 1] < 0)
	est_len = CEIL (precision, HOST_BITS_PER_WIDE_INT) + 1;
      else
	est_len = xi.len + 1;
      quotient_val = quotient.write_val (est_len);
      remainder_val = remainder.write_val (est_len);
    }
  quotient.set_len (divmod_internal (quotient_val,
				     &remainder_len, remainder_val,
				     xi.val, xi.len, precision,
				     yi.val, yi.len, yi.precision, sgn,
				     overflow));
  remainder.set_len (remainder_len);
  /* divmod_internal truncates towards zero; when the operands' signs
     agree and the division is inexact, the truncated quotient is one
     less than the ceiling, so adjust upwards.  */
  if (wi::neg_p (x, sgn) == wi::neg_p (y, sgn) && remainder != 0)
    return quotient + 1;
  return quotient;
}
3246 | |
/* Return X / Y, rounding towards +inf.  Treat X and Y as unsigned values.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::udiv_ceil (const T1 &x, const T2 &y)
{
  return div_ceil (x, y, UNSIGNED);
}
3254 | |
/* Return X / Y, rounding towards nearest with ties away from zero.
   Treat X and Y as having the signedness given by SGN.  Indicate
   in *OVERFLOW if the result overflows.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::div_round (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
{
  WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
  WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
  unsigned int precision = get_precision (quotient);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y);

  unsigned int remainder_len;
  if (quotient.needs_write_val_arg)
    {
      /* Estimate how many HWI blocks the results can need; see
	 div_ceil for the rationale behind the two cases.  */
      unsigned int est_len;
      if (sgn == UNSIGNED && xi.val[xi.len - 1] < 0)
	est_len = CEIL (precision, HOST_BITS_PER_WIDE_INT) + 1;
      else
	est_len = xi.len + 1;
      quotient_val = quotient.write_val (est_len);
      remainder_val = remainder.write_val (est_len);
    }
  quotient.set_len (divmod_internal (quotient_val,
				     &remainder_len, remainder_val,
				     xi.val, xi.len, precision,
				     yi.val, yi.len, yi.precision, sgn,
				     overflow));
  remainder.set_len (remainder_len);

  /* divmod_internal truncates towards zero; adjust the quotient by one
     away from zero when |remainder| >= |y| - |remainder|, i.e. when the
     discarded fraction is at least one half.  */
  if (remainder != 0)
    {
      if (sgn == SIGNED)
	{
	  WI_BINARY_RESULT (T1, T2) abs_remainder = wi::abs (remainder);
	  if (wi::geu_p (abs_remainder, wi::sub (wi::abs (y), abs_remainder)))
	    {
	      /* Rounding direction depends on the sign of the exact
		 quotient, which is negative iff the operand signs
		 differ.  */
	      if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn))
		return quotient - 1;
	      else
		return quotient + 1;
	    }
	}
      else
	{
	  if (wi::geu_p (remainder, wi::sub (y, remainder)))
	    return quotient + 1;
	}
    }
  return quotient;
}
3307 | |
/* Return X / Y, rounding towards 0.  Treat X and Y as having the
   signedness given by SGN.  Store the remainder in *REMAINDER_PTR.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::divmod_trunc (const T1 &x, const T2 &y, signop sgn,
		  WI_BINARY_RESULT (T1, T2) *remainder_ptr)
{
  WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
  WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
  unsigned int precision = get_precision (quotient);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y);

  unsigned int remainder_len;
  if (quotient.needs_write_val_arg)
    {
      /* Estimate how many HWI blocks the results can need; see
	 div_ceil for the rationale behind the two cases.  */
      unsigned int est_len;
      if (sgn == UNSIGNED && xi.val[xi.len - 1] < 0)
	est_len = CEIL (precision, HOST_BITS_PER_WIDE_INT) + 1;
      else
	est_len = xi.len + 1;
      quotient_val = quotient.write_val (est_len);
      remainder_val = remainder.write_val (est_len);
    }
  /* No overflow pointer: callers of divmod_trunc do not ask about
     overflow.  */
  quotient.set_len (divmod_internal (quotient_val,
				     &remainder_len, remainder_val,
				     xi.val, xi.len, precision,
				     yi.val, yi.len, yi.precision, sgn, 0));
  remainder.set_len (remainder_len);

  *remainder_ptr = remainder;
  return quotient;
}
3341 | |
3342 | /* Compute the greatest common divisor of two numbers A and B using |
3343 | Euclid's algorithm. */ |
3344 | template <typename T1, typename T2> |
3345 | inline WI_BINARY_RESULT (T1, T2) |
3346 | wi::gcd (const T1 &a, const T2 &b, signop sgn) |
3347 | { |
3348 | T1 x, y, z; |
3349 | |
3350 | x = wi::abs (a); |
3351 | y = wi::abs (b); |
3352 | |
3353 | while (gt_p (x, 0, sgn)) |
3354 | { |
3355 | z = mod_trunc (y, x, sgn); |
3356 | y = x; |
3357 | x = z; |
3358 | } |
3359 | |
3360 | return y; |
3361 | } |
3362 | |
/* Compute X / Y, rounding towards 0, and return the remainder.
   Treat X and Y as having the signedness given by SGN.  Indicate
   in *OVERFLOW if the division overflows.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::mod_trunc (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
{
  WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
  unsigned int precision = get_precision (remainder);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y);

  unsigned int remainder_len;
  /* Estimate how many HWI blocks the remainder can need; see div_ceil
     for the rationale behind the two cases.  */
  if (remainder.needs_write_val_arg)
    remainder_val = remainder.write_val ((sgn == UNSIGNED
					  && xi.val[xi.len - 1] < 0)
					 ? CEIL (precision,
						 HOST_BITS_PER_WIDE_INT) + 1
					 : xi.len + 1);
  /* Null quotient pointer: only the remainder is wanted.  */
  divmod_internal (0, &remainder_len, remainder_val,
		   xi.val, xi.len, precision,
		   yi.val, yi.len, yi.precision, sgn, overflow);
  remainder.set_len (remainder_len);

  return remainder;
}
3389 | |
/* Compute X / Y, rounding towards 0, and return the remainder.
   Treat X and Y as signed values.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::smod_trunc (const T1 &x, const T2 &y)
{
  return mod_trunc (x, y, SIGNED);
}
3398 | |
/* Compute X / Y, rounding towards 0, and return the remainder.
   Treat X and Y as unsigned values.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::umod_trunc (const T1 &x, const T2 &y)
{
  return mod_trunc (x, y, UNSIGNED);
}
3407 | |
/* Compute X / Y, rounding towards -inf, and return the remainder.
   Treat X and Y as having the signedness given by SGN.  Indicate
   in *OVERFLOW if the division overflows.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::mod_floor (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
{
  WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
  WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
  unsigned int precision = get_precision (quotient);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y);

  unsigned int remainder_len;
  if (quotient.needs_write_val_arg)
    {
      /* Estimate how many HWI blocks the results can need; see
	 div_ceil for the rationale behind the two cases.  */
      unsigned int est_len;
      if (sgn == UNSIGNED && xi.val[xi.len - 1] < 0)
	est_len = CEIL (precision, HOST_BITS_PER_WIDE_INT) + 1;
      else
	est_len = xi.len + 1;
      quotient_val = quotient.write_val (est_len);
      remainder_val = remainder.write_val (est_len);
    }
  quotient.set_len (divmod_internal (quotient_val,
				     &remainder_len, remainder_val,
				     xi.val, xi.len, precision,
				     yi.val, yi.len, yi.precision, sgn,
				     overflow));
  remainder.set_len (remainder_len);

  /* divmod_internal truncates towards zero; when the operand signs
     differ and the division is inexact, the floor quotient is one
     less, so the matching remainder is R + Y.  */
  if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn) && remainder != 0)
    return remainder + y;
  return remainder;
}
3443 | |
/* Compute X / Y, rounding towards -inf, and return the remainder.
   Treat X and Y as unsigned values.  */
/* ??? Why do we have both this and umod_trunc.  Aren't they the same?  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::umod_floor (const T1 &x, const T2 &y)
{
  return mod_floor (x, y, UNSIGNED);
}
3453 | |
/* Compute X / Y, rounding towards +inf, and return the remainder.
   Treat X and Y as having the signedness given by SGN.  Indicate
   in *OVERFLOW if the division overflows.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::mod_ceil (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
{
  WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
  WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
  unsigned int precision = get_precision (quotient);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y);

  unsigned int remainder_len;
  if (quotient.needs_write_val_arg)
    {
      /* Estimate how many HWI blocks the results can need; see
	 div_ceil for the rationale behind the two cases.  */
      unsigned int est_len;
      if (sgn == UNSIGNED && xi.val[xi.len - 1] < 0)
	est_len = CEIL (precision, HOST_BITS_PER_WIDE_INT) + 1;
      else
	est_len = xi.len + 1;
      quotient_val = quotient.write_val (est_len);
      remainder_val = remainder.write_val (est_len);
    }
  quotient.set_len (divmod_internal (quotient_val,
				     &remainder_len, remainder_val,
				     xi.val, xi.len, precision,
				     yi.val, yi.len, yi.precision, sgn,
				     overflow));
  remainder.set_len (remainder_len);

  /* divmod_internal truncates towards zero; when the operand signs
     match and the division is inexact, the ceiling quotient is one
     more, so the matching remainder is R - Y.  */
  if (wi::neg_p (x, sgn) == wi::neg_p (y, sgn) && remainder != 0)
    return remainder - y;
  return remainder;
}
3489 | |
/* Compute X / Y, rounding towards nearest with ties away from zero,
   and return the remainder.  Treat X and Y as having the signedness
   given by SGN.  Indicate in *OVERFLOW if the division overflows.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::mod_round (const T1 &x, const T2 &y, signop sgn, overflow_type *overflow)
{
  WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
  WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
  unsigned int precision = get_precision (quotient);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y);

  unsigned int remainder_len;
  if (quotient.needs_write_val_arg)
    {
      /* Estimate how many HWI blocks the results can need; see
	 div_ceil for the rationale behind the two cases.  */
      unsigned int est_len;
      if (sgn == UNSIGNED && xi.val[xi.len - 1] < 0)
	est_len = CEIL (precision, HOST_BITS_PER_WIDE_INT) + 1;
      else
	est_len = xi.len + 1;
      quotient_val = quotient.write_val (est_len);
      remainder_val = remainder.write_val (est_len);
    }
  quotient.set_len (divmod_internal (quotient_val,
				     &remainder_len, remainder_val,
				     xi.val, xi.len, precision,
				     yi.val, yi.len, yi.precision, sgn,
				     overflow));
  remainder.set_len (remainder_len);

  /* divmod_internal truncates towards zero; when the discarded
     fraction is at least one half (|R| >= |Y| - |R|), the quotient
     moves one step away from zero and the remainder is adjusted by
     +/- Y to match.  */
  if (remainder != 0)
    {
      if (sgn == SIGNED)
	{
	  WI_BINARY_RESULT (T1, T2) abs_remainder = wi::abs (remainder);
	  if (wi::geu_p (abs_remainder, wi::sub (wi::abs (y), abs_remainder)))
	    {
	      /* The exact quotient is negative iff the operand signs
		 differ, which determines the adjustment direction.  */
	      if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn))
		return remainder + y;
	      else
		return remainder - y;
	    }
	}
      else
	{
	  if (wi::geu_p (remainder, wi::sub (y, remainder)))
	    return remainder - y;
	}
    }
  return remainder;
}
3542 | |
3543 | /* Return true if X is a multiple of Y. Treat X and Y as having the |
3544 | signedness given by SGN. */ |
3545 | template <typename T1, typename T2> |
3546 | inline bool |
3547 | wi::multiple_of_p (const T1 &x, const T2 &y, signop sgn) |
3548 | { |
3549 | return wi::mod_trunc (x, y, sgn) == 0; |
3550 | } |
3551 | |
3552 | /* Return true if X is a multiple of Y, storing X / Y in *RES if so. |
3553 | Treat X and Y as having the signedness given by SGN. */ |
3554 | template <typename T1, typename T2> |
3555 | inline bool |
3556 | wi::multiple_of_p (const T1 &x, const T2 &y, signop sgn, |
3557 | WI_BINARY_RESULT (T1, T2) *res) |
3558 | { |
3559 | WI_BINARY_RESULT (T1, T2) remainder; |
3560 | WI_BINARY_RESULT (T1, T2) quotient |
3561 | = divmod_trunc (x, y, sgn, &remainder); |
3562 | if (remainder == 0) |
3563 | { |
3564 | *res = quotient; |
3565 | return true; |
3566 | } |
3567 | return false; |
3568 | } |
3569 | |
/* Return X << Y.  Return 0 if Y is greater than or equal to
   the precision of X.  */
template <typename T1, typename T2>
inline WI_UNARY_RESULT (T1)
wi::lshift (const T1 &x, const T2 &y)
{
  WI_UNARY_RESULT_VAR (result, val, T1, x);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y);
  /* Handle the simple cases quickly.  */
  if (geu_p (yi, precision))
    {
      /* Shifting by the full precision or more gives zero.  */
      if (result.needs_write_val_arg)
	val = result.write_val (1);
      val[0] = 0;
      result.set_len (1);
    }
  else
    {
      unsigned int shift = yi.to_uhwi ();
      if (result.needs_write_val_arg)
	/* Shifting left by SHIFT can add at most
	   SHIFT / HOST_BITS_PER_WIDE_INT + 1 blocks to XI.  */
	val = result.write_val (xi.len + shift / HOST_BITS_PER_WIDE_INT + 1);
      /* For fixed-precision integers like offset_int and widest_int,
	 handle the case where the shift value is constant and the
	 result is a single nonnegative HWI (meaning that we don't
	 need to worry about val[1]).  This is particularly common
	 for converting a byte count to a bit count.

	 For variable-precision integers like wide_int, handle HWI
	 and sub-HWI integers inline.  */
      if (STATIC_CONSTANT_P (xi.precision > HOST_BITS_PER_WIDE_INT)
	  ? (STATIC_CONSTANT_P (shift < HOST_BITS_PER_WIDE_INT - 1)
	     && xi.len == 1
	     && IN_RANGE (xi.val[0], 0, HOST_WIDE_INT_MAX >> shift))
	  : precision <= HOST_BITS_PER_WIDE_INT)
	{
	  val[0] = xi.ulow () << shift;
	  result.set_len (1);
	}
      else
	result.set_len (lshift_large (val, xi.val, xi.len,
				      precision, shift));
    }
  return result;
}
3616 | |
/* Return X >> Y, using a logical shift.  Return 0 if Y is greater than
   or equal to the precision of X.  */
template <typename T1, typename T2>
inline WI_UNARY_RESULT (T1)
wi::lrshift (const T1 &x, const T2 &y)
{
  WI_UNARY_RESULT_VAR (result, val, T1, x);
  /* Do things in the precision of the input rather than the output,
     since the result can be no larger than that.  */
  WIDE_INT_REF_FOR (T1) xi (x);
  WIDE_INT_REF_FOR (T2) yi (y);
  /* Handle the simple cases quickly.  */
  if (geu_p (yi, xi.precision))
    {
      /* Shifting by the full precision or more gives zero.  */
      if (result.needs_write_val_arg)
	val = result.write_val (1);
      val[0] = 0;
      result.set_len (1);
    }
  else
    {
      unsigned int shift = yi.to_uhwi ();
      if (result.needs_write_val_arg)
	{
	  unsigned int est_len = xi.len;
	  if (xi.val[xi.len - 1] < 0 && shift)
	    /* Logical right shift of sign-extended value might need a very
	       large precision e.g. for widest_int.  */
	    est_len = CEIL (xi.precision - shift, HOST_BITS_PER_WIDE_INT) + 1;
	  val = result.write_val (est_len);
	}
      /* For fixed-precision integers like offset_int and widest_int,
	 handle the case where the shift value is constant and the
	 shifted value is a single nonnegative HWI (meaning that all
	 bits above the HWI are zero).  This is particularly common
	 for converting a bit count to a byte count.

	 For variable-precision integers like wide_int, handle HWI
	 and sub-HWI integers inline.  */
      if (STATIC_CONSTANT_P (xi.precision > HOST_BITS_PER_WIDE_INT)
	  ? (shift < HOST_BITS_PER_WIDE_INT
	     && xi.len == 1
	     && xi.val[0] >= 0)
	  : xi.precision <= HOST_BITS_PER_WIDE_INT)
	{
	  val[0] = xi.to_uhwi () >> shift;
	  result.set_len (1);
	}
      else
	result.set_len (lrshift_large (val, xi.val, xi.len, xi.precision,
				       get_precision (result), shift));
    }
  return result;
}
3671 | |
/* Return X >> Y, using an arithmetic shift.  Return a sign mask if
   Y is greater than or equal to the precision of X.  */
template <typename T1, typename T2>
inline WI_UNARY_RESULT (T1)
wi::arshift (const T1 &x, const T2 &y)
{
  WI_UNARY_RESULT_VAR (result, val, T1, x);
  /* Do things in the precision of the input rather than the output,
     since the result can be no larger than that.  */
  WIDE_INT_REF_FOR (T1) xi (x);
  WIDE_INT_REF_FOR (T2) yi (y);
  /* An arithmetic shift never needs more blocks than the input has.  */
  if (result.needs_write_val_arg)
    val = result.write_val (xi.len);
  /* Handle the simple cases quickly.  */
  if (geu_p (yi, xi.precision))
    {
      /* Shifting by the full precision or more leaves only the sign.  */
      val[0] = sign_mask (x);
      result.set_len (1);
    }
  else
    {
      unsigned int shift = yi.to_uhwi ();
      if (xi.precision <= HOST_BITS_PER_WIDE_INT)
	{
	  /* Single-HWI case: shift and re-extend from the new top bit.  */
	  val[0] = sext_hwi (xi.ulow () >> shift, xi.precision - shift);
	  result.set_len (1, true);
	}
      else
	result.set_len (arshift_large (val, xi.val, xi.len, xi.precision,
				       get_precision (result), shift));
    }
  return result;
}
3705 | |
3706 | /* Return X >> Y, using an arithmetic shift if SGN is SIGNED and a |
3707 | logical shift otherwise. */ |
3708 | template <typename T1, typename T2> |
3709 | inline WI_UNARY_RESULT (T1) |
3710 | wi::rshift (const T1 &x, const T2 &y, signop sgn) |
3711 | { |
3712 | if (sgn == UNSIGNED) |
3713 | return lrshift (x, y); |
3714 | else |
3715 | return arshift (x, y); |
3716 | } |
3717 | |
/* Return the result of rotating the low WIDTH bits of X left by Y
   bits and zero-extending the result.  Use a full-width rotate if
   WIDTH is zero.  */
template <typename T1, typename T2>
WI_UNARY_RESULT (T1)
wi::lrotate (const T1 &x, const T2 &y, unsigned int width)
{
  unsigned int precision = get_binary_precision (x, x);
  if (width == 0)
    width = precision;
  /* Reduce the rotate count modulo the rotated width, then build the
     result from a left shift and a complementary logical right shift.  */
  WI_UNARY_RESULT (T2) ymod = umod_trunc (y, width);
  WI_UNARY_RESULT (T1) left = wi::lshift (x, ymod);
  WI_UNARY_RESULT (T1) right
    = wi::lrshift (width != precision ? wi::zext (x, width) : x,
		   wi::sub (width, ymod));
  if (width != precision)
    return wi::zext (left, width) | right;
  return left | right;
}
3737 | |
/* Return the result of rotating the low WIDTH bits of X right by Y
   bits and zero-extending the result.  Use a full-width rotate if
   WIDTH is zero.  */
template <typename T1, typename T2>
WI_UNARY_RESULT (T1)
wi::rrotate (const T1 &x, const T2 &y, unsigned int width)
{
  unsigned int precision = get_binary_precision (x, x);
  if (width == 0)
    width = precision;
  /* Reduce the rotate count modulo the rotated width, then build the
     result from a logical right shift and a complementary left shift.  */
  WI_UNARY_RESULT (T2) ymod = umod_trunc (y, width);
  WI_UNARY_RESULT (T1) right
    = wi::lrshift (width != precision ? wi::zext (x, width) : x, ymod);
  WI_UNARY_RESULT (T1) left = wi::lshift (x, wi::sub (width, ymod));
  if (width != precision)
    return wi::zext (left, width) | right;
  return left | right;
}
3756 | |
3757 | /* Return 0 if the number of 1s in X is even and 1 if the number of 1s |
3758 | is odd. */ |
3759 | inline int |
3760 | wi::parity (const wide_int_ref &x) |
3761 | { |
3762 | return popcount (x) & 1; |
3763 | } |
3764 | |
3765 | /* Extract WIDTH bits from X, starting at BITPOS. */ |
3766 | template <typename T> |
3767 | inline unsigned HOST_WIDE_INT |
3768 | wi:: (const T &x, unsigned int bitpos, unsigned int width) |
3769 | { |
3770 | unsigned precision = get_precision (x); |
3771 | if (precision < bitpos + width) |
3772 | precision = bitpos + width; |
3773 | WIDE_INT_REF_FOR (T) xi (x, precision); |
3774 | |
3775 | /* Handle this rare case after the above, so that we assert about |
3776 | bogus BITPOS values. */ |
3777 | if (width == 0) |
3778 | return 0; |
3779 | |
3780 | unsigned int start = bitpos / HOST_BITS_PER_WIDE_INT; |
3781 | unsigned int shift = bitpos % HOST_BITS_PER_WIDE_INT; |
3782 | unsigned HOST_WIDE_INT res = xi.elt (start); |
3783 | res >>= shift; |
3784 | if (shift + width > HOST_BITS_PER_WIDE_INT) |
3785 | { |
3786 | unsigned HOST_WIDE_INT upper = xi.elt (start + 1); |
3787 | res |= upper << (-shift % HOST_BITS_PER_WIDE_INT); |
3788 | } |
3789 | return zext_hwi (src: res, prec: width); |
3790 | } |
3791 | |
3792 | /* Return the minimum precision needed to store X with sign SGN. */ |
3793 | template <typename T> |
3794 | inline unsigned int |
3795 | wi::min_precision (const T &x, signop sgn) |
3796 | { |
3797 | if (sgn == SIGNED) |
3798 | return get_precision (x) - clrsb (x); |
3799 | else |
3800 | return get_precision (x) - clz (x); |
3801 | } |
3802 | |
/* Define a signed comparison operator OP in terms of the wi:: predicate F.
   WI_SIGNED_BINARY_PREDICATE_RESULT restricts the overload to types for
   which a signed comparison is meaningful.  */
#define SIGNED_BINARY_PREDICATE(OP, F)			\
  template <typename T1, typename T2>			\
    inline WI_SIGNED_BINARY_PREDICATE_RESULT (T1, T2)	\
  OP (const T1 &x, const T2 &y)				\
  {							\
    return wi::F (x, y);				\
  }

SIGNED_BINARY_PREDICATE (operator <, lts_p)
SIGNED_BINARY_PREDICATE (operator <=, les_p)
SIGNED_BINARY_PREDICATE (operator >, gts_p)
SIGNED_BINARY_PREDICATE (operator >=, ges_p)

#undef SIGNED_BINARY_PREDICATE

/* Define a unary operator OP in terms of the wi:: function F.  */
#define UNARY_OPERATOR(OP, F) \
  template<typename T> \
  WI_UNARY_RESULT (generic_wide_int<T>) \
  OP (const generic_wide_int<T> &x) \
  { \
    return wi::F (x); \
  }

/* Define a sign-independent binary predicate OP in terms of the
   wi:: predicate F.  */
#define BINARY_PREDICATE(OP, F) \
  template<typename T1, typename T2> \
  WI_BINARY_PREDICATE_RESULT (T1, T2) \
  OP (const T1 &x, const T2 &y) \
  { \
    return wi::F (x, y); \
  }

/* Define a binary operator OP in terms of the wi:: function F.  */
#define BINARY_OPERATOR(OP, F) \
  template<typename T1, typename T2> \
  WI_BINARY_OPERATOR_RESULT (T1, T2) \
  OP (const T1 &x, const T2 &y) \
  { \
    return wi::F (x, y); \
  }

/* Define a shift operator OP in terms of the wi:: function F; the
   result type comes from the shifted operand alone.  */
#define SHIFT_OPERATOR(OP, F) \
  template<typename T1, typename T2> \
  WI_BINARY_OPERATOR_RESULT (T1, T1) \
  OP (const T1 &x, const T2 &y) \
  { \
    return wi::F (x, y); \
  }

UNARY_OPERATOR (operator ~, bit_not)
UNARY_OPERATOR (operator -, neg)
BINARY_PREDICATE (operator ==, eq_p)
BINARY_PREDICATE (operator !=, ne_p)
BINARY_OPERATOR (operator &, bit_and)
BINARY_OPERATOR (operator |, bit_or)
BINARY_OPERATOR (operator ^, bit_xor)
BINARY_OPERATOR (operator +, add)
BINARY_OPERATOR (operator -, sub)
BINARY_OPERATOR (operator *, mul)
SHIFT_OPERATOR (operator <<, lshift)

#undef UNARY_OPERATOR
#undef BINARY_PREDICATE
#undef BINARY_OPERATOR
#undef SHIFT_OPERATOR
3866 | |
/* Signed right shift operator: X >> Y performs an arithmetic shift.  */
template <typename T1, typename T2>
inline WI_SIGNED_SHIFT_RESULT (T1, T2)
operator >> (const T1 &x, const T2 &y)
{
  return wi::arshift (x, y);
}
3873 | |
/* Division operator: X / Y performs a signed division truncating
   towards zero.  */
template <typename T1, typename T2>
inline WI_SIGNED_SHIFT_RESULT (T1, T2)
operator / (const T1 &x, const T2 &y)
{
  return wi::sdiv_trunc (x, y);
}
3880 | |
/* Modulus operator: X % Y returns the remainder of a signed division
   truncating towards zero.  */
template <typename T1, typename T2>
inline WI_SIGNED_SHIFT_RESULT (T1, T2)
operator % (const T1 &x, const T2 &y)
{
  return wi::smod_trunc (x, y);
}
3887 | |
/* wide_int is transient and must not appear in GC-managed storage,
   so its garbage-collector hooks are deleted.  */
void gt_ggc_mx (generic_wide_int <wide_int_storage> *) = delete;
void gt_pch_nx (generic_wide_int <wide_int_storage> *) = delete;
void gt_pch_nx (generic_wide_int <wide_int_storage> *,
		gt_pointer_operator, void *) = delete;
3892 | |
/* fixed_wide_int contains no pointers, so its garbage-collector hooks
   are no-ops.  */
template<int N>
void
gt_ggc_mx (generic_wide_int <fixed_wide_int_storage <N> > *)
{
}

template<int N>
void
gt_pch_nx (generic_wide_int <fixed_wide_int_storage <N> > *)
{
}

template<int N>
void
gt_pch_nx (generic_wide_int <fixed_wide_int_storage <N> > *,
	   gt_pointer_operator, void *)
{
}
3911 | |
/* widest_int must not appear in GC-managed storage, so its
   garbage-collector hooks are deleted.  */
template<int N>
void gt_ggc_mx (generic_wide_int <widest_int_storage <N> > *) = delete;

template<int N>
void gt_pch_nx (generic_wide_int <widest_int_storage <N> > *) = delete;

template<int N>
void gt_pch_nx (generic_wide_int <widest_int_storage <N> > *,
		gt_pointer_operator, void *) = delete;
3921 | |
/* trailing_wide_ints contains no pointers, so its garbage-collector
   hooks are no-ops.  */
template<int N>
void
gt_ggc_mx (trailing_wide_ints <N> *)
{
}

template<int N>
void
gt_pch_nx (trailing_wide_ints <N> *)
{
}

template<int N>
void
gt_pch_nx (trailing_wide_ints <N> *, gt_pointer_operator, void *)
{
}
3939 | |
namespace wi
{
  /* Used for overloaded functions in which the only other acceptable
     scalar type is a pointer.  It stops a plain 0 from being treated
     as a null pointer.  */
  struct never_used1 {};
  struct never_used2 {};

  /* Minimum and maximum values representable in the given precision
     and signedness.  */
  wide_int min_value (unsigned int, signop);
  wide_int min_value (never_used1 *);
  wide_int min_value (never_used2 *);
  wide_int max_value (unsigned int, signop);
  wide_int max_value (never_used1 *);
  wide_int max_value (never_used2 *);

  /* FIXME: this is target dependent, so should be elsewhere.
     It also seems to assume that CHAR_BIT == BITS_PER_UNIT.  */
  wide_int from_buffer (const unsigned char *, unsigned int);

#ifndef GENERATOR_FILE
  void to_mpz (const wide_int_ref &, mpz_t, signop);
#endif

  /* Mask and single-bit constructors; defined inline below.  */
  wide_int mask (unsigned int, bool, unsigned int);
  wide_int shifted_mask (unsigned int, unsigned int, bool, unsigned int);
  wide_int set_bit_in_zero (unsigned int, unsigned int);
  wide_int insert (const wide_int &x, const wide_int &y, unsigned int,
		   unsigned int);
  wide_int round_down_for_mask (const wide_int &, const wide_int &);
  wide_int round_up_for_mask (const wide_int &, const wide_int &);

  wide_int mod_inv (const wide_int &a, const wide_int &b);

  template <typename T>
  T mask (unsigned int, bool);

  template <typename T>
  T shifted_mask (unsigned int, unsigned int, bool);

  template <typename T>
  T set_bit_in_zero (unsigned int);

  /* Low-level variants that write directly into a block array and
     return the number of blocks written.  */
  unsigned int mask (HOST_WIDE_INT *, unsigned int, bool, unsigned int);
  unsigned int shifted_mask (HOST_WIDE_INT *, unsigned int, unsigned int,
			     bool, unsigned int);
  unsigned int from_array (HOST_WIDE_INT *, const HOST_WIDE_INT *,
			   unsigned int, unsigned int, bool);
}
3988 | |
3989 | /* Return a PRECISION-bit integer in which the low WIDTH bits are set |
3990 | and the other bits are clear, or the inverse if NEGATE_P. */ |
3991 | inline wide_int |
3992 | wi::mask (unsigned int width, bool negate_p, unsigned int precision) |
3993 | { |
3994 | wide_int result = wide_int::create (precision); |
3995 | result.set_len (l: mask (result.write_val (0), width, negate_p, precision)); |
3996 | return result; |
3997 | } |
3998 | |
3999 | /* Return a PRECISION-bit integer in which the low START bits are clear, |
4000 | the next WIDTH bits are set, and the other bits are clear, |
4001 | or the inverse if NEGATE_P. */ |
4002 | inline wide_int |
4003 | wi::shifted_mask (unsigned int start, unsigned int width, bool negate_p, |
4004 | unsigned int precision) |
4005 | { |
4006 | wide_int result = wide_int::create (precision); |
4007 | result.set_len (l: shifted_mask (result.write_val (0), start, width, negate_p, |
4008 | precision)); |
4009 | return result; |
4010 | } |
4011 | |
4012 | /* Return a PRECISION-bit integer in which bit BIT is set and all the |
4013 | others are clear. */ |
4014 | inline wide_int |
4015 | wi::set_bit_in_zero (unsigned int bit, unsigned int precision) |
4016 | { |
4017 | return shifted_mask (start: bit, width: 1, negate_p: false, precision); |
4018 | } |
4019 | |
/* Return an integer of type T in which the low WIDTH bits are set
   and the other bits are clear, or the inverse if NEGATE_P.  */
template <typename T>
inline T
wi::mask (unsigned int width, bool negate_p)
{
  /* Only valid for types with a fixed, nonzero precision.  */
  STATIC_ASSERT (wi::int_traits<T>::precision);
  T result;
  /* WIDTH bits occupy WIDTH / HOST_BITS_PER_WIDE_INT + 1 blocks.  */
  result.set_len (mask (result.write_val (width / HOST_BITS_PER_WIDE_INT + 1),
			width, negate_p, wi::int_traits <T>::precision));
  return result;
}
4032 | |
/* Return an integer of type T in which the low START bits are clear,
   the next WIDTH bits are set, and the other bits are clear, or the
   inverse if NEGATE_P.  */
template <typename T>
inline T
wi::shifted_mask (unsigned int start, unsigned int width, bool negate_p)
{
  /* Only valid for types with a fixed, nonzero precision.  */
  STATIC_ASSERT (wi::int_traits<T>::precision);
  T result;
  unsigned int prec = wi::int_traits <T>::precision;
  /* Estimate the block count from the highest set bit, clamping the
     mask width to the available precision above START.  */
  unsigned int est_len
    = result.needs_write_val_arg
      ? ((start + (width > prec - start ? prec - start : width))
	 / HOST_BITS_PER_WIDE_INT + 1) : 0;
  result.set_len (shifted_mask (result.write_val (est_len), start, width,
				negate_p, prec));
  return result;
}
4051 | |
4052 | /* Return an integer of type T in which bit BIT is set and all the |
4053 | others are clear. */ |
4054 | template <typename T> |
4055 | inline T |
4056 | wi::set_bit_in_zero (unsigned int bit) |
4057 | { |
4058 | return shifted_mask <T> (bit, 1, false); |
4059 | } |
4060 | |
4061 | /* Accumulate a set of overflows into OVERFLOW. */ |
4062 | |
4063 | inline void |
4064 | wi::accumulate_overflow (wi::overflow_type &overflow, |
4065 | wi::overflow_type suboverflow) |
4066 | { |
4067 | if (!suboverflow) |
4068 | return; |
4069 | if (!overflow) |
4070 | overflow = suboverflow; |
4071 | else if (overflow != suboverflow) |
4072 | overflow = wi::OVF_UNKNOWN; |
4073 | } |
4074 | |
4075 | #endif /* WIDE_INT_H */ |
4076 | |