1 | /* Operations with very long integers. -*- C++ -*- |
2 | Copyright (C) 2012-2017 Free Software Foundation, Inc. |
3 | |
4 | This file is part of GCC. |
5 | |
6 | GCC is free software; you can redistribute it and/or modify it |
7 | under the terms of the GNU General Public License as published by the |
8 | Free Software Foundation; either version 3, or (at your option) any |
9 | later version. |
10 | |
11 | GCC is distributed in the hope that it will be useful, but WITHOUT |
12 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
14 | for more details. |
15 | |
16 | You should have received a copy of the GNU General Public License |
17 | along with GCC; see the file COPYING3. If not see |
18 | <http://www.gnu.org/licenses/>. */ |
19 | |
20 | #ifndef WIDE_INT_H |
21 | #define WIDE_INT_H |
22 | |
23 | /* wide-int.[cc|h] implements a class that efficiently performs |
24 | mathematical operations on finite precision integers. wide_ints |
25 | are designed to be transient - they are not for long term storage |
26 | of values. There is tight integration between wide_ints and the |
27 | other longer storage GCC representations (rtl and tree). |
28 | |
29 | The actual precision of a wide_int depends on the flavor. There |
30 | are three predefined flavors: |
31 | |
32 | 1) wide_int (the default). This flavor does the math in the |
33 | precision of its input arguments. It is assumed (and checked) |
34 | that the precisions of the operands and results are consistent. |
35 | This is the most efficient flavor. It is not possible to examine |
36 | bits above the precision that has been specified. Because of |
37 | this, the default flavor has semantics that are simple to |
38 | understand and in general model the underlying hardware that the |
   compiler is targeted for.
40 | |
41 | This flavor must be used at the RTL level of gcc because there |
42 | is, in general, not enough information in the RTL representation |
43 | to extend a value beyond the precision specified in the mode. |
44 | |
45 | This flavor should also be used at the TREE and GIMPLE levels of |
46 | the compiler except for the circumstances described in the |
47 | descriptions of the other two flavors. |
48 | |
49 | The default wide_int representation does not contain any |
50 | information inherent about signedness of the represented value, |
51 | so it can be used to represent both signed and unsigned numbers. |
52 | For operations where the results depend on signedness (full width |
53 | multiply, division, shifts, comparisons, and operations that need |
54 | overflow detected), the signedness must be specified separately. |
55 | |
56 | 2) offset_int. This is a fixed-precision integer that can hold |
57 | any address offset, measured in either bits or bytes, with at |
58 | least one extra sign bit. At the moment the maximum address |
59 | size GCC supports is 64 bits. With 8-bit bytes and an extra |
60 | sign bit, offset_int therefore needs to have at least 68 bits |
61 | of precision. We round this up to 128 bits for efficiency. |
62 | Values of type T are converted to this precision by sign- or |
63 | zero-extending them based on the signedness of T. |
64 | |
65 | The extra sign bit means that offset_int is effectively a signed |
66 | 128-bit integer, i.e. it behaves like int128_t. |
67 | |
68 | Since the values are logically signed, there is no need to |
69 | distinguish between signed and unsigned operations. Sign-sensitive |
70 | comparison operators <, <=, > and >= are therefore supported. |
71 | Shift operators << and >> are also supported, with >> being |
72 | an _arithmetic_ right shift. |
73 | |
74 | [ Note that, even though offset_int is effectively int128_t, |
75 | it can still be useful to use unsigned comparisons like |
76 | wi::leu_p (a, b) as a more efficient short-hand for |
77 | "a >= 0 && a <= b". ] |
78 | |
79 | 3) widest_int. This representation is an approximation of |
80 | infinite precision math. However, it is not really infinite |
81 | precision math as in the GMP library. It is really finite |
82 | precision math where the precision is 4 times the size of the |
83 | largest integer that the target port can represent. |
84 | |
85 | Like offset_int, widest_int is wider than all the values that |
86 | it needs to represent, so the integers are logically signed. |
87 | Sign-sensitive comparison operators <, <=, > and >= are supported, |
88 | as are << and >>. |
89 | |
90 | There are several places in the GCC where this should/must be used: |
91 | |
92 | * Code that does induction variable optimizations. This code |
93 | works with induction variables of many different types at the |
94 | same time. Because of this, it ends up doing many different |
95 | calculations where the operands are not compatible types. The |
96 | widest_int makes this easy, because it provides a field where |
   nothing is lost when converting from any variable.
98 | |
99 | * There are a small number of passes that currently use the |
100 | widest_int that should use the default. These should be |
101 | changed. |
102 | |
103 | There are surprising features of offset_int and widest_int |
104 | that the users should be careful about: |
105 | |
   1) Shifts and rotations are just weird.  You have to specify a
   precision in which the shift or rotate is to happen.  The bits
   above this precision are zeroed.  While this is what you
   want, it is clearly non-obvious.
110 | |
111 | 2) Larger precision math sometimes does not produce the same |
112 | answer as would be expected for doing the math at the proper |
113 | precision. In particular, a multiply followed by a divide will |
114 | produce a different answer if the first product is larger than |
115 | what can be represented in the input precision. |
116 | |
117 | The offset_int and the widest_int flavors are more expensive |
118 | than the default wide int, so in addition to the caveats with these |
   two, the default is the preferred representation.
120 | |
121 | All three flavors of wide_int are represented as a vector of |
122 | HOST_WIDE_INTs. The default and widest_int vectors contain enough elements |
123 | to hold a value of MAX_BITSIZE_MODE_ANY_INT bits. offset_int contains only |
124 | enough elements to hold ADDR_MAX_PRECISION bits. The values are stored |
125 | in the vector with the least significant HOST_BITS_PER_WIDE_INT bits |
126 | in element 0. |
127 | |
128 | The default wide_int contains three fields: the vector (VAL), |
129 | the precision and a length (LEN). The length is the number of HWIs |
130 | needed to represent the value. widest_int and offset_int have a |
131 | constant precision that cannot be changed, so they only store the |
132 | VAL and LEN fields. |
133 | |
134 | Since most integers used in a compiler are small values, it is |
135 | generally profitable to use a representation of the value that is |
136 | as small as possible. LEN is used to indicate the number of |
137 | elements of the vector that are in use. The numbers are stored as |
138 | sign extended numbers as a means of compression. Leading |
139 | HOST_WIDE_INTs that contain strings of either -1 or 0 are removed |
140 | as long as they can be reconstructed from the top bit that is being |
141 | represented. |
142 | |
143 | The precision and length of a wide_int are always greater than 0. |
144 | Any bits in a wide_int above the precision are sign-extended from the |
145 | most significant bit. For example, a 4-bit value 0x8 is represented as |
146 | VAL = { 0xf...fff8 }. However, as an optimization, we allow other integer |
147 | constants to be represented with undefined bits above the precision. |
148 | This allows INTEGER_CSTs to be pre-extended according to TYPE_SIGN, |
149 | so that the INTEGER_CST representation can be used both in TYPE_PRECISION |
150 | and in wider precisions. |
151 | |
152 | There are constructors to create the various forms of wide_int from |
153 | trees, rtl and constants. For trees the options are: |
154 | |
155 | tree t = ...; |
156 | wi::to_wide (t) // Treat T as a wide_int |
157 | wi::to_offset (t) // Treat T as an offset_int |
158 | wi::to_widest (t) // Treat T as a widest_int |
159 | |
160 | All three are light-weight accessors that should have no overhead |
161 | in release builds. If it is useful for readability reasons to |
162 | store the result in a temporary variable, the preferred method is: |
163 | |
164 | wi::tree_to_wide_ref twide = wi::to_wide (t); |
165 | wi::tree_to_offset_ref toffset = wi::to_offset (t); |
166 | wi::tree_to_widest_ref twidest = wi::to_widest (t); |
167 | |
168 | To make an rtx into a wide_int, you have to pair it with a mode. |
169 | The canonical way to do this is with rtx_mode_t as in: |
170 | |
171 | rtx r = ... |
172 | wide_int x = rtx_mode_t (r, mode); |
173 | |
174 | Similarly, a wide_int can only be constructed from a host value if |
175 | the target precision is given explicitly, such as in: |
176 | |
177 | wide_int x = wi::shwi (c, prec); // sign-extend C if necessary |
178 | wide_int y = wi::uhwi (c, prec); // zero-extend C if necessary |
179 | |
180 | However, offset_int and widest_int have an inherent precision and so |
181 | can be initialized directly from a host value: |
182 | |
183 | offset_int x = (int) c; // sign-extend C |
184 | widest_int x = (unsigned int) c; // zero-extend C |
185 | |
186 | It is also possible to do arithmetic directly on rtx_mode_ts and |
187 | constants. For example: |
188 | |
189 | wi::add (r1, r2); // add equal-sized rtx_mode_ts r1 and r2 |
190 | wi::add (r1, 1); // add 1 to rtx_mode_t r1 |
191 | wi::lshift (1, 100); // 1 << 100 as a widest_int |
192 | |
193 | Many binary operations place restrictions on the combinations of inputs, |
194 | using the following rules: |
195 | |
196 | - {rtx, wide_int} op {rtx, wide_int} -> wide_int |
197 | The inputs must be the same precision. The result is a wide_int |
198 | of the same precision |
199 | |
200 | - {rtx, wide_int} op (un)signed HOST_WIDE_INT -> wide_int |
201 | (un)signed HOST_WIDE_INT op {rtx, wide_int} -> wide_int |
202 | The HOST_WIDE_INT is extended or truncated to the precision of |
203 | the other input. The result is a wide_int of the same precision |
204 | as that input. |
205 | |
206 | - (un)signed HOST_WIDE_INT op (un)signed HOST_WIDE_INT -> widest_int |
207 | The inputs are extended to widest_int precision and produce a |
208 | widest_int result. |
209 | |
210 | - offset_int op offset_int -> offset_int |
211 | offset_int op (un)signed HOST_WIDE_INT -> offset_int |
212 | (un)signed HOST_WIDE_INT op offset_int -> offset_int |
213 | |
214 | - widest_int op widest_int -> widest_int |
215 | widest_int op (un)signed HOST_WIDE_INT -> widest_int |
216 | (un)signed HOST_WIDE_INT op widest_int -> widest_int |
217 | |
218 | Other combinations like: |
219 | |
220 | - widest_int op offset_int and |
221 | - wide_int op offset_int |
222 | |
223 | are not allowed. The inputs should instead be extended or truncated |
224 | so that they match. |
225 | |
226 | The inputs to comparison functions like wi::eq_p and wi::lts_p |
227 | follow the same compatibility rules, although their return types |
228 | are different. Unary functions on X produce the same result as |
229 | a binary operation X + X. Shift functions X op Y also produce |
230 | the same result as X + X; the precision of the shift amount Y |
231 | can be arbitrarily different from X. */ |
232 | |
/* The MAX_BITSIZE_MODE_ANY_INT is automatically generated by a very
   early examination of the target's mode file.  The WIDE_INT_MAX_ELTS
   can accommodate at least 1 more bit so that unsigned numbers of that
   mode can be represented as a signed value.  Note that it is still
   possible to create fixed_wide_ints that have precisions greater than
   MAX_BITSIZE_MODE_ANY_INT.  This can be useful when representing a
   double-width multiplication result, for example.  */
#define WIDE_INT_MAX_ELTS \
  ((MAX_BITSIZE_MODE_ANY_INT + HOST_BITS_PER_WIDE_INT) / HOST_BITS_PER_WIDE_INT)

/* The number of bits in a vector of WIDE_INT_MAX_ELTS HWIs.  */
#define WIDE_INT_MAX_PRECISION (WIDE_INT_MAX_ELTS * HOST_BITS_PER_WIDE_INT)
244 | |
/* This is the max size of any pointer on any machine.  It does not
   seem to be as easy to sniff this out of the machine description as
   it is for MAX_BITSIZE_MODE_ANY_INT since targets may support
   multiple address sizes and may have different address sizes for
   different address spaces.  However, currently the largest pointer
   on any platform is 64 bits.  When that changes, then it is likely
   that a target hook should be defined so that targets can make this
   value larger for those targets.  */
#define ADDR_MAX_BITSIZE 64

/* This is the internal precision used when doing any address
   arithmetic.  The '4' is really 3 + 1.  Three of the bits are for
   the number of extra bits needed to do bit addresses and the other bit
   is to allow everything to be signed without losing any precision.
   Then everything is rounded up to the next HWI for efficiency.  */
#define ADDR_MAX_PRECISION \
  ((ADDR_MAX_BITSIZE + 4 + HOST_BITS_PER_WIDE_INT - 1) \
   & ~(HOST_BITS_PER_WIDE_INT - 1))

/* The number of HWIs needed to store an offset_int.  */
#define OFFSET_INT_ELTS (ADDR_MAX_PRECISION / HOST_BITS_PER_WIDE_INT)
266 | |
/* The type of result produced by a binary operation on types T1 and T2.
   Defined purely for brevity.  */
#define WI_BINARY_RESULT(T1, T2) \
  typename wi::binary_traits <T1, T2>::result_type

/* Likewise for binary operators, which excludes the case in which neither
   T1 nor T2 is a wide-int-based type.  */
#define WI_BINARY_OPERATOR_RESULT(T1, T2) \
  typename wi::binary_traits <T1, T2>::operator_result

/* The type of result produced by T1 << T2.  Leads to substitution failure
   if the operation isn't supported.  Defined purely for brevity.  */
#define WI_SIGNED_SHIFT_RESULT(T1, T2) \
  typename wi::binary_traits <T1, T2>::signed_shift_result_type

/* The type of result produced by a sign-agnostic binary predicate on
   types T1 and T2.  This is bool if wide-int operations make sense for
   T1 and T2 and leads to substitution failure otherwise.  */
#define WI_BINARY_PREDICATE_RESULT(T1, T2) \
  typename wi::binary_traits <T1, T2>::predicate_result

/* The type of result produced by a signed binary predicate on types T1 and T2.
   This is bool if signed comparisons make sense for T1 and T2 and leads to
   substitution failure otherwise.  */
#define WI_SIGNED_BINARY_PREDICATE_RESULT(T1, T2) \
  typename wi::binary_traits <T1, T2>::signed_predicate_result

/* The type of result produced by a unary operation on type T.  */
#define WI_UNARY_RESULT(T) \
  typename wi::unary_traits <T>::result_type

/* Define a variable RESULT to hold the result of a binary operation on
   X and Y, which have types T1 and T2 respectively.  Define VAL to
   point to the blocks of RESULT.  Once the user of the macro has
   filled in VAL, it should call RESULT.set_len to set the number
   of initialized blocks.  */
#define WI_BINARY_RESULT_VAR(RESULT, VAL, T1, X, T2, Y) \
  WI_BINARY_RESULT (T1, T2) RESULT = \
    wi::int_traits <WI_BINARY_RESULT (T1, T2)>::get_binary_result (X, Y); \
  HOST_WIDE_INT *VAL = RESULT.write_val ()

/* Similar for the result of a unary operation on X, which has type T.
   A unary result has the same shape as the result of X op X, so
   get_binary_result is reused with X as both operands.  */
#define WI_UNARY_RESULT_VAR(RESULT, VAL, T, X) \
  WI_UNARY_RESULT (T) RESULT = \
    wi::int_traits <WI_UNARY_RESULT (T)>::get_binary_result (X, X); \
  HOST_WIDE_INT *VAL = RESULT.write_val ()
313 | |
/* Forward declarations of the integer template and its storage classes.  */
template <typename T> class generic_wide_int;
template <int N> class fixed_wide_int_storage;
class wide_int_storage;

/* An N-bit integer.  Until we can use typedef templates, use this instead.  */
#define FIXED_WIDE_INT(N) \
  generic_wide_int < fixed_wide_int_storage <N> >

/* The three public flavors described in the overview above.  */
typedef generic_wide_int <wide_int_storage> wide_int;
typedef FIXED_WIDE_INT (ADDR_MAX_PRECISION) offset_int;
typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION) widest_int;
325 | |
/* wi::storage_ref can be a reference to a primitive type,
   so this is the conservatively-correct setting.
   SE says whether the referenced value is known to be sign-extended
   above its precision; HDP whether its precision is host-dependent.  */
template <bool SE, bool HDP = true>
struct wide_int_ref_storage;

/* A reference that makes the conservative assumption (SE == false)
   about sign extension of excess upper bits.  */
typedef generic_wide_int <wide_int_ref_storage <false> > wide_int_ref;

/* This can be used instead of wide_int_ref if the referenced value is
   known to have type T.  It carries across properties of T's representation,
   such as whether excess upper bits in a HWI are defined, and can therefore
   help avoid redundant work.

   The macro could be replaced with a template typedef, once we're able
   to use those.  */
#define WIDE_INT_REF_FOR(T) \
  generic_wide_int \
    <wide_int_ref_storage <wi::int_traits <T>::is_sign_extended, \
			   wi::int_traits <T>::host_dependent_precision> >
344 | |
namespace wi
{
  /* Classifies an integer based on its precision.  */
  enum precision_type {
    /* The integer has both a precision and defined signedness.  This allows
       the integer to be converted to any width, since we know whether to fill
       any extra bits with zeros or signs.  */
    FLEXIBLE_PRECISION,

    /* The integer has a variable precision but no defined signedness.  */
    VAR_PRECISION,

    /* The integer has a constant precision (known at GCC compile time)
       and is signed.  */
    CONST_PRECISION
  };

  /* This class, which has no default implementation, is expected to
     provide the following members:

     static const enum precision_type precision_type;
       Classifies the type of T.

     static const unsigned int precision;
       Only defined if precision_type == CONST_PRECISION.  Specifies the
       precision of all integers of type T.

     static const bool host_dependent_precision;
       True if the precision of T depends (or can depend) on the host.

     static unsigned int get_precision (const T &x)
       Return the number of bits in X.

     static wi::storage_ref *decompose (HOST_WIDE_INT *scratch,
					unsigned int precision, const T &x)
       Decompose X as a PRECISION-bit integer, returning the associated
       wi::storage_ref.  SCRATCH is available as scratch space if needed.
       The routine should assert that PRECISION is acceptable.  */
  template <typename T> struct int_traits;

  /* This class provides a single type, result_type, which specifies the
     type of integer produced by a binary operation whose inputs have
     types T1 and T2.  The definition should be symmetric.  */
  template <typename T1, typename T2,
	    enum precision_type P1 = int_traits <T1>::precision_type,
	    enum precision_type P2 = int_traits <T2>::precision_type>
  struct binary_traits;

  /* The result of a unary operation on T is the same as the result of
     a binary operation on two values of type T.  */
  template <typename T>
  struct unary_traits : public binary_traits <T, T> {};

  /* Specify the result type for each supported combination of binary
     inputs.  Note that CONST_PRECISION and VAR_PRECISION cannot be
     mixed, in order to give stronger type checking.  When both inputs
     are CONST_PRECISION, they must have the same precision.  */

  /* FLEXIBLE op FLEXIBLE: both inputs are host values, so do the
     arithmetic in the widest supported precision (see the overview).  */
  template <typename T1, typename T2>
  struct binary_traits <T1, T2, FLEXIBLE_PRECISION, FLEXIBLE_PRECISION>
  {
    typedef widest_int result_type;
    /* Don't define operators for this combination.  */
  };

  /* FLEXIBLE op VAR: the result is a wide_int.  */
  template <typename T1, typename T2>
  struct binary_traits <T1, T2, FLEXIBLE_PRECISION, VAR_PRECISION>
  {
    typedef wide_int result_type;
    typedef result_type operator_result;
    typedef bool predicate_result;
  };

  /* FLEXIBLE op CONST: the result has the fixed precision of T2.  */
  template <typename T1, typename T2>
  struct binary_traits <T1, T2, FLEXIBLE_PRECISION, CONST_PRECISION>
  {
    /* Spelled out explicitly (rather than through FIXED_WIDE_INT)
       so as not to confuse gengtype.  */
    typedef generic_wide_int < fixed_wide_int_storage
			       <int_traits <T2>::precision> > result_type;
    typedef result_type operator_result;
    typedef bool predicate_result;
    typedef bool signed_predicate_result;
  };

  /* VAR op FLEXIBLE: mirror image of FLEXIBLE op VAR.  */
  template <typename T1, typename T2>
  struct binary_traits <T1, T2, VAR_PRECISION, FLEXIBLE_PRECISION>
  {
    typedef wide_int result_type;
    typedef result_type operator_result;
    typedef bool predicate_result;
  };

  /* CONST op FLEXIBLE: the result has the fixed precision of T1.
     signed_shift_result_type is provided here because the shifted
     value (T1) has constant precision while the shift amount is
     flexible.  */
  template <typename T1, typename T2>
  struct binary_traits <T1, T2, CONST_PRECISION, FLEXIBLE_PRECISION>
  {
    /* Spelled out explicitly (rather than through FIXED_WIDE_INT)
       so as not to confuse gengtype.  */
    typedef generic_wide_int < fixed_wide_int_storage
			       <int_traits <T1>::precision> > result_type;
    typedef result_type operator_result;
    typedef bool predicate_result;
    typedef result_type signed_shift_result_type;
    typedef bool signed_predicate_result;
  };

  /* CONST op CONST: the precisions must match (checked statically).  */
  template <typename T1, typename T2>
  struct binary_traits <T1, T2, CONST_PRECISION, CONST_PRECISION>
  {
    STATIC_ASSERT (int_traits <T1>::precision == int_traits <T2>::precision);
    /* Spelled out explicitly (rather than through FIXED_WIDE_INT)
       so as not to confuse gengtype.  */
    typedef generic_wide_int < fixed_wide_int_storage
			       <int_traits <T1>::precision> > result_type;
    typedef result_type operator_result;
    typedef bool predicate_result;
    typedef result_type signed_shift_result_type;
    typedef bool signed_predicate_result;
  };

  /* VAR op VAR: the inputs must have the same precision and the
     result is a wide_int of that precision (see the overview).  */
  template <typename T1, typename T2>
  struct binary_traits <T1, T2, VAR_PRECISION, VAR_PRECISION>
  {
    typedef wide_int result_type;
    typedef result_type operator_result;
    typedef bool predicate_result;
  };
}
472 | |
/* Public functions for querying and operating on integers.  */
namespace wi
{
  /* Return the number of bits in the argument.  */
  template <typename T>
  unsigned int get_precision (const T &);

  /* Return the precision of the result of a binary operation on the
     two arguments.  */
  template <typename T1, typename T2>
  unsigned int get_binary_precision (const T1 &, const T2 &);

  /* Copy the value of the second argument into the first.  */
  template <typename T1, typename T2>
  void copy (T1 &, const T2 &);

  /* Shorthands for the templated declarations below; #undef'd once the
     declarations are complete.  */
#define UNARY_PREDICATE \
  template <typename T> bool
#define UNARY_FUNCTION \
  template <typename T> WI_UNARY_RESULT (T)
#define BINARY_PREDICATE \
  template <typename T1, typename T2> bool
#define BINARY_FUNCTION \
  template <typename T1, typename T2> WI_BINARY_RESULT (T1, T2)
#define SHIFT_FUNCTION \
  template <typename T1, typename T2> WI_UNARY_RESULT (T1)

  /* Range and sign predicates.  */
  UNARY_PREDICATE fits_shwi_p (const T &);
  UNARY_PREDICATE fits_uhwi_p (const T &);
  UNARY_PREDICATE neg_p (const T &, signop = SIGNED);

  template <typename T>
  HOST_WIDE_INT sign_mask (const T &);

  /* Comparisons.  Forms with an "s" or "u" suffix are the signed and
     unsigned variants of the corresponding signop-taking form (see the
     overview at the top of the file).  */
  BINARY_PREDICATE eq_p (const T1 &, const T2 &);
  BINARY_PREDICATE ne_p (const T1 &, const T2 &);
  BINARY_PREDICATE lt_p (const T1 &, const T2 &, signop);
  BINARY_PREDICATE lts_p (const T1 &, const T2 &);
  BINARY_PREDICATE ltu_p (const T1 &, const T2 &);
  BINARY_PREDICATE le_p (const T1 &, const T2 &, signop);
  BINARY_PREDICATE les_p (const T1 &, const T2 &);
  BINARY_PREDICATE leu_p (const T1 &, const T2 &);
  BINARY_PREDICATE gt_p (const T1 &, const T2 &, signop);
  BINARY_PREDICATE gts_p (const T1 &, const T2 &);
  BINARY_PREDICATE gtu_p (const T1 &, const T2 &);
  BINARY_PREDICATE ge_p (const T1 &, const T2 &, signop);
  BINARY_PREDICATE ges_p (const T1 &, const T2 &);
  BINARY_PREDICATE geu_p (const T1 &, const T2 &);

  /* Three-way comparisons, in the same signed/unsigned flavors.  */
  template <typename T1, typename T2>
  int cmp (const T1 &, const T2 &, signop);

  template <typename T1, typename T2>
  int cmps (const T1 &, const T2 &);

  template <typename T1, typename T2>
  int cmpu (const T1 &, const T2 &);

  /* Unary operations.  */
  UNARY_FUNCTION bit_not (const T &);
  UNARY_FUNCTION neg (const T &);
  UNARY_FUNCTION neg (const T &, bool *);
  UNARY_FUNCTION abs (const T &);
  UNARY_FUNCTION ext (const T &, unsigned int, signop);
  UNARY_FUNCTION sext (const T &, unsigned int);
  UNARY_FUNCTION zext (const T &, unsigned int);
  UNARY_FUNCTION set_bit (const T &, unsigned int);

  /* Minimum and maximum, again with signed/unsigned variants.  */
  BINARY_FUNCTION min (const T1 &, const T2 &, signop);
  BINARY_FUNCTION smin (const T1 &, const T2 &);
  BINARY_FUNCTION umin (const T1 &, const T2 &);
  BINARY_FUNCTION max (const T1 &, const T2 &, signop);
  BINARY_FUNCTION smax (const T1 &, const T2 &);
  BINARY_FUNCTION umax (const T1 &, const T2 &);

  /* Bitwise and arithmetic operations.  The overloads that take a
     signop and a bool * are the overflow-detecting variants described
     in the overview at the top of the file.  */
  BINARY_FUNCTION bit_and (const T1 &, const T2 &);
  BINARY_FUNCTION bit_and_not (const T1 &, const T2 &);
  BINARY_FUNCTION bit_or (const T1 &, const T2 &);
  BINARY_FUNCTION bit_or_not (const T1 &, const T2 &);
  BINARY_FUNCTION bit_xor (const T1 &, const T2 &);
  BINARY_FUNCTION add (const T1 &, const T2 &);
  BINARY_FUNCTION add (const T1 &, const T2 &, signop, bool *);
  BINARY_FUNCTION sub (const T1 &, const T2 &);
  BINARY_FUNCTION sub (const T1 &, const T2 &, signop, bool *);
  BINARY_FUNCTION mul (const T1 &, const T2 &);
  BINARY_FUNCTION mul (const T1 &, const T2 &, signop, bool *);
  BINARY_FUNCTION smul (const T1 &, const T2 &, bool *);
  BINARY_FUNCTION umul (const T1 &, const T2 &, bool *);
  BINARY_FUNCTION mul_high (const T1 &, const T2 &, signop);

  /* Division and modulus.  The _trunc, _floor, _ceil and _round
     suffixes select the rounding of the quotient.  */
  BINARY_FUNCTION div_trunc (const T1 &, const T2 &, signop, bool * = 0);
  BINARY_FUNCTION sdiv_trunc (const T1 &, const T2 &);
  BINARY_FUNCTION udiv_trunc (const T1 &, const T2 &);
  BINARY_FUNCTION div_floor (const T1 &, const T2 &, signop, bool * = 0);
  BINARY_FUNCTION udiv_floor (const T1 &, const T2 &);
  BINARY_FUNCTION sdiv_floor (const T1 &, const T2 &);
  BINARY_FUNCTION div_ceil (const T1 &, const T2 &, signop, bool * = 0);
  BINARY_FUNCTION div_round (const T1 &, const T2 &, signop, bool * = 0);
  BINARY_FUNCTION divmod_trunc (const T1 &, const T2 &, signop,
				WI_BINARY_RESULT (T1, T2) *);
  BINARY_FUNCTION gcd (const T1 &, const T2 &, signop = UNSIGNED);
  BINARY_FUNCTION mod_trunc (const T1 &, const T2 &, signop, bool * = 0);
  BINARY_FUNCTION smod_trunc (const T1 &, const T2 &);
  BINARY_FUNCTION umod_trunc (const T1 &, const T2 &);
  BINARY_FUNCTION mod_floor (const T1 &, const T2 &, signop, bool * = 0);
  BINARY_FUNCTION umod_floor (const T1 &, const T2 &);
  BINARY_FUNCTION mod_ceil (const T1 &, const T2 &, signop, bool * = 0);
  BINARY_FUNCTION mod_round (const T1 &, const T2 &, signop, bool * = 0);

  template <typename T1, typename T2>
  bool multiple_of_p (const T1 &, const T2 &, signop);

  template <typename T1, typename T2>
  bool multiple_of_p (const T1 &, const T2 &, signop,
		      WI_BINARY_RESULT (T1, T2) *);

  /* Shifts and rotates; see the notes on shift semantics in the
     overview at the top of the file.  */
  SHIFT_FUNCTION lshift (const T1 &, const T2 &);
  SHIFT_FUNCTION lrshift (const T1 &, const T2 &);
  SHIFT_FUNCTION arshift (const T1 &, const T2 &);
  SHIFT_FUNCTION rshift (const T1 &, const T2 &, signop sgn);
  SHIFT_FUNCTION lrotate (const T1 &, const T2 &, unsigned int = 0);
  SHIFT_FUNCTION rrotate (const T1 &, const T2 &, unsigned int = 0);

#undef SHIFT_FUNCTION
#undef BINARY_PREDICATE
#undef BINARY_FUNCTION
#undef UNARY_PREDICATE
#undef UNARY_FUNCTION

  /* Bit-level queries on an already-decomposed value.  */
  bool only_sign_bit_p (const wide_int_ref &, unsigned int);
  bool only_sign_bit_p (const wide_int_ref &);
  int clz (const wide_int_ref &);
  int clrsb (const wide_int_ref &);
  int ctz (const wide_int_ref &);
  int exact_log2 (const wide_int_ref &);
  int floor_log2 (const wide_int_ref &);
  int ffs (const wide_int_ref &);
  int popcount (const wide_int_ref &);
  int parity (const wide_int_ref &);

  template <typename T>
  unsigned HOST_WIDE_INT extract_uhwi (const T &, unsigned int, unsigned int);

  template <typename T>
  unsigned int min_precision (const T &, signop);
}
613 | |
namespace wi
{
  /* Contains the components of a decomposed integer for easy, direct
     access.  */
  struct storage_ref
  {
    storage_ref (const HOST_WIDE_INT *, unsigned int, unsigned int);

    /* The blocks of the value, least significant HWI first.  */
    const HOST_WIDE_INT *val;
    /* The number of blocks in VAL.  */
    unsigned int len;
    /* The precision of the value in bits.  */
    unsigned int precision;

    /* Provide enough trappings for this class to act as storage for
       generic_wide_int.  */
    unsigned int get_len () const;
    unsigned int get_precision () const;
    const HOST_WIDE_INT *get_val () const;
  };
}
633 | |
634 | inline::wi::storage_ref::storage_ref (const HOST_WIDE_INT *val_in, |
635 | unsigned int len_in, |
636 | unsigned int precision_in) |
637 | : val (val_in), len (len_in), precision (precision_in) |
638 | { |
639 | } |
640 | |
641 | inline unsigned int |
642 | wi::storage_ref::get_len () const |
643 | { |
644 | return len; |
645 | } |
646 | |
647 | inline unsigned int |
648 | wi::storage_ref::get_precision () const |
649 | { |
650 | return precision; |
651 | } |
652 | |
653 | inline const HOST_WIDE_INT * |
654 | wi::storage_ref::get_val () const |
655 | { |
656 | return val; |
657 | } |
658 | |
659 | /* This class defines an integer type using the storage provided by the |
660 | template argument. The storage class must provide the following |
661 | functions: |
662 | |
663 | unsigned int get_precision () const |
664 | Return the number of bits in the integer. |
665 | |
666 | HOST_WIDE_INT *get_val () const |
667 | Return a pointer to the array of blocks that encodes the integer. |
668 | |
669 | unsigned int get_len () const |
670 | Return the number of blocks in get_val (). If this is smaller |
671 | than the number of blocks implied by get_precision (), the |
672 | remaining blocks are sign extensions of block get_len () - 1. |
673 | |
674 | Although not required by generic_wide_int itself, writable storage |
675 | classes can also provide the following functions: |
676 | |
677 | HOST_WIDE_INT *write_val () |
678 | Get a modifiable version of get_val () |
679 | |
680 | unsigned int set_len (unsigned int len) |
681 | Set the value returned by get_len () to LEN. */ |
template <typename storage>
class GTY(()) generic_wide_int : public storage
{
public:
  generic_wide_int ();

  template <typename T>
  generic_wide_int (const T &);

  template <typename T>
  generic_wide_int (const T &, unsigned int);

  /* Conversions.  */
  HOST_WIDE_INT to_shwi (unsigned int) const;
  HOST_WIDE_INT to_shwi () const;
  unsigned HOST_WIDE_INT to_uhwi (unsigned int) const;
  unsigned HOST_WIDE_INT to_uhwi () const;
  HOST_WIDE_INT to_short_addr () const;

  /* Public accessors for the interior of a wide int.  */
  HOST_WIDE_INT sign_mask () const;
  HOST_WIDE_INT elt (unsigned int) const;
  unsigned HOST_WIDE_INT ulow () const;
  unsigned HOST_WIDE_INT uhigh () const;
  HOST_WIDE_INT slow () const;
  HOST_WIDE_INT shigh () const;

  template <typename T>
  generic_wide_int &operator = (const T &);

  /* Define a compound assignment operator OP in terms of the
     corresponding binary function wi::F.  */
#define ASSIGNMENT_OPERATOR(OP, F) \
  template <typename T> \
  generic_wide_int &OP (const T &c) { return (*this = wi::F (*this, c)); }

/* Restrict these to cases where the shift operator is defined.  */
#define SHIFT_ASSIGNMENT_OPERATOR(OP, OP2) \
  template <typename T> \
  generic_wide_int &OP (const T &c) { return (*this = *this OP2 c); }

  /* Prefix increment and decrement, implemented via += DELTA.  */
#define INCDEC_OPERATOR(OP, DELTA) \
  generic_wide_int &OP () { *this += DELTA; return *this; }

  ASSIGNMENT_OPERATOR (operator &=, bit_and)
  ASSIGNMENT_OPERATOR (operator |=, bit_or)
  ASSIGNMENT_OPERATOR (operator ^=, bit_xor)
  ASSIGNMENT_OPERATOR (operator +=, add)
  ASSIGNMENT_OPERATOR (operator -=, sub)
  ASSIGNMENT_OPERATOR (operator *=, mul)
  SHIFT_ASSIGNMENT_OPERATOR (operator <<=, <<)
  SHIFT_ASSIGNMENT_OPERATOR (operator >>=, >>)
  INCDEC_OPERATOR (operator ++, 1)
  INCDEC_OPERATOR (operator --, -1)

#undef SHIFT_ASSIGNMENT_OPERATOR
#undef ASSIGNMENT_OPERATOR
#undef INCDEC_OPERATOR

  /* Debugging functions.  */
  void dump () const;

  /* True if the storage type guarantees that bits above the precision
     are sign extensions of the value; propagated from wi::int_traits
     for this type.  */
  static const bool is_sign_extended
    = wi::int_traits <generic_wide_int <storage> >::is_sign_extended;
};
745 | |
/* Leave the storage uninitialized; the caller must assign a value before
   reading one.  */
template <typename storage>
inline generic_wide_int <storage>::generic_wide_int () {}

/* Forward X to the storage constructor; X supplies its own precision.  */
template <typename storage>
template <typename T>
inline generic_wide_int <storage>::generic_wide_int (const T &x)
  : storage (x)
{
}

/* Forward X and an explicit PRECISION to the storage constructor.  */
template <typename storage>
template <typename T>
inline generic_wide_int <storage>::generic_wide_int (const T &x,
						     unsigned int precision)
  : storage (x, precision)
{
}
763 | |
764 | /* Return THIS as a signed HOST_WIDE_INT, sign-extending from PRECISION. |
765 | If THIS does not fit in PRECISION, the information is lost. */ |
766 | template <typename storage> |
767 | inline HOST_WIDE_INT |
768 | generic_wide_int <storage>::to_shwi (unsigned int precision) const |
769 | { |
770 | if (precision < HOST_BITS_PER_WIDE_INT) |
771 | return sext_hwi (this->get_val ()[0], precision); |
772 | else |
773 | return this->get_val ()[0]; |
774 | } |
775 | |
776 | /* Return THIS as a signed HOST_WIDE_INT, in its natural precision. */ |
777 | template <typename storage> |
778 | inline HOST_WIDE_INT |
779 | generic_wide_int <storage>::to_shwi () const |
780 | { |
781 | if (is_sign_extended) |
782 | return this->get_val ()[0]; |
783 | else |
784 | return to_shwi (this->get_precision ()); |
785 | } |
786 | |
787 | /* Return THIS as an unsigned HOST_WIDE_INT, zero-extending from |
788 | PRECISION. If THIS does not fit in PRECISION, the information |
789 | is lost. */ |
790 | template <typename storage> |
791 | inline unsigned HOST_WIDE_INT |
792 | generic_wide_int <storage>::to_uhwi (unsigned int precision) const |
793 | { |
794 | if (precision < HOST_BITS_PER_WIDE_INT) |
795 | return zext_hwi (this->get_val ()[0], precision); |
796 | else |
797 | return this->get_val ()[0]; |
798 | } |
799 | |
/* Return THIS as an unsigned HOST_WIDE_INT, in its natural precision.  */
template <typename storage>
inline unsigned HOST_WIDE_INT
generic_wide_int <storage>::to_uhwi () const
{
  return to_uhwi (this->get_precision ());
}
807 | |
/* TODO: The compiler is half converted from using HOST_WIDE_INT to
   represent addresses to using offset_int to represent addresses.
   We use to_short_addr at the interface from new code to old,
   unconverted code.  */
template <typename storage>
inline HOST_WIDE_INT
generic_wide_int <storage>::to_short_addr () const
{
  /* Simply truncate to the least-significant block.  */
  return this->get_val ()[0];
}
818 | |
819 | /* Return the implicit value of blocks above get_len (). */ |
820 | template <typename storage> |
821 | inline HOST_WIDE_INT |
822 | generic_wide_int <storage>::sign_mask () const |
823 | { |
824 | unsigned int len = this->get_len (); |
825 | unsigned HOST_WIDE_INT high = this->get_val ()[len - 1]; |
826 | if (!is_sign_extended) |
827 | { |
828 | unsigned int precision = this->get_precision (); |
829 | int excess = len * HOST_BITS_PER_WIDE_INT - precision; |
830 | if (excess > 0) |
831 | high <<= excess; |
832 | } |
833 | return (HOST_WIDE_INT) (high) < 0 ? -1 : 0; |
834 | } |
835 | |
/* Return the signed value of the least-significant explicitly-encoded
   block.  */
template <typename storage>
inline HOST_WIDE_INT
generic_wide_int <storage>::slow () const
{
  return this->get_val ()[0];
}

/* Return the signed value of the most-significant explicitly-encoded
   block.  */
template <typename storage>
inline HOST_WIDE_INT
generic_wide_int <storage>::shigh () const
{
  return this->get_val ()[this->get_len () - 1];
}

/* Return the unsigned value of the least-significant
   explicitly-encoded block.  */
template <typename storage>
inline unsigned HOST_WIDE_INT
generic_wide_int <storage>::ulow () const
{
  return this->get_val ()[0];
}

/* Return the unsigned value of the most-significant
   explicitly-encoded block.  */
template <typename storage>
inline unsigned HOST_WIDE_INT
generic_wide_int <storage>::uhigh () const
{
  return this->get_val ()[this->get_len () - 1];
}
871 | |
872 | /* Return block I, which might be implicitly or explicit encoded. */ |
873 | template <typename storage> |
874 | inline HOST_WIDE_INT |
875 | generic_wide_int <storage>::elt (unsigned int i) const |
876 | { |
877 | if (i >= this->get_len ()) |
878 | return sign_mask (); |
879 | else |
880 | return this->get_val ()[i]; |
881 | } |
882 | |
/* Assign X to this integer, delegating to the storage class's
   assignment operator.  */
template <typename storage>
template <typename T>
inline generic_wide_int <storage> &
generic_wide_int <storage>::operator = (const T &x)
{
  storage::operator = (x);
  return *this;
}
891 | |
892 | /* Dump the contents of the integer to stderr, for debugging. */ |
893 | template <typename storage> |
894 | void |
895 | generic_wide_int <storage>::dump () const |
896 | { |
897 | unsigned int len = this->get_len (); |
898 | const HOST_WIDE_INT *val = this->get_val (); |
899 | unsigned int precision = this->get_precision (); |
900 | fprintf (stderr, "[" ); |
901 | if (len * HOST_BITS_PER_WIDE_INT < precision) |
902 | fprintf (stderr, "...," ); |
903 | for (unsigned int i = 0; i < len - 1; ++i) |
904 | fprintf (stderr, HOST_WIDE_INT_PRINT_HEX "," , val[len - 1 - i]); |
905 | fprintf (stderr, HOST_WIDE_INT_PRINT_HEX "], precision = %d\n" , |
906 | val[0], precision); |
907 | } |
908 | |
namespace wi
{
  /* generic_wide_int inherits its traits from its storage class.  */
  template <typename storage>
  struct int_traits < generic_wide_int <storage> >
    : public wi::int_traits <storage>
  {
    static unsigned int get_precision (const generic_wide_int <storage> &);
    static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
				      const generic_wide_int <storage> &);
  };
}

/* Return the precision of X.  */
template <typename storage>
inline unsigned int
wi::int_traits < generic_wide_int <storage> >::
get_precision (const generic_wide_int <storage> &x)
{
  return x.get_precision ();
}

/* Decompose X into a storage_ref.  The scratch buffer is unused because
   X's own block array can be referenced directly.  */
template <typename storage>
inline wi::storage_ref
wi::int_traits < generic_wide_int <storage> >::
decompose (HOST_WIDE_INT *, unsigned int precision,
	   const generic_wide_int <storage> &x)
{
  gcc_checking_assert (precision == x.get_precision ());
  return wi::storage_ref (x.get_val (), x.get_len (), precision);
}
938 | |
/* Provide the storage for a wide_int_ref.  This acts like a read-only
   wide_int, with the optimization that VAL is normally a pointer to
   another integer's storage, so that no array copy is needed.  */
template <bool SE, bool HDP>
struct wide_int_ref_storage : public wi::storage_ref
{
private:
  /* Scratch space that can be used when decomposing the original integer.
     It must live as long as this object.  */
  HOST_WIDE_INT scratch[2];

public:
  wide_int_ref_storage (const wi::storage_ref &);

  /* Reference X in its natural precision.  */
  template <typename T>
  wide_int_ref_storage (const T &);

  /* Reference X in an explicitly-given precision.  */
  template <typename T>
  wide_int_ref_storage (const T &, unsigned int);
};
959 | |
/* Create a reference from an existing reference.  */
template <bool SE, bool HDP>
inline wide_int_ref_storage <SE, HDP>::
wide_int_ref_storage (const wi::storage_ref &x)
  : storage_ref (x)
{}

/* Create a reference to integer X in its natural precision.  Note
   that the natural precision is host-dependent for primitive
   types.  X may be decomposed into SCRATCH, so the reference must
   not outlive this object.  */
template <bool SE, bool HDP>
template <typename T>
inline wide_int_ref_storage <SE, HDP>::wide_int_ref_storage (const T &x)
  : storage_ref (wi::int_traits <T>::decompose (scratch,
						wi::get_precision (x), x))
{
}

/* Create a reference to integer X in precision PRECISION.  */
template <bool SE, bool HDP>
template <typename T>
inline wide_int_ref_storage <SE, HDP>::
wide_int_ref_storage (const T &x, unsigned int precision)
  : storage_ref (wi::int_traits <T>::decompose (scratch, precision, x))
{
}
986 | |
namespace wi
{
  /* The template parameters say whether the referenced storage is
     sign-extended (SE) and host-dependent-precision (HDP).  */
  template <bool SE, bool HDP>
  struct int_traits <wide_int_ref_storage <SE, HDP> >
  {
    static const enum precision_type precision_type = VAR_PRECISION;
    static const bool host_dependent_precision = HDP;
    static const bool is_sign_extended = SE;
  };
}
997 | |
namespace wi
{
  /* Out-of-line helpers (defined in wide-int.cc) used by the storage
     classes below; both return the canonical length of the result.  */
  unsigned int force_to_size (HOST_WIDE_INT *, const HOST_WIDE_INT *,
			      unsigned int, unsigned int, unsigned int,
			      signop sgn);
  unsigned int from_array (HOST_WIDE_INT *, const HOST_WIDE_INT *,
			   unsigned int, unsigned int, bool = true);
}
1006 | |
/* The storage used by wide_int.  */
class GTY(()) wide_int_storage
{
private:
  /* Explicitly-encoded blocks, least significant first.  */
  HOST_WIDE_INT val[WIDE_INT_MAX_ELTS];
  /* Number of blocks of VAL that are significant.  */
  unsigned int len;
  /* Precision of the integer in bits.  */
  unsigned int precision;

public:
  wide_int_storage ();
  template <typename T>
  wide_int_storage (const T &);

  /* The standard generic_wide_int storage methods.  */
  unsigned int get_precision () const;
  const HOST_WIDE_INT *get_val () const;
  unsigned int get_len () const;
  HOST_WIDE_INT *write_val ();
  void set_len (unsigned int, bool = false);

  template <typename T>
  wide_int_storage &operator = (const T &);

  static wide_int from (const wide_int_ref &, unsigned int, signop);
  static wide_int from_array (const HOST_WIDE_INT *, unsigned int,
			      unsigned int, bool = true);
  static wide_int create (unsigned int);

  /* FIXME: target-dependent, so should disappear.  */
  wide_int bswap () const;
};
1038 | |
namespace wi
{
  template <>
  struct int_traits <wide_int_storage>
  {
    static const enum precision_type precision_type = VAR_PRECISION;
    /* Guaranteed by a static assert in the wide_int_storage constructor.  */
    static const bool host_dependent_precision = false;
    static const bool is_sign_extended = true;
    /* Return an uninitialized result with the precision of the
       non-flexible operand.  */
    template <typename T1, typename T2>
    static wide_int get_binary_result (const T1 &, const T2 &);
  };
}
1052 | |
/* Leave the value uninitialized.  */
inline wide_int_storage::wide_int_storage () {}

/* Initialize the storage from integer X, in its natural precision.
   Note that we do not allow integers with host-dependent precision
   to become wide_ints; wide_ints must always be logically independent
   of the host.  */
template <typename T>
inline wide_int_storage::wide_int_storage (const T &x)
{
  { STATIC_ASSERT (!wi::int_traits<T>::host_dependent_precision); }
  { STATIC_ASSERT (wi::int_traits<T>::precision_type != wi::CONST_PRECISION); }
  WIDE_INT_REF_FOR (T) xi (x);
  precision = xi.precision;
  wi::copy (*this, xi);
}

/* Assign X to this wide_int, adopting X's precision.  The same
   host-independence restrictions as for the constructor apply.  */
template <typename T>
inline wide_int_storage&
wide_int_storage::operator = (const T &x)
{
  { STATIC_ASSERT (!wi::int_traits<T>::host_dependent_precision); }
  { STATIC_ASSERT (wi::int_traits<T>::precision_type != wi::CONST_PRECISION); }
  WIDE_INT_REF_FOR (T) xi (x);
  precision = xi.precision;
  wi::copy (*this, xi);
  return *this;
}
1080 | |
/* Return the precision of the integer in bits.  */
inline unsigned int
wide_int_storage::get_precision () const
{
  return precision;
}

/* Return a read-only pointer to the block array.  */
inline const HOST_WIDE_INT *
wide_int_storage::get_val () const
{
  return val;
}

/* Return the number of significant blocks.  */
inline unsigned int
wide_int_storage::get_len () const
{
  return len;
}

/* Return a writable pointer to the block array; the caller must follow
   up with set_len.  */
inline HOST_WIDE_INT *
wide_int_storage::write_val ()
{
  return val;
}
1104 | |
/* Record that the encoding has L significant blocks.  Unless the caller
   guarantees that the top block is already sign-extended
   (IS_SIGN_EXTENDED), canonicalize it so that bits above the precision
   are copies of the value's sign bit.  */
inline void
wide_int_storage::set_len (unsigned int l, bool is_sign_extended)
{
  len = l;
  if (!is_sign_extended && len * HOST_BITS_PER_WIDE_INT > precision)
    val[len - 1] = sext_hwi (val[len - 1],
			     precision % HOST_BITS_PER_WIDE_INT);
}
1113 | |
/* Treat X as having signedness SGN and convert it to a PRECISION-bit
   number.  */
inline wide_int
wide_int_storage::from (const wide_int_ref &x, unsigned int precision,
			signop sgn)
{
  wide_int result = wide_int::create (precision);
  /* force_to_size extends or truncates X as needed and returns the
     canonical block count.  */
  result.set_len (wi::force_to_size (result.write_val (), x.val, x.len,
				     x.precision, precision, sgn));
  return result;
}
1125 | |
/* Create a wide_int from the explicit block encoding given by VAL and
   LEN.  PRECISION is the precision of the integer.  NEED_CANON_P is
   true if the encoding may have redundant trailing blocks.  */
inline wide_int
wide_int_storage::from_array (const HOST_WIDE_INT *val, unsigned int len,
			      unsigned int precision, bool need_canon_p)
{
  wide_int result = wide_int::create (precision);
  result.set_len (wi::from_array (result.write_val (), val, len, precision,
				  need_canon_p));
  return result;
}
1138 | |
/* Return an uninitialized wide_int with precision PRECISION.  The caller
   is expected to fill in the value via write_val/set_len.  */
inline wide_int
wide_int_storage::create (unsigned int precision)
{
  wide_int x;
  x.precision = precision;
  return x;
}
1147 | |
1148 | template <typename T1, typename T2> |
1149 | inline wide_int |
1150 | wi::int_traits <wide_int_storage>::get_binary_result (const T1 &x, const T2 &y) |
1151 | { |
1152 | /* This shouldn't be used for two flexible-precision inputs. */ |
1153 | STATIC_ASSERT (wi::int_traits <T1>::precision_type != FLEXIBLE_PRECISION |
1154 | || wi::int_traits <T2>::precision_type != FLEXIBLE_PRECISION); |
1155 | if (wi::int_traits <T1>::precision_type == FLEXIBLE_PRECISION) |
1156 | return wide_int::create (wi::get_precision (y)); |
1157 | else |
1158 | return wide_int::create (wi::get_precision (x)); |
1159 | } |
1160 | |
/* The storage used by FIXED_WIDE_INT (N).  */
template <int N>
class GTY(()) fixed_wide_int_storage
{
private:
  /* Enough blocks for N bits plus one spare block for an extra zero
     block when an unsigned top-bit-set value is stored.  */
  HOST_WIDE_INT val[(N + HOST_BITS_PER_WIDE_INT + 1) / HOST_BITS_PER_WIDE_INT];
  /* Number of blocks of VAL that are significant.  */
  unsigned int len;

public:
  fixed_wide_int_storage ();
  template <typename T>
  fixed_wide_int_storage (const T &);

  /* The standard generic_wide_int storage methods.  */
  unsigned int get_precision () const;
  const HOST_WIDE_INT *get_val () const;
  unsigned int get_len () const;
  HOST_WIDE_INT *write_val ();
  void set_len (unsigned int, bool = false);

  static FIXED_WIDE_INT (N) from (const wide_int_ref &, signop);
  static FIXED_WIDE_INT (N) from_array (const HOST_WIDE_INT *, unsigned int,
					bool = true);
};
1185 | |
namespace wi
{
  /* Fixed-width integers always have precision N, known at compile
     time.  */
  template <int N>
  struct int_traits < fixed_wide_int_storage <N> >
  {
    static const enum precision_type precision_type = CONST_PRECISION;
    static const bool host_dependent_precision = false;
    static const bool is_sign_extended = true;
    static const unsigned int precision = N;
    template <typename T1, typename T2>
    static FIXED_WIDE_INT (N) get_binary_result (const T1 &, const T2 &);
  };
}
1199 | |
/* Leave the value uninitialized.  */
template <int N>
inline fixed_wide_int_storage <N>::fixed_wide_int_storage () {}

/* Initialize the storage from integer X, in precision N.  */
template <int N>
template <typename T>
inline fixed_wide_int_storage <N>::fixed_wide_int_storage (const T &x)
{
  /* Check for type compatibility.  We don't want to initialize a
     fixed-width integer from something like a wide_int.  */
  WI_BINARY_RESULT (T, FIXED_WIDE_INT (N)) *assertion ATTRIBUTE_UNUSED;
  wi::copy (*this, WIDE_INT_REF_FOR (T) (x, N));
}
1213 | |
/* Return the compile-time precision N.  */
template <int N>
inline unsigned int
fixed_wide_int_storage <N>::get_precision () const
{
  return N;
}

/* Return a read-only pointer to the block array.  */
template <int N>
inline const HOST_WIDE_INT *
fixed_wide_int_storage <N>::get_val () const
{
  return val;
}

/* Return the number of significant blocks.  */
template <int N>
inline unsigned int
fixed_wide_int_storage <N>::get_len () const
{
  return len;
}

/* Return a writable pointer to the block array; the caller must follow
   up with set_len.  */
template <int N>
inline HOST_WIDE_INT *
fixed_wide_int_storage <N>::write_val ()
{
  return val;
}
1241 | |
/* Record that the encoding has L significant blocks.  No sign-extension
   canonicalization is needed because N is a whole number of blocks.  */
template <int N>
inline void
fixed_wide_int_storage <N>::set_len (unsigned int l, bool)
{
  len = l;
  /* There are no excess bits in val[len - 1].  */
  STATIC_ASSERT (N % HOST_BITS_PER_WIDE_INT == 0);
}
1250 | |
/* Treat X as having signedness SGN and convert it to an N-bit number.  */
template <int N>
inline FIXED_WIDE_INT (N)
fixed_wide_int_storage <N>::from (const wide_int_ref &x, signop sgn)
{
  FIXED_WIDE_INT (N) result;
  /* force_to_size extends or truncates X as needed and returns the
     canonical block count.  */
  result.set_len (wi::force_to_size (result.write_val (), x.val, x.len,
				     x.precision, N, sgn));
  return result;
}
1261 | |
/* Create a FIXED_WIDE_INT (N) from the explicit block encoding given by
   VAL and LEN.  NEED_CANON_P is true if the encoding may have redundant
   trailing blocks.  */
template <int N>
inline FIXED_WIDE_INT (N)
fixed_wide_int_storage <N>::from_array (const HOST_WIDE_INT *val,
					unsigned int len,
					bool need_canon_p)
{
  FIXED_WIDE_INT (N) result;
  result.set_len (wi::from_array (result.write_val (), val, len,
				  N, need_canon_p));
  return result;
}
1276 | |
/* Return an uninitialized N-bit result; the operands' precisions are
   irrelevant because the result width is fixed at compile time.  */
template <int N>
template <typename T1, typename T2>
inline FIXED_WIDE_INT (N)
wi::int_traits < fixed_wide_int_storage <N> >::
get_binary_result (const T1 &, const T2 &)
{
  return FIXED_WIDE_INT (N) ();
}
1285 | |
/* A reference to one element of a trailing_wide_ints structure.  */
class trailing_wide_int_storage
{
private:
  /* The precision of the integer, which is a fixed property of the
     parent trailing_wide_ints.  */
  unsigned int m_precision;

  /* A pointer to the length field.  */
  unsigned char *m_len;

  /* A pointer to the HWI array.  There are enough elements to hold all
     values of precision M_PRECISION.  */
  HOST_WIDE_INT *m_val;

public:
  trailing_wide_int_storage (unsigned int, unsigned char *, HOST_WIDE_INT *);

  /* The standard generic_wide_int storage methods.  */
  unsigned int get_len () const;
  unsigned int get_precision () const;
  const HOST_WIDE_INT *get_val () const;
  HOST_WIDE_INT *write_val ();
  void set_len (unsigned int, bool = false);

  /* Assign a new value through the referenced storage.  */
  template <typename T>
  trailing_wide_int_storage &operator = (const T &);
};
1314 | |
/* A wide-int interface onto one element of a trailing_wide_ints.  */
typedef generic_wide_int <trailing_wide_int_storage> trailing_wide_int;

/* trailing_wide_int behaves like a wide_int.  */
namespace wi
{
  template <>
  struct int_traits <trailing_wide_int_storage>
    : public int_traits <wide_int_storage> {};
}
1324 | |
/* An array of N wide_int-like objects that can be put at the end of
   a variable-sized structure.  Use extra_size to calculate how many
   bytes beyond the sizeof need to be allocated.  Use set_precision
   to initialize the structure.  */
template <int N>
class GTY(()) trailing_wide_ints
{
private:
  /* The shared precision of each number.  */
  unsigned short m_precision;

  /* The shared maximum length of each number.  */
  unsigned char m_max_len;

  /* The current length of each number.  */
  unsigned char m_len[N];

  /* The variable-length part of the structure, which always contains
     at least one HWI.  Element I starts at index I * M_MAX_LEN.  */
  HOST_WIDE_INT m_val[1];

public:
  void set_precision (unsigned int);
  trailing_wide_int operator [] (unsigned int);
  static size_t extra_size (unsigned int);
};
1351 | |
/* Bind this reference to the LEN and VAL fields of one element, with
   the parent's fixed PRECISION.  */
inline trailing_wide_int_storage::
trailing_wide_int_storage (unsigned int precision, unsigned char *len,
			   HOST_WIDE_INT *val)
  : m_precision (precision), m_len (len), m_val (val)
{
}
1358 | |
/* Return the number of significant blocks of the referenced element.  */
inline unsigned int
trailing_wide_int_storage::get_len () const
{
  return *m_len;
}

/* Return the shared precision of the parent structure.  */
inline unsigned int
trailing_wide_int_storage::get_precision () const
{
  return m_precision;
}

/* Return a read-only pointer to the element's block array.  */
inline const HOST_WIDE_INT *
trailing_wide_int_storage::get_val () const
{
  return m_val;
}

/* Return a writable pointer to the element's block array; the caller
   must follow up with set_len.  */
inline HOST_WIDE_INT *
trailing_wide_int_storage::write_val ()
{
  return m_val;
}
1382 | |
/* Record that the encoding has LEN significant blocks.  Unless the
   caller guarantees that the top block is already sign-extended
   (IS_SIGN_EXTENDED), canonicalize it so that bits above the precision
   are copies of the value's sign bit.  */
inline void
trailing_wide_int_storage::set_len (unsigned int len, bool is_sign_extended)
{
  *m_len = len;
  if (!is_sign_extended && len * HOST_BITS_PER_WIDE_INT > m_precision)
    m_val[len - 1] = sext_hwi (m_val[len - 1],
			       m_precision % HOST_BITS_PER_WIDE_INT);
}
1391 | |
/* Assign X to the referenced element, converting it to the element's
   precision.  */
template <typename T>
inline trailing_wide_int_storage &
trailing_wide_int_storage::operator = (const T &x)
{
  WIDE_INT_REF_FOR (T) xi (x, m_precision);
  wi::copy (*this, xi);
  return *this;
}
1400 | |
/* Initialize the structure and record that all elements have precision
   PRECISION.  */
template <int N>
inline void
trailing_wide_ints <N>::set_precision (unsigned int precision)
{
  m_precision = precision;
  /* Blocks needed per element, rounding the division up.  */
  m_max_len = ((precision + HOST_BITS_PER_WIDE_INT - 1)
	       / HOST_BITS_PER_WIDE_INT);
}
1411 | |
/* Return a reference to element INDEX.  */
template <int N>
inline trailing_wide_int
trailing_wide_ints <N>::operator [] (unsigned int index)
{
  /* Element INDEX's blocks start at offset INDEX * M_MAX_LEN.  */
  return trailing_wide_int_storage (m_precision, &m_len[index],
				    &m_val[index * m_max_len]);
}
1420 | |
1421 | /* Return how many extra bytes need to be added to the end of the structure |
1422 | in order to handle N wide_ints of precision PRECISION. */ |
1423 | template <int N> |
1424 | inline size_t |
1425 | trailing_wide_ints <N>:: (unsigned int precision) |
1426 | { |
1427 | unsigned int max_len = ((precision + HOST_BITS_PER_WIDE_INT - 1) |
1428 | / HOST_BITS_PER_WIDE_INT); |
1429 | return (N * max_len - 1) * sizeof (HOST_WIDE_INT); |
1430 | } |
1431 | |
/* This macro is used in structures that end with a trailing_wide_ints field
   called FIELD.  It declares get_NAME() and set_NAME() methods to access
   element I of FIELD.  */
#define TRAILING_WIDE_INT_ACCESSOR(NAME, FIELD, I) \
  trailing_wide_int get_##NAME () { return FIELD[I]; } \
  template <typename T> void set_##NAME (const T &x) { FIELD[I] = x; }
1438 | |
namespace wi
{
  /* Implementation of int_traits for primitive integer types like "int".  */
  template <typename T, bool signed_p>
  struct primitive_int_traits
  {
    static const enum precision_type precision_type = FLEXIBLE_PRECISION;
    /* sizeof (T) varies between hosts, so the precision does too.  */
    static const bool host_dependent_precision = true;
    static const bool is_sign_extended = true;
    static unsigned int get_precision (T);
    static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int, T);
  };
}
1452 | |
/* Return the natural precision of T: its size in bits on this host.  */
template <typename T, bool signed_p>
inline unsigned int
wi::primitive_int_traits <T, signed_p>::get_precision (T)
{
  return sizeof (T) * CHAR_BIT;
}
1459 | |
1460 | template <typename T, bool signed_p> |
1461 | inline wi::storage_ref |
1462 | wi::primitive_int_traits <T, signed_p>::decompose (HOST_WIDE_INT *scratch, |
1463 | unsigned int precision, T x) |
1464 | { |
1465 | scratch[0] = x; |
1466 | if (signed_p || scratch[0] >= 0 || precision <= HOST_BITS_PER_WIDE_INT) |
1467 | return wi::storage_ref (scratch, 1, precision); |
1468 | scratch[1] = 0; |
1469 | return wi::storage_ref (scratch, 2, precision); |
1470 | } |
1471 | |
/* Allow primitive C types to be used in wi:: routines.  Each
   specialization records whether the type is signed.  */
namespace wi
{
  template <>
  struct int_traits <unsigned char>
    : public primitive_int_traits <unsigned char, false> {};

  template <>
  struct int_traits <unsigned short>
    : public primitive_int_traits <unsigned short, false> {};

  template <>
  struct int_traits <int>
    : public primitive_int_traits <int, true> {};

  template <>
  struct int_traits <unsigned int>
    : public primitive_int_traits <unsigned int, false> {};

  template <>
  struct int_traits <long>
    : public primitive_int_traits <long, true> {};

  template <>
  struct int_traits <unsigned long>
    : public primitive_int_traits <unsigned long, false> {};

#if defined HAVE_LONG_LONG
  template <>
  struct int_traits <long long>
    : public primitive_int_traits <long long, true> {};

  template <>
  struct int_traits <unsigned long long>
    : public primitive_int_traits <unsigned long long, false> {};
#endif
}
1509 | |
namespace wi
{
  /* Stores HWI-sized integer VAL, treating it as having signedness SGN
     and precision PRECISION.  */
  struct hwi_with_prec
  {
    hwi_with_prec (HOST_WIDE_INT, unsigned int, signop);
    HOST_WIDE_INT val;
    unsigned int precision;
    signop sgn;
  };

  /* Wrap a HWI as a signed/unsigned value of the given precision.  */
  hwi_with_prec shwi (HOST_WIDE_INT, unsigned int);
  hwi_with_prec uhwi (unsigned HOST_WIDE_INT, unsigned int);

  /* Convenience constants of the given precision.  */
  hwi_with_prec minus_one (unsigned int);
  hwi_with_prec zero (unsigned int);
  hwi_with_prec one (unsigned int);
  hwi_with_prec two (unsigned int);
}
1530 | |
1531 | inline wi::hwi_with_prec::hwi_with_prec (HOST_WIDE_INT v, unsigned int p, |
1532 | signop s) |
1533 | : precision (p), sgn (s) |
1534 | { |
1535 | if (precision < HOST_BITS_PER_WIDE_INT) |
1536 | val = sext_hwi (v, precision); |
1537 | else |
1538 | val = v; |
1539 | } |
1540 | |
/* Return a signed integer that has value VAL and precision PRECISION.  */
inline wi::hwi_with_prec
wi::shwi (HOST_WIDE_INT val, unsigned int precision)
{
  return hwi_with_prec (val, precision, SIGNED);
}

/* Return an unsigned integer that has value VAL and precision PRECISION.  */
inline wi::hwi_with_prec
wi::uhwi (unsigned HOST_WIDE_INT val, unsigned int precision)
{
  return hwi_with_prec (val, precision, UNSIGNED);
}

/* Return a wide int of -1 with precision PRECISION.  */
inline wi::hwi_with_prec
wi::minus_one (unsigned int precision)
{
  return wi::shwi (-1, precision);
}

/* Return a wide int of 0 with precision PRECISION.  */
inline wi::hwi_with_prec
wi::zero (unsigned int precision)
{
  return wi::shwi (0, precision);
}

/* Return a wide int of 1 with precision PRECISION.  */
inline wi::hwi_with_prec
wi::one (unsigned int precision)
{
  return wi::shwi (1, precision);
}

/* Return a wide int of 2 with precision PRECISION.  */
inline wi::hwi_with_prec
wi::two (unsigned int precision)
{
  return wi::shwi (2, precision);
}
1582 | |
namespace wi
{
  template <>
  struct int_traits <wi::hwi_with_prec>
  {
    static const enum precision_type precision_type = VAR_PRECISION;
    /* hwi_with_prec has an explicitly-given precision, rather than the
       precision of HOST_WIDE_INT.  */
    static const bool host_dependent_precision = false;
    static const bool is_sign_extended = true;
    static unsigned int get_precision (const wi::hwi_with_prec &);
    static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
				      const wi::hwi_with_prec &);
  };
}

/* Return the explicitly-stored precision of X.  */
inline unsigned int
wi::int_traits <wi::hwi_with_prec>::get_precision (const wi::hwi_with_prec &x)
{
  return x.precision;
}
1604 | |
/* Expose X as an array of HWI blocks in SCRATCH, returning a reference
   to that storage.  PRECISION must match X's own precision.  */
inline wi::storage_ref
wi::int_traits <wi::hwi_with_prec>::
decompose (HOST_WIDE_INT *scratch, unsigned int precision,
           const wi::hwi_with_prec &x)
{
  gcc_checking_assert (precision == x.precision);
  scratch[0] = x.val;
  /* One block is enough if the value is signed (already in sign-extended
     form), is non-negative (sign and zero extension agree), or is narrow
     enough that there are no bits beyond the HWI to extend into.  */
  if (x.sgn == SIGNED || x.val >= 0 || precision <= HOST_BITS_PER_WIDE_INT)
    return wi::storage_ref (scratch, 1, precision);
  /* Otherwise X is a wide unsigned value whose low HWI has the top bit
     set; append a zero block so the representation reads as positive.  */
  scratch[1] = 0;
  return wi::storage_ref (scratch, 2, precision);
}
1617 | |
/* Private functions for handling large cases out of line.  They take
   individual length and array parameters because that is cheaper for
   the inline caller than constructing an object on the stack and
   passing a reference to it.  (Although many callers use wide_int_refs,
   we generally want those to be removed by SRA.)  */
namespace wi
{
  /* Comparisons.  */
  bool eq_p_large (const HOST_WIDE_INT *, unsigned int,
                   const HOST_WIDE_INT *, unsigned int, unsigned int);
  bool lts_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
                    const HOST_WIDE_INT *, unsigned int);
  bool ltu_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
                    const HOST_WIDE_INT *, unsigned int);
  int cmps_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
                  const HOST_WIDE_INT *, unsigned int);
  int cmpu_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
                  const HOST_WIDE_INT *, unsigned int);
  /* Extensions and single-bit operations.  */
  unsigned int sext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
                           unsigned int,
                           unsigned int, unsigned int);
  unsigned int zext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
                           unsigned int,
                           unsigned int, unsigned int);
  unsigned int set_bit_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
                              unsigned int, unsigned int, unsigned int);
  /* Shifts.  */
  unsigned int lshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
                             unsigned int, unsigned int, unsigned int);
  unsigned int lrshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
                              unsigned int, unsigned int, unsigned int,
                              unsigned int);
  unsigned int arshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
                              unsigned int, unsigned int, unsigned int,
                              unsigned int);
  /* Bitwise logical operations.  */
  unsigned int and_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
                          const HOST_WIDE_INT *, unsigned int, unsigned int);
  unsigned int and_not_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
                              unsigned int, const HOST_WIDE_INT *,
                              unsigned int, unsigned int);
  unsigned int or_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
                         const HOST_WIDE_INT *, unsigned int, unsigned int);
  unsigned int or_not_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
                             unsigned int, const HOST_WIDE_INT *,
                             unsigned int, unsigned int);
  unsigned int xor_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
                          const HOST_WIDE_INT *, unsigned int, unsigned int);
  /* Arithmetic.  */
  unsigned int add_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
                          const HOST_WIDE_INT *, unsigned int, unsigned int,
                          signop, bool *);
  unsigned int sub_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
                          const HOST_WIDE_INT *, unsigned int, unsigned int,
                          signop, bool *);
  unsigned int mul_internal (HOST_WIDE_INT *, const HOST_WIDE_INT *,
                             unsigned int, const HOST_WIDE_INT *,
                             unsigned int, unsigned int, signop, bool *,
                             bool);
  unsigned int divmod_internal (HOST_WIDE_INT *, unsigned int *,
                                HOST_WIDE_INT *, const HOST_WIDE_INT *,
                                unsigned int, unsigned int,
                                const HOST_WIDE_INT *,
                                unsigned int, unsigned int,
                                signop, bool *);
}
1680 | |
/* Return the number of bits that integer X can hold.  Dispatches to
   the int_traits specialization for X's type.  */
template <typename T>
inline unsigned int
wi::get_precision (const T &x)
{
  return wi::int_traits <T>::get_precision (x);
}
1688 | |
/* Return the number of bits that the result of a binary operation can
   hold when the input operands are X and Y.  The traits of the deduced
   binary result type decide the answer.  */
template <typename T1, typename T2>
inline unsigned int
wi::get_binary_precision (const T1 &x, const T2 &y)
{
  return get_precision (wi::int_traits <WI_BINARY_RESULT (T1, T2)>::
			get_binary_result (x, y));
}
1698 | |
/* Copy the contents of Y to X, but keeping X's current precision.  */
template <typename T1, typename T2>
inline void
wi::copy (T1 &x, const T2 &y)
{
  HOST_WIDE_INT *xval = x.write_val ();
  const HOST_WIDE_INT *yval = y.get_val ();
  unsigned int len = y.get_len ();
  unsigned int i = 0;
  /* The do/while assumes len >= 1, i.e. every value has at least one
     HWI block.  */
  do
    xval[i] = yval[i];
  while (++i < len);
  x.set_len (len, y.is_sign_extended);
}
1713 | |
/* Return true if X fits in a HOST_WIDE_INT with no loss of precision.
   A single-block value is by definition a sign-extended HWI.  */
template <typename T>
inline bool
wi::fits_shwi_p (const T &x)
{
  WIDE_INT_REF_FOR (T) xi (x);
  return xi.len == 1;
}
1722 | |
/* Return true if X fits in an unsigned HOST_WIDE_INT with no loss of
   precision.  */
template <typename T>
inline bool
wi::fits_uhwi_p (const T &x)
{
  WIDE_INT_REF_FOR (T) xi (x);
  /* Any value of a type no wider than a HWI trivially fits.  */
  if (xi.precision <= HOST_BITS_PER_WIDE_INT)
    return true;
  /* A single-block value fits iff it is non-negative.  */
  if (xi.len == 1)
    return xi.slow () >= 0;
  /* A two-block value fits iff the upper block is merely a zero
     extension of the lower one.  */
  return xi.len == 2 && xi.uhigh () == 0;
}
1736 | |
1737 | /* Return true if X is negative based on the interpretation of SGN. |
1738 | For UNSIGNED, this is always false. */ |
1739 | template <typename T> |
1740 | inline bool |
1741 | wi::neg_p (const T &x, signop sgn) |
1742 | { |
1743 | WIDE_INT_REF_FOR (T) xi (x); |
1744 | if (sgn == UNSIGNED) |
1745 | return false; |
1746 | return xi.sign_mask () < 0; |
1747 | } |
1748 | |
/* Return -1 if the top bit of X is set and 0 if the top bit is clear.
   Simply forwards to the reference's own sign_mask.  */
template <typename T>
inline HOST_WIDE_INT
wi::sign_mask (const T &x)
{
  WIDE_INT_REF_FOR (T) xi (x);
  return xi.sign_mask ();
}
1757 | |
/* Return true if X == Y.  X and Y must be binary-compatible.  */
template <typename T1, typename T2>
inline bool
wi::eq_p (const T1 &x, const T2 &y)
{
  unsigned int precision = get_binary_precision (x, y);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  if (xi.is_sign_extended && yi.is_sign_extended)
    {
      /* This case reduces to array equality: sign-extended values with
	 different lengths cannot be equal.  */
      if (xi.len != yi.len)
	return false;
      unsigned int i = 0;
      do
	if (xi.val[i] != yi.val[i])
	  return false;
      while (++i != xi.len);
      return true;
    }
  if (__builtin_expect (yi.len == 1, true))
    {
      /* XI is only equal to YI if it too has a single HWI.  */
      if (xi.len != 1)
	return false;
      /* Excess bits in xi.val[0] will be signs or zeros, so comparisons
	 with 0 are simple.  */
      if (STATIC_CONSTANT_P (yi.val[0] == 0))
	return xi.val[0] == 0;
      /* Otherwise flush out any excess bits first: shift the XOR of the
	 two HWIs so that only the significant PRECISION bits remain.  */
      unsigned HOST_WIDE_INT diff = xi.val[0] ^ yi.val[0];
      int excess = HOST_BITS_PER_WIDE_INT - precision;
      if (excess > 0)
	diff <<= excess;
      return diff == 0;
    }
  return eq_p_large (xi.val, xi.len, yi.val, yi.len, precision);
}
1796 | |
/* Return true if X != Y.  X and Y must be binary-compatible.
   Defined as the negation of eq_p.  */
template <typename T1, typename T2>
inline bool
wi::ne_p (const T1 &x, const T2 &y)
{
  return !eq_p (x, y);
}
1804 | |
/* Return true if X < Y when both are treated as signed values.  */
template <typename T1, typename T2>
inline bool
wi::lts_p (const T1 &x, const T2 &y)
{
  unsigned int precision = get_binary_precision (x, y);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  /* We optimize x < y, where y is 64 or fewer bits.  */
  if (wi::fits_shwi_p (yi))
    {
      /* Make lts_p (x, 0) as efficient as wi::neg_p (x).  */
      if (STATIC_CONSTANT_P (yi.val[0] == 0))
	return neg_p (xi);
      /* If x fits directly into a shwi, we can compare directly.  */
      if (wi::fits_shwi_p (xi))
	return xi.to_shwi () < yi.to_shwi ();
      /* If x doesn't fit and is negative, then it must be more
	 negative than any value in y, and hence smaller than y.  */
      if (neg_p (xi))
	return true;
      /* If x is positive, then it must be larger than any value in y,
	 and hence greater than y.  */
      return false;
    }
  /* Optimize the opposite case, if it can be detected at compile time.  */
  if (STATIC_CONSTANT_P (xi.len == 1))
    /* If YI is negative it is lower than the least HWI.
       If YI is positive it is greater than the greatest HWI.  */
    return !neg_p (yi);
  return lts_p_large (xi.val, xi.len, precision, yi.val, yi.len);
}
1837 | |
/* Return true if X < Y when both are treated as unsigned values.  */
template <typename T1, typename T2>
inline bool
wi::ltu_p (const T1 &x, const T2 &y)
{
  unsigned int precision = get_binary_precision (x, y);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  /* Optimize comparisons with constants.  A multi-block XI is always
     greater than a single non-negative HWI, and vice versa.  */
  if (STATIC_CONSTANT_P (yi.len == 1 && yi.val[0] >= 0))
    return xi.len == 1 && xi.to_uhwi () < (unsigned HOST_WIDE_INT) yi.val[0];
  if (STATIC_CONSTANT_P (xi.len == 1 && xi.val[0] >= 0))
    return yi.len != 1 || yi.to_uhwi () > (unsigned HOST_WIDE_INT) xi.val[0];
  /* Optimize the case of two HWIs.  The HWIs are implicitly sign-extended
     for precisions greater than HOST_BITS_PER_WIDE_INT, but sign-extending
     both values does not change the result.  */
  if (__builtin_expect (xi.len + yi.len == 2, true))
    {
      unsigned HOST_WIDE_INT xl = xi.to_uhwi ();
      unsigned HOST_WIDE_INT yl = yi.to_uhwi ();
      return xl < yl;
    }
  return ltu_p_large (xi.val, xi.len, precision, yi.val, yi.len);
}
1862 | |
1863 | /* Return true if X < Y. Signedness of X and Y is indicated by SGN. */ |
1864 | template <typename T1, typename T2> |
1865 | inline bool |
1866 | wi::lt_p (const T1 &x, const T2 &y, signop sgn) |
1867 | { |
1868 | if (sgn == SIGNED) |
1869 | return lts_p (x, y); |
1870 | else |
1871 | return ltu_p (x, y); |
1872 | } |
1873 | |
/* Return true if X <= Y when both are treated as signed values.
   X <= Y is equivalent to !(Y < X).  */
template <typename T1, typename T2>
inline bool
wi::les_p (const T1 &x, const T2 &y)
{
  return !lts_p (y, x);
}
1881 | |
/* Return true if X <= Y when both are treated as unsigned values.
   X <= Y is equivalent to !(Y < X).  */
template <typename T1, typename T2>
inline bool
wi::leu_p (const T1 &x, const T2 &y)
{
  return !ltu_p (y, x);
}
1889 | |
1890 | /* Return true if X <= Y. Signedness of X and Y is indicated by SGN. */ |
1891 | template <typename T1, typename T2> |
1892 | inline bool |
1893 | wi::le_p (const T1 &x, const T2 &y, signop sgn) |
1894 | { |
1895 | if (sgn == SIGNED) |
1896 | return les_p (x, y); |
1897 | else |
1898 | return leu_p (x, y); |
1899 | } |
1900 | |
/* Return true if X > Y when both are treated as signed values.
   X > Y is equivalent to Y < X.  */
template <typename T1, typename T2>
inline bool
wi::gts_p (const T1 &x, const T2 &y)
{
  return lts_p (y, x);
}
1908 | |
/* Return true if X > Y when both are treated as unsigned values.
   X > Y is equivalent to Y < X.  */
template <typename T1, typename T2>
inline bool
wi::gtu_p (const T1 &x, const T2 &y)
{
  return ltu_p (y, x);
}
1916 | |
1917 | /* Return true if X > Y. Signedness of X and Y is indicated by SGN. */ |
1918 | template <typename T1, typename T2> |
1919 | inline bool |
1920 | wi::gt_p (const T1 &x, const T2 &y, signop sgn) |
1921 | { |
1922 | if (sgn == SIGNED) |
1923 | return gts_p (x, y); |
1924 | else |
1925 | return gtu_p (x, y); |
1926 | } |
1927 | |
/* Return true if X >= Y when both are treated as signed values.
   X >= Y is equivalent to !(X < Y).  */
template <typename T1, typename T2>
inline bool
wi::ges_p (const T1 &x, const T2 &y)
{
  return !lts_p (x, y);
}
1935 | |
/* Return true if X >= Y when both are treated as unsigned values.
   X >= Y is equivalent to !(X < Y).  */
template <typename T1, typename T2>
inline bool
wi::geu_p (const T1 &x, const T2 &y)
{
  return !ltu_p (x, y);
}
1943 | |
1944 | /* Return true if X >= Y. Signedness of X and Y is indicated by SGN. */ |
1945 | template <typename T1, typename T2> |
1946 | inline bool |
1947 | wi::ge_p (const T1 &x, const T2 &y, signop sgn) |
1948 | { |
1949 | if (sgn == SIGNED) |
1950 | return ges_p (x, y); |
1951 | else |
1952 | return geu_p (x, y); |
1953 | } |
1954 | |
/* Return -1 if X < Y, 0 if X == Y and 1 if X > Y.  Treat both X and Y
   as signed values.  */
template <typename T1, typename T2>
inline int
wi::cmps (const T1 &x, const T2 &y)
{
  unsigned int precision = get_binary_precision (x, y);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  if (wi::fits_shwi_p (yi))
    {
      /* Special case for comparisons with 0: negative gives -1, zero
	 gives 0, positive gives 1.  */
      if (STATIC_CONSTANT_P (yi.val[0] == 0))
	return neg_p (xi) ? -1 : !(xi.len == 1 && xi.val[0] == 0);
      /* If x fits into a signed HWI, we can compare directly.  */
      if (wi::fits_shwi_p (xi))
	{
	  HOST_WIDE_INT xl = xi.to_shwi ();
	  HOST_WIDE_INT yl = yi.to_shwi ();
	  return xl < yl ? -1 : xl > yl;
	}
      /* If x doesn't fit and is negative, then it must be more
	 negative than any signed HWI, and hence smaller than y.  */
      if (neg_p (xi))
	return -1;
      /* If x is positive, then it must be larger than any signed HWI,
	 and hence greater than y.  */
      return 1;
    }
  /* Optimize the opposite case, if it can be detected at compile time.  */
  if (STATIC_CONSTANT_P (xi.len == 1))
    /* If YI is negative it is lower than the least HWI.
       If YI is positive it is greater than the greatest HWI.  */
    return neg_p (yi) ? 1 : -1;
  return cmps_large (xi.val, xi.len, precision, yi.val, yi.len);
}
1991 | |
/* Return -1 if X < Y, 0 if X == Y and 1 if X > Y.  Treat both X and Y
   as unsigned values.  */
template <typename T1, typename T2>
inline int
wi::cmpu (const T1 &x, const T2 &y)
{
  unsigned int precision = get_binary_precision (x, y);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  /* Optimize comparisons with constants.  */
  if (STATIC_CONSTANT_P (yi.len == 1 && yi.val[0] >= 0))
    {
      /* If XI doesn't fit in a HWI then it must be larger than YI.  */
      if (xi.len != 1)
	return 1;
      /* Otherwise compare directly.  */
      unsigned HOST_WIDE_INT xl = xi.to_uhwi ();
      unsigned HOST_WIDE_INT yl = yi.val[0];
      return xl < yl ? -1 : xl > yl;
    }
  if (STATIC_CONSTANT_P (xi.len == 1 && xi.val[0] >= 0))
    {
      /* If YI doesn't fit in a HWI then it must be larger than XI.  */
      if (yi.len != 1)
	return -1;
      /* Otherwise compare directly.  */
      unsigned HOST_WIDE_INT xl = xi.val[0];
      unsigned HOST_WIDE_INT yl = yi.to_uhwi ();
      return xl < yl ? -1 : xl > yl;
    }
  /* Optimize the case of two HWIs.  The HWIs are implicitly sign-extended
     for precisions greater than HOST_BITS_PER_WIDE_INT, but sign-extending
     both values does not change the result.  */
  if (__builtin_expect (xi.len + yi.len == 2, true))
    {
      unsigned HOST_WIDE_INT xl = xi.to_uhwi ();
      unsigned HOST_WIDE_INT yl = yi.to_uhwi ();
      return xl < yl ? -1 : xl > yl;
    }
  return cmpu_large (xi.val, xi.len, precision, yi.val, yi.len);
}
2033 | |
2034 | /* Return -1 if X < Y, 0 if X == Y and 1 if X > Y. Signedness of |
2035 | X and Y indicated by SGN. */ |
2036 | template <typename T1, typename T2> |
2037 | inline int |
2038 | wi::cmp (const T1 &x, const T2 &y, signop sgn) |
2039 | { |
2040 | if (sgn == SIGNED) |
2041 | return cmps (x, y); |
2042 | else |
2043 | return cmpu (x, y); |
2044 | } |
2045 | |
/* Return ~x.  Complementing each block complements the whole value,
   and the length cannot change.  */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::bit_not (const T &x)
{
  WI_UNARY_RESULT_VAR (result, val, T, x);
  WIDE_INT_REF_FOR (T) xi (x, get_precision (result));
  for (unsigned int i = 0; i < xi.len; ++i)
    val[i] = ~xi.val[i];
  result.set_len (xi.len);
  return result;
}
2058 | |
/* Return -x, implemented as 0 - X.  */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::neg (const T &x)
{
  return sub (0, x);
}
2066 | |
/* Return -x.  Indicate in *OVERFLOW if X is the minimum signed value,
   the only case in which negation overflows.  */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::neg (const T &x, bool *overflow)
{
  *overflow = only_sign_bit_p (x);
  return sub (0, x);
}
2075 | |
/* Return the absolute value of x: negate if negative, otherwise
   return x converted to the result type.  */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::abs (const T &x)
{
  return neg_p (x) ? neg (x) : WI_UNARY_RESULT (T) (x);
}
2083 | |
/* Return the result of sign-extending the low OFFSET bits of X.  */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::sext (const T &x, unsigned int offset)
{
  WI_UNARY_RESULT_VAR (result, val, T, x);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T) xi (x, precision);

  /* Extensions up to and including a full HWI can be done on the low
     block alone; the result is a single sign-extended block.  */
  if (offset <= HOST_BITS_PER_WIDE_INT)
    {
      val[0] = sext_hwi (xi.ulow (), offset);
      result.set_len (1, true);
    }
  else
    result.set_len (sext_large (val, xi.val, xi.len, precision, offset));
  return result;
}
2102 | |
/* Return the result of zero-extending the low OFFSET bits of X.  */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::zext (const T &x, unsigned int offset)
{
  WI_UNARY_RESULT_VAR (result, val, T, x);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T) xi (x, precision);

  /* This is not just an optimization, it is actually required to
     maintain canonical form: extending past the precision must simply
     copy the value.  */
  if (offset >= precision)
    {
      wi::copy (result, xi);
      return result;
    }

  /* In these cases we know that at least the top bit will be clear,
     so no sign extension is necessary.  Note the strict < here, unlike
     in sext: zext_hwi of a full HWI could leave the top bit set.  */
  if (offset < HOST_BITS_PER_WIDE_INT)
    {
      val[0] = zext_hwi (xi.ulow (), offset);
      result.set_len (1, true);
    }
  else
    result.set_len (zext_large (val, xi.val, xi.len, precision, offset), true);
  return result;
}
2131 | |
/* Return the result of extending the low OFFSET bits of X according to
   signedness SGN.  Dispatches to sext or zext.  */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::ext (const T &x, unsigned int offset, signop sgn)
{
  return sgn == SIGNED ? sext (x, offset) : zext (x, offset);
}
2140 | |
/* Return an integer that represents X | (1 << bit).
   NOTE(review): the single-HWI path assumes BIT < PRECISION, otherwise
   the shift would be undefined -- confirm against callers.  */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::set_bit (const T &x, unsigned int bit)
{
  WI_UNARY_RESULT_VAR (result, val, T, x);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T) xi (x, precision);
  if (precision <= HOST_BITS_PER_WIDE_INT)
    {
      val[0] = xi.ulow () | (HOST_WIDE_INT_1U << bit);
      result.set_len (1);
    }
  else
    result.set_len (set_bit_large (val, xi.val, xi.len, precision, bit));
  return result;
}
2158 | |
/* Return the minimum of X and Y, treating them both as having
   signedness SGN.  The result is copied from whichever operand
   compares lower.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::min (const T1 &x, const T2 &y, signop sgn)
{
  WI_BINARY_RESULT_VAR (result, val ATTRIBUTE_UNUSED, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  if (wi::le_p (x, y, sgn))
    wi::copy (result, WIDE_INT_REF_FOR (T1) (x, precision));
  else
    wi::copy (result, WIDE_INT_REF_FOR (T2) (y, precision));
  return result;
}
2173 | |
/* Return the minimum of X and Y, treating both as signed values.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::smin (const T1 &x, const T2 &y)
{
  return wi::min (x, y, SIGNED);
}
2181 | |
/* Return the minimum of X and Y, treating both as unsigned values.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::umin (const T1 &x, const T2 &y)
{
  return wi::min (x, y, UNSIGNED);
}
2189 | |
/* Return the maximum of X and Y, treating them both as having
   signedness SGN.  The result is copied from whichever operand
   compares higher.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::max (const T1 &x, const T2 &y, signop sgn)
{
  WI_BINARY_RESULT_VAR (result, val ATTRIBUTE_UNUSED, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  if (wi::ge_p (x, y, sgn))
    wi::copy (result, WIDE_INT_REF_FOR (T1) (x, precision));
  else
    wi::copy (result, WIDE_INT_REF_FOR (T2) (y, precision));
  return result;
}
2204 | |
/* Return the maximum of X and Y, treating both as signed values.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::smax (const T1 &x, const T2 &y)
{
  return wi::max (x, y, SIGNED);
}
2212 | |
/* Return the maximum of X and Y, treating both as unsigned values.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::umax (const T1 &x, const T2 &y)
{
  return wi::max (x, y, UNSIGNED);
}
2220 | |
/* Return X & Y.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::bit_and (const T1 &x, const T2 &y)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  /* The result is known sign-extended only if both inputs are.  */
  bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
  /* Fast path: both values are single HWIs.  */
  if (__builtin_expect (xi.len + yi.len == 2, true))
    {
      val[0] = xi.ulow () & yi.ulow ();
      result.set_len (1, is_sign_extended);
    }
  else
    result.set_len (and_large (val, xi.val, xi.len, yi.val, yi.len,
			       precision), is_sign_extended);
  return result;
}
2241 | |
/* Return X & ~Y.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::bit_and_not (const T1 &x, const T2 &y)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  /* The result is known sign-extended only if both inputs are.  */
  bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
  /* Fast path: both values are single HWIs.  */
  if (__builtin_expect (xi.len + yi.len == 2, true))
    {
      val[0] = xi.ulow () & ~yi.ulow ();
      result.set_len (1, is_sign_extended);
    }
  else
    result.set_len (and_not_large (val, xi.val, xi.len, yi.val, yi.len,
				   precision), is_sign_extended);
  return result;
}
2262 | |
/* Return X | Y.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::bit_or (const T1 &x, const T2 &y)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  /* The result is known sign-extended only if both inputs are.  */
  bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
  /* Fast path: both values are single HWIs.  */
  if (__builtin_expect (xi.len + yi.len == 2, true))
    {
      val[0] = xi.ulow () | yi.ulow ();
      result.set_len (1, is_sign_extended);
    }
  else
    result.set_len (or_large (val, xi.val, xi.len,
			      yi.val, yi.len, precision), is_sign_extended);
  return result;
}
2283 | |
/* Return X | ~Y.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::bit_or_not (const T1 &x, const T2 &y)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  /* The result is known sign-extended only if both inputs are.  */
  bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
  /* Fast path: both values are single HWIs.  */
  if (__builtin_expect (xi.len + yi.len == 2, true))
    {
      val[0] = xi.ulow () | ~yi.ulow ();
      result.set_len (1, is_sign_extended);
    }
  else
    result.set_len (or_not_large (val, xi.val, xi.len, yi.val, yi.len,
				  precision), is_sign_extended);
  return result;
}
2304 | |
/* Return X ^ Y.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::bit_xor (const T1 &x, const T2 &y)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  /* The result is known sign-extended only if both inputs are.  */
  bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
  /* Fast path: both values are single HWIs.  */
  if (__builtin_expect (xi.len + yi.len == 2, true))
    {
      val[0] = xi.ulow () ^ yi.ulow ();
      result.set_len (1, is_sign_extended);
    }
  else
    result.set_len (xor_large (val, xi.val, xi.len,
			       yi.val, yi.len, precision), is_sign_extended);
  return result;
}
2325 | |
/* Return X + Y.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::add (const T1 &x, const T2 &y)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  if (precision <= HOST_BITS_PER_WIDE_INT)
    {
      val[0] = xi.ulow () + yi.ulow ();
      result.set_len (1);
    }
  /* If the precision is known at compile time to be greater than
     HOST_BITS_PER_WIDE_INT, we can optimize the single-HWI case
     knowing that (a) all bits in those HWIs are significant and
     (b) the result has room for at least two HWIs.  This provides
     a fast path for things like offset_int and widest_int.

     The STATIC_CONSTANT_P test prevents this path from being
     used for wide_ints.  wide_ints with precisions greater than
     HOST_BITS_PER_WIDE_INT are relatively rare and there's not much
     point handling them inline.  */
  else if (STATIC_CONSTANT_P (precision > HOST_BITS_PER_WIDE_INT)
	   && __builtin_expect (xi.len + yi.len == 2, true))
    {
      unsigned HOST_WIDE_INT xl = xi.ulow ();
      unsigned HOST_WIDE_INT yl = yi.ulow ();
      unsigned HOST_WIDE_INT resultl = xl + yl;
      /* Tentative second block: the sign extension of the low sum.  */
      val[0] = resultl;
      val[1] = (HOST_WIDE_INT) resultl < 0 ? 0 : -1;
      /* The second block is needed only when the sign of the low sum
	 differs from the sign of both operands (signed carry/borrow).  */
      result.set_len (1 + (((resultl ^ xl) & (resultl ^ yl))
			   >> (HOST_BITS_PER_WIDE_INT - 1)));
    }
  else
    /* General case: no overflow reporting requested (null pointer).  */
    result.set_len (add_large (val, xi.val, xi.len,
			       yi.val, yi.len, precision,
			       UNSIGNED, 0));
  return result;
}
2367 | |
/* Return X + Y.  Treat X and Y as having the signedness given by SGN
   and indicate in *OVERFLOW whether the operation overflowed.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::add (const T1 &x, const T2 &y, signop sgn, bool *overflow)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  if (precision <= HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT xl = xi.ulow ();
      unsigned HOST_WIDE_INT yl = yi.ulow ();
      unsigned HOST_WIDE_INT resultl = xl + yl;
      /* Signed overflow: the result's sign bit (bit PRECISION - 1)
	 differs from the sign of both operands.  */
      if (sgn == SIGNED)
	*overflow = (((resultl ^ xl) & (resultl ^ yl))
		     >> (precision - 1)) & 1;
      /* Unsigned overflow: after discarding bits above PRECISION,
	 the sum wrapped below one of the addends.  */
      else
	*overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision))
		     < (xl << (HOST_BITS_PER_WIDE_INT - precision)));
      val[0] = resultl;
      result.set_len (1);
    }
  else
    result.set_len (add_large (val, xi.val, xi.len,
			       yi.val, yi.len, precision,
			       sgn, overflow));
  return result;
}
2398 | |
/* Return X - Y.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::sub (const T1 &x, const T2 &y)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  if (precision <= HOST_BITS_PER_WIDE_INT)
    {
      val[0] = xi.ulow () - yi.ulow ();
      result.set_len (1);
    }
  /* If the precision is known at compile time to be greater than
     HOST_BITS_PER_WIDE_INT, we can optimize the single-HWI case
     knowing that (a) all bits in those HWIs are significant and
     (b) the result has room for at least two HWIs.  This provides
     a fast path for things like offset_int and widest_int.

     The STATIC_CONSTANT_P test prevents this path from being
     used for wide_ints.  wide_ints with precisions greater than
     HOST_BITS_PER_WIDE_INT are relatively rare and there's not much
     point handling them inline.  */
  else if (STATIC_CONSTANT_P (precision > HOST_BITS_PER_WIDE_INT)
	   && __builtin_expect (xi.len + yi.len == 2, true))
    {
      unsigned HOST_WIDE_INT xl = xi.ulow ();
      unsigned HOST_WIDE_INT yl = yi.ulow ();
      unsigned HOST_WIDE_INT resultl = xl - yl;
      /* Tentative second block: the sign extension of the low result.  */
      val[0] = resultl;
      val[1] = (HOST_WIDE_INT) resultl < 0 ? 0 : -1;
      /* The second block is needed only when the operands' signs differ
	 and the result's sign differs from the minuend's.  */
      result.set_len (1 + (((resultl ^ xl) & (xl ^ yl))
			   >> (HOST_BITS_PER_WIDE_INT - 1)));
    }
  else
    /* General case: no overflow reporting requested (null pointer).  */
    result.set_len (sub_large (val, xi.val, xi.len,
			       yi.val, yi.len, precision,
			       UNSIGNED, 0));
  return result;
}
2440 | |
/* Return X - Y.  Treat X and Y as having the signedness given by SGN
   and indicate in *OVERFLOW whether the operation overflowed.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::sub (const T1 &x, const T2 &y, signop sgn, bool *overflow)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  if (precision <= HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT xl = xi.ulow ();
      unsigned HOST_WIDE_INT yl = yi.ulow ();
      unsigned HOST_WIDE_INT resultl = xl - yl;
      if (sgn == SIGNED)
	/* Signed overflow iff X and Y have different signs and the
	   result's sign (bit PRECISION - 1) differs from X's.  */
	*overflow = (((xl ^ yl) & (resultl ^ xl)) >> (precision - 1)) & 1;
      else
	/* Shift so that bit PRECISION - 1 becomes the HWI's top bit;
	   unsigned overflow (borrow) iff the truncated result exceeds
	   the truncated X.  */
	*overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision))
		     > (xl << (HOST_BITS_PER_WIDE_INT - precision)));
      val[0] = resultl;
      result.set_len (1);
    }
  else
    result.set_len (sub_large (val, xi.val, xi.len,
			       yi.val, yi.len, precision,
			       sgn, overflow));
  return result;
}
2470 | |
2471 | /* Return X * Y. */ |
2472 | template <typename T1, typename T2> |
2473 | inline WI_BINARY_RESULT (T1, T2) |
2474 | wi::mul (const T1 &x, const T2 &y) |
2475 | { |
2476 | WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y); |
2477 | unsigned int precision = get_precision (result); |
2478 | WIDE_INT_REF_FOR (T1) xi (x, precision); |
2479 | WIDE_INT_REF_FOR (T2) yi (y, precision); |
2480 | if (precision <= HOST_BITS_PER_WIDE_INT) |
2481 | { |
2482 | val[0] = xi.ulow () * yi.ulow (); |
2483 | result.set_len (1); |
2484 | } |
2485 | else |
2486 | result.set_len (mul_internal (val, xi.val, xi.len, yi.val, yi.len, |
2487 | precision, UNSIGNED, 0, false)); |
2488 | return result; |
2489 | } |
2490 | |
/* Return X * Y.  Treat X and Y as having the signedness given by SGN
   and indicate in *OVERFLOW whether the operation overflowed.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::mul (const T1 &x, const T2 &y, signop sgn, bool *overflow)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  /* Overflow detection requires the full product, so always use the
     out-of-line worker rather than an inline single-HWI fast path.  */
  result.set_len (mul_internal (val, xi.val, xi.len,
				yi.val, yi.len, precision,
				sgn, overflow, false));
  return result;
}
2506 | |
/* Return X * Y, treating both X and Y as signed values.  Indicate in
   *OVERFLOW whether the operation overflowed.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::smul (const T1 &x, const T2 &y, bool *overflow)
{
  /* Convenience wrapper around the signop form of mul.  */
  return mul (x, y, SIGNED, overflow);
}
2515 | |
/* Return X * Y, treating both X and Y as unsigned values.  Indicate in
   *OVERFLOW whether the operation overflowed.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::umul (const T1 &x, const T2 &y, bool *overflow)
{
  /* Convenience wrapper around the signop form of mul.  */
  return mul (x, y, UNSIGNED, overflow);
}
2524 | |
/* Perform a widening multiplication of X and Y, extending the values
   according to SGN, and return the high part of the result.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::mul_high (const T1 &x, const T2 &y, signop sgn)
{
  WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y, precision);
  /* The final "true" asks mul_internal for the high half of the
     double-width product; no overflow reporting is needed.  */
  result.set_len (mul_internal (val, xi.val, xi.len,
				yi.val, yi.len, precision,
				sgn, 0, true));
  return result;
}
2540 | |
/* Return X / Y, rounding towards 0.  Treat X and Y as having the
   signedness given by SGN.  Indicate in *OVERFLOW if the result
   overflows.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::div_trunc (const T1 &x, const T2 &y, signop sgn, bool *overflow)
{
  WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
  unsigned int precision = get_precision (quotient);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y);

  /* Pass null remainder pointers: only the quotient is wanted.  */
  quotient.set_len (divmod_internal (quotient_val, 0, 0, xi.val, xi.len,
				     precision,
				     yi.val, yi.len, yi.precision,
				     sgn, overflow));
  return quotient;
}
2559 | |
/* Return X / Y, rounding towards 0.  Treat X and Y as signed values.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::sdiv_trunc (const T1 &x, const T2 &y)
{
  return div_trunc (x, y, SIGNED);
}
2567 | |
/* Return X / Y, rounding towards 0.  Treat X and Y as unsigned values.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::udiv_trunc (const T1 &x, const T2 &y)
{
  return div_trunc (x, y, UNSIGNED);
}
2575 | |
/* Return X / Y, rounding towards -inf.  Treat X and Y as having the
   signedness given by SGN.  Indicate in *OVERFLOW if the result
   overflows.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::div_floor (const T1 &x, const T2 &y, signop sgn, bool *overflow)
{
  WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
  WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
  unsigned int precision = get_precision (quotient);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y);

  unsigned int remainder_len;
  quotient.set_len (divmod_internal (quotient_val,
				     &remainder_len, remainder_val,
				     xi.val, xi.len, precision,
				     yi.val, yi.len, yi.precision, sgn,
				     overflow));
  remainder.set_len (remainder_len);
  /* The truncating quotient rounds towards zero; when the operands
     have opposite signs and the division is inexact, floor rounding
     requires stepping one further towards -inf.  */
  if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn) && remainder != 0)
    return quotient - 1;
  return quotient;
}
2600 | |
/* Return X / Y, rounding towards -inf.  Treat X and Y as signed values.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::sdiv_floor (const T1 &x, const T2 &y)
{
  return div_floor (x, y, SIGNED);
}
2608 | |
/* Return X / Y, rounding towards -inf.  Treat X and Y as unsigned values.  */
/* ??? Why do we have both this and udiv_trunc.  Aren't they the same?  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::udiv_floor (const T1 &x, const T2 &y)
{
  return div_floor (x, y, UNSIGNED);
}
2617 | |
/* Return X / Y, rounding towards +inf.  Treat X and Y as having the
   signedness given by SGN.  Indicate in *OVERFLOW if the result
   overflows.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::div_ceil (const T1 &x, const T2 &y, signop sgn, bool *overflow)
{
  WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
  WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
  unsigned int precision = get_precision (quotient);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y);

  unsigned int remainder_len;
  quotient.set_len (divmod_internal (quotient_val,
				     &remainder_len, remainder_val,
				     xi.val, xi.len, precision,
				     yi.val, yi.len, yi.precision, sgn,
				     overflow));
  remainder.set_len (remainder_len);
  /* When the operands have the same sign (always true for UNSIGNED,
     since neg_p is then false for both) and the division is inexact,
     ceiling rounding requires stepping one further towards +inf.  */
  if (wi::neg_p (x, sgn) == wi::neg_p (y, sgn) && remainder != 0)
    return quotient + 1;
  return quotient;
}
2642 | |
/* Return X / Y, rounding towards nearest with ties away from zero.
   Treat X and Y as having the signedness given by SGN.  Indicate
   in *OVERFLOW if the result overflows.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::div_round (const T1 &x, const T2 &y, signop sgn, bool *overflow)
{
  WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
  WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
  unsigned int precision = get_precision (quotient);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y);

  unsigned int remainder_len;
  quotient.set_len (divmod_internal (quotient_val,
				     &remainder_len, remainder_val,
				     xi.val, xi.len, precision,
				     yi.val, yi.len, yi.precision, sgn,
				     overflow));
  remainder.set_len (remainder_len);

  if (remainder != 0)
    {
      if (sgn == SIGNED)
	{
	  /* Round away from zero iff |remainder| >= |y| - |remainder|,
	     i.e. iff |remainder| >= |y| / 2 (with ties rounding away).  */
	  WI_BINARY_RESULT (T1, T2) abs_remainder = wi::abs (remainder);
	  if (wi::geu_p (abs_remainder, wi::sub (wi::abs (y), abs_remainder)))
	    {
	      /* Step the truncating quotient one further away from zero,
		 in the direction implied by the operands' signs.  */
	      if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn))
		return quotient - 1;
	      else
		return quotient + 1;
	    }
	}
      else
	{
	  /* Unsigned: round up iff remainder >= y - remainder.  */
	  if (wi::geu_p (remainder, wi::sub (y, remainder)))
	    return quotient + 1;
	}
    }
  return quotient;
}
2685 | |
/* Return X / Y, rounding towards 0.  Treat X and Y as having the
   signedness given by SGN.  Store the remainder in *REMAINDER_PTR.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::divmod_trunc (const T1 &x, const T2 &y, signop sgn,
		  WI_BINARY_RESULT (T1, T2) *remainder_ptr)
{
  WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
  WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
  unsigned int precision = get_precision (quotient);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y);

  unsigned int remainder_len;
  /* Overflow is not reported (null overflow pointer).  */
  quotient.set_len (divmod_internal (quotient_val,
				     &remainder_len, remainder_val,
				     xi.val, xi.len, precision,
				     yi.val, yi.len, yi.precision, sgn, 0));
  remainder.set_len (remainder_len);

  *remainder_ptr = remainder;
  return quotient;
}
2709 | |
/* Compute the greatest common divisor of two numbers A and B using
   Euclid's algorithm.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::gcd (const T1 &a, const T2 &b, signop sgn)
{
  /* NOTE(review): the intermediates are all of type T1 even though B
     is a T2; this assumes wi::abs (b) converts to T1 — confirm against
     the callers' type combinations.  */
  T1 x, y, z;

  /* Work on absolute values so the loop terminates for negative
     signed inputs.  */
  x = wi::abs (a);
  y = wi::abs (b);

  /* Standard Euclid: replace (x, y) with (y mod x, x) until x is 0;
     the surviving value y is the GCD.  */
  while (gt_p (x, 0, sgn))
    {
      z = mod_trunc (y, x, sgn);
      y = x;
      x = z;
    }

  return y;
}
2730 | |
/* Compute X / Y, rounding towards 0, and return the remainder.
   Treat X and Y as having the signedness given by SGN.  Indicate
   in *OVERFLOW if the division overflows.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::mod_trunc (const T1 &x, const T2 &y, signop sgn, bool *overflow)
{
  WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
  unsigned int precision = get_precision (remainder);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y);

  unsigned int remainder_len;
  /* Pass a null quotient pointer: only the remainder is wanted.  */
  divmod_internal (0, &remainder_len, remainder_val,
		   xi.val, xi.len, precision,
		   yi.val, yi.len, yi.precision, sgn, overflow);
  remainder.set_len (remainder_len);

  return remainder;
}
2751 | |
/* Compute X / Y, rounding towards 0, and return the remainder.
   Treat X and Y as signed values.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::smod_trunc (const T1 &x, const T2 &y)
{
  return mod_trunc (x, y, SIGNED);
}
2760 | |
/* Compute X / Y, rounding towards 0, and return the remainder.
   Treat X and Y as unsigned values.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::umod_trunc (const T1 &x, const T2 &y)
{
  return mod_trunc (x, y, UNSIGNED);
}
2769 | |
/* Compute X / Y, rounding towards -inf, and return the remainder.
   Treat X and Y as having the signedness given by SGN.  Indicate
   in *OVERFLOW if the division overflows.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::mod_floor (const T1 &x, const T2 &y, signop sgn, bool *overflow)
{
  WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
  WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
  unsigned int precision = get_precision (quotient);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y);

  unsigned int remainder_len;
  quotient.set_len (divmod_internal (quotient_val,
				     &remainder_len, remainder_val,
				     xi.val, xi.len, precision,
				     yi.val, yi.len, yi.precision, sgn,
				     overflow));
  remainder.set_len (remainder_len);

  /* The truncating remainder has X's sign; a floor quotient that was
     rounded down by one must have Y added to the remainder so that
     quotient * Y + remainder == X still holds.  */
  if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn) && remainder != 0)
    return remainder + y;
  return remainder;
}
2795 | |
/* Compute X / Y, rounding towards -inf, and return the remainder.
   Treat X and Y as unsigned values.  */
/* ??? Why do we have both this and umod_trunc.  Aren't they the same?  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::umod_floor (const T1 &x, const T2 &y)
{
  return mod_floor (x, y, UNSIGNED);
}
2805 | |
/* Compute X / Y, rounding towards +inf, and return the remainder.
   Treat X and Y as having the signedness given by SGN.  Indicate
   in *OVERFLOW if the division overflows.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::mod_ceil (const T1 &x, const T2 &y, signop sgn, bool *overflow)
{
  WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
  WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
  unsigned int precision = get_precision (quotient);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y);

  unsigned int remainder_len;
  quotient.set_len (divmod_internal (quotient_val,
				     &remainder_len, remainder_val,
				     xi.val, xi.len, precision,
				     yi.val, yi.len, yi.precision, sgn,
				     overflow));
  remainder.set_len (remainder_len);

  /* A ceiling quotient that was rounded up by one must have Y
     subtracted from the remainder so that quotient * Y + remainder == X
     still holds.  */
  if (wi::neg_p (x, sgn) == wi::neg_p (y, sgn) && remainder != 0)
    return remainder - y;
  return remainder;
}
2831 | |
/* Compute X / Y, rounding towards nearest with ties away from zero,
   and return the remainder.  Treat X and Y as having the signedness
   given by SGN.  Indicate in *OVERFLOW if the division overflows.  */
template <typename T1, typename T2>
inline WI_BINARY_RESULT (T1, T2)
wi::mod_round (const T1 &x, const T2 &y, signop sgn, bool *overflow)
{
  WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
  WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
  unsigned int precision = get_precision (quotient);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y);

  unsigned int remainder_len;
  quotient.set_len (divmod_internal (quotient_val,
				     &remainder_len, remainder_val,
				     xi.val, xi.len, precision,
				     yi.val, yi.len, yi.precision, sgn,
				     overflow));
  remainder.set_len (remainder_len);

  if (remainder != 0)
    {
      if (sgn == SIGNED)
	{
	  /* Round away from zero iff |remainder| >= |y| - |remainder|,
	     i.e. iff |remainder| >= |y| / 2 (with ties rounding away);
	     in that case adjust the remainder by +/-Y to match the
	     adjusted quotient.  */
	  WI_BINARY_RESULT (T1, T2) abs_remainder = wi::abs (remainder);
	  if (wi::geu_p (abs_remainder, wi::sub (wi::abs (y), abs_remainder)))
	    {
	      if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn))
		return remainder + y;
	      else
		return remainder - y;
	    }
	}
      else
	{
	  /* Unsigned: quotient was rounded up, so compensate here.  */
	  if (wi::geu_p (remainder, wi::sub (y, remainder)))
	    return remainder - y;
	}
    }
  return remainder;
}
2874 | |
/* Return true if X is a multiple of Y.  Treat X and Y as having the
   signedness given by SGN.  */
template <typename T1, typename T2>
inline bool
wi::multiple_of_p (const T1 &x, const T2 &y, signop sgn)
{
  /* X is a multiple of Y iff the truncating division is exact.  */
  return wi::mod_trunc (x, y, sgn) == 0;
}
2883 | |
2884 | /* Return true if X is a multiple of Y, storing X / Y in *RES if so. |
2885 | Treat X and Y as having the signedness given by SGN. */ |
2886 | template <typename T1, typename T2> |
2887 | inline bool |
2888 | wi::multiple_of_p (const T1 &x, const T2 &y, signop sgn, |
2889 | WI_BINARY_RESULT (T1, T2) *res) |
2890 | { |
2891 | WI_BINARY_RESULT (T1, T2) remainder; |
2892 | WI_BINARY_RESULT (T1, T2) quotient |
2893 | = divmod_trunc (x, y, sgn, &remainder); |
2894 | if (remainder == 0) |
2895 | { |
2896 | *res = quotient; |
2897 | return true; |
2898 | } |
2899 | return false; |
2900 | } |
2901 | |
/* Return X << Y.  Return 0 if Y is greater than or equal to
   the precision of X.  */
template <typename T1, typename T2>
inline WI_UNARY_RESULT (T1)
wi::lshift (const T1 &x, const T2 &y)
{
  WI_UNARY_RESULT_VAR (result, val, T1, x);
  unsigned int precision = get_precision (result);
  WIDE_INT_REF_FOR (T1) xi (x, precision);
  WIDE_INT_REF_FOR (T2) yi (y);
  /* Handle the simple cases quickly.  */
  if (geu_p (yi, precision))
    {
      /* Shifting by the full precision or more yields zero.  */
      val[0] = 0;
      result.set_len (1);
    }
  else
    {
      unsigned int shift = yi.to_uhwi ();
      /* For fixed-precision integers like offset_int and widest_int,
	 handle the case where the shift value is constant and the
	 result is a single nonnegative HWI (meaning that we don't
	 need to worry about val[1]).  This is particularly common
	 for converting a byte count to a bit count.

	 For variable-precision integers like wide_int, handle HWI
	 and sub-HWI integers inline.  */
      if (STATIC_CONSTANT_P (xi.precision > HOST_BITS_PER_WIDE_INT)
	  ? (STATIC_CONSTANT_P (shift < HOST_BITS_PER_WIDE_INT - 1)
	     && xi.len == 1
	     /* Check that the shifted value still fits in a single
		nonnegative HWI.  */
	     && xi.val[0] <= (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT)
					      HOST_WIDE_INT_MAX >> shift))
	  : precision <= HOST_BITS_PER_WIDE_INT)
	{
	  val[0] = xi.ulow () << shift;
	  result.set_len (1);
	}
      else
	result.set_len (lshift_large (val, xi.val, xi.len,
				      precision, shift));
    }
  return result;
}
2945 | |
/* Return X >> Y, using a logical shift.  Return 0 if Y is greater than
   or equal to the precision of X.  */
template <typename T1, typename T2>
inline WI_UNARY_RESULT (T1)
wi::lrshift (const T1 &x, const T2 &y)
{
  WI_UNARY_RESULT_VAR (result, val, T1, x);
  /* Do things in the precision of the input rather than the output,
     since the result can be no larger than that.  */
  WIDE_INT_REF_FOR (T1) xi (x);
  WIDE_INT_REF_FOR (T2) yi (y);
  /* Handle the simple cases quickly.  */
  if (geu_p (yi, xi.precision))
    {
      /* Shifting by the full precision or more yields zero.  */
      val[0] = 0;
      result.set_len (1);
    }
  else
    {
      unsigned int shift = yi.to_uhwi ();
      /* For fixed-precision integers like offset_int and widest_int,
	 handle the case where the shift value is constant and the
	 shifted value is a single nonnegative HWI (meaning that all
	 bits above the HWI are zero).  This is particularly common
	 for converting a bit count to a byte count.

	 For variable-precision integers like wide_int, handle HWI
	 and sub-HWI integers inline.  */
      if (STATIC_CONSTANT_P (xi.precision > HOST_BITS_PER_WIDE_INT)
	  ? (shift < HOST_BITS_PER_WIDE_INT
	     && xi.len == 1
	     && xi.val[0] >= 0)
	  : xi.precision <= HOST_BITS_PER_WIDE_INT)
	{
	  /* to_uhwi zero-extends within the precision, so a plain
	     unsigned shift gives the logical result.  */
	  val[0] = xi.to_uhwi () >> shift;
	  result.set_len (1);
	}
      else
	result.set_len (lrshift_large (val, xi.val, xi.len, xi.precision,
				       get_precision (result), shift));
    }
  return result;
}
2989 | |
/* Return X >> Y, using an arithmetic shift.  Return a sign mask if
   Y is greater than or equal to the precision of X.  */
template <typename T1, typename T2>
inline WI_UNARY_RESULT (T1)
wi::arshift (const T1 &x, const T2 &y)
{
  WI_UNARY_RESULT_VAR (result, val, T1, x);
  /* Do things in the precision of the input rather than the output,
     since the result can be no larger than that.  */
  WIDE_INT_REF_FOR (T1) xi (x);
  WIDE_INT_REF_FOR (T2) yi (y);
  /* Handle the simple cases quickly.  */
  if (geu_p (yi, xi.precision))
    {
      /* Oversize arithmetic shifts saturate to all-zeros or all-ones
	 depending on X's sign.  */
      val[0] = sign_mask (x);
      result.set_len (1);
    }
  else
    {
      unsigned int shift = yi.to_uhwi ();
      if (xi.precision <= HOST_BITS_PER_WIDE_INT)
	{
	  /* Shift logically, then sign-extend from the new top bit
	     of the (precision - shift)-bit result.  */
	  val[0] = sext_hwi (xi.ulow () >> shift, xi.precision - shift);
	  result.set_len (1, true);
	}
      else
	result.set_len (arshift_large (val, xi.val, xi.len, xi.precision,
				       get_precision (result), shift));
    }
  return result;
}
3021 | |
3022 | /* Return X >> Y, using an arithmetic shift if SGN is SIGNED and a |
3023 | logical shift otherwise. */ |
3024 | template <typename T1, typename T2> |
3025 | inline WI_UNARY_RESULT (T1) |
3026 | wi::rshift (const T1 &x, const T2 &y, signop sgn) |
3027 | { |
3028 | if (sgn == UNSIGNED) |
3029 | return lrshift (x, y); |
3030 | else |
3031 | return arshift (x, y); |
3032 | } |
3033 | |
/* Return the result of rotating the low WIDTH bits of X left by Y
   bits and zero-extending the result.  Use a full-width rotate if
   WIDTH is zero.  */
template <typename T1, typename T2>
WI_UNARY_RESULT (T1)
wi::lrotate (const T1 &x, const T2 &y, unsigned int width)
{
  unsigned int precision = get_binary_precision (x, x);
  if (width == 0)
    width = precision;
  /* Reduce the rotate count modulo WIDTH so the two shift counts
     below are both in range.  */
  WI_UNARY_RESULT (T2) ymod = umod_trunc (y, width);
  /* A rotate is the OR of the two complementary shifts.  */
  WI_UNARY_RESULT (T1) left = wi::lshift (x, ymod);
  WI_UNARY_RESULT (T1) right = wi::lrshift (x, wi::sub (width, ymod));
  if (width != precision)
    /* Sub-width rotate: mask both halves down to WIDTH bits.  */
    return wi::zext (left, width) | wi::zext (right, width);
  return left | right;
}
3051 | |
/* Return the result of rotating the low WIDTH bits of X right by Y
   bits and zero-extending the result.  Use a full-width rotate if
   WIDTH is zero.  */
template <typename T1, typename T2>
WI_UNARY_RESULT (T1)
wi::rrotate (const T1 &x, const T2 &y, unsigned int width)
{
  unsigned int precision = get_binary_precision (x, x);
  if (width == 0)
    width = precision;
  /* Reduce the rotate count modulo WIDTH so the two shift counts
     below are both in range.  */
  WI_UNARY_RESULT (T2) ymod = umod_trunc (y, width);
  /* A rotate is the OR of the two complementary shifts.  */
  WI_UNARY_RESULT (T1) right = wi::lrshift (x, ymod);
  WI_UNARY_RESULT (T1) left = wi::lshift (x, wi::sub (width, ymod));
  if (width != precision)
    /* Sub-width rotate: mask both halves down to WIDTH bits.  */
    return wi::zext (left, width) | wi::zext (right, width);
  return left | right;
}
3069 | |
/* Return 0 if the number of 1s in X is even and 1 if the number of 1s
   is odd.  */
inline int
wi::parity (const wide_int_ref &x)
{
  /* The parity is the low bit of the population count.  */
  return popcount (x) & 1;
}
3077 | |
3078 | /* Extract WIDTH bits from X, starting at BITPOS. */ |
3079 | template <typename T> |
3080 | inline unsigned HOST_WIDE_INT |
3081 | wi:: (const T &x, unsigned int bitpos, unsigned int width) |
3082 | { |
3083 | unsigned precision = get_precision (x); |
3084 | if (precision < bitpos + width) |
3085 | precision = bitpos + width; |
3086 | WIDE_INT_REF_FOR (T) xi (x, precision); |
3087 | |
3088 | /* Handle this rare case after the above, so that we assert about |
3089 | bogus BITPOS values. */ |
3090 | if (width == 0) |
3091 | return 0; |
3092 | |
3093 | unsigned int start = bitpos / HOST_BITS_PER_WIDE_INT; |
3094 | unsigned int shift = bitpos % HOST_BITS_PER_WIDE_INT; |
3095 | unsigned HOST_WIDE_INT res = xi.elt (start); |
3096 | res >>= shift; |
3097 | if (shift + width > HOST_BITS_PER_WIDE_INT) |
3098 | { |
3099 | unsigned HOST_WIDE_INT upper = xi.elt (start + 1); |
3100 | res |= upper << (-shift % HOST_BITS_PER_WIDE_INT); |
3101 | } |
3102 | return zext_hwi (res, width); |
3103 | } |
3104 | |
3105 | /* Return the minimum precision needed to store X with sign SGN. */ |
3106 | template <typename T> |
3107 | inline unsigned int |
3108 | wi::min_precision (const T &x, signop sgn) |
3109 | { |
3110 | if (sgn == SIGNED) |
3111 | return get_precision (x) - clrsb (x); |
3112 | else |
3113 | return get_precision (x) - clz (x); |
3114 | } |
3115 | |
/* Define the signed comparison operator OP in terms of the wi::
   predicate F.  WI_SIGNED_BINARY_PREDICATE_RESULT restricts these
   operators to type combinations for which a signed comparison is
   meaningful.  */
#define SIGNED_BINARY_PREDICATE(OP, F) \
template <typename T1, typename T2> \
inline WI_SIGNED_BINARY_PREDICATE_RESULT (T1, T2) \
OP (const T1 &x, const T2 &y) \
{ \
return wi::F (x, y); \
}

SIGNED_BINARY_PREDICATE (operator <, lts_p)
SIGNED_BINARY_PREDICATE (operator <=, les_p)
SIGNED_BINARY_PREDICATE (operator >, gts_p)
SIGNED_BINARY_PREDICATE (operator >=, ges_p)

#undef SIGNED_BINARY_PREDICATE
3130 | |
/* Define the unary operator OP for generic_wide_int in terms of the
   wi:: function F.  */
#define UNARY_OPERATOR(OP, F) \
template<typename T> \
WI_UNARY_RESULT (generic_wide_int<T>) \
OP (const generic_wide_int<T> &x) \
{ \
return wi::F (x); \
}

/* Define the binary predicate OP in terms of the wi:: predicate F.  */
#define BINARY_PREDICATE(OP, F) \
template<typename T1, typename T2> \
WI_BINARY_PREDICATE_RESULT (T1, T2) \
OP (const T1 &x, const T2 &y) \
{ \
return wi::F (x, y); \
}

/* Define the binary operator OP in terms of the wi:: function F.  */
#define BINARY_OPERATOR(OP, F) \
template<typename T1, typename T2> \
WI_BINARY_OPERATOR_RESULT (T1, T2) \
OP (const T1 &x, const T2 &y) \
{ \
return wi::F (x, y); \
}

UNARY_OPERATOR (operator ~, bit_not)
UNARY_OPERATOR (operator -, neg)
BINARY_PREDICATE (operator ==, eq_p)
BINARY_PREDICATE (operator !=, ne_p)
BINARY_OPERATOR (operator &, bit_and)
BINARY_OPERATOR (operator |, bit_or)
BINARY_OPERATOR (operator ^, bit_xor)
BINARY_OPERATOR (operator +, add)
BINARY_OPERATOR (operator -, sub)
BINARY_OPERATOR (operator *, mul)

#undef UNARY_OPERATOR
#undef BINARY_PREDICATE
#undef BINARY_OPERATOR
3169 | |
/* Left-shift operator, restricted by WI_SIGNED_SHIFT_RESULT to type
   combinations where a shift makes sense.  */
template <typename T1, typename T2>
inline WI_SIGNED_SHIFT_RESULT (T1, T2)
operator << (const T1 &x, const T2 &y)
{
  return wi::lshift (x, y);
}
3176 | |
/* Right-shift operator.  Note that this uses an arithmetic (sign
   extending) shift; use wi::lrshift explicitly for a logical shift.  */
template <typename T1, typename T2>
inline WI_SIGNED_SHIFT_RESULT (T1, T2)
operator >> (const T1 &x, const T2 &y)
{
  return wi::arshift (x, y);
}
3183 | |
/* Garbage-collection and precompiled-header hooks for
   generic_wide_int.  Wide ints contain no GC references, so all of
   these are no-ops.  */
template<typename T>
void
gt_ggc_mx (generic_wide_int <T> *)
{
}

template<typename T>
void
gt_pch_nx (generic_wide_int <T> *)
{
}

template<typename T>
void
gt_pch_nx (generic_wide_int <T> *, void (*) (void *, void *), void *)
{
}
3201 | |
/* Garbage-collection and precompiled-header hooks for
   trailing_wide_ints.  These hold no GC references, so all of these
   are no-ops.  */
template<int N>
void
gt_ggc_mx (trailing_wide_ints <N> *)
{
}

template<int N>
void
gt_pch_nx (trailing_wide_ints <N> *)
{
}

template<int N>
void
gt_pch_nx (trailing_wide_ints <N> *, void (*) (void *, void *), void *)
{
}
3219 | |
namespace wi
{
  /* Used for overloaded functions in which the only other acceptable
     scalar type is a pointer.  It stops a plain 0 from being treated
     as a null pointer.  */
  struct never_used1 {};
  struct never_used2 {};

  /* Minimum and maximum values representable in the given precision
     with the given signedness.  */
  wide_int min_value (unsigned int, signop);
  wide_int min_value (never_used1 *);
  wide_int min_value (never_used2 *);
  wide_int max_value (unsigned int, signop);
  wide_int max_value (never_used1 *);
  wide_int max_value (never_used2 *);

  /* FIXME: this is target dependent, so should be elsewhere.
     It also seems to assume that CHAR_BIT == BITS_PER_UNIT.  */
  wide_int from_buffer (const unsigned char *, unsigned int);

#ifndef GENERATOR_FILE
  void to_mpz (const wide_int_ref &, mpz_t, signop);
#endif

  /* wide_int flavors of the mask-building functions defined below.  */
  wide_int mask (unsigned int, bool, unsigned int);
  wide_int shifted_mask (unsigned int, unsigned int, bool, unsigned int);
  wide_int set_bit_in_zero (unsigned int, unsigned int);
  wide_int insert (const wide_int &x, const wide_int &y, unsigned int,
		   unsigned int);

  /* Fixed-precision (e.g. offset_int, widest_int) flavors.  */
  template <typename T>
  T mask (unsigned int, bool);

  template <typename T>
  T shifted_mask (unsigned int, unsigned int, bool);

  template <typename T>
  T set_bit_in_zero (unsigned int);

  /* Low-level workers that write directly into an HWI array and
     return the encoded length.  */
  unsigned int mask (HOST_WIDE_INT *, unsigned int, bool, unsigned int);
  unsigned int shifted_mask (HOST_WIDE_INT *, unsigned int, unsigned int,
			     bool, unsigned int);
  unsigned int from_array (HOST_WIDE_INT *, const HOST_WIDE_INT *,
			   unsigned int, unsigned int, bool);
}
3264 | |
/* Return a PRECISION-bit integer in which the low WIDTH bits are set
   and the other bits are clear, or the inverse if NEGATE_P.  */
inline wide_int
wi::mask (unsigned int width, bool negate_p, unsigned int precision)
{
  /* Build the mask directly into the wide_int's storage via the
     HWI-array worker.  */
  wide_int result = wide_int::create (precision);
  result.set_len (mask (result.write_val (), width, negate_p, precision));
  return result;
}
3274 | |
/* Return a PRECISION-bit integer in which the low START bits are clear,
   the next WIDTH bits are set, and the other bits are clear,
   or the inverse if NEGATE_P.  */
inline wide_int
wi::shifted_mask (unsigned int start, unsigned int width, bool negate_p,
		  unsigned int precision)
{
  /* Build the mask directly into the wide_int's storage via the
     HWI-array worker.  */
  wide_int result = wide_int::create (precision);
  result.set_len (shifted_mask (result.write_val (), start, width, negate_p,
				precision));
  return result;
}
3287 | |
3288 | /* Return a PRECISION-bit integer in which bit BIT is set and all the |
3289 | others are clear. */ |
3290 | inline wide_int |
3291 | wi::set_bit_in_zero (unsigned int bit, unsigned int precision) |
3292 | { |
3293 | return shifted_mask (bit, 1, false, precision); |
3294 | } |
3295 | |
3296 | /* Return an integer of type T in which the low WIDTH bits are set |
3297 | and the other bits are clear, or the inverse if NEGATE_P. */ |
3298 | template <typename T> |
3299 | inline T |
3300 | wi::mask (unsigned int width, bool negate_p) |
3301 | { |
3302 | STATIC_ASSERT (wi::int_traits<T>::precision); |
3303 | T result; |
3304 | result.set_len (mask (result.write_val (), width, negate_p, |
3305 | wi::int_traits <T>::precision)); |
3306 | return result; |
3307 | } |
3308 | |
3309 | /* Return an integer of type T in which the low START bits are clear, |
3310 | the next WIDTH bits are set, and the other bits are clear, or the |
3311 | inverse if NEGATE_P. */ |
3312 | template <typename T> |
3313 | inline T |
3314 | wi::shifted_mask (unsigned int start, unsigned int width, bool negate_p) |
3315 | { |
3316 | STATIC_ASSERT (wi::int_traits<T>::precision); |
3317 | T result; |
3318 | result.set_len (shifted_mask (result.write_val (), start, width, |
3319 | negate_p, |
3320 | wi::int_traits <T>::precision)); |
3321 | return result; |
3322 | } |
3323 | |
3324 | /* Return an integer of type T in which bit BIT is set and all the |
3325 | others are clear. */ |
3326 | template <typename T> |
3327 | inline T |
3328 | wi::set_bit_in_zero (unsigned int bit) |
3329 | { |
3330 | return shifted_mask <T> (bit, 1, false); |
3331 | } |
3332 | |
3333 | #endif /* WIDE_INT_H */ |
3334 | |