/* Operations with very long integers.
   Copyright (C) 2012-2024 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
20
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "selftest.h"
27
28
#define HOST_BITS_PER_HALF_WIDE_INT 32
#if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
# define HOST_HALF_WIDE_INT long
#elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
# define HOST_HALF_WIDE_INT int
#else
#error Please add support for HOST_HALF_WIDE_INT
#endif

#define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
/* Do not include longlong.h when compiler is clang-based.  See PR61146.  */
#if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
typedef unsigned HOST_HALF_WIDE_INT UHWtype;
typedef unsigned HOST_WIDE_INT UWtype;
typedef unsigned int UQItype __attribute__ ((mode (QI)));
typedef unsigned int USItype __attribute__ ((mode (SI)));
typedef unsigned int UDItype __attribute__ ((mode (DI)));
#if W_TYPE_SIZE == 32
typedef unsigned int UDWtype __attribute__ ((mode (DI)));
#else
typedef unsigned int UDWtype __attribute__ ((mode (TI)));
#endif
#include "longlong.h"
#endif
53
static const HOST_WIDE_INT zeros[1] = {};

/*
 * Internal utilities.
 */

/* Quantities to deal with values that hold half of a wide int.  Used
   in multiply and divide.  */
#define HALF_INT_MASK ((HOST_WIDE_INT_1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)

#define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
#define BLOCKS_NEEDED(PREC) (PREC ? CEIL (PREC, HOST_BITS_PER_WIDE_INT) : 1)
#define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
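
/* As a concrete illustration of the helpers above (values assume a
   64-bit HOST_WIDE_INT; purely an example, not part of the API):

     BLOCKS_NEEDED (0)  == 1	BLOCKS_NEEDED (64)  == 1
     BLOCKS_NEEDED (65) == 2	BLOCKS_NEEDED (128) == 2
     SIGN_MASK (HOST_WIDE_INT_1U << 63) == -1
     SIGN_MASK (1) == 0  */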
67
/* Return the value of VAL[I] if I < LEN, otherwise return 0 or -1
   depending on the sign of the top existing bit of VAL.  */
70
71static unsigned HOST_WIDE_INT
72safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
73{
74 return i < len ? val[i] : val[len - 1] < 0 ? HOST_WIDE_INT_M1 : 0;
75}
76
77/* Convert the integer in VAL to canonical form, returning its new length.
78 LEN is the number of blocks currently in VAL and PRECISION is the number
79 of bits in the integer it represents.
80
81 This function only changes the representation, not the value. */
82static unsigned int
83canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
84{
85 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
86 HOST_WIDE_INT top;
87 int i;
88
89 if (len > blocks_needed)
90 len = blocks_needed;
91
92 if (len == 1)
93 return len;
94
95 top = val[len - 1];
  if (len * HOST_BITS_PER_WIDE_INT > precision)
    val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
98 if (top != 0 && top != HOST_WIDE_INT_M1)
99 return len;
100
101 /* At this point we know that the top is either 0 or -1. Find the
102 first block that is not a copy of this. */
103 for (i = len - 2; i >= 0; i--)
104 {
105 HOST_WIDE_INT x = val[i];
106 if (x != top)
107 {
108 if (SIGN_MASK (x) == top)
109 return i + 1;
110
	  /* We need an extra block because the top bit of block i does
	     not match the extension.  */
113 return i + 2;
114 }
115 }
116
117 /* The number is 0 or -1. */
118 return 1;
119}
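
/* Example of the canonical form that canonize produces (assuming
   64-bit HOST_WIDE_INTs and a 128-bit precision; illustrative only):
   the value 1 stored as { 1, 0 } shrinks to the single block { 1 },
   because the dropped block matches the sign extension of block 0,
   whereas the unsigned value 0xffffffffffffffff must keep the form
   { -1, 0 }, since { -1 } alone would sign-extend to -1.  */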
120
121/* VAL[0] is the unsigned result of an operation. Canonize it by adding
122 another 0 block if needed, and return number of blocks needed. */
123
124static inline unsigned int
125canonize_uhwi (HOST_WIDE_INT *val, unsigned int precision)
126{
127 if (val[0] < 0 && precision > HOST_BITS_PER_WIDE_INT)
128 {
129 val[1] = 0;
130 return 2;
131 }
132 return 1;
133}
134
135/*
136 * Conversion routines in and out of wide_int.
137 */
138
139/* Copy XLEN elements from XVAL to VAL. If NEED_CANON, canonize the
140 result for an integer with precision PRECISION. Return the length
141 of VAL (after any canonization). */
142unsigned int
143wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
144 unsigned int xlen, unsigned int precision, bool need_canon)
145{
146 for (unsigned i = 0; i < xlen; i++)
147 val[i] = xval[i];
  return need_canon ? canonize (val, xlen, precision) : xlen;
149}
150
151/* Construct a wide int from a buffer of length LEN. BUFFER will be
152 read according to byte endianness and word endianness of the target.
153 Only the lower BUFFER_LEN bytes of the result are set; the remaining
154 high bytes are cleared. */
155wide_int
156wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
157{
158 unsigned int precision = buffer_len * BITS_PER_UNIT;
159 wide_int result = wide_int::create (precision);
160 unsigned int words = buffer_len / UNITS_PER_WORD;
161
  /* We have to clear all the bits ourselves, as we merely or in values
     below.  */
164 unsigned int len = BLOCKS_NEEDED (precision);
165 HOST_WIDE_INT *val = result.write_val (0);
166 for (unsigned int i = 0; i < len; ++i)
167 val[i] = 0;
168
169 for (unsigned int byte = 0; byte < buffer_len; byte++)
170 {
171 unsigned int offset;
172 unsigned int index;
173 unsigned int bitpos = byte * BITS_PER_UNIT;
174 unsigned HOST_WIDE_INT value;
175
176 if (buffer_len > UNITS_PER_WORD)
177 {
178 unsigned int word = byte / UNITS_PER_WORD;
179
180 if (WORDS_BIG_ENDIAN)
181 word = (words - 1) - word;
182
183 offset = word * UNITS_PER_WORD;
184
185 if (BYTES_BIG_ENDIAN)
186 offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
187 else
188 offset += byte % UNITS_PER_WORD;
189 }
190 else
191 offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;
192
193 value = (unsigned HOST_WIDE_INT) buffer[offset];
194
195 index = bitpos / HOST_BITS_PER_WIDE_INT;
196 val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
197 }
198
  result.set_len (canonize (val, len, precision));
200
201 return result;
202}
203
/* Set RESULT from X; the sign is taken according to SGN.  */
205void
206wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
207{
208 int len = x.get_len ();
209 const HOST_WIDE_INT *v = x.get_val ();
210 int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();
211
212 if (wi::neg_p (x, sgn))
213 {
      /* We use ones' complement to avoid the 0x80...0 edge case that
	 unary minus cannot handle.  */
216 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
217 for (int i = 0; i < len; i++)
218 t[i] = ~v[i];
219 if (excess > 0)
220 t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
221 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
222 mpz_com (result, result);
223 }
224 else if (excess > 0)
225 {
226 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
227 for (int i = 0; i < len - 1; i++)
228 t[i] = v[i];
229 t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
230 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
231 }
232 else if (excess < 0 && wi::neg_p (x))
233 {
234 int extra = CEIL (-excess, HOST_BITS_PER_WIDE_INT);
235 HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len + extra);
236 for (int i = 0; i < len; i++)
237 t[i] = v[i];
238 for (int i = 0; i < extra; i++)
239 t[len + i] = -1;
240 excess = (-excess) % HOST_BITS_PER_WIDE_INT;
241 if (excess)
242 t[len + extra - 1] = (HOST_WIDE_INT_1U << excess) - 1;
243 mpz_import (result, len + extra, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
244 }
245 else
246 mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
247}
248
249/* Returns X converted to TYPE. If WRAP is true, then out-of-range
250 values of VAL will be wrapped; otherwise, they will be set to the
251 appropriate minimum or maximum TYPE bound. */
252wide_int
253wi::from_mpz (const_tree type, mpz_t x, bool wrap)
254{
255 size_t count, numb;
256 unsigned int prec = TYPE_PRECISION (type);
  wide_int res = wide_int::create (prec);
258
259 if (!wrap)
260 {
261 mpz_t min, max;
262
263 mpz_init (min);
264 mpz_init (max);
265 get_type_static_bounds (type, min, max);
266
267 if (mpz_cmp (x, min) < 0)
268 mpz_set (x, min);
269 else if (mpz_cmp (x, max) > 0)
270 mpz_set (x, max);
271
272 mpz_clear (min);
273 mpz_clear (max);
274 }
275
276 /* Determine the number of unsigned HOST_WIDE_INTs that are required
277 for representing the absolute value. The code to calculate count is
278 extracted from the GMP manual, section "Integer Import and Export":
279 http://gmplib.org/manual/Integer-Import-and-Export.html */
280 numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
281 count = CEIL (mpz_sizeinbase (x, 2), numb);
282 HOST_WIDE_INT *val = res.write_val (0);
283 /* Read the absolute value.
284
285 Write directly to the wide_int storage if possible, otherwise leave
286 GMP to allocate the memory for us. It might be slightly more efficient
287 to use mpz_tdiv_r_2exp for the latter case, but the situation is
288 pathological and it seems safer to operate on the original mpz value
289 in all cases. */
290 void *valres = mpz_export (count <= WIDE_INT_MAX_INL_ELTS ? val : 0,
291 &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
292 if (count < 1)
293 {
294 val[0] = 0;
295 count = 1;
296 }
297 count = MIN (count, BLOCKS_NEEDED (prec));
298 if (valres != val)
299 {
      memcpy (val, valres, count * sizeof (HOST_WIDE_INT));
      free (valres);
302 }
303 /* Zero-extend the absolute value to PREC bits. */
304 if (count < BLOCKS_NEEDED (prec) && val[count - 1] < 0)
305 val[count++] = 0;
306 else
    count = canonize (val, count, prec);
  res.set_len (count);
309
310 if (mpz_sgn (x) < 0)
311 res = -res;
312
313 return res;
314}
315
316/*
317 * Largest and smallest values in a mode.
318 */
319
/* Return the largest SGNed number that is representable in PRECISION bits.

   TODO: There is still code from the double_int era that tries to
   make up for the fact that double ints could not represent the
   min and max values of all types.  This code should be removed
   because the min and max values can always be represented in
   wide_ints and int-csts.  */
327wide_int
328wi::max_value (unsigned int precision, signop sgn)
329{
330 gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    /* The unsigned max is just all ones.  */
    return shwi (-1, precision);
  else
    /* The signed max is all ones except the top bit.  This must be
       explicitly represented.  */
    return mask (precision - 1, false, precision);
338}
339
/* Return the smallest SGNed number that is representable in PRECISION bits.  */
wide_int
wi::min_value (unsigned int precision, signop sgn)
{
  gcc_checking_assert (precision != 0);
  if (sgn == UNSIGNED)
    return uhwi (0, precision);
  else
    /* The signed min is all zeros except the top bit.  This must be
       explicitly represented.  */
    return wi::set_bit_in_zero (precision - 1, precision);
351}
352
353/*
354 * Public utilities.
355 */
356
357/* Convert the number represented by XVAL, XLEN and XPRECISION, which has
358 signedness SGN, to an integer that has PRECISION bits. Store the blocks
359 in VAL and return the number of blocks used.
360
361 This function can handle both extension (PRECISION > XPRECISION)
362 and truncation (PRECISION < XPRECISION). */
363unsigned int
364wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
365 unsigned int xlen, unsigned int xprecision,
366 unsigned int precision, signop sgn)
367{
368 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
369 unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
370 for (unsigned i = 0; i < len; i++)
371 val[i] = xval[i];
372
373 if (precision > xprecision)
374 {
375 unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;
376
377 /* Expanding. */
378 if (sgn == UNSIGNED)
379 {
380 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
	    val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
382 else if (val[len - 1] < 0)
383 {
384 while (len < BLOCKS_NEEDED (xprecision))
385 val[len++] = -1;
386 if (small_xprecision)
		val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
388 else
389 val[len++] = 0;
390 }
391 }
392 else
393 {
394 if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
	    val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
396 }
397 }
398 len = canonize (val, len, precision);
399
400 return len;
401}
402
/* This function hides the fact that we cannot rely on the bits beyond
   the precision.  This issue comes up in the relational comparisons
   where we do allow comparisons of values of different precisions.  */
406static inline HOST_WIDE_INT
407selt (const HOST_WIDE_INT *a, unsigned int len,
408 unsigned int blocks_needed, unsigned int small_prec,
409 unsigned int index, signop sgn)
410{
411 HOST_WIDE_INT val;
412 if (index < len)
413 val = a[index];
414 else if (index < blocks_needed || sgn == SIGNED)
415 /* Signed or within the precision. */
416 val = SIGN_MASK (a[len - 1]);
417 else
418 /* Unsigned extension beyond the precision. */
419 val = 0;
420
421 if (small_prec && index == blocks_needed - 1)
    return (sgn == SIGNED
	    ? sext_hwi (val, small_prec)
	    : zext_hwi (val, small_prec));
425 else
426 return val;
427}
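
/* A small example of how selt smooths over the excess bits (64-bit
   HWIs, illustrative only): for a 70-bit operand stored canonically
   as the single block { -1 }, asking for block 1 returns -1 when SGN
   is SIGNED but 0x3f (the zero-extended low 6 bits) when SGN is
   UNSIGNED, which is what makes the mixed signed/unsigned comparison
   helpers below behave consistently.  */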
428
429/* Find the highest bit represented in a wide int. This will in
430 general have the same value as the sign bit. */
431static inline HOST_WIDE_INT
432top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
433{
434 int excess = len * HOST_BITS_PER_WIDE_INT - prec;
435 unsigned HOST_WIDE_INT val = a[len - 1];
436 if (excess > 0)
437 val <<= excess;
438 return val >> (HOST_BITS_PER_WIDE_INT - 1);
439}
440
441/*
442 * Comparisons, note that only equality is an operator. The other
443 * comparisons cannot be operators since they are inherently signed or
444 * unsigned and C++ has no such operators.
445 */
446
447/* Return true if OP0 == OP1. */
448bool
449wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
450 const HOST_WIDE_INT *op1, unsigned int op1len,
451 unsigned int prec)
452{
453 int l0 = op0len - 1;
454 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
455
456 if (op0len != op1len)
457 return false;
458
459 if (op0len == BLOCKS_NEEDED (prec) && small_prec)
460 {
      /* It does not matter if we zext or sext here, we just have to
	 do both the same way.  */
      if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec))
464 return false;
465 l0--;
466 }
467
468 while (l0 >= 0)
469 if (op0[l0] != op1[l0])
470 return false;
471 else
472 l0--;
473
474 return true;
475}
476
477/* Return true if OP0 < OP1 using signed comparisons. */
478bool
479wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
480 unsigned int precision,
481 const HOST_WIDE_INT *op1, unsigned int op1len)
482{
483 HOST_WIDE_INT s0, s1;
484 unsigned HOST_WIDE_INT u0, u1;
485 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
486 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
487 int l = MAX (op0len - 1, op1len - 1);
488
489 /* Only the top block is compared as signed. The rest are unsigned
490 comparisons. */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
493 if (s0 < s1)
494 return true;
495 if (s0 > s1)
496 return false;
497
498 l--;
499 while (l >= 0)
500 {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
503
504 if (u0 < u1)
505 return true;
506 if (u0 > u1)
507 return false;
508 l--;
509 }
510
511 return false;
512}
513
514/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
515 signed compares. */
516int
517wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
518 unsigned int precision,
519 const HOST_WIDE_INT *op1, unsigned int op1len)
520{
521 HOST_WIDE_INT s0, s1;
522 unsigned HOST_WIDE_INT u0, u1;
523 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
524 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
525 int l = MAX (op0len - 1, op1len - 1);
526
527 /* Only the top block is compared as signed. The rest are unsigned
528 comparisons. */
  s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
531 if (s0 < s1)
532 return -1;
533 if (s0 > s1)
534 return 1;
535
536 l--;
537 while (l >= 0)
538 {
      u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
541
542 if (u0 < u1)
543 return -1;
544 if (u0 > u1)
545 return 1;
546 l--;
547 }
548
549 return 0;
550}
551
552/* Return true if OP0 < OP1 using unsigned comparisons. */
553bool
554wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
555 unsigned int precision,
556 const HOST_WIDE_INT *op1, unsigned int op1len)
557{
558 unsigned HOST_WIDE_INT x0;
559 unsigned HOST_WIDE_INT x1;
560 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
561 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
562 int l = MAX (op0len - 1, op1len - 1);
563
564 while (l >= 0)
565 {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
568 if (x0 < x1)
569 return true;
570 if (x0 > x1)
571 return false;
572 l--;
573 }
574
575 return false;
576}
577
578/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
579 unsigned compares. */
580int
581wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
582 unsigned int precision,
583 const HOST_WIDE_INT *op1, unsigned int op1len)
584{
585 unsigned HOST_WIDE_INT x0;
586 unsigned HOST_WIDE_INT x1;
587 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
588 unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
589 int l = MAX (op0len - 1, op1len - 1);
590
591 while (l >= 0)
592 {
      x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
595 if (x0 < x1)
596 return -1;
597 if (x0 > x1)
598 return 1;
599 l--;
600 }
601
602 return 0;
603}
604
605/*
606 * Extension.
607 */
608
609/* Sign-extend the number represented by XVAL and XLEN into VAL,
610 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
611 and VAL have PRECISION bits. */
612unsigned int
613wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
614 unsigned int xlen, unsigned int precision, unsigned int offset)
615{
616 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
617 /* Extending beyond the precision is a no-op. If we have only stored
618 OFFSET bits or fewer, the rest are already signs. */
619 if (offset >= precision || len >= xlen)
620 {
621 for (unsigned i = 0; i < xlen; ++i)
622 val[i] = xval[i];
623 return xlen;
624 }
625 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
626 for (unsigned int i = 0; i < len; i++)
627 val[i] = xval[i];
628 if (suboffset > 0)
629 {
      val[len] = sext_hwi (xval[len], suboffset);
631 len += 1;
632 }
633 return canonize (val, len, precision);
634}
635
636/* Zero-extend the number represented by XVAL and XLEN into VAL,
637 starting at OFFSET. Return the number of blocks in VAL. Both XVAL
638 and VAL have PRECISION bits. */
639unsigned int
640wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
641 unsigned int xlen, unsigned int precision, unsigned int offset)
642{
643 unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
644 /* Extending beyond the precision is a no-op. If we have only stored
645 OFFSET bits or fewer, and the upper stored bit is zero, then there
646 is nothing to do. */
647 if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
648 {
649 for (unsigned i = 0; i < xlen; ++i)
650 val[i] = xval[i];
651 return xlen;
652 }
653 unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
654 for (unsigned int i = 0; i < len; i++)
655 val[i] = i < xlen ? xval[i] : -1;
656 if (suboffset > 0)
    val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
658 else
659 val[len] = 0;
660 return canonize (val, len: len + 1, precision);
661}
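
/* Illustration of the OFFSET semantics of the two routines above
   (64-bit HWIs, 128-bit precision; example values only): starting
   from a value whose low byte is 0xff, sext_large with OFFSET == 8
   produces the canonical single block { -1 } (every bit above bit 7
   copies bit 7), while zext_large with the same OFFSET leaves just
   { 0xff }.  */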
662
663/*
664 * Masking, inserting, shifting, rotating.
665 */
666
667/* Insert WIDTH bits from Y into X starting at START. */
668wide_int
669wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
670 unsigned int width)
671{
672 wide_int result;
673 wide_int mask;
674 wide_int tmp;
675
676 unsigned int precision = x.get_precision ();
677 if (start >= precision)
678 return x;
679
680 gcc_checking_assert (precision >= width);
681
682 if (start + width >= precision)
683 width = precision - start;
684
  mask = wi::shifted_mask (start, width, false, precision);
  tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
  result = tmp & mask;

  tmp = wi::bit_and_not (x, mask);
690 result = result | tmp;
691
692 return result;
693}
694
695/* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
696 Return the number of blocks in VAL. Both XVAL and VAL have PRECISION
697 bits. */
698unsigned int
699wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
700 unsigned int xlen, unsigned int precision, unsigned int bit)
701{
702 unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
703 unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;
704
705 if (block + 1 >= xlen)
706 {
707 /* The operation either affects the last current block or needs
708 a new block. */
709 unsigned int len = block + 1;
710 for (unsigned int i = 0; i < len; i++)
	val[i] = safe_uhwi (xval, xlen, i);
712 val[block] |= HOST_WIDE_INT_1U << subbit;
713
714 /* If the bit we just set is at the msb of the block, make sure
715 that any higher bits are zeros. */
716 if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
717 {
718 val[len++] = 0;
719 return len;
720 }
721 return canonize (val, len, precision);
722 }
723 else
724 {
725 for (unsigned int i = 0; i < xlen; i++)
726 val[i] = xval[i];
727 val[block] |= HOST_WIDE_INT_1U << subbit;
      return canonize (val, xlen, precision);
729 }
730}
731
732/* Byte swap the integer represented by XVAL and XLEN into VAL. Return
733 the number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */
734unsigned int
735wi::bswap_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
736 unsigned int xlen, unsigned int precision)
737{
738 unsigned int s, len = BLOCKS_NEEDED (precision);
739
740 /* This is not a well defined operation if the precision is not a
741 multiple of 8. */
742 gcc_assert ((precision & 0x7) == 0);
743
  memset (val, 0, sizeof (unsigned HOST_WIDE_INT) * len);
745
746 /* Only swap the bytes that are not the padding. */
747 for (s = 0; s < precision; s += 8)
748 {
749 unsigned int d = precision - s - 8;
750 unsigned HOST_WIDE_INT byte;
751
752 unsigned int block = s / HOST_BITS_PER_WIDE_INT;
753 unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);
754
      byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;
756
757 block = d / HOST_BITS_PER_WIDE_INT;
758 offset = d & (HOST_BITS_PER_WIDE_INT - 1);
759
760 val[block] |= byte << offset;
761 }
762
763 return canonize (val, len, precision);
764}
765
766/* Bitreverse the integer represented by XVAL and LEN into VAL. Return
767 the number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */
768unsigned int
769wi::bitreverse_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
770 unsigned int len, unsigned int precision)
771{
772 unsigned int i, s;
773
774 for (i = 0; i < len; i++)
775 val[i] = 0;
776
777 for (s = 0; s < precision; s++)
778 {
779 unsigned int block = s / HOST_BITS_PER_WIDE_INT;
780 unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);
      if (((safe_uhwi (xval, len, block) >> offset) & 1) != 0)
782 {
783 unsigned int d = (precision - 1) - s;
784 block = d / HOST_BITS_PER_WIDE_INT;
785 offset = d & (HOST_BITS_PER_WIDE_INT - 1);
786 val[block] |= HOST_WIDE_INT_1U << offset;
787 }
788 }
789
790 return canonize (val, len, precision);
791}
792
793/* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
794 above that up to PREC are zeros. The result is inverted if NEGATE
795 is true. Return the number of blocks in VAL. */
796unsigned int
797wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
798 unsigned int prec)
799{
800 if (width >= prec)
801 {
802 val[0] = negate ? 0 : -1;
803 return 1;
804 }
805 else if (width == 0)
806 {
807 val[0] = negate ? -1 : 0;
808 return 1;
809 }
810
811 unsigned int i = 0;
812 while (i < width / HOST_BITS_PER_WIDE_INT)
813 val[i++] = negate ? 0 : -1;
814
815 unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
816 if (shift != 0)
817 {
818 HOST_WIDE_INT last = (HOST_WIDE_INT_1U << shift) - 1;
819 val[i++] = negate ? ~last : last;
820 }
821 else
822 val[i++] = negate ? -1 : 0;
823
824 return i;
825}
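
/* Worked example for wi::mask (64-bit HWIs, illustrative values
   only): a call with WIDTH == 70, NEGATE == false and PREC == 128
   fills VAL with { -1, 0x3f } and returns 2 (seventy low ones);
   with NEGATE == true it instead produces { 0, ~(HOST_WIDE_INT) 0x3f },
   the complement within the 128-bit precision.  */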
826
827/* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
828 bits are ones, and the bits above that up to PREC are zeros. The result
829 is inverted if NEGATE is true. Return the number of blocks in VAL. */
830unsigned int
831wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
832 bool negate, unsigned int prec)
833{
834 if (start >= prec || width == 0)
835 {
836 val[0] = negate ? -1 : 0;
837 return 1;
838 }
839
840 if (width > prec - start)
841 width = prec - start;
842 unsigned int end = start + width;
843
844 unsigned int i = 0;
845 while (i < start / HOST_BITS_PER_WIDE_INT)
846 val[i++] = negate ? -1 : 0;
847
848 unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
849 if (shift)
850 {
851 HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
852 shift += width;
853 if (shift < HOST_BITS_PER_WIDE_INT)
854 {
855 /* case 000111000 */
856 block = (HOST_WIDE_INT_1U << shift) - block - 1;
857 val[i++] = negate ? ~block : block;
858 return i;
859 }
860 else
861 /* ...111000 */
862 val[i++] = negate ? block : ~block;
863 }
864
865 if (end >= prec)
866 {
867 if (!shift)
868 val[i++] = negate ? 0 : -1;
869 return i;
870 }
871
872 while (i < end / HOST_BITS_PER_WIDE_INT)
873 /* 1111111 */
874 val[i++] = negate ? 0 : -1;
875
876 shift = end & (HOST_BITS_PER_WIDE_INT - 1);
877 if (shift != 0)
878 {
879 /* 000011111 */
880 HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
881 val[i++] = negate ? ~block : block;
882 }
883 else
884 val[i++] = negate ? -1 : 0;
885
886 return i;
887}
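
/* Worked example for wi::shifted_mask (64-bit HWIs, illustrative
   values only): START == 60, WIDTH == 8, NEGATE == false and
   PREC == 128 place the mask across the block boundary, giving
   VAL == { 0xf000000000000000, 0xf } and a return value of 2.  */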
888
889/*
890 * logical operations.
891 */
892
893/* Set VAL to OP0 & OP1. Return the number of blocks used. */
894unsigned int
895wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
896 unsigned int op0len, const HOST_WIDE_INT *op1,
897 unsigned int op1len, unsigned int prec)
898{
899 int l0 = op0len - 1;
900 int l1 = op1len - 1;
901 bool need_canon = true;
902
903 unsigned int len = MAX (op0len, op1len);
904 if (l0 > l1)
905 {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
907 if (op1mask == 0)
908 {
909 l0 = l1;
910 len = l1 + 1;
911 }
912 else
913 {
914 need_canon = false;
915 while (l0 > l1)
916 {
917 val[l0] = op0[l0];
918 l0--;
919 }
920 }
921 }
922 else if (l1 > l0)
923 {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
925 if (op0mask == 0)
926 len = l0 + 1;
927 else
928 {
929 need_canon = false;
930 while (l1 > l0)
931 {
932 val[l1] = op1[l1];
933 l1--;
934 }
935 }
936 }
937
938 while (l0 >= 0)
939 {
940 val[l0] = op0[l0] & op1[l0];
941 l0--;
942 }
943
944 if (need_canon)
    len = canonize (val, len, prec);
946
947 return len;
948}
949
950/* Set VAL to OP0 & ~OP1. Return the number of blocks used. */
951unsigned int
952wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
953 unsigned int op0len, const HOST_WIDE_INT *op1,
954 unsigned int op1len, unsigned int prec)
955{
956 wide_int result;
957 int l0 = op0len - 1;
958 int l1 = op1len - 1;
959 bool need_canon = true;
960
961 unsigned int len = MAX (op0len, op1len);
962 if (l0 > l1)
963 {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
965 if (op1mask != 0)
966 {
967 l0 = l1;
968 len = l1 + 1;
969 }
970 else
971 {
972 need_canon = false;
973 while (l0 > l1)
974 {
975 val[l0] = op0[l0];
976 l0--;
977 }
978 }
979 }
980 else if (l1 > l0)
981 {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
983 if (op0mask == 0)
984 len = l0 + 1;
985 else
986 {
987 need_canon = false;
988 while (l1 > l0)
989 {
990 val[l1] = ~op1[l1];
991 l1--;
992 }
993 }
994 }
995
996 while (l0 >= 0)
997 {
998 val[l0] = op0[l0] & ~op1[l0];
999 l0--;
1000 }
1001
1002 if (need_canon)
    len = canonize (val, len, prec);
1004
1005 return len;
1006}
1007
1008/* Set VAL to OP0 | OP1. Return the number of blocks used. */
1009unsigned int
1010wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1011 unsigned int op0len, const HOST_WIDE_INT *op1,
1012 unsigned int op1len, unsigned int prec)
1013{
1014 wide_int result;
1015 int l0 = op0len - 1;
1016 int l1 = op1len - 1;
1017 bool need_canon = true;
1018
1019 unsigned int len = MAX (op0len, op1len);
1020 if (l0 > l1)
1021 {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1023 if (op1mask != 0)
1024 {
1025 l0 = l1;
1026 len = l1 + 1;
1027 }
1028 else
1029 {
1030 need_canon = false;
1031 while (l0 > l1)
1032 {
1033 val[l0] = op0[l0];
1034 l0--;
1035 }
1036 }
1037 }
1038 else if (l1 > l0)
1039 {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1041 if (op0mask != 0)
1042 len = l0 + 1;
1043 else
1044 {
1045 need_canon = false;
1046 while (l1 > l0)
1047 {
1048 val[l1] = op1[l1];
1049 l1--;
1050 }
1051 }
1052 }
1053
1054 while (l0 >= 0)
1055 {
1056 val[l0] = op0[l0] | op1[l0];
1057 l0--;
1058 }
1059
1060 if (need_canon)
    len = canonize (val, len, prec);
1062
1063 return len;
1064}
1065
1066/* Set VAL to OP0 | ~OP1. Return the number of blocks used. */
1067unsigned int
1068wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1069 unsigned int op0len, const HOST_WIDE_INT *op1,
1070 unsigned int op1len, unsigned int prec)
1071{
1072 wide_int result;
1073 int l0 = op0len - 1;
1074 int l1 = op1len - 1;
1075 bool need_canon = true;
1076
1077 unsigned int len = MAX (op0len, op1len);
1078 if (l0 > l1)
1079 {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1081 if (op1mask == 0)
1082 {
1083 l0 = l1;
1084 len = l1 + 1;
1085 }
1086 else
1087 {
1088 need_canon = false;
1089 while (l0 > l1)
1090 {
1091 val[l0] = op0[l0];
1092 l0--;
1093 }
1094 }
1095 }
1096 else if (l1 > l0)
1097 {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1099 if (op0mask != 0)
1100 len = l0 + 1;
1101 else
1102 {
1103 need_canon = false;
1104 while (l1 > l0)
1105 {
1106 val[l1] = ~op1[l1];
1107 l1--;
1108 }
1109 }
1110 }
1111
1112 while (l0 >= 0)
1113 {
1114 val[l0] = op0[l0] | ~op1[l0];
1115 l0--;
1116 }
1117
1118 if (need_canon)
    len = canonize (val, len, prec);
1120
1121 return len;
1122}
1123
1124/* Set VAL to OP0 ^ OP1. Return the number of blocks used. */
1125unsigned int
1126wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1127 unsigned int op0len, const HOST_WIDE_INT *op1,
1128 unsigned int op1len, unsigned int prec)
1129{
1130 wide_int result;
1131 int l0 = op0len - 1;
1132 int l1 = op1len - 1;
1133
1134 unsigned int len = MAX (op0len, op1len);
1135 if (l0 > l1)
1136 {
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1138 while (l0 > l1)
1139 {
1140 val[l0] = op0[l0] ^ op1mask;
1141 l0--;
1142 }
1143 }
1144
1145 if (l1 > l0)
1146 {
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1148 while (l1 > l0)
1149 {
1150 val[l1] = op0mask ^ op1[l1];
1151 l1--;
1152 }
1153 }
1154
1155 while (l0 >= 0)
1156 {
1157 val[l0] = op0[l0] ^ op1[l0];
1158 l0--;
1159 }
1160
  return canonize (val, len, prec);
1162}
1163
1164/*
1165 * math
1166 */
1167
1168/* Set VAL to OP0 + OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1169 whether the result overflows when OP0 and OP1 are treated as having
1170 signedness SGN. Return the number of blocks in VAL. */
1171unsigned int
1172wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1173 unsigned int op0len, const HOST_WIDE_INT *op1,
1174 unsigned int op1len, unsigned int prec,
1175 signop sgn, wi::overflow_type *overflow)
1176{
1177 unsigned HOST_WIDE_INT o0 = 0;
1178 unsigned HOST_WIDE_INT o1 = 0;
1179 unsigned HOST_WIDE_INT x = 0;
1180 unsigned HOST_WIDE_INT carry = 0;
1181 unsigned HOST_WIDE_INT old_carry = 0;
1182 unsigned HOST_WIDE_INT mask0, mask1;
1183 unsigned int i;
1184
1185 unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);
1188 /* Add all of the explicitly defined elements. */
1189
1190 for (i = 0; i < len; i++)
1191 {
1192 o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
1193 o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
1194 x = o0 + o1 + carry;
1195 val[i] = x;
1196 old_carry = carry;
1197 carry = carry == 0 ? x < o0 : x <= o0;
1198 }
1199
1200 if (len * HOST_BITS_PER_WIDE_INT < prec)
1201 {
1202 val[len] = mask0 + mask1 + carry;
1203 len++;
1204 if (overflow)
1205 *overflow
1206 = (sgn == UNSIGNED && carry) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
1207 }
1208 else if (overflow)
1209 {
1210 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1211 if (sgn == SIGNED)
1212 {
1213 unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
1214 if ((HOST_WIDE_INT) (x << shift) < 0)
1215 {
1216 if (o0 > (unsigned HOST_WIDE_INT) val[len - 1])
1217 *overflow = wi::OVF_UNDERFLOW;
1218 else if (o0 < (unsigned HOST_WIDE_INT) val[len - 1])
1219 *overflow = wi::OVF_OVERFLOW;
1220 else
1221 *overflow = wi::OVF_NONE;
1222 }
1223 else
1224 *overflow = wi::OVF_NONE;
1225 }
1226 else
1227 {
	  /* Put the MSB of X and O0 in the top of the HWI.  */
1229 x <<= shift;
1230 o0 <<= shift;
1231 if (old_carry)
1232 *overflow = (x <= o0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
1233 else
1234 *overflow = (x < o0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
1235 }
1236 }
1237
  return canonize (val, len, prec);
1239}
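
/* A worked instance of the signed overflow test above (illustrative
   only): adding the 8-bit values 0x7f and 0x01 yields the block 0x80;
   (0x80 ^ 0x7f) & (0x80 ^ 0x01) == 0x81, whose bit 7 lands in the MSB
   of the HWI after the shift by 56, and because o0 (0x7f) is less
   than the result block the case is classified as wi::OVF_OVERFLOW.  */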
1240
1241/* Subroutines of the multiplication and division operations. Unpack
1242 the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
1243 HOST_HALF_WIDE_INTs of RESULT. The rest of RESULT is filled by
1244 uncompressing the top bit of INPUT[IN_LEN - 1]. */
1245static void
1246wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
1247 unsigned int in_len, unsigned int out_len,
1248 unsigned int prec, signop sgn)
1249{
1250 unsigned int i;
1251 unsigned int j = 0;
1252 unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
1253 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1254 HOST_WIDE_INT mask;
1255
1256 if (sgn == SIGNED)
1257 {
      mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
1259 mask &= HALF_INT_MASK;
1260 }
1261 else
1262 mask = 0;
1263
1264 for (i = 0; i < blocks_needed - 1; i++)
1265 {
      HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1267 result[j++] = x;
1268 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1269 }
1270
  HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
1272 if (small_prec)
1273 {
      if (sgn == SIGNED)
	x = sext_hwi (x, small_prec);
      else
	x = zext_hwi (x, small_prec);
1278 }
1279 result[j++] = x;
1280 result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
1281
1282 /* Smear the sign bit. */
1283 while (j < out_len)
1284 result[j++] = mask;
1285}
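
/* Example of the unpacked layout (64-bit HWIs, 32-bit half blocks;
   illustrative only): unpacking the single block 0x0000000180000000
   with PREC == 64 produces the half blocks { 0x80000000, 0x00000001 },
   least significant half first, followed by copies of the sign mask
   (here zero) up to OUT_LEN.  */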
1286
1287/* The inverse of wi_unpack. IN_LEN is the number of input
1288 blocks and PRECISION is the precision of the result. Return the
1289 number of blocks in the canonicalized result. */
1290static unsigned int
1291wi_pack (HOST_WIDE_INT *result,
1292 const unsigned HOST_HALF_WIDE_INT *input,
1293 unsigned int in_len, unsigned int precision)
1294{
1295 unsigned int i = 0;
1296 unsigned int j = 0;
1297 unsigned int blocks_needed = BLOCKS_NEEDED (precision);
1298
1299 while (i + 1 < in_len)
1300 {
1301 result[j++] = ((unsigned HOST_WIDE_INT) input[i]
1302 | ((unsigned HOST_WIDE_INT) input[i + 1]
1303 << HOST_BITS_PER_HALF_WIDE_INT));
1304 i += 2;
1305 }
1306
1307 /* Handle the case where in_len is odd. For this we zero extend. */
1308 if (in_len & 1)
1309 result[j++] = (unsigned HOST_WIDE_INT) input[i];
1310 else if (j < blocks_needed)
1311 result[j++] = 0;
  return canonize (result, j, precision);
1313}
1314
1315/* Multiply Op1 by Op2. If HIGH is set, only the upper half of the
1316 result is returned.
1317
1318 If HIGH is not set, throw away the upper half after the check is
1319 made to see if it overflows. Unfortunately there is no better way
1320 to check for overflow than to do this. If OVERFLOW is nonnull,
1321 record in *OVERFLOW whether the result overflowed. SGN controls
1322 the signedness and is used to check overflow or if HIGH is set.
1323
1324 NOTE: Overflow type for signed overflow is not yet implemented. */
1325unsigned int
1326wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
1327 unsigned int op1len, const HOST_WIDE_INT *op2val,
1328 unsigned int op2len, unsigned int prec, signop sgn,
1329 wi::overflow_type *overflow, bool high)
1330{
1331 unsigned HOST_WIDE_INT o0, o1, k, t;
1332 unsigned int i;
1333 unsigned int j;
1334
1335 /* If the top level routine did not really pass in an overflow, then
1336 just make sure that we never attempt to set it. */
1337 bool needs_overflow = (overflow != 0);
1338 if (needs_overflow)
1339 *overflow = wi::OVF_NONE;
1340
1341 wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
1342 wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);
1343
1344 /* This is a surprisingly common case, so do it first. */
1345 if (op1 == 0 || op2 == 0)
1346 {
1347 val[0] = 0;
1348 return 1;
1349 }
1350
1351#ifdef umul_ppmm
1352 if (sgn == UNSIGNED)
1353 {
1354 /* If the inputs are single HWIs and the output has room for at
1355 least two HWIs, we can use umul_ppmm directly. */
1356 if (prec >= HOST_BITS_PER_WIDE_INT * 2
1357 && wi::fits_uhwi_p (op1)
1358 && wi::fits_uhwi_p (op2))
1359 {
1360 /* This case never overflows. */
1361 if (high)
1362 {
1363 val[0] = 0;
1364 return 1;
1365 }
1366 umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
1367 if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT * 2)
1368 {
1369 val[2] = 0;
1370 return 3;
1371 }
1372 return 1 + (val[1] != 0 || val[0] < 0);
1373 }
1374 /* Likewise if the output is a full single HWI, except that the
1375 upper HWI of the result is only used for determining overflow.
1376 (We handle this case inline when overflow isn't needed.) */
1377 else if (prec == HOST_BITS_PER_WIDE_INT)
1378 {
1379 unsigned HOST_WIDE_INT upper;
1380 umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
1381 if (needs_overflow)
1382 /* Unsigned overflow can only be +OVERFLOW. */
1383 *overflow = (upper != 0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
1384 if (high)
1385 val[0] = upper;
1386 return 1;
1387 }
1388 }
1389#endif
1390
1391 /* Handle multiplications by 1. */
1392 if (op1 == 1)
1393 {
1394 if (high)
1395 {
	  val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
1397 return 1;
1398 }
1399 for (i = 0; i < op2len; i++)
1400 val[i] = op2val[i];
1401 return op2len;
1402 }
1403 if (op2 == 1)
1404 {
1405 if (high)
1406 {
	  val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
1408 return 1;
1409 }
1410 for (i = 0; i < op1len; i++)
1411 val[i] = op1val[i];
1412 return op1len;
1413 }
1414
1415 /* If we need to check for overflow, we can only do half wide
1416 multiplies quickly because we need to look at the top bits to
1417 check for the overflow. */
1418 if ((high || needs_overflow)
1419 && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
1420 {
1421 unsigned HOST_WIDE_INT r;
1422
1423 if (sgn == SIGNED)
1424 {
1425 o0 = op1.to_shwi ();
1426 o1 = op2.to_shwi ();
1427 }
1428 else
1429 {
1430 o0 = op1.to_uhwi ();
1431 o1 = op2.to_uhwi ();
1432 }
1433
1434 r = o0 * o1;
1435 if (needs_overflow)
1436 {
1437 if (sgn == SIGNED)
1438 {
	      if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
1440 /* FIXME: Signed overflow type is not implemented yet. */
1441 *overflow = OVF_UNKNOWN;
1442 }
1443 else
1444 {
1445 if ((r >> prec) != 0)
1446 /* Unsigned overflow can only be +OVERFLOW. */
1447 *overflow = OVF_OVERFLOW;
1448 }
1449 }
1450 val[0] = high ? r >> prec : r;
1451 return 1;
1452 }
1453
1454 /* The sizes here are scaled to support a 2x WIDE_INT_MAX_INL_PRECISION by 2x
1455 WIDE_INT_MAX_INL_PRECISION yielding a 4x WIDE_INT_MAX_INL_PRECISION
1456 result. */
1457
1458 unsigned HOST_HALF_WIDE_INT
1459 ubuf[4 * WIDE_INT_MAX_INL_PRECISION / HOST_BITS_PER_HALF_WIDE_INT];
1460 unsigned HOST_HALF_WIDE_INT
1461 vbuf[4 * WIDE_INT_MAX_INL_PRECISION / HOST_BITS_PER_HALF_WIDE_INT];
1462 /* The '2' in 'R' is because we are internally doing a full
1463 multiply. */
1464 unsigned HOST_HALF_WIDE_INT
1465 rbuf[2 * 4 * WIDE_INT_MAX_INL_PRECISION / HOST_BITS_PER_HALF_WIDE_INT];
1466 const HOST_WIDE_INT mask
1467 = (HOST_WIDE_INT_1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;
1468 unsigned HOST_HALF_WIDE_INT *u = ubuf;
1469 unsigned HOST_HALF_WIDE_INT *v = vbuf;
1470 unsigned HOST_HALF_WIDE_INT *r = rbuf;
1471
1472 if (!high)
1473 prec = MIN ((op1len + op2len + 1) * HOST_BITS_PER_WIDE_INT, prec);
1474 unsigned int blocks_needed = BLOCKS_NEEDED (prec);
1475 unsigned int half_blocks_needed = blocks_needed * 2;
1476 if (UNLIKELY (prec > WIDE_INT_MAX_INL_PRECISION))
1477 {
1478 unsigned HOST_HALF_WIDE_INT *buf
1479 = XALLOCAVEC (unsigned HOST_HALF_WIDE_INT, 4 * half_blocks_needed);
1480 u = buf;
1481 v = u + half_blocks_needed;
1482 r = v + half_blocks_needed;
1483 }
1484
1485 /* We do unsigned mul and then correct it. */
  wi_unpack (u, op1val, op1len, half_blocks_needed, prec, UNSIGNED);
  wi_unpack (v, op2val, op2len, half_blocks_needed, prec, UNSIGNED);
1488
1489 /* The 2 is for a full mult. */
  memset (r, 0, half_blocks_needed * 2
	  * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);
1492
1493 for (j = 0; j < half_blocks_needed; j++)
1494 {
1495 k = 0;
1496 for (i = 0; i < half_blocks_needed; i++)
1497 {
1498 t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
1499 + r[i + j] + k);
1500 r[i + j] = t & HALF_INT_MASK;
1501 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1502 }
1503 r[j + half_blocks_needed] = k;
1504 }
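
  /* At this point R holds the full double-width product in half
     blocks, least significant first.  For instance (32-bit halves,
     illustrative only), multiplying 0xffffffff by 0xffffffff leaves
     r == { 0x00000001, 0xfffffffe, 0, 0 }, i.e. 0xfffffffe00000001.  */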
1505
1506 unsigned int shift;
1507 if ((high || needs_overflow) && (shift = prec % HOST_BITS_PER_WIDE_INT) != 0)
1508 {
1509 /* The high or needs_overflow code assumes that the high bits
1510 only appear from r[half_blocks_needed] up to
1511 r[half_blocks_needed * 2 - 1]. If prec is not a multiple
1512 of HOST_BITS_PER_WIDE_INT, shift the bits above prec up
1513 to make that code simple. */
1514 if (shift == HOST_BITS_PER_HALF_WIDE_INT)
	memmove (&r[half_blocks_needed], &r[half_blocks_needed - 1],
		 sizeof (r[0]) * half_blocks_needed);
1517 else
1518 {
1519 unsigned int skip = shift < HOST_BITS_PER_HALF_WIDE_INT;
1520 if (!skip)
1521 shift -= HOST_BITS_PER_HALF_WIDE_INT;
1522 for (i = 2 * half_blocks_needed - 1; i >= half_blocks_needed; i--)
1523 r[i] = ((r[i - skip] << (-shift % HOST_BITS_PER_HALF_WIDE_INT))
1524 | (r[i - skip - 1] >> shift));
1525 }
1526 }
1527
1528 /* We did unsigned math above. For signed we must adjust the
1529 product (assuming we need to see that). */
1530 if (sgn == SIGNED && (high || needs_overflow))
1531 {
1532 unsigned HOST_WIDE_INT b;
      if (wi::neg_p (op1))
1534 {
1535 b = 0;
1536 for (i = 0; i < half_blocks_needed; i++)
1537 {
1538 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1539 - (unsigned HOST_WIDE_INT)v[i] - b;
1540 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1541 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1542 }
1543 }
      if (wi::neg_p (op2))
1545 {
1546 b = 0;
1547 for (i = 0; i < half_blocks_needed; i++)
1548 {
1549 t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
1550 - (unsigned HOST_WIDE_INT)u[i] - b;
1551 r[i + half_blocks_needed] = t & HALF_INT_MASK;
1552 b = t >> (HOST_BITS_PER_WIDE_INT - 1);
1553 }
1554 }
1555 }
1556
1557 if (needs_overflow)
1558 {
1559 HOST_WIDE_INT top;
1560
1561 /* For unsigned, overflow is true if any of the top bits are set.
1562 For signed, overflow is true if any of the top bits are not equal
1563 to the sign bit. */
1564 if (sgn == UNSIGNED)
1565 top = 0;
1566 else
1567 {
1568 top = r[half_blocks_needed - 1
1569 - ((-prec % HOST_BITS_PER_WIDE_INT)
1570 >= HOST_BITS_PER_HALF_WIDE_INT)];
1571 top = SIGN_MASK (((unsigned HOST_WIDE_INT) top)
1572 << (HOST_BITS_PER_WIDE_INT / 2
1573 + (-prec % HOST_BITS_PER_HALF_WIDE_INT)));
1574 top &= mask;
1575 }
1576
1577 for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
1578 if (((HOST_WIDE_INT)(r[i] & mask)) != top)
1579 /* FIXME: Signed overflow type is not implemented yet. */
1580 *overflow = (sgn == UNSIGNED) ? wi::OVF_OVERFLOW : wi::OVF_UNKNOWN;
1581 }
1582
1583 int r_offset = high ? half_blocks_needed : 0;
  return wi_pack (val, &r[r_offset], half_blocks_needed, prec);
1585}
1586
1587/* Compute the population count of X. */
1588int
1589wi::popcount (const wide_int_ref &x)
1590{
1591 unsigned int i;
1592 int count;
1593
1594 /* The high order block is special if it is the last block and the
1595 precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We
1596 have to clear out any ones above the precision before doing
1597 popcount on this block. */
1598 count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
1599 unsigned int stop = x.len;
1600 if (count < 0)
1601 {
      count = popcount_hwi (x.uhigh () << -count);
1603 stop -= 1;
1604 }
1605 else
1606 {
1607 if (x.sign_mask () >= 0)
1608 count = 0;
1609 }
1610
1611 for (i = 0; i < stop; ++i)
    count += popcount_hwi (x.val[i]);
1613
1614 return count;
1615}
1616
1617/* Set VAL to OP0 - OP1. If OVERFLOW is nonnull, record in *OVERFLOW
1618 whether the result overflows when OP0 and OP1 are treated as having
1619 signedness SGN. Return the number of blocks in VAL. */
1620unsigned int
1621wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1622 unsigned int op0len, const HOST_WIDE_INT *op1,
1623 unsigned int op1len, unsigned int prec,
1624 signop sgn, wi::overflow_type *overflow)
1625{
1626 unsigned HOST_WIDE_INT o0 = 0;
1627 unsigned HOST_WIDE_INT o1 = 0;
1628 unsigned HOST_WIDE_INT x = 0;
  /* The subtraction is done element by element, propagating a borrow
     from the less significant elements; BORROW starts at 0 and
     OLD_BORROW remembers the borrow that went into the most
     significant element for the overflow check below.  */
  unsigned HOST_WIDE_INT borrow = 0;
  unsigned HOST_WIDE_INT old_borrow = 0;
1634
1635 unsigned HOST_WIDE_INT mask0, mask1;
1636 unsigned int i;
1637
1638 unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);
1641
1642 /* Subtract all of the explicitly defined elements. */
1643 for (i = 0; i < len; i++)
1644 {
1645 o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
1646 o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
1647 x = o0 - o1 - borrow;
1648 val[i] = x;
1649 old_borrow = borrow;
1650 borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
1651 }
1652
1653 if (len * HOST_BITS_PER_WIDE_INT < prec)
1654 {
1655 val[len] = mask0 - mask1 - borrow;
1656 len++;
1657 if (overflow)
1658 *overflow = (sgn == UNSIGNED && borrow) ? OVF_UNDERFLOW : OVF_NONE;
1659 }
1660 else if (overflow)
1661 {
1662 unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
1663 if (sgn == SIGNED)
1664 {
1665 unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
1666 if ((HOST_WIDE_INT) (x << shift) < 0)
1667 {
1668 if (o0 > o1)
1669 *overflow = OVF_UNDERFLOW;
1670 else if (o0 < o1)
1671 *overflow = OVF_OVERFLOW;
1672 else
1673 *overflow = OVF_NONE;
1674 }
1675 else
1676 *overflow = OVF_NONE;
1677 }
1678 else
1679 {
	  /* Put the MSB of X and O0 in the top of the HWI.  */
1681 x <<= shift;
1682 o0 <<= shift;
1683 if (old_borrow)
1684 *overflow = (x >= o0) ? OVF_UNDERFLOW : OVF_NONE;
1685 else
1686 *overflow = (x > o0) ? OVF_UNDERFLOW : OVF_NONE;
1687 }
1688 }
1689
  return canonize (val, len, prec);
1691}
1692
1693
1694/*
1695 * Division and Mod
1696 */
1697
/* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR.  The
   algorithm is a small modification of the algorithm in Hacker's
   Delight by Warren, which itself is a small modification of Knuth's
   algorithm.  M is the number of significant elements of U; however,
   there needs to be at least one extra element of B_DIVIDEND
   allocated.  N is the number of elements of B_DIVISOR.
   Return the new value for N.  */
1705static int
1706divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
1707 unsigned HOST_HALF_WIDE_INT *b_remainder,
1708 unsigned HOST_HALF_WIDE_INT *b_dividend,
1709 unsigned HOST_HALF_WIDE_INT *b_divisor,
1710 int m, int n)
1711{
  /* The "digits" are a HOST_HALF_WIDE_INT which is the size of half of a
     HOST_WIDE_INT and stored in the lower bits of each word.  This
     algorithm should work properly on both 32 and 64 bit
     machines.  */
1716 unsigned HOST_WIDE_INT b = HOST_WIDE_INT_1U << HOST_BITS_PER_HALF_WIDE_INT;
1717 unsigned HOST_WIDE_INT qhat; /* Estimate of quotient digit. */
1718 unsigned HOST_WIDE_INT rhat; /* A remainder. */
1719 unsigned HOST_WIDE_INT p; /* Product of two digits. */
1720 HOST_WIDE_INT t, k;
1721 int i, j, s;
1722
1723 /* Single digit divisor. */
1724 if (n == 1)
1725 {
1726 k = 0;
1727 for (j = m - 1; j >= 0; j--)
1728 {
1729 b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
1730 k = ((k * b + b_dividend[j])
1731 - ((unsigned HOST_WIDE_INT)b_quotient[j]
1732 * (unsigned HOST_WIDE_INT)b_divisor[0]));
1733 }
1734 b_remainder[0] = k;
1735 return 1;
1736 }
1737
  s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */
1739
1740 if (s)
1741 {
1742 /* Normalize B_DIVIDEND and B_DIVISOR. Unlike the published
1743 algorithm, we can overwrite b_dividend and b_divisor, so we do
1744 that. */
1745 for (i = n - 1; i > 0; i--)
1746 b_divisor[i] = (b_divisor[i] << s)
1747 | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1748 b_divisor[0] = b_divisor[0] << s;
1749
1750 b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
1751 for (i = m - 1; i > 0; i--)
1752 b_dividend[i] = (b_dividend[i] << s)
1753 | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
1754 b_dividend[0] = b_dividend[0] << s;
1755 }
1756
1757 /* Main loop. */
1758 for (j = m - n; j >= 0; j--)
1759 {
1760 qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
1761 rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
1762 again:
1763 if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
1764 {
1765 qhat -= 1;
1766 rhat += b_divisor[n-1];
1767 if (rhat < b)
1768 goto again;
1769 }
1770
1771 /* Multiply and subtract. */
1772 k = 0;
1773 for (i = 0; i < n; i++)
1774 {
1775 p = qhat * b_divisor[i];
1776 t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
1777 b_dividend[i + j] = t;
1778 k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
1779 - (t >> HOST_BITS_PER_HALF_WIDE_INT));
1780 }
1781 t = b_dividend[j+n] - k;
1782 b_dividend[j+n] = t;
1783
1784 b_quotient[j] = qhat;
1785 if (t < 0)
1786 {
1787 b_quotient[j] -= 1;
1788 k = 0;
1789 for (i = 0; i < n; i++)
1790 {
1791 t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
1792 b_dividend[i+j] = t;
1793 k = t >> HOST_BITS_PER_HALF_WIDE_INT;
1794 }
1795 b_dividend[j+n] += k;
1796 }
1797 }
  /* If N > M, the main loop was skipped, the quotient will be 0 and
     we can't copy more than M half-limbs into the remainder, as they
     aren't present in b_dividend (which has only M significant
     half-limbs).  */
1801 n = MIN (n, m);
1802 if (s)
1803 for (i = 0; i < n; i++)
1804 b_remainder[i] = (b_dividend[i] >> s)
1805 | (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
1806 else
1807 for (i = 0; i < n; i++)
1808 b_remainder[i] = b_dividend[i];
1809 return n;
1810}
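
/* The routine above works with digits in base
   b == 2**HOST_BITS_PER_HALF_WIDE_INT.  As a tiny illustration
   (32-bit halves, example values only): dividing the two-digit
   dividend { 0x00000000, 0x00000001 } (the value 2**32) by the single
   digit { 0x00000003 } takes the N == 1 fast path and yields
   b_quotient == { 0x55555555, 0x00000000 } with b_remainder == { 1 }.  */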
1811
1812
1813/* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
1814 the result. If QUOTIENT is nonnull, store the value of the quotient
1815 there and return the number of blocks in it. The return value is
1816 not defined otherwise. If REMAINDER is nonnull, store the value
1817 of the remainder there and store the number of blocks in
1818 *REMAINDER_LEN. If OFLOW is not null, store in *OFLOW whether
1819 the division overflowed. */
1820unsigned int
1821wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
1822 HOST_WIDE_INT *remainder,
1823 const HOST_WIDE_INT *dividend_val,
1824 unsigned int dividend_len, unsigned int dividend_prec,
1825 const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
1826 unsigned int divisor_prec, signop sgn,
1827 wi::overflow_type *oflow)
1828{
1829 unsigned int m, n;
1830 bool dividend_neg = false;
1831 bool divisor_neg = false;
1832 bool overflow = false;
1833 wide_int neg_dividend, neg_divisor;
1834
1835 wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
1836 dividend_prec);
1837 wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
1838 divisor_prec);
1839 if (divisor == 0)
1840 overflow = true;
1841
1842 /* The smallest signed number / -1 causes overflow. The dividend_len
1843 check is for speed rather than correctness. */
1844 if (sgn == SIGNED
1845 && dividend_len == BLOCKS_NEEDED (dividend_prec)
1846 && divisor == -1
1847 && wi::only_sign_bit_p (dividend))
1848 overflow = true;
1849
  /* Handle the overflow cases.  Viewed as unsigned value, the quotient of
     (signed min / -1) has the same representation as the original dividend.
     We have traditionally made division by zero act as division by one,
     so there too we use the original dividend.  */
1854 if (overflow)
1855 {
1856 if (remainder)
1857 {
1858 *remainder_len = 1;
1859 remainder[0] = 0;
1860 }
1861 if (oflow)
1862 *oflow = OVF_OVERFLOW;
1863 if (quotient)
1864 for (unsigned int i = 0; i < dividend_len; ++i)
1865 quotient[i] = dividend_val[i];
1866 return dividend_len;
1867 }
1868
1869 if (oflow)
1870 *oflow = OVF_NONE;
1871
1872 /* Do it on the host if you can. */
1873 if (sgn == SIGNED
      && wi::fits_shwi_p (dividend)
      && wi::fits_shwi_p (divisor))
1876 {
1877 HOST_WIDE_INT o0 = dividend.to_shwi ();
1878 HOST_WIDE_INT o1 = divisor.to_shwi ();
1879
1880 if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
1881 {
1882 gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
1883 if (quotient)
1884 {
1885 quotient[0] = HOST_WIDE_INT_MIN;
1886 quotient[1] = 0;
1887 }
1888 if (remainder)
1889 {
1890 remainder[0] = 0;
1891 *remainder_len = 1;
1892 }
1893 return 2;
1894 }
1895 else
1896 {
1897 if (quotient)
1898 quotient[0] = o0 / o1;
1899 if (remainder)
1900 {
1901 remainder[0] = o0 % o1;
1902 *remainder_len = 1;
1903 }
1904 return 1;
1905 }
1906 }
1907
1908 if (sgn == UNSIGNED
      && wi::fits_uhwi_p (dividend)
      && wi::fits_uhwi_p (divisor))
1911 {
1912 unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
1913 unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();
1914 unsigned int quotient_len = 1;
1915
1916 if (quotient)
1917 {
1918 quotient[0] = o0 / o1;
	  quotient_len = canonize_uhwi (quotient, dividend_prec);
1920 }
1921 if (remainder)
1922 {
1923 remainder[0] = o0 % o1;
	  *remainder_len = canonize_uhwi (remainder, dividend_prec);
1925 }
1926 return quotient_len;
1927 }
1928
1929 /* Make the divisor and dividend positive and remember what we
1930 did. */
1931 if (sgn == SIGNED)
1932 {
      if (wi::neg_p (dividend))
1934 {
1935 neg_dividend = -dividend;
1936 dividend = neg_dividend;
1937 dividend_neg = true;
1938 }
      if (wi::neg_p (divisor))
1940 {
1941 neg_divisor = -divisor;
1942 divisor = neg_divisor;
1943 divisor_neg = true;
1944 }
1945 }
1946
1947 unsigned HOST_HALF_WIDE_INT
1948 b_quotient_buf[4 * WIDE_INT_MAX_INL_PRECISION
1949 / HOST_BITS_PER_HALF_WIDE_INT];
1950 unsigned HOST_HALF_WIDE_INT
1951 b_remainder_buf[4 * WIDE_INT_MAX_INL_PRECISION
1952 / HOST_BITS_PER_HALF_WIDE_INT];
1953 unsigned HOST_HALF_WIDE_INT
1954 b_dividend_buf[(4 * WIDE_INT_MAX_INL_PRECISION
1955 / HOST_BITS_PER_HALF_WIDE_INT) + 1];
1956 unsigned HOST_HALF_WIDE_INT
1957 b_divisor_buf[4 * WIDE_INT_MAX_INL_PRECISION
1958 / HOST_BITS_PER_HALF_WIDE_INT];
1959 unsigned HOST_HALF_WIDE_INT *b_quotient = b_quotient_buf;
1960 unsigned HOST_HALF_WIDE_INT *b_remainder = b_remainder_buf;
1961 unsigned HOST_HALF_WIDE_INT *b_dividend = b_dividend_buf;
1962 unsigned HOST_HALF_WIDE_INT *b_divisor = b_divisor_buf;
1963
1964 if (sgn == SIGNED || dividend_val[dividend_len - 1] >= 0)
1965 dividend_prec = MIN ((dividend_len + 1) * HOST_BITS_PER_WIDE_INT,
1966 dividend_prec);
1967 if (sgn == SIGNED || divisor_val[divisor_len - 1] >= 0)
1968 divisor_prec = MIN (divisor_len * HOST_BITS_PER_WIDE_INT, divisor_prec);
1969 unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
1970 unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
1971 if (UNLIKELY (dividend_prec > WIDE_INT_MAX_INL_PRECISION)
1972 || UNLIKELY (divisor_prec > WIDE_INT_MAX_INL_PRECISION))
1973 {
1974 unsigned HOST_HALF_WIDE_INT *buf
1975 = XALLOCAVEC (unsigned HOST_HALF_WIDE_INT,
1976 3 * dividend_blocks_needed + 1
1977 + divisor_blocks_needed);
1978 b_quotient = buf;
1979 b_remainder = b_quotient + dividend_blocks_needed;
1980 b_dividend = b_remainder + dividend_blocks_needed;
1981 b_divisor = b_dividend + dividend_blocks_needed + 1;
1982 memset (s: b_quotient, c: 0,
1983 n: dividend_blocks_needed * sizeof (HOST_HALF_WIDE_INT));
1984 }
1985 wi_unpack (result: b_dividend, input: dividend.get_val (), in_len: dividend.get_len (),
1986 out_len: dividend_blocks_needed, prec: dividend_prec, sgn: UNSIGNED);
1987 wi_unpack (result: b_divisor, input: divisor.get_val (), in_len: divisor.get_len (),
1988 out_len: divisor_blocks_needed, prec: divisor_prec, sgn: UNSIGNED);
1989
1990 m = dividend_blocks_needed;
1991 b_dividend[m] = 0;
1992 while (m > 1 && b_dividend[m - 1] == 0)
1993 m--;
1994
1995 n = divisor_blocks_needed;
1996 while (n > 1 && b_divisor[n - 1] == 0)
1997 n--;
1998
1999 if (b_quotient == b_quotient_buf)
2000 memset (s: b_quotient_buf, c: 0, n: sizeof (b_quotient_buf));
2001
2002 n = divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);
2003
2004 unsigned int quotient_len = 0;
2005 if (quotient)
2006 {
2007 quotient_len = wi_pack (result: quotient, input: b_quotient, in_len: m, precision: dividend_prec);
2008 /* The quotient is neg if exactly one of the divisor or dividend is
2009 neg. */
2010 if (dividend_neg != divisor_neg)
2011 quotient_len = wi::sub_large (val: quotient, op0: zeros, op0len: 1, op1: quotient,
2012 op1len: quotient_len, prec: dividend_prec,
2013 sgn: UNSIGNED, overflow: 0);
2014 }
2015
2016 if (remainder)
2017 {
2018 *remainder_len = wi_pack (result: remainder, input: b_remainder, in_len: n, precision: dividend_prec);
2019 /* The remainder is always the same sign as the dividend. */
2020 if (dividend_neg)
2021 *remainder_len = wi::sub_large (val: remainder, op0: zeros, op0len: 1, op1: remainder,
2022 op1len: *remainder_len, prec: dividend_prec,
2023 sgn: UNSIGNED, overflow: 0);
2024 }
2025
2026 return quotient_len;
2027}
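
/* Illustrative usage sketch (not part of the original file): callers
   normally reach divmod_internal through the wi:: wrappers declared in
   wide-int.h rather than calling it directly. Assuming 32-bit operands:

     wi::overflow_type ovf;
     wide_int x = wi::shwi (-7, 32);
     wide_int y = wi::shwi (3, 32);
     wide_int r;
     wide_int q = wi::divmod_trunc (x, y, SIGNED, &r);  // q = -2, r = -1
     wide_int z = wi::div_trunc (x, wi::shwi (0, 32), SIGNED, &ovf);
     // ovf == wi::OVF_OVERFLOW and, as described above, Z is the
     // original dividend.  */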

/*
 * Shifting, rotating and extraction.
 */

/* Left shift XVAL by SHIFT and store the result in VAL. Return the
   number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */
unsigned int
wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                  unsigned int xlen, unsigned int precision,
                  unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift. */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* The whole-block shift fills with zeros. */
  unsigned int len = BLOCKS_NEEDED (precision);
  len = MIN (xlen + skip + 1, len);
  for (unsigned int i = 0; i < skip; ++i)
    val[i] = 0;

  /* It's easier to handle the simple block case specially. */
  if (small_shift == 0)
    for (unsigned int i = skip; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i - skip);
  else
    {
      /* The first unfilled output block is a left shift of the first
         block in XVAL. The other output blocks contain bits from two
         consecutive input blocks. */
      unsigned HOST_WIDE_INT carry = 0;
      for (unsigned int i = skip; i < len; ++i)
        {
          unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
          val[i] = (x << small_shift) | carry;
          carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
        }
    }
  return canonize (val, len, precision);
}
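
/* Worked example (illustrative): on a host with 64-bit HOST_WIDE_INT,
   a left shift of a 128-bit value by 70 splits into skip = 1 whole
   block and small_shift = 6 bits; output block 0 is zero, block 1 is
   xval[0] << 6, and the carry xval[0] >> 58 feeds the next block. */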

/* Right shift XVAL by SHIFT and store the result in VAL. LEN is the
   number of blocks in VAL. The input has XPRECISION bits and the
   output has XPRECISION - SHIFT bits. */
static void
rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                     unsigned int xlen, unsigned int shift, unsigned int len)
{
  /* Split the shift into a whole-block shift and a subblock shift. */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* It's easier to handle the simple block case specially. */
  if (small_shift == 0)
    for (unsigned int i = 0; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i + skip);
  else
    {
      /* Each output block but the last is a combination of two input blocks.
         The last block is a right shift of the last block in XVAL. */
      unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
      for (unsigned int i = 0; i < len; ++i)
        {
          val[i] = curr >> small_shift;
          curr = safe_uhwi (xval, xlen, i + skip + 1);
          val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
        }
    }
}

/* Logically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL. XVAL has XPRECISION bits and
   VAL has PRECISION bits. */
unsigned int
wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int xprecision,
                   unsigned int precision, unsigned int shift)
{
  /* Work out how many blocks are needed to store the significant bits
     (excluding the upper zeros or signs). */
  unsigned int blocks_needed = BLOCKS_NEEDED (xprecision - shift);
  unsigned int len = blocks_needed;
  if (len > xlen && xval[xlen - 1] >= 0)
    len = xlen;

  rshift_large_common (val, xval, xlen, shift, len);

  /* The value we just created has precision XPRECISION - SHIFT.
     Zero-extend it to wider precisions. */
  if (precision > xprecision - shift && len == blocks_needed)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
        val[len - 1] = zext_hwi (val[len - 1], small_prec);
      else if (val[len - 1] < 0)
        {
          /* Add a new block with a zero. */
          val[len++] = 0;
          return len;
        }
    }
  return canonize (val, len, precision);
}

/* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL. XVAL has XPRECISION bits and
   VAL has PRECISION bits. */
unsigned int
wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
                   unsigned int xlen, unsigned int xprecision,
                   unsigned int precision, unsigned int shift)
{
  /* Work out how many blocks are needed to store the significant bits
     (excluding the upper zeros or signs). */
  unsigned int blocks_needed = BLOCKS_NEEDED (xprecision - shift);
  unsigned int len = MIN (xlen, blocks_needed);

  rshift_large_common (val, xval, xlen, shift, len);

  /* The value we just created has precision XPRECISION - SHIFT.
     Sign-extend it to wider types. */
  if (precision > xprecision - shift && len == blocks_needed)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
        val[len - 1] = sext_hwi (val[len - 1], small_prec);
    }
  return canonize (val, len, precision);
}
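
/* Illustrative sketch (not part of the original file): the logical and
   arithmetic variants differ only in how the vacated upper bits are
   filled, e.g. for a 16-bit value with the sign bit set:

     wide_int v = wi::uhwi (0x8000, 16);
     wide_int l = wi::lrshift (v, 4);   // 0x0800, zero-filled
     wide_int a = wi::arshift (v, 4);   // 0xf800, sign-filled  */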

/* Return the number of leading (upper) zeros in X. */
int
wi::clz (const wide_int_ref &x)
{
  if (x.sign_mask () < 0)
    /* The upper bit is set, so there are no leading zeros. */
    return 0;

  /* Calculate how many bits there are above the highest represented block. */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  if (count < 0)
    /* The upper -COUNT bits of HIGH are not part of the value.
       Clear them out. */
    high = (high << -count) >> -count;

  /* We don't need to look below HIGH. Either HIGH is nonzero,
     or the top bit of the block below is nonzero; clz_hwi is
     HOST_BITS_PER_WIDE_INT in the latter case. */
  return count + clz_hwi (high);
}

/* Return the number of redundant sign bits in X. (That is, the number
   of bits immediately below the sign bit that have the same value as
   the sign bit.) */
int
wi::clrsb (const wide_int_ref &x)
{
  /* Calculate how many bits there are above the highest represented block. */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  unsigned HOST_WIDE_INT mask = -1;
  if (count < 0)
    {
      /* The upper -COUNT bits of HIGH are not part of the value.
         Clear them from both MASK and HIGH. */
      mask >>= -count;
      high &= mask;
    }

  /* If the top bit is 1, count the number of leading 1s. If the top
     bit is zero, count the number of leading zeros. */
  if (high > mask / 2)
    high ^= mask;

  /* There are no sign bits below the top block, so we don't need to look
     beyond HIGH. Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
     HIGH is 0. */
  return count + clz_hwi (high) - 1;
}
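
/* Worked example (illustrative): for an 8-bit value 0xf0 = 0b11110000
   the three bits below the sign bit equal the sign bit, so clrsb is 3;
   the XOR-with-MASK step above turns the "count leading 1s" case into
   the "count leading 0s" case handled by clz_hwi. */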

/* Return the number of trailing (lower) zeros in X. */
int
wi::ctz (const wide_int_ref &x)
{
  if (x.len == 1 && x.ulow () == 0)
    return x.precision;

  /* Having dealt with the zero case, there must be a block with a
     nonzero bit. We don't care about the bits above the first 1. */
  unsigned int i = 0;
  while (x.val[i] == 0)
    ++i;
  return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
}

/* If X is an exact power of 2, return the base-2 logarithm, otherwise
   return -1. */
int
wi::exact_log2 (const wide_int_ref &x)
{
  /* Reject cases where there are implicit -1 blocks above HIGH. */
  if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
    return -1;

  /* Set CRUX to the index of the entry that should be nonzero.
     If the top block is zero then the next lowest block (if any)
     must have the high bit set. */
  unsigned int crux = x.len - 1;
  if (crux > 0 && x.val[crux] == 0)
    crux -= 1;

  /* Check that all lower blocks are zero. */
  for (unsigned int i = 0; i < crux; ++i)
    if (x.val[i] != 0)
      return -1;

  /* Get a zero-extended form of block CRUX. */
  unsigned HOST_WIDE_INT hwi = x.val[crux];
  if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
    hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);

  /* Now it's down to whether HWI is a power of 2. */
  int res = ::exact_log2 (hwi);
  if (res >= 0)
    res += crux * HOST_BITS_PER_WIDE_INT;
  return res;
}
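
/* Illustrative sketch (not part of the original file): a power of two
   has a single nonzero block that is itself a power of two, so for a
   128-bit value equal to 1 << 70, CRUX is block 1 and the result is
   64 + ::exact_log2 (1 << 6) = 70:

     wide_int p = wi::lshift (wi::uhwi (1, 128), 70);
     gcc_assert (wi::exact_log2 (p) == 70);  */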

/* Return the base-2 logarithm of X, rounding down. Return -1 if X is 0. */
int
wi::floor_log2 (const wide_int_ref &x)
{
  return x.precision - 1 - clz (x);
}

/* Return the index of the first (lowest) set bit in X, counting from 1.
   Return 0 if X is 0. */
int
wi::ffs (const wide_int_ref &x)
{
  return eq_p (x, 0) ? 0 : ctz (x) + 1;
}

/* Return true if sign-extending X to have precision PRECISION would give
   the minimum signed value at that precision. */
bool
wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
{
  return ctz (x) + 1 == int (precision);
}

/* Return true if X represents the minimum signed value. */
bool
wi::only_sign_bit_p (const wide_int_ref &x)
{
  return only_sign_bit_p (x, x.precision);
}

/* Return VAL if VAL has no bits set outside MASK. Otherwise round VAL
   down to the previous value that has no bits set outside MASK.
   This rounding wraps for signed values if VAL is negative and
   the top bit of MASK is clear.

   For example, round_down_for_mask (6, 0xf1) would give 1 and
   round_down_for_mask (24, 0xf1) would give 17. */

wide_int
wi::round_down_for_mask (const wide_int &val, const wide_int &mask)
{
  /* Get the bits in VAL that are outside the mask. */
  wide_int extra_bits = wi::bit_and_not (val, mask);
  if (extra_bits == 0)
    return val;

  /* Get a mask that includes the top bit in EXTRA_BITS and is all 1s
     below that bit. */
  unsigned int precision = val.get_precision ();
  wide_int lower_mask = wi::mask (precision - wi::clz (extra_bits),
                                  false, precision);

  /* Clear the bits that aren't in MASK, but ensure that all bits
     in MASK below the top cleared bit are set. */
  return (val & mask) | (mask & lower_mask);
}

/* Return VAL if VAL has no bits set outside MASK. Otherwise round VAL
   up to the next value that has no bits set outside MASK. The rounding
   wraps if there are no suitable values greater than VAL.

   For example, round_up_for_mask (6, 0xf1) would give 16 and
   round_up_for_mask (24, 0xf1) would give 32. */

wide_int
wi::round_up_for_mask (const wide_int &val, const wide_int &mask)
{
  /* Get the bits in VAL that are outside the mask. */
  wide_int extra_bits = wi::bit_and_not (val, mask);
  if (extra_bits == 0)
    return val;

  /* Get a mask that is all 1s above the top bit in EXTRA_BITS. */
  unsigned int precision = val.get_precision ();
  wide_int upper_mask = wi::mask (precision - wi::clz (extra_bits),
                                  true, precision);

  /* Get the bits of the mask that are above the top bit in EXTRA_BITS. */
  upper_mask &= mask;

  /* Conceptually we need to:

     - clear bits of VAL outside UPPER_MASK
     - add the lowest bit in UPPER_MASK to VAL (or add 0 if UPPER_MASK is 0)
     - propagate the carry through the bits of VAL in UPPER_MASK

     If (~VAL & UPPER_MASK) is nonzero, the carry eventually
     reaches that bit and the process leaves all lower bits clear.
     If (~VAL & UPPER_MASK) is zero then the result is also zero. */
  wide_int tmp = wi::bit_and_not (upper_mask, val);

  return (val | tmp) & -tmp;
}
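
/* Worked example (illustrative, matching the selftests below): for
   val = 24 = 0b11000 and mask = 0xf1, extra_bits is 0b01000, so
   upper_mask becomes 0xf0 and tmp = ~val & 0xf0 = 0xe0. The lowest set
   bit of tmp is 0x20, which is where the conceptual carry stops, and
   (val | tmp) & -tmp yields 32, as promised in the comment above. */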

/* Compute the modular multiplicative inverse of A modulo B
   using extended Euclid's algorithm. Assumes A and B are coprime,
   and that A and B have the same precision. */
wide_int
wi::mod_inv (const wide_int &a, const wide_int &b)
{
  /* Verify the assumption. */
  gcc_checking_assert (wi::eq_p (wi::gcd (a, b), 1));

  unsigned int p = a.get_precision () + 1;
  gcc_checking_assert (b.get_precision () + 1 == p);
  wide_int c = wide_int::from (a, p, UNSIGNED);
  wide_int d = wide_int::from (b, p, UNSIGNED);
  wide_int x0 = wide_int::from (0, p, UNSIGNED);
  wide_int x1 = wide_int::from (1, p, UNSIGNED);

  if (wi::eq_p (b, 1))
    return wide_int::from (1, p, UNSIGNED);

  while (wi::gt_p (c, 1, UNSIGNED))
    {
      wide_int t = d;
      wide_int q = wi::divmod_trunc (c, d, UNSIGNED, &d);
      c = t;
      wide_int s = x0;
      x0 = wi::sub (x1, wi::mul (q, x0));
      x1 = s;
    }
  if (wi::lt_p (x1, 0, SIGNED))
    x1 += d;
  return x1;
}
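
/* Worked example (illustrative): for a = 3 and b = 11 the loop runs the
   extended Euclidean algorithm and returns 4, since 3 * 4 = 12 and
   12 mod 11 = 1. Both operands must be built with the same precision,
   e.g. wi::mod_inv (wi::shwi (3, 32), wi::shwi (11, 32)). */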

/*
 * Private utilities.
 */

void gt_ggc_mx (widest_int *) { }
void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
void gt_pch_nx (widest_int *) { }

template void wide_int::dump () const;
template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
template void offset_int::dump () const;
template void widest_int::dump () const;

/* We could add all the above ::dump variants here, but wide_int and
   widest_int should handle the common cases. Besides, you can always
   call the dump method directly. */

DEBUG_FUNCTION void
debug (const wide_int &ref)
{
  ref.dump ();
}

DEBUG_FUNCTION void
debug (const wide_int *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}

DEBUG_FUNCTION void
debug (const widest_int &ref)
{
  ref.dump ();
}

DEBUG_FUNCTION void
debug (const widest_int *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}

#if CHECKING_P

namespace selftest {

/* Selftests for wide ints. We run these multiple times, once per type. */

/* Helper function for building a test value. */

template <class VALUE_TYPE>
static VALUE_TYPE
from_int (int i);

/* Specializations of the fixture for each wide-int type. */

/* Specialization for VALUE_TYPE == wide_int. */

template <>
wide_int
from_int (int i)
{
  return wi::shwi (i, 32);
}

/* Specialization for VALUE_TYPE == offset_int. */

template <>
offset_int
from_int (int i)
{
  return offset_int (i);
}

/* Specialization for VALUE_TYPE == widest_int. */

template <>
widest_int
from_int (int i)
{
  return widest_int (i);
}

/* Verify that print_dec (WI, ..., SGN) gives the expected string
   representation (using base 10). */

static void
assert_deceq (const char *expected, const wide_int_ref &wi, signop sgn)
{
  char buf[WIDE_INT_PRINT_BUFFER_SIZE], *p = buf;
  unsigned len;
  if (print_dec_buf_size (wi, sgn, &len))
    p = XALLOCAVEC (char, len);
  print_dec (wi, p, sgn);
  ASSERT_STREQ (expected, p);
}

/* Likewise for base 16. */

static void
assert_hexeq (const char *expected, const wide_int_ref &wi)
{
  char buf[WIDE_INT_PRINT_BUFFER_SIZE], *p = buf;
  unsigned len;
  if (print_hex_buf_size (wi, &len))
    p = XALLOCAVEC (char, len);
  print_hex (wi, p);
  ASSERT_STREQ (expected, p);
}

/* Test cases. */

/* Verify that print_dec and print_hex work for VALUE_TYPE. */

template <class VALUE_TYPE>
static void
test_printing ()
{
  VALUE_TYPE a = from_int<VALUE_TYPE> (42);
  assert_deceq ("42", a, SIGNED);
  assert_hexeq ("0x2a", a);
  assert_hexeq ("0x1fffffffffffffffff", wi::shwi (-1, 69));
  assert_hexeq ("0xffffffffffffffff", wi::mask (64, false, 69));
  assert_hexeq ("0xffffffffffffffff", wi::mask <widest_int> (64, false));
  if (WIDE_INT_MAX_INL_PRECISION > 128)
    {
      assert_hexeq ("0x20000000000000000fffffffffffffffe",
                    wi::lshift (1, 129) + wi::lshift (1, 64) - 2);
      assert_hexeq ("0x200000000000004000123456789abcdef",
                    wi::lshift (1, 129) + wi::lshift (1, 74)
                    + wi::lshift (0x1234567, 32) + 0x89abcdef);
    }
}

/* Verify that various operations work correctly for VALUE_TYPE,
   unary and binary, using both function syntax, and
   overloaded-operators. */

template <class VALUE_TYPE>
static void
test_ops ()
{
  VALUE_TYPE a = from_int<VALUE_TYPE> (7);
  VALUE_TYPE b = from_int<VALUE_TYPE> (3);

  /* Using functions. */
  assert_deceq ("-7", wi::neg (a), SIGNED);
  assert_deceq ("10", wi::add (a, b), SIGNED);
  assert_deceq ("4", wi::sub (a, b), SIGNED);
  assert_deceq ("-4", wi::sub (b, a), SIGNED);
  assert_deceq ("21", wi::mul (a, b), SIGNED);

  /* Using operators. */
  assert_deceq ("-7", -a, SIGNED);
  assert_deceq ("10", a + b, SIGNED);
  assert_deceq ("4", a - b, SIGNED);
  assert_deceq ("-4", b - a, SIGNED);
  assert_deceq ("21", a * b, SIGNED);
}

/* Verify that various comparisons work correctly for VALUE_TYPE. */

template <class VALUE_TYPE>
static void
test_comparisons ()
{
  VALUE_TYPE a = from_int<VALUE_TYPE> (7);
  VALUE_TYPE b = from_int<VALUE_TYPE> (3);

  /* == */
  ASSERT_TRUE (wi::eq_p (a, a));
  ASSERT_FALSE (wi::eq_p (a, b));

  /* != */
  ASSERT_TRUE (wi::ne_p (a, b));
  ASSERT_FALSE (wi::ne_p (a, a));

  /* < */
  ASSERT_FALSE (wi::lts_p (a, a));
  ASSERT_FALSE (wi::lts_p (a, b));
  ASSERT_TRUE (wi::lts_p (b, a));

  /* <= */
  ASSERT_TRUE (wi::les_p (a, a));
  ASSERT_FALSE (wi::les_p (a, b));
  ASSERT_TRUE (wi::les_p (b, a));

  /* > */
  ASSERT_FALSE (wi::gts_p (a, a));
  ASSERT_TRUE (wi::gts_p (a, b));
  ASSERT_FALSE (wi::gts_p (b, a));

  /* >= */
  ASSERT_TRUE (wi::ges_p (a, a));
  ASSERT_TRUE (wi::ges_p (a, b));
  ASSERT_FALSE (wi::ges_p (b, a));

  /* comparison */
  ASSERT_EQ (-1, wi::cmps (b, a));
  ASSERT_EQ (0, wi::cmps (a, a));
  ASSERT_EQ (1, wi::cmps (a, b));
}

/* Run all of the selftests, using the given VALUE_TYPE. */

template <class VALUE_TYPE>
static void run_all_wide_int_tests ()
{
  test_printing <VALUE_TYPE> ();
  test_ops <VALUE_TYPE> ();
  test_comparisons <VALUE_TYPE> ();
}

/* Test overflow conditions. */

static void
test_overflow ()
{
  static int precs[] = { 31, 32, 33, 63, 64, 65, 127, 128 };
  static int offsets[] = { 16, 1, 0 };
  for (unsigned int i = 0; i < ARRAY_SIZE (precs); ++i)
    for (unsigned int j = 0; j < ARRAY_SIZE (offsets); ++j)
      {
        int prec = precs[i];
        int offset = offsets[j];
        wi::overflow_type overflow;
        wide_int sum, diff;

        sum = wi::add (wi::max_value (prec, UNSIGNED) - offset, 1,
                       UNSIGNED, &overflow);
        ASSERT_EQ (sum, -offset);
        ASSERT_EQ (overflow != wi::OVF_NONE, offset == 0);

        sum = wi::add (1, wi::max_value (prec, UNSIGNED) - offset,
                       UNSIGNED, &overflow);
        ASSERT_EQ (sum, -offset);
        ASSERT_EQ (overflow != wi::OVF_NONE, offset == 0);

        diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
                        wi::max_value (prec, UNSIGNED),
                        UNSIGNED, &overflow);
        ASSERT_EQ (diff, -offset);
        ASSERT_EQ (overflow != wi::OVF_NONE, offset != 0);

        diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
                        wi::max_value (prec, UNSIGNED) - 1,
                        UNSIGNED, &overflow);
        ASSERT_EQ (diff, 1 - offset);
        ASSERT_EQ (overflow != wi::OVF_NONE, offset > 1);
      }
}

/* Test the round_{down,up}_for_mask functions. */

static void
test_round_for_mask ()
{
  unsigned int prec = 18;
  ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (17, prec),
                                          wi::shwi (0xf1, prec)));
  ASSERT_EQ (17, wi::round_up_for_mask (wi::shwi (17, prec),
                                        wi::shwi (0xf1, prec)));

  ASSERT_EQ (1, wi::round_down_for_mask (wi::shwi (6, prec),
                                         wi::shwi (0xf1, prec)));
  ASSERT_EQ (16, wi::round_up_for_mask (wi::shwi (6, prec),
                                        wi::shwi (0xf1, prec)));

  ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (24, prec),
                                          wi::shwi (0xf1, prec)));
  ASSERT_EQ (32, wi::round_up_for_mask (wi::shwi (24, prec),
                                        wi::shwi (0xf1, prec)));

  ASSERT_EQ (0x011, wi::round_down_for_mask (wi::shwi (0x22, prec),
                                             wi::shwi (0x111, prec)));
  ASSERT_EQ (0x100, wi::round_up_for_mask (wi::shwi (0x22, prec),
                                           wi::shwi (0x111, prec)));

  ASSERT_EQ (100, wi::round_down_for_mask (wi::shwi (101, prec),
                                           wi::shwi (0xfc, prec)));
  ASSERT_EQ (104, wi::round_up_for_mask (wi::shwi (101, prec),
                                         wi::shwi (0xfc, prec)));

  ASSERT_EQ (0x2bc, wi::round_down_for_mask (wi::shwi (0x2c2, prec),
                                             wi::shwi (0xabc, prec)));
  ASSERT_EQ (0x800, wi::round_up_for_mask (wi::shwi (0x2c2, prec),
                                           wi::shwi (0xabc, prec)));

  ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0xabd, prec),
                                             wi::shwi (0xabc, prec)));
  ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0xabd, prec),
                                       wi::shwi (0xabc, prec)));

  ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0x1000, prec),
                                             wi::shwi (0xabc, prec)));
  ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0x1000, prec),
                                       wi::shwi (0xabc, prec)));
}

/* Run all of the selftests within this file, for all value types. */

void
wide_int_cc_tests ()
{
  run_all_wide_int_tests <wide_int> ();
  run_all_wide_int_tests <offset_int> ();
  run_all_wide_int_tests <widest_int> ();
  test_overflow ();
  test_round_for_mask ();
  ASSERT_EQ (wi::mask (128, false, 128),
             wi::shifted_mask (0, 128, false, 128));
  ASSERT_EQ (wi::mask (128, true, 128),
             wi::shifted_mask (0, 128, true, 128));
  ASSERT_EQ (wi::multiple_of_p (from_int <widest_int> (1),
                                from_int <widest_int> (-128), UNSIGNED),
             false);
}

} // namespace selftest
#endif /* CHECKING_P */