1/* Utility routines for data type conversion for GCC.
2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
3
4This file is part of GCC.
5
6GCC is free software; you can redistribute it and/or modify it under
7the terms of the GNU General Public License as published by the Free
8Software Foundation; either version 3, or (at your option) any later
9version.
10
11GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12WARRANTY; without even the implied warranty of MERCHANTABILITY or
13FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14for more details.
15
16You should have received a copy of the GNU General Public License
17along with GCC; see the file COPYING3. If not see
18<http://www.gnu.org/licenses/>. */
19
20
21/* These routines are somewhat language-independent utility function
22 intended to be called by the language-specific convert () functions. */
23
24#include "config.h"
25#include "system.h"
26#include "coretypes.h"
27#include "target.h"
28#include "tree.h"
29#include "diagnostic-core.h"
30#include "fold-const.h"
31#include "stor-layout.h"
32#include "convert.h"
33#include "langhooks.h"
34#include "builtins.h"
35#include "ubsan.h"
36#include "stringpool.h"
37#include "attribs.h"
38#include "asan.h"
39
/* Build a unary expression of code CODE with operand EXPR of type TYPE at
   location LOC.  Fold the resulting node only when FOLD_P is true;
   otherwise build it unfolded.  */
#define maybe_fold_build1_loc(FOLD_P, LOC, CODE, TYPE, EXPR) \
  ((FOLD_P) ? fold_build1_loc (LOC, CODE, TYPE, EXPR)	\
   : build1_loc (LOC, CODE, TYPE, EXPR))
/* Likewise for a binary expression with operands EXPR1 and EXPR2.  */
#define maybe_fold_build2_loc(FOLD_P, LOC, CODE, TYPE, EXPR1, EXPR2) \
  ((FOLD_P) ? fold_build2_loc (LOC, CODE, TYPE, EXPR1, EXPR2)	\
   : build2_loc (LOC, CODE, TYPE, EXPR1, EXPR2))
46
47/* Convert EXPR to some pointer or reference type TYPE.
48 EXPR must be pointer, reference, integer, enumeral, or literal zero;
49 in other cases error is called. If FOLD_P is true, try to fold the
50 expression. */
51
52static tree
53convert_to_pointer_1 (tree type, tree expr, bool fold_p)
54{
55 location_t loc = EXPR_LOCATION (expr);
56 if (TREE_TYPE (expr) == type)
57 return expr;
58
59 switch (TREE_CODE (TREE_TYPE (expr)))
60 {
61 case POINTER_TYPE:
62 case REFERENCE_TYPE:
63 {
64 /* If the pointers point to different address spaces, conversion needs
65 to be done via a ADDR_SPACE_CONVERT_EXPR instead of a NOP_EXPR. */
66 addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (type));
67 addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (expr)));
68
69 if (to_as == from_as)
70 return maybe_fold_build1_loc (fold_p, loc, NOP_EXPR, type, expr);
71 else
72 return maybe_fold_build1_loc (fold_p, loc, ADDR_SPACE_CONVERT_EXPR,
73 type, expr);
74 }
75
76 case INTEGER_TYPE:
77 case ENUMERAL_TYPE:
78 case BOOLEAN_TYPE:
79 {
80 /* If the input precision differs from the target pointer type
81 precision, first convert the input expression to an integer type of
82 the target precision. Some targets, e.g. VMS, need several pointer
83 sizes to coexist so the latter isn't necessarily POINTER_SIZE. */
84 unsigned int pprec = TYPE_PRECISION (type);
85 unsigned int eprec = TYPE_PRECISION (TREE_TYPE (expr));
86
87 if (eprec != pprec)
88 expr
89 = maybe_fold_build1_loc (fold_p, loc, NOP_EXPR,
90 lang_hooks.types.type_for_size (pprec, 0),
91 expr);
92 }
93 return maybe_fold_build1_loc (fold_p, loc, CONVERT_EXPR, type, expr);
94
95 default:
96 error ("cannot convert to a pointer type");
97 return convert_to_pointer_1 (type, integer_zero_node, fold_p);
98 }
99}
100
101/* A wrapper around convert_to_pointer_1 that always folds the
102 expression. */
103
104tree
105convert_to_pointer (tree type, tree expr)
106{
107 return convert_to_pointer_1 (type, expr, true);
108}
109
110/* A wrapper around convert_to_pointer_1 that only folds the
111 expression if DOFOLD, or if it is CONSTANT_CLASS_P. */
112
113tree
114convert_to_pointer_maybe_fold (tree type, tree expr, bool dofold)
115{
116 return convert_to_pointer_1 (type, expr, dofold || CONSTANT_CLASS_P (expr));
117}
118
/* Convert EXPR to some floating-point type TYPE.

   EXPR must be float, fixed-point, integer, or enumeral;
   in other cases error is called.  If FOLD_P is true, try to fold
   the expression.  */

static tree
convert_to_real_1 (tree type, tree expr, bool fold_p)
{
  /* FCODE identifies the math builtin that EXPR calls, if it is such a
     call; END_BUILTINS otherwise.  */
  enum built_in_function fcode = builtin_mathfn_code (expr);
  tree itype = TREE_TYPE (expr);
  location_t loc = EXPR_LOCATION (expr);

  /* For a COMPOUND_EXPR, convert only the value operand; the operand
     evaluated purely for side effects is kept unchanged.  */
  if (TREE_CODE (expr) == COMPOUND_EXPR)
    {
      tree t = convert_to_real_1 (type, TREE_OPERAND (expr, 1), fold_p);
      if (t == TREE_OPERAND (expr, 1))
	return expr;
      return build2_loc (EXPR_LOCATION (expr), COMPOUND_EXPR, TREE_TYPE (t),
			 TREE_OPERAND (expr, 0), t);
    }

  /* Disable until we figure out how to decide whether the functions are
     present in runtime.  */
  /* Convert (float)sqrt((double)x) where x is float into sqrtf(x) */
  if (optimize
      && (TYPE_MODE (type) == TYPE_MODE (double_type_node)
	  || TYPE_MODE (type) == TYPE_MODE (float_type_node)))
    {
      switch (fcode)
	{
	  /* Only the double and long double variants are listed; the
	     float variant is already as narrow as this transformation
	     can produce.  */
#define CASE_MATHFN(FN) case BUILT_IN_##FN: case BUILT_IN_##FN##L:
	  CASE_MATHFN (COSH)
	  CASE_MATHFN (EXP)
	  CASE_MATHFN (EXP10)
	  CASE_MATHFN (EXP2)
	  CASE_MATHFN (EXPM1)
	  CASE_MATHFN (GAMMA)
	  CASE_MATHFN (J0)
	  CASE_MATHFN (J1)
	  CASE_MATHFN (LGAMMA)
	  CASE_MATHFN (POW10)
	  CASE_MATHFN (SINH)
	  CASE_MATHFN (TGAMMA)
	  CASE_MATHFN (Y0)
	  CASE_MATHFN (Y1)
	  /* The above functions may set errno differently with float
	     input or output so this transformation is not safe with
	     -fmath-errno.  */
	  if (flag_errno_math)
	    break;
	  gcc_fallthrough ();
	  CASE_MATHFN (ACOS)
	  CASE_MATHFN (ACOSH)
	  CASE_MATHFN (ASIN)
	  CASE_MATHFN (ASINH)
	  CASE_MATHFN (ATAN)
	  CASE_MATHFN (ATANH)
	  CASE_MATHFN (CBRT)
	  CASE_MATHFN (COS)
	  CASE_MATHFN (ERF)
	  CASE_MATHFN (ERFC)
	  CASE_MATHFN (LOG)
	  CASE_MATHFN (LOG10)
	  CASE_MATHFN (LOG2)
	  CASE_MATHFN (LOG1P)
	  CASE_MATHFN (SIN)
	  CASE_MATHFN (TAN)
	  CASE_MATHFN (TANH)
	  /* The above functions are not safe to do this conversion.  */
	  if (!flag_unsafe_math_optimizations)
	    break;
	  gcc_fallthrough ();
	  CASE_MATHFN (SQRT)
	  CASE_MATHFN (FABS)
	  CASE_MATHFN (LOGB)
#undef CASE_MATHFN
	    {
	      tree arg0 = strip_float_extensions (CALL_EXPR_ARG (expr, 0));
	      tree newtype = type;

	      /* We have (outertype)sqrt((innertype)x).  Choose the wider mode from
		 the both as the safe type for operation.  */
	      if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (type))
		newtype = TREE_TYPE (arg0);

	      /* We consider to convert

		     (T1) sqrtT2 ((T2) exprT3)
		 to
		     (T1) sqrtT4 ((T4) exprT3)

		  , where T1 is TYPE, T2 is ITYPE, T3 is TREE_TYPE (ARG0),
		 and T4 is NEWTYPE.  All those types are of floating point types.
		 T4 (NEWTYPE) should be narrower than T2 (ITYPE). This conversion
		 is safe only if P1 >= P2*2+2, where P1 and P2 are precisions of
		 T2 and T4.  See the following URL for a reference:
		 http://stackoverflow.com/questions/9235456/determining-
		 floating-point-square-root
		 */
	      if ((fcode == BUILT_IN_SQRT || fcode == BUILT_IN_SQRTL)
		  && !flag_unsafe_math_optimizations)
		{
		  /* The following conversion is unsafe even the precision condition
		     below is satisfied:

		     (float) sqrtl ((long double) double_val) -> (float) sqrt (double_val)
		    */
		  if (TYPE_MODE (type) != TYPE_MODE (newtype))
		    break;

		  int p1 = REAL_MODE_FORMAT (TYPE_MODE (itype))->p;
		  int p2 = REAL_MODE_FORMAT (TYPE_MODE (newtype))->p;
		  if (p1 < p2 * 2 + 2)
		    break;
		}

	      /* Be careful about integer to fp conversions.
		 These may overflow still.  */
	      if (FLOAT_TYPE_P (TREE_TYPE (arg0))
		  && TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
		  && (TYPE_MODE (newtype) == TYPE_MODE (double_type_node)
		      || TYPE_MODE (newtype) == TYPE_MODE (float_type_node)))
		{
		  tree fn = mathfn_built_in (newtype, fcode);
		  if (fn)
		    {
		      tree arg = convert_to_real_1 (newtype, arg0, fold_p);
		      expr = build_call_expr (fn, 1, arg);
		      if (newtype == type)
			return expr;
		    }
		}
	    }
	  /* If the call could not be narrowed (or only partially, with a
	     further conversion still required), fall through to the
	     generic handling below.  */
	default:
	  break;
	}
    }

  /* Propagate the cast into the operation.  */
  if (itype != type && FLOAT_TYPE_P (type))
    switch (TREE_CODE (expr))
      {
	/* Convert (float)-x into -(float)x.  This is safe for
	   round-to-nearest rounding mode when the inner type is float.  */
	case ABS_EXPR:
	case NEGATE_EXPR:
	  if (!flag_rounding_math
	      && FLOAT_TYPE_P (itype)
	      && TYPE_PRECISION (type) < TYPE_PRECISION (itype))
	    {
	      tree arg = convert_to_real_1 (type, TREE_OPERAND (expr, 0),
					    fold_p);
	      return build1 (TREE_CODE (expr), type, arg);
	    }
	  break;
	/* Convert (outertype)((innertype0)a+(innertype1)b)
	   into ((newtype)a+(newtype)b) where newtype
	   is the widest mode from all of these.  */
	case PLUS_EXPR:
	case MINUS_EXPR:
	case MULT_EXPR:
	case RDIV_EXPR:
	   {
	     tree arg0 = strip_float_extensions (TREE_OPERAND (expr, 0));
	     tree arg1 = strip_float_extensions (TREE_OPERAND (expr, 1));

	     if (FLOAT_TYPE_P (TREE_TYPE (arg0))
		 && FLOAT_TYPE_P (TREE_TYPE (arg1))
		 && DECIMAL_FLOAT_TYPE_P (itype) == DECIMAL_FLOAT_TYPE_P (type))
	       {
		  tree newtype = type;

		  /* Decimal float arithmetic must be performed in the
		     widest decimal float type involved; pick it from the
		     operand and result modes.  */
		  if (TYPE_MODE (TREE_TYPE (arg0)) == SDmode
		      || TYPE_MODE (TREE_TYPE (arg1)) == SDmode
		      || TYPE_MODE (type) == SDmode)
		    newtype = dfloat32_type_node;
		  if (TYPE_MODE (TREE_TYPE (arg0)) == DDmode
		      || TYPE_MODE (TREE_TYPE (arg1)) == DDmode
		      || TYPE_MODE (type) == DDmode)
		    newtype = dfloat64_type_node;
		  if (TYPE_MODE (TREE_TYPE (arg0)) == TDmode
		      || TYPE_MODE (TREE_TYPE (arg1)) == TDmode
		      || TYPE_MODE (type) == TDmode)
		    newtype = dfloat128_type_node;
		  if (newtype == dfloat32_type_node
		      || newtype == dfloat64_type_node
		      || newtype == dfloat128_type_node)
		    {
		      expr = build2 (TREE_CODE (expr), newtype,
				     convert_to_real_1 (newtype, arg0,
							fold_p),
				     convert_to_real_1 (newtype, arg1,
							fold_p));
		      if (newtype == type)
			return expr;
		      break;
		    }

		  if (TYPE_PRECISION (TREE_TYPE (arg0)) > TYPE_PRECISION (newtype))
		    newtype = TREE_TYPE (arg0);
		  if (TYPE_PRECISION (TREE_TYPE (arg1)) > TYPE_PRECISION (newtype))
		    newtype = TREE_TYPE (arg1);
		  /* Sometimes this transformation is safe (cannot
		     change results through affecting double rounding
		     cases) and sometimes it is not.  If NEWTYPE is
		     wider than TYPE, e.g. (float)((long double)double
		     + (long double)double) converted to
		     (float)(double + double), the transformation is
		     unsafe regardless of the details of the types
		     involved; double rounding can arise if the result
		     of NEWTYPE arithmetic is a NEWTYPE value half way
		     between two representable TYPE values but the
		     exact value is sufficiently different (in the
		     right direction) for this difference to be
		     visible in ITYPE arithmetic.  If NEWTYPE is the
		     same as TYPE, however, the transformation may be
		     safe depending on the types involved: it is safe
		     if the ITYPE has strictly more than twice as many
		     mantissa bits as TYPE, can represent infinities
		     and NaNs if the TYPE can, and has sufficient
		     exponent range for the product or ratio of two
		     values representable in the TYPE to be within the
		     range of normal values of ITYPE.  */
		  if (TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
		      && (flag_unsafe_math_optimizations
			  || (TYPE_PRECISION (newtype) == TYPE_PRECISION (type)
			      && real_can_shorten_arithmetic (TYPE_MODE (itype),
							      TYPE_MODE (type))
			      && !excess_precision_type (newtype))))
		    {
		      expr = build2 (TREE_CODE (expr), newtype,
				     convert_to_real_1 (newtype, arg0,
							fold_p),
				     convert_to_real_1 (newtype, arg1,
							fold_p));
		      if (newtype == type)
			return expr;
		    }
	       }
	   }
	  break;
	default:
	  break;
      }

  /* Generic handling, dispatched on the type converted from.  */
  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case REAL_TYPE:
      /* Ignore the conversion if we don't need to store intermediate
	 results and neither type is a decimal float.  */
      return build1_loc (loc,
			 (flag_float_store
			  || DECIMAL_FLOAT_TYPE_P (type)
			  || DECIMAL_FLOAT_TYPE_P (itype))
			 ? CONVERT_EXPR : NOP_EXPR, type, expr);

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      return build1 (FLOAT_EXPR, type, expr);

    case FIXED_POINT_TYPE:
      return build1 (FIXED_CONVERT_EXPR, type, expr);

    case COMPLEX_TYPE:
      /* A complex-to-real conversion takes the real part and discards
	 the imaginary part.  */
      return convert (type,
		      maybe_fold_build1_loc (fold_p, loc, REALPART_EXPR,
					     TREE_TYPE (TREE_TYPE (expr)),
					     expr));

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      error ("pointer value used where a floating point value was expected");
      return convert_to_real_1 (type, integer_zero_node, fold_p);

    default:
      error ("aggregate value used where a float was expected");
      return convert_to_real_1 (type, integer_zero_node, fold_p);
    }
}
400
401/* A wrapper around convert_to_real_1 that always folds the
402 expression. */
403
404tree
405convert_to_real (tree type, tree expr)
406{
407 return convert_to_real_1 (type, expr, true);
408}
409
410/* A wrapper around convert_to_real_1 that only folds the
411 expression if DOFOLD, or if it is CONSTANT_CLASS_P. */
412
413tree
414convert_to_real_maybe_fold (tree type, tree expr, bool dofold)
415{
416 return convert_to_real_1 (type, expr, dofold || CONSTANT_CLASS_P (expr));
417}
418
419/* Try to narrow EX_FORM ARG0 ARG1 in narrowed arg types producing a
420 result in TYPE. */
421
422static tree
423do_narrow (location_t loc,
424 enum tree_code ex_form, tree type, tree arg0, tree arg1,
425 tree expr, unsigned inprec, unsigned outprec, bool dofold)
426{
427 /* Do the arithmetic in type TYPEX,
428 then convert result to TYPE. */
429 tree typex = type;
430
431 /* Can't do arithmetic in enumeral types
432 so use an integer type that will hold the values. */
433 if (TREE_CODE (typex) == ENUMERAL_TYPE)
434 typex = lang_hooks.types.type_for_size (TYPE_PRECISION (typex),
435 TYPE_UNSIGNED (typex));
436
437 /* The type demotion below might cause doing unsigned arithmetic
438 instead of signed, and thus hide overflow bugs. */
439 if ((ex_form == PLUS_EXPR || ex_form == MINUS_EXPR)
440 && !TYPE_UNSIGNED (typex)
441 && sanitize_flags_p (SANITIZE_SI_OVERFLOW))
442 return NULL_TREE;
443
444 /* But now perhaps TYPEX is as wide as INPREC.
445 In that case, do nothing special here.
446 (Otherwise would recurse infinitely in convert. */
447 if (TYPE_PRECISION (typex) != inprec)
448 {
449 /* Don't do unsigned arithmetic where signed was wanted,
450 or vice versa.
451 Exception: if both of the original operands were
452 unsigned then we can safely do the work as unsigned.
453 Exception: shift operations take their type solely
454 from the first argument.
455 Exception: the LSHIFT_EXPR case above requires that
456 we perform this operation unsigned lest we produce
457 signed-overflow undefinedness.
458 And we may need to do it as unsigned
459 if we truncate to the original size. */
460 if (TYPE_UNSIGNED (TREE_TYPE (expr))
461 || (TYPE_UNSIGNED (TREE_TYPE (arg0))
462 && (TYPE_UNSIGNED (TREE_TYPE (arg1))
463 || ex_form == LSHIFT_EXPR
464 || ex_form == RSHIFT_EXPR
465 || ex_form == LROTATE_EXPR
466 || ex_form == RROTATE_EXPR))
467 || ex_form == LSHIFT_EXPR
468 /* If we have !flag_wrapv, and either ARG0 or
469 ARG1 is of a signed type, we have to do
470 PLUS_EXPR, MINUS_EXPR or MULT_EXPR in an unsigned
471 type in case the operation in outprec precision
472 could overflow. Otherwise, we would introduce
473 signed-overflow undefinedness. */
474 || ((!TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0))
475 || !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg1)))
476 && ((TYPE_PRECISION (TREE_TYPE (arg0)) * 2u
477 > outprec)
478 || (TYPE_PRECISION (TREE_TYPE (arg1)) * 2u
479 > outprec))
480 && (ex_form == PLUS_EXPR
481 || ex_form == MINUS_EXPR
482 || ex_form == MULT_EXPR)))
483 {
484 if (!TYPE_UNSIGNED (typex))
485 typex = unsigned_type_for (typex);
486 }
487 else
488 {
489 if (TYPE_UNSIGNED (typex))
490 typex = signed_type_for (typex);
491 }
492 /* We should do away with all this once we have a proper
493 type promotion/demotion pass, see PR45397. */
494 expr = maybe_fold_build2_loc (dofold, loc, ex_form, typex,
495 convert (typex, arg0),
496 convert (typex, arg1));
497 return convert (type, expr);
498 }
499
500 return NULL_TREE;
501}
502
/* Convert EXPR to some integer (or enum) type TYPE.

   EXPR must be pointer, integer, discrete (enum, char, or bool), float,
   fixed-point or vector; in other cases error is called.

   If DOFOLD is TRUE, we try to simplify newly-created patterns by folding.

   The result of this is always supposed to be a newly created tree node
   not in use in any existing structure.  */

static tree
convert_to_integer_1 (tree type, tree expr, bool dofold)
{
  enum tree_code ex_form = TREE_CODE (expr);
  tree intype = TREE_TYPE (expr);
  unsigned int inprec = element_precision (intype);
  unsigned int outprec = element_precision (type);
  location_t loc = EXPR_LOCATION (expr);

  /* An INTEGER_TYPE cannot be incomplete, but an ENUMERAL_TYPE can
     be.  Consider `enum E = { a, b = (enum E) 3 };'.  */
  if (!COMPLETE_TYPE_P (type))
    {
      error ("conversion to incomplete type");
      return error_mark_node;
    }

  /* For a COMPOUND_EXPR, convert only the value operand; the operand
     evaluated purely for side effects is kept unchanged.  */
  if (ex_form == COMPOUND_EXPR)
    {
      tree t = convert_to_integer_1 (type, TREE_OPERAND (expr, 1), dofold);
      if (t == TREE_OPERAND (expr, 1))
	return expr;
      return build2_loc (EXPR_LOCATION (expr), COMPOUND_EXPR, TREE_TYPE (t),
			 TREE_OPERAND (expr, 0), t);
    }

  /* Convert e.g. (long)round(d) -> lround(d).  */
  /* If we're converting to char, we may encounter differing behavior
     between converting from double->char vs double->long->char.
     We're in "undefined" territory but we prefer to be conservative,
     so only proceed in "unsafe" math mode.  */
  if (optimize
      && (flag_unsafe_math_optimizations
	  || (long_integer_type_node
	      && outprec >= TYPE_PRECISION (long_integer_type_node))))
    {
      tree s_expr = strip_float_extensions (expr);
      tree s_intype = TREE_TYPE (s_expr);
      const enum built_in_function fcode = builtin_mathfn_code (s_expr);
      tree fn = 0;

      /* In each case below, the i*/l*/ll* variant is chosen so that the
	 result type of the replacement builtin fits OUTPREC.  */
      switch (fcode)
        {
	CASE_FLT_FN (BUILT_IN_CEIL):
	  /* Only convert in ISO C99 mode.  */
	  if (!targetm.libc_has_function (function_c99_misc))
	    break;
	  if (outprec < TYPE_PRECISION (integer_type_node)
	      || (outprec == TYPE_PRECISION (integer_type_node)
		  && !TYPE_UNSIGNED (type)))
	    fn = mathfn_built_in (s_intype, BUILT_IN_ICEIL);
	  else if (outprec == TYPE_PRECISION (long_integer_type_node)
		   && !TYPE_UNSIGNED (type))
	    fn = mathfn_built_in (s_intype, BUILT_IN_LCEIL);
	  else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
		   && !TYPE_UNSIGNED (type))
	    fn = mathfn_built_in (s_intype, BUILT_IN_LLCEIL);
	  break;

	CASE_FLT_FN (BUILT_IN_FLOOR):
	  /* Only convert in ISO C99 mode.  */
	  if (!targetm.libc_has_function (function_c99_misc))
	    break;
	  if (outprec < TYPE_PRECISION (integer_type_node)
	      || (outprec == TYPE_PRECISION (integer_type_node)
		  && !TYPE_UNSIGNED (type)))
	    fn = mathfn_built_in (s_intype, BUILT_IN_IFLOOR);
	  else if (outprec == TYPE_PRECISION (long_integer_type_node)
		   && !TYPE_UNSIGNED (type))
	    fn = mathfn_built_in (s_intype, BUILT_IN_LFLOOR);
	  else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
		   && !TYPE_UNSIGNED (type))
	    fn = mathfn_built_in (s_intype, BUILT_IN_LLFLOOR);
	  break;

	CASE_FLT_FN (BUILT_IN_ROUND):
	  /* Only convert in ISO C99 mode and with -fno-math-errno.  */
	  if (!targetm.libc_has_function (function_c99_misc) || flag_errno_math)
	    break;
	  if (outprec < TYPE_PRECISION (integer_type_node)
	      || (outprec == TYPE_PRECISION (integer_type_node)
		  && !TYPE_UNSIGNED (type)))
	    fn = mathfn_built_in (s_intype, BUILT_IN_IROUND);
	  else if (outprec == TYPE_PRECISION (long_integer_type_node)
		   && !TYPE_UNSIGNED (type))
	    fn = mathfn_built_in (s_intype, BUILT_IN_LROUND);
	  else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
		   && !TYPE_UNSIGNED (type))
	    fn = mathfn_built_in (s_intype, BUILT_IN_LLROUND);
	  break;

	CASE_FLT_FN (BUILT_IN_NEARBYINT):
	  /* Only convert nearbyint* if we can ignore math exceptions.  */
	  if (flag_trapping_math)
	    break;
	  gcc_fallthrough ();
	CASE_FLT_FN (BUILT_IN_RINT):
	  /* Only convert in ISO C99 mode and with -fno-math-errno.  */
	  if (!targetm.libc_has_function (function_c99_misc) || flag_errno_math)
	    break;
	  if (outprec < TYPE_PRECISION (integer_type_node)
	      || (outprec == TYPE_PRECISION (integer_type_node)
		  && !TYPE_UNSIGNED (type)))
	    fn = mathfn_built_in (s_intype, BUILT_IN_IRINT);
	  else if (outprec == TYPE_PRECISION (long_integer_type_node)
		   && !TYPE_UNSIGNED (type))
	    fn = mathfn_built_in (s_intype, BUILT_IN_LRINT);
	  else if (outprec == TYPE_PRECISION (long_long_integer_type_node)
		   && !TYPE_UNSIGNED (type))
	    fn = mathfn_built_in (s_intype, BUILT_IN_LLRINT);
	  break;

	CASE_FLT_FN (BUILT_IN_TRUNC):
	  /* trunc followed by the float->int conversion is equivalent to
	     converting the argument directly.  */
	  return convert_to_integer_1 (type, CALL_EXPR_ARG (s_expr, 0), dofold);

	default:
	  break;
	}

      if (fn)
        {
	  tree newexpr = build_call_expr (fn, 1, CALL_EXPR_ARG (s_expr, 0));
	  return convert_to_integer_1 (type, newexpr, dofold);
	}
    }

  /* Convert (int)logb(d) -> ilogb(d).  */
  if (optimize
      && flag_unsafe_math_optimizations
      && !flag_trapping_math && !flag_errno_math && flag_finite_math_only
      && integer_type_node
      && (outprec > TYPE_PRECISION (integer_type_node)
	  || (outprec == TYPE_PRECISION (integer_type_node)
	      && !TYPE_UNSIGNED (type))))
    {
      tree s_expr = strip_float_extensions (expr);
      tree s_intype = TREE_TYPE (s_expr);
      const enum built_in_function fcode = builtin_mathfn_code (s_expr);
      tree fn = 0;

      switch (fcode)
	{
	CASE_FLT_FN (BUILT_IN_LOGB):
	  fn = mathfn_built_in (s_intype, BUILT_IN_ILOGB);
	  break;

	default:
	  break;
	}

      if (fn)
        {
	  tree newexpr = build_call_expr (fn, 1, CALL_EXPR_ARG (s_expr, 0));
	  return convert_to_integer_1 (type, newexpr, dofold);
	}
    }

  /* Generic handling, dispatched on the type converted from.  */
  switch (TREE_CODE (intype))
    {
    case POINTER_TYPE:
    case REFERENCE_TYPE:
      if (integer_zerop (expr))
	return build_int_cst (type, 0);

      /* Convert to an unsigned integer of the correct width first, and from
	 there widen/truncate to the required type.  Some targets support the
	 coexistence of multiple valid pointer sizes, so fetch the one we need
	 from the type.  */
      if (!dofold)
	return build1 (CONVERT_EXPR, type, expr);
      expr = fold_build1 (CONVERT_EXPR,
			  lang_hooks.types.type_for_size
			    (TYPE_PRECISION (intype), 0),
			  expr);
      return fold_convert (type, expr);

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
    case OFFSET_TYPE:
      /* If this is a logical operation, which just returns 0 or 1, we can
	 change the type of the expression.  */

      if (TREE_CODE_CLASS (ex_form) == tcc_comparison)
	{
	  expr = copy_node (expr);
	  TREE_TYPE (expr) = type;
	  return expr;
	}

      /* If we are widening the type, put in an explicit conversion.
	 Similarly if we are not changing the width.  After this, we know
	 we are truncating EXPR.  */

      else if (outprec >= inprec)
	{
	  enum tree_code code;

	  /* If the precision of the EXPR's type is K bits and the
	     destination mode has more bits, and the sign is changing,
	     it is not safe to use a NOP_EXPR.  For example, suppose
	     that EXPR's type is a 3-bit unsigned integer type, the
	     TYPE is a 3-bit signed integer type, and the machine mode
	     for the types is 8-bit QImode.  In that case, the
	     conversion necessitates an explicit sign-extension.  In
	     the signed-to-unsigned case the high-order bits have to
	     be cleared.  */
	  if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (TREE_TYPE (expr))
	      && !type_has_mode_precision_p (TREE_TYPE (expr)))
	    code = CONVERT_EXPR;
	  else
	    code = NOP_EXPR;

	  return maybe_fold_build1_loc (dofold, loc, code, type, expr);
	}

      /* If TYPE is an enumeral type or a type with a precision less
	 than the number of bits in its mode, do the conversion to the
	 type corresponding to its mode, then do a nop conversion
	 to TYPE.  */
      else if (TREE_CODE (type) == ENUMERAL_TYPE
	       || outprec != GET_MODE_PRECISION (TYPE_MODE (type)))
	{
	  expr = convert (lang_hooks.types.type_for_mode
			  (TYPE_MODE (type), TYPE_UNSIGNED (type)), expr);
	  return maybe_fold_build1_loc (dofold, loc, NOP_EXPR, type, expr);
	}

      /* Here detect when we can distribute the truncation down past some
	 arithmetic.  For example, if adding two longs and converting to an
	 int, we can equally well convert both to ints and then add.
	 For the operations handled here, such truncation distribution
	 is always safe.
	 It is desirable in these cases:
	 1) when truncating down to full-word from a larger size
	 2) when truncating takes no work.
	 3) when at least one operand of the arithmetic has been extended
	 (as by C's default conversions).  In this case we need two conversions
	 if we do the arithmetic as already requested, so we might as well
	 truncate both and then combine.  Perhaps that way we need only one.

	 Note that in general we cannot do the arithmetic in a type
	 shorter than the desired result of conversion, even if the operands
	 are both extended from a shorter type, because they might overflow
	 if combined in that type.  The exceptions to this--the times when
	 two narrow values can be combined in their narrow type even to
	 make a wider result--are handled by "shorten" in build_binary_op.  */

      if (dofold)
	switch (ex_form)
	  {
	  case RSHIFT_EXPR:
	    /* We can pass truncation down through right shifting
	       when the shift count is a nonpositive constant.  */
	    if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
		&& tree_int_cst_sgn (TREE_OPERAND (expr, 1)) <= 0)
	      goto trunc1;
	    break;

	  case LSHIFT_EXPR:
	    /* We can pass truncation down through left shifting
	       when the shift count is a nonnegative constant and
	       the target type is unsigned.  */
	    if (TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST
		&& tree_int_cst_sgn (TREE_OPERAND (expr, 1)) >= 0
		&& TYPE_UNSIGNED (type)
		&& TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
	      {
		/* If shift count is less than the width of the truncated type,
		   really shift.  */
		if (tree_int_cst_lt (TREE_OPERAND (expr, 1), TYPE_SIZE (type)))
		  /* In this case, shifting is like multiplication.  */
		  goto trunc1;
		else
		  {
		    /* If it is >= that width, result is zero.
		       Handling this with trunc1 would give the wrong result:
		       (int) ((long long) a << 32) is well defined (as 0)
		       but (int) a << 32 is undefined and would get a
		       warning.  */

		    tree t = build_int_cst (type, 0);

		    /* If the original expression had side-effects, we must
		       preserve it.  */
		    if (TREE_SIDE_EFFECTS (expr))
		      return build2 (COMPOUND_EXPR, type, expr, t);
		    else
		      return t;
		  }
	      }
	    break;

	  case TRUNC_DIV_EXPR:
	    {
	      tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), NULL_TREE);
	      tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), NULL_TREE);

	      /* Don't distribute unless the output precision is at least as
		 big as the actual inputs and it has the same signedness.  */
	      if (outprec >= TYPE_PRECISION (TREE_TYPE (arg0))
		  && outprec >= TYPE_PRECISION (TREE_TYPE (arg1))
		  /* If signedness of arg0 and arg1 don't match,
		     we can't necessarily find a type to compare them in.  */
		  && (TYPE_UNSIGNED (TREE_TYPE (arg0))
		      == TYPE_UNSIGNED (TREE_TYPE (arg1)))
		  /* Do not change the sign of the division.  */
		  && (TYPE_UNSIGNED (TREE_TYPE (expr))
		      == TYPE_UNSIGNED (TREE_TYPE (arg0)))
		  /* Either require unsigned division or a division by
		     a constant that is not -1.  */
		  && (TYPE_UNSIGNED (TREE_TYPE (arg0))
		      || (TREE_CODE (arg1) == INTEGER_CST
			  && !integer_all_onesp (arg1))))
		{
		  tree tem = do_narrow (loc, ex_form, type, arg0, arg1,
					expr, inprec, outprec, dofold);
		  if (tem)
		    return tem;
		}
	      break;
	    }

	  case MAX_EXPR:
	  case MIN_EXPR:
	  case MULT_EXPR:
	    {
	      tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
	      tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);

	      /* Don't distribute unless the output precision is at least as
		 big as the actual inputs.  Otherwise, the comparison of the
		 truncated values will be wrong.  */
	      if (outprec >= TYPE_PRECISION (TREE_TYPE (arg0))
		  && outprec >= TYPE_PRECISION (TREE_TYPE (arg1))
		  /* If signedness of arg0 and arg1 don't match,
		     we can't necessarily find a type to compare them in.  */
		  && (TYPE_UNSIGNED (TREE_TYPE (arg0))
		      == TYPE_UNSIGNED (TREE_TYPE (arg1))))
		goto trunc1;
	      break;
	    }

	  case PLUS_EXPR:
	  case MINUS_EXPR:
	  case BIT_AND_EXPR:
	  case BIT_IOR_EXPR:
	  case BIT_XOR_EXPR:
	  trunc1:
	    {
	      tree arg0 = get_unwidened (TREE_OPERAND (expr, 0), type);
	      tree arg1 = get_unwidened (TREE_OPERAND (expr, 1), type);

	      /* Do not try to narrow operands of pointer subtraction;
		 that will interfere with other folding.  */
	      if (ex_form == MINUS_EXPR
		  && CONVERT_EXPR_P (arg0)
		  && CONVERT_EXPR_P (arg1)
		  && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (arg0, 0)))
		  && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (arg1, 0))))
		break;

	      if (outprec >= BITS_PER_WORD
		  || targetm.truly_noop_truncation (outprec, inprec)
		  || inprec > TYPE_PRECISION (TREE_TYPE (arg0))
		  || inprec > TYPE_PRECISION (TREE_TYPE (arg1)))
		{
		  tree tem = do_narrow (loc, ex_form, type, arg0, arg1,
					expr, inprec, outprec, dofold);
		  if (tem)
		    return tem;
		}
	    }
	    break;

	  case NEGATE_EXPR:
	    /* Using unsigned arithmetic for signed types may hide overflow
	       bugs.  */
	    if (!TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (expr, 0)))
		&& sanitize_flags_p (SANITIZE_SI_OVERFLOW))
	      break;
	    /* Fall through.  */
	  case BIT_NOT_EXPR:
	    /* This is not correct for ABS_EXPR,
	       since we must test the sign before truncation.  */
	    {
	      /* Do the arithmetic in type TYPEX,
		 then convert result to TYPE.  */
	      tree typex = type;

	      /* Can't do arithmetic in enumeral types
		 so use an integer type that will hold the values.  */
	      if (TREE_CODE (typex) == ENUMERAL_TYPE)
		typex
		  = lang_hooks.types.type_for_size (TYPE_PRECISION (typex),
						    TYPE_UNSIGNED (typex));

	      if (!TYPE_UNSIGNED (typex))
		typex = unsigned_type_for (typex);
	      return convert (type,
			      fold_build1 (ex_form, typex,
					   convert (typex,
						    TREE_OPERAND (expr, 0))));
	    }

	  CASE_CONVERT:
	    /* Don't introduce a "can't convert between vector values of
	       different size" error.  */
	    if (TREE_CODE (TREE_TYPE (TREE_OPERAND (expr, 0))) == VECTOR_TYPE
		&& (GET_MODE_SIZE (TYPE_MODE
				   (TREE_TYPE (TREE_OPERAND (expr, 0))))
		    != GET_MODE_SIZE (TYPE_MODE (type))))
	      break;
	    /* If truncating after truncating, might as well do all at once.
	       If truncating after extending, we may get rid of wasted work.  */
	    return convert (type, get_unwidened (TREE_OPERAND (expr, 0), type));

	  case COND_EXPR:
	    /* It is sometimes worthwhile to push the narrowing down through
	       the conditional and never loses.  A COND_EXPR may have a throw
	       as one operand, which then has void type.  Just leave void
	       operands as they are.  */
	    return
	      fold_build3 (COND_EXPR, type, TREE_OPERAND (expr, 0),
			   VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 1)))
			   ? TREE_OPERAND (expr, 1)
			   : convert (type, TREE_OPERAND (expr, 1)),
			   VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 2)))
			   ? TREE_OPERAND (expr, 2)
			   : convert (type, TREE_OPERAND (expr, 2)));

	  default:
	    break;
	  }

      /* When parsing long initializers, we might end up with a lot of casts.
	 Shortcut this.  */
      if (TREE_CODE (expr) == INTEGER_CST)
	return fold_convert (type, expr);
      return build1 (CONVERT_EXPR, type, expr);

    case REAL_TYPE:
      /* When the float-cast sanitizer is active, wrap the truncation in a
	 range check; CURRENT_FUNCTION_DECL must exist for instrumentation
	 to be emitted.  */
      if (sanitize_flags_p (SANITIZE_FLOAT_CAST)
	  && current_function_decl != NULL_TREE)
	{
	  expr = save_expr (expr);
	  tree check = ubsan_instrument_float_cast (loc, type, expr);
	  expr = build1 (FIX_TRUNC_EXPR, type, expr);
	  if (check == NULL_TREE)
	    return expr;
	  return maybe_fold_build2_loc (dofold, loc, COMPOUND_EXPR,
					TREE_TYPE (expr), check, expr);
	}
      else
	return build1 (FIX_TRUNC_EXPR, type, expr);

    case FIXED_POINT_TYPE:
      return build1 (FIXED_CONVERT_EXPR, type, expr);

    case COMPLEX_TYPE:
      /* A complex-to-integer conversion takes the real part only.  */
      expr = maybe_fold_build1_loc (dofold, loc, REALPART_EXPR,
				    TREE_TYPE (TREE_TYPE (expr)), expr);
      return convert (type, expr);

    case VECTOR_TYPE:
      if (!tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (expr))))
	{
	  error ("can%'t convert a vector of type %qT"
		 " to type %qT which has different size",
		 TREE_TYPE (expr), type);
	  return error_mark_node;
	}
      return build1 (VIEW_CONVERT_EXPR, type, expr);

    default:
      error ("aggregate value used where an integer was expected");
      return convert (type, integer_zero_node);
    }
}
992
993/* Convert EXPR to some integer (or enum) type TYPE.
994
995 EXPR must be pointer, integer, discrete (enum, char, or bool), float,
996 fixed-point or vector; in other cases error is called.
997
998 The result of this is always supposed to be a newly created tree node
999 not in use in any existing structure. */
1000
1001tree
1002convert_to_integer (tree type, tree expr)
1003{
1004 return convert_to_integer_1 (type, expr, true);
1005}
1006
1007/* A wrapper around convert_to_complex_1 that only folds the
1008 expression if DOFOLD, or if it is CONSTANT_CLASS_P. */
1009
1010tree
1011convert_to_integer_maybe_fold (tree type, tree expr, bool dofold)
1012{
1013 return convert_to_integer_1 (type, expr, dofold || CONSTANT_CLASS_P (expr));
1014}
1015
1016/* Convert EXPR to the complex type TYPE in the usual ways. If FOLD_P is
1017 true, try to fold the expression. */
1018
static tree
convert_to_complex_1 (tree type, tree expr, bool fold_p)
{
  location_t loc = EXPR_LOCATION (expr);
  /* Element type of the target complex type; both the real and the
     imaginary part of the result are converted to this type.  */
  tree subtype = TREE_TYPE (type);

  switch (TREE_CODE (TREE_TYPE (expr)))
    {
    case REAL_TYPE:
    case FIXED_POINT_TYPE:
    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      /* Scalar -> complex: the scalar becomes the real part and the
	 imaginary part is zero.  */
      return build2 (COMPLEX_EXPR, type, convert (subtype, expr),
		     convert (subtype, integer_zero_node));

    case COMPLEX_TYPE:
      {
	tree elt_type = TREE_TYPE (TREE_TYPE (expr));

	/* Same element type (up to qualifiers) means no conversion is
	   needed at all.  */
	if (TYPE_MAIN_VARIANT (elt_type) == TYPE_MAIN_VARIANT (subtype))
	  return expr;
	else if (TREE_CODE (expr) == COMPOUND_EXPR)
	  {
	    /* Push the conversion into the value operand of a
	       COMPOUND_EXPR, keeping the side-effect operand in place.
	       If the recursion changed nothing, return the original
	       tree unmodified.  */
	    tree t = convert_to_complex_1 (type, TREE_OPERAND (expr, 1),
					   fold_p);
	    if (t == TREE_OPERAND (expr, 1))
	      return expr;
	    return build2_loc (EXPR_LOCATION (expr), COMPOUND_EXPR,
			       TREE_TYPE (t), TREE_OPERAND (expr, 0), t);
	  }
	else if (TREE_CODE (expr) == COMPLEX_EXPR)
	  /* An explicit (re, im) pair: convert each part directly
	     without materializing the source complex value.  */
	  return maybe_fold_build2_loc (fold_p, loc, COMPLEX_EXPR, type,
					convert (subtype,
						 TREE_OPERAND (expr, 0)),
					convert (subtype,
						 TREE_OPERAND (expr, 1)));
	else
	  {
	    /* General case: EXPR is referenced twice (REALPART and
	       IMAGPART), so wrap it in SAVE_EXPR first to evaluate it
	       only once.  */
	    expr = save_expr (expr);
	    tree realp = maybe_fold_build1_loc (fold_p, loc, REALPART_EXPR,
						TREE_TYPE (TREE_TYPE (expr)),
						expr);
	    tree imagp = maybe_fold_build1_loc (fold_p, loc, IMAGPART_EXPR,
						TREE_TYPE (TREE_TYPE (expr)),
						expr);
	    return maybe_fold_build2_loc (fold_p, loc, COMPLEX_EXPR, type,
					  convert (subtype, realp),
					  convert (subtype, imagp));
	  }
      }

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      /* Diagnose, then recover by converting zero so callers get a
	 valid tree of the requested type.  */
      error ("pointer value used where a complex was expected");
      return convert_to_complex_1 (type, integer_zero_node, fold_p);

    default:
      error ("aggregate value used where a complex was expected");
      return convert_to_complex_1 (type, integer_zero_node, fold_p);
    }
}
1081
1082/* A wrapper around convert_to_complex_1 that always folds the
1083 expression. */
1084
1085tree
1086convert_to_complex (tree type, tree expr)
1087{
1088 return convert_to_complex_1 (type, expr, true);
1089}
1090
1091/* A wrapper around convert_to_complex_1 that only folds the
1092 expression if DOFOLD, or if it is CONSTANT_CLASS_P. */
1093
1094tree
1095convert_to_complex_maybe_fold (tree type, tree expr, bool dofold)
1096{
1097 return convert_to_complex_1 (type, expr, dofold || CONSTANT_CLASS_P (expr));
1098}
1099
1100/* Convert EXPR to the vector type TYPE in the usual ways. */
1101
1102tree
1103convert_to_vector (tree type, tree expr)
1104{
1105 switch (TREE_CODE (TREE_TYPE (expr)))
1106 {
1107 case INTEGER_TYPE:
1108 case VECTOR_TYPE:
1109 if (!tree_int_cst_equal (TYPE_SIZE (type), TYPE_SIZE (TREE_TYPE (expr))))
1110 {
1111 error ("can%'t convert a value of type %qT"
1112 " to vector type %qT which has different size",
1113 TREE_TYPE (expr), type);
1114 return error_mark_node;
1115 }
1116 return build1 (VIEW_CONVERT_EXPR, type, expr);
1117
1118 default:
1119 error ("can%'t convert value to a vector");
1120 return error_mark_node;
1121 }
1122}
1123
1124/* Convert EXPR to some fixed-point type TYPE.
1125
1126 EXPR must be fixed-point, float, integer, or enumeral;
1127 in other cases error is called. */
1128
1129tree
1130convert_to_fixed (tree type, tree expr)
1131{
1132 if (integer_zerop (expr))
1133 {
1134 tree fixed_zero_node = build_fixed (type, FCONST0 (TYPE_MODE (type)));
1135 return fixed_zero_node;
1136 }
1137 else if (integer_onep (expr) && ALL_SCALAR_ACCUM_MODE_P (TYPE_MODE (type)))
1138 {
1139 tree fixed_one_node = build_fixed (type, FCONST1 (TYPE_MODE (type)));
1140 return fixed_one_node;
1141 }
1142
1143 switch (TREE_CODE (TREE_TYPE (expr)))
1144 {
1145 case FIXED_POINT_TYPE:
1146 case INTEGER_TYPE:
1147 case ENUMERAL_TYPE:
1148 case BOOLEAN_TYPE:
1149 case REAL_TYPE:
1150 return build1 (FIXED_CONVERT_EXPR, type, expr);
1151
1152 case COMPLEX_TYPE:
1153 return convert (type,
1154 fold_build1 (REALPART_EXPR,
1155 TREE_TYPE (TREE_TYPE (expr)), expr));
1156
1157 default:
1158 error ("aggregate value used where a fixed-point was expected");
1159 return error_mark_node;
1160 }
1161}
1162