1/* Internal functions.
2 Copyright (C) 2011-2017 Free Software Foundation, Inc.
3
4This file is part of GCC.
5
6GCC is free software; you can redistribute it and/or modify it under
7the terms of the GNU General Public License as published by the Free
8Software Foundation; either version 3, or (at your option) any later
9version.
10
11GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12WARRANTY; without even the implied warranty of MERCHANTABILITY or
13FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14for more details.
15
16You should have received a copy of the GNU General Public License
17along with GCC; see the file COPYING3. If not see
18<http://www.gnu.org/licenses/>. */
19
20#include "config.h"
21#include "system.h"
22#include "coretypes.h"
23#include "backend.h"
24#include "target.h"
25#include "rtl.h"
26#include "tree.h"
27#include "gimple.h"
28#include "predict.h"
29#include "stringpool.h"
30#include "tree-vrp.h"
31#include "tree-ssanames.h"
32#include "expmed.h"
33#include "memmodel.h"
34#include "optabs.h"
35#include "emit-rtl.h"
36#include "diagnostic-core.h"
37#include "fold-const.h"
38#include "internal-fn.h"
39#include "stor-layout.h"
40#include "dojump.h"
41#include "expr.h"
42#include "stringpool.h"
43#include "attribs.h"
44#include "asan.h"
45#include "ubsan.h"
46#include "recog.h"
47#include "builtins.h"
48#include "optabs-tree.h"
49#include "gimple-ssa.h"
50#include "tree-phinodes.h"
51#include "ssa-iterators.h"
52
/* The names of each internal function, indexed by function number.
   Built by expanding DEF_INTERNAL_FN for every entry of internal-fn.def
   into the stringized CODE; the trailing "<invalid-fn>" entry fills the
   IFN_LAST slot.  */
const char *const internal_fn_name_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) #CODE,
#include "internal-fn.def"
  "<invalid-fn>"
};
59
/* The ECF_* flags of each internal function, indexed by function number.
   Built the same way as internal_fn_name_array, with a zero entry for the
   IFN_LAST slot.  */
const int internal_fn_flags_array[] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) FLAGS,
#include "internal-fn.def"
  0
};
66
/* Fnspec of each internal function, indexed by function number.
   Entries are STRING_CST trees (or NULL when no fnspec was given) filled
   in by init_internal_fns; the IFN_LAST slot is a NULL sentinel.  */
const_tree internal_fn_fnspec_array[IFN_LAST + 1];
69
/* Populate internal_fn_fnspec_array from internal-fn.def.  For each
   function with a non-null FNSPEC, build a STRING_CST from it; the length
   passed to build_string is sizeof (FNSPEC), i.e. it includes the
   terminating NUL.  The "FNSPEC ? FNSPEC : \"\"" arm is never reached at
   run time (the whole statement is guarded by "if (FNSPEC)") but keeps
   the macro expansion type-correct when FNSPEC is the literal 0.  */
void
init_internal_fns ()
{
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
  if (FNSPEC) internal_fn_fnspec_array[IFN_##CODE] = \
    build_string ((int) sizeof (FNSPEC), FNSPEC ? FNSPEC : "");
#include "internal-fn.def"
  /* NULL sentinel for the out-of-range IFN_LAST slot.  */
  internal_fn_fnspec_array[IFN_LAST] = 0;
}
79
/* Create static initializers for the information returned by
   direct_internal_fn.  Each initializer supplies the fields of a
   direct_internal_fn_info: two operand/type-selector indices and a
   "vectorizable" flag.  NOTE(review): the meaning of the special index
   values -1 and -2 is defined by direct_internal_fn_info in
   internal-fn.h -- confirm against that declaration.  */
#define not_direct { -2, -2, false }
#define mask_load_direct { -1, 2, false }
#define load_lanes_direct { -1, -1, false }
#define mask_store_direct { 3, 2, false }
#define store_lanes_direct { 0, 0, false }
#define unary_direct { 0, 0, true }
#define binary_direct { 0, 0, true }
89
/* Table of direct_internal_fn_info, indexed by function number.  Plain
   internal functions default to not_direct; optab-backed ones pick the
   TYPE##_direct initializer matching their internal-fn.def TYPE.  The
   final not_direct entry fills the IFN_LAST slot.  */
const direct_internal_fn_info direct_internal_fn_array[IFN_LAST + 1] = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) not_direct,
#define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) TYPE##_direct,
#define DEF_INTERNAL_SIGNED_OPTAB_FN(CODE, FLAGS, SELECTOR, SIGNED_OPTAB, \
				     UNSIGNED_OPTAB, TYPE) TYPE##_direct,
#include "internal-fn.def"
  not_direct
};
98
99/* ARRAY_TYPE is an array of vector modes. Return the associated insn
100 for load-lanes-style optab OPTAB, or CODE_FOR_nothing if none. */
101
102static enum insn_code
103get_multi_vector_move (tree array_type, convert_optab optab)
104{
105 machine_mode imode;
106 machine_mode vmode;
107
108 gcc_assert (TREE_CODE (array_type) == ARRAY_TYPE);
109 imode = TYPE_MODE (array_type);
110 vmode = TYPE_MODE (TREE_TYPE (array_type));
111
112 return convert_optab_handler (optab, imode, vmode);
113}
114
115/* Expand LOAD_LANES call STMT using optab OPTAB. */
116
117static void
118expand_load_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
119{
120 struct expand_operand ops[2];
121 tree type, lhs, rhs;
122 rtx target, mem;
123
124 lhs = gimple_call_lhs (stmt);
125 rhs = gimple_call_arg (stmt, 0);
126 type = TREE_TYPE (lhs);
127
128 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
129 mem = expand_normal (rhs);
130
131 gcc_assert (MEM_P (mem));
132 PUT_MODE (mem, TYPE_MODE (type));
133
134 create_output_operand (&ops[0], target, TYPE_MODE (type));
135 create_fixed_operand (&ops[1], mem);
136 expand_insn (get_multi_vector_move (type, optab), 2, ops);
137}
138
139/* Expand STORE_LANES call STMT using optab OPTAB. */
140
141static void
142expand_store_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
143{
144 struct expand_operand ops[2];
145 tree type, lhs, rhs;
146 rtx target, reg;
147
148 lhs = gimple_call_lhs (stmt);
149 rhs = gimple_call_arg (stmt, 0);
150 type = TREE_TYPE (rhs);
151
152 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
153 reg = expand_normal (rhs);
154
155 gcc_assert (MEM_P (target));
156 PUT_MODE (target, TYPE_MODE (type));
157
158 create_fixed_operand (&ops[0], target);
159 create_input_operand (&ops[1], reg, TYPE_MODE (type));
160 expand_insn (get_multi_vector_move (type, optab), 2, ops);
161}
162
/* IFN_ANNOTATE calls carry loop annotations and are consumed before
   expansion (presumably by the loop-lowering machinery -- TODO confirm
   which pass), so none should survive to this point.  */

static void
expand_ANNOTATE (internal_fn, gcall *)
{
  gcc_unreachable ();
}
168
/* This should get expanded in omp_device_lower pass; on targets without
   SIMT execution it is folded away there, so reaching expansion is a
   lowering bug.  */

static void
expand_GOMP_USE_SIMT (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in omp_device_lower pass.  */

static void
expand_GOMP_SIMT_ENTER (internal_fn, gcall *)
{
  gcc_unreachable ();
}
184
185/* Allocate per-lane storage and begin non-uniform execution region. */
186
187static void
188expand_GOMP_SIMT_ENTER_ALLOC (internal_fn, gcall *stmt)
189{
190 rtx target;
191 tree lhs = gimple_call_lhs (stmt);
192 if (lhs)
193 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
194 else
195 target = gen_reg_rtx (Pmode);
196 rtx size = expand_normal (gimple_call_arg (stmt, 0));
197 rtx align = expand_normal (gimple_call_arg (stmt, 1));
198 struct expand_operand ops[3];
199 create_output_operand (&ops[0], target, Pmode);
200 create_input_operand (&ops[1], size, Pmode);
201 create_input_operand (&ops[2], align, Pmode);
202 gcc_assert (targetm.have_omp_simt_enter ());
203 expand_insn (targetm.code_for_omp_simt_enter, 3, ops);
204}
205
206/* Deallocate per-lane storage and leave non-uniform execution region. */
207
208static void
209expand_GOMP_SIMT_EXIT (internal_fn, gcall *stmt)
210{
211 gcc_checking_assert (!gimple_call_lhs (stmt));
212 rtx arg = expand_normal (gimple_call_arg (stmt, 0));
213 struct expand_operand ops[1];
214 create_input_operand (&ops[0], arg, Pmode);
215 gcc_assert (targetm.have_omp_simt_exit ());
216 expand_insn (targetm.code_for_omp_simt_exit, 1, ops);
217}
218
219/* Lane index on SIMT targets: thread index in the warp on NVPTX. On targets
220 without SIMT execution this should be expanded in omp_device_lower pass. */
221
222static void
223expand_GOMP_SIMT_LANE (internal_fn, gcall *stmt)
224{
225 tree lhs = gimple_call_lhs (stmt);
226 if (!lhs)
227 return;
228
229 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
230 gcc_assert (targetm.have_omp_simt_lane ());
231 emit_insn (targetm.gen_omp_simt_lane (target));
232}
233
/* This should get expanded in omp_device_lower pass; reaching RTL
   expansion indicates a lowering bug.  */

static void
expand_GOMP_SIMT_VF (internal_fn, gcall *)
{
  gcc_unreachable ();
}
241
242/* Lane index of the first SIMT lane that supplies a non-zero argument.
243 This is a SIMT counterpart to GOMP_SIMD_LAST_LANE, used to represent the
244 lane that executed the last iteration for handling OpenMP lastprivate. */
245
246static void
247expand_GOMP_SIMT_LAST_LANE (internal_fn, gcall *stmt)
248{
249 tree lhs = gimple_call_lhs (stmt);
250 if (!lhs)
251 return;
252
253 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
254 rtx cond = expand_normal (gimple_call_arg (stmt, 0));
255 machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
256 struct expand_operand ops[2];
257 create_output_operand (&ops[0], target, mode);
258 create_input_operand (&ops[1], cond, mode);
259 gcc_assert (targetm.have_omp_simt_last_lane ());
260 expand_insn (targetm.code_for_omp_simt_last_lane, 2, ops);
261}
262
263/* Non-transparent predicate used in SIMT lowering of OpenMP "ordered". */
264
265static void
266expand_GOMP_SIMT_ORDERED_PRED (internal_fn, gcall *stmt)
267{
268 tree lhs = gimple_call_lhs (stmt);
269 if (!lhs)
270 return;
271
272 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
273 rtx ctr = expand_normal (gimple_call_arg (stmt, 0));
274 machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
275 struct expand_operand ops[2];
276 create_output_operand (&ops[0], target, mode);
277 create_input_operand (&ops[1], ctr, mode);
278 gcc_assert (targetm.have_omp_simt_ordered ());
279 expand_insn (targetm.code_for_omp_simt_ordered, 2, ops);
280}
281
282/* "Or" boolean reduction across SIMT lanes: return non-zero in all lanes if
283 any lane supplies a non-zero argument. */
284
285static void
286expand_GOMP_SIMT_VOTE_ANY (internal_fn, gcall *stmt)
287{
288 tree lhs = gimple_call_lhs (stmt);
289 if (!lhs)
290 return;
291
292 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
293 rtx cond = expand_normal (gimple_call_arg (stmt, 0));
294 machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
295 struct expand_operand ops[2];
296 create_output_operand (&ops[0], target, mode);
297 create_input_operand (&ops[1], cond, mode);
298 gcc_assert (targetm.have_omp_simt_vote_any ());
299 expand_insn (targetm.code_for_omp_simt_vote_any, 2, ops);
300}
301
302/* Exchange between SIMT lanes with a "butterfly" pattern: source lane index
303 is destination lane index XOR given offset. */
304
305static void
306expand_GOMP_SIMT_XCHG_BFLY (internal_fn, gcall *stmt)
307{
308 tree lhs = gimple_call_lhs (stmt);
309 if (!lhs)
310 return;
311
312 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
313 rtx src = expand_normal (gimple_call_arg (stmt, 0));
314 rtx idx = expand_normal (gimple_call_arg (stmt, 1));
315 machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
316 struct expand_operand ops[3];
317 create_output_operand (&ops[0], target, mode);
318 create_input_operand (&ops[1], src, mode);
319 create_input_operand (&ops[2], idx, SImode);
320 gcc_assert (targetm.have_omp_simt_xchg_bfly ());
321 expand_insn (targetm.code_for_omp_simt_xchg_bfly, 3, ops);
322}
323
324/* Exchange between SIMT lanes according to given source lane index. */
325
326static void
327expand_GOMP_SIMT_XCHG_IDX (internal_fn, gcall *stmt)
328{
329 tree lhs = gimple_call_lhs (stmt);
330 if (!lhs)
331 return;
332
333 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
334 rtx src = expand_normal (gimple_call_arg (stmt, 0));
335 rtx idx = expand_normal (gimple_call_arg (stmt, 1));
336 machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
337 struct expand_operand ops[3];
338 create_output_operand (&ops[0], target, mode);
339 create_input_operand (&ops[1], src, mode);
340 create_input_operand (&ops[2], idx, SImode);
341 gcc_assert (targetm.have_omp_simt_xchg_idx ());
342 expand_insn (targetm.code_for_omp_simt_xchg_idx, 3, ops);
343}
344
/* This should get expanded in adjust_simduid_builtins; any IFN_GOMP_SIMD_*
   call surviving to RTL expansion indicates a lowering bug.  */

static void
expand_GOMP_SIMD_LANE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_VF (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_LAST_LANE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_ORDERED_START (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in adjust_simduid_builtins.  */

static void
expand_GOMP_SIMD_ORDERED_END (internal_fn, gcall *)
{
  gcc_unreachable ();
}
384
/* This should get expanded in the sanopt pass; any sanitizer internal
   call surviving to RTL expansion indicates a lowering bug.  */

static void
expand_UBSAN_NULL (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_BOUNDS (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_VPTR (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_PTR (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_UBSAN_OBJECT_SIZE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_ASAN_CHECK (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_ASAN_MARK (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_ASAN_POISON (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the sanopt pass.  */

static void
expand_ASAN_POISON_USE (internal_fn, gcall *)
{
  gcc_unreachable ();
}

/* This should get expanded in the tsan pass.  */

static void
expand_TSAN_FUNC_EXIT (internal_fn, gcall *)
{
  gcc_unreachable ();
}
464
465/* This should get expanded in the lower pass. */
466
467static void
468expand_FALLTHROUGH (internal_fn, gcall *call)
469{
470 error_at (gimple_location (call),
471 "invalid use of attribute %<fallthrough%>");
472}
473
/* Return minimum precision needed to represent all values
   of ARG in SIGNed integral type.  The result is PREC plus one extra
   bit when the effective signedness changed during the walk (so the
   value can still be represented in the originally requested sign).  */

static int
get_min_precision (tree arg, signop sign)
{
  int prec = TYPE_PRECISION (TREE_TYPE (arg));
  /* Bound on the number of conversions looked through, to keep the walk
     cheap and guarantee termination.  */
  int cnt = 0;
  signop orig_sign = sign;
  if (TREE_CODE (arg) == INTEGER_CST)
    {
      int p;
      if (TYPE_SIGN (TREE_TYPE (arg)) != sign)
	{
	  /* Reinterpret the constant in the requested signedness at
	     PREC bits before measuring its minimum precision.  */
	  widest_int w = wi::to_widest (arg);
	  w = wi::ext (w, prec, sign);
	  p = wi::min_precision (w, sign);
	}
      else
	p = wi::min_precision (wi::to_wide (arg), sign);
      return MIN (p, prec);
    }
  /* Strip integral conversions that do not widen beyond PREC,
     tightening PREC as narrower inner operands are found.  */
  while (CONVERT_EXPR_P (arg)
	 && INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (arg, 0)))
	 && TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg, 0))) <= prec)
    {
      arg = TREE_OPERAND (arg, 0);
      if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
	{
	  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
	    sign = UNSIGNED;
	  else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
	    /* A narrower signed value that may be negative cannot be
	       read as UNSIGNED; give up with the current bound.  */
	    return prec + (orig_sign != sign);
	  prec = TYPE_PRECISION (TREE_TYPE (arg));
	}
      if (++cnt > 30)
	return prec + (orig_sign != sign);
    }
  if (TREE_CODE (arg) != SSA_NAME)
    return prec + (orig_sign != sign);
  wide_int arg_min, arg_max;
  /* No usable range on ARG itself: follow defining conversion
     statements until an SSA name with a VR_RANGE is found.  */
  while (get_range_info (arg, &arg_min, &arg_max) != VR_RANGE)
    {
      gimple *g = SSA_NAME_DEF_STMT (arg);
      if (is_gimple_assign (g)
	  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (g)))
	{
	  tree t = gimple_assign_rhs1 (g);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (t))
	      && TYPE_PRECISION (TREE_TYPE (t)) <= prec)
	    {
	      arg = t;
	      if (TYPE_PRECISION (TREE_TYPE (arg)) < prec)
		{
		  if (TYPE_UNSIGNED (TREE_TYPE (arg)))
		    sign = UNSIGNED;
		  else if (sign == UNSIGNED && get_range_pos_neg (arg) != 1)
		    return prec + (orig_sign != sign);
		  prec = TYPE_PRECISION (TREE_TYPE (arg));
		}
	      if (++cnt > 30)
		return prec + (orig_sign != sign);
	      continue;
	    }
	}
      return prec + (orig_sign != sign);
    }
  if (sign == TYPE_SIGN (TREE_TYPE (arg)))
    {
      /* Range is in the requested signedness: precision needed is the
	 larger of what the two bounds require.  */
      int p1 = wi::min_precision (arg_min, sign);
      int p2 = wi::min_precision (arg_max, sign);
      p1 = MAX (p1, p2);
      prec = MIN (prec, p1);
    }
  else if (sign == UNSIGNED && !wi::neg_p (arg_min, SIGNED))
    {
      /* A signed range known to be non-negative can be measured as
	 unsigned from its upper bound alone.  */
      int p = wi::min_precision (arg_max, UNSIGNED);
      prec = MIN (prec, p);
    }
  return prec + (orig_sign != sign);
}
555
556/* Helper for expand_*_overflow. Set the __imag__ part to true
557 (1 except for signed:1 type, in which case store -1). */
558
559static void
560expand_arith_set_overflow (tree lhs, rtx target)
561{
562 if (TYPE_PRECISION (TREE_TYPE (TREE_TYPE (lhs))) == 1
563 && !TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs))))
564 write_complex_part (target, constm1_rtx, true);
565 else
566 write_complex_part (target, const1_rtx, true);
567}
568
/* Helper for expand_*_overflow.  Store RES into the __real__ part
   of TARGET.  If RES has larger MODE than __real__ part of TARGET,
   set the __imag__ part to 1 if RES doesn't fit into it.  Similarly
   if LHS has smaller precision than its mode.  */

static void
expand_arith_overflow_result_store (tree lhs, rtx target,
				    scalar_int_mode mode, rtx res)
{
  scalar_int_mode tgtmode
    = as_a <scalar_int_mode> (GET_MODE_INNER (GET_MODE (target)));
  rtx lres = res;
  if (tgtmode != mode)
    {
      /* RES is wider than the target's component mode: truncate it and
	 flag overflow when the round trip back to MODE changes it.  */
      rtx_code_label *done_label = gen_label_rtx ();
      int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
      lres = convert_modes (tgtmode, mode, res, uns);
      gcc_assert (GET_MODE_PRECISION (tgtmode) < GET_MODE_PRECISION (mode));
      do_compare_rtx_and_jump (res, convert_modes (mode, tgtmode, lres, uns),
			       EQ, true, mode, NULL_RTX, NULL, done_label,
			       profile_probability::very_likely ());
      expand_arith_set_overflow (lhs, target);
      emit_label (done_label);
    }
  int prec = TYPE_PRECISION (TREE_TYPE (TREE_TYPE (lhs)));
  int tgtprec = GET_MODE_PRECISION (tgtmode);
  if (prec < tgtprec)
    {
      /* The result type is narrower than its mode: reduce LRES to PREC
	 bits (mask for unsigned, shift-pair sign extension for signed)
	 and flag overflow when that changes the value.  */
      rtx_code_label *done_label = gen_label_rtx ();
      int uns = TYPE_UNSIGNED (TREE_TYPE (TREE_TYPE (lhs)));
      res = lres;
      if (uns)
	{
	  rtx mask
	    = immed_wide_int_const (wi::shifted_mask (0, prec, false, tgtprec),
				    tgtmode);
	  lres = expand_simple_binop (tgtmode, AND, res, mask, NULL_RTX,
				      true, OPTAB_LIB_WIDEN);
	}
      else
	{
	  lres = expand_shift (LSHIFT_EXPR, tgtmode, res, tgtprec - prec,
			       NULL_RTX, 1);
	  lres = expand_shift (RSHIFT_EXPR, tgtmode, lres, tgtprec - prec,
			       NULL_RTX, 0);
	}
      do_compare_rtx_and_jump (res, lres,
			       EQ, true, tgtmode, NULL_RTX, NULL, done_label,
			       profile_probability::very_likely ());
      expand_arith_set_overflow (lhs, target);
      emit_label (done_label);
    }
  /* Finally commit the (possibly reduced) result value.  */
  write_complex_part (target, lres, false);
}
623
624/* Helper for expand_*_overflow. Store RES into TARGET. */
625
626static void
627expand_ubsan_result_store (rtx target, rtx res)
628{
629 if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
630 /* If this is a scalar in a register that is stored in a wider mode
631 than the declared mode, compute the result into its declared mode
632 and then convert to the wider mode. Our value is the computed
633 expression. */
634 convert_move (SUBREG_REG (target), res, SUBREG_PROMOTED_SIGN (target));
635 else
636 emit_move_insn (target, res);
637}
638
639/* Add sub/add overflow checking to the statement STMT.
640 CODE says whether the operation is +, or -. */
641
642static void
643expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
644 tree arg0, tree arg1, bool unsr_p, bool uns0_p,
645 bool uns1_p, bool is_ubsan, tree *datap)
646{
647 rtx res, target = NULL_RTX;
648 tree fn;
649 rtx_code_label *done_label = gen_label_rtx ();
650 rtx_code_label *do_error = gen_label_rtx ();
651 do_pending_stack_adjust ();
652 rtx op0 = expand_normal (arg0);
653 rtx op1 = expand_normal (arg1);
654 scalar_int_mode mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg0));
655 int prec = GET_MODE_PRECISION (mode);
656 rtx sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
657 bool do_xor = false;
658
659 if (is_ubsan)
660 gcc_assert (!unsr_p && !uns0_p && !uns1_p);
661
662 if (lhs)
663 {
664 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
665 if (!is_ubsan)
666 write_complex_part (target, const0_rtx, true);
667 }
668
669 /* We assume both operands and result have the same precision
670 here (GET_MODE_BITSIZE (mode)), S stands for signed type
671 with that precision, U for unsigned type with that precision,
672 sgn for unsigned most significant bit in that precision.
673 s1 is signed first operand, u1 is unsigned first operand,
674 s2 is signed second operand, u2 is unsigned second operand,
675 sr is signed result, ur is unsigned result and the following
676 rules say how to compute result (which is always result of
677 the operands as if both were unsigned, cast to the right
678 signedness) and how to compute whether operation overflowed.
679
680 s1 + s2 -> sr
681 res = (S) ((U) s1 + (U) s2)
682 ovf = s2 < 0 ? res > s1 : res < s1 (or jump on overflow)
683 s1 - s2 -> sr
684 res = (S) ((U) s1 - (U) s2)
685 ovf = s2 < 0 ? res < s1 : res > s2 (or jump on overflow)
686 u1 + u2 -> ur
687 res = u1 + u2
688 ovf = res < u1 (or jump on carry, but RTL opts will handle it)
689 u1 - u2 -> ur
690 res = u1 - u2
691 ovf = res > u1 (or jump on carry, but RTL opts will handle it)
692 s1 + u2 -> sr
693 res = (S) ((U) s1 + u2)
694 ovf = ((U) res ^ sgn) < u2
695 s1 + u2 -> ur
696 t1 = (S) (u2 ^ sgn)
697 t2 = s1 + t1
698 res = (U) t2 ^ sgn
699 ovf = t1 < 0 ? t2 > s1 : t2 < s1 (or jump on overflow)
700 s1 - u2 -> sr
701 res = (S) ((U) s1 - u2)
702 ovf = u2 > ((U) s1 ^ sgn)
703 s1 - u2 -> ur
704 res = (U) s1 - u2
705 ovf = s1 < 0 || u2 > (U) s1
706 u1 - s2 -> sr
707 res = u1 - (U) s2
708 ovf = u1 >= ((U) s2 ^ sgn)
709 u1 - s2 -> ur
710 t1 = u1 ^ sgn
711 t2 = t1 - (U) s2
712 res = t2 ^ sgn
713 ovf = s2 < 0 ? (S) t2 < (S) t1 : (S) t2 > (S) t1 (or jump on overflow)
714 s1 + s2 -> ur
715 res = (U) s1 + (U) s2
716 ovf = s2 < 0 ? (s1 | (S) res) < 0) : (s1 & (S) res) < 0)
717 u1 + u2 -> sr
718 res = (S) (u1 + u2)
719 ovf = (U) res < u2 || res < 0
720 u1 - u2 -> sr
721 res = (S) (u1 - u2)
722 ovf = u1 >= u2 ? res < 0 : res >= 0
723 s1 - s2 -> ur
724 res = (U) s1 - (U) s2
725 ovf = s2 >= 0 ? ((s1 | (S) res) < 0) : ((s1 & (S) res) < 0) */
726
727 if (code == PLUS_EXPR && uns0_p && !uns1_p)
728 {
729 /* PLUS_EXPR is commutative, if operand signedness differs,
730 canonicalize to the first operand being signed and second
731 unsigned to simplify following code. */
732 std::swap (op0, op1);
733 std::swap (arg0, arg1);
734 uns0_p = false;
735 uns1_p = true;
736 }
737
738 /* u1 +- u2 -> ur */
739 if (uns0_p && uns1_p && unsr_p)
740 {
741 insn_code icode = optab_handler (code == PLUS_EXPR ? uaddv4_optab
742 : usubv4_optab, mode);
743 if (icode != CODE_FOR_nothing)
744 {
745 struct expand_operand ops[4];
746 rtx_insn *last = get_last_insn ();
747
748 res = gen_reg_rtx (mode);
749 create_output_operand (&ops[0], res, mode);
750 create_input_operand (&ops[1], op0, mode);
751 create_input_operand (&ops[2], op1, mode);
752 create_fixed_operand (&ops[3], do_error);
753 if (maybe_expand_insn (icode, 4, ops))
754 {
755 last = get_last_insn ();
756 if (profile_status_for_fn (cfun) != PROFILE_ABSENT
757 && JUMP_P (last)
758 && any_condjump_p (last)
759 && !find_reg_note (last, REG_BR_PROB, 0))
760 add_reg_br_prob_note (last,
761 profile_probability::very_unlikely ());
762 emit_jump (done_label);
763 goto do_error_label;
764 }
765
766 delete_insns_since (last);
767 }
768
769 /* Compute the operation. On RTL level, the addition is always
770 unsigned. */
771 res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
772 op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
773 rtx tem = op0;
774 /* For PLUS_EXPR, the operation is commutative, so we can pick
775 operand to compare against. For prec <= BITS_PER_WORD, I think
776 preferring REG operand is better over CONST_INT, because
777 the CONST_INT might enlarge the instruction or CSE would need
778 to figure out we'd already loaded it into a register before.
779 For prec > BITS_PER_WORD, I think CONST_INT might be more beneficial,
780 as then the multi-word comparison can be perhaps simplified. */
781 if (code == PLUS_EXPR
782 && (prec <= BITS_PER_WORD
783 ? (CONST_SCALAR_INT_P (op0) && REG_P (op1))
784 : CONST_SCALAR_INT_P (op1)))
785 tem = op1;
786 do_compare_rtx_and_jump (res, tem, code == PLUS_EXPR ? GEU : LEU,
787 true, mode, NULL_RTX, NULL, done_label,
788 profile_probability::very_likely ());
789 goto do_error_label;
790 }
791
792 /* s1 +- u2 -> sr */
793 if (!uns0_p && uns1_p && !unsr_p)
794 {
795 /* Compute the operation. On RTL level, the addition is always
796 unsigned. */
797 res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
798 op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
799 rtx tem = expand_binop (mode, add_optab,
800 code == PLUS_EXPR ? res : op0, sgn,
801 NULL_RTX, false, OPTAB_LIB_WIDEN);
802 do_compare_rtx_and_jump (tem, op1, GEU, true, mode, NULL_RTX, NULL,
803 done_label, profile_probability::very_likely ());
804 goto do_error_label;
805 }
806
807 /* s1 + u2 -> ur */
808 if (code == PLUS_EXPR && !uns0_p && uns1_p && unsr_p)
809 {
810 op1 = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
811 OPTAB_LIB_WIDEN);
812 /* As we've changed op1, we have to avoid using the value range
813 for the original argument. */
814 arg1 = error_mark_node;
815 do_xor = true;
816 goto do_signed;
817 }
818
819 /* u1 - s2 -> ur */
820 if (code == MINUS_EXPR && uns0_p && !uns1_p && unsr_p)
821 {
822 op0 = expand_binop (mode, add_optab, op0, sgn, NULL_RTX, false,
823 OPTAB_LIB_WIDEN);
824 /* As we've changed op0, we have to avoid using the value range
825 for the original argument. */
826 arg0 = error_mark_node;
827 do_xor = true;
828 goto do_signed;
829 }
830
831 /* s1 - u2 -> ur */
832 if (code == MINUS_EXPR && !uns0_p && uns1_p && unsr_p)
833 {
834 /* Compute the operation. On RTL level, the addition is always
835 unsigned. */
836 res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
837 OPTAB_LIB_WIDEN);
838 int pos_neg = get_range_pos_neg (arg0);
839 if (pos_neg == 2)
840 /* If ARG0 is known to be always negative, this is always overflow. */
841 emit_jump (do_error);
842 else if (pos_neg == 3)
843 /* If ARG0 is not known to be always positive, check at runtime. */
844 do_compare_rtx_and_jump (op0, const0_rtx, LT, false, mode, NULL_RTX,
845 NULL, do_error, profile_probability::very_unlikely ());
846 do_compare_rtx_and_jump (op1, op0, LEU, true, mode, NULL_RTX, NULL,
847 done_label, profile_probability::very_likely ());
848 goto do_error_label;
849 }
850
851 /* u1 - s2 -> sr */
852 if (code == MINUS_EXPR && uns0_p && !uns1_p && !unsr_p)
853 {
854 /* Compute the operation. On RTL level, the addition is always
855 unsigned. */
856 res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
857 OPTAB_LIB_WIDEN);
858 rtx tem = expand_binop (mode, add_optab, op1, sgn, NULL_RTX, false,
859 OPTAB_LIB_WIDEN);
860 do_compare_rtx_and_jump (op0, tem, LTU, true, mode, NULL_RTX, NULL,
861 done_label, profile_probability::very_likely ());
862 goto do_error_label;
863 }
864
865 /* u1 + u2 -> sr */
866 if (code == PLUS_EXPR && uns0_p && uns1_p && !unsr_p)
867 {
868 /* Compute the operation. On RTL level, the addition is always
869 unsigned. */
870 res = expand_binop (mode, add_optab, op0, op1, NULL_RTX, false,
871 OPTAB_LIB_WIDEN);
872 do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
873 NULL, do_error, profile_probability::very_unlikely ());
874 rtx tem = op1;
875 /* The operation is commutative, so we can pick operand to compare
876 against. For prec <= BITS_PER_WORD, I think preferring REG operand
877 is better over CONST_INT, because the CONST_INT might enlarge the
878 instruction or CSE would need to figure out we'd already loaded it
879 into a register before. For prec > BITS_PER_WORD, I think CONST_INT
880 might be more beneficial, as then the multi-word comparison can be
881 perhaps simplified. */
882 if (prec <= BITS_PER_WORD
883 ? (CONST_SCALAR_INT_P (op1) && REG_P (op0))
884 : CONST_SCALAR_INT_P (op0))
885 tem = op0;
886 do_compare_rtx_and_jump (res, tem, GEU, true, mode, NULL_RTX, NULL,
887 done_label, profile_probability::very_likely ());
888 goto do_error_label;
889 }
890
891 /* s1 +- s2 -> ur */
892 if (!uns0_p && !uns1_p && unsr_p)
893 {
894 /* Compute the operation. On RTL level, the addition is always
895 unsigned. */
896 res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
897 op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
898 int pos_neg = get_range_pos_neg (arg1);
899 if (code == PLUS_EXPR)
900 {
901 int pos_neg0 = get_range_pos_neg (arg0);
902 if (pos_neg0 != 3 && pos_neg == 3)
903 {
904 std::swap (op0, op1);
905 pos_neg = pos_neg0;
906 }
907 }
908 rtx tem;
909 if (pos_neg != 3)
910 {
911 tem = expand_binop (mode, ((pos_neg == 1) ^ (code == MINUS_EXPR))
912 ? and_optab : ior_optab,
913 op0, res, NULL_RTX, false, OPTAB_LIB_WIDEN);
914 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL,
915 NULL, done_label, profile_probability::very_likely ());
916 }
917 else
918 {
919 rtx_code_label *do_ior_label = gen_label_rtx ();
920 do_compare_rtx_and_jump (op1, const0_rtx,
921 code == MINUS_EXPR ? GE : LT, false, mode,
922 NULL_RTX, NULL, do_ior_label,
923 profile_probability::even ());
924 tem = expand_binop (mode, and_optab, op0, res, NULL_RTX, false,
925 OPTAB_LIB_WIDEN);
926 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
927 NULL, done_label, profile_probability::very_likely ());
928 emit_jump (do_error);
929 emit_label (do_ior_label);
930 tem = expand_binop (mode, ior_optab, op0, res, NULL_RTX, false,
931 OPTAB_LIB_WIDEN);
932 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
933 NULL, done_label, profile_probability::very_likely ());
934 }
935 goto do_error_label;
936 }
937
938 /* u1 - u2 -> sr */
939 if (code == MINUS_EXPR && uns0_p && uns1_p && !unsr_p)
940 {
941 /* Compute the operation. On RTL level, the addition is always
942 unsigned. */
943 res = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, false,
944 OPTAB_LIB_WIDEN);
945 rtx_code_label *op0_geu_op1 = gen_label_rtx ();
946 do_compare_rtx_and_jump (op0, op1, GEU, true, mode, NULL_RTX, NULL,
947 op0_geu_op1, profile_probability::even ());
948 do_compare_rtx_and_jump (res, const0_rtx, LT, false, mode, NULL_RTX,
949 NULL, done_label, profile_probability::very_likely ());
950 emit_jump (do_error);
951 emit_label (op0_geu_op1);
952 do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
953 NULL, done_label, profile_probability::very_likely ());
954 goto do_error_label;
955 }
956
957 gcc_assert (!uns0_p && !uns1_p && !unsr_p);
958
959 /* s1 +- s2 -> sr */
960 do_signed:
961 {
962 insn_code icode = optab_handler (code == PLUS_EXPR ? addv4_optab
963 : subv4_optab, mode);
964 if (icode != CODE_FOR_nothing)
965 {
966 struct expand_operand ops[4];
967 rtx_insn *last = get_last_insn ();
968
969 res = gen_reg_rtx (mode);
970 create_output_operand (&ops[0], res, mode);
971 create_input_operand (&ops[1], op0, mode);
972 create_input_operand (&ops[2], op1, mode);
973 create_fixed_operand (&ops[3], do_error);
974 if (maybe_expand_insn (icode, 4, ops))
975 {
976 last = get_last_insn ();
977 if (profile_status_for_fn (cfun) != PROFILE_ABSENT
978 && JUMP_P (last)
979 && any_condjump_p (last)
980 && !find_reg_note (last, REG_BR_PROB, 0))
981 add_reg_br_prob_note (last,
982 profile_probability::very_unlikely ());
983 emit_jump (done_label);
984 goto do_error_label;
985 }
986
987 delete_insns_since (last);
988 }
989
990 /* Compute the operation. On RTL level, the addition is always
991 unsigned. */
992 res = expand_binop (mode, code == PLUS_EXPR ? add_optab : sub_optab,
993 op0, op1, NULL_RTX, false, OPTAB_LIB_WIDEN);
994
995 /* If we can prove that one of the arguments (for MINUS_EXPR only
996 the second operand, as subtraction is not commutative) is always
997 non-negative or always negative, we can do just one comparison
998 and conditional jump. */
999 int pos_neg = get_range_pos_neg (arg1);
1000 if (code == PLUS_EXPR)
1001 {
1002 int pos_neg0 = get_range_pos_neg (arg0);
1003 if (pos_neg0 != 3 && pos_neg == 3)
1004 {
1005 std::swap (op0, op1);
1006 pos_neg = pos_neg0;
1007 }
1008 }
1009
1010 /* Addition overflows if and only if the two operands have the same sign,
1011 and the result has the opposite sign. Subtraction overflows if and
1012 only if the two operands have opposite sign, and the subtrahend has
1013 the same sign as the result. Here 0 is counted as positive. */
1014 if (pos_neg == 3)
1015 {
1016 /* Compute op0 ^ op1 (operands have opposite sign). */
1017 rtx op_xor = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
1018 OPTAB_LIB_WIDEN);
1019
1020 /* Compute res ^ op1 (result and 2nd operand have opposite sign). */
1021 rtx res_xor = expand_binop (mode, xor_optab, res, op1, NULL_RTX, false,
1022 OPTAB_LIB_WIDEN);
1023
1024 rtx tem;
1025 if (code == PLUS_EXPR)
1026 {
1027 /* Compute (res ^ op1) & ~(op0 ^ op1). */
1028 tem = expand_unop (mode, one_cmpl_optab, op_xor, NULL_RTX, false);
1029 tem = expand_binop (mode, and_optab, res_xor, tem, NULL_RTX, false,
1030 OPTAB_LIB_WIDEN);
1031 }
1032 else
1033 {
1034 /* Compute (op0 ^ op1) & ~(res ^ op1). */
1035 tem = expand_unop (mode, one_cmpl_optab, res_xor, NULL_RTX, false);
1036 tem = expand_binop (mode, and_optab, op_xor, tem, NULL_RTX, false,
1037 OPTAB_LIB_WIDEN);
1038 }
1039
1040 /* No overflow if the result has bit sign cleared. */
1041 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
1042 NULL, done_label, profile_probability::very_likely ());
1043 }
1044
1045 /* Compare the result of the operation with the first operand.
1046 No overflow for addition if second operand is positive and result
1047 is larger or second operand is negative and result is smaller.
1048 Likewise for subtraction with sign of second operand flipped. */
1049 else
1050 do_compare_rtx_and_jump (res, op0,
1051 (pos_neg == 1) ^ (code == MINUS_EXPR) ? GE : LE,
1052 false, mode, NULL_RTX, NULL, done_label,
1053 profile_probability::very_likely ());
1054 }
1055
1056 do_error_label:
1057 emit_label (do_error);
1058 if (is_ubsan)
1059 {
1060 /* Expand the ubsan builtin call. */
1061 push_temp_slots ();
1062 fn = ubsan_build_overflow_builtin (code, loc, TREE_TYPE (arg0),
1063 arg0, arg1, datap);
1064 expand_normal (fn);
1065 pop_temp_slots ();
1066 do_pending_stack_adjust ();
1067 }
1068 else if (lhs)
1069 expand_arith_set_overflow (lhs, target);
1070
1071 /* We're done. */
1072 emit_label (done_label);
1073
1074 if (lhs)
1075 {
1076 if (is_ubsan)
1077 expand_ubsan_result_store (target, res);
1078 else
1079 {
1080 if (do_xor)
1081 res = expand_binop (mode, add_optab, res, sgn, NULL_RTX, false,
1082 OPTAB_LIB_WIDEN);
1083
1084 expand_arith_overflow_result_store (lhs, target, mode, res);
1085 }
1086 }
1087}
1088
/* Add negate overflow checking to the statement STMT.
   LOC is the location of the negation, LHS the optional result lhs
   (when not IS_UBSAN, a complex value whose imaginary part is the
   overflow flag), ARG1 the operand being negated.  If IS_UBSAN is
   true, expand a ubsan runtime diagnostic call on overflow instead of
   setting the overflow flag; DATAP is extra data passed through to
   ubsan_build_overflow_builtin.  */

static void
expand_neg_overflow (location_t loc, tree lhs, tree arg1, bool is_ubsan,
		     tree *datap)
{
  rtx res, op1;
  tree fn;
  rtx_code_label *done_label, *do_error;
  rtx target = NULL_RTX;

  done_label = gen_label_rtx ();
  do_error = gen_label_rtx ();

  do_pending_stack_adjust ();
  op1 = expand_normal (arg1);

  scalar_int_mode mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg1));
  if (lhs)
    {
      target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      if (!is_ubsan)
	/* Clear the overflow flag (imaginary part of the complex
	   result) up front; the error path sets it again below.  */
	write_complex_part (target, const0_rtx, true);
    }

  /* Prefer a negv3 pattern if the target provides one: it computes the
     negation and jumps to operand 2 (DO_ERROR) on overflow.  */
  enum insn_code icode = optab_handler (negv3_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      struct expand_operand ops[3];
      rtx_insn *last = get_last_insn ();

      res = gen_reg_rtx (mode);
      create_output_operand (&ops[0], res, mode);
      create_input_operand (&ops[1], op1, mode);
      create_fixed_operand (&ops[2], do_error);
      if (maybe_expand_insn (icode, 3, ops))
	{
	  last = get_last_insn ();
	  /* If the pattern ended in a conditional jump that carries no
	     branch probability yet, annotate it: overflow is very
	     unlikely.  */
	  if (profile_status_for_fn (cfun) != PROFILE_ABSENT
	      && JUMP_P (last)
	      && any_condjump_p (last)
	      && !find_reg_note (last, REG_BR_PROB, 0))
	    add_reg_br_prob_note (last,
				  profile_probability::very_unlikely ());
	  emit_jump (done_label);
	}
      else
	{
	  /* Expansion failed; discard any partially emitted insns and
	     fall back to the generic code below.  */
	  delete_insns_since (last);
	  icode = CODE_FOR_nothing;
	}
    }

  if (icode == CODE_FOR_nothing)
    {
      /* Compute the operation with a plain negation; the overflow
	 condition is detected separately by the comparison below.  */
      res = expand_unop (mode, neg_optab, op1, NULL_RTX, false);

      /* Compare the operand with the most negative value.  Negation
	 overflows iff OP1 equals TYPE_MIN of its type; jump to
	 DONE_LABEL (no overflow) otherwise.  */
      rtx minv = expand_normal (TYPE_MIN_VALUE (TREE_TYPE (arg1)));
      do_compare_rtx_and_jump (op1, minv, NE, true, mode, NULL_RTX, NULL,
			       done_label, profile_probability::very_likely ());
    }

  emit_label (do_error);
  if (is_ubsan)
    {
      /* Expand the ubsan builtin call.  */
      push_temp_slots ();
      fn = ubsan_build_overflow_builtin (NEGATE_EXPR, loc, TREE_TYPE (arg1),
					 arg1, NULL_TREE, datap);
      expand_normal (fn);
      pop_temp_slots ();
      do_pending_stack_adjust ();
    }
  else if (lhs)
    /* Record the overflow in the flag part of the complex result.  */
    expand_arith_set_overflow (lhs, target);

  /* We're done.  */
  emit_label (done_label);

  if (lhs)
    {
      if (is_ubsan)
	expand_ubsan_result_store (target, res);
      else
	expand_arith_overflow_result_store (lhs, target, mode, res);
    }
}
1179
1180/* Return true if UNS WIDEN_MULT_EXPR with result mode WMODE and operand
1181 mode MODE can be expanded without using a libcall. */
1182
1183static bool
1184can_widen_mult_without_libcall (scalar_int_mode wmode, scalar_int_mode mode,
1185 rtx op0, rtx op1, bool uns)
1186{
1187 if (find_widening_optab_handler (umul_widen_optab, wmode, mode)
1188 != CODE_FOR_nothing)
1189 return true;
1190
1191 if (find_widening_optab_handler (smul_widen_optab, wmode, mode)
1192 != CODE_FOR_nothing)
1193 return true;
1194
1195 rtx_insn *last = get_last_insn ();
1196 if (CONSTANT_P (op0))
1197 op0 = convert_modes (wmode, mode, op0, uns);
1198 else
1199 op0 = gen_raw_REG (wmode, LAST_VIRTUAL_REGISTER + 1);
1200 if (CONSTANT_P (op1))
1201 op1 = convert_modes (wmode, mode, op1, uns);
1202 else
1203 op1 = gen_raw_REG (wmode, LAST_VIRTUAL_REGISTER + 2);
1204 rtx ret = expand_mult (wmode, op0, op1, NULL_RTX, uns, true);
1205 delete_insns_since (last);
1206 return ret != NULL_RTX;
1207}
1208
1209/* Add mul overflow checking to the statement STMT. */
1210
1211static void
1212expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
1213 bool unsr_p, bool uns0_p, bool uns1_p, bool is_ubsan,
1214 tree *datap)
1215{
1216 rtx res, op0, op1;
1217 tree fn, type;
1218 rtx_code_label *done_label, *do_error;
1219 rtx target = NULL_RTX;
1220 signop sign;
1221 enum insn_code icode;
1222
1223 done_label = gen_label_rtx ();
1224 do_error = gen_label_rtx ();
1225
1226 do_pending_stack_adjust ();
1227 op0 = expand_normal (arg0);
1228 op1 = expand_normal (arg1);
1229
1230 scalar_int_mode mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (arg0));
1231 bool uns = unsr_p;
1232 if (lhs)
1233 {
1234 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
1235 if (!is_ubsan)
1236 write_complex_part (target, const0_rtx, true);
1237 }
1238
1239 if (is_ubsan)
1240 gcc_assert (!unsr_p && !uns0_p && !uns1_p);
1241
1242 /* We assume both operands and result have the same precision
1243 here (GET_MODE_BITSIZE (mode)), S stands for signed type
1244 with that precision, U for unsigned type with that precision,
1245 sgn for unsigned most significant bit in that precision.
1246 s1 is signed first operand, u1 is unsigned first operand,
1247 s2 is signed second operand, u2 is unsigned second operand,
1248 sr is signed result, ur is unsigned result and the following
1249 rules say how to compute result (which is always result of
1250 the operands as if both were unsigned, cast to the right
1251 signedness) and how to compute whether operation overflowed.
1252 main_ovf (false) stands for jump on signed multiplication
1253 overflow or the main algorithm with uns == false.
1254 main_ovf (true) stands for jump on unsigned multiplication
1255 overflow or the main algorithm with uns == true.
1256
1257 s1 * s2 -> sr
1258 res = (S) ((U) s1 * (U) s2)
1259 ovf = main_ovf (false)
1260 u1 * u2 -> ur
1261 res = u1 * u2
1262 ovf = main_ovf (true)
1263 s1 * u2 -> ur
1264 res = (U) s1 * u2
1265 ovf = (s1 < 0 && u2) || main_ovf (true)
1266 u1 * u2 -> sr
1267 res = (S) (u1 * u2)
1268 ovf = res < 0 || main_ovf (true)
1269 s1 * u2 -> sr
1270 res = (S) ((U) s1 * u2)
1271 ovf = (S) u2 >= 0 ? main_ovf (false)
1272 : (s1 != 0 && (s1 != -1 || u2 != (U) res))
1273 s1 * s2 -> ur
1274 t1 = (s1 & s2) < 0 ? (-(U) s1) : ((U) s1)
1275 t2 = (s1 & s2) < 0 ? (-(U) s2) : ((U) s2)
1276 res = t1 * t2
1277 ovf = (s1 ^ s2) < 0 ? (s1 && s2) : main_ovf (true) */
1278
1279 if (uns0_p && !uns1_p)
1280 {
1281 /* Multiplication is commutative, if operand signedness differs,
1282 canonicalize to the first operand being signed and second
1283 unsigned to simplify following code. */
1284 std::swap (op0, op1);
1285 std::swap (arg0, arg1);
1286 uns0_p = false;
1287 uns1_p = true;
1288 }
1289
1290 int pos_neg0 = get_range_pos_neg (arg0);
1291 int pos_neg1 = get_range_pos_neg (arg1);
1292
1293 /* s1 * u2 -> ur */
1294 if (!uns0_p && uns1_p && unsr_p)
1295 {
1296 switch (pos_neg0)
1297 {
1298 case 1:
1299 /* If s1 is non-negative, just perform normal u1 * u2 -> ur. */
1300 goto do_main;
1301 case 2:
1302 /* If s1 is negative, avoid the main code, just multiply and
1303 signal overflow if op1 is not 0. */
1304 struct separate_ops ops;
1305 ops.code = MULT_EXPR;
1306 ops.type = TREE_TYPE (arg1);
1307 ops.op0 = make_tree (ops.type, op0);
1308 ops.op1 = make_tree (ops.type, op1);
1309 ops.op2 = NULL_TREE;
1310 ops.location = loc;
1311 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1312 do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
1313 NULL, done_label, profile_probability::very_likely ());
1314 goto do_error_label;
1315 case 3:
1316 rtx_code_label *do_main_label;
1317 do_main_label = gen_label_rtx ();
1318 do_compare_rtx_and_jump (op0, const0_rtx, GE, false, mode, NULL_RTX,
1319 NULL, do_main_label, profile_probability::very_likely ());
1320 do_compare_rtx_and_jump (op1, const0_rtx, EQ, true, mode, NULL_RTX,
1321 NULL, do_main_label, profile_probability::very_likely ());
1322 expand_arith_set_overflow (lhs, target);
1323 emit_label (do_main_label);
1324 goto do_main;
1325 default:
1326 gcc_unreachable ();
1327 }
1328 }
1329
1330 /* u1 * u2 -> sr */
1331 if (uns0_p && uns1_p && !unsr_p)
1332 {
1333 uns = true;
1334 /* Rest of handling of this case after res is computed. */
1335 goto do_main;
1336 }
1337
1338 /* s1 * u2 -> sr */
1339 if (!uns0_p && uns1_p && !unsr_p)
1340 {
1341 switch (pos_neg1)
1342 {
1343 case 1:
1344 goto do_main;
1345 case 2:
1346 /* If (S) u2 is negative (i.e. u2 is larger than maximum of S,
1347 avoid the main code, just multiply and signal overflow
1348 unless 0 * u2 or -1 * ((U) Smin). */
1349 struct separate_ops ops;
1350 ops.code = MULT_EXPR;
1351 ops.type = TREE_TYPE (arg1);
1352 ops.op0 = make_tree (ops.type, op0);
1353 ops.op1 = make_tree (ops.type, op1);
1354 ops.op2 = NULL_TREE;
1355 ops.location = loc;
1356 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1357 do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
1358 NULL, done_label, profile_probability::very_likely ());
1359 do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
1360 NULL, do_error, profile_probability::very_unlikely ());
1361 int prec;
1362 prec = GET_MODE_PRECISION (mode);
1363 rtx sgn;
1364 sgn = immed_wide_int_const (wi::min_value (prec, SIGNED), mode);
1365 do_compare_rtx_and_jump (op1, sgn, EQ, true, mode, NULL_RTX,
1366 NULL, done_label, profile_probability::very_likely ());
1367 goto do_error_label;
1368 case 3:
1369 /* Rest of handling of this case after res is computed. */
1370 goto do_main;
1371 default:
1372 gcc_unreachable ();
1373 }
1374 }
1375
1376 /* s1 * s2 -> ur */
1377 if (!uns0_p && !uns1_p && unsr_p)
1378 {
1379 rtx tem, tem2;
1380 switch (pos_neg0 | pos_neg1)
1381 {
1382 case 1: /* Both operands known to be non-negative. */
1383 goto do_main;
1384 case 2: /* Both operands known to be negative. */
1385 op0 = expand_unop (mode, neg_optab, op0, NULL_RTX, false);
1386 op1 = expand_unop (mode, neg_optab, op1, NULL_RTX, false);
1387 /* Avoid looking at arg0/arg1 ranges, as we've changed
1388 the arguments. */
1389 arg0 = error_mark_node;
1390 arg1 = error_mark_node;
1391 goto do_main;
1392 case 3:
1393 if ((pos_neg0 ^ pos_neg1) == 3)
1394 {
1395 /* If one operand is known to be negative and the other
1396 non-negative, this overflows always, unless the non-negative
1397 one is 0. Just do normal multiply and set overflow
1398 unless one of the operands is 0. */
1399 struct separate_ops ops;
1400 ops.code = MULT_EXPR;
1401 ops.type
1402 = build_nonstandard_integer_type (GET_MODE_PRECISION (mode),
1403 1);
1404 ops.op0 = make_tree (ops.type, op0);
1405 ops.op1 = make_tree (ops.type, op1);
1406 ops.op2 = NULL_TREE;
1407 ops.location = loc;
1408 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1409 tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
1410 OPTAB_LIB_WIDEN);
1411 do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode,
1412 NULL_RTX, NULL, done_label,
1413 profile_probability::very_likely ());
1414 goto do_error_label;
1415 }
1416 /* The general case, do all the needed comparisons at runtime. */
1417 rtx_code_label *do_main_label, *after_negate_label;
1418 rtx rop0, rop1;
1419 rop0 = gen_reg_rtx (mode);
1420 rop1 = gen_reg_rtx (mode);
1421 emit_move_insn (rop0, op0);
1422 emit_move_insn (rop1, op1);
1423 op0 = rop0;
1424 op1 = rop1;
1425 do_main_label = gen_label_rtx ();
1426 after_negate_label = gen_label_rtx ();
1427 tem = expand_binop (mode, and_optab, op0, op1, NULL_RTX, false,
1428 OPTAB_LIB_WIDEN);
1429 do_compare_rtx_and_jump (tem, const0_rtx, GE, false, mode, NULL_RTX,
1430 NULL, after_negate_label, profile_probability::very_likely ());
1431 /* Both arguments negative here, negate them and continue with
1432 normal unsigned overflow checking multiplication. */
1433 emit_move_insn (op0, expand_unop (mode, neg_optab, op0,
1434 NULL_RTX, false));
1435 emit_move_insn (op1, expand_unop (mode, neg_optab, op1,
1436 NULL_RTX, false));
1437 /* Avoid looking at arg0/arg1 ranges, as we might have changed
1438 the arguments. */
1439 arg0 = error_mark_node;
1440 arg1 = error_mark_node;
1441 emit_jump (do_main_label);
1442 emit_label (after_negate_label);
1443 tem2 = expand_binop (mode, xor_optab, op0, op1, NULL_RTX, false,
1444 OPTAB_LIB_WIDEN);
1445 do_compare_rtx_and_jump (tem2, const0_rtx, GE, false, mode, NULL_RTX,
1446 NULL, do_main_label, profile_probability::very_likely ());
1447 /* One argument is negative here, the other positive. This
1448 overflows always, unless one of the arguments is 0. But
1449 if e.g. s2 is 0, (U) s1 * 0 doesn't overflow, whatever s1
1450 is, thus we can keep do_main code oring in overflow as is. */
1451 do_compare_rtx_and_jump (tem, const0_rtx, EQ, true, mode, NULL_RTX,
1452 NULL, do_main_label, profile_probability::very_likely ());
1453 expand_arith_set_overflow (lhs, target);
1454 emit_label (do_main_label);
1455 goto do_main;
1456 default:
1457 gcc_unreachable ();
1458 }
1459 }
1460
1461 do_main:
1462 type = build_nonstandard_integer_type (GET_MODE_PRECISION (mode), uns);
1463 sign = uns ? UNSIGNED : SIGNED;
1464 icode = optab_handler (uns ? umulv4_optab : mulv4_optab, mode);
1465 if (uns
1466 && (integer_pow2p (arg0) || integer_pow2p (arg1))
1467 && (optimize_insn_for_speed_p () || icode == CODE_FOR_nothing))
1468 {
1469 /* Optimize unsigned multiplication by power of 2 constant
1470 using 2 shifts, one for result, one to extract the shifted
1471 out bits to see if they are all zero.
1472 Don't do this if optimizing for size and we have umulv4_optab,
1473 in that case assume multiplication will be shorter.
1474 This is heuristics based on the single target that provides
1475 umulv4 right now (i?86/x86_64), if further targets add it, this
1476 might need to be revisited.
1477 Cases where both operands are constant should be folded already
1478 during GIMPLE, and cases where one operand is constant but not
1479 power of 2 are questionable, either the WIDEN_MULT_EXPR case
1480 below can be done without multiplication, just by shifts and adds,
1481 or we'd need to divide the result (and hope it actually doesn't
1482 really divide nor multiply) and compare the result of the division
1483 with the original operand. */
1484 rtx opn0 = op0;
1485 rtx opn1 = op1;
1486 tree argn0 = arg0;
1487 tree argn1 = arg1;
1488 if (integer_pow2p (arg0))
1489 {
1490 std::swap (opn0, opn1);
1491 std::swap (argn0, argn1);
1492 }
1493 int cnt = tree_log2 (argn1);
1494 if (cnt >= 0 && cnt < GET_MODE_PRECISION (mode))
1495 {
1496 rtx upper = const0_rtx;
1497 res = expand_shift (LSHIFT_EXPR, mode, opn0, cnt, NULL_RTX, uns);
1498 if (cnt != 0)
1499 upper = expand_shift (RSHIFT_EXPR, mode, opn0,
1500 GET_MODE_PRECISION (mode) - cnt,
1501 NULL_RTX, uns);
1502 do_compare_rtx_and_jump (upper, const0_rtx, EQ, true, mode,
1503 NULL_RTX, NULL, done_label,
1504 profile_probability::very_likely ());
1505 goto do_error_label;
1506 }
1507 }
1508 if (icode != CODE_FOR_nothing)
1509 {
1510 struct expand_operand ops[4];
1511 rtx_insn *last = get_last_insn ();
1512
1513 res = gen_reg_rtx (mode);
1514 create_output_operand (&ops[0], res, mode);
1515 create_input_operand (&ops[1], op0, mode);
1516 create_input_operand (&ops[2], op1, mode);
1517 create_fixed_operand (&ops[3], do_error);
1518 if (maybe_expand_insn (icode, 4, ops))
1519 {
1520 last = get_last_insn ();
1521 if (profile_status_for_fn (cfun) != PROFILE_ABSENT
1522 && JUMP_P (last)
1523 && any_condjump_p (last)
1524 && !find_reg_note (last, REG_BR_PROB, 0))
1525 add_reg_br_prob_note (last,
1526 profile_probability::very_unlikely ());
1527 emit_jump (done_label);
1528 }
1529 else
1530 {
1531 delete_insns_since (last);
1532 icode = CODE_FOR_nothing;
1533 }
1534 }
1535
1536 if (icode == CODE_FOR_nothing)
1537 {
1538 struct separate_ops ops;
1539 int prec = GET_MODE_PRECISION (mode);
1540 scalar_int_mode hmode, wmode;
1541 ops.op0 = make_tree (type, op0);
1542 ops.op1 = make_tree (type, op1);
1543 ops.op2 = NULL_TREE;
1544 ops.location = loc;
1545
1546 /* Optimize unsigned overflow check where we don't use the
1547 multiplication result, just whether overflow happened.
1548 If we can do MULT_HIGHPART_EXPR, that followed by
1549 comparison of the result against zero is cheapest.
1550 We'll still compute res, but it should be DCEd later. */
1551 use_operand_p use;
1552 gimple *use_stmt;
1553 if (!is_ubsan
1554 && lhs
1555 && uns
1556 && !(uns0_p && uns1_p && !unsr_p)
1557 && can_mult_highpart_p (mode, uns) == 1
1558 && single_imm_use (lhs, &use, &use_stmt)
1559 && is_gimple_assign (use_stmt)
1560 && gimple_assign_rhs_code (use_stmt) == IMAGPART_EXPR)
1561 goto highpart;
1562
1563 if (GET_MODE_2XWIDER_MODE (mode).exists (&wmode)
1564 && targetm.scalar_mode_supported_p (wmode)
1565 && can_widen_mult_without_libcall (wmode, mode, op0, op1, uns))
1566 {
1567 twoxwider:
1568 ops.code = WIDEN_MULT_EXPR;
1569 ops.type
1570 = build_nonstandard_integer_type (GET_MODE_PRECISION (wmode), uns);
1571
1572 res = expand_expr_real_2 (&ops, NULL_RTX, wmode, EXPAND_NORMAL);
1573 rtx hipart = expand_shift (RSHIFT_EXPR, wmode, res, prec,
1574 NULL_RTX, uns);
1575 hipart = convert_modes (mode, wmode, hipart, uns);
1576 res = convert_modes (mode, wmode, res, uns);
1577 if (uns)
1578 /* For the unsigned multiplication, there was overflow if
1579 HIPART is non-zero. */
1580 do_compare_rtx_and_jump (hipart, const0_rtx, EQ, true, mode,
1581 NULL_RTX, NULL, done_label,
1582 profile_probability::very_likely ());
1583 else
1584 {
1585 rtx signbit = expand_shift (RSHIFT_EXPR, mode, res, prec - 1,
1586 NULL_RTX, 0);
1587 /* RES is low half of the double width result, HIPART
1588 the high half. There was overflow if
1589 HIPART is different from RES < 0 ? -1 : 0. */
1590 do_compare_rtx_and_jump (signbit, hipart, EQ, true, mode,
1591 NULL_RTX, NULL, done_label,
1592 profile_probability::very_likely ());
1593 }
1594 }
1595 else if (can_mult_highpart_p (mode, uns) == 1)
1596 {
1597 highpart:
1598 ops.code = MULT_HIGHPART_EXPR;
1599 ops.type = type;
1600
1601 rtx hipart = expand_expr_real_2 (&ops, NULL_RTX, mode,
1602 EXPAND_NORMAL);
1603 ops.code = MULT_EXPR;
1604 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1605 if (uns)
1606 /* For the unsigned multiplication, there was overflow if
1607 HIPART is non-zero. */
1608 do_compare_rtx_and_jump (hipart, const0_rtx, EQ, true, mode,
1609 NULL_RTX, NULL, done_label,
1610 profile_probability::very_likely ());
1611 else
1612 {
1613 rtx signbit = expand_shift (RSHIFT_EXPR, mode, res, prec - 1,
1614 NULL_RTX, 0);
1615 /* RES is low half of the double width result, HIPART
1616 the high half. There was overflow if
1617 HIPART is different from RES < 0 ? -1 : 0. */
1618 do_compare_rtx_and_jump (signbit, hipart, EQ, true, mode,
1619 NULL_RTX, NULL, done_label,
1620 profile_probability::very_likely ());
1621 }
1622
1623 }
1624 else if (int_mode_for_size (prec / 2, 1).exists (&hmode)
1625 && 2 * GET_MODE_PRECISION (hmode) == prec)
1626 {
1627 rtx_code_label *large_op0 = gen_label_rtx ();
1628 rtx_code_label *small_op0_large_op1 = gen_label_rtx ();
1629 rtx_code_label *one_small_one_large = gen_label_rtx ();
1630 rtx_code_label *both_ops_large = gen_label_rtx ();
1631 rtx_code_label *after_hipart_neg = uns ? NULL : gen_label_rtx ();
1632 rtx_code_label *after_lopart_neg = uns ? NULL : gen_label_rtx ();
1633 rtx_code_label *do_overflow = gen_label_rtx ();
1634 rtx_code_label *hipart_different = uns ? NULL : gen_label_rtx ();
1635
1636 unsigned int hprec = GET_MODE_PRECISION (hmode);
1637 rtx hipart0 = expand_shift (RSHIFT_EXPR, mode, op0, hprec,
1638 NULL_RTX, uns);
1639 hipart0 = convert_modes (hmode, mode, hipart0, uns);
1640 rtx lopart0 = convert_modes (hmode, mode, op0, uns);
1641 rtx signbit0 = const0_rtx;
1642 if (!uns)
1643 signbit0 = expand_shift (RSHIFT_EXPR, hmode, lopart0, hprec - 1,
1644 NULL_RTX, 0);
1645 rtx hipart1 = expand_shift (RSHIFT_EXPR, mode, op1, hprec,
1646 NULL_RTX, uns);
1647 hipart1 = convert_modes (hmode, mode, hipart1, uns);
1648 rtx lopart1 = convert_modes (hmode, mode, op1, uns);
1649 rtx signbit1 = const0_rtx;
1650 if (!uns)
1651 signbit1 = expand_shift (RSHIFT_EXPR, hmode, lopart1, hprec - 1,
1652 NULL_RTX, 0);
1653
1654 res = gen_reg_rtx (mode);
1655
1656 /* True if op0 resp. op1 are known to be in the range of
1657 halfstype. */
1658 bool op0_small_p = false;
1659 bool op1_small_p = false;
1660 /* True if op0 resp. op1 are known to have all zeros or all ones
1661 in the upper half of bits, but are not known to be
1662 op{0,1}_small_p. */
1663 bool op0_medium_p = false;
1664 bool op1_medium_p = false;
1665 /* -1 if op{0,1} is known to be negative, 0 if it is known to be
1666 nonnegative, 1 if unknown. */
1667 int op0_sign = 1;
1668 int op1_sign = 1;
1669
1670 if (pos_neg0 == 1)
1671 op0_sign = 0;
1672 else if (pos_neg0 == 2)
1673 op0_sign = -1;
1674 if (pos_neg1 == 1)
1675 op1_sign = 0;
1676 else if (pos_neg1 == 2)
1677 op1_sign = -1;
1678
1679 unsigned int mprec0 = prec;
1680 if (arg0 != error_mark_node)
1681 mprec0 = get_min_precision (arg0, sign);
1682 if (mprec0 <= hprec)
1683 op0_small_p = true;
1684 else if (!uns && mprec0 <= hprec + 1)
1685 op0_medium_p = true;
1686 unsigned int mprec1 = prec;
1687 if (arg1 != error_mark_node)
1688 mprec1 = get_min_precision (arg1, sign);
1689 if (mprec1 <= hprec)
1690 op1_small_p = true;
1691 else if (!uns && mprec1 <= hprec + 1)
1692 op1_medium_p = true;
1693
1694 int smaller_sign = 1;
1695 int larger_sign = 1;
1696 if (op0_small_p)
1697 {
1698 smaller_sign = op0_sign;
1699 larger_sign = op1_sign;
1700 }
1701 else if (op1_small_p)
1702 {
1703 smaller_sign = op1_sign;
1704 larger_sign = op0_sign;
1705 }
1706 else if (op0_sign == op1_sign)
1707 {
1708 smaller_sign = op0_sign;
1709 larger_sign = op0_sign;
1710 }
1711
1712 if (!op0_small_p)
1713 do_compare_rtx_and_jump (signbit0, hipart0, NE, true, hmode,
1714 NULL_RTX, NULL, large_op0,
1715 profile_probability::unlikely ());
1716
1717 if (!op1_small_p)
1718 do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
1719 NULL_RTX, NULL, small_op0_large_op1,
1720 profile_probability::unlikely ());
1721
1722 /* If both op0 and op1 are sign (!uns) or zero (uns) extended from
1723 hmode to mode, the multiplication will never overflow. We can
1724 do just one hmode x hmode => mode widening multiplication. */
1725 rtx lopart0s = lopart0, lopart1s = lopart1;
1726 if (GET_CODE (lopart0) == SUBREG)
1727 {
1728 lopart0s = shallow_copy_rtx (lopart0);
1729 SUBREG_PROMOTED_VAR_P (lopart0s) = 1;
1730 SUBREG_PROMOTED_SET (lopart0s, uns ? SRP_UNSIGNED : SRP_SIGNED);
1731 }
1732 if (GET_CODE (lopart1) == SUBREG)
1733 {
1734 lopart1s = shallow_copy_rtx (lopart1);
1735 SUBREG_PROMOTED_VAR_P (lopart1s) = 1;
1736 SUBREG_PROMOTED_SET (lopart1s, uns ? SRP_UNSIGNED : SRP_SIGNED);
1737 }
1738 tree halfstype = build_nonstandard_integer_type (hprec, uns);
1739 ops.op0 = make_tree (halfstype, lopart0s);
1740 ops.op1 = make_tree (halfstype, lopart1s);
1741 ops.code = WIDEN_MULT_EXPR;
1742 ops.type = type;
1743 rtx thisres
1744 = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1745 emit_move_insn (res, thisres);
1746 emit_jump (done_label);
1747
1748 emit_label (small_op0_large_op1);
1749
1750 /* If op0 is sign (!uns) or zero (uns) extended from hmode to mode,
1751 but op1 is not, just swap the arguments and handle it as op1
1752 sign/zero extended, op0 not. */
1753 rtx larger = gen_reg_rtx (mode);
1754 rtx hipart = gen_reg_rtx (hmode);
1755 rtx lopart = gen_reg_rtx (hmode);
1756 emit_move_insn (larger, op1);
1757 emit_move_insn (hipart, hipart1);
1758 emit_move_insn (lopart, lopart0);
1759 emit_jump (one_small_one_large);
1760
1761 emit_label (large_op0);
1762
1763 if (!op1_small_p)
1764 do_compare_rtx_and_jump (signbit1, hipart1, NE, true, hmode,
1765 NULL_RTX, NULL, both_ops_large,
1766 profile_probability::unlikely ());
1767
1768 /* If op1 is sign (!uns) or zero (uns) extended from hmode to mode,
1769 but op0 is not, prepare larger, hipart and lopart pseudos and
1770 handle it together with small_op0_large_op1. */
1771 emit_move_insn (larger, op0);
1772 emit_move_insn (hipart, hipart0);
1773 emit_move_insn (lopart, lopart1);
1774
1775 emit_label (one_small_one_large);
1776
1777 /* lopart is the low part of the operand that is sign extended
1778 to mode, larger is the other operand, hipart is the
1779 high part of larger and lopart0 and lopart1 are the low parts
1780 of both operands.
1781 We perform lopart0 * lopart1 and lopart * hipart widening
1782 multiplications. */
1783 tree halfutype = build_nonstandard_integer_type (hprec, 1);
1784 ops.op0 = make_tree (halfutype, lopart0);
1785 ops.op1 = make_tree (halfutype, lopart1);
1786 rtx lo0xlo1
1787 = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1788
1789 ops.op0 = make_tree (halfutype, lopart);
1790 ops.op1 = make_tree (halfutype, hipart);
1791 rtx loxhi = gen_reg_rtx (mode);
1792 rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1793 emit_move_insn (loxhi, tem);
1794
1795 if (!uns)
1796 {
1797 /* if (hipart < 0) loxhi -= lopart << (bitsize / 2); */
1798 if (larger_sign == 0)
1799 emit_jump (after_hipart_neg);
1800 else if (larger_sign != -1)
1801 do_compare_rtx_and_jump (hipart, const0_rtx, GE, false, hmode,
1802 NULL_RTX, NULL, after_hipart_neg,
1803 profile_probability::even ());
1804
1805 tem = convert_modes (mode, hmode, lopart, 1);
1806 tem = expand_shift (LSHIFT_EXPR, mode, tem, hprec, NULL_RTX, 1);
1807 tem = expand_simple_binop (mode, MINUS, loxhi, tem, NULL_RTX,
1808 1, OPTAB_WIDEN);
1809 emit_move_insn (loxhi, tem);
1810
1811 emit_label (after_hipart_neg);
1812
1813 /* if (lopart < 0) loxhi -= larger; */
1814 if (smaller_sign == 0)
1815 emit_jump (after_lopart_neg);
1816 else if (smaller_sign != -1)
1817 do_compare_rtx_and_jump (lopart, const0_rtx, GE, false, hmode,
1818 NULL_RTX, NULL, after_lopart_neg,
1819 profile_probability::even ());
1820
1821 tem = expand_simple_binop (mode, MINUS, loxhi, larger, NULL_RTX,
1822 1, OPTAB_WIDEN);
1823 emit_move_insn (loxhi, tem);
1824
1825 emit_label (after_lopart_neg);
1826 }
1827
1828 /* loxhi += (uns) lo0xlo1 >> (bitsize / 2); */
1829 tem = expand_shift (RSHIFT_EXPR, mode, lo0xlo1, hprec, NULL_RTX, 1);
1830 tem = expand_simple_binop (mode, PLUS, loxhi, tem, NULL_RTX,
1831 1, OPTAB_WIDEN);
1832 emit_move_insn (loxhi, tem);
1833
1834 /* if (loxhi >> (bitsize / 2)
1835 == (hmode) loxhi >> (bitsize / 2 - 1)) (if !uns)
1836 if (loxhi >> (bitsize / 2) == 0 (if uns). */
1837 rtx hipartloxhi = expand_shift (RSHIFT_EXPR, mode, loxhi, hprec,
1838 NULL_RTX, 0);
1839 hipartloxhi = convert_modes (hmode, mode, hipartloxhi, 0);
1840 rtx signbitloxhi = const0_rtx;
1841 if (!uns)
1842 signbitloxhi = expand_shift (RSHIFT_EXPR, hmode,
1843 convert_modes (hmode, mode,
1844 loxhi, 0),
1845 hprec - 1, NULL_RTX, 0);
1846
1847 do_compare_rtx_and_jump (signbitloxhi, hipartloxhi, NE, true, hmode,
1848 NULL_RTX, NULL, do_overflow,
1849 profile_probability::very_unlikely ());
1850
1851 /* res = (loxhi << (bitsize / 2)) | (hmode) lo0xlo1; */
1852 rtx loxhishifted = expand_shift (LSHIFT_EXPR, mode, loxhi, hprec,
1853 NULL_RTX, 1);
1854 tem = convert_modes (mode, hmode,
1855 convert_modes (hmode, mode, lo0xlo1, 1), 1);
1856
1857 tem = expand_simple_binop (mode, IOR, loxhishifted, tem, res,
1858 1, OPTAB_WIDEN);
1859 if (tem != res)
1860 emit_move_insn (res, tem);
1861 emit_jump (done_label);
1862
1863 emit_label (both_ops_large);
1864
1865 /* If both operands are large (not sign (!uns) or zero (uns)
1866 extended from hmode), then perform the full multiplication
1867 which will be the result of the operation.
1868 The only cases which don't overflow are for signed multiplication
1869 some cases where both hipart0 and highpart1 are 0 or -1.
1870 For unsigned multiplication when high parts are both non-zero
1871 this overflows always. */
1872 ops.code = MULT_EXPR;
1873 ops.op0 = make_tree (type, op0);
1874 ops.op1 = make_tree (type, op1);
1875 tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1876 emit_move_insn (res, tem);
1877
1878 if (!uns)
1879 {
1880 if (!op0_medium_p)
1881 {
1882 tem = expand_simple_binop (hmode, PLUS, hipart0, const1_rtx,
1883 NULL_RTX, 1, OPTAB_WIDEN);
1884 do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
1885 NULL_RTX, NULL, do_error,
1886 profile_probability::very_unlikely ());
1887 }
1888
1889 if (!op1_medium_p)
1890 {
1891 tem = expand_simple_binop (hmode, PLUS, hipart1, const1_rtx,
1892 NULL_RTX, 1, OPTAB_WIDEN);
1893 do_compare_rtx_and_jump (tem, const1_rtx, GTU, true, hmode,
1894 NULL_RTX, NULL, do_error,
1895 profile_probability::very_unlikely ());
1896 }
1897
1898 /* At this point hipart{0,1} are both in [-1, 0]. If they are
1899 the same, overflow happened if res is non-positive, if they
1900 are different, overflow happened if res is positive. */
1901 if (op0_sign != 1 && op1_sign != 1 && op0_sign != op1_sign)
1902 emit_jump (hipart_different);
1903 else if (op0_sign == 1 || op1_sign == 1)
1904 do_compare_rtx_and_jump (hipart0, hipart1, NE, true, hmode,
1905 NULL_RTX, NULL, hipart_different,
1906 profile_probability::even ());
1907
1908 do_compare_rtx_and_jump (res, const0_rtx, LE, false, mode,
1909 NULL_RTX, NULL, do_error,
1910 profile_probability::very_unlikely ());
1911 emit_jump (done_label);
1912
1913 emit_label (hipart_different);
1914
1915 do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode,
1916 NULL_RTX, NULL, do_error,
1917 profile_probability::very_unlikely ());
1918 emit_jump (done_label);
1919 }
1920
1921 emit_label (do_overflow);
1922
1923 /* Overflow, do full multiplication and fallthru into do_error. */
1924 ops.op0 = make_tree (type, op0);
1925 ops.op1 = make_tree (type, op1);
1926 tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1927 emit_move_insn (res, tem);
1928 }
1929 else if (GET_MODE_2XWIDER_MODE (mode).exists (&wmode)
1930 && targetm.scalar_mode_supported_p (wmode))
1931 /* Even emitting a libcall is better than not detecting overflow
1932 at all. */
1933 goto twoxwider;
1934 else
1935 {
1936 gcc_assert (!is_ubsan);
1937 ops.code = MULT_EXPR;
1938 ops.type = type;
1939 res = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
1940 emit_jump (done_label);
1941 }
1942 }
1943
1944 do_error_label:
1945 emit_label (do_error);
1946 if (is_ubsan)
1947 {
1948 /* Expand the ubsan builtin call. */
1949 push_temp_slots ();
1950 fn = ubsan_build_overflow_builtin (MULT_EXPR, loc, TREE_TYPE (arg0),
1951 arg0, arg1, datap);
1952 expand_normal (fn);
1953 pop_temp_slots ();
1954 do_pending_stack_adjust ();
1955 }
1956 else if (lhs)
1957 expand_arith_set_overflow (lhs, target);
1958
1959 /* We're done. */
1960 emit_label (done_label);
1961
1962 /* u1 * u2 -> sr */
1963 if (uns0_p && uns1_p && !unsr_p)
1964 {
1965 rtx_code_label *all_done_label = gen_label_rtx ();
1966 do_compare_rtx_and_jump (res, const0_rtx, GE, false, mode, NULL_RTX,
1967 NULL, all_done_label, profile_probability::very_likely ());
1968 expand_arith_set_overflow (lhs, target);
1969 emit_label (all_done_label);
1970 }
1971
1972 /* s1 * u2 -> sr */
1973 if (!uns0_p && uns1_p && !unsr_p && pos_neg1 == 3)
1974 {
1975 rtx_code_label *all_done_label = gen_label_rtx ();
1976 rtx_code_label *set_noovf = gen_label_rtx ();
1977 do_compare_rtx_and_jump (op1, const0_rtx, GE, false, mode, NULL_RTX,
1978 NULL, all_done_label, profile_probability::very_likely ());
1979 expand_arith_set_overflow (lhs, target);
1980 do_compare_rtx_and_jump (op0, const0_rtx, EQ, true, mode, NULL_RTX,
1981 NULL, set_noovf, profile_probability::very_likely ());
1982 do_compare_rtx_and_jump (op0, constm1_rtx, NE, true, mode, NULL_RTX,
1983 NULL, all_done_label, profile_probability::very_unlikely ());
1984 do_compare_rtx_and_jump (op1, res, NE, true, mode, NULL_RTX, NULL,
1985 all_done_label, profile_probability::very_unlikely ());
1986 emit_label (set_noovf);
1987 write_complex_part (target, const0_rtx, true);
1988 emit_label (all_done_label);
1989 }
1990
1991 if (lhs)
1992 {
1993 if (is_ubsan)
1994 expand_ubsan_result_store (target, res);
1995 else
1996 expand_arith_overflow_result_store (lhs, target, mode, res);
1997 }
1998}
1999
/* Expand UBSAN_CHECK_* internal function if it has vector operands.
   LOC is the location of the call, CODE is PLUS_EXPR, MINUS_EXPR or
   MULT_EXPR, LHS is the (possibly NULL) result and ARG0/ARG1 are the
   vector operands.  Each element pair is checked individually with
   the scalar expand_*_overflow routines.  */

static void
expand_vector_ubsan_overflow (location_t loc, enum tree_code code, tree lhs,
			      tree arg0, tree arg1)
{
  /* Number of vector elements to check.  */
  int cnt = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
  rtx_code_label *loop_lab = NULL;
  rtx cntvar = NULL_RTX;
  tree cntv = NULL_TREE;
  tree eltype = TREE_TYPE (TREE_TYPE (arg0));
  tree sz = TYPE_SIZE (eltype);
  tree data = NULL_TREE;
  tree resv = NULL_TREE;
  rtx lhsr = NULL_RTX;
  rtx resvr = NULL_RTX;

  if (lhs)
    {
      optab op;
      lhsr = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
      /* If the target cannot do the whole vector operation directly,
	 the result is computed elementwise, either straight into LHS
	 (when it is addressable memory) or into a temporary that is
	 copied over at the end.  */
      if (!VECTOR_MODE_P (GET_MODE (lhsr))
	  || (op = optab_for_tree_code (code, TREE_TYPE (arg0),
					optab_default)) == unknown_optab
	  || (optab_handler (op, TYPE_MODE (TREE_TYPE (arg0)))
	      == CODE_FOR_nothing))
	{
	  if (MEM_P (lhsr))
	    resv = make_tree (TREE_TYPE (lhs), lhsr);
	  else
	    {
	      resvr = assign_temp (TREE_TYPE (lhs), 1, 1);
	      resv = make_tree (TREE_TYPE (lhs), resvr);
	    }
	}
    }
  /* For more than 4 elements emit a runtime loop over the elements
     instead of unrolling one check per element.  */
  if (cnt > 4)
    {
      do_pending_stack_adjust ();
      loop_lab = gen_label_rtx ();
      cntvar = gen_reg_rtx (TYPE_MODE (sizetype));
      cntv = make_tree (sizetype, cntvar);
      emit_move_insn (cntvar, const0_rtx);
      emit_label (loop_lab);
    }
  /* Force non-constant operands into registers once, so the element
     accesses below index into an already-evaluated value.  */
  if (TREE_CODE (arg0) != VECTOR_CST)
    {
      rtx arg0r = expand_normal (arg0);
      arg0 = make_tree (TREE_TYPE (arg0), arg0r);
    }
  if (TREE_CODE (arg1) != VECTOR_CST)
    {
      rtx arg1r = expand_normal (arg1);
      arg1 = make_tree (TREE_TYPE (arg1), arg1r);
    }
  for (int i = 0; i < (cnt > 4 ? 1 : cnt); i++)
    {
      tree op0, op1, res = NULL_TREE;
      if (cnt > 4)
	{
	  /* Loop variant: view each vector as an array and index it
	     with the loop counter, unless uniform_vector_p shows all
	     elements are the same, in which case that scalar is used
	     directly.  */
	  tree atype = build_array_type_nelts (eltype, cnt);
	  op0 = uniform_vector_p (arg0);
	  if (op0 == NULL_TREE)
	    {
	      op0 = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, arg0);
	      op0 = build4_loc (loc, ARRAY_REF, eltype, op0, cntv,
				NULL_TREE, NULL_TREE);
	    }
	  op1 = uniform_vector_p (arg1);
	  if (op1 == NULL_TREE)
	    {
	      op1 = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, arg1);
	      op1 = build4_loc (loc, ARRAY_REF, eltype, op1, cntv,
				NULL_TREE, NULL_TREE);
	    }
	  if (resv)
	    {
	      res = fold_build1_loc (loc, VIEW_CONVERT_EXPR, atype, resv);
	      res = build4_loc (loc, ARRAY_REF, eltype, res, cntv,
				NULL_TREE, NULL_TREE);
	    }
	}
      else
	{
	  /* Unrolled variant: extract element I with BIT_FIELD_REFs.  */
	  tree bitpos = bitsize_int (tree_to_uhwi (sz) * i);
	  op0 = fold_build3_loc (loc, BIT_FIELD_REF, eltype, arg0, sz, bitpos);
	  op1 = fold_build3_loc (loc, BIT_FIELD_REF, eltype, arg1, sz, bitpos);
	  if (resv)
	    res = fold_build3_loc (loc, BIT_FIELD_REF, eltype, resv, sz,
				   bitpos);
	}
      switch (code)
	{
	case PLUS_EXPR:
	  expand_addsub_overflow (loc, PLUS_EXPR, res, op0, op1,
				  false, false, false, true, &data);
	  break;
	case MINUS_EXPR:
	  /* 0 - x is expanded as a checked negation.  In the loop
	     variant the whole vector must be zero for that.  */
	  if (cnt > 4 ? integer_zerop (arg0) : integer_zerop (op0))
	    expand_neg_overflow (loc, res, op1, true, &data);
	  else
	    expand_addsub_overflow (loc, MINUS_EXPR, res, op0, op1,
				    false, false, false, true, &data);
	  break;
	case MULT_EXPR:
	  expand_mul_overflow (loc, res, op0, op1, false, false, false,
			       true, &data);
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  if (cnt > 4)
    {
      /* Increment the counter and loop back while it is below CNT.  */
      struct separate_ops ops;
      ops.code = PLUS_EXPR;
      ops.type = TREE_TYPE (cntv);
      ops.op0 = cntv;
      ops.op1 = build_int_cst (TREE_TYPE (cntv), 1);
      ops.op2 = NULL_TREE;
      ops.location = loc;
      rtx ret = expand_expr_real_2 (&ops, cntvar, TYPE_MODE (sizetype),
				    EXPAND_NORMAL);
      if (ret != cntvar)
	emit_move_insn (cntvar, ret);
      do_compare_rtx_and_jump (cntvar, GEN_INT (cnt), NE, false,
			       TYPE_MODE (sizetype), NULL_RTX, NULL, loop_lab,
			       profile_probability::very_likely ());
    }
  if (lhs && resv == NULL_TREE)
    {
      /* The target handles the vector operation directly; compute the
	 whole result at once into LHS.  */
      struct separate_ops ops;
      ops.code = code;
      ops.type = TREE_TYPE (arg0);
      ops.op0 = arg0;
      ops.op1 = arg1;
      ops.op2 = NULL_TREE;
      ops.location = loc;
      rtx ret = expand_expr_real_2 (&ops, lhsr, TYPE_MODE (TREE_TYPE (arg0)),
				    EXPAND_NORMAL);
      if (ret != lhsr)
	emit_move_insn (lhsr, ret);
    }
  else if (resvr)
    /* Copy the elementwise-computed temporary into LHS.  */
    emit_move_insn (lhsr, resvr);
}
2146
2147/* Expand UBSAN_CHECK_ADD call STMT. */
2148
2149static void
2150expand_UBSAN_CHECK_ADD (internal_fn, gcall *stmt)
2151{
2152 location_t loc = gimple_location (stmt);
2153 tree lhs = gimple_call_lhs (stmt);
2154 tree arg0 = gimple_call_arg (stmt, 0);
2155 tree arg1 = gimple_call_arg (stmt, 1);
2156 if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
2157 expand_vector_ubsan_overflow (loc, PLUS_EXPR, lhs, arg0, arg1);
2158 else
2159 expand_addsub_overflow (loc, PLUS_EXPR, lhs, arg0, arg1,
2160 false, false, false, true, NULL);
2161}
2162
2163/* Expand UBSAN_CHECK_SUB call STMT. */
2164
2165static void
2166expand_UBSAN_CHECK_SUB (internal_fn, gcall *stmt)
2167{
2168 location_t loc = gimple_location (stmt);
2169 tree lhs = gimple_call_lhs (stmt);
2170 tree arg0 = gimple_call_arg (stmt, 0);
2171 tree arg1 = gimple_call_arg (stmt, 1);
2172 if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
2173 expand_vector_ubsan_overflow (loc, MINUS_EXPR, lhs, arg0, arg1);
2174 else if (integer_zerop (arg0))
2175 expand_neg_overflow (loc, lhs, arg1, true, NULL);
2176 else
2177 expand_addsub_overflow (loc, MINUS_EXPR, lhs, arg0, arg1,
2178 false, false, false, true, NULL);
2179}
2180
2181/* Expand UBSAN_CHECK_MUL call STMT. */
2182
2183static void
2184expand_UBSAN_CHECK_MUL (internal_fn, gcall *stmt)
2185{
2186 location_t loc = gimple_location (stmt);
2187 tree lhs = gimple_call_lhs (stmt);
2188 tree arg0 = gimple_call_arg (stmt, 0);
2189 tree arg1 = gimple_call_arg (stmt, 1);
2190 if (VECTOR_TYPE_P (TREE_TYPE (arg0)))
2191 expand_vector_ubsan_overflow (loc, MULT_EXPR, lhs, arg0, arg1);
2192 else
2193 expand_mul_overflow (loc, lhs, arg0, arg1, false, false, false, true,
2194 NULL);
2195}
2196
/* Helper function for {ADD,SUB,MUL}_OVERFLOW call stmt expansion.
   CODE is PLUS_EXPR, MINUS_EXPR or MULT_EXPR and STMT is the call.
   The LHS is a complex value: the arithmetic result and an overflow
   flag.  The strategy is chosen from the (possibly range-narrowed)
   signedness and precision of the operands versus the result.  */

static void
expand_arith_overflow (enum tree_code code, gimple *stmt)
{
  tree lhs = gimple_call_lhs (stmt);
  /* Nothing to do when the result is unused.  */
  if (lhs == NULL_TREE)
    return;
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  /* TYPE is the scalar element type of the complex LHS.  */
  tree type = TREE_TYPE (TREE_TYPE (lhs));
  int uns0_p = TYPE_UNSIGNED (TREE_TYPE (arg0));
  int uns1_p = TYPE_UNSIGNED (TREE_TYPE (arg1));
  int unsr_p = TYPE_UNSIGNED (type);
  int prec0 = TYPE_PRECISION (TREE_TYPE (arg0));
  int prec1 = TYPE_PRECISION (TREE_TYPE (arg1));
  int precres = TYPE_PRECISION (type);
  location_t loc = gimple_location (stmt);
  /* Operands proven non-negative by VRP can be treated as unsigned.  */
  if (!uns0_p && get_range_pos_neg (arg0) == 1)
    uns0_p = true;
  if (!uns1_p && get_range_pos_neg (arg1) == 1)
    uns1_p = true;
  /* Shrink the operand precisions to the minimum their value ranges
     actually need.  */
  int pr = get_min_precision (arg0, uns0_p ? UNSIGNED : SIGNED);
  prec0 = MIN (prec0, pr);
  pr = get_min_precision (arg1, uns1_p ? UNSIGNED : SIGNED);
  prec1 = MIN (prec1, pr);

  /* If uns0_p && uns1_p, precop is minimum needed precision
     of unsigned type to hold the exact result, otherwise
     precop is minimum needed precision of signed type to
     hold the exact result.  */
  int precop;
  if (code == MULT_EXPR)
    precop = prec0 + prec1 + (uns0_p != uns1_p);
  else
    {
      if (uns0_p == uns1_p)
	precop = MAX (prec0, prec1) + 1;
      else if (uns0_p)
	precop = MAX (prec0 + 1, prec1) + 1;
      else
	precop = MAX (prec0, prec1 + 1) + 1;
    }
  int orig_precres = precres;

  /* Retry loop: each iteration either expands the operation or widens
     TYPE/PRECRES and tries again.  */
  do
    {
      if ((uns0_p && uns1_p)
	  ? ((precop + !unsr_p) <= precres
	     /* u1 - u2 -> ur can overflow, no matter what precision
		the result has.  */
	     && (code != MINUS_EXPR || !unsr_p))
	  : (!unsr_p && precop <= precres))
	{
	  /* The infinity precision result will always fit into result.  */
	  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
	  /* The overflow flag (imaginary part) is always zero here.  */
	  write_complex_part (target, const0_rtx, true);
	  scalar_int_mode mode = SCALAR_INT_TYPE_MODE (type);
	  struct separate_ops ops;
	  ops.code = code;
	  ops.type = type;
	  ops.op0 = fold_convert_loc (loc, type, arg0);
	  ops.op1 = fold_convert_loc (loc, type, arg1);
	  ops.op2 = NULL_TREE;
	  ops.location = loc;
	  rtx tem = expand_expr_real_2 (&ops, NULL_RTX, mode, EXPAND_NORMAL);
	  expand_arith_overflow_result_store (lhs, target, mode, tem);
	  return;
	}

      /* For operations with low precision, if target doesn't have them, start
	 with precres widening right away, otherwise do it only if the most
	 simple cases can't be used.  */
      const int min_precision = targetm.min_arithmetic_precision ();
      if (orig_precres == precres && precres < min_precision)
	;
      else if ((uns0_p && uns1_p && unsr_p && prec0 <= precres
		&& prec1 <= precres)
	       || ((!uns0_p || !uns1_p) && !unsr_p
		   && prec0 + uns0_p <= precres
		   && prec1 + uns1_p <= precres))
	{
	  /* Operands fit the result type with matching signedness:
	     expand directly in TYPE.  */
	  arg0 = fold_convert_loc (loc, type, arg0);
	  arg1 = fold_convert_loc (loc, type, arg1);
	  switch (code)
	    {
	    case MINUS_EXPR:
	      /* 0 - x is a checked negation.  */
	      if (integer_zerop (arg0) && !unsr_p)
		{
		  expand_neg_overflow (loc, lhs, arg1, false, NULL);
		  return;
		}
	      /* FALLTHRU */
	    case PLUS_EXPR:
	      expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
				      unsr_p, unsr_p, false, NULL);
	      return;
	    case MULT_EXPR:
	      expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
				   unsr_p, unsr_p, false, NULL);
	      return;
	    default:
	      gcc_unreachable ();
	    }
	}

      /* For sub-word operations, retry with a wider type first.  */
      if (orig_precres == precres && precop <= BITS_PER_WORD)
	{
	  int p = MAX (min_precision, precop);
	  scalar_int_mode m = smallest_int_mode_for_size (p);
	  tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
							uns0_p && uns1_p
							&& unsr_p);
	  p = TYPE_PRECISION (optype);
	  if (p > precres)
	    {
	      precres = p;
	      unsr_p = TYPE_UNSIGNED (optype);
	      type = optype;
	      continue;
	    }
	}

      if (prec0 <= precres && prec1 <= precres)
	{
	  /* Mixed signedness: pick a signed and an unsigned type of
	     PRECRES bits and convert each operand to the matching one.  */
	  tree types[2];
	  if (unsr_p)
	    {
	      types[0] = build_nonstandard_integer_type (precres, 0);
	      types[1] = type;
	    }
	  else
	    {
	      types[0] = type;
	      types[1] = build_nonstandard_integer_type (precres, 1);
	    }
	  arg0 = fold_convert_loc (loc, types[uns0_p], arg0);
	  arg1 = fold_convert_loc (loc, types[uns1_p], arg1);
	  if (code != MULT_EXPR)
	    expand_addsub_overflow (loc, code, lhs, arg0, arg1, unsr_p,
				    uns0_p, uns1_p, false, NULL);
	  else
	    expand_mul_overflow (loc, lhs, arg0, arg1, unsr_p,
				 uns0_p, uns1_p, false, NULL);
	  return;
	}

      /* Retry with a wider type.  */
      if (orig_precres == precres)
	{
	  int p = MAX (prec0, prec1);
	  scalar_int_mode m = smallest_int_mode_for_size (p);
	  tree optype = build_nonstandard_integer_type (GET_MODE_PRECISION (m),
							uns0_p && uns1_p
							&& unsr_p);
	  p = TYPE_PRECISION (optype);
	  if (p > precres)
	    {
	      precres = p;
	      unsr_p = TYPE_UNSIGNED (optype);
	      type = optype;
	      continue;
	    }
	}

      /* One of the cases above must have applied.  */
      gcc_unreachable ();
    }
  while (1);
}
2367
/* Expand ADD_OVERFLOW STMT: overflow-checked addition whose complex
   result holds the value and an overflow flag.  */

static void
expand_ADD_OVERFLOW (internal_fn, gcall *stmt)
{
  expand_arith_overflow (PLUS_EXPR, stmt);
}
2375
/* Expand SUB_OVERFLOW STMT: overflow-checked subtraction whose complex
   result holds the value and an overflow flag.  */

static void
expand_SUB_OVERFLOW (internal_fn, gcall *stmt)
{
  expand_arith_overflow (MINUS_EXPR, stmt);
}
2383
/* Expand MUL_OVERFLOW STMT: overflow-checked multiplication whose
   complex result holds the value and an overflow flag.  */

static void
expand_MUL_OVERFLOW (internal_fn, gcall *stmt)
{
  expand_arith_overflow (MULT_EXPR, stmt);
}
2391
/* This should get folded in tree-vectorizer.c.  Reaching expansion
   with an IFN_LOOP_VECTORIZED call still present is a bug.  */

static void
expand_LOOP_VECTORIZED (internal_fn, gcall *)
{
  gcc_unreachable ();
}
2399
/* This should get folded in tree-vectorizer.c.  Reaching expansion
   with an IFN_LOOP_DIST_ALIAS call still present is a bug.  */

static void
expand_LOOP_DIST_ALIAS (internal_fn, gcall *)
{
  gcc_unreachable ();
}
2407
/* Expand MASK_LOAD call STMT using optab OPTAB.  The call's arguments
   are the base address, the alignment (an integer constant, compared
   against TYPE_ALIGN) and the mask.  */

static void
expand_mask_load_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
  struct expand_operand ops[3];
  tree type, lhs, rhs, maskt, ptr;
  rtx mem, target, mask;
  unsigned align;

  maskt = gimple_call_arg (stmt, 2);
  lhs = gimple_call_lhs (stmt);
  /* Without a destination there is nothing to load into.  */
  if (lhs == NULL_TREE)
    return;
  type = TREE_TYPE (lhs);
  /* Build a MEM_REF of the destination type over the base address,
     re-aligning the type if the call's alignment differs.  */
  ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), 0);
  align = tree_to_shwi (gimple_call_arg (stmt, 1));
  if (TYPE_ALIGN (type) != align)
    type = build_aligned_type (type, align);
  rhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0), ptr);

  /* NOTE(review): RHS is expanded with EXPAND_WRITE, apparently so the
     MEM is produced without emitting a read -- confirm before changing.  */
  mem = expand_expr (rhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  gcc_assert (MEM_P (mem));
  mask = expand_normal (maskt);
  target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
  create_output_operand (&ops[0], target, TYPE_MODE (type));
  create_fixed_operand (&ops[1], mem);
  create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
  expand_insn (convert_optab_handler (optab, TYPE_MODE (type),
				      TYPE_MODE (TREE_TYPE (maskt))),
	       3, ops);
}
2440
2441/* Expand MASK_STORE call STMT using optab OPTAB. */
2442
2443static void
2444expand_mask_store_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
2445{
2446 struct expand_operand ops[3];
2447 tree type, lhs, rhs, maskt, ptr;
2448 rtx mem, reg, mask;
2449 unsigned align;
2450
2451 maskt = gimple_call_arg (stmt, 2);
2452 rhs = gimple_call_arg (stmt, 3);
2453 type = TREE_TYPE (rhs);
2454 ptr = build_int_cst (TREE_TYPE (gimple_call_arg (stmt, 1)), 0);
2455 align = tree_to_shwi (gimple_call_arg (stmt, 1));
2456 if (TYPE_ALIGN (type) != align)
2457 type = build_aligned_type (type, align);
2458 lhs = fold_build2 (MEM_REF, type, gimple_call_arg (stmt, 0), ptr);
2459
2460 mem = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2461 gcc_assert (MEM_P (mem));
2462 mask = expand_normal (maskt);
2463 reg = expand_normal (rhs);
2464 create_fixed_operand (&ops[0], mem);
2465 create_input_operand (&ops[1], reg, TYPE_MODE (type));
2466 create_input_operand (&ops[2], mask, TYPE_MODE (TREE_TYPE (maskt)));
2467 expand_insn (convert_optab_handler (optab, TYPE_MODE (type),
2468 TYPE_MODE (TREE_TYPE (maskt))),
2469 3, ops);
2470}
2471
/* IFN_ABNORMAL_DISPATCHER expands to no code at all.  */

static void
expand_ABNORMAL_DISPATCHER (internal_fn, gcall *)
{
}
2476
2477static void
2478expand_BUILTIN_EXPECT (internal_fn, gcall *stmt)
2479{
2480 /* When guessing was done, the hints should be already stripped away. */
2481 gcc_assert (!flag_guess_branch_prob || optimize == 0 || seen_error ());
2482
2483 rtx target;
2484 tree lhs = gimple_call_lhs (stmt);
2485 if (lhs)
2486 target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2487 else
2488 target = const0_rtx;
2489 rtx val = expand_expr (gimple_call_arg (stmt, 0), target, VOIDmode, EXPAND_NORMAL);
2490 if (lhs && val != target)
2491 emit_move_insn (target, val);
2492}
2493
/* IFN_VA_ARG is supposed to be expanded at pass_stdarg.  So this dummy function
   should never be called; reaching it means a VA_ARG survived to
   expansion, which is a bug.  */

static void
expand_VA_ARG (internal_fn, gcall *)
{
  gcc_unreachable ();
}
2502
/* Expand the IFN_UNIQUE function according to its first argument,
   an ifn_unique_kind code that selects which target marker insn to
   emit (if the target provides one).  */

static void
expand_UNIQUE (internal_fn, gcall *stmt)
{
  rtx pattern = NULL_RTX;
  enum ifn_unique_kind kind
    = (enum ifn_unique_kind) TREE_INT_CST_LOW (gimple_call_arg (stmt, 0));

  switch (kind)
    {
    default:
      gcc_unreachable ();

    case IFN_UNIQUE_UNSPEC:
      /* Emit the target's generic unique marker, if it has one;
	 otherwise the call expands to nothing.  */
      if (targetm.have_unique ())
	pattern = targetm.gen_unique ();
      break;

    case IFN_UNIQUE_OACC_FORK:
    case IFN_UNIQUE_OACC_JOIN:
      /* OpenACC fork/join markers require the target to provide both
	 insns; otherwise this call should not have been generated.  */
      if (targetm.have_oacc_fork () && targetm.have_oacc_join ())
	{
	  tree lhs = gimple_call_lhs (stmt);
	  rtx target = const0_rtx;

	  if (lhs)
	    target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);

	  /* Arg 1 is the data dependency, arg 2 the partitioning axis.  */
	  rtx data_dep = expand_normal (gimple_call_arg (stmt, 1));
	  rtx axis = expand_normal (gimple_call_arg (stmt, 2));

	  if (kind == IFN_UNIQUE_OACC_FORK)
	    pattern = targetm.gen_oacc_fork (target, data_dep, axis);
	  else
	    pattern = targetm.gen_oacc_join (target, data_dep, axis);
	}
      else
	gcc_unreachable ();
      break;
    }

  if (pattern)
    emit_insn (pattern);
}
2548
2549/* The size of an OpenACC compute dimension. */
2550
2551static void
2552expand_GOACC_DIM_SIZE (internal_fn, gcall *stmt)
2553{
2554 tree lhs = gimple_call_lhs (stmt);
2555
2556 if (!lhs)
2557 return;
2558
2559 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2560 if (targetm.have_oacc_dim_size ())
2561 {
2562 rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
2563 VOIDmode, EXPAND_NORMAL);
2564 emit_insn (targetm.gen_oacc_dim_size (target, dim));
2565 }
2566 else
2567 emit_move_insn (target, GEN_INT (1));
2568}
2569
2570/* The position of an OpenACC execution engine along one compute axis. */
2571
2572static void
2573expand_GOACC_DIM_POS (internal_fn, gcall *stmt)
2574{
2575 tree lhs = gimple_call_lhs (stmt);
2576
2577 if (!lhs)
2578 return;
2579
2580 rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
2581 if (targetm.have_oacc_dim_pos ())
2582 {
2583 rtx dim = expand_expr (gimple_call_arg (stmt, 0), NULL_RTX,
2584 VOIDmode, EXPAND_NORMAL);
2585 emit_insn (targetm.gen_oacc_dim_pos (target, dim));
2586 }
2587 else
2588 emit_move_insn (target, const0_rtx);
2589}
2590
/* This is expanded by oacc_device_lower pass; reaching RTL expansion
   with it still present is a bug.  */

static void
expand_GOACC_LOOP (internal_fn, gcall *)
{
  gcc_unreachable ();
}
2598
/* This is expanded by oacc_device_lower pass; reaching RTL expansion
   with it still present is a bug.  */

static void
expand_GOACC_REDUCTION (internal_fn, gcall *)
{
  gcc_unreachable ();
}
2606
/* This is expanded by oacc_device_lower pass; reaching RTL expansion
   with it still present is a bug.  */

static void
expand_GOACC_TILE (internal_fn, gcall *)
{
  gcc_unreachable ();
}
2614
/* Set errno to EDOM.  Only usable on targets that define TARGET_EDOM;
   callers are expected to have checked set_edom_supported_p.  */

static void
expand_SET_EDOM (internal_fn, gcall *)
{
#ifdef TARGET_EDOM
  /* Targets may supply a dedicated rtx for errno; otherwise address it
     through its symbol.  */
#ifdef GEN_ERRNO_RTX
  rtx errno_rtx = GEN_ERRNO_RTX;
#else
  rtx errno_rtx = gen_rtx_MEM (word_mode, gen_rtx_SYMBOL_REF (Pmode, "errno"));
#endif
  emit_move_insn (errno_rtx,
		  gen_int_mode (TARGET_EDOM, GET_MODE (errno_rtx)));
#else
  gcc_unreachable ();
#endif
}
2632
/* Expand atomic bit test and set; the shared helper dispatches on the
   call's internal function code.  */

static void
expand_ATOMIC_BIT_TEST_AND_SET (internal_fn, gcall *call)
{
  expand_ifn_atomic_bit_test_and (call);
}
2640
/* Expand atomic bit test and complement; the shared helper dispatches
   on the call's internal function code.  */

static void
expand_ATOMIC_BIT_TEST_AND_COMPLEMENT (internal_fn, gcall *call)
{
  expand_ifn_atomic_bit_test_and (call);
}
2648
/* Expand atomic bit test and reset; the shared helper dispatches on
   the call's internal function code.  */

static void
expand_ATOMIC_BIT_TEST_AND_RESET (internal_fn, gcall *call)
{
  expand_ifn_atomic_bit_test_and (call);
}
2656
/* Expand atomic compare and exchange.  (The previous comment was a
   copy-paste of the bit-test-and-set one.)  */

static void
expand_ATOMIC_COMPARE_EXCHANGE (internal_fn, gcall *call)
{
  expand_ifn_atomic_compare_exchange (call);
}
2664
2665/* Expand LAUNDER to assignment, lhs = arg0. */
2666
2667static void
2668expand_LAUNDER (internal_fn, gcall *call)
2669{
2670 tree lhs = gimple_call_lhs (call);
2671
2672 if (!lhs)
2673 return;
2674
2675 expand_assignment (lhs, gimple_call_arg (call, 0), false);
2676}
2677
/* Expand DIVMOD() using:
   a) optab handler for udivmod/sdivmod if it is available.
   b) If optab_handler doesn't exist, generate call to
      target-specific divmod libfunc.  */

static void
expand_DIVMOD (internal_fn, gcall *call_stmt)
{
  tree lhs = gimple_call_lhs (call_stmt);
  tree arg0 = gimple_call_arg (call_stmt, 0);
  tree arg1 = gimple_call_arg (call_stmt, 1);

  /* The LHS is a complex value: real part quotient, imaginary part
     remainder.  */
  gcc_assert (TREE_CODE (TREE_TYPE (lhs)) == COMPLEX_TYPE);
  tree type = TREE_TYPE (TREE_TYPE (lhs));
  machine_mode mode = TYPE_MODE (type);
  bool unsignedp = TYPE_UNSIGNED (type);
  /* Signedness of the element type selects the optab.  */
  optab tab = (unsignedp) ? udivmod_optab : sdivmod_optab;

  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);

  rtx quotient, remainder, libfunc;

  /* Check if optab_handler exists for divmod_optab for given mode.  */
  if (optab_handler (tab, mode) != CODE_FOR_nothing)
    {
      quotient = gen_reg_rtx (mode);
      remainder = gen_reg_rtx (mode);
      expand_twoval_binop (tab, op0, op1, quotient, remainder, unsignedp);
    }

  /* Generate call to divmod libfunc if it exists.  */
  else if ((libfunc = optab_libfunc (tab, mode)) != NULL_RTX)
    targetm.expand_divmod_libfunc (libfunc, mode, op0, op1,
				   &quotient, &remainder);

  else
    gcc_unreachable ();

  /* Wrap the return value (quotient, remainder) within COMPLEX_EXPR.  */
  expand_expr (build2 (COMPLEX_EXPR, TREE_TYPE (lhs),
		       make_tree (TREE_TYPE (arg0), quotient),
		       make_tree (TREE_TYPE (arg1), remainder)),
	       target, VOIDmode, EXPAND_NORMAL);
}
2724
/* Expand a call to FN using the operands in STMT.  FN has a single
   output operand and NARGS input operands.  OPTAB supplies the insn;
   the instance chosen is the one for the mode of the function's
   "type0" type.  */

static void
expand_direct_optab_fn (internal_fn fn, gcall *stmt, direct_optab optab,
			unsigned int nargs)
{
  expand_operand *ops = XALLOCAVEC (expand_operand, nargs + 1);

  tree_pair types = direct_internal_fn_types (fn, stmt);
  insn_code icode = direct_optab_handler (optab, TYPE_MODE (types.first));

  tree lhs = gimple_call_lhs (stmt);
  tree lhs_type = TREE_TYPE (lhs);
  rtx lhs_rtx = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);

  /* Do not assign directly to a promoted subreg, since there is no
     guarantee that the instruction will leave the upper bits of the
     register in the state required by SUBREG_PROMOTED_SIGN.  */
  rtx dest = lhs_rtx;
  if (GET_CODE (dest) == SUBREG && SUBREG_PROMOTED_VAR_P (dest))
    dest = NULL_RTX;

  create_output_operand (&ops[0], dest, insn_data[icode].operand[0].mode);

  /* Set up the NARGS inputs, letting integral arguments be converted
     to whatever mode the insn pattern wants.  */
  for (unsigned int i = 0; i < nargs; ++i)
    {
      tree rhs = gimple_call_arg (stmt, i);
      tree rhs_type = TREE_TYPE (rhs);
      rtx rhs_rtx = expand_normal (rhs);
      if (INTEGRAL_TYPE_P (rhs_type))
	create_convert_operand_from (&ops[i + 1], rhs_rtx,
				     TYPE_MODE (rhs_type),
				     TYPE_UNSIGNED (rhs_type));
      else
	create_input_operand (&ops[i + 1], rhs_rtx, TYPE_MODE (rhs_type));
    }

  expand_insn (icode, nargs + 1, ops);
  if (!rtx_equal_p (lhs_rtx, ops[0].value))
    {
      /* If the return value has an integral type, convert the instruction
	 result to that type.  This is useful for things that return an
	 int regardless of the size of the input.  If the instruction result
	 is smaller than required, assume that it is signed.

	 If the return value has a nonintegral type, its mode must match
	 the instruction result.  */
      if (GET_CODE (lhs_rtx) == SUBREG && SUBREG_PROMOTED_VAR_P (lhs_rtx))
	{
	  /* If this is a scalar in a register that is stored in a wider
	     mode than the declared mode, compute the result into its
	     declared mode and then convert to the wider mode.  */
	  gcc_checking_assert (INTEGRAL_TYPE_P (lhs_type));
	  rtx tmp = convert_to_mode (GET_MODE (lhs_rtx), ops[0].value, 0);
	  convert_move (SUBREG_REG (lhs_rtx), tmp,
			SUBREG_PROMOTED_SIGN (lhs_rtx));
	}
      else if (GET_MODE (lhs_rtx) == GET_MODE (ops[0].value))
	emit_move_insn (lhs_rtx, ops[0].value);
      else
	{
	  gcc_checking_assert (INTEGRAL_TYPE_P (lhs_type));
	  convert_move (lhs_rtx, ops[0].value, 0);
	}
    }
}
2792
/* Expanders for optabs that can use expand_direct_optab_fn.  The final
   argument is the number of input operands the internal function
   takes.  */

#define expand_unary_optab_fn(FN, STMT, OPTAB) \
  expand_direct_optab_fn (FN, STMT, OPTAB, 1)

#define expand_binary_optab_fn(FN, STMT, OPTAB) \
  expand_direct_optab_fn (FN, STMT, OPTAB, 2)
2800
2801/* RETURN_TYPE and ARGS are a return type and argument list that are
2802 in principle compatible with FN (which satisfies direct_internal_fn_p).
2803 Return the types that should be used to determine whether the
2804 target supports FN. */
2805
2806tree_pair
2807direct_internal_fn_types (internal_fn fn, tree return_type, tree *args)
2808{
2809 const direct_internal_fn_info &info = direct_internal_fn (fn);
2810 tree type0 = (info.type0 < 0 ? return_type : TREE_TYPE (args[info.type0]));
2811 tree type1 = (info.type1 < 0 ? return_type : TREE_TYPE (args[info.type1]));
2812 return tree_pair (type0, type1);
2813}
2814
2815/* CALL is a call whose return type and arguments are in principle
2816 compatible with FN (which satisfies direct_internal_fn_p). Return the
2817 types that should be used to determine whether the target supports FN. */
2818
2819tree_pair
2820direct_internal_fn_types (internal_fn fn, gcall *call)
2821{
2822 const direct_internal_fn_info &info = direct_internal_fn (fn);
2823 tree op0 = (info.type0 < 0
2824 ? gimple_call_lhs (call)
2825 : gimple_call_arg (call, info.type0));
2826 tree op1 = (info.type1 < 0
2827 ? gimple_call_lhs (call)
2828 : gimple_call_arg (call, info.type1));
2829 return tree_pair (TREE_TYPE (op0), TREE_TYPE (op1));
2830}
2831
2832/* Return true if OPTAB is supported for TYPES (whose modes should be
2833 the same) when the optimization type is OPT_TYPE. Used for simple
2834 direct optabs. */
2835
2836static bool
2837direct_optab_supported_p (direct_optab optab, tree_pair types,
2838 optimization_type opt_type)
2839{
2840 machine_mode mode = TYPE_MODE (types.first);
2841 gcc_checking_assert (mode == TYPE_MODE (types.second));
2842 return direct_optab_handler (optab, mode, opt_type) != CODE_FOR_nothing;
2843}
2844
2845/* Return true if load/store lanes optab OPTAB is supported for
2846 array type TYPES.first when the optimization type is OPT_TYPE. */
2847
2848static bool
2849multi_vector_optab_supported_p (convert_optab optab, tree_pair types,
2850 optimization_type opt_type)
2851{
2852 gcc_assert (TREE_CODE (types.first) == ARRAY_TYPE);
2853 machine_mode imode = TYPE_MODE (types.first);
2854 machine_mode vmode = TYPE_MODE (TREE_TYPE (types.first));
2855 return (convert_optab_handler (optab, imode, vmode, opt_type)
2856 != CODE_FOR_nothing);
2857}
2858
/* Map each direct internal function "TYPE" (as named in internal-fn.def)
   to the predicate that tests target support for its optab.  */
#define direct_unary_optab_supported_p direct_optab_supported_p
#define direct_binary_optab_supported_p direct_optab_supported_p
#define direct_mask_load_optab_supported_p direct_optab_supported_p
#define direct_load_lanes_optab_supported_p multi_vector_optab_supported_p
#define direct_mask_store_optab_supported_p direct_optab_supported_p
#define direct_store_lanes_optab_supported_p multi_vector_optab_supported_p
2865
/* Return the optab used by internal function FN.  TYPES supplies the
   type whose signedness selects between the signed and unsigned optab
   for DEF_INTERNAL_SIGNED_OPTAB_FN functions.  */

static optab
direct_internal_fn_optab (internal_fn fn, tree_pair types)
{
  /* The switch cases are generated from internal-fn.def; functions
     with no optab fall through to gcc_unreachable.  */
  switch (fn)
    {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
    case IFN_##CODE: break;
#define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
    case IFN_##CODE: return OPTAB##_optab;
#define DEF_INTERNAL_SIGNED_OPTAB_FN(CODE, FLAGS, SELECTOR, SIGNED_OPTAB, \
				     UNSIGNED_OPTAB, TYPE)		\
    case IFN_##CODE: return (TYPE_UNSIGNED (types.SELECTOR)		\
			     ? UNSIGNED_OPTAB ## _optab			\
			     : SIGNED_OPTAB ## _optab);
#include "internal-fn.def"

    case IFN_LAST:
      break;
    }
  gcc_unreachable ();
}
2889
/* Return true if FN is supported for the types in TYPES when the
   optimization type is OPT_TYPE.  The types are those associated with
   the "type0" and "type1" fields of FN's direct_internal_fn_info
   structure.  */

bool
direct_internal_fn_supported_p (internal_fn fn, tree_pair types,
				optimization_type opt_type)
{
  /* The switch cases are generated from internal-fn.def; each direct
     function dispatches to the direct_<TYPE>_optab_supported_p
     predicate for its optab, non-direct functions fall through to
     gcc_unreachable.  */
  switch (fn)
    {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) \
    case IFN_##CODE: break;
#define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
    case IFN_##CODE: \
      return direct_##TYPE##_optab_supported_p (OPTAB##_optab, types, \
						opt_type);
#define DEF_INTERNAL_SIGNED_OPTAB_FN(CODE, FLAGS, SELECTOR, SIGNED_OPTAB, \
				     UNSIGNED_OPTAB, TYPE)		\
    case IFN_##CODE:							\
      {									\
	optab which_optab = (TYPE_UNSIGNED (types.SELECTOR)		\
			     ? UNSIGNED_OPTAB ## _optab			\
			     : SIGNED_OPTAB ## _optab);			\
	return direct_##TYPE##_optab_supported_p (which_optab, types,	\
						  opt_type);		\
      }
#include "internal-fn.def"

    case IFN_LAST:
      break;
    }
  gcc_unreachable ();
}
2924
2925/* Return true if FN is supported for type TYPE when the optimization
2926 type is OPT_TYPE. The caller knows that the "type0" and "type1"
2927 fields of FN's direct_internal_fn_info structure are the same. */
2928
2929bool
2930direct_internal_fn_supported_p (internal_fn fn, tree type,
2931 optimization_type opt_type)
2932{
2933 const direct_internal_fn_info &info = direct_internal_fn (fn);
2934 gcc_checking_assert (info.type0 == info.type1);
2935 return direct_internal_fn_supported_p (fn, tree_pair (type, type), opt_type);
2936}
2937
/* Return true if IFN_SET_EDOM is supported, i.e. whether the target
   defines TARGET_EDOM.  */

bool
set_edom_supported_p (void)
{
  bool supported = false;
#ifdef TARGET_EDOM
  supported = true;
#endif
  return supported;
}
2949
/* Instantiate an expand_<CODE> routine for every optab-based internal
   function in internal-fn.def; each forwards to the matching
   expand_<TYPE>_optab_fn helper, picking the signed or unsigned optab
   where the function distinguishes them.  */
#define DEF_INTERNAL_OPTAB_FN(CODE, FLAGS, OPTAB, TYPE) \
  static void \
  expand_##CODE (internal_fn fn, gcall *stmt) \
  { \
    expand_##TYPE##_optab_fn (fn, stmt, OPTAB##_optab); \
  }
#define DEF_INTERNAL_SIGNED_OPTAB_FN(CODE, FLAGS, SELECTOR, SIGNED_OPTAB, \
				     UNSIGNED_OPTAB, TYPE)		\
  static void								\
  expand_##CODE (internal_fn fn, gcall *stmt)				\
  {									\
    tree_pair types = direct_internal_fn_types (fn, stmt);		\
    optab which_optab = direct_internal_fn_optab (fn, types);		\
    expand_##TYPE##_optab_fn (fn, stmt, which_optab);			\
  }
#include "internal-fn.def"
2966
/* Routines to expand each internal function, indexed by function number.
   Each routine has the prototype:

       expand_<NAME> (internal_fn fn, gcall *stmt)

   where FN identifies the function and STMT is the statement that
   performs the call.  (The previous comment omitted the internal_fn
   parameter.)  */
static void (*const internal_fn_expanders[]) (internal_fn, gcall *) = {
#define DEF_INTERNAL_FN(CODE, FLAGS, FNSPEC) expand_##CODE,
#include "internal-fn.def"
  0
};
2978
/* Expand STMT as though it were a call to internal function FN.
   Dispatches through the internal_fn_expanders table.  */

void
expand_internal_call (internal_fn fn, gcall *stmt)
{
  internal_fn_expanders[fn] (fn, stmt);
}
2986
/* Expand STMT, which is a call to internal function FN; convenience
   overload that reads the function code from the call itself.  */

void
expand_internal_call (gcall *stmt)
{
  expand_internal_call (gimple_call_internal_fn (stmt), stmt);
}
2994
/* IFN_PHI should never survive to RTL expansion; reaching this
   expander is a bug.  */

void
expand_PHI (internal_fn, gcall *)
{
  gcc_unreachable ();
}
3000