/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
19
20
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "memmodel.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "varasm.h"
#include "flags.h"
#include "selftest.h"
#include "selftest-rtl.h"
38
39/* Simplification and canonicalization of RTL. */
40
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
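/* For example, with a 64-bit HOST_WIDE_INT, HWI_SIGN_EXTEND (-1) is
   HOST_WIDE_INT_M1 and HWI_SIGN_EXTEND (1) is 0, i.e. the high half
   that sign extending the low half would produce.  */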
47
48static rtx neg_const_int (machine_mode, const_rtx);
49static bool plus_minus_operand_p (const_rtx);
50static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
51static rtx simplify_associative_operation (enum rtx_code, machine_mode,
52 rtx, rtx);
53static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
54 machine_mode, rtx, rtx);
55static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
56static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
57 rtx, rtx, rtx, rtx);
58
59/* Negate a CONST_INT rtx. */
60static rtx
61neg_const_int (machine_mode mode, const_rtx i)
62{
63 unsigned HOST_WIDE_INT val = -UINTVAL (i);
64
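  /* If MODE is wider than a HOST_WIDE_INT and negating I wrapped back to
     the same value (only possible for zero and the most negative
     HOST_WIDE_INT), let the wide-int code compute the result instead of
     truncating it here.  */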
65 if (!HWI_COMPUTABLE_MODE_P (mode)
66 && val == UINTVAL (i))
67 return simplify_const_unary_operation (NEG, mode, CONST_CAST_RTX (i),
68 mode);
69 return gen_int_mode (val, mode);
70}
71
72/* Test whether expression, X, is an immediate constant that represents
73 the most significant bit of machine mode MODE. */
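/* For example, in SImode this holds for (const_int -2147483648),
   whose low 32 bits are 0x80000000.  */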
74
75bool
76mode_signbit_p (machine_mode mode, const_rtx x)
77{
78 unsigned HOST_WIDE_INT val;
79 unsigned int width;
80 scalar_int_mode int_mode;
81
82 if (!is_int_mode (mode, &int_mode))
83 return false;
84
85 width = GET_MODE_PRECISION (int_mode);
86 if (width == 0)
87 return false;
88
89 if (width <= HOST_BITS_PER_WIDE_INT
90 && CONST_INT_P (x))
91 val = INTVAL (x);
92#if TARGET_SUPPORTS_WIDE_INT
93 else if (CONST_WIDE_INT_P (x))
94 {
95 unsigned int i;
96 unsigned int elts = CONST_WIDE_INT_NUNITS (x);
97 if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
98 return false;
99 for (i = 0; i < elts - 1; i++)
100 if (CONST_WIDE_INT_ELT (x, i) != 0)
101 return false;
102 val = CONST_WIDE_INT_ELT (x, elts - 1);
103 width %= HOST_BITS_PER_WIDE_INT;
104 if (width == 0)
105 width = HOST_BITS_PER_WIDE_INT;
106 }
107#else
108 else if (width <= HOST_BITS_PER_DOUBLE_INT
109 && CONST_DOUBLE_AS_INT_P (x)
110 && CONST_DOUBLE_LOW (x) == 0)
111 {
112 val = CONST_DOUBLE_HIGH (x);
113 width -= HOST_BITS_PER_WIDE_INT;
114 }
115#endif
116 else
117 /* X is not an integer constant. */
118 return false;
119
120 if (width < HOST_BITS_PER_WIDE_INT)
121 val &= (HOST_WIDE_INT_1U << width) - 1;
122 return val == (HOST_WIDE_INT_1U << (width - 1));
123}
124
125/* Test whether VAL is equal to the most significant bit of mode MODE
126 (after masking with the mode mask of MODE). Returns false if the
127 precision of MODE is too large to handle. */
128
129bool
130val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
131{
132 unsigned int width;
133 scalar_int_mode int_mode;
134
135 if (!is_int_mode (mode, &int_mode))
136 return false;
137
138 width = GET_MODE_PRECISION (int_mode);
139 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
140 return false;
141
142 val &= GET_MODE_MASK (int_mode);
143 return val == (HOST_WIDE_INT_1U << (width - 1));
144}
145
146/* Test whether the most significant bit of mode MODE is set in VAL.
147 Returns false if the precision of MODE is too large to handle. */
148bool
149val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
150{
151 unsigned int width;
152
153 scalar_int_mode int_mode;
154 if (!is_int_mode (mode, &int_mode))
155 return false;
156
157 width = GET_MODE_PRECISION (int_mode);
158 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
159 return false;
160
161 val &= HOST_WIDE_INT_1U << (width - 1);
162 return val != 0;
163}
164
165/* Test whether the most significant bit of mode MODE is clear in VAL.
166 Returns false if the precision of MODE is too large to handle. */
167bool
168val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
169{
170 unsigned int width;
171
172 scalar_int_mode int_mode;
173 if (!is_int_mode (mode, &int_mode))
174 return false;
175
176 width = GET_MODE_PRECISION (int_mode);
177 if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
178 return false;
179
180 val &= HOST_WIDE_INT_1U << (width - 1);
181 return val == 0;
182}
183
184/* Make a binary operation by properly ordering the operands and
185 seeing if the expression folds. */
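/* For example, given PLUS, SImode, (const_int 1) and (reg:SI R), the
   operands are swapped and (plus:SI (reg:SI R) (const_int 1)) is built,
   keeping the constant second as canonical RTL requires.  */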
186
187rtx
188simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
189 rtx op1)
190{
191 rtx tem;
192
193 /* If this simplifies, do it. */
194 tem = simplify_binary_operation (code, mode, op0, op1);
195 if (tem)
196 return tem;
197
198 /* Put complex operands first and constants second if commutative. */
199 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
200 && swap_commutative_operands_p (op0, op1))
201 std::swap (op0, op1);
202
203 return gen_rtx_fmt_ee (code, mode, op0, op1);
204}
205
206/* If X is a MEM referencing the constant pool, return the real value.
207 Otherwise return X. */
208rtx
209avoid_constant_pool_reference (rtx x)
210{
211 rtx c, tmp, addr;
212 machine_mode cmode;
213 HOST_WIDE_INT offset = 0;
214
215 switch (GET_CODE (x))
216 {
217 case MEM:
218 break;
219
220 case FLOAT_EXTEND:
221 /* Handle float extensions of constant pool references. */
222 tmp = XEXP (x, 0);
223 c = avoid_constant_pool_reference (tmp);
224 if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
225 return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
226 GET_MODE (x));
227 return x;
228
229 default:
230 return x;
231 }
232
233 if (GET_MODE (x) == BLKmode)
234 return x;
235
236 addr = XEXP (x, 0);
237
238 /* Call target hook to avoid the effects of -fpic etc.... */
239 addr = targetm.delegitimize_address (addr);
240
241 /* Split the address into a base and integer offset. */
242 if (GET_CODE (addr) == CONST
243 && GET_CODE (XEXP (addr, 0)) == PLUS
244 && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
245 {
246 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
247 addr = XEXP (XEXP (addr, 0), 0);
248 }
249
250 if (GET_CODE (addr) == LO_SUM)
251 addr = XEXP (addr, 1);
252
253 /* If this is a constant pool reference, we can turn it into its
254 constant and hope that simplifications happen. */
255 if (GET_CODE (addr) == SYMBOL_REF
256 && CONSTANT_POOL_ADDRESS_P (addr))
257 {
258 c = get_pool_constant (addr);
259 cmode = get_pool_mode (addr);
260
261 /* If we're accessing the constant in a different mode than it was
262 originally stored, attempt to fix that up via subreg simplifications.
263 If that fails we have no choice but to return the original memory. */
264 if (offset == 0 && cmode == GET_MODE (x))
265 return c;
266 else if (offset >= 0 && offset < GET_MODE_SIZE (cmode))
267 {
268 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
269 if (tem && CONSTANT_P (tem))
270 return tem;
271 }
272 }
273
274 return x;
275}
276
277/* Simplify a MEM based on its attributes. This is the default
278 delegitimize_address target hook, and it's recommended that every
279 overrider call it. */
280
281rtx
282delegitimize_mem_from_attrs (rtx x)
283{
284 /* MEMs without MEM_OFFSETs may have been offset, so we can't just
285 use their base addresses as equivalent. */
286 if (MEM_P (x)
287 && MEM_EXPR (x)
288 && MEM_OFFSET_KNOWN_P (x))
289 {
290 tree decl = MEM_EXPR (x);
291 machine_mode mode = GET_MODE (x);
292 HOST_WIDE_INT offset = 0;
293
294 switch (TREE_CODE (decl))
295 {
296 default:
297 decl = NULL;
298 break;
299
300 case VAR_DECL:
301 break;
302
303 case ARRAY_REF:
304 case ARRAY_RANGE_REF:
305 case COMPONENT_REF:
306 case BIT_FIELD_REF:
307 case REALPART_EXPR:
308 case IMAGPART_EXPR:
309 case VIEW_CONVERT_EXPR:
310 {
311 HOST_WIDE_INT bitsize, bitpos;
312 tree toffset;
313 int unsignedp, reversep, volatilep = 0;
314
315 decl
316 = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
317 &unsignedp, &reversep, &volatilep);
318 if (bitsize != GET_MODE_BITSIZE (mode)
319 || (bitpos % BITS_PER_UNIT)
320 || (toffset && !tree_fits_shwi_p (toffset)))
321 decl = NULL;
322 else
323 {
324 offset += bitpos / BITS_PER_UNIT;
325 if (toffset)
326 offset += tree_to_shwi (toffset);
327 }
328 break;
329 }
330 }
331
332 if (decl
333 && mode == GET_MODE (x)
334 && VAR_P (decl)
335 && (TREE_STATIC (decl)
336 || DECL_THREAD_LOCAL_P (decl))
337 && DECL_RTL_SET_P (decl)
338 && MEM_P (DECL_RTL (decl)))
339 {
340 rtx newx;
341
342 offset += MEM_OFFSET (x);
343
344 newx = DECL_RTL (decl);
345
346 if (MEM_P (newx))
347 {
348 rtx n = XEXP (newx, 0), o = XEXP (x, 0);
349
350 /* Avoid creating a new MEM needlessly if we already had
351 the same address. We do if there's no OFFSET and the
352 old address X is identical to NEWX, or if X is of the
353 form (plus NEWX OFFSET), or the NEWX is of the form
354 (plus Y (const_int Z)) and X is that with the offset
355 added: (plus Y (const_int Z+OFFSET)). */
356 if (!((offset == 0
357 || (GET_CODE (o) == PLUS
358 && GET_CODE (XEXP (o, 1)) == CONST_INT
359 && (offset == INTVAL (XEXP (o, 1))
360 || (GET_CODE (n) == PLUS
361 && GET_CODE (XEXP (n, 1)) == CONST_INT
362 && (INTVAL (XEXP (n, 1)) + offset
363 == INTVAL (XEXP (o, 1)))
364 && (n = XEXP (n, 0))))
365 && (o = XEXP (o, 0))))
366 && rtx_equal_p (o, n)))
367 x = adjust_address_nv (newx, mode, offset);
368 }
369 else if (GET_MODE (x) == GET_MODE (newx)
370 && offset == 0)
371 x = newx;
372 }
373 }
374
375 return x;
376}
377
378/* Make a unary operation by first seeing if it folds and otherwise making
379 the specified operation. */
380
381rtx
382simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
383 machine_mode op_mode)
384{
385 rtx tem;
386
387 /* If this simplifies, use it. */
388 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
389 return tem;
390
391 return gen_rtx_fmt_e (code, mode, op);
392}
393
394/* Likewise for ternary operations. */
395
396rtx
397simplify_gen_ternary (enum rtx_code code, machine_mode mode,
398 machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
399{
400 rtx tem;
401
402 /* If this simplifies, use it. */
403 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
404 op0, op1, op2)))
405 return tem;
406
407 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
408}
409
/* Likewise, for relational operations.
   CMP_MODE specifies the mode in which the comparison is done.  */
412
413rtx
414simplify_gen_relational (enum rtx_code code, machine_mode mode,
415 machine_mode cmp_mode, rtx op0, rtx op1)
416{
417 rtx tem;
418
419 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
420 op0, op1)))
421 return tem;
422
423 return gen_rtx_fmt_ee (code, mode, op0, op1);
424}
425
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call the callback on each
   sub-expression X; if it returns non-NULL, replace X with its return value
   and simplify the result.  */
430
431rtx
432simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
433 rtx (*fn) (rtx, const_rtx, void *), void *data)
434{
435 enum rtx_code code = GET_CODE (x);
436 machine_mode mode = GET_MODE (x);
437 machine_mode op_mode;
438 const char *fmt;
439 rtx op0, op1, op2, newx, op;
440 rtvec vec, newvec;
441 int i, j;
442
443 if (__builtin_expect (fn != NULL, 0))
444 {
445 newx = fn (x, old_rtx, data);
446 if (newx)
447 return newx;
448 }
449 else if (rtx_equal_p (x, old_rtx))
450 return copy_rtx ((rtx) data);
451
452 switch (GET_RTX_CLASS (code))
453 {
454 case RTX_UNARY:
455 op0 = XEXP (x, 0);
456 op_mode = GET_MODE (op0);
457 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
458 if (op0 == XEXP (x, 0))
459 return x;
460 return simplify_gen_unary (code, mode, op0, op_mode);
461
462 case RTX_BIN_ARITH:
463 case RTX_COMM_ARITH:
464 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
465 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
466 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
467 return x;
468 return simplify_gen_binary (code, mode, op0, op1);
469
470 case RTX_COMPARE:
471 case RTX_COMM_COMPARE:
472 op0 = XEXP (x, 0);
473 op1 = XEXP (x, 1);
474 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
475 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
476 op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
477 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
478 return x;
479 return simplify_gen_relational (code, mode, op_mode, op0, op1);
480
481 case RTX_TERNARY:
482 case RTX_BITFIELD_OPS:
483 op0 = XEXP (x, 0);
484 op_mode = GET_MODE (op0);
485 op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
486 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
487 op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
488 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
489 return x;
490 if (op_mode == VOIDmode)
491 op_mode = GET_MODE (op0);
492 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
493
494 case RTX_EXTRA:
495 if (code == SUBREG)
496 {
497 op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
498 if (op0 == SUBREG_REG (x))
499 return x;
500 op0 = simplify_gen_subreg (GET_MODE (x), op0,
501 GET_MODE (SUBREG_REG (x)),
502 SUBREG_BYTE (x));
503 return op0 ? op0 : x;
504 }
505 break;
506
507 case RTX_OBJ:
508 if (code == MEM)
509 {
510 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
511 if (op0 == XEXP (x, 0))
512 return x;
513 return replace_equiv_address_nv (x, op0);
514 }
515 else if (code == LO_SUM)
516 {
517 op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
518 op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
519
520 /* (lo_sum (high x) y) -> y where x and y have the same base. */
521 if (GET_CODE (op0) == HIGH)
522 {
523 rtx base0, base1, offset0, offset1;
524 split_const (XEXP (op0, 0), &base0, &offset0);
525 split_const (op1, &base1, &offset1);
526 if (rtx_equal_p (base0, base1))
527 return op1;
528 }
529
530 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
531 return x;
532 return gen_rtx_LO_SUM (mode, op0, op1);
533 }
534 break;
535
536 default:
537 break;
538 }
539
540 newx = x;
541 fmt = GET_RTX_FORMAT (code);
542 for (i = 0; fmt[i]; i++)
543 switch (fmt[i])
544 {
545 case 'E':
546 vec = XVEC (x, i);
547 newvec = XVEC (newx, i);
548 for (j = 0; j < GET_NUM_ELEM (vec); j++)
549 {
550 op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
551 old_rtx, fn, data);
552 if (op != RTVEC_ELT (vec, j))
553 {
554 if (newvec == vec)
555 {
556 newvec = shallow_copy_rtvec (vec);
557 if (x == newx)
558 newx = shallow_copy_rtx (x);
559 XVEC (newx, i) = newvec;
560 }
561 RTVEC_ELT (newvec, j) = op;
562 }
563 }
564 break;
565
566 case 'e':
567 if (XEXP (x, i))
568 {
569 op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
570 if (op != XEXP (x, i))
571 {
572 if (x == newx)
573 newx = shallow_copy_rtx (x);
574 XEXP (newx, i) = op;
575 }
576 }
577 break;
578 }
579 return newx;
580}
581
582/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
583 resulting RTX. Return a new RTX which is as simplified as possible. */
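/* For example, replacing (reg:SI A) with (const_int 8) in
   (plus:SI (reg:SI A) (const_int 4)) yields (const_int 12).  */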
584
585rtx
586simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
587{
588 return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
589}
590
591/* Try to simplify a MODE truncation of OP, which has OP_MODE.
592 Only handle cases where the truncated value is inherently an rvalue.
593
594 RTL provides two ways of truncating a value:
595
596 1. a lowpart subreg. This form is only a truncation when both
597 the outer and inner modes (here MODE and OP_MODE respectively)
598 are scalar integers, and only then when the subreg is used as
599 an rvalue.
600
601 It is only valid to form such truncating subregs if the
602 truncation requires no action by the target. The onus for
603 proving this is on the creator of the subreg -- e.g. the
604 caller to simplify_subreg or simplify_gen_subreg -- and typically
605 involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
606
607 2. a TRUNCATE. This form handles both scalar and compound integers.
608
609 The first form is preferred where valid. However, the TRUNCATE
610 handling in simplify_unary_operation turns the second form into the
611 first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
612 so it is generally safe to form rvalue truncations using:
613
614 simplify_gen_unary (TRUNCATE, ...)
615
616 and leave simplify_unary_operation to work out which representation
617 should be used.
618
619 Because of the proof requirements on (1), simplify_truncation must
620 also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
621 regardless of whether the outer truncation came from a SUBREG or a
622 TRUNCATE. For example, if the caller has proven that an SImode
623 truncation of:
624
625 (and:DI X Y)
626
627 is a no-op and can be represented as a subreg, it does not follow
628 that SImode truncations of X and Y are also no-ops. On a target
629 like 64-bit MIPS that requires SImode values to be stored in
630 sign-extended form, an SImode truncation of:
631
632 (and:DI (reg:DI X) (const_int 63))
633
634 is trivially a no-op because only the lower 6 bits can be set.
635 However, X is still an arbitrary 64-bit number and so we cannot
636 assume that truncating it too is a no-op. */
637
638static rtx
639simplify_truncation (machine_mode mode, rtx op,
640 machine_mode op_mode)
641{
642 unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
643 unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
644 scalar_int_mode int_mode, int_op_mode, subreg_mode;
645
646 gcc_assert (precision <= op_precision);
647
648 /* Optimize truncations of zero and sign extended values. */
649 if (GET_CODE (op) == ZERO_EXTEND
650 || GET_CODE (op) == SIGN_EXTEND)
651 {
652 /* There are three possibilities. If MODE is the same as the
653 origmode, we can omit both the extension and the subreg.
654 If MODE is not larger than the origmode, we can apply the
655 truncation without the extension. Finally, if the outermode
656 is larger than the origmode, we can just extend to the appropriate
657 mode. */
658 machine_mode origmode = GET_MODE (XEXP (op, 0));
659 if (mode == origmode)
660 return XEXP (op, 0);
661 else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
662 return simplify_gen_unary (TRUNCATE, mode,
663 XEXP (op, 0), origmode);
664 else
665 return simplify_gen_unary (GET_CODE (op), mode,
666 XEXP (op, 0), origmode);
667 }
668
669 /* If the machine can perform operations in the truncated mode, distribute
670 the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
671 (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
  if ((!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
      && (GET_CODE (op) == PLUS
          || GET_CODE (op) == MINUS
          || GET_CODE (op) == MULT))
677 {
678 rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
679 if (op0)
680 {
681 rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
682 if (op1)
683 return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
684 }
685 }
686
  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C))
     into (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
690 if ((GET_CODE (op) == LSHIFTRT
691 || GET_CODE (op) == ASHIFTRT)
692 /* Ensure that OP_MODE is at least twice as wide as MODE
693 to avoid the possibility that an outer LSHIFTRT shifts by more
694 than the sign extension's sign_bit_copies and introduces zeros
695 into the high bits of the result. */
696 && 2 * precision <= op_precision
697 && CONST_INT_P (XEXP (op, 1))
698 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
699 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
700 && UINTVAL (XEXP (op, 1)) < precision)
701 return simplify_gen_binary (ASHIFTRT, mode,
702 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
703
  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C))
     into (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
707 if ((GET_CODE (op) == LSHIFTRT
708 || GET_CODE (op) == ASHIFTRT)
709 && CONST_INT_P (XEXP (op, 1))
710 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
711 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
712 && UINTVAL (XEXP (op, 1)) < precision)
713 return simplify_gen_binary (LSHIFTRT, mode,
714 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
715
  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C))
     into (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
719 if (GET_CODE (op) == ASHIFT
720 && CONST_INT_P (XEXP (op, 1))
721 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
722 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
723 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
724 && UINTVAL (XEXP (op, 1)) < precision)
725 return simplify_gen_binary (ASHIFT, mode,
726 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
727
728 /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
729 (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
730 and C2. */
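  /* E.g. with C 2 and C2 31, (truncate:QI (and:SI (lshiftrt:SI X 2) 31))
     becomes (and:QI (lshiftrt:QI (truncate:QI X) 2) 31); the masked bits
     are the same whether the shift is done before or after truncation.  */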
731 if (GET_CODE (op) == AND
732 && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
733 || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
734 && CONST_INT_P (XEXP (XEXP (op, 0), 1))
735 && CONST_INT_P (XEXP (op, 1)))
736 {
737 rtx op0 = (XEXP (XEXP (op, 0), 0));
738 rtx shift_op = XEXP (XEXP (op, 0), 1);
739 rtx mask_op = XEXP (op, 1);
740 unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
741 unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
742
743 if (shift < precision
744 /* If doing this transform works for an X with all bits set,
745 it works for any X. */
746 && ((GET_MODE_MASK (mode) >> shift) & mask)
747 == ((GET_MODE_MASK (op_mode) >> shift) & mask)
748 && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
749 && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
750 {
751 mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
752 return simplify_gen_binary (AND, mode, op0, mask_op);
753 }
754 }
755
756 /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
757 (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
758 changing len. */
759 if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
760 && REG_P (XEXP (op, 0))
761 && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
762 && CONST_INT_P (XEXP (op, 1))
763 && CONST_INT_P (XEXP (op, 2)))
764 {
765 rtx op0 = XEXP (op, 0);
766 unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
767 unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
768 if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
769 {
770 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
771 if (op0)
772 {
773 pos -= op_precision - precision;
774 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
775 XEXP (op, 1), GEN_INT (pos));
776 }
777 }
778 else if (!BITS_BIG_ENDIAN && precision >= len + pos)
779 {
780 op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
781 if (op0)
782 return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
783 XEXP (op, 1), XEXP (op, 2));
784 }
785 }
786
787 /* Recognize a word extraction from a multi-word subreg. */
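  /* For example, on a little-endian target with 32-bit words,
     (truncate:SI (lshiftrt:DI (x:DI) (const_int 32))) becomes
     (subreg:SI (x:DI) 4).  */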
788 if ((GET_CODE (op) == LSHIFTRT
789 || GET_CODE (op) == ASHIFTRT)
790 && SCALAR_INT_MODE_P (mode)
791 && SCALAR_INT_MODE_P (op_mode)
792 && precision >= BITS_PER_WORD
793 && 2 * precision <= op_precision
794 && CONST_INT_P (XEXP (op, 1))
795 && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
796 && UINTVAL (XEXP (op, 1)) < op_precision)
797 {
798 int byte = subreg_lowpart_offset (mode, op_mode);
799 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
800 return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
801 (WORDS_BIG_ENDIAN
802 ? byte - shifted_bytes
803 : byte + shifted_bytes));
804 }
805
806 /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
807 and try replacing the TRUNCATE and shift with it. Don't do this
808 if the MEM has a mode-dependent address. */
809 if ((GET_CODE (op) == LSHIFTRT
810 || GET_CODE (op) == ASHIFTRT)
811 && is_a <scalar_int_mode> (mode, &int_mode)
812 && is_a <scalar_int_mode> (op_mode, &int_op_mode)
813 && MEM_P (XEXP (op, 0))
814 && CONST_INT_P (XEXP (op, 1))
815 && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
816 && INTVAL (XEXP (op, 1)) > 0
817 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
818 && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
819 MEM_ADDR_SPACE (XEXP (op, 0)))
820 && ! MEM_VOLATILE_P (XEXP (op, 0))
821 && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
822 || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
823 {
824 int byte = subreg_lowpart_offset (int_mode, int_op_mode);
825 int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
826 return adjust_address_nv (XEXP (op, 0), int_mode,
827 (WORDS_BIG_ENDIAN
828 ? byte - shifted_bytes
829 : byte + shifted_bytes));
830 }
831
832 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
833 (OP:SI foo:SI) if OP is NEG or ABS. */
834 if ((GET_CODE (op) == ABS
835 || GET_CODE (op) == NEG)
836 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
837 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
838 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
839 return simplify_gen_unary (GET_CODE (op), mode,
840 XEXP (XEXP (op, 0), 0), mode);
841
842 /* (truncate:A (subreg:B (truncate:C X) 0)) is
843 (truncate:A X). */
844 if (GET_CODE (op) == SUBREG
845 && is_a <scalar_int_mode> (mode, &int_mode)
846 && SCALAR_INT_MODE_P (op_mode)
847 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
848 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
849 && subreg_lowpart_p (op))
850 {
851 rtx inner = XEXP (SUBREG_REG (op), 0);
852 if (GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (subreg_mode))
853 return simplify_gen_unary (TRUNCATE, int_mode, inner,
854 GET_MODE (inner));
855 else
856 /* If subreg above is paradoxical and C is narrower
857 than A, return (subreg:A (truncate:C X) 0). */
858 return simplify_gen_subreg (int_mode, SUBREG_REG (op), subreg_mode, 0);
859 }
860
861 /* (truncate:A (truncate:B X)) is (truncate:A X). */
862 if (GET_CODE (op) == TRUNCATE)
863 return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
864 GET_MODE (XEXP (op, 0)));
865
  /* (truncate:A (ior X C)) is (const_int -1) if C, truncated to mode A,
     is already all ones.  */
868 if (GET_CODE (op) == IOR
869 && SCALAR_INT_MODE_P (mode)
870 && SCALAR_INT_MODE_P (op_mode)
871 && CONST_INT_P (XEXP (op, 1))
872 && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
873 return constm1_rtx;
874
875 return NULL_RTX;
876}
877
878/* Try to simplify a unary operation CODE whose output mode is to be
879 MODE with input operand OP whose mode was originally OP_MODE.
880 Return zero if no simplification can be made. */
881rtx
882simplify_unary_operation (enum rtx_code code, machine_mode mode,
883 rtx op, machine_mode op_mode)
884{
885 rtx trueop, tem;
886
887 trueop = avoid_constant_pool_reference (op);
888
889 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
890 if (tem)
891 return tem;
892
893 return simplify_unary_operation_1 (code, mode, op);
894}
895
896/* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
897 to be exact. */
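/* For example, on IEEE targets (float:SF (reg:HI X)) is always exact,
   since every 16-bit integer fits in SFmode's 24-bit significand.  */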
898
899static bool
900exact_int_to_float_conversion_p (const_rtx op)
901{
902 int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
903 machine_mode op0_mode = GET_MODE (XEXP (op, 0));
904 /* Constants shouldn't reach here. */
905 gcc_assert (op0_mode != VOIDmode);
906 int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
907 int in_bits = in_prec;
908 if (HWI_COMPUTABLE_MODE_P (op0_mode))
909 {
910 unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
911 if (GET_CODE (op) == FLOAT)
912 in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
913 else if (GET_CODE (op) == UNSIGNED_FLOAT)
914 in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
915 else
916 gcc_unreachable ();
917 in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
918 }
919 return in_bits <= out_bits;
920}
921
922/* Perform some simplifications we can do even if the operands
923 aren't constant. */
924static rtx
925simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
926{
927 enum rtx_code reversed;
928 rtx temp, elt, base, step;
929 scalar_int_mode inner, int_mode, op_mode, op0_mode;
930
931 switch (code)
932 {
933 case NOT:
934 /* (not (not X)) == X. */
935 if (GET_CODE (op) == NOT)
936 return XEXP (op, 0);
937
938 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
939 comparison is all ones. */
940 if (COMPARISON_P (op)
941 && (mode == BImode || STORE_FLAG_VALUE == -1)
942 && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
943 return simplify_gen_relational (reversed, mode, VOIDmode,
944 XEXP (op, 0), XEXP (op, 1));
945
946 /* (not (plus X -1)) can become (neg X). */
947 if (GET_CODE (op) == PLUS
948 && XEXP (op, 1) == constm1_rtx)
949 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
950
951 /* Similarly, (not (neg X)) is (plus X -1). Only do this for
952 modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
953 and MODE_VECTOR_INT. */
954 if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
955 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
956 CONSTM1_RTX (mode));
957
958 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
959 if (GET_CODE (op) == XOR
960 && CONST_INT_P (XEXP (op, 1))
961 && (temp = simplify_unary_operation (NOT, mode,
962 XEXP (op, 1), mode)) != 0)
963 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
964
965 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
966 if (GET_CODE (op) == PLUS
967 && CONST_INT_P (XEXP (op, 1))
968 && mode_signbit_p (mode, XEXP (op, 1))
969 && (temp = simplify_unary_operation (NOT, mode,
970 XEXP (op, 1), mode)) != 0)
971 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
972
973
974 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
975 operands other than 1, but that is not valid. We could do a
976 similar simplification for (not (lshiftrt C X)) where C is
977 just the sign bit, but this doesn't seem common enough to
978 bother with. */
979 if (GET_CODE (op) == ASHIFT
980 && XEXP (op, 0) == const1_rtx)
981 {
982 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
983 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
984 }
985
986 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
987 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
988 so we can perform the above simplification. */
989 if (STORE_FLAG_VALUE == -1
990 && is_a <scalar_int_mode> (mode, &int_mode)
991 && GET_CODE (op) == ASHIFTRT
992 && CONST_INT_P (XEXP (op, 1))
993 && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
994 return simplify_gen_relational (GE, int_mode, VOIDmode,
995 XEXP (op, 0), const0_rtx);
996
997
998 if (partial_subreg_p (op)
999 && subreg_lowpart_p (op)
1000 && GET_CODE (SUBREG_REG (op)) == ASHIFT
1001 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
1002 {
1003 machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
1004 rtx x;
1005
1006 x = gen_rtx_ROTATE (inner_mode,
1007 simplify_gen_unary (NOT, inner_mode, const1_rtx,
1008 inner_mode),
1009 XEXP (SUBREG_REG (op), 1));
1010 temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
1011 if (temp)
1012 return temp;
1013 }
1014
1015 /* Apply De Morgan's laws to reduce number of patterns for machines
1016 with negating logical insns (and-not, nand, etc.). If result has
1017 only one NOT, put it first, since that is how the patterns are
1018 coded. */
1019 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
1020 {
1021 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
1022 machine_mode op_mode;
1023
1024 op_mode = GET_MODE (in1);
1025 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1026
1027 op_mode = GET_MODE (in2);
1028 if (op_mode == VOIDmode)
1029 op_mode = mode;
1030 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1031
1032 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1033 std::swap (in1, in2);
1034
1035 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1036 mode, in1, in2);
1037 }
1038
1039 /* (not (bswap x)) -> (bswap (not x)). */
1040 if (GET_CODE (op) == BSWAP)
1041 {
1042 rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1043 return simplify_gen_unary (BSWAP, mode, x, mode);
1044 }
1045 break;
1046
1047 case NEG:
1048 /* (neg (neg X)) == X. */
1049 if (GET_CODE (op) == NEG)
1050 return XEXP (op, 0);
1051
      /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
         If the comparison is not reversible, use
         x ? y : (neg y) instead.  */
1055 if (GET_CODE (op) == IF_THEN_ELSE)
1056 {
1057 rtx cond = XEXP (op, 0);
1058 rtx true_rtx = XEXP (op, 1);
1059 rtx false_rtx = XEXP (op, 2);
1060
1061 if ((GET_CODE (true_rtx) == NEG
1062 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1063 || (GET_CODE (false_rtx) == NEG
1064 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1065 {
1066 if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1067 temp = reversed_comparison (cond, mode);
1068 else
1069 {
1070 temp = cond;
1071 std::swap (true_rtx, false_rtx);
1072 }
1073 return simplify_gen_ternary (IF_THEN_ELSE, mode,
1074 mode, temp, true_rtx, false_rtx);
1075 }
1076 }
1077
1078 /* (neg (plus X 1)) can become (not X). */
1079 if (GET_CODE (op) == PLUS
1080 && XEXP (op, 1) == const1_rtx)
1081 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1082
1083 /* Similarly, (neg (not X)) is (plus X 1). */
1084 if (GET_CODE (op) == NOT)
1085 return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1086 CONST1_RTX (mode));
1087
1088 /* (neg (minus X Y)) can become (minus Y X). This transformation
1089 isn't safe for modes with signed zeros, since if X and Y are
1090 both +0, (minus Y X) is the same as (minus X Y). If the
1091 rounding mode is towards +infinity (or -infinity) then the two
1092 expressions will be rounded differently. */
1093 if (GET_CODE (op) == MINUS
1094 && !HONOR_SIGNED_ZEROS (mode)
1095 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1096 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1097
1098 if (GET_CODE (op) == PLUS
1099 && !HONOR_SIGNED_ZEROS (mode)
1100 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1101 {
1102 /* (neg (plus A C)) is simplified to (minus -C A). */
1103 if (CONST_SCALAR_INT_P (XEXP (op, 1))
1104 || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1105 {
1106 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1107 if (temp)
1108 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1109 }
1110
1111 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1112 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1113 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1114 }
1115
1116 /* (neg (mult A B)) becomes (mult A (neg B)).
1117 This works even for floating-point values. */
1118 if (GET_CODE (op) == MULT
1119 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1120 {
1121 temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1122 return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1123 }
1124
1125 /* NEG commutes with ASHIFT since it is multiplication. Only do
1126 this if we can then eliminate the NEG (e.g., if the operand
1127 is a constant). */
1128 if (GET_CODE (op) == ASHIFT)
1129 {
1130 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1131 if (temp)
1132 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1133 }
1134
1135 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1136 C is equal to the width of MODE minus 1. */
1137 if (GET_CODE (op) == ASHIFTRT
1138 && CONST_INT_P (XEXP (op, 1))
1139 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1140 return simplify_gen_binary (LSHIFTRT, mode,
1141 XEXP (op, 0), XEXP (op, 1));
1142
1143 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1144 C is equal to the width of MODE minus 1. */
1145 if (GET_CODE (op) == LSHIFTRT
1146 && CONST_INT_P (XEXP (op, 1))
1147 && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1148 return simplify_gen_binary (ASHIFTRT, mode,
1149 XEXP (op, 0), XEXP (op, 1));
1150
1151 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1152 if (GET_CODE (op) == XOR
1153 && XEXP (op, 1) == const1_rtx
1154 && nonzero_bits (XEXP (op, 0), mode) == 1)
1155 return plus_constant (mode, XEXP (op, 0), -1);
1156
1157 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1158 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
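      /* E.g. with STORE_FLAG_VALUE == 1,
         (neg:SI (lt:SI (reg:SI X) (const_int 0))) becomes
         (ashiftrt:SI (reg:SI X) (const_int 31)).  */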
1159 if (GET_CODE (op) == LT
1160 && XEXP (op, 1) == const0_rtx
1161 && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
1162 {
1163 int_mode = as_a <scalar_int_mode> (mode);
1164 int isize = GET_MODE_PRECISION (inner);
1165 if (STORE_FLAG_VALUE == 1)
1166 {
1167 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1168 GEN_INT (isize - 1));
1169 if (int_mode == inner)
1170 return temp;
1171 if (GET_MODE_PRECISION (int_mode) > isize)
1172 return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
1173 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1174 }
1175 else if (STORE_FLAG_VALUE == -1)
1176 {
1177 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1178 GEN_INT (isize - 1));
1179 if (int_mode == inner)
1180 return temp;
1181 if (GET_MODE_PRECISION (int_mode) > isize)
1182 return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
1183 return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1184 }
1185 }
1186
1187 if (vec_series_p (op, &base, &step))
1188 {
1189 /* Only create a new series if we can simplify both parts. In other
1190 cases this isn't really a simplification, and it's not necessarily
1191 a win to replace a vector operation with a scalar operation. */
1192 scalar_mode inner_mode = GET_MODE_INNER (mode);
1193 base = simplify_unary_operation (NEG, inner_mode, base, inner_mode);
1194 if (base)
1195 {
1196 step = simplify_unary_operation (NEG, inner_mode,
1197 step, inner_mode);
1198 if (step)
1199 return gen_vec_series (mode, base, step);
1200 }
1201 }
1202 break;
1203
1204 case TRUNCATE:
1205 /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1206 with the umulXi3_highpart patterns. */
1207 if (GET_CODE (op) == LSHIFTRT
1208 && GET_CODE (XEXP (op, 0)) == MULT)
1209 break;
1210
1211 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1212 {
1213 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1214 {
1215 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1216 if (temp)
1217 return temp;
1218 }
1219 /* We can't handle truncation to a partial integer mode here
1220 because we don't know the real bitsize of the partial
1221 integer mode. */
1222 break;
1223 }
1224
1225 if (GET_MODE (op) != VOIDmode)
1226 {
1227 temp = simplify_truncation (mode, op, GET_MODE (op));
1228 if (temp)
1229 return temp;
1230 }
1231
1232 /* If we know that the value is already truncated, we can
1233 replace the TRUNCATE with a SUBREG. */
1234 if (GET_MODE_NUNITS (mode) == 1
1235 && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1236 || truncated_to_mode (mode, op)))
1237 {
1238 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1239 if (temp)
1240 return temp;
1241 }
1242
1243 /* A truncate of a comparison can be replaced with a subreg if
1244 STORE_FLAG_VALUE permits. This is like the previous test,
1245 but it works even if the comparison is done in a mode larger
1246 than HOST_BITS_PER_WIDE_INT. */
1247 if (HWI_COMPUTABLE_MODE_P (mode)
1248 && COMPARISON_P (op)
1249 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
1250 {
1251 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1252 if (temp)
1253 return temp;
1254 }
1255
1256 /* A truncate of a memory is just loading the low part of the memory
1257 if we are not changing the meaning of the address. */
1258 if (GET_CODE (op) == MEM
1259 && !VECTOR_MODE_P (mode)
1260 && !MEM_VOLATILE_P (op)
1261 && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1262 {
1263 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1264 if (temp)
1265 return temp;
1266 }
1267
1268 break;
1269
1270 case FLOAT_TRUNCATE:
1271 if (DECIMAL_FLOAT_MODE_P (mode))
1272 break;
1273
1274 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1275 if (GET_CODE (op) == FLOAT_EXTEND
1276 && GET_MODE (XEXP (op, 0)) == mode)
1277 return XEXP (op, 0);
1278
1279 /* (float_truncate:SF (float_truncate:DF foo:XF))
1280 = (float_truncate:SF foo:XF).
1281 This may eliminate double rounding, so it is unsafe.
1282
1283 (float_truncate:SF (float_extend:XF foo:DF))
1284 = (float_truncate:SF foo:DF).
1285
1286 (float_truncate:DF (float_extend:XF foo:SF))
1287 = (float_extend:DF foo:SF). */
1288 if ((GET_CODE (op) == FLOAT_TRUNCATE
1289 && flag_unsafe_math_optimizations)
1290 || GET_CODE (op) == FLOAT_EXTEND)
1291 return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
1292 > GET_MODE_UNIT_SIZE (mode)
1293 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1294 mode,
1295 XEXP (op, 0), mode);
1296
1297 /* (float_truncate (float x)) is (float x) */
1298 if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1299 && (flag_unsafe_math_optimizations
1300 || exact_int_to_float_conversion_p (op)))
1301 return simplify_gen_unary (GET_CODE (op), mode,
1302 XEXP (op, 0),
1303 GET_MODE (XEXP (op, 0)));
1304
      /* (float_truncate:SF (OP:DF (float_extend:DF foo:SF))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
1307 if ((GET_CODE (op) == ABS
1308 || GET_CODE (op) == NEG)
1309 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1310 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1311 return simplify_gen_unary (GET_CODE (op), mode,
1312 XEXP (XEXP (op, 0), 0), mode);
1313
1314 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1315 is (float_truncate:SF x). */
1316 if (GET_CODE (op) == SUBREG
1317 && subreg_lowpart_p (op)
1318 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1319 return SUBREG_REG (op);
1320 break;
1321
1322 case FLOAT_EXTEND:
1323 if (DECIMAL_FLOAT_MODE_P (mode))
1324 break;
1325
1326 /* (float_extend (float_extend x)) is (float_extend x)
1327
1328 (float_extend (float x)) is (float x) assuming that double
1329 rounding can't happen.
1330 */
1331 if (GET_CODE (op) == FLOAT_EXTEND
1332 || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1333 && exact_int_to_float_conversion_p (op)))
1334 return simplify_gen_unary (GET_CODE (op), mode,
1335 XEXP (op, 0),
1336 GET_MODE (XEXP (op, 0)));
1337
1338 break;
1339
1340 case ABS:
1341 /* (abs (neg <foo>)) -> (abs <foo>) */
1342 if (GET_CODE (op) == NEG)
1343 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1344 GET_MODE (XEXP (op, 0)));
1345
1346 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1347 do nothing. */
1348 if (GET_MODE (op) == VOIDmode)
1349 break;
1350
1351 /* If operand is something known to be positive, ignore the ABS. */
1352 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
1353 || val_signbit_known_clear_p (GET_MODE (op),
1354 nonzero_bits (op, GET_MODE (op))))
1355 return op;
1356
1357 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1358 if (is_a <scalar_int_mode> (mode, &int_mode)
1359 && (num_sign_bit_copies (op, int_mode)
1360 == GET_MODE_PRECISION (int_mode)))
1361 return gen_rtx_NEG (int_mode, op);
1362
1363 break;
1364
1365 case FFS:
1366 /* (ffs (*_extend <X>)) = (ffs <X>) */
1367 if (GET_CODE (op) == SIGN_EXTEND
1368 || GET_CODE (op) == ZERO_EXTEND)
1369 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
1370 GET_MODE (XEXP (op, 0)));
1371 break;
1372
1373 case POPCOUNT:
1374 switch (GET_CODE (op))
1375 {
1376 case BSWAP:
1377 case ZERO_EXTEND:
1378 /* (popcount (zero_extend <X>)) = (popcount <X>) */
1379 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1380 GET_MODE (XEXP (op, 0)));
1381
1382 case ROTATE:
1383 case ROTATERT:
1384 /* Rotations don't affect popcount. */
1385 if (!side_effects_p (XEXP (op, 1)))
1386 return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1387 GET_MODE (XEXP (op, 0)));
1388 break;
1389
1390 default:
1391 break;
1392 }
1393 break;
1394
1395 case PARITY:
1396 switch (GET_CODE (op))
1397 {
1398 case NOT:
1399 case BSWAP:
1400 case ZERO_EXTEND:
1401 case SIGN_EXTEND:
1402 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1403 GET_MODE (XEXP (op, 0)));
1404
1405 case ROTATE:
1406 case ROTATERT:
1407 /* Rotations don't affect parity. */
1408 if (!side_effects_p (XEXP (op, 1)))
1409 return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1410 GET_MODE (XEXP (op, 0)));
1411 break;
1412
1413 default:
1414 break;
1415 }
1416 break;
1417
1418 case BSWAP:
1419 /* (bswap (bswap x)) -> x. */
1420 if (GET_CODE (op) == BSWAP)
1421 return XEXP (op, 0);
1422 break;
1423
1424 case FLOAT:
1425 /* (float (sign_extend <X>)) = (float <X>). */
1426 if (GET_CODE (op) == SIGN_EXTEND)
1427 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1428 GET_MODE (XEXP (op, 0)));
1429 break;
1430
1431 case SIGN_EXTEND:
1432 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1433 becomes just the MINUS if its mode is MODE. This allows
1434 folding switch statements on machines using casesi (such as
1435 the VAX). */
1436 if (GET_CODE (op) == TRUNCATE
1437 && GET_MODE (XEXP (op, 0)) == mode
1438 && GET_CODE (XEXP (op, 0)) == MINUS
1439 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1440 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1441 return XEXP (op, 0);
1442
1443 /* Extending a widening multiplication should be canonicalized to
1444 a wider widening multiplication. */
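      /* E.g. (sign_extend:DI (mult:SI (sign_extend:SI (reg:HI A))
                                       (sign_extend:SI (reg:HI B))))
         becomes (mult:DI (sign_extend:DI (reg:HI A))
                          (sign_extend:DI (reg:HI B))).  */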
1445 if (GET_CODE (op) == MULT)
1446 {
1447 rtx lhs = XEXP (op, 0);
1448 rtx rhs = XEXP (op, 1);
1449 enum rtx_code lcode = GET_CODE (lhs);
1450 enum rtx_code rcode = GET_CODE (rhs);
1451
1452 /* Widening multiplies usually extend both operands, but sometimes
1453 they use a shift to extract a portion of a register. */
1454 if ((lcode == SIGN_EXTEND
1455 || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1456 && (rcode == SIGN_EXTEND
1457 || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1458 {
1459 machine_mode lmode = GET_MODE (lhs);
1460 machine_mode rmode = GET_MODE (rhs);
1461 int bits;
1462
1463 if (lcode == ASHIFTRT)
1464 /* Number of bits not shifted off the end. */
1465 bits = (GET_MODE_UNIT_PRECISION (lmode)
1466 - INTVAL (XEXP (lhs, 1)));
1467 else /* lcode == SIGN_EXTEND */
1468 /* Size of inner mode. */
1469 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1470
1471 if (rcode == ASHIFTRT)
1472 bits += (GET_MODE_UNIT_PRECISION (rmode)
1473 - INTVAL (XEXP (rhs, 1)));
1474 else /* rcode == SIGN_EXTEND */
1475 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1476
              /* We can only widen multiplies if the result is mathematically
                 equivalent, i.e. if overflow was impossible.  */
1479 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1480 return simplify_gen_binary
1481 (MULT, mode,
1482 simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1483 simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1484 }
1485 }
1486
1487 /* Check for a sign extension of a subreg of a promoted
1488 variable, where the promotion is sign-extended, and the
1489 target mode is the same as the variable's promotion. */
1490 if (GET_CODE (op) == SUBREG
1491 && SUBREG_PROMOTED_VAR_P (op)
1492 && SUBREG_PROMOTED_SIGNED_P (op)
1493 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1494 {
1495 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1496 if (temp)
1497 return temp;
1498 }
1499
1500 /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1501 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1502 if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1503 {
1504 gcc_assert (GET_MODE_UNIT_PRECISION (mode)
1505 > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
1506 return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1507 GET_MODE (XEXP (op, 0)));
1508 }
1509
      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (sign_extend:M (subreg:O <X>)) if there is a mode O with
         GET_MODE_BITSIZE (N) - I bits.
         (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is similarly (zero_extend:M (subreg:O <X>)).  */
1515 if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1516 && GET_CODE (XEXP (op, 0)) == ASHIFT
1517 && is_a <scalar_int_mode> (mode, &int_mode)
1518 && CONST_INT_P (XEXP (op, 1))
1519 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1520 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1521 GET_MODE_BITSIZE (op_mode) > INTVAL (XEXP (op, 1))))
1522 {
1523 scalar_int_mode tmode;
1524 gcc_assert (GET_MODE_BITSIZE (int_mode)
1525 > GET_MODE_BITSIZE (op_mode));
1526 if (int_mode_for_size (GET_MODE_BITSIZE (op_mode)
1527 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1528 {
1529 rtx inner =
1530 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1531 if (inner)
1532 return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1533 ? SIGN_EXTEND : ZERO_EXTEND,
1534 int_mode, inner, tmode);
1535 }
1536 }
1537
1538 /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1539 (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
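      /* After a logical right shift by a nonzero amount the top bit of the
         shifted value is known to be zero, so sign extension and zero
         extension agree and the zero_extend form is the canonical one.  */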
1540 if (GET_CODE (op) == LSHIFTRT
1541 && CONST_INT_P (XEXP (op, 1))
1542 && XEXP (op, 1) != const0_rtx)
1543 return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1544
1545#if defined(POINTERS_EXTEND_UNSIGNED)
1546 /* As we do not know which address space the pointer is referring to,
1547 we can do this only if the target does not support different pointer
1548 or address modes depending on the address space. */
1549 if (target_default_pointer_address_modes_p ()
1550 && ! POINTERS_EXTEND_UNSIGNED
1551 && mode == Pmode && GET_MODE (op) == ptr_mode
1552 && (CONSTANT_P (op)
1553 || (GET_CODE (op) == SUBREG
1554 && REG_P (SUBREG_REG (op))
1555 && REG_POINTER (SUBREG_REG (op))
1556 && GET_MODE (SUBREG_REG (op)) == Pmode))
1557 && !targetm.have_ptr_extend ())
1558 {
1559 temp
1560 = convert_memory_address_addr_space_1 (Pmode, op,
1561 ADDR_SPACE_GENERIC, false,
1562 true);
1563 if (temp)
1564 return temp;
1565 }
1566#endif
1567 break;
1568
1569 case ZERO_EXTEND:
1570 /* Check for a zero extension of a subreg of a promoted
1571 variable, where the promotion is zero-extended, and the
1572 target mode is the same as the variable's promotion. */
1573 if (GET_CODE (op) == SUBREG
1574 && SUBREG_PROMOTED_VAR_P (op)
1575 && SUBREG_PROMOTED_UNSIGNED_P (op)
1576 && !paradoxical_subreg_p (mode, GET_MODE (SUBREG_REG (op))))
1577 {
1578 temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1579 if (temp)
1580 return temp;
1581 }
1582
1583 /* Extending a widening multiplication should be canonicalized to
1584 a wider widening multiplication. */
1585 if (GET_CODE (op) == MULT)
1586 {
1587 rtx lhs = XEXP (op, 0);
1588 rtx rhs = XEXP (op, 1);
1589 enum rtx_code lcode = GET_CODE (lhs);
1590 enum rtx_code rcode = GET_CODE (rhs);
1591
1592 /* Widening multiplies usually extend both operands, but sometimes
1593 they use a shift to extract a portion of a register. */
1594 if ((lcode == ZERO_EXTEND
1595 || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1596 && (rcode == ZERO_EXTEND
1597 || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1598 {
1599 machine_mode lmode = GET_MODE (lhs);
1600 machine_mode rmode = GET_MODE (rhs);
1601 int bits;
1602
1603 if (lcode == LSHIFTRT)
1604 /* Number of bits not shifted off the end. */
1605 bits = (GET_MODE_UNIT_PRECISION (lmode)
1606 - INTVAL (XEXP (lhs, 1)));
1607 else /* lcode == ZERO_EXTEND */
1608 /* Size of inner mode. */
1609 bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1610
1611 if (rcode == LSHIFTRT)
1612 bits += (GET_MODE_UNIT_PRECISION (rmode)
1613 - INTVAL (XEXP (rhs, 1)));
1614 else /* rcode == ZERO_EXTEND */
1615 bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1616
              /* We can only widen multiplies if the result is mathematically
                 equivalent, i.e. if overflow was impossible.  */
1619 if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1620 return simplify_gen_binary
1621 (MULT, mode,
1622 simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1623 simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1624 }
1625 }
1626
1627 /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1628 if (GET_CODE (op) == ZERO_EXTEND)
1629 return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1630 GET_MODE (XEXP (op, 0)));
1631
      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (zero_extend:M (subreg:O <X>)) if there is a mode O with
         GET_MODE_PRECISION (N) - I bits.  */
1635 if (GET_CODE (op) == LSHIFTRT
1636 && GET_CODE (XEXP (op, 0)) == ASHIFT
1637 && is_a <scalar_int_mode> (mode, &int_mode)
1638 && CONST_INT_P (XEXP (op, 1))
1639 && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1640 && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1641 GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1642 {
1643 scalar_int_mode tmode;
1644 if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1645 - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1646 {
1647 rtx inner =
1648 rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1649 if (inner)
1650 return simplify_gen_unary (ZERO_EXTEND, int_mode,
1651 inner, tmode);
1652 }
1653 }
1654
1655 /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1656 (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1657 of mode N. E.g.
1658 (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1659 (and:SI (reg:SI) (const_int 63)). */
1660 if (partial_subreg_p (op)
1661 && is_a <scalar_int_mode> (mode, &int_mode)
1662 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
1663 && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
1664 && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
1665 && subreg_lowpart_p (op)
1666 && (nonzero_bits (SUBREG_REG (op), op0_mode)
1667 & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1668 {
1669 if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
1670 return SUBREG_REG (op);
1671 return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
1672 op0_mode);
1673 }
1674
1675#if defined(POINTERS_EXTEND_UNSIGNED)
1676 /* As we do not know which address space the pointer is referring to,
1677 we can do this only if the target does not support different pointer
1678 or address modes depending on the address space. */
1679 if (target_default_pointer_address_modes_p ()
1680 && POINTERS_EXTEND_UNSIGNED > 0
1681 && mode == Pmode && GET_MODE (op) == ptr_mode
1682 && (CONSTANT_P (op)
1683 || (GET_CODE (op) == SUBREG
1684 && REG_P (SUBREG_REG (op))
1685 && REG_POINTER (SUBREG_REG (op))
1686 && GET_MODE (SUBREG_REG (op)) == Pmode))
1687 && !targetm.have_ptr_extend ())
1688 {
1689 temp
1690 = convert_memory_address_addr_space_1 (Pmode, op,
1691 ADDR_SPACE_GENERIC, false,
1692 true);
1693 if (temp)
1694 return temp;
1695 }
1696#endif
1697 break;
1698
1699 default:
1700 break;
1701 }
1702
1703 if (VECTOR_MODE_P (mode) && vec_duplicate_p (op, &elt))
1704 {
1705 /* Try applying the operator to ELT and see if that simplifies.
1706 We can duplicate the result if so.
1707
1708 The reason we don't use simplify_gen_unary is that it isn't
1709 necessarily a win to convert things like:
1710
1711 (neg:V (vec_duplicate:V (reg:S R)))
1712
1713 to:
1714
1715 (vec_duplicate:V (neg:S (reg:S R)))
1716
1717 The first might be done entirely in vector registers while the
1718 second might need a move between register files. */
1719 temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
1720 elt, GET_MODE_INNER (GET_MODE (op)));
1721 if (temp)
1722 return gen_vec_duplicate (mode, temp);
1723 }
1724
1725 return 0;
1726}
1727
1728/* Try to compute the value of a unary operation CODE whose output mode is to
1729 be MODE with input operand OP whose mode was originally OP_MODE.
1730 Return zero if the value cannot be computed. */
1731rtx
1732simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1733 rtx op, machine_mode op_mode)
1734{
1735 scalar_int_mode result_mode;
1736
1737 if (code == VEC_DUPLICATE)
1738 {
1739 gcc_assert (VECTOR_MODE_P (mode));
1740 if (GET_MODE (op) != VOIDmode)
1741 {
1742 if (!VECTOR_MODE_P (GET_MODE (op)))
1743 gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1744 else
1745 gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1746 (GET_MODE (op)));
1747 }
1748 if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
1749 return gen_const_vec_duplicate (mode, op);
1750 if (GET_CODE (op) == CONST_VECTOR)
1751 {
1752 unsigned int n_elts = GET_MODE_NUNITS (mode);
1753 unsigned int in_n_elts = CONST_VECTOR_NUNITS (op);
1754 gcc_assert (in_n_elts < n_elts);
1755 gcc_assert ((n_elts % in_n_elts) == 0);
1756 rtvec v = rtvec_alloc (n_elts);
1757 for (unsigned i = 0; i < n_elts; i++)
1758 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
1759 return gen_rtx_CONST_VECTOR (mode, v);
1760 }
1761 }
1762
1763 if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
1764 {
1765 int elt_size = GET_MODE_UNIT_SIZE (mode);
1766 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1767 machine_mode opmode = GET_MODE (op);
1768 int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
1769 unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
1770 rtvec v = rtvec_alloc (n_elts);
1771 unsigned int i;
1772
1773 gcc_assert (op_n_elts == n_elts);
1774 for (i = 0; i < n_elts; i++)
1775 {
1776 rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1777 CONST_VECTOR_ELT (op, i),
1778 GET_MODE_INNER (opmode));
1779 if (!x)
1780 return 0;
1781 RTVEC_ELT (v, i) = x;
1782 }
1783 return gen_rtx_CONST_VECTOR (mode, v);
1784 }
1785
1786 /* The order of these tests is critical so that, for example, we don't
1787 check the wrong mode (input vs. output) for a conversion operation,
1788 such as FIX. At some point, this should be simplified. */
1789
1790 if (code == FLOAT && CONST_SCALAR_INT_P (op))
1791 {
1792 REAL_VALUE_TYPE d;
1793
1794 if (op_mode == VOIDmode)
1795 {
1796 /* CONST_INTs have VOIDmode as their mode. We assume that all
1797 the bits of the constant are significant, though this is
1798 a dangerous assumption: CONST_INTs are often created and
1799 used with garbage in the bits outside of the precision of
1800 the implied mode of the const_int. */
1801 op_mode = MAX_MODE_INT;
1802 }
1803
1804 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
1805
1806 /* Avoid the folding if flag_signaling_nans is on and
1807 operand is a signaling NaN. */
1808 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1809 return 0;
1810
1811 d = real_value_truncate (mode, d);
1812 return const_double_from_real_value (d, mode);
1813 }
1814 else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
1815 {
1816 REAL_VALUE_TYPE d;
1817
1818 if (op_mode == VOIDmode)
1819 {
1820 /* CONST_INTs have VOIDmode as their mode. We assume that all
1821 the bits of the constant are significant, though this is
1822 a dangerous assumption: CONST_INTs are often created and
1823 used with garbage in the bits outside of the precision of
1824 the implied mode of the const_int. */
1825 op_mode = MAX_MODE_INT;
1826 }
1827
1828 real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
1829
1830 /* Avoid the folding if flag_signaling_nans is on and
1831 operand is a signaling NaN. */
1832 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1833 return 0;
1834
1835 d = real_value_truncate (mode, d);
1836 return const_double_from_real_value (d, mode);
1837 }
1838
1839 if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
1840 {
1841 unsigned int width = GET_MODE_PRECISION (result_mode);
1842 wide_int result;
1843 scalar_int_mode imode = (op_mode == VOIDmode
1844 ? result_mode
1845 : as_a <scalar_int_mode> (op_mode));
1846 rtx_mode_t op0 = rtx_mode_t (op, imode);
1847 int int_value;
1848
1849#if TARGET_SUPPORTS_WIDE_INT == 0
1850 /* This assert keeps the simplification from producing a result
1851 that cannot be represented in a CONST_DOUBLE. A lot of
1852 upstream callers expect that this function never fails to
1853 simplify something, so if this check were folded into the test
1854 above, the code would simply die later anyway. If this assert
1855 fires, the port just needs to be converted to support wide int. */
1856 gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
1857#endif
1858
1859 switch (code)
1860 {
1861 case NOT:
1862 result = wi::bit_not (op0);
1863 break;
1864
1865 case NEG:
1866 result = wi::neg (op0);
1867 break;
1868
1869 case ABS:
1870 result = wi::abs (op0);
1871 break;
1872
1873 case FFS:
1874 result = wi::shwi (wi::ffs (op0), result_mode);
1875 break;
1876
1877 case CLZ:
1878 if (wi::ne_p (op0, 0))
1879 int_value = wi::clz (op0);
1880 else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1881 int_value = GET_MODE_PRECISION (imode);
1882 result = wi::shwi (int_value, result_mode);
1883 break;
1884
1885 case CLRSB:
1886 result = wi::shwi (wi::clrsb (op0), result_mode);
1887 break;
1888
1889 case CTZ:
1890 if (wi::ne_p (op0, 0))
1891 int_value = wi::ctz (op0);
1892 else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
1893 int_value = GET_MODE_PRECISION (imode);
1894 result = wi::shwi (int_value, result_mode);
1895 break;
1896
1897 case POPCOUNT:
1898 result = wi::shwi (wi::popcount (op0), result_mode);
1899 break;
1900
1901 case PARITY:
1902 result = wi::shwi (wi::parity (op0), result_mode);
1903 break;
1904
1905 case BSWAP:
1906 result = wide_int (op0).bswap ();
1907 break;
1908
1909 case TRUNCATE:
1910 case ZERO_EXTEND:
1911 result = wide_int::from (op0, width, UNSIGNED);
1912 break;
1913
1914 case SIGN_EXTEND:
1915 result = wide_int::from (op0, width, SIGNED);
1916 break;
1917
1918 case SQRT:
1919 default:
1920 return 0;
1921 }
1922
1923 return immed_wide_int_const (result, result_mode);
1924 }
1925
1926 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1927 && SCALAR_FLOAT_MODE_P (mode)
1928 && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
1929 {
1930 REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
1931 switch (code)
1932 {
1933 case SQRT:
1934 return 0;
1935 case ABS:
1936 d = real_value_abs (&d);
1937 break;
1938 case NEG:
1939 d = real_value_negate (&d);
1940 break;
1941 case FLOAT_TRUNCATE:
1942 /* Don't perform the operation if flag_signaling_nans is on
1943 and the operand is a signaling NaN. */
1944 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1945 return NULL_RTX;
1946 d = real_value_truncate (mode, d);
1947 break;
1948 case FLOAT_EXTEND:
1949 /* Don't perform the operation if flag_signaling_nans is on
1950 and the operand is a signaling NaN. */
1951 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1952 return NULL_RTX;
1953 /* All this does is change the mode, unless the conversion
1954 crosses mode classes. */
1955 if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
1956 real_convert (&d, mode, &d);
1957 break;
1958 case FIX:
1959 /* Don't perform the operation if flag_signaling_nans is on
1960 and the operand is a signaling NaN. */
1961 if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
1962 return NULL_RTX;
1963 real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
1964 break;
1965 case NOT:
1966 {
1967 long tmp[4];
1968 int i;
1969
1970 real_to_target (tmp, &d, GET_MODE (op));
1971 for (i = 0; i < 4; i++)
1972 tmp[i] = ~tmp[i];
1973 real_from_target (&d, tmp, mode);
1974 break;
1975 }
1976 default:
1977 gcc_unreachable ();
1978 }
1979 return const_double_from_real_value (d, mode);
1980 }
1981 else if (CONST_DOUBLE_AS_FLOAT_P (op)
1982 && SCALAR_FLOAT_MODE_P (GET_MODE (op))
1983 && is_int_mode (mode, &result_mode))
1984 {
1985 unsigned int width = GET_MODE_PRECISION (result_mode);
1986 /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
1987 operators are intentionally left unspecified (to ease implementation
1988 by target backends), for consistency, this routine implements the
1989 same semantics for constant folding as used by the middle-end. */
1990
1991 /* This was formerly used only for non-IEEE float.
1992 eggert@twinsun.com says it is safe for IEEE also. */
1993 REAL_VALUE_TYPE t;
1994 const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
1995 wide_int wmax, wmin;
1996 /* FAIL is part of the interface to real_to_integer, but we
1997 check the bounds before making this call. */
1998 bool fail;
1999
2000 switch (code)
2001 {
2002 case FIX:
2003 if (REAL_VALUE_ISNAN (*x))
2004 return const0_rtx;
2005
2006 /* Test against the signed upper bound. */
2007 wmax = wi::max_value (width, SIGNED);
2008 real_from_integer (&t, VOIDmode, wmax, SIGNED);
2009 if (real_less (&t, x))
2010 return immed_wide_int_const (wmax, mode);
2011
2012 /* Test against the signed lower bound. */
2013 wmin = wi::min_value (width, SIGNED);
2014 real_from_integer (&t, VOIDmode, wmin, SIGNED);
2015 if (real_less (x, &t))
2016 return immed_wide_int_const (wmin, mode);
2017
2018 return immed_wide_int_const (real_to_integer (x, &fail, width),
2019 mode);
2020
2021 case UNSIGNED_FIX:
2022 if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
2023 return const0_rtx;
2024
2025 /* Test against the unsigned upper bound. */
2026 wmax = wi::max_value (width, UNSIGNED);
2027 real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
2028 if (real_less (&t, x))
2029 return immed_wide_int_const (wmax, mode);
2030
2031 return immed_wide_int_const (real_to_integer (x, &fail, width),
2032 mode);
2033
2034 default:
2035 gcc_unreachable ();
2036 }
2037 }
2038
2039 return NULL_RTX;
2040}
2041
2042/* Subroutine of simplify_binary_operation to simplify a binary operation
2043 CODE that can commute with byte swapping, with result mode MODE and
2044 operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2045 Return zero if no simplification or canonicalization is possible. */
2046
2047static rtx
2048simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
2049 rtx op0, rtx op1)
2050{
2051 rtx tem;
2052
2053 /* (op (bswap x) C1) -> (bswap (op x C2)), where C2 is C1 byte-swapped. */
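  /* Illustrative example (constants shown as unsigned 32-bit values,
     chosen for exposition): (and:SI (bswap:SI <x>) (const_int 0xff))
     becomes (bswap:SI (and:SI <x> (const_int 0xff000000))), since
     keeping the low byte of a byte-swapped value is the same as
     byte-swapping the value with only its top byte kept.  */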
2054 if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2055 {
2056 tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2057 simplify_gen_unary (BSWAP, mode, op1, mode));
2058 return simplify_gen_unary (BSWAP, mode, tem, mode);
2059 }
2060
2061 /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2062 if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2063 {
2064 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2065 return simplify_gen_unary (BSWAP, mode, tem, mode);
2066 }
2067
2068 return NULL_RTX;
2069}
2070
2071/* Subroutine of simplify_binary_operation to simplify a commutative,
2072 associative binary operation CODE with result mode MODE, operating
2073 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2074 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2075 canonicalization is possible. */
2076
2077static rtx
2078simplify_associative_operation (enum rtx_code code, machine_mode mode,
2079 rtx op0, rtx op1)
2080{
2081 rtx tem;
2082
2083 /* Linearize the operator to the left. */
2084 if (GET_CODE (op1) == code)
2085 {
2086 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2087 if (GET_CODE (op0) == code)
2088 {
2089 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2090 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2091 }
2092
2093 /* "a op (b op c)" becomes "(b op c) op a". */
2094 if (! swap_commutative_operands_p (op1, op0))
2095 return simplify_gen_binary (code, mode, op1, op0);
2096
2097 std::swap (op0, op1);
2098 }
2099
2100 if (GET_CODE (op0) == code)
2101 {
2102 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2103 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2104 {
2105 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2106 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2107 }
2108
2109 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2110 tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2111 if (tem != 0)
2112 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2113
2114 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2115 tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2116 if (tem != 0)
2117 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2118 }
2119
2120 return 0;
2121}
2122
2123
2124/* Simplify a binary operation CODE with result mode MODE, operating on OP0
2125 and OP1. Return 0 if no simplification is possible.
2126
2127 Don't use this for relational operations such as EQ or LT.
2128 Use simplify_relational_operation instead. */
2129rtx
2130simplify_binary_operation (enum rtx_code code, machine_mode mode,
2131 rtx op0, rtx op1)
2132{
2133 rtx trueop0, trueop1;
2134 rtx tem;
2135
2136 /* Relational operations don't work here. We must know the mode
2137 of the operands in order to do the comparison correctly.
2138 Assuming a full word can give incorrect results.
2139 Consider comparing 128 with -128 in QImode. */
2140 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2141 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2142
2143 /* Make sure the constant is second. */
2144 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2145 && swap_commutative_operands_p (op0, op1))
2146 std::swap (op0, op1);
2147
2148 trueop0 = avoid_constant_pool_reference (op0);
2149 trueop1 = avoid_constant_pool_reference (op1);
2150
2151 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2152 if (tem)
2153 return tem;
2154 tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2155
2156 if (tem)
2157 return tem;
2158
2159 /* If the above steps did not result in a simplification and op0 or op1
2160 were constant pool references, use the referenced constants directly. */
2161 if (trueop0 != op0 || trueop1 != op1)
2162 return simplify_gen_binary (code, mode, trueop0, trueop1);
2163
2164 return NULL_RTX;
2165}
2166
2167/* Subroutine of simplify_binary_operation_1 that looks for cases in
2168 which OP0 and OP1 are both vector series or vector duplicates
2169 (which are really just series with a step of 0). If so, try to
2170 form a new series by applying CODE to the bases and to the steps.
2171 Return null if no simplification is possible.
2172
2173 MODE is the mode of the operation and is known to be a vector
2174 integer mode. */
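/* A worked instance (illustrative values): adding the series
   {0, 1, 2, ...} to the duplicate {5, 5, 5, ...} yields the series
   {5, 6, 7, ...}, since both the base addition (0 + 5) and the step
   addition (1 + 0) simplify.  */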
2175
2176static rtx
2177simplify_binary_operation_series (rtx_code code, machine_mode mode,
2178 rtx op0, rtx op1)
2179{
2180 rtx base0, step0;
2181 if (vec_duplicate_p (op0, &base0))
2182 step0 = const0_rtx;
2183 else if (!vec_series_p (op0, &base0, &step0))
2184 return NULL_RTX;
2185
2186 rtx base1, step1;
2187 if (vec_duplicate_p (op1, &base1))
2188 step1 = const0_rtx;
2189 else if (!vec_series_p (op1, &base1, &step1))
2190 return NULL_RTX;
2191
2192 /* Only create a new series if we can simplify both parts. In other
2193 cases this isn't really a simplification, and it's not necessarily
2194 a win to replace a vector operation with a scalar operation. */
2195 scalar_mode inner_mode = GET_MODE_INNER (mode);
2196 rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
2197 if (!new_base)
2198 return NULL_RTX;
2199
2200 rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
2201 if (!new_step)
2202 return NULL_RTX;
2203
2204 return gen_vec_series (mode, new_base, new_step);
2205}
2206
2207/* Subroutine of simplify_binary_operation. Simplify a binary operation
2208 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
2209 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
2210 actual constants. */
2211
2212static rtx
2213simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
2214 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
2215{
2216 rtx tem, reversed, opleft, opright, elt0, elt1;
2217 HOST_WIDE_INT val;
2218 scalar_int_mode int_mode, inner_mode;
2219
2220 /* Even if we can't compute a constant result,
2221 there are some cases worth simplifying. */
2222
2223 switch (code)
2224 {
2225 case PLUS:
2226 /* Maybe simplify x + 0 to x. The two expressions are equivalent
2227 when x is NaN, infinite, or finite and nonzero. They aren't
2228 when x is -0 and the rounding mode is not towards -infinity,
2229 since (-0) + 0 is then 0. */
2230 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
2231 return op0;
2232
2233 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
2234 transformations are safe even for IEEE. */
2235 if (GET_CODE (op0) == NEG)
2236 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
2237 else if (GET_CODE (op1) == NEG)
2238 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
2239
2240 /* (~a) + 1 -> -a */
2241 if (INTEGRAL_MODE_P (mode)
2242 && GET_CODE (op0) == NOT
2243 && trueop1 == const1_rtx)
2244 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
2245
2246 /* Handle both-operands-constant cases. We can only add
2247 CONST_INTs to constants since the sum of relocatable symbols
2248 can't be handled by most assemblers. Don't add CONST_INT
2249 to CONST_INT since overflow won't be computed properly if wider
2250 than HOST_BITS_PER_WIDE_INT. */
2251
2252 if ((GET_CODE (op0) == CONST
2253 || GET_CODE (op0) == SYMBOL_REF
2254 || GET_CODE (op0) == LABEL_REF)
2255 && CONST_INT_P (op1))
2256 return plus_constant (mode, op0, INTVAL (op1));
2257 else if ((GET_CODE (op1) == CONST
2258 || GET_CODE (op1) == SYMBOL_REF
2259 || GET_CODE (op1) == LABEL_REF)
2260 && CONST_INT_P (op0))
2261 return plus_constant (mode, op1, INTVAL (op0));
2262
2263 /* See if this is something like X * C - X or vice versa or
2264 if the multiplication is written as a shift. If so, we can
2265 distribute and make a new multiply, shift, or maybe just
2266 have X (if C is 2 in the example above). But don't make
2267 something more expensive than we had before. */
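      /* For example (values chosen for illustration):
	   (plus:SI (mult:SI <X> (const_int 3)) <X>)
	 distributes to (mult:SI <X> (const_int 4)), which is returned only
	 if the cost check below says it is no more expensive than the
	 original form.  */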
2268
2269 if (is_a <scalar_int_mode> (mode, &int_mode))
2270 {
2271 rtx lhs = op0, rhs = op1;
2272
2273 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2274 wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2275
2276 if (GET_CODE (lhs) == NEG)
2277 {
2278 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2279 lhs = XEXP (lhs, 0);
2280 }
2281 else if (GET_CODE (lhs) == MULT
2282 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2283 {
2284 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2285 lhs = XEXP (lhs, 0);
2286 }
2287 else if (GET_CODE (lhs) == ASHIFT
2288 && CONST_INT_P (XEXP (lhs, 1))
2289 && INTVAL (XEXP (lhs, 1)) >= 0
2290 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2291 {
2292 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2293 GET_MODE_PRECISION (int_mode));
2294 lhs = XEXP (lhs, 0);
2295 }
2296
2297 if (GET_CODE (rhs) == NEG)
2298 {
2299 coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2300 rhs = XEXP (rhs, 0);
2301 }
2302 else if (GET_CODE (rhs) == MULT
2303 && CONST_INT_P (XEXP (rhs, 1)))
2304 {
2305 coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
2306 rhs = XEXP (rhs, 0);
2307 }
2308 else if (GET_CODE (rhs) == ASHIFT
2309 && CONST_INT_P (XEXP (rhs, 1))
2310 && INTVAL (XEXP (rhs, 1)) >= 0
2311 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2312 {
2313 coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2314 GET_MODE_PRECISION (int_mode));
2315 rhs = XEXP (rhs, 0);
2316 }
2317
2318 if (rtx_equal_p (lhs, rhs))
2319 {
2320 rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
2321 rtx coeff;
2322 bool speed = optimize_function_for_speed_p (cfun);
2323
2324 coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);
2325
2326 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2327 return (set_src_cost (tem, int_mode, speed)
2328 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2329 }
2330 }
2331
2332 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
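      /* Illustrative example (QImode values chosen for exposition): the
	 QImode sign bit is (const_int -128), and adding it can only flip
	 that bit (the carry out is discarded), so
	   (plus:QI (xor:QI <X> (const_int 1)) (const_int -128))
	 becomes (xor:QI <X> (const_int -127)).  */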
2333 if (CONST_SCALAR_INT_P (op1)
2334 && GET_CODE (op0) == XOR
2335 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2336 && mode_signbit_p (mode, op1))
2337 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2338 simplify_gen_binary (XOR, mode, op1,
2339 XEXP (op0, 1)));
2340
2341 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
2342 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2343 && GET_CODE (op0) == MULT
2344 && GET_CODE (XEXP (op0, 0)) == NEG)
2345 {
2346 rtx in1, in2;
2347
2348 in1 = XEXP (XEXP (op0, 0), 0);
2349 in2 = XEXP (op0, 1);
2350 return simplify_gen_binary (MINUS, mode, op1,
2351 simplify_gen_binary (MULT, mode,
2352 in1, in2));
2353 }
2354
2355 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
2356 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
2357 is 1. */
2358 if (COMPARISON_P (op0)
2359 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
2360 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
2361 && (reversed = reversed_comparison (op0, mode)))
2362 return
2363 simplify_gen_unary (NEG, mode, reversed, mode);
2364
2365 /* If one of the operands is a PLUS or a MINUS, see if we can
2366 simplify this by the associative law.
2367 Don't use the associative law for floating point.
2368 The inaccuracy makes it nonassociative,
2369 and subtle programs can break if operations are associated. */
2370
2371 if (INTEGRAL_MODE_P (mode)
2372 && (plus_minus_operand_p (op0)
2373 || plus_minus_operand_p (op1))
2374 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2375 return tem;
2376
2377 /* Reassociate floating point addition only when the user
2378 specifies associative math operations. */
2379 if (FLOAT_MODE_P (mode)
2380 && flag_associative_math)
2381 {
2382 tem = simplify_associative_operation (code, mode, op0, op1);
2383 if (tem)
2384 return tem;
2385 }
2386
2387 /* Handle vector series. */
2388 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2389 {
2390 tem = simplify_binary_operation_series (code, mode, op0, op1);
2391 if (tem)
2392 return tem;
2393 }
2394 break;
2395
2396 case COMPARE:
2397 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
2398 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
2399 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
2400 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
2401 {
2402 rtx xop00 = XEXP (op0, 0);
2403 rtx xop10 = XEXP (op1, 0);
2404
2405 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
2406 return xop00;
2407
2408 if (REG_P (xop00) && REG_P (xop10)
2409 && REGNO (xop00) == REGNO (xop10)
2410 && GET_MODE (xop00) == mode
2411 && GET_MODE (xop10) == mode
2412 && GET_MODE_CLASS (mode) == MODE_CC)
2413 return xop00;
2414 }
2415 break;
2416
2417 case MINUS:
2418 /* We can't assume x-x is 0 even with non-IEEE floating point,
2419 but since it is zero except in very strange circumstances, we
2420 will treat it as zero with -ffinite-math-only. */
2421 if (rtx_equal_p (trueop0, trueop1)
2422 && ! side_effects_p (op0)
2423 && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
2424 return CONST0_RTX (mode);
2425
2426 /* Change subtraction from zero into negation. (0 - x) is the
2427 same as -x when x is NaN, infinite, or finite and nonzero.
2428 But if the mode has signed zeros, and does not round towards
2429 -infinity, then 0 - 0 is 0, not -0. */
2430 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
2431 return simplify_gen_unary (NEG, mode, op1, mode);
2432
2433 /* (-1 - a) is ~a, unless the expression contains symbolic
2434 constants, in which case not retaining additions and
2435 subtractions could cause invalid assembly to be produced. */
2436 if (trueop0 == constm1_rtx
2437 && !contains_symbolic_reference_p (op1))
2438 return simplify_gen_unary (NOT, mode, op1, mode);
2439
2440 /* Subtracting 0 has no effect unless the mode has signed zeros
2441 and supports rounding towards -infinity. In such a case,
2442 0 - 0 is -0. */
2443 if (!(HONOR_SIGNED_ZEROS (mode)
2444 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2445 && trueop1 == CONST0_RTX (mode))
2446 return op0;
2447
2448 /* See if this is something like X * C - X or vice versa or
2449 if the multiplication is written as a shift. If so, we can
2450 distribute and make a new multiply, shift, or maybe just
2451 have X (if C is 2 in the example above). But don't make
2452 something more expensive than we had before. */
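      /* For example (values chosen for illustration):
	   (minus:SI (ashift:SI <X> (const_int 2)) <X>)
	 is <X> * 4 - <X> and distributes to (mult:SI <X> (const_int 3)),
	 again subject to the cost comparison below.  */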
2453
2454 if (is_a <scalar_int_mode> (mode, &int_mode))
2455 {
2456 rtx lhs = op0, rhs = op1;
2457
2458 wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
2459 wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2460
2461 if (GET_CODE (lhs) == NEG)
2462 {
2463 coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
2464 lhs = XEXP (lhs, 0);
2465 }
2466 else if (GET_CODE (lhs) == MULT
2467 && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
2468 {
2469 coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
2470 lhs = XEXP (lhs, 0);
2471 }
2472 else if (GET_CODE (lhs) == ASHIFT
2473 && CONST_INT_P (XEXP (lhs, 1))
2474 && INTVAL (XEXP (lhs, 1)) >= 0
2475 && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
2476 {
2477 coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
2478 GET_MODE_PRECISION (int_mode));
2479 lhs = XEXP (lhs, 0);
2480 }
2481
2482 if (GET_CODE (rhs) == NEG)
2483 {
2484 negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
2485 rhs = XEXP (rhs, 0);
2486 }
2487 else if (GET_CODE (rhs) == MULT
2488 && CONST_INT_P (XEXP (rhs, 1)))
2489 {
2490 negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
2491 rhs = XEXP (rhs, 0);
2492 }
2493 else if (GET_CODE (rhs) == ASHIFT
2494 && CONST_INT_P (XEXP (rhs, 1))
2495 && INTVAL (XEXP (rhs, 1)) >= 0
2496 && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
2497 {
2498 negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
2499 GET_MODE_PRECISION (int_mode));
2500 negcoeff1 = -negcoeff1;
2501 rhs = XEXP (rhs, 0);
2502 }
2503
2504 if (rtx_equal_p (lhs, rhs))
2505 {
2506 rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
2507 rtx coeff;
2508 bool speed = optimize_function_for_speed_p (cfun);
2509
2510 coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);
2511
2512 tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
2513 return (set_src_cost (tem, int_mode, speed)
2514 <= set_src_cost (orig, int_mode, speed) ? tem : 0);
2515 }
2516 }
2517
2518 /* (a - (-b)) -> (a + b). True even for IEEE. */
2519 if (GET_CODE (op1) == NEG)
2520 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
2521
2522 /* (-x - c) may be simplified as (-c - x). */
2523 if (GET_CODE (op0) == NEG
2524 && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
2525 {
2526 tem = simplify_unary_operation (NEG, mode, op1, mode);
2527 if (tem)
2528 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
2529 }
2530
2531 /* Don't let a relocatable value get a negative coeff. */
2532 if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
2533 return simplify_gen_binary (PLUS, mode,
2534 op0,
2535 neg_const_int (mode, op1));
2536
2537 /* (x - (x & y)) -> (x & ~y) */
2538 if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
2539 {
2540 if (rtx_equal_p (op0, XEXP (op1, 0)))
2541 {
2542 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
2543 GET_MODE (XEXP (op1, 1)));
2544 return simplify_gen_binary (AND, mode, op0, tem);
2545 }
2546 if (rtx_equal_p (op0, XEXP (op1, 1)))
2547 {
2548 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
2549 GET_MODE (XEXP (op1, 0)));
2550 return simplify_gen_binary (AND, mode, op0, tem);
2551 }
2552 }
2553
2554 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
2555 by reversing the comparison code if valid. */
2556 if (STORE_FLAG_VALUE == 1
2557 && trueop0 == const1_rtx
2558 && COMPARISON_P (op1)
2559 && (reversed = reversed_comparison (op1, mode)))
2560 return reversed;
2561
2562 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
2563 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2564 && GET_CODE (op1) == MULT
2565 && GET_CODE (XEXP (op1, 0)) == NEG)
2566 {
2567 rtx in1, in2;
2568
2569 in1 = XEXP (XEXP (op1, 0), 0);
2570 in2 = XEXP (op1, 1);
2571 return simplify_gen_binary (PLUS, mode,
2572 simplify_gen_binary (MULT, mode,
2573 in1, in2),
2574 op0);
2575 }
2576
2577 /* Canonicalize (minus (neg A) (mult B C)) to
2578 (minus (mult (neg B) C) A). */
2579 if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2580 && GET_CODE (op1) == MULT
2581 && GET_CODE (op0) == NEG)
2582 {
2583 rtx in1, in2;
2584
2585 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
2586 in2 = XEXP (op1, 1);
2587 return simplify_gen_binary (MINUS, mode,
2588 simplify_gen_binary (MULT, mode,
2589 in1, in2),
2590 XEXP (op0, 0));
2591 }
2592
2593 /* If one of the operands is a PLUS or a MINUS, see if we can
2594 simplify this by the associative law. This will, for example,
2595 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
2596 Don't use the associative law for floating point.
2597 The inaccuracy makes it nonassociative,
2598 and subtle programs can break if operations are associated. */
2599
2600 if (INTEGRAL_MODE_P (mode)
2601 && (plus_minus_operand_p (op0)
2602 || plus_minus_operand_p (op1))
2603 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
2604 return tem;
2605
2606 /* Handle vector series. */
2607 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
2608 {
2609 tem = simplify_binary_operation_series (code, mode, op0, op1);
2610 if (tem)
2611 return tem;
2612 }
2613 break;
2614
2615 case MULT:
2616 if (trueop1 == constm1_rtx)
2617 return simplify_gen_unary (NEG, mode, op0, mode);
2618
2619 if (GET_CODE (op0) == NEG)
2620 {
2621 rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
2622 /* If op1 is a MULT as well and simplify_unary_operation
2623 just moved the NEG to the second operand, simplify_gen_binary
2624 below could, through simplify_associative_operation, move
2625 the NEG around again and recurse endlessly. */
2626 if (temp
2627 && GET_CODE (op1) == MULT
2628 && GET_CODE (temp) == MULT
2629 && XEXP (op1, 0) == XEXP (temp, 0)
2630 && GET_CODE (XEXP (temp, 1)) == NEG
2631 && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
2632 temp = NULL_RTX;
2633 if (temp)
2634 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
2635 }
2636 if (GET_CODE (op1) == NEG)
2637 {
2638 rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
2639 /* If op0 is a MULT as well and simplify_unary_operation
2640 just moved the NEG to the second operand, simplify_gen_binary
2641 below could, through simplify_associative_operation, move
2642 the NEG around again and recurse endlessly. */
2643 if (temp
2644 && GET_CODE (op0) == MULT
2645 && GET_CODE (temp) == MULT
2646 && XEXP (op0, 0) == XEXP (temp, 0)
2647 && GET_CODE (XEXP (temp, 1)) == NEG
2648 && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
2649 temp = NULL_RTX;
2650 if (temp)
2651 return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
2652 }
2653
2654 /* Maybe simplify x * 0 to 0. The reduction is not valid if
2655 x is NaN, since x * 0 is then also NaN. Nor is it valid
2656 when the mode has signed zeros, since multiplying a negative
2657 number by 0 will give -0, not 0. */
2658 if (!HONOR_NANS (mode)
2659 && !HONOR_SIGNED_ZEROS (mode)
2660 && trueop1 == CONST0_RTX (mode)
2661 && ! side_effects_p (op0))
2662 return op1;
2663
2664 /* In IEEE floating point, x*1 is not equivalent to x for
2665 signalling NaNs. */
2666 if (!HONOR_SNANS (mode)
2667 && trueop1 == CONST1_RTX (mode))
2668 return op0;
2669
2670 /* Convert multiply by constant power of two into shift. */
2671 if (CONST_SCALAR_INT_P (trueop1))
2672 {
2673 val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
2674 if (val >= 0)
2675 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
2676 }
2677
2678 /* x*2 is x+x and x*(-1) is -x */
2679 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
2680 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
2681 && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
2682 && GET_MODE (op0) == mode)
2683 {
2684 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
2685
2686 if (real_equal (d1, &dconst2))
2687 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
2688
2689 if (!HONOR_SNANS (mode)
2690 && real_equal (d1, &dconstm1))
2691 return simplify_gen_unary (NEG, mode, op0, mode);
2692 }
2693
2694 /* Optimize -x * -x as x * x. */
2695 if (FLOAT_MODE_P (mode)
2696 && GET_CODE (op0) == NEG
2697 && GET_CODE (op1) == NEG
2698 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2699 && !side_effects_p (XEXP (op0, 0)))
2700 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2701
2702 /* Likewise, optimize abs(x) * abs(x) as x * x. */
2703 if (SCALAR_FLOAT_MODE_P (mode)
2704 && GET_CODE (op0) == ABS
2705 && GET_CODE (op1) == ABS
2706 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2707 && !side_effects_p (XEXP (op0, 0)))
2708 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
2709
2710 /* Reassociate multiplication, but for floating point MULTs
2711 only when the user specifies unsafe math optimizations. */
2712 if (! FLOAT_MODE_P (mode)
2713 || flag_unsafe_math_optimizations)
2714 {
2715 tem = simplify_associative_operation (code, mode, op0, op1);
2716 if (tem)
2717 return tem;
2718 }
2719 break;
2720
2721 case IOR:
2722 if (trueop1 == CONST0_RTX (mode))
2723 return op0;
2724 if (INTEGRAL_MODE_P (mode)
2725 && trueop1 == CONSTM1_RTX (mode)
2726 && !side_effects_p (op0))
2727 return op1;
2728 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2729 return op0;
2730 /* A | (~A) -> -1 */
2731 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2732 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2733 && ! side_effects_p (op0)
2734 && SCALAR_INT_MODE_P (mode))
2735 return constm1_rtx;
2736
2737 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
2738 if (CONST_INT_P (op1)
2739 && HWI_COMPUTABLE_MODE_P (mode)
2740 && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
2741 && !side_effects_p (op0))
2742 return op1;
2743
2744 /* Canonicalize (X & C1) | C2. */
2745 if (GET_CODE (op0) == AND
2746 && CONST_INT_P (trueop1)
2747 && CONST_INT_P (XEXP (op0, 1)))
2748 {
2749 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
2750 HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
2751 HOST_WIDE_INT c2 = INTVAL (trueop1);
2752
2753 /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
2754 if ((c1 & c2) == c1
2755 && !side_effects_p (XEXP (op0, 0)))
2756 return trueop1;
2757
2758 /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
2759 if (((c1|c2) & mask) == mask)
2760 return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
2761 }
2762
2763 /* Convert (A & B) | A to A. */
2764 if (GET_CODE (op0) == AND
2765 && (rtx_equal_p (XEXP (op0, 0), op1)
2766 || rtx_equal_p (XEXP (op0, 1), op1))
2767 && ! side_effects_p (XEXP (op0, 0))
2768 && ! side_effects_p (XEXP (op0, 1)))
2769 return op1;
2770
2771 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2772 mode size to (rotate A CX). */
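      /* Illustrative example (SImode, shift counts chosen for exposition):
	   (ior:SI (ashift:SI <X> (const_int 24))
		   (lshiftrt:SI <X> (const_int 8)))
	 becomes (rotate:SI <X> (const_int 24)).  */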
2773
2774 if (GET_CODE (op1) == ASHIFT
2775 || GET_CODE (op1) == SUBREG)
2776 {
2777 opleft = op1;
2778 opright = op0;
2779 }
2780 else
2781 {
2782 opright = op1;
2783 opleft = op0;
2784 }
2785
2786 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2787 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2788 && CONST_INT_P (XEXP (opleft, 1))
2789 && CONST_INT_P (XEXP (opright, 1))
2790 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2791 == GET_MODE_UNIT_PRECISION (mode)))
2792 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2793
2794 /* Same, but for ashift that has been "simplified" to a wider mode
2795 by simplify_shift_const. */
2796
2797 if (GET_CODE (opleft) == SUBREG
2798 && is_a <scalar_int_mode> (mode, &int_mode)
2799 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
2800 &inner_mode)
2801 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2802 && GET_CODE (opright) == LSHIFTRT
2803 && GET_CODE (XEXP (opright, 0)) == SUBREG
2804 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2805 && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
2806 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2807 SUBREG_REG (XEXP (opright, 0)))
2808 && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2809 && CONST_INT_P (XEXP (opright, 1))
2810 && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
2811 + INTVAL (XEXP (opright, 1))
2812 == GET_MODE_PRECISION (int_mode)))
2813 return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
2814 XEXP (SUBREG_REG (opleft), 1));
2815
2816 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2817 a (sign_extend (plus ...)). If OP1 is a CONST_INT and
2818 the PLUS does not affect any of the bits in OP1, we can do
2819 the IOR as a PLUS and we can associate. This is valid if OP1
2820 can be safely shifted left C bits. */
2821 if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
2822 && GET_CODE (XEXP (op0, 0)) == PLUS
2823 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
2824 && CONST_INT_P (XEXP (op0, 1))
2825 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2826 {
2827 int count = INTVAL (XEXP (op0, 1));
2828 HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
2829
2830 if (mask >> count == INTVAL (trueop1)
2831 && trunc_int_for_mode (mask, mode) == mask
2832 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2833 return simplify_gen_binary (ASHIFTRT, mode,
2834 plus_constant (mode, XEXP (op0, 0),
2835 mask),
2836 XEXP (op0, 1));
2837 }
2838
2839 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
2840 if (tem)
2841 return tem;
2842
2843 tem = simplify_associative_operation (code, mode, op0, op1);
2844 if (tem)
2845 return tem;
2846 break;
2847
2848 case XOR:
2849 if (trueop1 == CONST0_RTX (mode))
2850 return op0;
2851 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
2852 return simplify_gen_unary (NOT, mode, op0, mode);
2853 if (rtx_equal_p (trueop0, trueop1)
2854 && ! side_effects_p (op0)
2855 && GET_MODE_CLASS (mode) != MODE_CC)
2856 return CONST0_RTX (mode);
2857
2858 /* Canonicalize XOR of the most significant bit to PLUS. */
2859 if (CONST_SCALAR_INT_P (op1)
2860 && mode_signbit_p (mode, op1))
2861 return simplify_gen_binary (PLUS, mode, op0, op1);
2862 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2863 if (CONST_SCALAR_INT_P (op1)
2864 && GET_CODE (op0) == PLUS
2865 && CONST_SCALAR_INT_P (XEXP (op0, 1))
2866 && mode_signbit_p (mode, XEXP (op0, 1)))
2867 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2868 simplify_gen_binary (XOR, mode, op1,
2869 XEXP (op0, 1)));
2870
2871 /* If we are XORing two things that have no bits in common,
2872 convert them into an IOR. This helps to detect rotation encoded
2873 using those methods and possibly other simplifications. */
2874
2875 if (HWI_COMPUTABLE_MODE_P (mode)
2876 && (nonzero_bits (op0, mode)
2877 & nonzero_bits (op1, mode)) == 0)
2878 return (simplify_gen_binary (IOR, mode, op0, op1));
2879
2880 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2881 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2882 (NOT y). */
2883 {
2884 int num_negated = 0;
2885
2886 if (GET_CODE (op0) == NOT)
2887 num_negated++, op0 = XEXP (op0, 0);
2888 if (GET_CODE (op1) == NOT)
2889 num_negated++, op1 = XEXP (op1, 0);
2890
2891 if (num_negated == 2)
2892 return simplify_gen_binary (XOR, mode, op0, op1);
2893 else if (num_negated == 1)
2894 return simplify_gen_unary (NOT, mode,
2895 simplify_gen_binary (XOR, mode, op0, op1),
2896 mode);
2897 }
2898
2899 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2900 correspond to a machine insn or result in further simplifications
2901 if B is a constant. */
2902
2903 if (GET_CODE (op0) == AND
2904 && rtx_equal_p (XEXP (op0, 1), op1)
2905 && ! side_effects_p (op1))
2906 return simplify_gen_binary (AND, mode,
2907 simplify_gen_unary (NOT, mode,
2908 XEXP (op0, 0), mode),
2909 op1);
2910
2911 else if (GET_CODE (op0) == AND
2912 && rtx_equal_p (XEXP (op0, 0), op1)
2913 && ! side_effects_p (op1))
2914 return simplify_gen_binary (AND, mode,
2915 simplify_gen_unary (NOT, mode,
2916 XEXP (op0, 1), mode),
2917 op1);
2918
2919 /* Given (xor (ior (xor A B) C) D), where B, C and D are
2920 constants, simplify to (xor (ior A C) ((B&~C)^D)), canceling
2921 out bits inverted twice and not set by C. Similarly, given
2922 (xor (and (xor A B) C) D), simplify without inverting C in
2923 the xor operand: (xor (and A C) ((B&C)^D)).
2924 */
2925 else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
2926 && GET_CODE (XEXP (op0, 0)) == XOR
2927 && CONST_INT_P (op1)
2928 && CONST_INT_P (XEXP (op0, 1))
2929 && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
2930 {
2931 enum rtx_code op = GET_CODE (op0);
2932 rtx a = XEXP (XEXP (op0, 0), 0);
2933 rtx b = XEXP (XEXP (op0, 0), 1);
2934 rtx c = XEXP (op0, 1);
2935 rtx d = op1;
2936 HOST_WIDE_INT bval = INTVAL (b);
2937 HOST_WIDE_INT cval = INTVAL (c);
2938 HOST_WIDE_INT dval = INTVAL (d);
2939 HOST_WIDE_INT xcval;
2940
2941 if (op == IOR)
2942 xcval = ~cval;
2943 else
2944 xcval = cval;
2945
2946 return simplify_gen_binary (XOR, mode,
2947 simplify_gen_binary (op, mode, a, c),
2948 gen_int_mode ((bval & xcval) ^ dval,
2949 mode));
2950 }
2951
2952 /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
2953 we can transform like this:
2954 (A&B)^C == ~(A&B)&C | ~C&(A&B)
2955 == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
2956 == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
2957 Attempt a few simplifications when B and C are both constants. */
2958 if (GET_CODE (op0) == AND
2959 && CONST_INT_P (op1)
2960 && CONST_INT_P (XEXP (op0, 1)))
2961 {
2962 rtx a = XEXP (op0, 0);
2963 rtx b = XEXP (op0, 1);
2964 rtx c = op1;
2965 HOST_WIDE_INT bval = INTVAL (b);
2966 HOST_WIDE_INT cval = INTVAL (c);
2967
2968 /* Instead of computing ~A&C, we compute its negated value,
2969 ~(A|~C). If it yields -1, ~A&C is zero, so we can
2970 optimize for sure. If it does not simplify, we still try
2971 to compute ~A&C below, but since that always allocates
2972 RTL, we don't try that before committing to returning a
2973 simplified expression. */
2974 rtx n_na_c = simplify_binary_operation (IOR, mode, a,
2975 GEN_INT (~cval));
2976
2977 if ((~cval & bval) == 0)
2978 {
2979 rtx na_c = NULL_RTX;
2980 if (n_na_c)
2981 na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
2982 else
2983 {
2984 /* If ~A does not simplify, don't bother: we don't
2985 want to simplify 2 operations into 3, and if na_c
2986 were to simplify with na, n_na_c would have
2987 simplified as well. */
2988 rtx na = simplify_unary_operation (NOT, mode, a, mode);
2989 if (na)
2990 na_c = simplify_gen_binary (AND, mode, na, c);
2991 }
2992
2993 /* Try to simplify ~A&C | ~B&C. */
2994 if (na_c != NULL_RTX)
2995 return simplify_gen_binary (IOR, mode, na_c,
2996 gen_int_mode (~bval & cval, mode));
2997 }
2998 else
2999 {
3000 /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
3001 if (n_na_c == CONSTM1_RTX (mode))
3002 {
3003 rtx a_nc_b = simplify_gen_binary (AND, mode, a,
3004 gen_int_mode (~cval & bval,
3005 mode));
3006 return simplify_gen_binary (IOR, mode, a_nc_b,
3007 gen_int_mode (~bval & cval,
3008 mode));
3009 }
3010 }
3011 }
3012
3013 /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
3014 do (ior (and A ~C) (and B C)) which is a machine instruction on some
3015 machines, and also has shorter instruction path length. */
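      /* This is the usual bitwise-merge identity: the result takes the
	 bits of B where C is 1 and the bits of A where C is 0, which is
	 exactly what the IOR-of-two-ANDs form expresses.  */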
3016 if (GET_CODE (op0) == AND
3017 && GET_CODE (XEXP (op0, 0)) == XOR
3018 && CONST_INT_P (XEXP (op0, 1))
3019 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
3020 {
3021 rtx a = trueop1;
3022 rtx b = XEXP (XEXP (op0, 0), 1);
3023 rtx c = XEXP (op0, 1);
3024 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3025 rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
3026 rtx bc = simplify_gen_binary (AND, mode, b, c);
3027 return simplify_gen_binary (IOR, mode, a_nc, bc);
3028 }
3029 /* Similarly, (xor (and (xor A B) C) B) becomes (ior (and A C) (and B ~C)). */
3030 else if (GET_CODE (op0) == AND
3031 && GET_CODE (XEXP (op0, 0)) == XOR
3032 && CONST_INT_P (XEXP (op0, 1))
3033 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
3034 {
3035 rtx a = XEXP (XEXP (op0, 0), 0);
3036 rtx b = trueop1;
3037 rtx c = XEXP (op0, 1);
3038 rtx nc = simplify_gen_unary (NOT, mode, c, mode);
3039 rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
3040 rtx ac = simplify_gen_binary (AND, mode, a, c);
3041 return simplify_gen_binary (IOR, mode, ac, b_nc);
3042 }
3043
3044 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
3045 comparison if STORE_FLAG_VALUE is 1. */
3046 if (STORE_FLAG_VALUE == 1
3047 && trueop1 == const1_rtx
3048 && COMPARISON_P (op0)
3049 && (reversed = reversed_comparison (op0, mode)))
3050 return reversed;
3051
3052 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
3053 is (lt foo (const_int 0)), so we can perform the above
3054 simplification if STORE_FLAG_VALUE is 1. */
3055
3056 if (is_a <scalar_int_mode> (mode, &int_mode)
3057 && STORE_FLAG_VALUE == 1
3058 && trueop1 == const1_rtx
3059 && GET_CODE (op0) == LSHIFTRT
3060 && CONST_INT_P (XEXP (op0, 1))
3061 && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
3062 return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);
3063
3064 /* (xor (comparison foo bar) (const_int sign-bit))
3065 when STORE_FLAG_VALUE is the sign bit. */
3066 if (is_a <scalar_int_mode> (mode, &int_mode)
3067 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
3068 && trueop1 == const_true_rtx
3069 && COMPARISON_P (op0)
3070 && (reversed = reversed_comparison (op0, int_mode)))
3071 return reversed;
3072
3073 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3074 if (tem)
3075 return tem;
3076
3077 tem = simplify_associative_operation (code, mode, op0, op1);
3078 if (tem)
3079 return tem;
3080 break;
3081
3082 case AND:
3083 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3084 return trueop1;
3085 if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3086 return op0;
3087 if (HWI_COMPUTABLE_MODE_P (mode))
3088 {
3089 HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
3090 HOST_WIDE_INT nzop1;
3091 if (CONST_INT_P (trueop1))
3092 {
3093 HOST_WIDE_INT val1 = INTVAL (trueop1);
3094 /* If we are turning off bits already known off in OP0, we need
3095 not do an AND. */
3096 if ((nzop0 & ~val1) == 0)
3097 return op0;
3098 }
3099 nzop1 = nonzero_bits (trueop1, mode);
3100 /* If we are clearing all the nonzero bits, the result is zero. */
3101 if ((nzop1 & nzop0) == 0
3102 && !side_effects_p (op0) && !side_effects_p (op1))
3103 return CONST0_RTX (mode);
3104 }
3105 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
3106 && GET_MODE_CLASS (mode) != MODE_CC)
3107 return op0;
3108 /* A & (~A) -> 0 */
3109 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3110 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3111 && ! side_effects_p (op0)
3112 && GET_MODE_CLASS (mode) != MODE_CC)
3113 return CONST0_RTX (mode);
3114
3115 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
3116 there are no nonzero bits of C outside of X's mode. */
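      /* Illustrative example (modes and mask chosen for exposition): with
	 X in QImode,
	   (and:SI (sign_extend:SI <X>) (const_int 0x7f))
	 becomes (zero_extend:SI (and:QI <X> (const_int 0x7f))), because the
	 mask has no bits outside QImode, so every bit the extension could
	 set is cleared anyway.  */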
3117 if ((GET_CODE (op0) == SIGN_EXTEND
3118 || GET_CODE (op0) == ZERO_EXTEND)
3119 && CONST_INT_P (trueop1)
3120 && HWI_COMPUTABLE_MODE_P (mode)
3121 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
3122 & UINTVAL (trueop1)) == 0)
3123 {
3124 machine_mode imode = GET_MODE (XEXP (op0, 0));
3125 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
3126 gen_int_mode (INTVAL (trueop1),
3127 imode));
3128 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
3129 }
3130
3131 /* Transform (and (truncate X) C) into (truncate (and X C)). This way
3132 we might be able to further simplify the AND with X and potentially
3133 remove the truncation altogether. */
3134 if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
3135 {
3136 rtx x = XEXP (op0, 0);
3137 machine_mode xmode = GET_MODE (x);
3138 tem = simplify_gen_binary (AND, xmode, x,
3139 gen_int_mode (INTVAL (trueop1), xmode));
3140 return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
3141 }
3142
3143 /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
3144 if (GET_CODE (op0) == IOR
3145 && CONST_INT_P (trueop1)
3146 && CONST_INT_P (XEXP (op0, 1)))
3147 {
3148 HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
3149 return simplify_gen_binary (IOR, mode,
3150 simplify_gen_binary (AND, mode,
3151 XEXP (op0, 0), op1),
3152 gen_int_mode (tmp, mode));
3153 }
3154
3155 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
3156 insn (and may simplify more). */
3157 if (GET_CODE (op0) == XOR
3158 && rtx_equal_p (XEXP (op0, 0), op1)
3159 && ! side_effects_p (op1))
3160 return simplify_gen_binary (AND, mode,
3161 simplify_gen_unary (NOT, mode,
3162 XEXP (op0, 1), mode),
3163 op1);
3164
3165 if (GET_CODE (op0) == XOR
3166 && rtx_equal_p (XEXP (op0, 1), op1)
3167 && ! side_effects_p (op1))
3168 return simplify_gen_binary (AND, mode,
3169 simplify_gen_unary (NOT, mode,
3170 XEXP (op0, 0), mode),
3171 op1);
3172
3173 /* Similarly for (~(A ^ B)) & A. */
3174 if (GET_CODE (op0) == NOT
3175 && GET_CODE (XEXP (op0, 0)) == XOR
3176 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
3177 && ! side_effects_p (op1))
3178 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
3179
3180 if (GET_CODE (op0) == NOT
3181 && GET_CODE (XEXP (op0, 0)) == XOR
3182 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
3183 && ! side_effects_p (op1))
3184 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
3185
3186 /* Convert (A | B) & A to A. */
3187 if (GET_CODE (op0) == IOR
3188 && (rtx_equal_p (XEXP (op0, 0), op1)
3189 || rtx_equal_p (XEXP (op0, 1), op1))
3190 && ! side_effects_p (XEXP (op0, 0))
3191 && ! side_effects_p (XEXP (op0, 1)))
3192 return op1;
3193
3194 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
3195 ((A & N) + B) & M -> (A + B) & M
3196 Similarly if (N & M) == 0,
3197 ((A | N) + B) & M -> (A + B) & M
3198 and for - instead of + and/or ^ instead of |.
3199 Also, if (N & M) == 0, then
3200 (A +- N) & M -> A & M. */
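      /* Illustrative example of the last case (constants chosen for
	 exposition), with M == 255 and N == 256 in SImode:
	   (and:SI (plus:SI <A> (const_int 256)) (const_int 255))
	 becomes (and:SI <A> (const_int 255)), since adding 256 cannot
	 change the low eight bits.  */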
3201 if (CONST_INT_P (trueop1)
3202 && HWI_COMPUTABLE_MODE_P (mode)
3203 && ~UINTVAL (trueop1)
3204 && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
3205 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
3206 {
3207 rtx pmop[2];
3208 int which;
3209
3210 pmop[0] = XEXP (op0, 0);
3211 pmop[1] = XEXP (op0, 1);
3212
3213 if (CONST_INT_P (pmop[1])
3214 && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
3215 return simplify_gen_binary (AND, mode, pmop[0], op1);
3216
3217 for (which = 0; which < 2; which++)
3218 {
3219 tem = pmop[which];
3220 switch (GET_CODE (tem))
3221 {
3222 case AND:
3223 if (CONST_INT_P (XEXP (tem, 1))
3224 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
3225 == UINTVAL (trueop1))
3226 pmop[which] = XEXP (tem, 0);
3227 break;
3228 case IOR:
3229 case XOR:
3230 if (CONST_INT_P (XEXP (tem, 1))
3231 && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
3232 pmop[which] = XEXP (tem, 0);
3233 break;
3234 default:
3235 break;
3236 }
3237 }
3238
3239 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
3240 {
3241 tem = simplify_gen_binary (GET_CODE (op0), mode,
3242 pmop[0], pmop[1]);
3243 return simplify_gen_binary (code, mode, tem, op1);
3244 }
3245 }
3246
3247 /* (and X (ior (not X) Y)) -> (and X Y) */
3248 if (GET_CODE (op1) == IOR
3249 && GET_CODE (XEXP (op1, 0)) == NOT
3250 && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
3251 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
3252
3253 /* (and (ior (not X) Y) X) -> (and X Y) */
3254 if (GET_CODE (op0) == IOR
3255 && GET_CODE (XEXP (op0, 0)) == NOT
3256 && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
3257 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
3258
3259 /* (and X (ior Y (not X))) -> (and X Y) */
3260 if (GET_CODE (op1) == IOR
3261 && GET_CODE (XEXP (op1, 1)) == NOT
3262 && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
3263 return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
3264
3265 /* (and (ior Y (not X)) X) -> (and X Y) */
3266 if (GET_CODE (op0) == IOR
3267 && GET_CODE (XEXP (op0, 1)) == NOT
3268 && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
3269 return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
3270
3271 tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3272 if (tem)
3273 return tem;
3274
3275 tem = simplify_associative_operation (code, mode, op0, op1);
3276 if (tem)
3277 return tem;
3278 break;
3279
3280 case UDIV:
3281 /* 0/x is 0 (or x&0 if x has side-effects). */
3282 if (trueop0 == CONST0_RTX (mode)
3283 && !cfun->can_throw_non_call_exceptions)
3284 {
3285 if (side_effects_p (op1))
3286 return simplify_gen_binary (AND, mode, op1, trueop0);
3287 return trueop0;
3288 }
3289 /* x/1 is x. */
3290 if (trueop1 == CONST1_RTX (mode))
3291 {
3292 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3293 if (tem)
3294 return tem;
3295 }
3296 /* Convert divide by power of two into shift. */
3297 if (CONST_INT_P (trueop1)
3298 && (val = exact_log2 (UINTVAL (trueop1))) > 0)
3299 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
3300 break;
3301
3302 case DIV:
3303 /* Handle floating point and integers separately. */
3304 if (SCALAR_FLOAT_MODE_P (mode))
3305 {
3306 /* Maybe change 0.0 / x to 0.0. This transformation isn't
3307 safe for modes with NaNs, since 0.0 / 0.0 will then be
3308 NaN rather than 0.0. Nor is it safe for modes with signed
3309 zeros, since dividing 0 by a negative number gives -0.0, not 0.0. */
3310 if (trueop0 == CONST0_RTX (mode)
3311 && !HONOR_NANS (mode)
3312 && !HONOR_SIGNED_ZEROS (mode)
3313 && ! side_effects_p (op1))
3314 return op0;
3315 /* x/1.0 is x. */
3316 if (trueop1 == CONST1_RTX (mode)
3317 && !HONOR_SNANS (mode))
3318 return op0;
3319
3320 if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3321 && trueop1 != CONST0_RTX (mode))
3322 {
3323 const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3324
3325 /* x/-1.0 is -x. */
3326 if (real_equal (d1, &dconstm1)
3327 && !HONOR_SNANS (mode))
3328 return simplify_gen_unary (NEG, mode, op0, mode);
3329
3330 /* Change FP division by a constant into multiplication.
3331 Only do this with -freciprocal-math. */
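	      /* E.g. (illustrative): under -freciprocal-math, x / 4.0 is
		 rewritten as x * 0.25; the reciprocal of 4.0 happens to be
		 exact, but the transformation is applied to any nonzero
		 constant divisor.  */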
3332 if (flag_reciprocal_math
3333 && !real_equal (d1, &dconst0))
3334 {
3335 REAL_VALUE_TYPE d;
3336 real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
3337 tem = const_double_from_real_value (d, mode);
3338 return simplify_gen_binary (MULT, mode, op0, tem);
3339 }
3340 }
3341 }
3342 else if (SCALAR_INT_MODE_P (mode))
3343 {
3344 /* 0/x is 0 (or x&0 if x has side-effects). */
3345 if (trueop0 == CONST0_RTX (mode)
3346 && !cfun->can_throw_non_call_exceptions)
3347 {
3348 if (side_effects_p (op1))
3349 return simplify_gen_binary (AND, mode, op1, trueop0);
3350 return trueop0;
3351 }
3352 /* x/1 is x. */
3353 if (trueop1 == CONST1_RTX (mode))
3354 {
3355 tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3356 if (tem)
3357 return tem;
3358 }
3359 /* x/-1 is -x. */
3360 if (trueop1 == constm1_rtx)
3361 {
3362 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
3363 if (x)
3364 return simplify_gen_unary (NEG, mode, x, mode);
3365 }
3366 }
3367 break;
3368
3369 case UMOD:
3370 /* 0%x is 0 (or x&0 if x has side-effects). */
3371 if (trueop0 == CONST0_RTX (mode))
3372 {
3373 if (side_effects_p (op1))
3374 return simplify_gen_binary (AND, mode, op1, trueop0);
3375 return trueop0;
3376 }
3377 /* x%1 is 0 (or x&0 if x has side-effects). */
3378 if (trueop1 == CONST1_RTX (mode))
3379 {
3380 if (side_effects_p (op0))
3381 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3382 return CONST0_RTX (mode);
3383 }
3384 /* Implement modulus by power of two as AND. */
3385 if (CONST_INT_P (trueop1)
3386 && exact_log2 (UINTVAL (trueop1)) > 0)
3387 return simplify_gen_binary (AND, mode, op0,
3388 gen_int_mode (INTVAL (op1) - 1, mode));
3389 break;
3390
3391 case MOD:
3392 /* 0%x is 0 (or x&0 if x has side-effects). */
3393 if (trueop0 == CONST0_RTX (mode))
3394 {
3395 if (side_effects_p (op1))
3396 return simplify_gen_binary (AND, mode, op1, trueop0);
3397 return trueop0;
3398 }
3399 /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
3400 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
3401 {
3402 if (side_effects_p (op0))
3403 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
3404 return CONST0_RTX (mode);
3405 }
3406 break;
3407
3408 case ROTATERT:
3409 case ROTATE:
3410 /* Canonicalize rotates by constant amount. If op1 is bitsize / 2,
3411 prefer left rotation; if op1 is from bitsize / 2 + 1 to
3412 bitsize - 1, use the other rotate direction with an amount of
3413 1 .. bitsize / 2 - 1 instead. */
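      /* For example (illustrative): in SImode,
	 (rotatert:SI <X> (const_int 25)) is canonicalized to
	 (rotate:SI <X> (const_int 7)).  */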
3414#if defined(HAVE_rotate) && defined(HAVE_rotatert)
3415 if (CONST_INT_P (trueop1)
3416 && IN_RANGE (INTVAL (trueop1),
3417 GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
3418 GET_MODE_UNIT_PRECISION (mode) - 1))
3419 return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
3420 mode, op0,
3421 GEN_INT (GET_MODE_UNIT_PRECISION (mode)
3422 - INTVAL (trueop1)));
3423#endif
3424 /* FALLTHRU */
3425 case ASHIFTRT:
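 /* Shifting or rotating by zero bits is a no-op, and shifting or rotating
 a zero value yields zero (provided dropping the count loses no side
 effects).  */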
3426 if (trueop1 == CONST0_RTX (mode))
3427 return op0;
3428 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3429 return op0;
3430 /* Rotating ~0, or arithmetically shifting it right, always results in ~0. */
3431 if (CONST_INT_P (trueop0)
3432 && HWI_COMPUTABLE_MODE_P (mode)
3433 && UINTVAL (trueop0) == GET_MODE_MASK (mode)
3434 && ! side_effects_p (op1))
3435 return op0;
3436
3437 canonicalize_shift:
3438 /* Given:
3439 scalar modes M1, M2
3440 scalar constants c1, c2
3441 size (M2) > size (M1)
3442 c1 == size (M2) - size (M1)
3443 optimize:
3444 ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
3445 <low_part>)
3446 (const_int <c2>))
3447 to:
3448 (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
3449 <low_part>). */
3450 if ((code == ASHIFTRT || code == LSHIFTRT)
3451 && is_a <scalar_int_mode> (mode, &int_mode)
3452 && SUBREG_P (op0)
3453 && CONST_INT_P (op1)
3454 && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
3455 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
3456 &inner_mode)
3457 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
3458 && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
3459 && (INTVAL (XEXP (SUBREG_REG (op0), 1))
3460 == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
3461 && subreg_lowpart_p (op0))
3462 {
3463 rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
3464 + INTVAL (op1));
3465 tmp = simplify_gen_binary (code, inner_mode,
3466 XEXP (SUBREG_REG (op0), 0),
3467 tmp);
3468 return lowpart_subreg (int_mode, tmp, inner_mode);
3469 }
3470
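 /* When SHIFT_COUNT_TRUNCATED, the hardware only uses the low bits of the
 count, so an over-wide constant count can be canonicalized by masking it
 to the unit precision.  */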
3471 if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
3472 {
3473 val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
3474 if (val != INTVAL (op1))
3475 return simplify_gen_binary (code, mode, op0, GEN_INT (val));
3476 }
3477 break;
3478
3479 case ASHIFT:
3480 case SS_ASHIFT:
3481 case US_ASHIFT:
3482 if (trueop1 == CONST0_RTX (mode))
3483 return op0;
3484 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3485 return op0;
3486 goto canonicalize_shift;
3487
3488 case LSHIFTRT:
3489 if (trueop1 == CONST0_RTX (mode))
3490 return op0;
3491 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
3492 return op0;
3493 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
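 /* CLZ of a nonzero value is at most the precision minus 1, so when CLZ of
 zero is defined to be the precision and the shift count is log2 of that
 precision, the shifted result is 1 exactly when X is zero (given
 STORE_FLAG_VALUE == 1).  */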
3494 if (GET_CODE (op0) == CLZ
3495 && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
3496 && CONST_INT_P (trueop1)
3497 && STORE_FLAG_VALUE == 1
3498 && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
3499 {
3500 unsigned HOST_WIDE_INT zero_val = 0;
3501
3502 if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
3503 && zero_val == GET_MODE_PRECISION (inner_mode)
3504 && INTVAL (trueop1) == exact_log2 (zero_val))
3505 return simplify_gen_relational (EQ, mode, inner_mode,
3506 XEXP (op0, 0), const0_rtx);
3507 }
3508 goto canonicalize_shift;
3509
3510 case SMIN:
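 /* SMIN of X and the minimum signed value of the mode is always that
 minimum value.  */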
3511 if (HWI_COMPUTABLE_MODE_P (mode)
3512 && mode_signbit_p (mode, trueop1)
3513 && ! side_effects_p (op0))
3514 return op1;
3515 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3516 return op0;
3517 tem = simplify_associative_operation (code, mode, op0, op1);
3518 if (tem)
3519 return tem;
3520 break;
3521
3522 case SMAX:
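 /* SMAX of X and the maximum signed value of the mode is always that
 maximum value.  */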
3523 if (HWI_COMPUTABLE_MODE_P (mode)
3524 && CONST_INT_P (trueop1)
3525 && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
3526 && ! side_effects_p (op0))
3527 return op1;
3528 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3529 return op0;
3530 tem = simplify_associative_operation (code, mode, op0, op1);
3531 if (tem)
3532 return tem;
3533 break;
3534
3535 case UMIN:
3536 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
3537 return op1;
3538 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3539 return op0;
3540 tem = simplify_associative_operation (code, mode, op0, op1);
3541 if (tem)
3542 return tem;
3543 break;
3544
3545 case UMAX:
3546 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
3547 return op1;
3548 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3549 return op0;
3550 tem = simplify_associative_operation (code, mode, op0, op1);
3551 if (tem)
3552 return tem;
3553 break;
3554
3555 case SS_PLUS:
3556 case US_PLUS:
3557 case SS_MINUS:
3558 case US_MINUS:
3559 case SS_MULT:
3560 case US_MULT:
3561 case SS_DIV:
3562 case US_DIV:
3563 /* ??? There are simplifications that can be done. */
3564 return 0;
3565
3566 case VEC_SERIES:
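 /* (vec_series base step) with a zero step has every element equal to the
 base, so it is equivalent to duplicating op0.  */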
3567 if (op1 == CONST0_RTX (GET_MODE_INNER (mode)))
3568 return gen_vec_duplicate (mode, op0);
3569 if (CONSTANT_P (op0) && CONSTANT_P (op1))
3570 return gen_const_vec_series (mode, op0, op1);
3571 return 0;
3572
3573 case VEC_SELECT:
3574 if (!VECTOR_MODE_P (mode))
3575 {
3576 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3577 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
3578 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3579 gcc_assert (XVECLEN (trueop1, 0) == 1);
3580 gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));
3581
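 /* Selecting any single element of a duplicated vector yields the
 duplicated scalar.  */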
3582 if (vec_duplicate_p (trueop0, &elt0))
3583 return elt0;
3584
3585 if (GET_CODE (trueop0) == CONST_VECTOR)
3586 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
3587 (trueop1, 0, 0)));
3588
3589 /* Extract a scalar element from a nested VEC_SELECT expression
3590 (with an optional nested VEC_CONCAT expression). Some targets
3591 (i386) extract a scalar element from a vector using a chain of
3592 nested VEC_SELECT expressions. When the input operand is a memory
3593 operand, this operation can be simplified to a simple scalar
3594 load from an offset memory address. */
3595 if (GET_CODE (trueop0) == VEC_SELECT)
3596 {
3597 rtx op0 = XEXP (trueop0, 0);
3598 rtx op1 = XEXP (trueop0, 1);
3599
3600 int n_elts = GET_MODE_NUNITS (GET_MODE (op0));
3601
3602 int i = INTVAL (XVECEXP (trueop1, 0, 0));
3603 int elem;
3604
3605 rtvec vec;
3606 rtx tmp_op, tmp;
3607
3608 gcc_assert (GET_CODE (op1) == PARALLEL);
3609 gcc_assert (i < n_elts);
3610
3611 /* Select the element pointed to by the nested selector. */
3612 elem = INTVAL (XVECEXP (op1, 0, i));
3613
3614 /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
3615 if (GET_CODE (op0) == VEC_CONCAT)
3616 {
3617 rtx op00 = XEXP (op0, 0);
3618 rtx op01 = XEXP (op0, 1);
3619
3620 machine_mode mode00, mode01;
3621 int n_elts00, n_elts01;
3622
3623 mode00 = GET_MODE (op00);
3624 mode01 = GET_MODE (op01);
3625
3626 /* Find out number of elements of each operand. */
3627 n_elts00 = GET_MODE_NUNITS (mode00);
3628 n_elts01 = GET_MODE_NUNITS (mode01);
3629
3630 gcc_assert (n_elts == n_elts00 + n_elts01);
3631
3632 /* Select correct operand of VEC_CONCAT
3633 and adjust selector. */
3634 if (elem < n_elts00)
3635 tmp_op = op00;
3636 else
3637 {
3638 tmp_op = op01;
3639 elem -= n_elts00;
3640 }
3641 }
3642 else
3643 tmp_op = op0;
3644
3645 vec = rtvec_alloc (1);
3646 RTVEC_ELT (vec, 0) = GEN_INT (elem);
3647
3648 tmp = gen_rtx_fmt_ee (code, mode,
3649 tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
3650 return tmp;
3651 }
3652 }
3653 else
3654 {
3655 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
3656 gcc_assert (GET_MODE_INNER (mode)
3657 == GET_MODE_INNER (GET_MODE (trueop0)));
3658 gcc_assert (GET_CODE (trueop1) == PARALLEL);
3659
3660 if (vec_duplicate_p (trueop0, &elt0))
3661 /* It doesn't matter which elements are selected by trueop1,
3662 because they are all the same. */
3663 return gen_vec_duplicate (mode, elt0);
3664
3665 if (GET_CODE (trueop0) == CONST_VECTOR)
3666 {
3667 int elt_size = GET_MODE_UNIT_SIZE (mode);
3668 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
3669 rtvec v = rtvec_alloc (n_elts);
3670 unsigned int i;
3671
3672 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
3673 for (i = 0; i < n_elts; i++)
3674 {
3675 rtx x = XVECEXP (trueop1, 0, i);
3676
3677 gcc_assert (CONST_INT_P (x));
3678 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
3679 INTVAL (x));
3680 }
3681
3682 return gen_rtx_CONST_VECTOR (mode, v);
3683 }
3684
3685 /* Recognize the identity: selecting elements 0, 1, ..., n-1 in order gives back the operand unchanged. */
3686 if (GET_MODE (trueop0) == mode)
3687 {
3688 bool maybe_ident = true;
3689 for (int i = 0; i < XVECLEN (trueop1, 0); i++)
3690 {
3691 rtx j = XVECEXP (trueop1, 0, i);
3692 if (!CONST_INT_P (j) || INTVAL (j) != i)
3693 {
3694 maybe_ident = false;
3695 break;
3696 }
3697 }
3698 if (maybe_ident)
3699 return trueop0;
3700 }
3701
3702 /* If we build {a,b} then permute it, build the result directly. */
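 /* For example, selecting elements {2, 0} from
 (vec_concat (vec_concat a b) (vec_concat c d)) yields (vec_concat c a).  */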
3703 if (XVECLEN (trueop1, 0) == 2
3704 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3705 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3706 && GET_CODE (trueop0) == VEC_CONCAT
3707 && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
3708 && GET_MODE (XEXP (trueop0, 0)) == mode
3709 && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
3710 && GET_MODE (XEXP (trueop0, 1)) == mode)
3711 {
3712 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3713 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3714 rtx subop0, subop1;
3715
3716 gcc_assert (i0 < 4 && i1 < 4);
3717 subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
3718 subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
3719
3720 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3721 }
3722
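 /* Likewise when trueop0 is itself a two-element vec_concat: selecting
 elements {1, 0} from (vec_concat a b) yields (vec_concat b a).  */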
3723 if (XVECLEN (trueop1, 0) == 2
3724 && CONST_INT_P (XVECEXP (trueop1, 0, 0))
3725 && CONST_INT_P (XVECEXP (trueop1, 0, 1))
3726 && GET_CODE (trueop0) == VEC_CONCAT
3727 && GET_MODE (trueop0) == mode)
3728 {
3729 unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
3730 unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
3731 rtx subop0, subop1;
3732
3733 gcc_assert (i0 < 2 && i1 < 2);
3734 subop0 = XEXP (trueop0, i0);
3735 subop1 = XEXP (trueop0, i1);
3736
3737 return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
3738 }
3739
3740 /* If we select one half of a vec_concat, return that. */
3741 if (GET_CODE (trueop0) == VEC_CONCAT
3742 &&