1/* Analyze RTL for GNU compiler.
2 Copyright (C) 1987-2024 Free Software Foundation, Inc.
3
4This file is part of GCC.
5
6GCC is free software; you can redistribute it and/or modify it under
7the terms of the GNU General Public License as published by the Free
8Software Foundation; either version 3, or (at your option) any later
9version.
10
11GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12WARRANTY; without even the implied warranty of MERCHANTABILITY or
13FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14for more details.
15
16You should have received a copy of the GNU General Public License
17along with GCC; see the file COPYING3. If not see
18<http://www.gnu.org/licenses/>. */
19
20
21#include "config.h"
22#include "system.h"
23#include "coretypes.h"
24#include "backend.h"
25#include "target.h"
26#include "rtl.h"
27#include "rtlanal.h"
28#include "tree.h"
29#include "predict.h"
30#include "df.h"
31#include "memmodel.h"
32#include "tm_p.h"
33#include "insn-config.h"
34#include "regs.h"
35#include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
36#include "recog.h"
37#include "addresses.h"
38#include "rtl-iter.h"
39#include "hard-reg-set.h"
40#include "function-abi.h"
41
42/* Forward declarations */
43static void set_of_1 (rtx, const_rtx, void *);
44static bool covers_regno_p (const_rtx, unsigned int);
45static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
46static bool computed_jump_p_1 (const_rtx);
47static void parms_set (rtx, const_rtx, void *);
48
49static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, scalar_int_mode,
50 const_rtx, machine_mode,
51 unsigned HOST_WIDE_INT);
52static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, scalar_int_mode,
53 const_rtx, machine_mode,
54 unsigned HOST_WIDE_INT);
55static unsigned int cached_num_sign_bit_copies (const_rtx, scalar_int_mode,
56 const_rtx, machine_mode,
57 unsigned int);
58static unsigned int num_sign_bit_copies1 (const_rtx, scalar_int_mode,
59 const_rtx, machine_mode,
60 unsigned int);
61
62rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
63rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
64
65/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
66 If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
67 SIGN_EXTEND then while narrowing we also have to enforce the
68 representation and sign-extend the value to mode DESTINATION_REP.
69
70 If the value is already sign-extended to DESTINATION_REP mode we
71 can just switch to DESTINATION mode on it. For each pair of
72 integral modes SOURCE and DESTINATION, when truncating from SOURCE
73 to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
74 contains the number of high-order bits in SOURCE that have to be
75 copies of the sign-bit so that we can do this mode-switch to
76 DESTINATION. */
77
78static unsigned int
79num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
80
81/* Store X into index I of ARRAY. ARRAY is known to have at least I
82 elements. Return the new base of ARRAY. */
83
84template <typename T>
85typename T::value_type *
86generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
87 value_type *base,
88 size_t i, value_type x)
89{
90 if (base == array.stack)
91 {
92 if (i < LOCAL_ELEMS)
93 {
94 base[i] = x;
95 return base;
96 }
97 gcc_checking_assert (i == LOCAL_ELEMS);
98 /* A previous iteration might also have moved from the stack to the
99 heap, in which case the heap array will already be big enough. */
100 if (vec_safe_length (array.heap) <= i)
101 vec_safe_grow (array.heap, i + 1, true);
102 base = array.heap->address ();
103 memcpy (base, array.stack, sizeof (array.stack));
104 base[LOCAL_ELEMS] = x;
105 return base;
106 }
107 unsigned int length = array.heap->length ();
108 if (length > i)
109 {
110 gcc_checking_assert (base == array.heap->address ());
111 base[i] = x;
112 return base;
113 }
114 else
115 {
116 gcc_checking_assert (i == length);
117 vec_safe_push (array.heap, x);
118 return array.heap->address ();
119 }
120}
121
122/* Add the subrtxes of X to worklist ARRAY, starting at END. Return the
123 number of elements added to the worklist. */
124
125template <typename T>
126size_t
127generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
128 value_type *base,
129 size_t end, rtx_type x)
130{
131 enum rtx_code code = GET_CODE (x);
132 const char *format = GET_RTX_FORMAT (code);
133 size_t orig_end = end;
134 if (UNLIKELY (INSN_P (x)))
135 {
136 /* Put the pattern at the top of the queue, since that's what
137 we're likely to want most. It also allows for the SEQUENCE
138 code below. */
139 for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
140 if (format[i] == 'e')
141 {
142 value_type subx = T::get_value (x->u.fld[i].rt_rtx);
143 if (LIKELY (end < LOCAL_ELEMS))
144 base[end++] = subx;
145 else
146 base = add_single_to_queue (array, base, end++, subx);
147 }
148 }
149 else
150 for (int i = 0; format[i]; ++i)
151 if (format[i] == 'e')
152 {
153 value_type subx = T::get_value (x->u.fld[i].rt_rtx);
154 if (LIKELY (end < LOCAL_ELEMS))
155 base[end++] = subx;
156 else
157 base = add_single_to_queue (array, base, end++, subx);
158 }
159 else if (format[i] == 'E')
160 {
161 unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
162 rtx *vec = x->u.fld[i].rt_rtvec->elem;
163 if (LIKELY (end + length <= LOCAL_ELEMS))
164 for (unsigned int j = 0; j < length; j++)
165 base[end++] = T::get_value (vec[j]);
166 else
167 for (unsigned int j = 0; j < length; j++)
168 base = add_single_to_queue (array, base, end++,
169 T::get_value (vec[j]));
170 if (code == SEQUENCE && end == length)
171 /* If the subrtxes of the sequence fill the entire array then
172 we know that no other parts of a containing insn are queued.
173 The caller is therefore iterating over the sequence as a
174 PATTERN (...), so we also want the patterns of the
175 subinstructions. */
176 for (unsigned int j = 0; j < length; j++)
177 {
178 typename T::rtx_type x = T::get_rtx (base[j]);
179 if (INSN_P (x))
180 base[j] = T::get_value (PATTERN (x));
181 }
182 }
183 return end - orig_end;
184}
185
186template <typename T>
187void
188generic_subrtx_iterator <T>::free_array (array_type &array)
189{
190 vec_free (array.heap);
191}
192
193template <typename T>
194const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;
195
196template class generic_subrtx_iterator <const_rtx_accessor>;
197template class generic_subrtx_iterator <rtx_var_accessor>;
198template class generic_subrtx_iterator <rtx_ptr_accessor>;
199
200/* Return true if the value of X is unstable
201 (would be different at a different point in the program).
202 The frame pointer, arg pointer, etc. are considered stable
203 (within one function) and so is anything marked `unchanging'. */
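/* For example, a MEM_READONLY_P memory reference whose address is a
   SYMBOL_REF is stable, while a MEM addressed through an ordinary pseudo
   register is not, since the pseudo may hold different values at
   different points in the function. */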
204
205bool
206rtx_unstable_p (const_rtx x)
207{
208 const RTX_CODE code = GET_CODE (x);
209 int i;
210 const char *fmt;
211
212 switch (code)
213 {
214 case MEM:
215 return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));
216
217 case CONST:
218 CASE_CONST_ANY:
219 case SYMBOL_REF:
220 case LABEL_REF:
221 return false;
222
223 case REG:
224 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
225 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
226 /* The arg pointer varies if it is not a fixed register. */
227 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
228 return false;
229 /* ??? When call-clobbered, the value is stable modulo the restore
230 that must happen after a call. This currently screws up local-alloc
231 into believing that the restore is not needed. */
232 if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
233 return false;
234 return true;
235
236 case ASM_OPERANDS:
237 if (MEM_VOLATILE_P (x))
238 return true;
239
240 /* Fall through. */
241
242 default:
243 break;
244 }
245
246 fmt = GET_RTX_FORMAT (code);
247 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
248 if (fmt[i] == 'e')
249 {
250 if (rtx_unstable_p (XEXP (x, i)))
251 return true;
252 }
253 else if (fmt[i] == 'E')
254 {
255 int j;
256 for (j = 0; j < XVECLEN (x, i); j++)
257 if (rtx_unstable_p (XVECEXP (x, i, j)))
258 return true;
259 }
260
261 return false;
262}
263
264/* Return true if X has a value that can vary even between two
265 executions of the program. false means X can be compared reliably
266 against certain constants or near-constants.
267 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
268 zero, we are slightly more conservative.
269 The frame pointer and the arg pointer are considered constant. */
270
271bool
272rtx_varies_p (const_rtx x, bool for_alias)
273{
274 RTX_CODE code;
275 int i;
276 const char *fmt;
277
278 if (!x)
279 return false;
280
281 code = GET_CODE (x);
282 switch (code)
283 {
284 case MEM:
285 return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);
286
287 case CONST:
288 CASE_CONST_ANY:
289 case SYMBOL_REF:
290 case LABEL_REF:
291 return false;
292
293 case REG:
294 /* Note that we have to test for the actual rtx used for the frame
295 and arg pointers and not just the register number in case we have
296 eliminated the frame and/or arg pointer and are using it
297 for pseudos. */
298 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
299 /* The arg pointer varies if it is not a fixed register. */
300 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
301 return false;
302 if (x == pic_offset_table_rtx
303 /* ??? When call-clobbered, the value is stable modulo the restore
304 that must happen after a call. This currently screws up
305 local-alloc into believing that the restore is not needed, so we
306 must return 0 only if we are called from alias analysis. */
307 && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
308 return false;
309 return true;
310
311 case LO_SUM:
312 /* The operand 0 of a LO_SUM is considered constant
313 (in fact it is related specifically to operand 1)
314 during alias analysis. */
315 return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
316 || rtx_varies_p (XEXP (x, 1), for_alias);
317
318 case ASM_OPERANDS:
319 if (MEM_VOLATILE_P (x))
320 return true;
321
322 /* Fall through. */
323
324 default:
325 break;
326 }
327
328 fmt = GET_RTX_FORMAT (code);
329 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
330 if (fmt[i] == 'e')
331 {
332 if (rtx_varies_p (XEXP (x, i), for_alias))
333 return true;
334 }
335 else if (fmt[i] == 'E')
336 {
337 int j;
338 for (j = 0; j < XVECLEN (x, i); j++)
339 if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
340 return true;
341 }
342
343 return false;
344}
345
346/* Compute an approximation for the offset between the register
347 FROM and TO for the current function, as it was at the start
348 of the routine. */
349
350static poly_int64
351get_initial_register_offset (int from, int to)
352{
353 static const struct elim_table_t
354 {
355 const int from;
356 const int to;
357 } table[] = ELIMINABLE_REGS;
358 poly_int64 offset1, offset2;
359 unsigned int i, j;
360
361 if (to == from)
362 return 0;
363
364 /* It is not safe to call INITIAL_ELIMINATION_OFFSET before the epilogue
365 is completed, but we need to give at least an estimate for the stack
366 pointer based on the frame size. */
367 if (!epilogue_completed)
368 {
369 offset1 = crtl->outgoing_args_size + get_frame_size ();
370#if !STACK_GROWS_DOWNWARD
371 offset1 = - offset1;
372#endif
373 if (to == STACK_POINTER_REGNUM)
374 return offset1;
375 else if (from == STACK_POINTER_REGNUM)
376 return - offset1;
377 else
378 return 0;
379 }
380
381 for (i = 0; i < ARRAY_SIZE (table); i++)
382 if (table[i].from == from)
383 {
384 if (table[i].to == to)
385 {
386 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
387 offset1);
388 return offset1;
389 }
390 for (j = 0; j < ARRAY_SIZE (table); j++)
391 {
392 if (table[j].to == to
393 && table[j].from == table[i].to)
394 {
395 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
396 offset1);
397 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
398 offset2);
399 return offset1 + offset2;
400 }
401 if (table[j].from == to
402 && table[j].to == table[i].to)
403 {
404 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
405 offset1);
406 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
407 offset2);
408 return offset1 - offset2;
409 }
410 }
411 }
412 else if (table[i].to == from)
413 {
414 if (table[i].from == to)
415 {
416 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
417 offset1);
418 return - offset1;
419 }
420 for (j = 0; j < ARRAY_SIZE (table); j++)
421 {
422 if (table[j].to == to
423 && table[j].from == table[i].from)
424 {
425 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
426 offset1);
427 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
428 offset2);
429 return - offset1 + offset2;
430 }
431 if (table[j].from == to
432 && table[j].to == table[i].from)
433 {
434 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
435 offset1);
436 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
437 offset2);
438 return - offset1 - offset2;
439 }
440 }
441 }
442
443 /* If the requested register combination was not found,
444 try a simpler combination. */
445 if (from == ARG_POINTER_REGNUM)
446 return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
447 else if (to == ARG_POINTER_REGNUM)
448 return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
449 else if (from == HARD_FRAME_POINTER_REGNUM)
450 return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
451 else if (to == HARD_FRAME_POINTER_REGNUM)
452 return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
453 else
454 return 0;
455}
456
457/* Return true if the use of X+OFFSET as an address in a MEM with SIZE
458 bytes can cause a trap. MODE is the mode of the MEM (not that of X) and
459 UNALIGNED_MEMS controls whether true is returned for unaligned memory
460 references on strict alignment machines. */
461
462static bool
463rtx_addr_can_trap_p_1 (const_rtx x, poly_int64 offset, poly_int64 size,
464 machine_mode mode, bool unaligned_mems)
465{
466 enum rtx_code code = GET_CODE (x);
467 gcc_checking_assert (mode == BLKmode
468 || mode == VOIDmode
469 || known_size_p (size));
470 poly_int64 const_x1;
471
472 /* The offset must be a multiple of the mode size if we are considering
473 unaligned memory references on strict alignment machines. */
474 if (STRICT_ALIGNMENT
475 && unaligned_mems
476 && mode != BLKmode
477 && mode != VOIDmode)
478 {
479 poly_int64 actual_offset = offset;
480
481#ifdef SPARC_STACK_BOUNDARY_HACK
482 /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
483 the real alignment of %sp. However, when it does this, the
484 alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY. */
485 if (SPARC_STACK_BOUNDARY_HACK
486 && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
487 actual_offset -= STACK_POINTER_OFFSET;
488#endif
489
490 if (!multiple_p (actual_offset, GET_MODE_SIZE (mode)))
491 return true;
492 }
493
494 switch (code)
495 {
496 case SYMBOL_REF:
497 if (SYMBOL_REF_WEAK (x))
498 return true;
499 if (!CONSTANT_POOL_ADDRESS_P (x) && !SYMBOL_REF_FUNCTION_P (x))
500 {
501 tree decl;
502 poly_int64 decl_size;
503
504 if (maybe_lt (offset, 0))
505 return true;
506 if (!known_size_p (size))
507 return maybe_ne (offset, 0);
508
509 /* If the size of the access or of the symbol is unknown,
510 assume the worst. */
511 decl = SYMBOL_REF_DECL (x);
512
513 /* Else check that the access is in bounds. TODO: restructure
514 expr_size/tree_expr_size/int_expr_size and just use the latter. */
515 if (!decl)
516 decl_size = -1;
517 else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
518 {
519 if (!poly_int_tree_p (DECL_SIZE_UNIT (decl), &decl_size))
520 decl_size = -1;
521 }
522 else if (TREE_CODE (decl) == STRING_CST)
523 decl_size = TREE_STRING_LENGTH (decl);
524 else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
525 decl_size = int_size_in_bytes (TREE_TYPE (decl));
526 else
527 decl_size = -1;
528
529 return (!known_size_p (decl_size) || known_eq (decl_size, 0)
530 ? maybe_ne (offset, 0)
531 : !known_subrange_p (offset, size, 0, decl_size));
532 }
533
534 return false;
535
536 case LABEL_REF:
537 return false;
538
539 case REG:
540 /* Stack references are assumed not to trap, but we need to deal with
541 nonsensical offsets. */
542 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
543 || x == stack_pointer_rtx
544 /* The arg pointer varies if it is not a fixed register. */
545 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
546 {
547#ifdef RED_ZONE_SIZE
548 poly_int64 red_zone_size = RED_ZONE_SIZE;
549#else
550 poly_int64 red_zone_size = 0;
551#endif
552 poly_int64 stack_boundary = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
553 poly_int64 low_bound, high_bound;
554
555 if (!known_size_p (size))
556 return true;
557
558 if (x == frame_pointer_rtx)
559 {
560 if (FRAME_GROWS_DOWNWARD)
561 {
562 high_bound = targetm.starting_frame_offset ();
563 low_bound = high_bound - get_frame_size ();
564 }
565 else
566 {
567 low_bound = targetm.starting_frame_offset ();
568 high_bound = low_bound + get_frame_size ();
569 }
570 }
571 else if (x == hard_frame_pointer_rtx)
572 {
573 poly_int64 sp_offset
574 = get_initial_register_offset (STACK_POINTER_REGNUM,
575 HARD_FRAME_POINTER_REGNUM);
576 poly_int64 ap_offset
577 = get_initial_register_offset (ARG_POINTER_REGNUM,
578 HARD_FRAME_POINTER_REGNUM);
579
580#if STACK_GROWS_DOWNWARD
581 low_bound = sp_offset - red_zone_size - stack_boundary;
582 high_bound = ap_offset
583 + FIRST_PARM_OFFSET (current_function_decl)
584#if !ARGS_GROW_DOWNWARD
585 + crtl->args.size
586#endif
587 + stack_boundary;
588#else
589 high_bound = sp_offset + red_zone_size + stack_boundary;
590 low_bound = ap_offset
591 + FIRST_PARM_OFFSET (current_function_decl)
592#if ARGS_GROW_DOWNWARD
593 - crtl->args.size
594#endif
595 - stack_boundary;
596#endif
597 }
598 else if (x == stack_pointer_rtx)
599 {
600 poly_int64 ap_offset
601 = get_initial_register_offset (ARG_POINTER_REGNUM,
602 STACK_POINTER_REGNUM);
603
604#if STACK_GROWS_DOWNWARD
605 low_bound = - red_zone_size - stack_boundary;
606 high_bound = ap_offset
607 + FIRST_PARM_OFFSET (current_function_decl)
608#if !ARGS_GROW_DOWNWARD
609 + crtl->args.size
610#endif
611 + stack_boundary;
612#else
613 high_bound = red_zone_size + stack_boundary;
614 low_bound = ap_offset
615 + FIRST_PARM_OFFSET (current_function_decl)
616#if ARGS_GROW_DOWNWARD
617 - crtl->args.size
618#endif
619 - stack_boundary;
620#endif
621 }
622 else
623 {
624 /* We assume that accesses are safe to at least the
625 next stack boundary.
626 Examples are varargs and __builtin_return_address. */
627#if ARGS_GROW_DOWNWARD
628 high_bound = FIRST_PARM_OFFSET (current_function_decl)
629 + stack_boundary;
630 low_bound = FIRST_PARM_OFFSET (current_function_decl)
631 - crtl->args.size - stack_boundary;
632#else
633 low_bound = FIRST_PARM_OFFSET (current_function_decl)
634 - stack_boundary;
635 high_bound = FIRST_PARM_OFFSET (current_function_decl)
636 + crtl->args.size + stack_boundary;
637#endif
638 }
639
640 if (known_ge (offset, low_bound)
641 && known_le (offset, high_bound - size))
642 return false;
643 return true;
644 }
645 /* All of the virtual frame registers are stack references. */
646 if (VIRTUAL_REGISTER_P (x))
647 return false;
648 return true;
649
650 case CONST:
651 return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
652 mode, unaligned_mems);
653
654 case PLUS:
655 /* An address is assumed not to trap if:
656 - it is the pic register plus a const unspec without offset. */
657 if (XEXP (x, 0) == pic_offset_table_rtx
658 && GET_CODE (XEXP (x, 1)) == CONST
659 && GET_CODE (XEXP (XEXP (x, 1), 0)) == UNSPEC
660 && known_eq (offset, 0))
661 return false;
662
663 /* - or it is an address that can't trap plus a constant integer. */
664 if (poly_int_rtx_p (XEXP (x, 1), &const_x1)
665 && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + const_x1,
666 size, mode, unaligned_mems))
667 return false;
668
669 return true;
670
671 case LO_SUM:
672 case PRE_MODIFY:
673 return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
674 mode, unaligned_mems);
675
676 case PRE_DEC:
677 case PRE_INC:
678 case POST_DEC:
679 case POST_INC:
680 case POST_MODIFY:
681 return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
682 mode, unaligned_mems);
683
684 default:
685 break;
686 }
687
688 /* If it isn't one of the cases above, it can cause a trap. */
689 return true;
690}
691
692/* Return true if the use of X as an address in a MEM can cause a trap. */
693
694bool
695rtx_addr_can_trap_p (const_rtx x)
696{
697 return rtx_addr_can_trap_p_1 (x, 0, -1, BLKmode, false);
698}
699
700/* Return true if X contains a MEM subrtx. */
701
702bool
703contains_mem_rtx_p (rtx x)
704{
705 subrtx_iterator::array_type array;
706 FOR_EACH_SUBRTX (iter, array, x, ALL)
707 if (MEM_P (*iter))
708 return true;
709
710 return false;
711}
712
713/* Return true if X is an address that is known to not be zero. */
714
715bool
716nonzero_address_p (const_rtx x)
717{
718 const enum rtx_code code = GET_CODE (x);
719
720 switch (code)
721 {
722 case SYMBOL_REF:
723 return flag_delete_null_pointer_checks && !SYMBOL_REF_WEAK (x);
724
725 case LABEL_REF:
726 return true;
727
728 case REG:
729 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
730 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
731 || x == stack_pointer_rtx
732 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
733 return true;
734 /* All of the virtual frame registers are stack references. */
735 if (VIRTUAL_REGISTER_P (x))
736 return true;
737 return false;
738
739 case CONST:
740 return nonzero_address_p (XEXP (x, 0));
741
742 case PLUS:
743 /* Handle PIC references. */
744 if (XEXP (x, 0) == pic_offset_table_rtx
745 && CONSTANT_P (XEXP (x, 1)))
746 return true;
747 return false;
748
749 case PRE_MODIFY:
750 /* Similar to the above; allow positive offsets. Further, since
751 auto-inc is only allowed in memories, the register must be a
752 pointer. */
753 if (CONST_INT_P (XEXP (x, 1))
754 && INTVAL (XEXP (x, 1)) > 0)
755 return true;
756 return nonzero_address_p (XEXP (x, 0));
757
758 case PRE_INC:
759 /* Similarly. Further, the offset is always positive. */
760 return true;
761
762 case PRE_DEC:
763 case POST_DEC:
764 case POST_INC:
765 case POST_MODIFY:
766 return nonzero_address_p (XEXP (x, 0));
767
768 case LO_SUM:
769 return nonzero_address_p (XEXP (x, 1));
770
771 default:
772 break;
773 }
774
775 /* If it isn't one of the cases above, it might be zero. */
776 return false;
777}
778
779/* Return true if X refers to a memory location whose address
780 cannot be compared reliably with constant addresses,
781 or if X refers to a BLKmode memory object.
782 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
783 zero, we are slightly more conservative. */
784
785bool
786rtx_addr_varies_p (const_rtx x, bool for_alias)
787{
788 enum rtx_code code;
789 int i;
790 const char *fmt;
791
792 if (x == 0)
793 return false;
794
795 code = GET_CODE (x);
796 if (code == MEM)
797 return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);
798
799 fmt = GET_RTX_FORMAT (code);
800 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
801 if (fmt[i] == 'e')
802 {
803 if (rtx_addr_varies_p (XEXP (x, i), for_alias))
804 return true;
805 }
806 else if (fmt[i] == 'E')
807 {
808 int j;
809 for (j = 0; j < XVECLEN (x, i); j++)
810 if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
811 return true;
812 }
813 return false;
814}
815
816/* Return the CALL in INSN if there is one. */
817
818rtx
819get_call_rtx_from (const rtx_insn *insn)
820{
821 rtx x = PATTERN (insn);
822 if (GET_CODE (x) == PARALLEL)
823 x = XVECEXP (x, 0, 0);
824 if (GET_CODE (x) == SET)
825 x = SET_SRC (x);
826 if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
827 return x;
828 return NULL_RTX;
829}
830
831/* Get the declaration of the function called by INSN. */
832
833tree
834get_call_fndecl (const rtx_insn *insn)
835{
836 rtx note, datum;
837
838 note = find_reg_note (insn, REG_CALL_DECL, NULL_RTX);
839 if (note == NULL_RTX)
840 return NULL_TREE;
841
842 datum = XEXP (note, 0);
843 if (datum != NULL_RTX)
844 return SYMBOL_REF_DECL (datum);
845
846 return NULL_TREE;
847}
848
849/* Return the value of the integer term in X, if one is apparent;
850 otherwise return 0.
851 Only obvious integer terms are detected.
852 This is used in cse.cc with the `related_value' field. */
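/* For example, for (const (plus (symbol_ref "x") (const_int 8))) this
   returns 8, for (minus (symbol_ref "x") (const_int 8)) it returns -8,
   and for anything without such an obvious term it returns 0. (The
   symbol name is purely illustrative.) */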
853
854HOST_WIDE_INT
855get_integer_term (const_rtx x)
856{
857 if (GET_CODE (x) == CONST)
858 x = XEXP (x, 0);
859
860 if (GET_CODE (x) == MINUS
861 && CONST_INT_P (XEXP (x, 1)))
862 return - INTVAL (XEXP (x, 1));
863 if (GET_CODE (x) == PLUS
864 && CONST_INT_P (XEXP (x, 1)))
865 return INTVAL (XEXP (x, 1));
866 return 0;
867}
868
869/* If X is a constant, return the value sans apparent integer term;
870 otherwise return 0.
871 Only obvious integer terms are detected. */
872
873rtx
874get_related_value (const_rtx x)
875{
876 if (GET_CODE (x) != CONST)
877 return 0;
878 x = XEXP (x, 0);
879 if (GET_CODE (x) == PLUS
880 && CONST_INT_P (XEXP (x, 1)))
881 return XEXP (x, 0);
882 else if (GET_CODE (x) == MINUS
883 && CONST_INT_P (XEXP (x, 1)))
884 return XEXP (x, 0);
885 return 0;
886}
887
888/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
889 to somewhere in the same object or object_block as SYMBOL. */
890
891bool
892offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
893{
894 tree decl;
895
896 if (GET_CODE (symbol) != SYMBOL_REF)
897 return false;
898
899 if (offset == 0)
900 return true;
901
902 if (offset > 0)
903 {
904 if (CONSTANT_POOL_ADDRESS_P (symbol)
905 && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
906 return true;
907
908 decl = SYMBOL_REF_DECL (symbol);
909 if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
910 return true;
911 }
912
913 if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
914 && SYMBOL_REF_BLOCK (symbol)
915 && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
916 && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
917 < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
918 return true;
919
920 return false;
921}
922
923/* Split X into a base and a constant offset, storing them in *BASE_OUT
924 and *OFFSET_OUT respectively. */
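/* For example, (const (plus (symbol_ref "x") (const_int 12))) is split
   into the SYMBOL_REF and (const_int 12); any X without that shape is
   returned unchanged as the base, with const0_rtx as the offset. */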
925
926void
927split_const (rtx x, rtx *base_out, rtx *offset_out)
928{
929 if (GET_CODE (x) == CONST)
930 {
931 x = XEXP (x, 0);
932 if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
933 {
934 *base_out = XEXP (x, 0);
935 *offset_out = XEXP (x, 1);
936 return;
937 }
938 }
939 *base_out = x;
940 *offset_out = const0_rtx;
941}
942
943/* Express integer value X as some value Y plus a polynomial offset,
944 where Y is either const0_rtx, X or something within X (as opposed
945 to a new rtx). Return the Y and store the offset in *OFFSET_OUT. */
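/* For example, (plus (reg R) (const_int 16)) yields (reg R) with an
   offset of 16, while a lone CONST_INT yields const0_rtx with the
   constant's value as the offset. */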
946
947rtx
948strip_offset (rtx x, poly_int64 *offset_out)
949{
950 rtx base = const0_rtx;
951 rtx test = x;
952 if (GET_CODE (test) == CONST)
953 test = XEXP (test, 0);
954 if (GET_CODE (test) == PLUS)
955 {
956 base = XEXP (test, 0);
957 test = XEXP (test, 1);
958 }
959 if (poly_int_rtx_p (test, offset_out))
960 return base;
961 *offset_out = 0;
962 return x;
963}
964
965/* Return the argument size in REG_ARGS_SIZE note X. */
966
967poly_int64
968get_args_size (const_rtx x)
969{
970 gcc_checking_assert (REG_NOTE_KIND (x) == REG_ARGS_SIZE);
971 return rtx_to_poly_int64 (XEXP (x, 0));
972}
973
974/* Return the number of places FIND appears within X. If COUNT_DEST is
975 zero, we do not count occurrences inside the destination of a SET. */
976
977int
978count_occurrences (const_rtx x, const_rtx find, int count_dest)
979{
980 int i, j;
981 enum rtx_code code;
982 const char *format_ptr;
983 int count;
984
985 if (x == find)
986 return 1;
987
988 code = GET_CODE (x);
989
990 switch (code)
991 {
992 case REG:
993 CASE_CONST_ANY:
994 case SYMBOL_REF:
995 case CODE_LABEL:
996 case PC:
997 return 0;
998
999 case EXPR_LIST:
1000 count = count_occurrences (XEXP (x, 0), find, count_dest);
1001 if (XEXP (x, 1))
1002 count += count_occurrences (XEXP (x, 1), find, count_dest);
1003 return count;
1004
1005 case MEM:
1006 if (MEM_P (find) && rtx_equal_p (x, find))
1007 return 1;
1008 break;
1009
1010 case SET:
1011 if (SET_DEST (x) == find && ! count_dest)
1012 return count_occurrences (SET_SRC (x), find, count_dest);
1013 break;
1014
1015 default:
1016 break;
1017 }
1018
1019 format_ptr = GET_RTX_FORMAT (code);
1020 count = 0;
1021
1022 for (i = 0; i < GET_RTX_LENGTH (code); i++)
1023 {
1024 switch (*format_ptr++)
1025 {
1026 case 'e':
1027 count += count_occurrences (XEXP (x, i), find, count_dest);
1028 break;
1029
1030 case 'E':
1031 for (j = 0; j < XVECLEN (x, i); j++)
1032 count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
1033 break;
1034 }
1035 }
1036 return count;
1037}
1038
1039
1040/* Return TRUE if OP is a register or subreg of a register that
1041 holds an unsigned quantity. Otherwise, return FALSE. */
1042
1043bool
1044unsigned_reg_p (rtx op)
1045{
1046 if (REG_P (op)
1047 && REG_EXPR (op)
1048 && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
1049 return true;
1050
1051 if (GET_CODE (op) == SUBREG
1052 && SUBREG_PROMOTED_SIGN (op))
1053 return true;
1054
1055 return false;
1056}
1057
1058
1059/* Return true if register REG appears somewhere within IN.
1060 Also works if REG is not a register; in this case it checks
1061 for a subexpression of IN that is Lisp "equal" to REG. */
1062
1063bool
1064reg_mentioned_p (const_rtx reg, const_rtx in)
1065{
1066 const char *fmt;
1067 int i;
1068 enum rtx_code code;
1069
1070 if (in == 0)
1071 return false;
1072
1073 if (reg == in)
1074 return true;
1075
1076 if (GET_CODE (in) == LABEL_REF)
1077 return reg == label_ref_label (in);
1078
1079 code = GET_CODE (in);
1080
1081 switch (code)
1082 {
1083 /* Compare registers by number. */
1084 case REG:
1085 return REG_P (reg) && REGNO (in) == REGNO (reg);
1086
1087 /* These codes have no constituent expressions
1088 and are unique. */
1089 case SCRATCH:
1090 case PC:
1091 return false;
1092
1093 CASE_CONST_ANY:
1094 /* These are kept unique for a given value. */
1095 return false;
1096
1097 default:
1098 break;
1099 }
1100
1101 if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
1102 return true;
1103
1104 fmt = GET_RTX_FORMAT (code);
1105
1106 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1107 {
1108 if (fmt[i] == 'E')
1109 {
1110 int j;
1111 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
1112 if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
1113 return true;
1114 }
1115 else if (fmt[i] == 'e'
1116 && reg_mentioned_p (reg, XEXP (in, i)))
1117 return true;
1118 }
1119 return false;
1120}
1121
1122/* Return true if in between BEG and END, exclusive of BEG and END, there is
1123 no CODE_LABEL insn. */
1124
1125bool
1126no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
1127{
1128 rtx_insn *p;
1129 if (beg == end)
1130 return false;
1131 for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
1132 if (LABEL_P (p))
1133 return false;
1134 return true;
1135}
1136
1137/* Return true if register REG is used in an insn between
1138 FROM_INSN and TO_INSN (exclusive of those two). */
1139
1140bool
1141reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
1142 const rtx_insn *to_insn)
1143{
1144 rtx_insn *insn;
1145
1146 if (from_insn == to_insn)
1147 return false;
1148
1149 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
1150 if (NONDEBUG_INSN_P (insn)
1151 && (reg_overlap_mentioned_p (reg, PATTERN (insn))
1152 || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
1153 return true;
1154 return false;
1155}
1156
1157/* Return true if the old value of X, a register, is referenced in BODY. If X
1158 is entirely replaced by a new value and the only use is as a SET_DEST,
1159 we do not consider it a reference. */
1160
1161bool
1162reg_referenced_p (const_rtx x, const_rtx body)
1163{
1164 int i;
1165
1166 switch (GET_CODE (body))
1167 {
1168 case SET:
1169 if (reg_overlap_mentioned_p (x, SET_SRC (body)))
1170 return true;
1171
1172 /* If the destination is anything other than PC, a REG or a SUBREG
1173 of a REG that occupies all of the REG, the insn references X if
1174 it is mentioned in the destination. */
1175 if (GET_CODE (SET_DEST (body)) != PC
1176 && !REG_P (SET_DEST (body))
1177 && ! (GET_CODE (SET_DEST (body)) == SUBREG
1178 && REG_P (SUBREG_REG (SET_DEST (body)))
1179 && !read_modify_subreg_p (SET_DEST (body)))
1180 && reg_overlap_mentioned_p (x, SET_DEST (body)))
1181 return true;
1182 return false;
1183
1184 case ASM_OPERANDS:
1185 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1186 if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
1187 return true;
1188 return false;
1189
1190 case CALL:
1191 case USE:
1192 case IF_THEN_ELSE:
1193 return reg_overlap_mentioned_p (x, body);
1194
1195 case TRAP_IF:
1196 return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));
1197
1198 case PREFETCH:
1199 return reg_overlap_mentioned_p (x, XEXP (body, 0));
1200
1201 case UNSPEC:
1202 case UNSPEC_VOLATILE:
1203 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1204 if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
1205 return true;
1206 return false;
1207
1208 case PARALLEL:
1209 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1210 if (reg_referenced_p (x, XVECEXP (body, 0, i)))
1211 return true;
1212 return false;
1213
1214 case CLOBBER:
1215 if (MEM_P (XEXP (body, 0)))
1216 if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
1217 return true;
1218 return false;
1219
1220 case COND_EXEC:
1221 if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
1222 return true;
1223 return reg_referenced_p (x, COND_EXEC_CODE (body));
1224
1225 default:
1226 return false;
1227 }
1228}
1229
1230/* Return true if register REG is set or clobbered in an insn between
1231 FROM_INSN and TO_INSN (exclusive of those two). */
1232
1233bool
1234reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
1235 const rtx_insn *to_insn)
1236{
1237 const rtx_insn *insn;
1238
1239 if (from_insn == to_insn)
1240 return false;
1241
1242 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
1243 if (INSN_P (insn) && reg_set_p (reg, insn))
1244 return true;
1245 return false;
1246}
1247
1248/* Return true if REG is set or clobbered inside INSN. */
1249
1250bool
1251reg_set_p (const_rtx reg, const_rtx insn)
1252{
1253 /* After delay slot handling, call and branch insns might be in a
1254 sequence. Check all the elements there. */
1255 if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
1256 {
1257 for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
1258 if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
1259 return true;
1260
1261 return false;
1262 }
1263
1264 /* We can be passed an insn or part of one. If we are passed an insn,
1265 check if a side-effect of the insn clobbers REG. */
1266 if (INSN_P (insn)
1267 && (FIND_REG_INC_NOTE (insn, reg)
1268 || (CALL_P (insn)
1269 && ((REG_P (reg)
1270 && REGNO (reg) < FIRST_PSEUDO_REGISTER
1271 && (insn_callee_abi (as_a<const rtx_insn *> (insn))
1272 .clobbers_reg_p (GET_MODE (reg), REGNO (reg))))
1273 || MEM_P (reg)
1274 || find_reg_fusage (insn, CLOBBER, reg)))))
1275 return true;
1276
1277 /* There are no REG_INC notes for SP autoinc. */
1278 if (reg == stack_pointer_rtx && INSN_P (insn))
1279 {
1280 subrtx_var_iterator::array_type array;
1281 FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
1282 {
1283 rtx mem = *iter;
1284 if (mem
1285 && MEM_P (mem)
1286 && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
1287 {
1288 if (XEXP (XEXP (mem, 0), 0) == stack_pointer_rtx)
1289 return true;
1290 iter.skip_subrtxes ();
1291 }
1292 }
1293 }
1294
1295 return set_of (reg, insn) != NULL_RTX;
1296}
1297
1298/* Similar to reg_set_between_p, but check all registers in X. Return false
1299 only if none of them are modified between START and END. Return true if
1300 X contains a MEM; this routine does use memory aliasing. */
1301
1302bool
1303modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
1304{
1305 const enum rtx_code code = GET_CODE (x);
1306 const char *fmt;
1307 int i, j;
1308 rtx_insn *insn;
1309
1310 if (start == end)
1311 return false;
1312
1313 switch (code)
1314 {
1315 CASE_CONST_ANY:
1316 case CONST:
1317 case SYMBOL_REF:
1318 case LABEL_REF:
1319 return false;
1320
1321 case PC:
1322 return true;
1323
1324 case MEM:
1325 if (modified_between_p (XEXP (x, 0), start, end))
1326 return true;
1327 if (MEM_READONLY_P (x))
1328 return false;
1329 for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
1330 if (memory_modified_in_insn_p (x, insn))
1331 return true;
1332 return false;
1333
1334 case REG:
1335 return reg_set_between_p (x, start, end);
1336
1337 default:
1338 break;
1339 }
1340
1341 fmt = GET_RTX_FORMAT (code);
1342 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1343 {
1344 if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
1345 return true;
1346
1347 else if (fmt[i] == 'E')
1348 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1349 if (modified_between_p (XVECEXP (x, i, j), start, end))
1350 return true;
1351 }
1352
1353 return false;
1354}
1355
1356/* Similar to reg_set_p, but check all registers in X. Return false only if
1357 none of them are modified in INSN. Return true if X contains a MEM; this
1358 routine does use memory aliasing. */
1359
1360bool
1361modified_in_p (const_rtx x, const_rtx insn)
1362{
1363 const enum rtx_code code = GET_CODE (x);
1364 const char *fmt;
1365 int i, j;
1366
1367 switch (code)
1368 {
1369 CASE_CONST_ANY:
1370 case CONST:
1371 case SYMBOL_REF:
1372 case LABEL_REF:
1373 return false;
1374
1375 case PC:
1376 return true;
1377
1378 case MEM:
1379 if (modified_in_p (XEXP (x, 0), insn))
1380 return true;
1381 if (MEM_READONLY_P (x))
1382 return false;
1383 if (memory_modified_in_insn_p (x, insn))
1384 return true;
1385 return false;
1386
1387 case REG:
1388 return reg_set_p (x, insn);
1389
1390 default:
1391 break;
1392 }
1393
1394 fmt = GET_RTX_FORMAT (code);
1395 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1396 {
1397 if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
1398 return true;
1399
1400 else if (fmt[i] == 'E')
1401 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1402 if (modified_in_p (XVECEXP (x, i, j), insn))
1403 return true;
1404 }
1405
1406 return false;
1407}
1408
1409/* Return true if X is a SUBREG and if storing a value to X would
1410 preserve some of its SUBREG_REG. For example, on a normal 32-bit
1411 target, using a SUBREG to store to one half of a DImode REG would
1412 preserve the other half. */
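/* On such a target, (subreg:SI (reg:DI R) 0) is therefore a
   read-modify subreg (the upper word survives the store), whereas
   (subreg:QI (reg:SI R) 0) is not, because storing to it clobbers the
   whole SImode register. */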
1413
1414bool
1415read_modify_subreg_p (const_rtx x)
1416{
1417 if (GET_CODE (x) != SUBREG)
1418 return false;
1419 poly_uint64 isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
1420 poly_uint64 osize = GET_MODE_SIZE (GET_MODE (x));
1421 poly_uint64 regsize = REGMODE_NATURAL_SIZE (GET_MODE (SUBREG_REG (x)));
1422 /* The inner and outer modes of a subreg must be ordered, so that we
1423 can tell whether they're paradoxical or partial. */
1424 gcc_checking_assert (ordered_p (isize, osize));
1425 return (maybe_gt (isize, osize) && maybe_gt (isize, regsize));
1426}
1427
1428/* Helper function for set_of. */
1429struct set_of_data
1430 {
1431 const_rtx found;
1432 const_rtx pat;
1433 };
1434
1435static void
1436set_of_1 (rtx x, const_rtx pat, void *data1)
1437{
1438 struct set_of_data *const data = (struct set_of_data *) (data1);
1439 if (rtx_equal_p (x, data->pat)
1440 || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
1441 data->found = pat;
1442}
1443
1444/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
1445 (either directly or via STRICT_LOW_PART and similar modifiers). */
1446const_rtx
1447set_of (const_rtx pat, const_rtx insn)
1448{
1449 struct set_of_data data;
1450 data.found = NULL_RTX;
1451 data.pat = pat;
1452 note_pattern_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
1453 return data.found;
1454}
1455
1456/* Check whether instruction pattern PAT contains a SET with the following
1457 properties:
1458
1459 - the SET is executed unconditionally; and
1460 - either:
1461 - the destination of the SET is a REG that contains REGNO; or
1462 - both:
1463 - the destination of the SET is a SUBREG of such a REG; and
1464 - writing to the subreg clobbers all of the SUBREG_REG
1465 (in other words, read_modify_subreg_p is false).
1466
1467 If PAT does have a SET like that, return the set, otherwise return null.
1468
1469 This is intended to be an alternative to single_set for passes that
1470 can handle patterns with multiple_sets. */
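/* For example, given (parallel [(set (reg:SI 3) ...) (clobber (reg:CC 17))]),
   passing REGNO 3 returns the inner SET, while passing REGNO 17 returns
   null because the CLOBBER is not a SET. (The register numbers here are
   purely illustrative.) */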
1471rtx
1472simple_regno_set (rtx pat, unsigned int regno)
1473{
1474 if (GET_CODE (pat) == PARALLEL)
1475 {
1476 int last = XVECLEN (pat, 0) - 1;
1477 for (int i = 0; i < last; ++i)
1478 if (rtx set = simple_regno_set (XVECEXP (pat, 0, i), regno))
1479 return set;
1480
1481 pat = XVECEXP (pat, 0, last);
1482 }
1483
1484 if (GET_CODE (pat) == SET
1485 && covers_regno_no_parallel_p (SET_DEST (pat), regno))
1486 return pat;
1487
1488 return nullptr;
1489}
1490
1491/* Add all hard registers in X to *PSET. */
1492void
1493find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
1494{
1495 subrtx_iterator::array_type array;
1496 FOR_EACH_SUBRTX (iter, array, x, NONCONST)
1497 {
1498 const_rtx x = *iter;
1499 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
1500 add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1501 }
1502}
1503
1504/* This function, called through note_stores, collects sets and
1505 clobbers of hard registers in a HARD_REG_SET, which is pointed to
1506 by DATA. */
1507void
1508record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
1509{
1510 HARD_REG_SET *pset = (HARD_REG_SET *)data;
1511 if (REG_P (x) && HARD_REGISTER_P (x))
1512 add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1513}
1514
1515/* Examine INSN, and compute the set of hard registers written by it.
1516 Store it in *PSET. Should only be called after reload.
1517
1518 IMPLICIT is true if we should include registers that are fully-clobbered
1519 by calls. This should be used with caution, since it doesn't include
1520 partially-clobbered registers. */
1521void
1522find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
1523{
1524 rtx link;
1525
1526 CLEAR_HARD_REG_SET (*pset);
1527 note_stores (insn, record_hard_reg_sets, pset);
1528 if (CALL_P (insn) && implicit)
1529 *pset |= insn_callee_abi (insn).full_reg_clobbers ();
1530 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1531 if (REG_NOTE_KIND (link) == REG_INC)
1532 record_hard_reg_sets (XEXP (link, 0), NULL, pset);
1533}
1534
1535/* Like record_hard_reg_sets, but called through note_uses. */
1536void
1537record_hard_reg_uses (rtx *px, void *data)
1538{
1539 find_all_hard_regs (*px, (HARD_REG_SET *) data);
1540}
1541
1542/* Given an INSN, return a SET expression if this insn has only a single SET.
1543 It may also have CLOBBERs, USEs, or SETs whose output
1544 will not be used, which we ignore. */
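/* For example, (parallel [(set ...) (clobber ...)]) has a single SET,
   whereas a PARALLEL containing two SETs whose results are both live
   makes this return NULL_RTX. */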
1545
1546rtx
1547single_set_2 (const rtx_insn *insn, const_rtx pat)
1548{
1549 rtx set = NULL;
1550 int set_verified = 1;
1551 int i;
1552
1553 if (GET_CODE (pat) == PARALLEL)
1554 {
1555 for (i = 0; i < XVECLEN (pat, 0); i++)
1556 {
1557 rtx sub = XVECEXP (pat, 0, i);
1558 switch (GET_CODE (sub))
1559 {
1560 case USE:
1561 case CLOBBER:
1562 break;
1563
1564 case SET:
1565 /* We can consider insns having multiple sets, where all
1566 but one are dead as single set insns. In common case
1567 only single set is present in the pattern so we want
1568 to avoid checking for REG_UNUSED notes unless necessary.
1569
1570 When we reach set first time, we just expect this is
1571 the single set we are looking for and only when more
1572 sets are found in the insn, we check them. */
1573 if (!set_verified)
1574 {
1575 if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
1576 && !side_effects_p (set))
1577 set = NULL;
1578 else
1579 set_verified = 1;
1580 }
1581 if (!set)
1582 set = sub, set_verified = 0;
1583 else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
1584 || side_effects_p (sub))
1585 return NULL_RTX;
1586 break;
1587
1588 default:
1589 return NULL_RTX;
1590 }
1591 }
1592 }
1593 return set;
1594}
1595
1596/* Given an INSN, return true if it has more than one SET, else return
1597 false. */
1598
1599bool
1600multiple_sets (const_rtx insn)
1601{
1602 bool found;
1603 int i;
1604
1605 /* INSN must be an insn. */
1606 if (! INSN_P (insn))
1607 return false;
1608
1609 /* Only a PARALLEL can have multiple SETs. */
1610 if (GET_CODE (PATTERN (insn)) == PARALLEL)
1611 {
1612 for (i = 0, found = false; i < XVECLEN (PATTERN (insn), 0); i++)
1613 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
1614 {
1615 /* If we have already found a SET, then return now. */
1616 if (found)
1617 return true;
1618 else
1619 found = true;
1620 }
1621 }
1622
1623 /* Either zero or one SET. */
1624 return false;
1625}
1626
1627/* Return true if the destination of SET equals the source
1628 and there are no side effects. */
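/* For example, (set (reg:SI R) (reg:SI R)) is a no-op, and so is a copy
   between two identical MEMs as long as neither side has side effects
   such as volatility or an auto-increment address. */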
1629
1630bool
1631set_noop_p (const_rtx set)
1632{
1633 rtx src = SET_SRC (set);
1634 rtx dst = SET_DEST (set);
1635
1636 if (dst == pc_rtx && src == pc_rtx)
1637 return true;
1638
1639 if (MEM_P (dst) && MEM_P (src))
1640 return (rtx_equal_p (dst, src)
1641 && !side_effects_p (dst)
1642 && !side_effects_p (src));
1643
1644 if (GET_CODE (dst) == ZERO_EXTRACT)
1645 return (rtx_equal_p (XEXP (dst, 0), src)
1646 && !BITS_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
1647 && !side_effects_p (src)
1648 && !side_effects_p (XEXP (dst, 0)));
1649
1650 if (GET_CODE (dst) == STRICT_LOW_PART)
1651 dst = XEXP (dst, 0);
1652
1653 if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
1654 {
1655 if (maybe_ne (SUBREG_BYTE (src), SUBREG_BYTE (dst)))
1656 return false;
1657 src = SUBREG_REG (src);
1658 dst = SUBREG_REG (dst);
1659 if (GET_MODE (src) != GET_MODE (dst))
1660 /* It is hard to tell whether subregs refer to the same bits, so act
1661 conservatively and return false. */
1662 return false;
1663 }
1664
1665 /* It is a NOOP if destination overlaps with selected src vector
1666 elements. */
1667 if (GET_CODE (src) == VEC_SELECT
1668 && REG_P (XEXP (src, 0)) && REG_P (dst)
1669 && HARD_REGISTER_P (XEXP (src, 0))
1670 && HARD_REGISTER_P (dst))
1671 {
1672 int i;
1673 rtx par = XEXP (src, 1);
1674 rtx src0 = XEXP (src, 0);
1675 poly_int64 c0;
1676 if (!poly_int_rtx_p (XVECEXP (par, 0, 0), &c0))
1677 return false;
1678 poly_int64 offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;
1679
1680 for (i = 1; i < XVECLEN (par, 0); i++)
1681 {
1682 poly_int64 c0i;
1683 if (!poly_int_rtx_p (XVECEXP (par, 0, i), &c0i)
1684 || maybe_ne (c0i, c0 + i))
1685 return false;
1686 }
1687 return
1688 REG_CAN_CHANGE_MODE_P (REGNO (dst), GET_MODE (src0), GET_MODE (dst))
1689 && simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
1690 offset, GET_MODE (dst)) == (int) REGNO (dst);
1691 }
1692
1693 return (REG_P (src) && REG_P (dst)
1694 && REGNO (src) == REGNO (dst));
1695}
1696
1697/* Return true if an insn consists only of SETs, each of which only sets a
1698 value to itself. */
1699
1700bool
1701noop_move_p (const rtx_insn *insn)
1702{
1703 rtx pat = PATTERN (insn);
1704
1705 if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
1706 return true;
1707
1708 /* Check the code to be executed for COND_EXEC. */
1709 if (GET_CODE (pat) == COND_EXEC)
1710 pat = COND_EXEC_CODE (pat);
1711
1712 if (GET_CODE (pat) == SET && set_noop_p (pat))
1713 return true;
1714
1715 if (GET_CODE (pat) == PARALLEL)
1716 {
1717 int i;
1718 /* If nothing but SETs of registers to themselves,
1719 this insn can also be deleted. */
1720 for (i = 0; i < XVECLEN (pat, 0); i++)
1721 {
1722 rtx tem = XVECEXP (pat, 0, i);
1723
1724 if (GET_CODE (tem) == USE || GET_CODE (tem) == CLOBBER)
1725 continue;
1726
1727 if (GET_CODE (tem) != SET || ! set_noop_p (tem))
1728 return false;
1729 }
1730
1731 return true;
1732 }
1733 return false;
1734}
1735
1736
1737/* Return true if register in range [REGNO, ENDREGNO)
1738 appears either explicitly or implicitly in X
1739 other than being stored into.
1740
1741 References contained within the substructure at LOC do not count.
1742 LOC may be zero, meaning don't ignore anything. */
1743
1744bool
1745refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
1746 rtx *loc)
1747{
1748 int i;
1749 unsigned int x_regno;
1750 RTX_CODE code;
1751 const char *fmt;
1752
1753 repeat:
1754 /* The contents of a REG_NONNEG note is always zero, so we must come here
1755 upon repeat in case the last REG_NOTE is a REG_NONNEG note. */
1756 if (x == 0)
1757 return false;
1758
1759 code = GET_CODE (x);
1760
1761 switch (code)
1762 {
1763 case REG:
1764 x_regno = REGNO (x);
1765
1766 /* If we are modifying the stack, frame, or argument pointer, it will
1767 clobber a virtual register. In fact, we could be more precise,
1768 but it isn't worth it. */
1769 if ((x_regno == STACK_POINTER_REGNUM
1770 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1771 && x_regno == ARG_POINTER_REGNUM)
1772 || x_regno == FRAME_POINTER_REGNUM)
1773 && VIRTUAL_REGISTER_NUM_P (regno))
1774 return true;
1775
1776 return endregno > x_regno && regno < END_REGNO (x);
1777
1778 case SUBREG:
1779 /* If this is a SUBREG of a hard reg, we can see exactly which
1780 registers are being modified. Otherwise, handle normally. */
1781 if (REG_P (SUBREG_REG (x))
1782 && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
1783 {
1784 unsigned int inner_regno = subreg_regno (x);
1785 unsigned int inner_endregno
1786 = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
1787 ? subreg_nregs (x) : 1);
1788
1789 return endregno > inner_regno && regno < inner_endregno;
1790 }
1791 break;
1792
1793 case CLOBBER:
1794 case SET:
1795 if (&SET_DEST (x) != loc
1796 /* Note setting a SUBREG counts as referring to the REG it is in for
1797 a pseudo but not for hard registers since we can
1798 treat each word individually. */
1799 && ((GET_CODE (SET_DEST (x)) == SUBREG
1800 && loc != &SUBREG_REG (SET_DEST (x))
1801 && REG_P (SUBREG_REG (SET_DEST (x)))
1802 && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
1803 && refers_to_regno_p (regno, endregno,
1804 SUBREG_REG (SET_DEST (x)), loc))
1805 || (!REG_P (SET_DEST (x))
1806 && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
1807 return true;
1808
1809 if (code == CLOBBER || loc == &SET_SRC (x))
1810 return false;
1811 x = SET_SRC (x);
1812 goto repeat;
1813
1814 default:
1815 break;
1816 }
1817
1818 /* X does not match, so try its subexpressions. */
1819
1820 fmt = GET_RTX_FORMAT (code);
1821 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1822 {
1823 if (fmt[i] == 'e' && loc != &XEXP (x, i))
1824 {
1825 if (i == 0)
1826 {
1827 x = XEXP (x, 0);
1828 goto repeat;
1829 }
1830 else
1831 if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
1832 return true;
1833 }
1834 else if (fmt[i] == 'E')
1835 {
1836 int j;
1837 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1838 if (loc != &XVECEXP (x, i, j)
1839 && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
1840 return true;
1841 }
1842 }
1843 return false;
1844}
1845
1846/* Return true if modifying X will affect IN. If X is a register or a SUBREG,
1847 we check if any register number in X conflicts with the relevant register
1848 numbers. If X is a constant, return false. If X is a MEM, return true iff
1849 IN contains a MEM (we don't bother checking for memory addresses that can't
1850 conflict because we expect this to be a rare case). */
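/* For example, a single-word hard register overlaps a multi-word hard
   register value that starts at the same register number, even though
   the two rtxes are not rtx_equal_p. */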
1851
1852bool
1853reg_overlap_mentioned_p (const_rtx x, const_rtx in)
1854{
1855 unsigned int regno, endregno;
1856
1857 /* If either argument is a constant, then modifying X cannot
1858 affect IN. By testing IN here, we can profitably combine the
1859 CONSTANT_P (x) check with the switch statement below. */
1860 if (CONSTANT_P (in))
1861 return false;
1862
1863 recurse:
1864 switch (GET_CODE (x))
1865 {
1866 case CLOBBER:
1867 case STRICT_LOW_PART:
1868 case ZERO_EXTRACT:
1869 case SIGN_EXTRACT:
1870 /* Overly conservative. */
1871 x = XEXP (x, 0);
1872 goto recurse;
1873
1874 case SUBREG:
1875 regno = REGNO (SUBREG_REG (x));
1876 if (regno < FIRST_PSEUDO_REGISTER)
1877 regno = subreg_regno (x);
1878 endregno = regno + (regno < FIRST_PSEUDO_REGISTER
1879 ? subreg_nregs (x) : 1);
1880 goto do_reg;
1881
1882 case REG:
1883 regno = REGNO (x);
1884 endregno = END_REGNO (x);
1885 do_reg:
1886 return refers_to_regno_p (regno, endregno, in, (rtx*) 0);
1887
1888 case MEM:
1889 {
1890 const char *fmt;
1891 int i;
1892
1893 if (MEM_P (in))
1894 return true;
1895
1896 fmt = GET_RTX_FORMAT (GET_CODE (in));
1897 for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
1898 if (fmt[i] == 'e')
1899 {
1900 if (reg_overlap_mentioned_p (x, XEXP (in, i)))
1901 return true;
1902 }
1903 else if (fmt[i] == 'E')
1904 {
1905 int j;
1906 for (j = XVECLEN (in, i) - 1; j >= 0; --j)
1907 if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
1908 return true;
1909 }
1910
1911 return false;
1912 }
1913
1914 case SCRATCH:
1915 case PC:
1916 return reg_mentioned_p (x, in);
1917
1918 case PARALLEL:
1919 {
1920 int i;
1921
1922 /* If any register in here refers to it we return true. */
1923 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1924 if (XEXP (XVECEXP (x, 0, i), 0) != 0
1925 && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
1926 return true;
1927 return false;
1928 }
1929
1930 default:
1931 gcc_assert (CONSTANT_P (x));
1932 return false;
1933 }
1934}
1935
1936/* Call FUN on each register or MEM that is stored into or clobbered by X.
1937 (X would be the pattern of an insn). DATA is an arbitrary pointer,
1938 ignored by note_stores, but passed to FUN.
1939
1940 FUN receives three arguments:
1941 1. the REG, MEM or PC being stored in or clobbered,
1942 2. the SET or CLOBBER rtx that does the store,
1943 3. the pointer DATA provided to note_stores.
1944
1945 If the item being stored in or clobbered is a SUBREG of a hard register,
1946 the SUBREG will be passed. */
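/* A typical FUN is record_hard_reg_sets above, which
   find_all_hard_reg_sets passes (via note_stores) to collect every hard
   register written by an instruction. */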
1947
1948void
1949note_pattern_stores (const_rtx x,
1950 void (*fun) (rtx, const_rtx, void *), void *data)
1951{
1952 int i;
1953
1954 if (GET_CODE (x) == COND_EXEC)
1955 x = COND_EXEC_CODE (x);
1956
1957 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
1958 {
1959 rtx dest = SET_DEST (x);
1960
1961 while ((GET_CODE (dest) == SUBREG
1962 && (!REG_P (SUBREG_REG (dest))
1963 || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
1964 || GET_CODE (dest) == ZERO_EXTRACT
1965 || GET_CODE (dest) == STRICT_LOW_PART)
1966 dest = XEXP (dest, 0);
1967
1968 /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
1969 each of whose first operand is a register. */
1970 if (GET_CODE (dest) == PARALLEL)
1971 {
1972 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1973 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
1974 (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
1975 }
1976 else
1977 (*fun) (dest, x, data);
1978 }
1979
1980 else if (GET_CODE (x) == PARALLEL)
1981 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1982 note_pattern_stores (XVECEXP (x, 0, i), fun, data);
1983}
1984
1985/* Same, but for an instruction. If the instruction is a call, include
1986 any CLOBBERs in its CALL_INSN_FUNCTION_USAGE. */
1987
1988void
1989note_stores (const rtx_insn *insn,
1990 void (*fun) (rtx, const_rtx, void *), void *data)
1991{
1992 if (CALL_P (insn))
1993 for (rtx link = CALL_INSN_FUNCTION_USAGE (insn);
1994 link; link = XEXP (link, 1))
1995 if (GET_CODE (XEXP (link, 0)) == CLOBBER)
1996 note_pattern_stores (XEXP (link, 0), fun, data);
1997 note_pattern_stores (PATTERN (insn), fun, data);
1998}
1999
2000/* Like note_stores, but call FUN for each expression that is being
2001 referenced in PBODY, a pointer to the PATTERN of an insn. We only call
2002 FUN for each expression, not any interior subexpressions. FUN receives a
2003 pointer to the expression and the DATA passed to this function.
2004
2005 Note that this is not quite the same test as that done in reg_referenced_p
2006 since that considers something as being referenced if it is being
2007 partially set, while we do not. */
2008
2009void
2010note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
2011{
2012 rtx body = *pbody;
2013 int i;
2014
2015 switch (GET_CODE (body))
2016 {
2017 case COND_EXEC:
2018 (*fun) (&COND_EXEC_TEST (body), data);
2019 note_uses (&COND_EXEC_CODE (body), fun, data);
2020 return;
2021
2022 case PARALLEL:
2023 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
2024 note_uses (&XVECEXP (body, 0, i), fun, data);
2025 return;
2026
2027 case SEQUENCE:
2028 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
2029 note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
2030 return;
2031
2032 case USE:
2033 (*fun) (&XEXP (body, 0), data);
2034 return;
2035
2036 case ASM_OPERANDS:
2037 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
2038 (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
2039 return;
2040
2041 case TRAP_IF:
2042 (*fun) (&TRAP_CONDITION (body), data);
2043 return;
2044
2045 case PREFETCH:
2046 (*fun) (&XEXP (body, 0), data);
2047 return;
2048
2049 case UNSPEC:
2050 case UNSPEC_VOLATILE:
2051 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
2052 (*fun) (&XVECEXP (body, 0, i), data);
2053 return;
2054
2055 case CLOBBER:
2056 if (MEM_P (XEXP (body, 0)))
2057 (*fun) (&XEXP (XEXP (body, 0), 0), data);
2058 return;
2059
2060 case SET:
2061 {
2062 rtx dest = SET_DEST (body);
2063
2064 /* For sets we call FUN on everything in the source, on the address of a
2065 memory destination, and on the size and position operands of a ZERO_EXTRACT. */
2066 (*fun) (&SET_SRC (body), data);
2067
2068 if (GET_CODE (dest) == ZERO_EXTRACT)
2069 {
2070 (*fun) (&XEXP (dest, 1), data);
2071 (*fun) (&XEXP (dest, 2), data);
2072 }
2073
2074 while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
2075 dest = XEXP (dest, 0);
2076
2077 if (MEM_P (dest))
2078 (*fun) (&XEXP (dest, 0), data);
2079 }
2080 return;
2081
2082 default:
2083 /* All the other possibilities never store. */
2084 (*fun) (pbody, data);
2085 return;
2086 }
2087}
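
/* Illustrative sketch, not part of the GCC sources: because note_uses
   passes FUN a pointer to each used expression, a callback can inspect
   or rewrite uses in place.  For example, counting the used expressions
   that mention an assumed register rtx REG_OF_INTEREST:

     static void
     count_reg_uses (rtx *loc, void *data)
     {
       if (reg_mentioned_p (REG_OF_INTEREST, *loc))
         ++*(int *) data;
     }

     int count = 0;
     note_uses (&PATTERN (insn), count_reg_uses, &count);  */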
2088
2089/* Try to add a description of REG X to this object, stopping once
2090 the REF_END limit has been reached. FLAGS is a bitmask of
2091 rtx_obj_reference flags that describe the context. */
2092
2093void
2094rtx_properties::try_to_add_reg (const_rtx x, unsigned int flags)
2095{
2096 if (REG_NREGS (x) != 1)
2097 flags |= rtx_obj_flags::IS_MULTIREG;
2098 machine_mode mode = GET_MODE (x);
2099 unsigned int start_regno = REGNO (x);
2100 unsigned int end_regno = END_REGNO (x);
2101 for (unsigned int regno = start_regno; regno < end_regno; ++regno)
2102 if (ref_iter != ref_end)
2103 *ref_iter++ = rtx_obj_reference (regno, flags, mode,
2104 regno - start_regno);
2105}
2106
2107/* Add a description of destination X to this object. FLAGS is a bitmask
2108 of rtx_obj_reference flags that describe the context.
2109
2110 This routine accepts all rtxes that can legitimately appear in a
2111 SET_DEST. */
2112
2113void
2114rtx_properties::try_to_add_dest (const_rtx x, unsigned int flags)
2115{
2116 /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
2117 each of whose first operand is a register. */
2118 if (UNLIKELY (GET_CODE (x) == PARALLEL))
2119 {
2120 for (int i = XVECLEN (x, 0) - 1; i >= 0; --i)
2121 if (rtx dest = XEXP (XVECEXP (x, 0, i), 0))
2122 try_to_add_dest (dest, flags);
2123 return;
2124 }
2125
2126 unsigned int base_flags = flags & rtx_obj_flags::STICKY_FLAGS;
2127 flags |= rtx_obj_flags::IS_WRITE;
2128 for (;;)
2129 if (GET_CODE (x) == ZERO_EXTRACT)
2130 {
2131 try_to_add_src (XEXP (x, 1), base_flags);
2132 try_to_add_src (XEXP (x, 2), base_flags);
2133 flags |= rtx_obj_flags::IS_READ;
2134 x = XEXP (x, 0);
2135 }
2136 else if (GET_CODE (x) == STRICT_LOW_PART)
2137 {
2138 flags |= rtx_obj_flags::IS_READ;
2139 x = XEXP (x, 0);
2140 }
2141 else if (GET_CODE (x) == SUBREG)
2142 {
2143 flags |= rtx_obj_flags::IN_SUBREG;
2144 if (read_modify_subreg_p (x))
2145 flags |= rtx_obj_flags::IS_READ;
2146 x = SUBREG_REG (x);
2147 }
2148 else
2149 break;
2150
2151 if (MEM_P (x))
2152 {
2153 if (ref_iter != ref_end)
2154 *ref_iter++ = rtx_obj_reference (MEM_REGNO, flags, GET_MODE (x));
2155
2156 unsigned int addr_flags = base_flags | rtx_obj_flags::IN_MEM_STORE;
2157 if (flags & rtx_obj_flags::IS_READ)
2158 addr_flags |= rtx_obj_flags::IN_MEM_LOAD;
2159 try_to_add_src (XEXP (x, 0), addr_flags);
2160 return;
2161 }
2162
2163 if (LIKELY (REG_P (x)))
2164 {
2165 /* We want to keep sp alive everywhere - by making all
2166 writes to sp also use sp. */
2167 if (REGNO (x) == STACK_POINTER_REGNUM)
2168 flags |= rtx_obj_flags::IS_READ;
2169 try_to_add_reg (x, flags);
2170 return;
2171 }
2172}
2173
2174/* Try to add a description of source X to this object, stopping once
2175 the REF_END limit has been reached. FLAGS is a bitmask of
2176 rtx_obj_reference flags that describe the context.
2177
2178 This routine accepts all rtxes that can legitimately appear in a SET_SRC. */
2179
2180void
2181rtx_properties::try_to_add_src (const_rtx x, unsigned int flags)
2182{
2183 unsigned int base_flags = flags & rtx_obj_flags::STICKY_FLAGS;
2184 subrtx_iterator::array_type array;
2185 FOR_EACH_SUBRTX (iter, array, x, NONCONST)
2186 {
2187 const_rtx x = *iter;
2188 rtx_code code = GET_CODE (x);
2189 if (code == REG)
2190 try_to_add_reg (x, flags | rtx_obj_flags::IS_READ);
2191 else if (code == MEM)
2192 {
2193 if (MEM_VOLATILE_P (x))
2194 has_volatile_refs = true;
2195
2196 if (!MEM_READONLY_P (x) && ref_iter != ref_end)
2197 {
2198 auto mem_flags = flags | rtx_obj_flags::IS_READ;
2199 *ref_iter++ = rtx_obj_reference (MEM_REGNO, mem_flags,
2200 GET_MODE (x));
2201 }
2202
2203 try_to_add_src (XEXP (x, 0),
2204 base_flags | rtx_obj_flags::IN_MEM_LOAD);
2205 iter.skip_subrtxes ();
2206 }
2207 else if (code == SUBREG)
2208 {
2209 try_to_add_src (SUBREG_REG (x), flags | rtx_obj_flags::IN_SUBREG);
2210 iter.skip_subrtxes ();
2211 }
2212 else if (code == UNSPEC_VOLATILE)
2213 has_volatile_refs = true;
2214 else if (code == ASM_INPUT || code == ASM_OPERANDS)
2215 {
2216 has_asm = true;
2217 if (MEM_VOLATILE_P (x))
2218 has_volatile_refs = true;
2219 }
2220 else if (code == PRE_INC
2221 || code == PRE_DEC
2222 || code == POST_INC
2223 || code == POST_DEC
2224 || code == PRE_MODIFY
2225 || code == POST_MODIFY)
2226 {
2227 has_pre_post_modify = true;
2228
2229 unsigned int addr_flags = (base_flags
2230 | rtx_obj_flags::IS_PRE_POST_MODIFY
2231 | rtx_obj_flags::IS_READ);
2232 try_to_add_dest (XEXP (x, 0), addr_flags);
2233 if (code == PRE_MODIFY || code == POST_MODIFY)
2234 iter.substitute (XEXP (XEXP (x, 1), 1));
2235 else
2236 iter.skip_subrtxes ();
2237 }
2238 else if (code == CALL)
2239 has_call = true;
2240 }
2241}
2242
2243/* Try to add a description of instruction pattern PAT to this object,
2244 stopping once the REF_END limit has been reached. */
2245
2246void
2247rtx_properties::try_to_add_pattern (const_rtx pat)
2248{
2249 switch (GET_CODE (pat))
2250 {
2251 case COND_EXEC:
2252 try_to_add_src (COND_EXEC_TEST (pat));
2253 try_to_add_pattern (COND_EXEC_CODE (pat));
2254 break;
2255
2256 case PARALLEL:
2257 {
2258 int last = XVECLEN (pat, 0) - 1;
2259 for (int i = 0; i < last; ++i)
2260 try_to_add_pattern (XVECEXP (pat, 0, i));
2261 try_to_add_pattern (XVECEXP (pat, 0, last));
2262 break;
2263 }
2264
2265 case ASM_OPERANDS:
2266 for (int i = 0, len = ASM_OPERANDS_INPUT_LENGTH (pat); i < len; ++i)
2267 try_to_add_src (ASM_OPERANDS_INPUT (pat, i));
2268 break;
2269
2270 case CLOBBER:
2271 try_to_add_dest (XEXP (pat, 0), rtx_obj_flags::IS_CLOBBER);
2272 break;
2273
2274 case SET:
2275 try_to_add_dest (SET_DEST (pat));
2276 try_to_add_src (SET_SRC (pat));
2277 break;
2278
2279 default:
2280 /* All the other possibilities never store and can use a normal
2281 rtx walk. This includes:
2282
2283 - USE
2284 - TRAP_IF
2285 - PREFETCH
2286 - UNSPEC
2287 - UNSPEC_VOLATILE. */
2288 try_to_add_src (pat);
2289 break;
2290 }
2291}
2292
2293/* Try to add a description of INSN to this object, stopping once
2294 the REF_END limit has been reached. INCLUDE_NOTES is true if the
2295 description should include REG_EQUAL and REG_EQUIV notes; all such
2296 references will then be marked with rtx_obj_flags::IN_NOTE.
2297
2298 For calls, this description includes all accesses in
2299 CALL_INSN_FUNCTION_USAGE. It also includes all implicit accesses
2300 to global registers by the target function. However, it does not
2301 include clobbers performed by the target function; callers that want
2302 this information should instead use the function_abi interface. */
2303
2304void
2305rtx_properties::try_to_add_insn (const rtx_insn *insn, bool include_notes)
2306{
2307 if (CALL_P (insn))
2308 {
2309 /* Non-const functions can read from global registers. Impure
2310 functions can also set them.
2311
2312 Adding the global registers first removes a situation in which
2313 a fixed-form clobber of register R could come before a real set
2314 of register R. */
2315 if (!hard_reg_set_empty_p (global_reg_set)
2316 && !RTL_CONST_CALL_P (insn))
2317 {
2318 unsigned int flags = rtx_obj_flags::IS_READ;
2319 if (!RTL_PURE_CALL_P (insn))
2320 flags |= rtx_obj_flags::IS_WRITE;
2321 for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; ++regno)
2322 /* As a special case, the stack pointer is invariant across calls
2323 even if it has been marked global; see the corresponding
2324 handling in df_get_call_refs. */
2325 if (regno != STACK_POINTER_REGNUM
2326 && global_regs[regno]
2327 && ref_iter != ref_end)
2328 *ref_iter++ = rtx_obj_reference (regno, flags,
2329 reg_raw_mode[regno], 0);
2330 }
2331 /* Untyped calls implicitly set all function value registers.
2332 Again, we add them first in case the main pattern contains
2333 a fixed-form clobber. */
2334 if (find_reg_note (insn, REG_UNTYPED_CALL, NULL_RTX))
2335 for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; ++regno)
2336 if (targetm.calls.function_value_regno_p (regno)
2337 && ref_iter != ref_end)
2338 *ref_iter++ = rtx_obj_reference (regno, rtx_obj_flags::IS_WRITE,
2339 reg_raw_mode[regno], 0);
2340 if (ref_iter != ref_end && !RTL_CONST_CALL_P (insn))
2341 {
2342 auto mem_flags = rtx_obj_flags::IS_READ;
2343 if (!RTL_PURE_CALL_P (insn))
2344 mem_flags |= rtx_obj_flags::IS_WRITE;
2345 *ref_iter++ = rtx_obj_reference (MEM_REGNO, mem_flags, BLKmode);
2346 }
2347 try_to_add_pattern (PATTERN (insn));
2348 for (rtx link = CALL_INSN_FUNCTION_USAGE (insn); link;
2349 link = XEXP (link, 1))
2350 {
2351 rtx x = XEXP (link, 0);
2352 if (GET_CODE (x) == CLOBBER)
2353 try_to_add_dest (XEXP (x, 0), rtx_obj_flags::IS_CLOBBER);
2354 else if (GET_CODE (x) == USE)
2355 try_to_add_src (XEXP (x, 0));
2356 }
2357 }
2358 else
2359 try_to_add_pattern (PATTERN (insn));
2360
2361 if (include_notes)
2362 for (rtx note = REG_NOTES (insn); note; note = XEXP (note, 1))
2363 if (REG_NOTE_KIND (note) == REG_EQUAL
2364 || REG_NOTE_KIND (note) == REG_EQUIV)
2365 try_to_add_note (XEXP (note, 0));
2366}
2367
2368/* Grow the storage by a bit while keeping the contents of the first
2369 START elements. */
2370
2371void
2372vec_rtx_properties_base::grow (ptrdiff_t start)
2373{
2374 /* The same heuristic that vec uses. */
2375 ptrdiff_t new_elems = (ref_end - ref_begin) * 3 / 2;
2376 if (ref_begin == m_storage)
2377 {
2378 ref_begin = XNEWVEC (rtx_obj_reference, new_elems);
2379 if (start)
2380 memcpy (ref_begin, m_storage, start * sizeof (rtx_obj_reference));
2381 }
2382 else
2383 ref_begin = reinterpret_cast<rtx_obj_reference *>
2384 (xrealloc (ref_begin, new_elems * sizeof (rtx_obj_reference)));
2385 ref_iter = ref_begin + start;
2386 ref_end = ref_begin + new_elems;
2387}
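
/* Illustrative sketch, not part of the GCC sources: the reference
   records built by the try_to_add_* routines above are normally
   consumed through vec_rtx_properties, e.g. to test whether INSN
   writes to any hard register:

     vec_rtx_properties properties;
     properties.add_insn (insn, false);
     for (rtx_obj_reference ref : properties.refs ())
       if (ref.regno != MEM_REGNO
           && (ref.flags & rtx_obj_flags::IS_WRITE)
           && HARD_REGISTER_NUM_P (ref.regno))
         return true;
     return false;  */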
2388
2389/* Return true if X's old contents don't survive after INSN.
2390 This will be true if X is a register and X dies in INSN or because
2391 INSN entirely sets X.
2392
2393 "Entirely set" means set directly and not through a SUBREG, or
2394 ZERO_EXTRACT, so no trace of the old contents remains.
2395 Likewise, REG_INC does not count.
2396
2397 REG may be a hard or pseudo reg. Renumbering is not taken into account,
2398 but for this use that makes no difference, since regs don't overlap
2399 during their lifetimes. Therefore, this function may be used
2400 at any time after deaths have been computed.
2401
2402 If REG is a hard reg that occupies multiple machine registers, this
2403 function will only return true if each of those registers will be replaced
2404 by INSN. */
2405
2406bool
2407dead_or_set_p (const rtx_insn *insn, const_rtx x)
2408{
2409 unsigned int regno, end_regno;
2410 unsigned int i;
2411
2412 gcc_assert (REG_P (x));
2413
2414 regno = REGNO (x);
2415 end_regno = END_REGNO (x);
2416 for (i = regno; i < end_regno; i++)
2417 if (! dead_or_set_regno_p (insn, i))
2418 return false;
2419
2420 return true;
2421}
2422
2423/* Return TRUE iff DEST is a register or subreg of a register, is a
2424 complete rather than read-modify-write destination, and contains
2425 register TEST_REGNO. */
2426
2427static bool
2428covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
2429{
2430 unsigned int regno, endregno;
2431
2432 if (GET_CODE (dest) == SUBREG && !read_modify_subreg_p (dest))
2433 dest = SUBREG_REG (dest);
2434
2435 if (!REG_P (dest))
2436 return false;
2437
2438 regno = REGNO (dest);
2439 endregno = END_REGNO (dest);
2440 return (test_regno >= regno && test_regno < endregno);
2441}
2442
2443/* Like covers_regno_no_parallel_p, but also handles PARALLELs where
2444 any member matches the covers_regno_no_parallel_p criteria. */
2445
2446static bool
2447covers_regno_p (const_rtx dest, unsigned int test_regno)
2448{
2449 if (GET_CODE (dest) == PARALLEL)
2450 {
2451 /* Some targets place small structures in registers for return
2452 values of functions, and those registers are wrapped in
2453 PARALLELs that we may see as the destination of a SET. */
2454 int i;
2455
2456 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2457 {
2458 rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
2459 if (inner != NULL_RTX
2460 && covers_regno_no_parallel_p (inner, test_regno))
2461 return true;
2462 }
2463
2464 return false;
2465 }
2466 else
2467 return covers_regno_no_parallel_p (dest, test_regno);
2468}
2469
2470/* Utility function for dead_or_set_p to check an individual register. */
2471
2472bool
2473dead_or_set_regno_p (const rtx_insn *insn, unsigned int test_regno)
2474{
2475 const_rtx pattern;
2476
2477 /* See if there is a death note for something that includes TEST_REGNO. */
2478 if (find_regno_note (insn, REG_DEAD, test_regno))
2479 return true;
2480
2481 if (CALL_P (insn)
2482 && find_regno_fusage (insn, CLOBBER, test_regno))
2483 return true;
2484
2485 pattern = PATTERN (insn);
2486
2487 /* If a COND_EXEC is not executed, the value survives. */
2488 if (GET_CODE (pattern) == COND_EXEC)
2489 return false;
2490
2491 if (GET_CODE (pattern) == SET || GET_CODE (pattern) == CLOBBER)
2492 return covers_regno_p (SET_DEST (pattern), test_regno);
2493 else if (GET_CODE (pattern) == PARALLEL)
2494 {
2495 int i;
2496
2497 for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
2498 {
2499 rtx body = XVECEXP (pattern, 0, i);
2500
2501 if (GET_CODE (body) == COND_EXEC)
2502 body = COND_EXEC_CODE (body);
2503
2504 if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
2505 && covers_regno_p (SET_DEST (body), test_regno))
2506 return true;
2507 }
2508 }
2509
2510 return false;
2511}
2512
2513/* Return the reg-note of kind KIND in insn INSN, if there is one.
2514 If DATUM is nonzero, look for one whose datum is DATUM. */
2515
2516rtx
2517find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
2518{
2519 rtx link;
2520
2521 gcc_checking_assert (insn);
2522
2523 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
2524 if (! INSN_P (insn))
2525 return 0;
2526 if (datum == 0)
2527 {
2528 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2529 if (REG_NOTE_KIND (link) == kind)
2530 return link;
2531 return 0;
2532 }
2533
2534 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2535 if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
2536 return link;
2537 return 0;
2538}
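
/* Illustrative sketch, not part of the GCC sources: callers usually
   pass NULL_RTX as DATUM to accept any note of the given kind, e.g.

     if (rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX))
       use_known_value (XEXP (note, 0));

   where XEXP (note, 0) is the value the destination of INSN's single
   set is known to equal, and use_known_value stands for whatever the
   caller wants to do with that value.  */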
2539
2540/* Return the reg-note of kind KIND in insn INSN which applies to register
2541 number REGNO, if any. Return 0 if there is no such reg-note. Note that
2542 the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
2543 it might be the case that the note overlaps REGNO. */
2544
2545rtx
2546find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
2547{
2548 rtx link;
2549
2550 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
2551 if (! INSN_P (insn))
2552 return 0;
2553
2554 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2555 if (REG_NOTE_KIND (link) == kind
2556 /* Verify that it is a register, so that scratch and MEM won't cause a
2557 problem here. */
2558 && REG_P (XEXP (link, 0))
2559 && REGNO (XEXP (link, 0)) <= regno
2560 && END_REGNO (XEXP (link, 0)) > regno)
2561 return link;
2562 return 0;
2563}
2564
2565/* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
2566 has such a note. */
2567
2568rtx
2569find_reg_equal_equiv_note (const_rtx insn)
2570{
2571 rtx link;
2572
2573 if (!INSN_P (insn))
2574 return 0;
2575
2576 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2577 if (REG_NOTE_KIND (link) == REG_EQUAL
2578 || REG_NOTE_KIND (link) == REG_EQUIV)
2579 {
2580 /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
2581 insns that have multiple sets. Checking single_set to
2582 make sure of this is not the proper check, as explained
2583 in the comment in set_unique_reg_note.
2584
2585 This should be changed into an assert. */
2586 if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
2587 return 0;
2588 return link;
2589 }
2590 return NULL;
2591}
2592
2593/* Check whether INSN is a single_set whose source is known to be
2594 equivalent to a constant. Return that constant if so, otherwise
2595 return null. */
2596
2597rtx
2598find_constant_src (const rtx_insn *insn)
2599{
2600 rtx note, set, x;
2601
2602 set = single_set (insn);
2603 if (set)
2604 {
2605 x = avoid_constant_pool_reference (SET_SRC (set));
2606 if (CONSTANT_P (x))
2607 return x;
2608 }
2609
2610 note = find_reg_equal_equiv_note (insn);
2611 if (note && CONSTANT_P (XEXP (note, 0)))
2612 return XEXP (note, 0);
2613
2614 return NULL_RTX;
2615}
2616
2617/* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
2618 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
2619
2620bool
2621find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
2622{
2623 /* If it's not a CALL_INSN, it can't possibly have a
2624 CALL_INSN_FUNCTION_USAGE field, so don't bother checking. */
2625 if (!CALL_P (insn))
2626 return false;
2627
2628 gcc_assert (datum);
2629
2630 if (!REG_P (datum))
2631 {
2632 rtx link;
2633
2634 for (link = CALL_INSN_FUNCTION_USAGE (insn);
2635 link;
2636 link = XEXP (link, 1))
2637 if (GET_CODE (XEXP (link, 0)) == code
2638 && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
2639 return true;
2640 }
2641 else
2642 {
2643 unsigned int regno = REGNO (datum);
2644
2645 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2646 to pseudo registers, so don't bother checking. */
2647
2648 if (regno < FIRST_PSEUDO_REGISTER)
2649 {
2650 unsigned int end_regno = END_REGNO (datum);
2651 unsigned int i;
2652
2653 for (i = regno; i < end_regno; i++)
2654 if (find_regno_fusage (insn, code, i))
2655 return true;
2656 }
2657 }
2658
2659 return false;
2660}
2661
2662/* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
2663 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
2664
2665bool
2666find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
2667{
2668 rtx link;
2669
2670 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2671 to pseudo registers, so don't bother checking. */
2672
2673 if (regno >= FIRST_PSEUDO_REGISTER
2674 || !CALL_P (insn) )
2675 return false;
2676
2677 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2678 {
2679 rtx op, reg;
2680
2681 if (GET_CODE (op = XEXP (link, 0)) == code
2682 && REG_P (reg = XEXP (op, 0))
2683 && REGNO (reg) <= regno
2684 && END_REGNO (reg) > regno)
2685 return true;
2686 }
2687
2688 return false;
2689}
2690
2691
2692/* Return true if KIND is an integer REG_NOTE. */
2693
2694static bool
2695int_reg_note_p (enum reg_note kind)
2696{
2697 return kind == REG_BR_PROB;
2698}
2699
2700/* Allocate a register note with kind KIND and datum DATUM. LIST is
2701 stored as the pointer to the next register note. */
2702
2703rtx
2704alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
2705{
2706 rtx note;
2707
2708 gcc_checking_assert (!int_reg_note_p (kind));
2709 switch (kind)
2710 {
2711 case REG_LABEL_TARGET:
2712 case REG_LABEL_OPERAND:
2713 case REG_TM:
2714 /* These types of register notes use an INSN_LIST rather than an
2715 EXPR_LIST, so that copying is done right and dumps look
2716 better. */
2717 note = alloc_INSN_LIST (datum, list);
2718 PUT_REG_NOTE_KIND (note, kind);
2719 break;
2720
2721 default:
2722 note = alloc_EXPR_LIST (kind, datum, list);
2723 break;
2724 }
2725
2726 return note;
2727}
2728
2729/* Add register note with kind KIND and datum DATUM to INSN. */
2730
2731void
2732add_reg_note (rtx insn, enum reg_note kind, rtx datum)
2733{
2734 REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
2735}
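
/* Illustrative sketch, not part of the GCC sources: recording that the
   value set by INSN is known to equal a constant could be done with

     add_reg_note (insn, REG_EQUAL, GEN_INT (42));

   although callers that may be looking at a multi-set insn normally go
   through set_unique_reg_note instead, which performs the necessary
   checks first.  */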
2736
2737/* Add an integer register note with kind KIND and datum DATUM to INSN. */
2738
2739void
2740add_int_reg_note (rtx_insn *insn, enum reg_note kind, int datum)
2741{
2742 gcc_checking_assert (int_reg_note_p (kind));
2743 REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
2744 datum, REG_NOTES (insn));
2745}
2746
2747/* Add a REG_ARGS_SIZE note to INSN with value VALUE. */
2748
2749void
2750add_args_size_note (rtx_insn *insn, poly_int64 value)
2751{
2752 gcc_checking_assert (!find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX));
2753 add_reg_note (insn, REG_ARGS_SIZE, gen_int_mode (value, Pmode));
2754}
2755
2756/* Add a register note like NOTE to INSN. */
2757
2758void
2759add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
2760{
2761 if (GET_CODE (note) == INT_LIST)
2762 add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
2763 else
2764 add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
2765}
2766
2767/* Duplicate NOTE and return the copy. */
2768rtx
2769duplicate_reg_note (rtx note)
2770{
2771 reg_note kind = REG_NOTE_KIND (note);
2772
2773 if (GET_CODE (note) == INT_LIST)
2774 return gen_rtx_INT_LIST ((machine_mode) kind, XINT (note, 0), NULL_RTX);
2775 else if (GET_CODE (note) == EXPR_LIST)
2776 return alloc_reg_note (kind, copy_insn_1 (XEXP (note, 0)), NULL_RTX);
2777 else
2778 return alloc_reg_note (kind, XEXP (note, 0), NULL_RTX);
2779}
2780
2781/* Remove register note NOTE from the REG_NOTES of INSN. */
2782
2783void
2784remove_note (rtx_insn *insn, const_rtx note)
2785{
2786 rtx link;
2787
2788 if (note == NULL_RTX)
2789 return;
2790
2791 if (REG_NOTES (insn) == note)
2792 REG_NOTES (insn) = XEXP (note, 1);
2793 else
2794 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2795 if (XEXP (link, 1) == note)
2796 {
2797 XEXP (link, 1) = XEXP (note, 1);
2798 break;
2799 }
2800
2801 switch (REG_NOTE_KIND (note))
2802 {
2803 case REG_EQUAL:
2804 case REG_EQUIV:
2805 df_notes_rescan (insn);
2806 break;
2807 default:
2808 break;
2809 }
2810}
2811
2812/* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.
2813 If NO_RESCAN is false and any notes were removed, call
2814 df_notes_rescan. Return true if any note has been removed. */
2815
2816bool
2817remove_reg_equal_equiv_notes (rtx_insn *insn, bool no_rescan)
2818{
2819 rtx *loc;
2820 bool ret = false;
2821
2822 loc = &REG_NOTES (insn);
2823 while (*loc)
2824 {
2825 enum reg_note kind = REG_NOTE_KIND (*loc);
2826 if (kind == REG_EQUAL || kind == REG_EQUIV)
2827 {
2828 *loc = XEXP (*loc, 1);
2829 ret = true;
2830 }
2831 else
2832 loc = &XEXP (*loc, 1);
2833 }
2834 if (ret && !no_rescan)
2835 df_notes_rescan (insn);
2836 return ret;
2837}
2838
2839/* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO. */
2840
2841void
2842remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
2843{
2844 df_ref eq_use;
2845
2846 if (!df)
2847 return;
2848
2849 /* This loop is a little tricky. We cannot just go down the chain because
2850 it is being modified by some actions in the loop. So we just iterate
2851 over the head. We plan to drain the list anyway. */
2852 while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
2853 {
2854 rtx_insn *insn = DF_REF_INSN (eq_use);
2855 rtx note = find_reg_equal_equiv_note (insn);
2856
2857 /* This assert is generally triggered when someone deletes a REG_EQUAL
2858 or REG_EQUIV note by hacking the list manually rather than calling
2859 remove_note. */
2860 gcc_assert (note);
2861
2862 remove_note (insn, note);
2863 }
2864}
2865
2866/* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2867 return true if it is found. A simple equality test is used to determine if
2868 NODE matches. */
2869
2870bool
2871in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
2872{
2873 const_rtx x;
2874
2875 for (x = listp; x; x = XEXP (x, 1))
2876 if (node == XEXP (x, 0))
2877 return true;
2878
2879 return false;
2880}
2881
2882/* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2883 remove that entry from the list if it is found.
2884
2885 A simple equality test is used to determine if NODE matches. */
2886
2887void
2888remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
2889{
2890 rtx_insn_list *temp = *listp;
2891 rtx_insn_list *prev = NULL;
2892
2893 while (temp)
2894 {
2895 if (node == temp->insn ())
2896 {
2897 /* Splice the node out of the list. */
2898 if (prev)
2899 XEXP (prev, 1) = temp->next ();
2900 else
2901 *listp = temp->next ();
2902
2903 gcc_checking_assert (!in_insn_list_p (temp->next (), node));
2904 return;
2905 }
2906
2907 prev = temp;
2908 temp = temp->next ();
2909 }
2910}
2911
2912/* Return true if X contains any volatile instructions. These are instructions
2913 which may cause unpredictable machine state, and thus no other
2914 instructions or register uses should be moved or combined across them.
2915 This includes only volatile asms and UNSPEC_VOLATILE instructions. */
2916
2917bool
2918volatile_insn_p (const_rtx x)
2919{
2920 const RTX_CODE code = GET_CODE (x);
2921 switch (code)
2922 {
2923 case LABEL_REF:
2924 case SYMBOL_REF:
2925 case CONST:
2926 CASE_CONST_ANY:
2927 case PC:
2928 case REG:
2929 case SCRATCH:
2930 case CLOBBER:
2931 case ADDR_VEC:
2932 case ADDR_DIFF_VEC:
2933 case CALL:
2934 case MEM:
2935 return false;
2936
2937 case UNSPEC_VOLATILE:
2938 return true;
2939
2940 case ASM_INPUT:
2941 case ASM_OPERANDS:
2942 if (MEM_VOLATILE_P (x))
2943 return true;
2944
2945 default:
2946 break;
2947 }
2948
2949 /* Recursively scan the operands of this expression. */
2950
2951 {
2952 const char *const fmt = GET_RTX_FORMAT (code);
2953 int i;
2954
2955 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2956 {
2957 if (fmt[i] == 'e')
2958 {
2959 if (volatile_insn_p (XEXP (x, i)))
2960 return true;
2961 }
2962 else if (fmt[i] == 'E')
2963 {
2964 int j;
2965 for (j = 0; j < XVECLEN (x, i); j++)
2966 if (volatile_insn_p (XVECEXP (x, i, j)))
2967 return true;
2968 }
2969 }
2970 }
2971 return false;
2972}
2973
2974/* Return true if X contains any volatile memory references,
2975 UNSPEC_VOLATILE operations, or volatile ASM_OPERANDS expressions. */
2976
2977bool
2978volatile_refs_p (const_rtx x)
2979{
2980 const RTX_CODE code = GET_CODE (x);
2981 switch (code)
2982 {
2983 case LABEL_REF:
2984 case SYMBOL_REF:
2985 case CONST:
2986 CASE_CONST_ANY:
2987 case PC:
2988 case REG:
2989 case SCRATCH:
2990 case CLOBBER:
2991 case ADDR_VEC:
2992 case ADDR_DIFF_VEC:
2993 return false;
2994
2995 case UNSPEC_VOLATILE:
2996 return true;
2997
2998 case MEM:
2999 case ASM_INPUT:
3000 case ASM_OPERANDS:
3001 if (MEM_VOLATILE_P (x))
3002 return true;
3003
3004 default:
3005 break;
3006 }
3007
3008 /* Recursively scan the operands of this expression. */
3009
3010 {
3011 const char *const fmt = GET_RTX_FORMAT (code);
3012 int i;
3013
3014 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3015 {
3016 if (fmt[i] == 'e')
3017 {
3018 if (volatile_refs_p (XEXP (x, i)))
3019 return true;
3020 }
3021 else if (fmt[i] == 'E')
3022 {
3023 int j;
3024 for (j = 0; j < XVECLEN (x, i); j++)
3025 if (volatile_refs_p (XVECEXP (x, i, j)))
3026 return true;
3027 }
3028 }
3029 }
3030 return false;
3031}
3032
3033/* Similar to above, except that it also rejects register pre- and post-
3034 incrementing. */
3035
3036bool
3037side_effects_p (const_rtx x)
3038{
3039 const RTX_CODE code = GET_CODE (x);
3040 switch (code)
3041 {
3042 case LABEL_REF:
3043 case SYMBOL_REF:
3044 case CONST:
3045 CASE_CONST_ANY:
3046 case PC:
3047 case REG:
3048 case SCRATCH:
3049 case ADDR_VEC:
3050 case ADDR_DIFF_VEC:
3051 case VAR_LOCATION:
3052 return false;
3053
3054 case CLOBBER:
3055 /* Reject CLOBBER with a non-VOID mode. These are made by combine.cc
3056 when some combination can't be done. If we see one, don't think
3057 that we can simplify the expression. */
3058 return (GET_MODE (x) != VOIDmode);
3059
3060 case PRE_INC:
3061 case PRE_DEC:
3062 case POST_INC:
3063 case POST_DEC:
3064 case PRE_MODIFY:
3065 case POST_MODIFY:
3066 case CALL:
3067 case UNSPEC_VOLATILE:
3068 return true;
3069
3070 case MEM:
3071 case ASM_INPUT:
3072 case ASM_OPERANDS:
3073 if (MEM_VOLATILE_P (x))
3074 return true;
3075
3076 default:
3077 break;
3078 }
3079
3080 /* Recursively scan the operands of this expression. */
3081
3082 {
3083 const char *fmt = GET_RTX_FORMAT (code);
3084 int i;
3085
3086 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3087 {
3088 if (fmt[i] == 'e')
3089 {
3090 if (side_effects_p (XEXP (x, i)))
3091 return true;
3092 }
3093 else if (fmt[i] == 'E')
3094 {
3095 int j;
3096 for (j = 0; j < XVECLEN (x, i); j++)
3097 if (side_effects_p (XVECEXP (x, i, j)))
3098 return true;
3099 }
3100 }
3101 }
3102 return false;
3103}
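
/* Illustrative examples, not part of the GCC sources, of how the three
   predicates above differ:

     (mem/v:SI (reg:SI 100))
       volatile_insn_p: false   volatile_refs_p: true   side_effects_p: true
     (post_inc:SI (reg:SI 101))
       volatile_insn_p: false   volatile_refs_p: false  side_effects_p: true
     (unspec_volatile [(const_int 0)] 0)
       volatile_insn_p: true    volatile_refs_p: true   side_effects_p: true  */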
3104
3105/* Return true if evaluating rtx X might cause a trap.
3106 FLAGS controls how to consider MEMs. A nonzero value means the context
3107 of the access may have changed from the original, such that the
3108 address may have become invalid. */
3109
3110bool
3111may_trap_p_1 (const_rtx x, unsigned flags)
3112{
3113 int i;
3114 enum rtx_code code;
3115 const char *fmt;
3116
3117 /* We make no distinction currently, but this function is part of
3118 the internal target-hooks ABI so we keep the parameter as
3119 "unsigned flags". */
3120 bool code_changed = flags != 0;
3121
3122 if (x == 0)
3123 return false;
3124 code = GET_CODE (x);
3125 switch (code)
3126 {
3127 /* Handle these cases quickly. */
3128 CASE_CONST_ANY:
3129 case SYMBOL_REF:
3130 case LABEL_REF:
3131 case CONST:
3132 case PC:
3133 case REG:
3134 case SCRATCH:
3135 return false;
3136
3137 case UNSPEC:
3138 return targetm.unspec_may_trap_p (x, flags);
3139
3140 case UNSPEC_VOLATILE:
3141 case ASM_INPUT:
3142 case TRAP_IF:
3143 return true;
3144
3145 case ASM_OPERANDS:
3146 return MEM_VOLATILE_P (x);
3147
3148 /* Memory ref can trap unless it's a static var or a stack slot. */
3149 case MEM:
3150 /* Recognize specific pattern of stack checking probes. */
3151 if (flag_stack_check
3152 && MEM_VOLATILE_P (x)
3153 && XEXP (x, 0) == stack_pointer_rtx)
3154 return true;
3155 if (/* MEM_NOTRAP_P only relates to the actual position of the memory
3156 reference; moving it out of context, such as when moving code
3157 during optimization, might cause its address to become invalid. */
3158 code_changed
3159 || !MEM_NOTRAP_P (x))
3160 {
3161 poly_int64 size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : -1;
3162 return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
3163 GET_MODE (x), code_changed);
3164 }
3165
3166 return false;
3167
3168 /* Division by a non-constant might trap. */
3169 case DIV:
3170 case MOD:
3171 case UDIV:
3172 case UMOD:
3173 if (HONOR_SNANS (x))
3174 return true;
3175 if (FLOAT_MODE_P (GET_MODE (x)))
3176 return flag_trapping_math;
3177 if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
3178 return true;
3179 if (GET_CODE (XEXP (x, 1)) == CONST_VECTOR)
3180 {
3181 /* For CONST_VECTOR, return true if any element is or might be zero. */
3182 unsigned int n_elts;
3183 rtx op = XEXP (x, 1);
3184 if (!GET_MODE_NUNITS (GET_MODE (op)).is_constant (&n_elts))
3185 {
3186 if (!CONST_VECTOR_DUPLICATE_P (op))
3187 return true;
3188 for (unsigned i = 0; i < (unsigned int) XVECLEN (op, 0); i++)
3189 if (CONST_VECTOR_ENCODED_ELT (op, i) == const0_rtx)
3190 return true;
3191 }
3192 else
3193 for (unsigned i = 0; i < n_elts; i++)
3194 if (CONST_VECTOR_ELT (op, i) == const0_rtx)
3195 return true;
3196 }
3197 break;
3198
3199 case EXPR_LIST:
3200 /* An EXPR_LIST is used to represent a function call. This
3201 certainly may trap. */
3202 return true;
3203
3204 case GE:
3205 case GT:
3206 case LE:
3207 case LT:
3208 case LTGT:
3209 case COMPARE:
3210 /* Treat min/max similar as comparisons. */
3211 case SMIN:
3212 case SMAX:
3213 /* Some floating point comparisons may trap. */
3214 if (!flag_trapping_math)
3215 break;
3216 /* ??? There is no machine independent way to check for tests that trap
3217 when COMPARE is used, though many targets do make this distinction.
3218 For instance, sparc uses CCFPE for compares which generate exceptions
3219 and CCFP for compares which do not generate exceptions. */
3220 if (HONOR_NANS (x))
3221 return true;
3222 /* But often the compare has some CC mode, so check operand
3223 modes as well. */
3224 if (HONOR_NANS (XEXP (x, 0))
3225 || HONOR_NANS (XEXP (x, 1)))
3226 return true;
3227 break;
3228
3229 case EQ:
3230 case NE:
3231 if (HONOR_SNANS (x))
3232 return true;
3233 /* Often comparison is CC mode, so check operand modes. */
3234 if (HONOR_SNANS (XEXP (x, 0))
3235 || HONOR_SNANS (XEXP (x, 1)))
3236 return true;
3237 break;
3238
3239 case FIX:
3240 case UNSIGNED_FIX:
3241 /* Conversion of floating point might trap. */
3242 if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
3243 return true;
3244 break;
3245
3246 case NEG:
3247 case ABS:
3248 case SUBREG:
3249 case VEC_MERGE:
3250 case VEC_SELECT:
3251 case VEC_CONCAT:
3252 case VEC_DUPLICATE:
3253 /* These operations don't trap even with floating point. */
3254 break;
3255
3256 default:
3257 /* Any floating arithmetic may trap. */
3258 if (FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
3259 return true;
3260 }
3261
3262 fmt = GET_RTX_FORMAT (code);
3263 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3264 {
3265 if (fmt[i] == 'e')
3266 {
3267 if (may_trap_p_1 (XEXP (x, i), flags))
3268 return true;
3269 }
3270 else if (fmt[i] == 'E')
3271 {
3272 int j;
3273 for (j = 0; j < XVECLEN (x, i); j++)
3274 if (may_trap_p_1 (XVECEXP (x, i, j), flags))
3275 return true;
3276 }
3277 }
3278 return false;
3279}
3280
3281/* Return true if evaluating rtx X might cause a trap. */
3282
3283bool
3284may_trap_p (const_rtx x)
3285{
3286 return may_trap_p_1 (x, 0);
3287}
3288
3289/* Same as above, but additionally return true if evaluating rtx X might
3290 cause a fault. We define a fault for the purpose of this function as an
3291 erroneous execution condition that cannot be encountered during the normal
3292 execution of a valid program; the typical example is an unaligned memory
3293 access on a strict alignment machine. The compiler guarantees that it
3294 doesn't generate code that will fault from a valid program, but this
3295 guarantee doesn't mean anything for individual instructions. Consider
3296 the following example:
3297
3298 struct S { int d; union { char *cp; int *ip; }; };
3299
3300 int foo(struct S *s)
3301 {
3302 if (s->d == 1)
3303 return *s->ip;
3304 else
3305 return *s->cp;
3306 }
3307
3308 on a strict alignment machine. In a valid program, foo will never be
3309 invoked on a structure for which d is equal to 1 and the underlying
3310 unique field of the union not aligned on a 4-byte boundary, but the
3311 expression *s->ip might cause a fault if considered individually.
3312
3313 At the RTL level, potentially problematic expressions will almost always
3314 verify may_trap_p; for example, the above dereference can be emitted as
3315 (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
3316 However, suppose that foo is inlined in a caller that causes s->cp to
3317 point to a local character variable and guarantees that s->d is not set
3318 to 1; foo may have been effectively translated into pseudo-RTL as:
3319
3320 if ((reg:SI) == 1)
3321 (set (reg:SI) (mem:SI (%fp - 7)))
3322 else
3323 (set (reg:QI) (mem:QI (%fp - 7)))
3324
3325 Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
3326 memory reference to a stack slot, but it will certainly cause a fault
3327 on a strict alignment machine. */
3328
3329bool
3330may_trap_or_fault_p (const_rtx x)
3331{
3332 return may_trap_p_1 (x, 1);
3333}
3334
3335/* Replace any occurrence of FROM in X with TO. The function does
3336 not enter into CONST_DOUBLE for the replace.
3337
3338 Note that copying is not done so X must not be shared unless all copies
3339 are to be modified.
3340
3341 ALL_REGS is true if we want to replace all REGs equal to FROM, not just
3342 those pointer-equal ones. */
3343
3344rtx
3345replace_rtx (rtx x, rtx from, rtx to, bool all_regs)
3346{
3347 int i, j;
3348 const char *fmt;
3349
3350 if (x == from)
3351 return to;
3352
3353 /* Allow this function to make replacements in EXPR_LISTs. */
3354 if (x == 0)
3355 return 0;
3356
3357 if (all_regs
3358 && REG_P (x)
3359 && REG_P (from)
3360 && REGNO (x) == REGNO (from))
3361 {
3362 gcc_assert (GET_MODE (x) == GET_MODE (from));
3363 return to;
3364 }
3365 else if (GET_CODE (x) == SUBREG)
3366 {
3367 rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to, all_regs);
3368
3369 if (CONST_SCALAR_INT_P (new_rtx))
3370 {
3371 x = simplify_subreg (GET_MODE (x), new_rtx,
3372 GET_MODE (SUBREG_REG (x)),
3373 SUBREG_BYTE (x));
3374 gcc_assert (x);
3375 }
3376 else
3377 SUBREG_REG (x) = new_rtx;
3378
3379 return x;
3380 }
3381 else if (GET_CODE (x) == ZERO_EXTEND)
3382 {
3383 rtx new_rtx = replace_rtx (XEXP (x, 0), from, to, all_regs);
3384
3385 if (CONST_SCALAR_INT_P (new_rtx))
3386 {
3387 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
3388 new_rtx, GET_MODE (XEXP (x, 0)));
3389 gcc_assert (x);
3390 }
3391 else
3392 XEXP (x, 0) = new_rtx;
3393
3394 return x;
3395 }
3396
3397 fmt = GET_RTX_FORMAT (GET_CODE (x));
3398 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3399 {
3400 if (fmt[i] == 'e')
3401 XEXP (x, i) = replace_rtx (XEXP (x, i), from, to, all_regs);
3402 else if (fmt[i] == 'E')
3403 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3404 XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j),
3405 from, to, all_regs);
3406 }
3407
3408 return x;
3409}
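
/* Illustrative sketch, not part of the GCC sources: replace_rtx is
   typically applied to an unshared copy of a pattern, substituting one
   register rtx for another (OLD_REG and NEW_REG are assumed to be
   caller-provided REGs):

     rtx pat = copy_rtx (PATTERN (insn));
     pat = replace_rtx (pat, old_reg, new_reg);

   With the default ALL_REGS == false, only occurrences that are
   pointer-equal to OLD_REG are replaced.  */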
3410
3411/* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL. Also track
3412 the change in LABEL_NUSES if UPDATE_LABEL_NUSES. */
3413
3414void
3415replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
3416{
3417 /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long. */
3418 rtx x = *loc;
3419 if (JUMP_TABLE_DATA_P (x))
3420 {
3421 x = PATTERN (x);
3422 rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
3423 int len = GET_NUM_ELEM (vec);
3424 for (int i = 0; i < len; ++i)
3425 {
3426 rtx ref = RTVEC_ELT (vec, i);
3427 if (XEXP (ref, 0) == old_label)
3428 {
3429 XEXP (ref, 0) = new_label;
3430 if (update_label_nuses)
3431 {
3432 ++LABEL_NUSES (new_label);
3433 --LABEL_NUSES (old_label);
3434 }
3435 }
3436 }
3437 return;
3438 }
3439
3440 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
3441 field. This is not handled by the iterator because it doesn't
3442 handle unprinted ('0') fields. */
3443 if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
3444 JUMP_LABEL (x) = new_label;
3445
3446 subrtx_ptr_iterator::array_type array;
3447 FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
3448 {
3449 rtx *loc = *iter;
3450 if (rtx x = *loc)
3451 {
3452 if (GET_CODE (x) == SYMBOL_REF
3453 && CONSTANT_POOL_ADDRESS_P (x))
3454 {
3455 rtx c = get_pool_constant (x);
3456 if (rtx_referenced_p (old_label, c))
3457 {
3458 /* Create a copy of constant C; replace the label inside
3459 but do not update LABEL_NUSES because uses in constant pool
3460 are not counted. */
3461 rtx new_c = copy_rtx (c);
3462 replace_label (&new_c, old_label, new_label, false);
3463
3464 /* Add the new constant NEW_C to constant pool and replace
3465 the old reference to constant by new reference. */
3466 rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
3467 *loc = replace_rtx (x, x, XEXP (new_mem, 0));
3468 }
3469 }
3470
3471 if ((GET_CODE (x) == LABEL_REF
3472 || GET_CODE (x) == INSN_LIST)
3473 && XEXP (x, 0) == old_label)
3474 {
3475 XEXP (x, 0) = new_label;
3476 if (update_label_nuses)
3477 {
3478 ++LABEL_NUSES (new_label);
3479 --LABEL_NUSES (old_label);
3480 }
3481 }
3482 }
3483 }
3484}
3485
3486void
3487replace_label_in_insn (rtx_insn *insn, rtx_insn *old_label,
3488 rtx_insn *new_label, bool update_label_nuses)
3489{
3490 rtx insn_as_rtx = insn;
3491 replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
3492 gcc_checking_assert (insn_as_rtx == insn);
3493}
3494
3495/* Return true if X is referenced in BODY. */
3496
3497bool
3498rtx_referenced_p (const_rtx x, const_rtx body)
3499{
3500 subrtx_iterator::array_type array;
3501 FOR_EACH_SUBRTX (iter, array, body, ALL)
3502 if (const_rtx y = *iter)
3503 {
3504 /* Check if a label_ref Y refers to label X. */
3505 if (GET_CODE (y) == LABEL_REF
3506 && LABEL_P (x)
3507 && label_ref_label (y) == x)
3508 return true;
3509
3510 if (rtx_equal_p (x, y))
3511 return true;
3512
3513 /* If Y is a reference to pool constant traverse the constant. */
3514 if (GET_CODE (y) == SYMBOL_REF
3515 && CONSTANT_POOL_ADDRESS_P (y))
3516 iter.substitute (get_pool_constant (y));
3517 }
3518 return false;
3519}
3520
3521/* If INSN is a tablejump return true and store the label (before jump table) to
3522 *LABELP and the jump table to *TABLEP. LABELP and TABLEP may be NULL. */
3523
3524bool
3525tablejump_p (const rtx_insn *insn, rtx_insn **labelp,
3526 rtx_jump_table_data **tablep)
3527{
3528 if (!JUMP_P (insn))
3529 return false;
3530
3531 rtx target = JUMP_LABEL (insn);
3532 if (target == NULL_RTX || ANY_RETURN_P (target))
3533 return false;
3534
3535 rtx_insn *label = as_a<rtx_insn *> (target);
3536 rtx_insn *table = next_insn (label);
3537 if (table == NULL_RTX || !JUMP_TABLE_DATA_P (table))
3538 return false;
3539
3540 if (labelp)
3541 *labelp = label;
3542 if (tablep)
3543 *tablep = as_a <rtx_jump_table_data *> (table);
3544 return true;
3545}
3546
3547/* For INSN known to satisfy tablejump_p, determine if it actually is a
3548 CASESI. Return the insn pattern if so, NULL_RTX otherwise. */
3549
3550rtx
3551tablejump_casesi_pattern (const rtx_insn *insn)
3552{
3553 rtx tmp;
3554
3555 if ((tmp = single_set (insn)) != NULL
3556 && SET_DEST (tmp) == pc_rtx
3557 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3558 && GET_CODE (XEXP (SET_SRC (tmp), 2)) == LABEL_REF)
3559 return tmp;
3560
3561 return NULL_RTX;
3562}
3563
3564/* A subroutine of computed_jump_p, return true if X contains a REG or MEM or
3565 constant that is not in the constant pool and not in the condition
3566 of an IF_THEN_ELSE. */
3567
3568static bool
3569computed_jump_p_1 (const_rtx x)
3570{
3571 const enum rtx_code code = GET_CODE (x);
3572 int i, j;
3573 const char *fmt;
3574
3575 switch (code)
3576 {
3577 case LABEL_REF:
3578 case PC:
3579 return false;
3580
3581 case CONST:
3582 CASE_CONST_ANY:
3583 case SYMBOL_REF:
3584 case REG:
3585 return true;
3586
3587 case MEM:
3588 return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
3589 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
3590
3591 case IF_THEN_ELSE:
3592 return (computed_jump_p_1 (XEXP (x, 1))
3593 || computed_jump_p_1 (XEXP (x, 2)));
3594
3595 default:
3596 break;
3597 }
3598
3599 fmt = GET_RTX_FORMAT (code);
3600 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3601 {
3602 if (fmt[i] == 'e'
3603 && computed_jump_p_1 (XEXP (x, i)))
3604 return true;
3605
3606 else if (fmt[i] == 'E')
3607 for (j = 0; j < XVECLEN (x, i); j++)
3608 if (computed_jump_p_1 (XVECEXP (x, i, j)))
3609 return true;
3610 }
3611
3612 return false;
3613}
3614
3615/* Return true if INSN is an indirect jump (aka computed jump).
3616
3617 Tablejumps and casesi insns are not considered indirect jumps;
3618 we can recognize them by a (use (label_ref)). */
3619
3620bool
3621computed_jump_p (const rtx_insn *insn)
3622{
3623 int i;
3624 if (JUMP_P (insn))
3625 {
3626 rtx pat = PATTERN (insn);
3627
3628 /* If we have a JUMP_LABEL set, we're not a computed jump. */
3629 if (JUMP_LABEL (insn) != NULL)
3630 return false;
3631
3632 if (GET_CODE (pat) == PARALLEL)
3633 {
3634 int len = XVECLEN (pat, 0);
3635 bool has_use_labelref = false;
3636
3637 for (i = len - 1; i >= 0; i--)
3638 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
3639 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
3640 == LABEL_REF))
3641 {
3642 has_use_labelref = true;
3643 break;
3644 }
3645
3646 if (! has_use_labelref)
3647 for (i = len - 1; i >= 0; i--)
3648 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3649 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
3650 && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
3651 return true;
3652 }
3653 else if (GET_CODE (pat) == SET
3654 && SET_DEST (pat) == pc_rtx
3655 && computed_jump_p_1 (SET_SRC (pat)))
3656 return true;
3657 }
3658 return false;
3659}
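
/* Illustrative examples, not part of the GCC sources: a computed jump
   typically has a pattern such as

     (set (pc) (reg:DI 100))

   or, on a target whose jumps clobber a flags register (the register
   number 17 below is just illustrative),

     (parallel [(set (pc) (mem:DI (reg:DI 100)))
                (clobber (reg:CC 17))])

   whereas a tablejump carries a (use (label_ref)) in its PARALLEL and
   has JUMP_LABEL set, so it is rejected above.  */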
3660
3661
3662
3663/* MEM has a PRE/POST-INC/DEC/MODIFY address X. Extract the operands of
3664 the equivalent add insn and pass the result to FN, using DATA as the
3665 final argument. */
3666
3667static int
3668for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
3669{
3670 rtx x = XEXP (mem, 0);
3671 switch (GET_CODE (x))
3672 {
3673 case PRE_INC:
3674 case POST_INC:
3675 {
3676 poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
3677 rtx r1 = XEXP (x, 0);
3678 rtx c = gen_int_mode (size, GET_MODE (r1));
3679 return fn (mem, x, r1, r1, c, data);
3680 }
3681
3682 case PRE_DEC:
3683 case POST_DEC:
3684 {
3685 poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
3686 rtx r1 = XEXP (x, 0);
3687 rtx c = gen_int_mode (-size, GET_MODE (r1));
3688 return fn (mem, x, r1, r1, c, data);
3689 }
3690
3691 case PRE_MODIFY:
3692 case POST_MODIFY:
3693 {
3694 rtx r1 = XEXP (x, 0);
3695 rtx add = XEXP (x, 1);
3696 return fn (mem, x, r1, add, NULL, data);
3697 }
3698
3699 default:
3700 gcc_unreachable ();
3701 }
3702}
3703
3704/* Traverse *LOC looking for MEMs that have autoinc addresses.
3705 For each such autoinc operation found, call FN, passing it
3706 the innermost enclosing MEM, the operation itself, the RTX modified
3707 by the operation, two RTXs (the second may be NULL) that, once
3708 added, represent the value to be held by the modified RTX
3709 afterwards, and DATA. FN is to return 0 to continue the
3710 traversal or any other value to have it returned to the caller of
3711 for_each_inc_dec. */
3712
3713int
3714for_each_inc_dec (rtx x,
3715 for_each_inc_dec_fn fn,
3716 void *data)
3717{
3718 subrtx_var_iterator::array_type array;
3719 FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
3720 {
3721 rtx mem = *iter;
3722 if (mem
3723 && MEM_P (mem)
3724 && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
3725 {
3726 int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
3727 if (res != 0)
3728 return res;
3729 iter.skip_subrtxes ();
3730 }
3731 }
3732 return 0;
3733}
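
/* Illustrative sketch, not part of the GCC sources: a callback matching
   for_each_inc_dec_fn that merely counts the auto-increment addresses
   in a pattern:

     static int
     count_autoinc (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
                    rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
                    rtx srcoff ATTRIBUTE_UNUSED, void *data)
     {
       ++*(int *) data;
       return 0;
     }

     int n = 0;
     for_each_inc_dec (PATTERN (insn), count_autoinc, &n);  */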
3734
3735
3736/* Searches X for any reference to REGNO, returning the rtx of the
3737 reference found if any. Otherwise, returns NULL_RTX. */
3738
3739rtx
3740regno_use_in (unsigned int regno, rtx x)
3741{
3742 const char *fmt;
3743 int i, j;
3744 rtx tem;
3745
3746 if (REG_P (x) && REGNO (x) == regno)
3747 return x;
3748
3749 fmt = GET_RTX_FORMAT (GET_CODE (x));
3750 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3751 {
3752 if (fmt[i] == 'e')
3753 {
3754 if ((tem = regno_use_in (regno, XEXP (x, i))))
3755 return tem;
3756 }
3757 else if (fmt[i] == 'E')
3758 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3759 if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
3760 return tem;
3761 }
3762
3763 return NULL_RTX;
3764}
3765
3766/* Return a value indicating whether OP, an operand of a commutative
3767 operation, is preferred as the first or second operand. The more
3768 positive the value, the stronger the preference for being the first
3769 operand. */
3770
3771int
3772commutative_operand_precedence (rtx op)
3773{
3774 enum rtx_code code = GET_CODE (op);
3775
3776 /* Constants always become the second operand. Prefer "nice" constants. */
3777 if (code == CONST_INT)
3778 return -10;
3779 if (code == CONST_WIDE_INT)
3780 return -9;
3781 if (code == CONST_POLY_INT)
3782 return -8;
3783 if (code == CONST_DOUBLE)
3784 return -8;
3785 if (code == CONST_FIXED)
3786 return -8;
3787 op = avoid_constant_pool_reference (op);
3788 code = GET_CODE (op);
3789
3790 switch (GET_RTX_CLASS (code))
3791 {
3792 case RTX_CONST_OBJ:
3793 if (code == CONST_INT)
3794 return -7;
3795 if (code == CONST_WIDE_INT)
3796 return -6;
3797 if (code == CONST_POLY_INT)
3798 return -5;
3799 if (code == CONST_DOUBLE)
3800 return -5;
3801 if (code == CONST_FIXED)
3802 return -5;
3803 return -4;
3804
3805 case RTX_EXTRA:
3806 /* SUBREGs of objects should come second. */
3807 if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3808 return -3;
3809 return 0;
3810
3811 case RTX_OBJ:
3812 /* Complex expressions should be the first, so decrease priority
3813 of objects. Prefer pointer objects over non-pointer objects. */
3814 if ((REG_P (op) && REG_POINTER (op))
3815 || (MEM_P (op) && MEM_POINTER (op)))
3816 return -1;
3817 return -2;
3818
3819 case RTX_COMM_ARITH:
3820 /* Prefer operands that are themselves commutative to be first.
3821 This helps to make things linear. In particular,
3822 (and (and (reg) (reg)) (not (reg))) is canonical. */
3823 return 4;
3824
3825 case RTX_BIN_ARITH:
3826 /* If only one operand is a binary expression, it will be the first
3827 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
3828 is canonical, although it will usually be further simplified. */
3829 return 2;
3830
3831 case RTX_UNARY:
3832 /* Then prefer NEG and NOT. */
3833 if (code == NEG || code == NOT)
3834 return 1;
3835 /* FALLTHRU */
3836
3837 default:
3838 return 0;
3839 }
3840}
3841
3842/* Return true iff it is necessary to swap the operands of a commutative
3843 operation in order to canonicalize the expression. */
3844
3845bool
3846swap_commutative_operands_p (rtx x, rtx y)
3847{
3848 return (commutative_operand_precedence (x)
3849 < commutative_operand_precedence (y));
3850}
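
/* Illustrative example, not part of the GCC sources: for
   (plus:SI (const_int 4) (reg:SI 100)), the CONST_INT has precedence
   -10 and the (non-pointer) REG has precedence -2, so
   swap_commutative_operands_p returns true and canonicalization
   produces (plus:SI (reg:SI 100) (const_int 4)).  */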
3851
3852/* Return true if X is an autoincrement side effect and the register is
3853 not the stack pointer. */
3854bool
3855auto_inc_p (const_rtx x)
3856{
3857 switch (GET_CODE (x))
3858 {
3859 case PRE_INC:
3860 case POST_INC:
3861 case PRE_DEC:
3862 case POST_DEC:
3863 case PRE_MODIFY:
3864 case POST_MODIFY:
3865 /* There are no REG_INC notes for SP. */
3866 if (XEXP (x, 0) != stack_pointer_rtx)
3867 return true;
3868 default:
3869 break;
3870 }
3871 return false;
3872}
3873
3874/* Return true if IN contains a piece of rtl that has the address LOC. */
3875bool
3876loc_mentioned_in_p (rtx *loc, const_rtx in)
3877{
3878 enum rtx_code code;
3879 const char *fmt;
3880 int i, j;
3881
3882 if (!in)
3883 return false;
3884
3885 code = GET_CODE (in);
3886 fmt = GET_RTX_FORMAT (code);
3887 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3888 {
3889 if (fmt[i] == 'e')
3890 {
3891 if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3892 return true;
3893 }
3894 else if (fmt[i] == 'E')
3895 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3896 if (loc == &XVECEXP (in, i, j)
3897 || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3898 return true;
3899 }
3900 return false;
3901}
3902
3903/* Reinterpret a subreg as a bit extraction from an integer and return
3904 the position of the least significant bit of the extracted value.
3905 In other words, if the extraction were performed as a shift right
3906 and mask, return the number of bits to shift right.
3907
3908 The outer value of the subreg has OUTER_BYTES bytes and starts at
3909 byte offset SUBREG_BYTE within an inner value of INNER_BYTES bytes. */
3910
3911poly_uint64
3912subreg_size_lsb (poly_uint64 outer_bytes,
3913 poly_uint64 inner_bytes,
3914 poly_uint64 subreg_byte)
3915{
3916 poly_uint64 subreg_end, trailing_bytes, byte_pos;
3917
3918 /* A paradoxical subreg begins at bit position 0. */
3919 gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
3920 if (maybe_gt (outer_bytes, inner_bytes))
3921 {
3922 gcc_checking_assert (known_eq (subreg_byte, 0U));
3923 return 0;
3924 }
3925
3926 subreg_end = subreg_byte + outer_bytes;
3927 trailing_bytes = inner_bytes - subreg_end;
3928 if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
3929 byte_pos = trailing_bytes;
3930 else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
3931 byte_pos = subreg_byte;
3932 else
3933 {
3934 /* When bytes and words have opposite endianness, we must be able
3935 to split offsets into words and bytes at compile time. */
3936 poly_uint64 leading_word_part
3937 = force_align_down (subreg_byte, UNITS_PER_WORD);
3938 poly_uint64 trailing_word_part
3939 = force_align_down (trailing_bytes, UNITS_PER_WORD);
3940 /* If the subreg crosses a word boundary ensure that
3941 it also begins and ends on a word boundary. */
3942 gcc_assert (known_le (subreg_end - leading_word_part,
3943 (unsigned int) UNITS_PER_WORD)
3944 || (known_eq (leading_word_part, subreg_byte)
3945 && known_eq (trailing_word_part, trailing_bytes)));
3946 if (WORDS_BIG_ENDIAN)
3947 byte_pos = trailing_word_part + (subreg_byte - leading_word_part);
3948 else
3949 byte_pos = leading_word_part + (trailing_bytes - trailing_word_part);
3950 }
3951
3952 return byte_pos * BITS_PER_UNIT;
3953}
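
/* Illustrative example, not part of the GCC sources: for
   (subreg:SI (reg:DI 100) 4) we have OUTER_BYTES = 4, INNER_BYTES = 8
   and SUBREG_BYTE = 4.  On a little-endian target the result is
   subreg_byte * BITS_PER_UNIT = 32, i.e. the subreg reads the high
   half of the DImode value; on a big-endian target the result is
   trailing_bytes * BITS_PER_UNIT = 0, i.e. it reads the low half.  */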
3954
3955/* Given a subreg X, return the bit offset where the subreg begins
3956 (counting from the least significant bit of the reg). */
3957
3958poly_uint64
3959subreg_lsb (const_rtx x)
3960{
3961 return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3962 SUBREG_BYTE (x));
3963}
3964
3965/* Return the subreg byte offset for a subreg whose outer value has
3966 OUTER_BYTES bytes, whose inner value has INNER_BYTES bytes, and where
3967 there are LSB_SHIFT *bits* between the lsb of the outer value and the
3968 lsb of the inner value. This is the inverse of the calculation
3969 performed by subreg_lsb_1 (which converts byte offsets to bit shifts). */
3970
3971poly_uint64
3972subreg_size_offset_from_lsb (poly_uint64 outer_bytes, poly_uint64 inner_bytes,
3973 poly_uint64 lsb_shift)
3974{
3975 /* A paradoxical subreg begins at bit position 0. */
3976 gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
3977 if (maybe_gt (outer_bytes, inner_bytes))
3978 {
3979 gcc_checking_assert (known_eq (lsb_shift, 0U));
3980 return 0;
3981 }
3982
3983 poly_uint64 lower_bytes = exact_div (lsb_shift, BITS_PER_UNIT);
3984 poly_uint64 upper_bytes = inner_bytes - (lower_bytes + outer_bytes);
3985 if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
3986 return upper_bytes;
3987 else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
3988 return lower_bytes;
3989 else
3990 {
3991 /* When bytes and words have opposite endianness, we must be able
3992 to split offsets into words and bytes at compile time. */
3993 poly_uint64 lower_word_part = force_align_down (lower_bytes,
3994 UNITS_PER_WORD);
3995 poly_uint64 upper_word_part = force_align_down (upper_bytes,
3996 UNITS_PER_WORD);
3997 if (WORDS_BIG_ENDIAN)
3998 return upper_word_part + (lower_bytes - lower_word_part);
3999 else
4000 return lower_word_part + (upper_bytes - upper_word_part);
4001 }
4002}
4003
4004/* Fill in information about a subreg of a hard register.
4005 xregno - A regno of an inner hard subreg_reg (or what will become one).
4006 xmode - The mode of xregno.
4007 offset - The byte offset.
4008 ymode - The mode of a top level SUBREG (or what may become one).
4009 info - Pointer to structure to fill in.
4010
4011 Rather than considering one particular inner register (and thus one
4012 particular "outer" register) in isolation, this function really uses
4013 XREGNO as a model for a sequence of isomorphic hard registers. Thus the
4014 function does not check whether adding INFO->offset to XREGNO gives
4015 a valid hard register; even if INFO->offset + XREGNO is out of range,
4016 there might be another register of the same type that is in range.
4017 Likewise it doesn't check whether targetm.hard_regno_mode_ok accepts
4018 the new register, since that can depend on things like whether the final
4019 register number is even or odd. Callers that want to check whether
4020 this particular subreg can be replaced by a simple (reg ...) should
4021 use simplify_subreg_regno. */
4022
4023void
4024subreg_get_info (unsigned int xregno, machine_mode xmode,
4025 poly_uint64 offset, machine_mode ymode,
4026 struct subreg_info *info)
4027{
4028 unsigned int nregs_xmode, nregs_ymode;
4029
4030 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
4031
4032 poly_uint64 xsize = GET_MODE_SIZE (xmode);
4033 poly_uint64 ysize = GET_MODE_SIZE (ymode);
4034
4035 bool rknown = false;
4036
4037 /* If the register representation of a non-scalar mode has holes in it,
4038 we expect the scalar units to be concatenated together, with the holes
4039 distributed evenly among the scalar units. Each scalar unit must occupy
4040 at least one register. */
4041 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
4042 {
4043 /* As a consequence, we must be dealing with a constant number of
4044 scalars, and thus a constant offset and number of units. */
4045 HOST_WIDE_INT coffset = offset.to_constant ();
4046 HOST_WIDE_INT cysize = ysize.to_constant ();
4047 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
4048 unsigned int nunits = GET_MODE_NUNITS (xmode).to_constant ();
4049 scalar_mode xmode_unit = GET_MODE_INNER (xmode);
4050 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
4051 gcc_assert (nregs_xmode
4052 == (nunits
4053 * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
4054 gcc_assert (hard_regno_nregs (xregno, xmode)
4055 == hard_regno_nregs (xregno, xmode_unit) * nunits);
4056
4057 /* You can only ask for a SUBREG of a value with holes in the middle
4058 if you don't cross the holes. (Such a SUBREG should be done by
4059 picking a different register class, or doing it in memory if
4060 necessary.) An example of a value with holes is XCmode on 32-bit
4061 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
4062 3 for each part, but in memory it's two 128-bit parts.
4063 Padding is assumed to be at the end (not necessarily the 'high part')
4064 of each unit. */
4065 if ((coffset / GET_MODE_SIZE (xmode_unit) + 1 < nunits)
4066 && (coffset / GET_MODE_SIZE (xmode_unit)
4067 != ((coffset + cysize - 1) / GET_MODE_SIZE (xmode_unit))))
4068 {
4069 info->representable_p = false;
4070 rknown = true;
4071 }
4072 }
4073 else
4074 nregs_xmode = hard_regno_nregs (xregno, xmode);
4075
4076 nregs_ymode = hard_regno_nregs (xregno, ymode);
4077
4078 /* Subreg sizes must be ordered, so that we can tell whether they are
4079 partial, paradoxical or complete. */
4080 gcc_checking_assert (ordered_p (xsize, ysize));
4081
4082 /* Paradoxical subregs are otherwise valid. */
4083 if (!rknown && known_eq (offset, 0U) && maybe_gt (ysize, xsize))
4084 {
4085 info->representable_p = true;
4086 /* If this is a big endian paradoxical subreg, which uses more
4087 actual hard registers than the original register, we must
4088 return a negative offset so that we find the proper highpart
4089 of the register.
4090
4091 We assume that the ordering of registers within a multi-register
4092 value has a consistent endianness: if bytes and register words
4093 have different endianness, the hard registers that make up a
4094 multi-register value must be at least word-sized. */
4095 if (REG_WORDS_BIG_ENDIAN)
4096 info->offset = (int) nregs_xmode - (int) nregs_ymode;
4097 else
4098 info->offset = 0;
4099 info->nregs = nregs_ymode;
4100 return;
4101 }
4102
4103 /* If registers store different numbers of bits in the different
4104 modes, we cannot generally form this subreg. */
4105 poly_uint64 regsize_xmode, regsize_ymode;
4106 if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
4107 && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
4108 && multiple_p (xsize, nregs_xmode, &regsize_xmode)
4109 && multiple_p (ysize, nregs_ymode, &regsize_ymode))
4110 {
4111 if (!rknown
4112 && ((nregs_ymode > 1 && maybe_gt (regsize_xmode, regsize_ymode))
4113 || (nregs_xmode > 1 && maybe_gt (regsize_ymode, regsize_xmode))))
4114 {
4115 info->representable_p = false;
4116 if (!can_div_away_from_zero_p (ysize, regsize_xmode, &info->nregs)
4117 || !can_div_trunc_p (offset, regsize_xmode, &info->offset))
4118 /* Checked by validate_subreg. We must know at compile time
4119 which inner registers are being accessed. */
4120 gcc_unreachable ();
4121 return;
4122 }
4123 /* It's not valid to extract a subreg of mode YMODE at OFFSET that
4124 would go outside of XMODE. */
4125 if (!rknown && maybe_gt (ysize + offset, xsize))
4126 {
4127 info->representable_p = false;
4128 info->nregs = nregs_ymode;
4129 if (!can_div_trunc_p (offset, regsize_xmode, &info->offset))
4130 /* Checked by validate_subreg. We must know at compile time
4131 which inner registers are being accessed. */
4132 gcc_unreachable ();
4133 return;
4134 }
4135 /* Quick exit for the simple and common case of extracting whole
4136 subregisters from a multiregister value. */
4137 /* ??? It would be better to integrate this into the code below,
4138 if we can generalize the concept enough and figure out how
4139 odd-sized modes can coexist with the other weird cases we support. */
4140 HOST_WIDE_INT count;
4141 if (!rknown
4142 && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
4143 && known_eq (regsize_xmode, regsize_ymode)
4144 && constant_multiple_p (offset, regsize_ymode, &count))
4145 {
4146 info->representable_p = true;
4147 info->nregs = nregs_ymode;
4148 info->offset = count;
4149 gcc_assert (info->offset + info->nregs <= (int) nregs_xmode);
4150 return;
4151 }
4152 }
4153
4154 /* Lowpart subregs are otherwise valid. */
4155 if (!rknown && known_eq (offset, subreg_lowpart_offset (ymode, xmode)))
4156 {
4157 info->representable_p = true;
4158 rknown = true;
4159
4160 if (known_eq (offset, 0U) || nregs_xmode == nregs_ymode)
4161 {
4162 info->offset = 0;
4163 info->nregs = nregs_ymode;
4164 return;
4165 }
4166 }
4167
4168 /* Set NUM_BLOCKS to the number of independently-representable YMODE
4169 values there are in (reg:XMODE XREGNO). We can view the register
4170 as consisting of this number of independent "blocks", where each
4171 block occupies NREGS_YMODE registers and contains exactly one
4172 representable YMODE value. */
4173 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
4174 unsigned int num_blocks = nregs_xmode / nregs_ymode;
4175
4176 /* Calculate the number of bytes in each block. This must always
4177 be exact, otherwise we don't know how to verify the constraint.
4178 These conditions may be relaxed but subreg_regno_offset would
4179 need to be redesigned. */
4180 poly_uint64 bytes_per_block = exact_div (xsize, num_blocks);
4181
4182 /* Get the number of the first block that contains the subreg and the byte
4183 offset of the subreg from the start of that block. */
4184 unsigned int block_number;
4185 poly_uint64 subblock_offset;
4186 if (!can_div_trunc_p (offset, bytes_per_block, &block_number,
4187 &subblock_offset))
4188 /* Checked by validate_subreg. We must know at compile time which
4189 inner registers are being accessed. */
4190 gcc_unreachable ();
4191
4192 if (!rknown)
4193 {
4194 /* Only the lowpart of each block is representable. */
4195 info->representable_p
4196 = known_eq (subblock_offset,
4197 subreg_size_lowpart_offset (ysize, bytes_per_block));
4198 rknown = true;
4199 }
4200
4201 /* We assume that the ordering of registers within a multi-register
4202 value has a consistent endianness: if bytes and register words
4203 have different endianness, the hard registers that make up a
4204 multi-register value must be at least word-sized. */
4205 if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN)
4206 /* The block number we calculated above followed memory endianness.
4207 Convert it to register endianness by counting back from the end.
4208 (Note that, because of the assumption above, each block must be
4209 at least word-sized.) */
4210 info->offset = (num_blocks - block_number - 1) * nregs_ymode;
4211 else
4212 info->offset = block_number * nregs_ymode;
4213 info->nregs = nregs_ymode;
4214}
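
/* Illustrative use (assuming a target where SImode occupies one hard
   register, DImode occupies two, and register words are little-endian):
   for (subreg:SI (reg:DI 10) 4), subreg_get_info (10, DImode, 4, SImode,
   &info) takes the "whole subregister" quick exit and sets
   info.representable_p, info.offset == 1 and info.nregs == 1,
   i.e. the subreg corresponds to hard register 11. */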
4215
4216/* This function returns the regno offset of a subreg expression.
4217 xregno - A regno of an inner hard subreg_reg (or what will become one).
4218 xmode - The mode of xregno.
4219 offset - The byte offset.
4220 ymode - The mode of a top level SUBREG (or what may become one).
4221 RETURN - The regno offset which would be used. */
4222unsigned int
4223subreg_regno_offset (unsigned int xregno, machine_mode xmode,
4224 poly_uint64 offset, machine_mode ymode)
4225{
4226 struct subreg_info info;
4227 subreg_get_info (xregno, xmode, offset, ymode, &info);
4228 return info.offset;
4229}
4230
4231/* This function returns true when the offset is representable via
4232 subreg_offset in the given regno.
4233 xregno - A regno of an inner hard subreg_reg (or what will become one).
4234 xmode - The mode of xregno.
4235 offset - The byte offset.
4236 ymode - The mode of a top level SUBREG (or what may become one).
4237 RETURN - Whether the offset is representable. */
4238bool
4239subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
4240 poly_uint64 offset, machine_mode ymode)
4241{
4242 struct subreg_info info;
4243 subreg_get_info (xregno, xmode, offset, ymode, &info);
4244 return info.representable_p;
4245}
4246
4247/* Return the number of a YMODE register to which
4248
4249 (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
4250
4251 can be simplified. Return -1 if the subreg can't be simplified.
4252
4253 XREGNO is a hard register number. */
4254
4255int
4256simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
4257 poly_uint64 offset, machine_mode ymode)
4258{
4259 struct subreg_info info;
4260 unsigned int yregno;
4261
4262 /* Give the backend a chance to disallow the mode change. */
4263 if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
4264 && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
4265 && !REG_CAN_CHANGE_MODE_P (xregno, xmode, ymode))
4266 return -1;
4267
4268 /* We shouldn't simplify stack-related registers. */
4269 if ((!reload_completed || frame_pointer_needed)
4270 && xregno == FRAME_POINTER_REGNUM)
4271 return -1;
4272
4273 if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4274 && xregno == ARG_POINTER_REGNUM)
4275 return -1;
4276
4277 if (xregno == STACK_POINTER_REGNUM
4278 /* We should convert hard stack register in LRA if it is
4279 possible. */
4280 && ! lra_in_progress)
4281 return -1;
4282
4283 /* Try to get the register offset. */
4284 subreg_get_info (xregno, xmode, offset, ymode, &info);
4285 if (!info.representable_p)
4286 return -1;
4287
4288 /* Make sure that the offsetted register value is in range. */
4289 yregno = xregno + info.offset;
4290 if (!HARD_REGISTER_NUM_P (yregno))
4291 return -1;
4292
4293 /* See whether (reg:YMODE YREGNO) is valid.
4294
4295 ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
4296 This is a kludge to work around how complex FP arguments are passed
4297 on IA-64 and should be fixed. See PR target/49226. */
4298 if (!targetm.hard_regno_mode_ok (yregno, ymode)
4299 && targetm.hard_regno_mode_ok (xregno, xmode))
4300 return -1;
4301
4302 return (int) yregno;
4303}
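
/* Example of the intended use (hypothetical register layout as in the
   note after subreg_get_info): simplify_subreg_regno (10, DImode, 4,
   SImode) returns 11 when (reg:SI 11) is a valid replacement for
   (subreg:SI (reg:DI 10) 4), and -1 when the subreg must be kept. */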
4304
4305/* A wrapper around simplify_subreg_regno that uses subreg_lowpart_offset
4306 (xmode, ymode) as the offset. */
4307
4308int
4309lowpart_subreg_regno (unsigned int regno, machine_mode xmode,
4310 machine_mode ymode)
4311{
4312 poly_uint64 offset = subreg_lowpart_offset (xmode, ymode);
4313 return simplify_subreg_regno (regno, xmode, offset, ymode);
4314}
4315
4316/* Return the final regno that a subreg expression refers to. */
4317unsigned int
4318subreg_regno (const_rtx x)
4319{
4320 unsigned int ret;
4321 rtx subreg = SUBREG_REG (x);
4322 int regno = REGNO (subreg);
4323
4324 ret = regno + subreg_regno_offset (regno,
4325 GET_MODE (subreg),
4326 SUBREG_BYTE (x),
4327 GET_MODE (x));
4328 return ret;
4329
4330}
4331
4332/* Return the number of registers that a subreg expression refers
4333 to. */
4334unsigned int
4335subreg_nregs (const_rtx x)
4336{
4337 return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
4338}
4339
4340/* Return the number of registers that a subreg REG with REGNO
4341 expression refers to. This is a copy of rtlanal.cc:subreg_nregs,
4342 changed so that the regno can be passed in. */
4343
4344unsigned int
4345subreg_nregs_with_regno (unsigned int regno, const_rtx x)
4346{
4347 struct subreg_info info;
4348 rtx subreg = SUBREG_REG (x);
4349
4350 subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
4351 &info);
4352 return info.nregs;
4353}
4354
4355struct parms_set_data
4356{
4357 int nregs;
4358 HARD_REG_SET regs;
4359};
4360
4361/* Helper function for noticing stores to parameter registers. */
4362static void
4363parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
4364{
4365 struct parms_set_data *const d = (struct parms_set_data *) data;
4366 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
4367 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
4368 {
4369 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
4370 d->nregs--;
4371 }
4372}
4373
4374/* Look backward for first parameter to be loaded.
4375 Note that loads of all parameters will not necessarily be
4376 found if CSE has eliminated some of them (e.g., an argument
4377 to the outer function is passed down as a parameter).
4378 Do not skip BOUNDARY. */
4379rtx_insn *
4380find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
4381{
4382 struct parms_set_data parm;
4383 rtx p;
4384 rtx_insn *before, *first_set;
4385
4386 /* Since different machines initialize their parameter registers
4387 in different orders, assume nothing. Collect the set of all
4388 parameter registers. */
4389 CLEAR_HARD_REG_SET (parm.regs);
4390 parm.nregs = 0;
4391 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
4392 if (GET_CODE (XEXP (p, 0)) == USE
4393 && REG_P (XEXP (XEXP (p, 0), 0))
4394 && !STATIC_CHAIN_REG_P (XEXP (XEXP (p, 0), 0)))
4395 {
4396 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
4397
4398 /* We only care about registers which can hold function
4399 arguments. */
4400 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
4401 continue;
4402
4403 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
4404 parm.nregs++;
4405 }
4406 before = call_insn;
4407 first_set = call_insn;
4408
4409 /* Search backward for the first set of a register in this set. */
4410 while (parm.nregs && before != boundary)
4411 {
4412 before = PREV_INSN (before);
4413
4414 /* It is possible that some loads got CSEed from one call to
4415 another. Stop in that case. */
4416 if (CALL_P (before))
4417 break;
4418
4419 /* Our caller must either ensure that we will find all sets
4420 (in case the code has not been optimized yet), or take care
4421 of possible labels by setting BOUNDARY to the preceding
4422 CODE_LABEL. */
4423 if (LABEL_P (before))
4424 {
4425 gcc_assert (before == boundary);
4426 break;
4427 }
4428
4429 if (INSN_P (before))
4430 {
4431 int nregs_old = parm.nregs;
4432 note_stores (before, parms_set, &parm);
4433 /* If we found something that did not set a parameter reg,
4434 we're done. Do not keep going, as that might result
4435 in hoisting an insn before the setting of a pseudo
4436 that is used by the hoisted insn. */
4437 if (nregs_old != parm.nregs)
4438 first_set = before;
4439 else
4440 break;
4441 }
4442 }
4443 return first_set;
4444}
4445
4446/* Return true if we should avoid inserting code between INSN and preceding
4447 call instruction. */
4448
4449bool
4450keep_with_call_p (const rtx_insn *insn)
4451{
4452 rtx set;
4453
4454 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
4455 {
4456 if (REG_P (SET_DEST (set))
4457 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
4458 && fixed_regs[REGNO (SET_DEST (set))]
4459 && general_operand (SET_SRC (set), VOIDmode))
4460 return true;
4461 if (REG_P (SET_SRC (set))
4462 && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
4463 && REG_P (SET_DEST (set))
4464 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
4465 return true;
4466 /* There may be a stack pop just after the call and before the store
4467 of the return register. Search for the actual store when deciding
4468 if we can break or not. */
4469 if (SET_DEST (set) == stack_pointer_rtx)
4470 {
4471 /* This CONST_CAST is okay because next_nonnote_insn just
4472 returns its argument and we assign it to a const_rtx
4473 variable. */
4474 const rtx_insn *i2
4475 = next_nonnote_insn (const_cast<rtx_insn *> (insn));
4476 if (i2 && keep_with_call_p (i2))
4477 return true;
4478 }
4479 }
4480 return false;
4481}
4482
4483/* Return true if LABEL is a target of JUMP_INSN. This applies only
4484 to non-complex jumps. That is, direct unconditional, conditional,
4485 and tablejumps, but not computed jumps or returns. It also does
4486 not apply to the fallthru case of a conditional jump. */
4487
4488bool
4489label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
4490{
4491 rtx tmp = JUMP_LABEL (jump_insn);
4492 rtx_jump_table_data *table;
4493
4494 if (label == tmp)
4495 return true;
4496
4497 if (tablejump_p (jump_insn, NULL, &table))
4498 {
4499 rtvec vec = table->get_labels ();
4500 int i, veclen = GET_NUM_ELEM (vec);
4501
4502 for (i = 0; i < veclen; ++i)
4503 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
4504 return true;
4505 }
4506
4507 if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
4508 return true;
4509
4510 return false;
4511}
4512
4513
4514/* Return an estimate of the cost of computing rtx X.
4515 One use is in cse, to decide which expression to keep in the hash table.
4516 Another is in rtl generation, to pick the cheapest way to multiply.
4517 Other uses like the latter are expected in the future.
4518
4519 X appears as operand OPNO in an expression with code OUTER_CODE.
4520 SPEED specifies whether costs optimized for speed or size should
4521 be returned. */
4522
4523int
4524rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code,
4525 int opno, bool speed)
4526{
4527 int i, j;
4528 enum rtx_code code;
4529 const char *fmt;
4530 int total;
4531 int factor;
4532 unsigned mode_size;
4533
4534 if (x == 0)
4535 return 0;
4536
4537 if (GET_CODE (x) == SET)
4538 /* A SET doesn't have a mode, so let's look at the SET_DEST to get
4539 the mode for the factor. */
4540 mode = GET_MODE (SET_DEST (x));
4541 else if (GET_MODE (x) != VOIDmode)
4542 mode = GET_MODE (x);
4543
4544 mode_size = estimated_poly_value (GET_MODE_SIZE (mode));
4545
4546 /* A size N times larger than UNITS_PER_WORD likely needs N times as
4547 many insns, taking N times as long. */
4548 factor = mode_size > UNITS_PER_WORD ? mode_size / UNITS_PER_WORD : 1;
4549
4550 /* Compute the default costs of certain things.
4551 Note that targetm.rtx_costs can override the defaults. */
4552
4553 code = GET_CODE (x);
4554 switch (code)
4555 {
4556 case MULT:
4557 case FMA:
4558 case SS_MULT:
4559 case US_MULT:
4560 case SMUL_HIGHPART:
4561 case UMUL_HIGHPART:
4562 /* Multiplication has time-complexity O(N*N), where N is the
4563 number of units (translated from digits) when using
4564 schoolbook long multiplication. */
4565 total = factor * factor * COSTS_N_INSNS (5);
4566 break;
4567 case DIV:
4568 case UDIV:
4569 case MOD:
4570 case UMOD:
4571 case SS_DIV:
4572 case US_DIV:
4573 /* Similarly, complexity for schoolbook long division. */
4574 total = factor * factor * COSTS_N_INSNS (7);
4575 break;
4576 case USE:
4577 /* Used in combine.cc as a marker. */
4578 total = 0;
4579 break;
4580 default:
4581 total = factor * COSTS_N_INSNS (1);
4582 }
4583
4584 switch (code)
4585 {
4586 case REG:
4587 return 0;
4588
4589 case SUBREG:
4590 total = 0;
4591 /* If we can't tie these modes, make this expensive. The larger
4592 the mode, the more expensive it is. */
4593 if (!targetm.modes_tieable_p (mode, GET_MODE (SUBREG_REG (x))))
4594 return COSTS_N_INSNS (2 + factor);
4595 break;
4596
4597 case TRUNCATE:
4598 if (targetm.modes_tieable_p (mode, GET_MODE (XEXP (x, 0))))
4599 {
4600 total = 0;
4601 break;
4602 }
4603 /* FALLTHRU */
4604 default:
4605 if (targetm.rtx_costs (x, mode, outer_code, opno, &total, speed))
4606 return total;
4607 break;
4608 }
4609
4610 /* Sum the costs of the sub-rtx's, plus cost of this operation,
4611 which is already in total. */
4612
4613 fmt = GET_RTX_FORMAT (code);
4614 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4615 if (fmt[i] == 'e')
4616 total += rtx_cost (XEXP (x, i), mode, code, i, speed);
4617 else if (fmt[i] == 'E')
4618 for (j = 0; j < XVECLEN (x, i); j++)
4619 total += rtx_cost (XVECEXP (x, i, j), mode, code, i, speed);
4620
4621 return total;
4622}
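
/* Worked example of the size factor (illustrative, assuming
   UNITS_PER_WORD == 8): a TImode MULT has mode_size 16, so FACTOR is 2
   and the default cost is 2 * 2 * COSTS_N_INSNS (5), before the costs
   of the operands are added and before targetm.rtx_costs has had a
   chance to override the value. */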
4623
4624/* Fill in the structure C with information about both speed and size rtx
4625 costs for X, which is operand OPNO in an expression with code OUTER. */
4626
4627void
4628get_full_rtx_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno,
4629 struct full_rtx_costs *c)
4630{
4631 c->speed = rtx_cost (x, mode, outer, opno, true);
4632 c->size = rtx_cost (x, mode, outer, opno, false);
4633}
4634
4635
4636/* Return the cost of address expression X.
4637 Expect that X is a properly formed address reference.
4638
4639 The SPEED parameter specifies whether costs optimized for speed or size
4640 should be returned. */
4641
4642int
4643address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
4644{
4645 /* We may be asked for the cost of various unusual addresses, such as the
4646 operands of a push instruction. It is not worthwhile to complicate
4647 the target hook with such cases. */
4648
4649 if (!memory_address_addr_space_p (mode, x, as))
4650 return 1000;
4651
4652 return targetm.address_cost (x, mode, as, speed);
4653}
4654
4655/* If the target doesn't override, compute the cost as with arithmetic. */
4656
4657int
4658default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
4659{
4660 return rtx_cost (x, Pmode, MEM, 0, speed);
4661}
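
/* Illustrative call (BASE and IDX are placeholder pseudos):
   address_cost (gen_rtx_PLUS (Pmode, base, idx), SImode,
   ADDR_SPACE_GENERIC, true) returns the target's speed cost for that
   address if it is a valid SImode address in the generic address
   space, and 1000 otherwise. */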
4662
4663
4664unsigned HOST_WIDE_INT
4665nonzero_bits (const_rtx x, machine_mode mode)
4666{
4667 if (mode == VOIDmode)
4668 mode = GET_MODE (x);
4669 scalar_int_mode int_mode;
4670 if (!is_a <scalar_int_mode> (mode, &int_mode))
4671 return GET_MODE_MASK (mode);
4672 return cached_nonzero_bits (x, int_mode, NULL_RTX, VOIDmode, 0);
4673}
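
/* Example (illustrative): for (and:SI X (const_int 0xff)) the AND case
   below intersects the nonzero bits of the operands, so nonzero_bits
   returns 0xff; for a pseudo with no recorded information it simply
   returns GET_MODE_MASK (SImode). */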
4674
4675unsigned int
4676num_sign_bit_copies (const_rtx x, machine_mode mode)
4677{
4678 if (mode == VOIDmode)
4679 mode = GET_MODE (x);
4680 scalar_int_mode int_mode;
4681 if (!is_a <scalar_int_mode> (mode, &int_mode))
4682 return 1;
4683 return cached_num_sign_bit_copies (x, int_mode, NULL_RTX, VOIDmode, 0);
4684}
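
/* Example (illustrative): in SImode, num_sign_bit_copies of
   (const_int -1) is 32, of (const_int 255) is 24, and of
   (sign_extend:SI (reg:QI R)) it is at least 25, since the SIGN_EXTEND
   case adds the difference in precisions to the count for the inner
   operand. */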
4685
4686/* Return true if nonzero_bits1 might recurse into both operands
4687 of X. */
4688
4689static inline bool
4690nonzero_bits_binary_arith_p (const_rtx x)
4691{
4692 if (!ARITHMETIC_P (x))
4693 return false;
4694 switch (GET_CODE (x))
4695 {
4696 case AND:
4697 case XOR:
4698 case IOR:
4699 case UMIN:
4700 case UMAX:
4701 case SMIN:
4702 case SMAX:
4703 case PLUS:
4704 case MINUS:
4705 case MULT:
4706 case DIV:
4707 case UDIV:
4708 case MOD:
4709 case UMOD:
4710 return true;
4711 default:
4712 return false;
4713 }
4714}
4715
4716/* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
4717 It avoids exponential behavior in nonzero_bits1 when X has
4718 identical subexpressions on the first or the second level. */
4719
4720static unsigned HOST_WIDE_INT
4721cached_nonzero_bits (const_rtx x, scalar_int_mode mode, const_rtx known_x,
4722 machine_mode known_mode,
4723 unsigned HOST_WIDE_INT known_ret)
4724{
4725 if (x == known_x && mode == known_mode)
4726 return known_ret;
4727
4728 /* Try to find identical subexpressions. If found call
4729 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
4730 precomputed value for the subexpression as KNOWN_RET. */
4731
4732 if (nonzero_bits_binary_arith_p (x))
4733 {
4734 rtx x0 = XEXP (x, 0);
4735 rtx x1 = XEXP (x, 1);
4736
4737 /* Check the first level. */
4738 if (x0 == x1)
4739 return nonzero_bits1 (x, mode, x0, mode,
4740 cached_nonzero_bits (x0, mode, known_x,
4741 known_mode, known_ret));
4742
4743 /* Check the second level. */
4744 if (nonzero_bits_binary_arith_p (x0)
4745 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4746 return nonzero_bits1 (x, mode, x1, mode,
4747 cached_nonzero_bits (x1, mode, known_x,
4748 known_mode, known_ret));
4749
4750 if (nonzero_bits_binary_arith_p (x1)
4751 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4752 return nonzero_bits1 (x, mode, x0, mode,
4753 cached_nonzero_bits (x0, mode, known_x,
4754 known_mode, known_ret));
4755 }
4756
4757 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
4758}
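
/* Worked example of the memoization (illustrative): for
   (plus:SI (reg:SI R) (reg:SI R)) both operands are the same rtx, so
   the first-level check computes the nonzero bits of R once and hands
   the result to nonzero_bits1 as KNOWN_X/KNOWN_RET instead of letting
   it recompute them for each operand. */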
4759
4760/* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
4761 We don't let nonzero_bits recur into num_sign_bit_copies, because that
4762 is less useful. We can't allow both, because that results in exponential
4763 run time recursion. There is a nullstone testcase that triggered
4764 this. This macro avoids accidental uses of num_sign_bit_copies. */
4765#define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
4766
4767/* Given an expression, X, compute which bits in X can be nonzero.
4768 We don't care about bits outside of those defined in MODE.
4769
4770 For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
4771 an arithmetic operation, we can do better. */
4772
4773static unsigned HOST_WIDE_INT
4774nonzero_bits1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
4775 machine_mode known_mode,
4776 unsigned HOST_WIDE_INT known_ret)
4777{
4778 unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
4779 unsigned HOST_WIDE_INT inner_nz;
4780 enum rtx_code code = GET_CODE (x);
4781 machine_mode inner_mode;
4782 unsigned int inner_width;
4783 scalar_int_mode xmode;
4784
4785 unsigned int mode_width = GET_MODE_PRECISION (mode);
4786
4787 if (CONST_INT_P (x))
4788 {
4789 if (SHORT_IMMEDIATES_SIGN_EXTEND
4790 && INTVAL (x) > 0
4791 && mode_width < BITS_PER_WORD
4792 && (UINTVAL (x) & (HOST_WIDE_INT_1U << (mode_width - 1))) != 0)
4793 return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
4794
4795 return UINTVAL (x);
4796 }
4797
4798 if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
4799 return nonzero;
4800 unsigned int xmode_width = GET_MODE_PRECISION (xmode);
4801
4802 /* If X is wider than MODE, use its mode instead. */
4803 if (xmode_width > mode_width)
4804 {
4805 mode = xmode;
4806 nonzero = GET_MODE_MASK (mode);
4807 mode_width = xmode_width;
4808 }
4809
4810 if (mode_width > HOST_BITS_PER_WIDE_INT)
4811 /* Our only callers in this case look for single bit values. So
4812 just return the mode mask. Those tests will then be false. */
4813 return nonzero;
4814
4815 /* If MODE is wider than X, but both are a single word for both the host
4816 and target machines, we can compute this from which bits of the object
4817 might be nonzero in its own mode, taking into account the fact that, on
4818 CISC machines, accessing an object in a wider mode generally causes the
4819 high-order bits to become undefined, so they are not known to be zero.
4820 We extend this reasoning to RISC machines for operations that might not
4821 operate on the full registers. */
4822 if (mode_width > xmode_width
4823 && xmode_width <= BITS_PER_WORD
4824 && xmode_width <= HOST_BITS_PER_WIDE_INT
4825 && !(WORD_REGISTER_OPERATIONS && word_register_operation_p (x)))
4826 {
4827 nonzero &= cached_nonzero_bits (x, xmode,
4828 known_x, known_mode, known_ret);
4829 nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode);
4830 return nonzero;
4831 }
4832
4833 /* Please keep nonzero_bits_binary_arith_p above in sync with
4834 the code in the switch below. */
4835 switch (code)
4836 {
4837 case REG:
4838#if defined(POINTERS_EXTEND_UNSIGNED)
4839 /* If pointers extend unsigned and this is a pointer in Pmode, say that
4840 all the bits above ptr_mode are known to be zero. */
4841 /* As we do not know which address space the pointer is referring to,
4842 we can do this only if the target does not support different pointer
4843 or address modes depending on the address space. */
4844 if (target_default_pointer_address_modes_p ()
4845 && POINTERS_EXTEND_UNSIGNED
4846 && xmode == Pmode
4847 && REG_POINTER (x)
4848 && !targetm.have_ptr_extend ())
4849 nonzero &= GET_MODE_MASK (ptr_mode);
4850#endif
4851
4852 /* Include declared information about alignment of pointers. */
4853 /* ??? We don't properly preserve REG_POINTER changes across
4854 pointer-to-integer casts, so we can't trust it except for
4855 things that we know must be pointers. See execute/960116-1.c. */
4856 if ((x == stack_pointer_rtx
4857 || x == frame_pointer_rtx
4858 || x == arg_pointer_rtx)
4859 && REGNO_POINTER_ALIGN (REGNO (x)))
4860 {
4861 unsigned HOST_WIDE_INT alignment
4862 = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
4863
4864#ifdef PUSH_ROUNDING
4865 /* If PUSH_ROUNDING is defined, it is possible for the
4866 stack to be momentarily aligned only to that amount,
4867 so we pick the least alignment. */
4868 if (x == stack_pointer_rtx && targetm.calls.push_argument (0))
4869 {
4870 poly_uint64 rounded_1 = PUSH_ROUNDING (poly_int64 (1));
4871 alignment = MIN (known_alignment (rounded_1), alignment);
4872 }
4873#endif
4874
4875 nonzero &= ~(alignment - 1);
4876 }
4877
4878 {
4879 unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4880 rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, xmode, mode,
4881 &nonzero_for_hook);
4882
4883 if (new_rtx)
4884 nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4885 known_mode, known_ret);
4886
4887 return nonzero_for_hook;
4888 }
4889
4890 case MEM:
4891 /* In many, if not most, RISC machines, reading a byte from memory
4892 zeros the rest of the register. Noticing that fact saves a lot
4893 of extra zero-extends. */
4894 if (load_extend_op (xmode) == ZERO_EXTEND)
4895 nonzero &= GET_MODE_MASK (xmode);
4896 break;
4897
4898 case EQ: case NE:
4899 case UNEQ: case LTGT:
4900 case GT: case GTU: case UNGT:
4901 case LT: case LTU: case UNLT:
4902 case GE: case GEU: case UNGE:
4903 case LE: case LEU: case UNLE:
4904 case UNORDERED: case ORDERED:
4905 /* If this produces an integer result, we know which bits are set.
4906 Code here used to clear bits outside the mode of X, but that is
4907 now done above. */
4908 /* Mind that MODE is the mode the caller wants to look at this
4909 operation in, and not the actual operation mode. We can wind
4910 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4911 that describes the results of a vector compare. */
4912 if (GET_MODE_CLASS (xmode) == MODE_INT
4913 && mode_width <= HOST_BITS_PER_WIDE_INT)
4914 nonzero = STORE_FLAG_VALUE;
4915 break;
4916
4917 case NEG:
4918#if 0
4919 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4920 and num_sign_bit_copies. */
4921 if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
4922 nonzero = 1;
4923#endif
4924
4925 if (xmode_width < mode_width)
4926 nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode));
4927 break;
4928
4929 case ABS:
4930#if 0
4931 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4932 and num_sign_bit_copies. */
4933 if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
4934 nonzero = 1;
4935#endif
4936 break;
4937
4938 case TRUNCATE:
4939 nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4940 known_x, known_mode, known_ret)
4941 & GET_MODE_MASK (mode));
4942 break;
4943
4944 case ZERO_EXTEND:
4945 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4946 known_x, known_mode, known_ret);
4947 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4948 nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4949 break;
4950
4951 case SIGN_EXTEND:
4952 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4953 Otherwise, show all the bits in the outer mode but not the inner
4954 may be nonzero. */
4955 inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4956 known_x, known_mode, known_ret);
4957 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4958 {
4959 inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4960 if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4961 inner_nz |= (GET_MODE_MASK (mode)
4962 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4963 }
4964
4965 nonzero &= inner_nz;
4966 break;
4967
4968 case AND:
4969 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4970 known_x, known_mode, known_ret)
4971 & cached_nonzero_bits (XEXP (x, 1), mode,
4972 known_x, known_mode, known_ret);
4973 break;
4974
4975 case XOR: case IOR:
4976 case UMIN: case UMAX: case SMIN: case SMAX:
4977 {
4978 unsigned HOST_WIDE_INT nonzero0
4979 = cached_nonzero_bits (XEXP (x, 0), mode,
4980 known_x, known_mode, known_ret);
4981
4982 /* Don't call nonzero_bits for the second time if it cannot change
4983 anything. */
4984 if ((nonzero & nonzero0) != nonzero)
4985 nonzero &= nonzero0
4986 | cached_nonzero_bits (XEXP (x, 1), mode,
4987 known_x, known_mode, known_ret);
4988 }
4989 break;
4990
4991 case PLUS: case MINUS:
4992 case MULT:
4993 case DIV: case UDIV:
4994 case MOD: case UMOD:
4995 /* We can apply the rules of arithmetic to compute the number of
4996 high- and low-order zero bits of these operations. We start by
4997 computing the width (position of the highest-order nonzero bit)
4998 and the number of low-order zero bits for each value. */
4999 {
5000 unsigned HOST_WIDE_INT nz0
5001 = cached_nonzero_bits (XEXP (x, 0), mode,
5002 known_x, known_mode, known_ret);
5003 unsigned HOST_WIDE_INT nz1
5004 = cached_nonzero_bits (XEXP (x, 1), mode,
5005 known_x, known_mode, known_ret);
5006 int sign_index = xmode_width - 1;
5007 int width0 = floor_log2 (nz0) + 1;
5008 int width1 = floor_log2 (nz1) + 1;
5009 int low0 = ctz_or_zero (nz0);
5010 int low1 = ctz_or_zero (nz1);
5011 unsigned HOST_WIDE_INT op0_maybe_minusp
5012 = nz0 & (HOST_WIDE_INT_1U << sign_index);
5013 unsigned HOST_WIDE_INT op1_maybe_minusp
5014 = nz1 & (HOST_WIDE_INT_1U << sign_index);
5015 unsigned int result_width = mode_width;
5016 int result_low = 0;
5017
5018 switch (code)
5019 {
5020 case PLUS:
5021 result_width = MAX (width0, width1) + 1;
5022 result_low = MIN (low0, low1);
5023 break;
5024 case MINUS:
5025 result_low = MIN (low0, low1);
5026 break;
5027 case MULT:
5028 result_width = width0 + width1;
5029 result_low = low0 + low1;
5030 break;
5031 case DIV:
5032 if (width1 == 0)
5033 break;
5034 if (!op0_maybe_minusp && !op1_maybe_minusp)
5035 result_width = width0;
5036 break;
5037 case UDIV:
5038 if (width1 == 0)
5039 break;
5040 result_width = width0;
5041 break;
5042 case MOD:
5043 if (width1 == 0)
5044 break;
5045 if (!op0_maybe_minusp && !op1_maybe_minusp)
5046 result_width = MIN (width0, width1);
5047 result_low = MIN (low0, low1);
5048 break;
5049 case UMOD:
5050 if (width1 == 0)
5051 break;
5052 result_width = MIN (width0, width1);
5053 result_low = MIN (low0, low1);
5054 break;
5055 default:
5056 gcc_unreachable ();
5057 }
5058
5059 /* Note that mode_width <= HOST_BITS_PER_WIDE_INT, see above. */
5060 if (result_width < mode_width)
5061 nonzero &= (HOST_WIDE_INT_1U << result_width) - 1;
5062
5063 if (result_low > 0)
5064 {
5065 if (result_low < HOST_BITS_PER_WIDE_INT)
5066 nonzero &= ~((HOST_WIDE_INT_1U << result_low) - 1);
5067 else
5068 nonzero = 0;
5069 }
5070 }
5071 break;
5072
5073 case ZERO_EXTRACT:
5074 if (CONST_INT_P (XEXP (x, 1))
5075 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
5076 nonzero &= (HOST_WIDE_INT_1U << INTVAL (XEXP (x, 1))) - 1;
5077 break;
5078
5079 case SUBREG:
5080 /* If this is a SUBREG formed for a promoted variable that has
5081 been zero-extended, we know that at least the high-order bits
5082 are zero, though others might be too. */
5083 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
5084 nonzero = GET_MODE_MASK (xmode)
5085 & cached_nonzero_bits (SUBREG_REG (x), xmode,
5086 known_x, known_mode, known_ret);
5087
5088 /* If the inner mode is a single word for both the host and target
5089 machines, we can compute this from which bits of the inner
5090 object might be nonzero. */
5091 inner_mode = GET_MODE (SUBREG_REG (x));
5092 if (GET_MODE_PRECISION (inner_mode).is_constant (&inner_width)
5093 && inner_width <= BITS_PER_WORD
5094 && inner_width <= HOST_BITS_PER_WIDE_INT)
5095 {
5096 nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
5097 known_x, known_mode, known_ret);
5098
5099 /* On a typical CISC machine, accessing an object in a wider mode
5100 causes the high-order bits to become undefined. So they are
5101 not known to be zero.
5102
5103 On a typical RISC machine, we only have to worry about the way
5104 loads are extended. Otherwise, if we get a reload for the inner
5105 part, it may be loaded from the stack, and then we may lose all
5106 the zero bits that existed before the store to the stack. */
5107 rtx_code extend_op;
5108 if ((!WORD_REGISTER_OPERATIONS
5109 || ((extend_op = load_extend_op (inner_mode)) == SIGN_EXTEND
5110 ? val_signbit_known_set_p (inner_mode, nonzero)
5111 : extend_op != ZERO_EXTEND)
5112 || !MEM_P (SUBREG_REG (x)))
5113 && xmode_width > inner_width)
5114 nonzero
5115 |= (GET_MODE_MASK (GET_MODE (x)) & ~GET_MODE_MASK (inner_mode));
5116 }
5117 break;
5118
5119 case ASHIFT:
5120 case ASHIFTRT:
5121 case LSHIFTRT:
5122 case ROTATE:
5123 case ROTATERT:
5124 /* The nonzero bits are in two classes: any bits within MODE
5125 that aren't in xmode are always significant. The rest of the
5126 nonzero bits are those that are significant in the operand of
5127 the shift when shifted the appropriate number of bits. This
5128 shows that high-order bits are cleared by the right shift and
5129 low-order bits by left shifts. */
5130 if (CONST_INT_P (XEXP (x, 1))
5131 && INTVAL (XEXP (x, 1)) >= 0
5132 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
5133 && INTVAL (XEXP (x, 1)) < xmode_width)
5134 {
5135 int count = INTVAL (XEXP (x, 1));
5136 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (xmode);
5137 unsigned HOST_WIDE_INT op_nonzero
5138 = cached_nonzero_bits (XEXP (x, 0), mode,
5139 known_x, known_mode, known_ret);
5140 unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
5141 unsigned HOST_WIDE_INT outer = 0;
5142
5143 if (mode_width > xmode_width)
5144 outer = (op_nonzero & nonzero & ~mode_mask);
5145
5146 switch (code)
5147 {
5148 case ASHIFT:
5149 inner <<= count;
5150 break;
5151
5152 case LSHIFTRT:
5153 inner >>= count;
5154 break;
5155
5156 case ASHIFTRT:
5157 inner >>= count;
5158
5159 /* If the sign bit may have been nonzero before the shift, we
5160 need to mark all the places it could have been copied to
5161 by the shift as possibly nonzero. */
5162 if (inner & (HOST_WIDE_INT_1U << (xmode_width - 1 - count)))
5163 inner |= (((HOST_WIDE_INT_1U << count) - 1)
5164 << (xmode_width - count));
5165 break;
5166
5167 case ROTATE:
5168 inner = (inner << (count % xmode_width)
5169 | (inner >> (xmode_width - (count % xmode_width))))
5170 & mode_mask;
5171 break;
5172
5173 case ROTATERT:
5174 inner = (inner >> (count % xmode_width)
5175 | (inner << (xmode_width - (count % xmode_width))))
5176 & mode_mask;
5177 break;
5178
5179 default:
5180 gcc_unreachable ();
5181 }
5182
5183 nonzero &= (outer | inner);
5184 }
5185 break;
5186
5187 case FFS:
5188 case POPCOUNT:
5189 /* This is at most the number of bits in the mode. */
5190 nonzero = (HOST_WIDE_INT_UC (2) << (floor_log2 (mode_width))) - 1;
5191 break;
5192
5193 case CLZ:
5194 /* If CLZ has a known value at zero, then the nonzero bits are
5195 that value, plus the number of bits in the mode minus one. */
5196 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
5197 nonzero
5198 |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
5199 else
5200 nonzero = -1;
5201 break;
5202
5203 case CTZ:
5204 /* If CTZ has a known value at zero, then the nonzero bits are
5205 that value, plus the number of bits in the mode minus one. */
5206 if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
5207 nonzero
5208 |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
5209 else
5210 nonzero = -1;
5211 break;
5212
5213 case CLRSB:
5214 /* This is at most the number of bits in the mode minus 1. */
5215 nonzero = (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
5216 break;
5217
5218 case PARITY:
5219 nonzero = 1;
5220 break;
5221
5222 case IF_THEN_ELSE:
5223 {
5224 unsigned HOST_WIDE_INT nonzero_true
5225 = cached_nonzero_bits (XEXP (x, 1), mode,
5226 known_x, known_mode, known_ret);
5227
5228 /* Don't call nonzero_bits for the second time if it cannot change
5229 anything. */
5230 if ((nonzero & nonzero_true) != nonzero)
5231 nonzero &= nonzero_true
5232 | cached_nonzero_bits (XEXP (x, 2), mode,
5233 known_x, known_mode, known_ret);
5234 }
5235 break;
5236
5237 default:
5238 break;
5239 }
5240
5241 return nonzero;
5242}
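
/* Worked example for the PLUS rule above (illustrative): if the two
   operands have nonzero bits 0xff and 0x0f, their widths are 8 and 4
   and neither has trailing zero bits, so the sum is known to fit in
   MAX (8, 4) + 1 = 9 bits and nonzero_bits returns 0x1ff. */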
5243
5244/* See the macro definition above. */
5245#undef cached_num_sign_bit_copies
5246
5247
5248/* Return true if num_sign_bit_copies1 might recurse into both operands
5249 of X. */
5250
5251static inline bool
5252num_sign_bit_copies_binary_arith_p (const_rtx x)
5253{
5254 if (!ARITHMETIC_P (x))
5255 return false;
5256 switch (GET_CODE (x))
5257 {
5258 case IOR:
5259 case AND:
5260 case XOR:
5261 case SMIN:
5262 case SMAX:
5263 case UMIN:
5264 case UMAX:
5265 case PLUS:
5266 case MINUS:
5267 case MULT:
5268 return true;
5269 default:
5270 return false;
5271 }
5272}
5273
5274/* The function cached_num_sign_bit_copies is a wrapper around
5275 num_sign_bit_copies1. It avoids exponential behavior in
5276 num_sign_bit_copies1 when X has identical subexpressions on the
5277 first or the second level. */
5278
5279static unsigned int
5280cached_num_sign_bit_copies (const_rtx x, scalar_int_mode mode,
5281 const_rtx known_x, machine_mode known_mode,
5282 unsigned int known_ret)
5283{
5284 if (x == known_x && mode == known_mode)
5285 return known_ret;
5286
5287 /* Try to find identical subexpressions. If found call
5288 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
5289 the precomputed value for the subexpression as KNOWN_RET. */
5290
5291 if (num_sign_bit_copies_binary_arith_p (x))
5292 {
5293 rtx x0 = XEXP (x, 0);
5294 rtx x1 = XEXP (x, 1);
5295
5296 /* Check the first level. */
5297 if (x0 == x1)
5298 return
5299 num_sign_bit_copies1 (x, mode, x0, mode,
5300 cached_num_sign_bit_copies (x0, mode, known_x,
5301 known_mode,
5302 known_ret));
5303
5304 /* Check the second level. */
5305 if (num_sign_bit_copies_binary_arith_p (x: x0)
5306 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
5307 return
5308 num_sign_bit_copies1 (x, mode, x1, mode,
5309 cached_num_sign_bit_copies (x1, mode, known_x,
5310 known_mode,
5311 known_ret));
5312
5313 if (num_sign_bit_copies_binary_arith_p (x: x1)
5314 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
5315 return
5316 num_sign_bit_copies1 (x, mode, x0, mode,
5317 cached_num_sign_bit_copies (x0, mode, known_x,
5318 known_mode,
5319 known_ret));
5320 }
5321
5322 return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
5323}
5324
5325/* Return the number of bits at the high-order end of X that are known to
5326 be equal to the sign bit. X will be used in mode MODE. The returned
5327 value will always be between 1 and the number of bits in MODE. */
5328
5329static unsigned int
5330num_sign_bit_copies1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
5331 machine_mode known_mode,
5332 unsigned int known_ret)
5333{
5334 enum rtx_code code = GET_CODE (x);
5335 unsigned int bitwidth = GET_MODE_PRECISION (mode);
5336 int num0, num1, result;
5337 unsigned HOST_WIDE_INT nonzero;
5338
5339 if (CONST_INT_P (x))
5340 {
5341 /* If the constant is negative, take its 1's complement and remask.
5342 Then see how many zero bits we have. */
5343 nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
5344 if (bitwidth <= HOST_BITS_PER_WIDE_INT
5345 && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5346 nonzero = (~nonzero) & GET_MODE_MASK (mode);
5347
5348 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
5349 }
5350
5351 scalar_int_mode xmode, inner_mode;
5352 if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
5353 return 1;
5354
5355 unsigned int xmode_width = GET_MODE_PRECISION (xmode);
5356
5357 /* For a smaller mode, just ignore the high bits. */
5358 if (bitwidth < xmode_width)
5359 {
5360 num0 = cached_num_sign_bit_copies (x, xmode,
5361 known_x, known_mode, known_ret);
5362 return MAX (1, num0 - (int) (xmode_width - bitwidth));
5363 }
5364
5365 if (bitwidth > xmode_width)
5366 {
5367 /* If this machine does not do all register operations on the entire
5368 register and MODE is wider than the mode of X, we can say nothing
5369 at all about the high-order bits. We extend this reasoning to RISC
5370 machines for operations that might not operate on full registers. */
5371 if (!(WORD_REGISTER_OPERATIONS && word_register_operation_p (x)))
5372 return 1;
5373
5374 /* Likewise on machines that do, if the mode of the object is smaller
5375 than a word and loads of that size don't sign extend, we can say
5376 nothing about the high order bits. */
5377 if (xmode_width < BITS_PER_WORD
5378 && load_extend_op (mode: xmode) != SIGN_EXTEND)
5379 return 1;
5380 }
5381
5382 /* Please keep num_sign_bit_copies_binary_arith_p above in sync with
5383 the code in the switch below. */
5384 switch (code)
5385 {
5386 case REG:
5387
5388#if defined(POINTERS_EXTEND_UNSIGNED)
5389 /* If pointers extend signed and this is a pointer in Pmode, say that
5390 all the bits above ptr_mode are known to be sign bit copies. */
5391 /* As we do not know which address space the pointer is referring to,
5392 we can do this only if the target does not support different pointer
5393 or address modes depending on the address space. */
5394 if (target_default_pointer_address_modes_p ()
5395 && ! POINTERS_EXTEND_UNSIGNED && xmode == Pmode
5396 && mode == Pmode && REG_POINTER (x)
5397 && !targetm.have_ptr_extend ())
5398 return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
5399#endif
5400
5401 {
5402 unsigned int copies_for_hook = 1, copies = 1;
5403 rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, xmode, mode,
5404 &copies_for_hook);
5405
5406 if (new_rtx)
5407 copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
5408 known_mode, known_ret);
5409
5410 if (copies > 1 || copies_for_hook > 1)
5411 return MAX (copies, copies_for_hook);
5412
5413 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
5414 }
5415 break;
5416
5417 case MEM:
5418 /* Some RISC machines sign-extend all loads of smaller than a word. */
5419 if (load_extend_op (xmode) == SIGN_EXTEND)
5420 return MAX (1, ((int) bitwidth - (int) xmode_width + 1));
5421 break;
5422
5423 case SUBREG:
5424 /* If this is a SUBREG for a promoted object that is sign-extended
5425 and we are looking at it in a wider mode, we know that at least the
5426 high-order bits are known to be sign bit copies. */
5427
5428 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
5429 {
5430 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
5431 known_x, known_mode, known_ret);
5432 return MAX ((int) bitwidth - (int) xmode_width + 1, num0);
5433 }
5434
5435 if (is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (x)), &inner_mode))
5436 {
5437 /* For a smaller object, just ignore the high bits. */
5438 if (bitwidth <= GET_MODE_PRECISION (inner_mode))
5439 {
5440 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), inner_mode,
5441 known_x, known_mode,
5442 known_ret);
5443 return MAX (1, num0 - (int) (GET_MODE_PRECISION (inner_mode)
5444 - bitwidth));
5445 }
5446
5447 /* For paradoxical SUBREGs on machines where all register operations
5448 affect the entire register, just look inside. Note that we are
5449 passing MODE to the recursive call, so the number of sign bit
5450 copies will remain relative to that mode, not the inner mode.
5451
5452 This works only if loads sign extend. Otherwise, if we get a
5453 reload for the inner part, it may be loaded from the stack, and
5454 then we lose all sign bit copies that existed before the store
5455 to the stack. */
5456 if (WORD_REGISTER_OPERATIONS
5457 && load_extend_op (inner_mode) == SIGN_EXTEND
5458 && paradoxical_subreg_p (x)
5459 && MEM_P (SUBREG_REG (x)))
5460 return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
5461 known_x, known_mode, known_ret);
5462 }
5463 break;
5464
5465 case SIGN_EXTRACT:
5466 if (CONST_INT_P (XEXP (x, 1)))
5467 return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
5468 break;
5469
5470 case SIGN_EXTEND:
5471 if (is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
5472 return (bitwidth - GET_MODE_PRECISION (inner_mode)
5473 + cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
5474 known_x, known_mode, known_ret));
5475 break;
5476
5477 case TRUNCATE:
5478 /* For a smaller object, just ignore the high bits. */
5479 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
5480 num0 = cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
5481 known_x, known_mode, known_ret);
5482 return MAX (1, (num0 - (int) (GET_MODE_PRECISION (inner_mode)
5483 - bitwidth)));
5484
5485 case NOT:
5486 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5487 known_x, known_mode, known_ret);
5488
5489 case ROTATE: case ROTATERT:
5490 /* If we are rotating left by a number of bits less than the number
5491 of sign bit copies, we can just subtract that amount from the
5492 number. */
5493 if (CONST_INT_P (XEXP (x, 1))
5494 && INTVAL (XEXP (x, 1)) >= 0
5495 && INTVAL (XEXP (x, 1)) < (int) bitwidth)
5496 {
5497 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5498 known_x, known_mode, known_ret);
5499 return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
5500 : (int) bitwidth - INTVAL (XEXP (x, 1))));
5501 }
5502 break;
5503
5504 case NEG:
5505 /* In general, this subtracts one sign bit copy. But if the value
5506 is known to be positive, the number of sign bit copies is the
5507 same as that of the input. Finally, if the input has just one bit
5508 that might be nonzero, all the bits are copies of the sign bit. */
5509 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5510 known_x, known_mode, known_ret);
5511 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5512 return num0 > 1 ? num0 - 1 : 1;
5513
5514 nonzero = nonzero_bits (XEXP (x, 0), mode);
5515 if (nonzero == 1)
5516 return bitwidth;
5517
5518 if (num0 > 1
5519 && ((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero))
5520 num0--;
5521
5522 return num0;
5523
5524 case IOR: case AND: case XOR:
5525 case SMIN: case SMAX: case UMIN: case UMAX:
5526 /* Logical operations will preserve the number of sign-bit copies.
5527 MIN and MAX operations always return one of the operands. */
5528 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5529 known_x, known_mode, known_ret);
5530 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5531 known_x, known_mode, known_ret);
5532
5533 /* If num1 is clearing some of the top bits then regardless of
5534 the other term, we are guaranteed to have at least that many
5535 high-order zero bits. */
5536 if (code == AND
5537 && num1 > 1
5538 && bitwidth <= HOST_BITS_PER_WIDE_INT
5539 && CONST_INT_P (XEXP (x, 1))
5540 && (UINTVAL (XEXP (x, 1))
5541 & (HOST_WIDE_INT_1U << (bitwidth - 1))) == 0)
5542 return num1;
5543
5544 /* Similarly for IOR when setting high-order bits. */
5545 if (code == IOR
5546 && num1 > 1
5547 && bitwidth <= HOST_BITS_PER_WIDE_INT
5548 && CONST_INT_P (XEXP (x, 1))
5549 && (UINTVAL (XEXP (x, 1))
5550 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5551 return num1;
5552
5553 return MIN (num0, num1);
5554
5555 case PLUS: case MINUS:
5556 /* For addition and subtraction, we can have a 1-bit carry. However,
5557 if we are subtracting 1 from a positive number, there will not
5558 be such a carry. Furthermore, if the positive number is known to
5559 be 0 or 1, we know the result is either -1 or 0. */
5560
5561 if (code == PLUS && XEXP (x, 1) == constm1_rtx
5562 && bitwidth <= HOST_BITS_PER_WIDE_INT)
5563 {
5564 nonzero = nonzero_bits (XEXP (x, 0), mode);
5565 if (((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero) == 0)
5566 return (nonzero == 1 || nonzero == 0 ? bitwidth
5567 : bitwidth - floor_log2 (nonzero) - 1);
5568 }
5569
5570 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5571 known_x, known_mode, known_ret);
5572 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5573 known_x, known_mode, known_ret);
5574 result = MAX (1, MIN (num0, num1) - 1);
5575
5576 return result;
5577
5578 case MULT:
5579 /* The number of bits of the product is the sum of the number of
5580 bits of both terms. However, unless one of the terms is known
5581 to be positive, we must allow for an additional bit since negating
5582 a negative number can remove one sign bit copy. */
5583
5584 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5585 known_x, known_mode, known_ret);
5586 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5587 known_x, known_mode, known_ret);
5588
5589 result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
5590 if (result > 0
5591 && (bitwidth > HOST_BITS_PER_WIDE_INT
5592 || (((nonzero_bits (XEXP (x, 0), mode)
5593 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5594 && ((nonzero_bits (XEXP (x, 1), mode)
5595 & (HOST_WIDE_INT_1U << (bitwidth - 1)))
5596 != 0))))
5597 result--;
5598
5599 return MAX (1, result);
5600
5601 case UDIV:
5602 /* The result must be <= the first operand. If the first operand
5603 has the high bit set, we know nothing about the number of sign
5604 bit copies. */
5605 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5606 return 1;
5607 else if ((nonzero_bits (XEXP (x, 0), mode)
5608 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5609 return 1;
5610 else
5611 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5612 known_x, known_mode, known_ret);
5613
5614 case UMOD:
5615 /* The result must be <= the second operand. If the second operand
5616 has (or just might have) the high bit set, we know nothing about
5617 the number of sign bit copies. */
5618 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5619 return 1;
5620 else if ((nonzero_bits (XEXP (x, 1), mode)
5621 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5622 return 1;
5623 else
5624 return cached_num_sign_bit_copies (XEXP (x, 1), mode,
5625 known_x, known_mode, known_ret);
5626
5627 case DIV:
5628 /* Similar to unsigned division, except that we have to worry about
5629 the case where the divisor is negative, in which case we have
5630 to add 1. */
5631 result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5632 known_x, known_mode, known_ret);
5633 if (result > 1
5634 && (bitwidth > HOST_BITS_PER_WIDE_INT
5635 || (nonzero_bits (XEXP (x, 1), mode)
5636 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
5637 result--;
5638
5639 return result;
5640
5641 case MOD:
5642 result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5643 known_x, known_mode, known_ret);
5644 if (result > 1
5645 && (bitwidth > HOST_BITS_PER_WIDE_INT
5646 || (nonzero_bits (XEXP (x, 1), mode)
5647 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
5648 result--;
5649
5650 return result;
5651
5652 case ASHIFTRT:
5653 /* Shifts by a constant add to the number of bits equal to the
5654 sign bit. */
5655 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5656 known_x, known_mode, known_ret);
5657 if (CONST_INT_P (XEXP (x, 1))
5658 && INTVAL (XEXP (x, 1)) > 0
5659 && INTVAL (XEXP (x, 1)) < xmode_width)
5660 num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
5661
5662 return num0;
5663
5664 case ASHIFT:
5665 /* Left shifts destroy copies. */
5666 if (!CONST_INT_P (XEXP (x, 1))
5667 || INTVAL (XEXP (x, 1)) < 0
5668 || INTVAL (XEXP (x, 1)) >= (int) bitwidth
5669 || INTVAL (XEXP (x, 1)) >= xmode_width)
5670 return 1;
5671
5672 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5673 known_x, known_mode, known_ret);
5674 return MAX (1, num0 - INTVAL (XEXP (x, 1)));
5675
5676 case IF_THEN_ELSE:
5677 num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5678 known_x, known_mode, known_ret);
5679 num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
5680 known_x, known_mode, known_ret);
5681 return MIN (num0, num1);
5682
5683 case EQ: case NE: case GE: case GT: case LE: case LT:
5684 case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
5685 case GEU: case GTU: case LEU: case LTU:
5686 case UNORDERED: case ORDERED:
5687 /* If the constant is negative, take its 1's complement and remask.
5688 Then see how many zero bits we have. */
5689 nonzero = STORE_FLAG_VALUE;
5690 if (bitwidth <= HOST_BITS_PER_WIDE_INT
5691 && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5692 nonzero = (~nonzero) & GET_MODE_MASK (mode);
5693
5694 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
5695
5696 default:
5697 break;
5698 }
5699
5700 /* If we haven't been able to figure it out by one of the above rules,
5701 see if some of the high-order bits are known to be zero. If so,
5702 count those bits and return one less than that amount. If we can't
5703 safely compute the mask for this mode, always return BITWIDTH. */
5704
5705 bitwidth = GET_MODE_PRECISION (mode);
5706 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5707 return 1;
5708
5709 nonzero = nonzero_bits (x, mode);
5710 return nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))
5711 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
5712}
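
/* Worked example (illustrative, with hypothetical SImode operands): for
   (ashiftrt:SI X (const_int 24)), the ASHIFTRT case handled above
   guarantees at least 25 sign-bit copies, since the 24 shifted-in bits
   duplicate the sign bit of X and X itself contributes at least one copy.
   Conversely, for (ashift:SI X (const_int 8)) only
   num_sign_bit_copies (X) - 8 copies can still be guaranteed (never fewer
   than 1), because the left shift discards high-order copies.  */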
5713
5714/* Calculate the rtx_cost of a single instruction pattern. A return value of
5715 zero indicates an instruction pattern without a known cost. */
5716
5717int
5718pattern_cost (rtx pat, bool speed)
5719{
5720 int i, cost;
5721 rtx set;
5722
5723 /* Extract the single set rtx from the instruction pattern. We
5724 can't use single_set since we only have the pattern. We also
5725 consider PARALLELs of a normal set and a single comparison. In
5726 that case we use the cost of the non-comparison SET operation,
5727 which is most likely to be the real cost of this operation. */
5728 if (GET_CODE (pat) == SET)
5729 set = pat;
5730 else if (GET_CODE (pat) == PARALLEL)
5731 {
5732 set = NULL_RTX;
5733 rtx comparison = NULL_RTX;
5734
5735 for (i = 0; i < XVECLEN (pat, 0); i++)
5736 {
5737 rtx x = XVECEXP (pat, 0, i);
5738 if (GET_CODE (x) == SET)
5739 {
5740 if (GET_CODE (SET_SRC (x)) == COMPARE)
5741 {
5742 if (comparison)
5743 return 0;
5744 comparison = x;
5745 }
5746 else
5747 {
5748 if (set)
5749 return 0;
5750 set = x;
5751 }
5752 }
5753 }
5754
5755 if (!set && comparison)
5756 set = comparison;
5757
5758 if (!set)
5759 return 0;
5760 }
5761 else
5762 return 0;
5763
5764 cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)), speed);
5765 return cost > 0 ? cost : COSTS_N_INSNS (1);
5766}
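
/* Usage sketch (illustrative; the register numbers are hypothetical):

     rtx pat = gen_rtx_SET (gen_rtx_REG (SImode, 100),
                            gen_rtx_PLUS (SImode,
                                          gen_rtx_REG (SImode, 101),
                                          GEN_INT (4)));
     int cost = pattern_cost (pat, true);

   For a single SET like this, COST is the target's set_src_cost of the
   PLUS, with COSTS_N_INSNS (1) used as a fallback when that cost is not
   positive.  */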
5767
5768/* Calculate the cost of a single instruction. A return value of zero
5769 indicates an instruction pattern without a known cost. */
5770
5771int
5772insn_cost (rtx_insn *insn, bool speed)
5773{
5774 if (targetm.insn_cost)
5775 return targetm.insn_cost (insn, speed);
5776
5777 return pattern_cost (PATTERN (insn), speed);
5778}
5779
5780/* Return an estimate of the cost of computing SEQ. */
5781
5782unsigned
5783seq_cost (const rtx_insn *seq, bool speed)
5784{
5785 unsigned cost = 0;
5786 rtx set;
5787
5788 for (; seq; seq = NEXT_INSN (seq))
5789 {
5790 set = single_set (seq);
5791 if (set)
5792 cost += set_rtx_cost (set, speed);
5793 else if (NONDEBUG_INSN_P (seq))
5794 {
5795 int this_cost = insn_cost (CONST_CAST_RTX_INSN (seq), speed);
5796 if (this_cost > 0)
5797 cost += this_cost;
5798 else
5799 cost++;
5800 }
5801 }
5802
5803 return cost;
5804}
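
/* Usage sketch (illustrative): a typical caller compares the cost of a
   candidate sequence against some budget, e.g.

     start_sequence ();
     ... emit the candidate insns ...
     rtx_insn *seq = get_insns ();
     end_sequence ();
     if (seq_cost (seq, optimize_insn_for_speed_p ()) <= budget)
       emit_insn (seq);

   where BUDGET is whatever limit the caller has chosen.  Non-debug insns
   without a known cost each contribute 1 to the total.  */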
5805
5806/* Given an insn INSN and condition COND, return the condition in a
5807 canonical form to simplify testing by callers. Specifically:
5808
5809 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
5810 (2) Both operands will be machine operands.
5811 (3) If an operand is a constant, it will be the second operand.
5812 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
5813 for GE, GEU, and LEU.
5814
5815 If the condition cannot be understood, or is an inequality floating-point
5816 comparison which needs to be reversed, 0 will be returned.
5817
5818 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
5819
5820 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5821 insn used in locating the condition was found. If a replacement test
5822 of the condition is desired, it should be placed in front of that
5823 insn and we will be sure that the inputs are still valid.
5824
5825 If WANT_REG is nonzero, we wish the condition to be relative to that
5826 register, if possible. Therefore, do not canonicalize the condition
5827 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
5828 to be a compare to a CC mode register.
5829
5830 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
5831 and at INSN. */
5832
5833rtx
5834canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
5835 rtx_insn **earliest,
5836 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
5837{
5838 enum rtx_code code;
5839 rtx_insn *prev = insn;
5840 const_rtx set;
5841 rtx tem;
5842 rtx op0, op1;
5843 int reverse_code = 0;
5844 machine_mode mode;
5845 basic_block bb = BLOCK_FOR_INSN (insn);
5846
5847 code = GET_CODE (cond);
5848 mode = GET_MODE (cond);
5849 op0 = XEXP (cond, 0);
5850 op1 = XEXP (cond, 1);
5851
5852 if (reverse)
5853 code = reversed_comparison_code (cond, insn);
5854 if (code == UNKNOWN)
5855 return 0;
5856
5857 if (earliest)
5858 *earliest = insn;
5859
5860 /* If we are comparing a register with zero, see if the register is set
5861 in the previous insn to a COMPARE or a comparison operation. Perform
5862 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
5863 in cse.cc */
5864
5865 while ((GET_RTX_CLASS (code) == RTX_COMPARE
5866 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
5867 && op1 == CONST0_RTX (GET_MODE (op0))
5868 && op0 != want_reg)
5869 {
5870 /* Set nonzero when we find something of interest. */
5871 rtx x = 0;
5872
5873 /* If this is a COMPARE, pick up the two things being compared. */
5874 if (GET_CODE (op0) == COMPARE)
5875 {
5876 op1 = XEXP (op0, 1);
5877 op0 = XEXP (op0, 0);
5878 continue;
5879 }
5880 else if (!REG_P (op0))
5881 break;
5882
5883 /* Go back to the previous insn. Stop if it is not an INSN. We also
5884 stop if it isn't a single set or if it has a REG_INC note because
5885 we don't want to bother dealing with it. */
5886
5887 prev = prev_nonnote_nondebug_insn (prev);
5888
5889 if (prev == 0
5890 || !NONJUMP_INSN_P (prev)
5891 || FIND_REG_INC_NOTE (prev, NULL_RTX)
5892 /* In cfglayout mode, there do not have to be labels at the
5893 beginning of a block, or jumps at the end, so the previous
5894 conditions would not stop us when we reach bb boundary. */
5895 || BLOCK_FOR_INSN (prev) != bb)
5896 break;
5897
5898 set = set_of (op0, prev);
5899
5900 if (set
5901 && (GET_CODE (set) != SET
5902 || !rtx_equal_p (SET_DEST (set), op0)))
5903 break;
5904
5905 /* If this is setting OP0, get what it sets it to if it looks
5906 relevant. */
5907 if (set)
5908 {
5909 machine_mode inner_mode = GET_MODE (SET_DEST (set));
5910#ifdef FLOAT_STORE_FLAG_VALUE
5911 REAL_VALUE_TYPE fsfv;
5912#endif
5913
5914 /* ??? We may not combine comparisons done in a CCmode with
5915 comparisons not done in a CCmode. This is to aid targets
5916 like Alpha that have an IEEE compliant EQ instruction, and
5917 a non-IEEE compliant BEQ instruction. The use of CCmode is
5918 actually artificial, simply to prevent the combination, but
5919 should not affect other platforms.
5920
5921 However, we must allow VOIDmode comparisons to match either
5922 CCmode or non-CCmode comparison, because some ports have
5923 modeless comparisons inside branch patterns.
5924
5925 ??? This mode check should perhaps look more like the mode check
5926 in simplify_comparison in combine. */
5927 if (((GET_MODE_CLASS (mode) == MODE_CC)
5928 != (GET_MODE_CLASS (inner_mode) == MODE_CC))
5929 && mode != VOIDmode
5930 && inner_mode != VOIDmode)
5931 break;
5932 if (GET_CODE (SET_SRC (set)) == COMPARE
5933 || (((code == NE
5934 || (code == LT
5935 && val_signbit_known_set_p (inner_mode,
5936 STORE_FLAG_VALUE))
5937#ifdef FLOAT_STORE_FLAG_VALUE
5938 || (code == LT
5939 && SCALAR_FLOAT_MODE_P (inner_mode)
5940 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5941 REAL_VALUE_NEGATIVE (fsfv)))
5942#endif
5943 ))
5944 && COMPARISON_P (SET_SRC (set))))
5945 x = SET_SRC (set);
5946 else if (((code == EQ
5947 || (code == GE
5948 && val_signbit_known_set_p (inner_mode,
5949 STORE_FLAG_VALUE))
5950#ifdef FLOAT_STORE_FLAG_VALUE
5951 || (code == GE
5952 && SCALAR_FLOAT_MODE_P (inner_mode)
5953 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5954 REAL_VALUE_NEGATIVE (fsfv)))
5955#endif
5956 ))
5957 && COMPARISON_P (SET_SRC (set)))
5958 {
5959 reverse_code = 1;
5960 x = SET_SRC (set);
5961 }
5962 else if ((code == EQ || code == NE)
5963 && GET_CODE (SET_SRC (set)) == XOR)
5964 /* Handle sequences like:
5965
5966 (set op0 (xor X Y))
5967 ...(eq|ne op0 (const_int 0))...
5968
5969 in which case:
5970
5971 (eq op0 (const_int 0)) reduces to (eq X Y)
5972 (ne op0 (const_int 0)) reduces to (ne X Y)
5973
5974 This is the form used by MIPS16, for example. */
5975 x = SET_SRC (set);
5976 else
5977 break;
5978 }
5979
5980 else if (reg_set_p (op0, prev))
5981 /* If this sets OP0, but not directly, we have to give up. */
5982 break;
5983
5984 if (x)
5985 {
5986 /* If the caller is expecting the condition to be valid at INSN,
5987 make sure X doesn't change before INSN. */
5988 if (valid_at_insn_p)
5989 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
5990 break;
5991 if (COMPARISON_P (x))
5992 code = GET_CODE (x);
5993 if (reverse_code)
5994 {
5995 code = reversed_comparison_code (x, prev);
5996 if (code == UNKNOWN)
5997 return 0;
5998 reverse_code = 0;
5999 }
6000
6001 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
6002 if (earliest)
6003 *earliest = prev;
6004 }
6005 }
6006
6007 /* If constant is first, put it last. */
6008 if (CONSTANT_P (op0))
6009 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
6010
6011 /* If OP0 is the result of a comparison, we weren't able to find what
6012 was really being compared, so fail. */
6013 if (!allow_cc_mode
6014 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6015 return 0;
6016
6017 /* Canonicalize any ordered comparison with integers involving equality
6018 if we can do computations in the relevant mode and we do not
6019 overflow. */
6020
6021 scalar_int_mode op0_mode;
6022 if (CONST_INT_P (op1)
6023 && is_a <scalar_int_mode> (GET_MODE (op0), &op0_mode)
6024 && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT)
6025 {
6026 HOST_WIDE_INT const_val = INTVAL (op1);
6027 unsigned HOST_WIDE_INT uconst_val = const_val;
6028 unsigned HOST_WIDE_INT max_val
6029 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (op0_mode);
6030
6031 switch (code)
6032 {
6033 case LE:
6034 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
6035 code = LT, op1 = gen_int_mode (const_val + 1, op0_mode);
6036 break;
6037
6038 /* When cross-compiling, const_val might be sign-extended from
6039 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
6040 case GE:
6041 if ((const_val & max_val)
6042 != (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (op0_mode) - 1)))
6043 code = GT, op1 = gen_int_mode (const_val - 1, op0_mode);
6044 break;
6045
6046 case LEU:
6047 if (uconst_val < max_val)
6048 code = LTU, op1 = gen_int_mode (uconst_val + 1, op0_mode);
6049 break;
6050
6051 case GEU:
6052 if (uconst_val != 0)
6053 code = GTU, op1 = gen_int_mode (uconst_val - 1, op0_mode);
6054 break;
6055
6056 default:
6057 break;
6058 }
6059 }
6060
6061 /* We promised to return a comparison. */
6062 rtx ret = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
6063 if (COMPARISON_P (ret))
6064 return ret;
6065 return 0;
6066}
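
/* Example of rule (4) above (illustrative): given COND
   (le (reg:SI 100) (const_int 4)), the returned condition is
   (lt (reg:SI 100) (const_int 5)), and (geu X (const_int 7)) becomes
   (gtu X (const_int 6)).  A leading constant is moved to the second
   operand first, so (gt (const_int 0) (reg:SI 100)) is rewritten as
   (lt (reg:SI 100) (const_int 0)) before these adjustments.  */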
6067
6068/* Given a jump insn JUMP, return the condition that will cause it to branch
6069 to its JUMP_LABEL. If the condition cannot be understood, or is an
6070 inequality floating-point comparison which needs to be reversed, 0 will
6071 be returned.
6072
6073 If EARLIEST is nonzero, it is a pointer to a place where the earliest
6074 insn used in locating the condition was found. If a replacement test
6075 of the condition is desired, it should be placed in front of that
6076 insn and we will be sure that the inputs are still valid. If EARLIEST
6077 is null, the returned condition will be valid at INSN.
6078
6079 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
6080 compare CC mode register.
6081
6082 VALID_AT_INSN_P is the same as for canonicalize_condition. */
6083
6084rtx
6085get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
6086 int valid_at_insn_p)
6087{
6088 rtx cond;
6089 int reverse;
6090 rtx set;
6091
6092 /* If this is not a standard conditional jump, we can't parse it. */
6093 if (!JUMP_P (jump)
6094 || ! any_condjump_p (jump))
6095 return 0;
6096 set = pc_set (jump);
6097
6098 cond = XEXP (SET_SRC (set), 0);
6099
6100 /* If this branches to JUMP_LABEL when the condition is false, reverse
6101 the condition. */
6102 reverse
6103 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
6104 && label_ref_label (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);
6105
6106 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
6107 allow_cc_mode, valid_at_insn_p);
6108}
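
/* Usage sketch (illustrative): a pass that wants the comparison
   controlling a conditional jump, valid at the jump itself, can use

     rtx_insn *earliest;
     rtx cond = get_condition (jump, &earliest, 0, 1);

   COND is then a canonical comparison such as (ne (reg:SI 100)
   (const_int 0)), or 0 if JUMP is not a standard conditional jump or the
   condition could not be reconstructed.  */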
6109
6110/* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
6111 TARGET_MODE_REP_EXTENDED.
6112
6113 Note that we assume that the property of
6114 TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
6115 narrower than mode B. I.e., if A is a mode narrower than B then in
6116 order to be able to operate on it in mode B, mode A needs to
6117 satisfy the requirements set by the representation of mode B. */
6118
6119static void
6120init_num_sign_bit_copies_in_rep (void)
6121{
6122 opt_scalar_int_mode in_mode_iter;
6123 scalar_int_mode mode;
6124
6125 FOR_EACH_MODE_IN_CLASS (in_mode_iter, MODE_INT)
6126 FOR_EACH_MODE_UNTIL (mode, in_mode_iter.require ())
6127 {
6128 scalar_int_mode in_mode = in_mode_iter.require ();
6129 scalar_int_mode i;
6130
6131 /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
6132 extends to the next widest mode. */
6133 gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
6134 || GET_MODE_WIDER_MODE (mode).require () == in_mode);
6135
6136 /* We are in in_mode. Count how many bits outside of mode
6137 have to be copies of the sign-bit. */
6138 FOR_EACH_MODE (i, mode, in_mode)
6139 {
6140 /* This must always exist (for the last iteration it will be
6141 IN_MODE). */
6142 scalar_int_mode wider = GET_MODE_WIDER_MODE (i).require ();
6143
6144 if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
6145 /* We can only check sign-bit copies starting from the
6146 top-bit. In order to be able to check the bits we
6147 have already seen we pretend that subsequent bits
6148 have to be sign-bit copies too. */
6149 || num_sign_bit_copies_in_rep [in_mode][mode])
6150 num_sign_bit_copies_in_rep [in_mode][mode]
6151 += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
6152 }
6153 }
6154}
6155
6156/* Suppose that truncation from the machine mode of X to MODE is not a
6157 no-op. See if there is anything special about X so that we can
6158 assume it already contains a truncated value of MODE. */
6159
6160bool
6161truncated_to_mode (machine_mode mode, const_rtx x)
6162{
6163 /* This register has already been used in MODE without explicit
6164 truncation. */
6165 if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
6166 return true;
6167
6168 /* See if we already satisfy the requirements of MODE. If yes we
6169 can just switch to MODE. */
6170 if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
6171 && (num_sign_bit_copies (x, GET_MODE (x))
6172 >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
6173 return true;
6174
6175 return false;
6176}
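
/* Example (illustrative): on a target whose mode_rep_extended hook
   returns SIGN_EXTEND for (SImode, DImode), as 64-bit MIPS-like targets
   do, num_sign_bit_copies_in_rep[DImode][SImode] is 32.  A DImode value
   with at least 33 sign-bit copies therefore already satisfies the
   SImode representation, and truncated_to_mode returns true for it.  */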
6177
6178/* Return true if RTX code CODE has a single sequence of zero or more
6179 "e" operands and no rtvec operands. Initialize its rtx_all_subrtx_bounds
6180 entry in that case. */
6181
6182static bool
6183setup_reg_subrtx_bounds (unsigned int code)
6184{
6185 const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
6186 unsigned int i = 0;
6187 for (; format[i] != 'e'; ++i)
6188 {
6189 if (!format[i])
6190 /* No subrtxes. Leave start and count as 0. */
6191 return true;
6192 if (format[i] == 'E' || format[i] == 'V')
6193 return false;
6194 }
6195
6196 /* Record the sequence of 'e's. */
6197 rtx_all_subrtx_bounds[code].start = i;
6198 do
6199 ++i;
6200 while (format[i] == 'e');
6201 rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
6202 /* rtl-iter.h relies on this. */
6203 gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);
6204
6205 for (; format[i]; ++i)
6206 if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
6207 return false;
6208
6209 return true;
6210}
6211
6212/* Initialize rtx_all_subrtx_bounds. */
6213void
6214init_rtlanal (void)
6215{
6216 int i;
6217 for (i = 0; i < NUM_RTX_CODE; i++)
6218 {
6219 if (!setup_reg_subrtx_bounds (i))
6220 rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
6221 if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
6222 rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
6223 }
6224
6225 init_num_sign_bit_copies_in_rep ();
6226}
6227
6228/* Check whether this is a constant pool constant. */
6229bool
6230constant_pool_constant_p (rtx x)
6231{
6232 x = avoid_constant_pool_reference (x);
6233 return CONST_DOUBLE_P (x);
6234}
6235
6236/* If M is a bitmask that selects a field of low-order bits within an item but
6237 not the entire word, return the length of the field. Return -1 otherwise.
6238 M is used in machine mode MODE. */
6239
6240int
6241low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
6242{
6243 if (mode != VOIDmode)
6244 {
6245 if (!HWI_COMPUTABLE_MODE_P (mode))
6246 return -1;
6247 m &= GET_MODE_MASK (mode);
6248 }
6249
6250 return exact_log2 (m + 1);
6251}
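
/* Examples (illustrative): low_bitmask_len (SImode, 0x7f) is 7, while
   low_bitmask_len (SImode, 0xff00) is -1 because that mask does not
   start at bit 0.  With VOIDmode the mask is used as-is, without first
   applying a mode mask.  */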
6252
6253/* Return the mode of MEM's address. */
6254
6255scalar_int_mode
6256get_address_mode (rtx mem)
6257{
6258 machine_mode mode;
6259
6260 gcc_assert (MEM_P (mem));
6261 mode = GET_MODE (XEXP (mem, 0));
6262 if (mode != VOIDmode)
6263 return as_a <scalar_int_mode> (mode);
6264 return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
6265}
6266
6267/* Split up a CONST_DOUBLE or integer constant rtx
6268 into two rtx's for single words,
6269 storing in *FIRST the word that comes first in memory in the target
6270 and in *SECOND the other.
6271
6272 TODO: This function needs to be rewritten to work on any size
6273 integer. */
6274
6275void
6276split_double (rtx value, rtx *first, rtx *second)
6277{
6278 if (CONST_INT_P (value))
6279 {
6280 if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
6281 {
6282 /* In this case the CONST_INT holds both target words.
6283 Extract the bits from it into two word-sized pieces.
6284 Sign extend each half to HOST_WIDE_INT. */
6285 unsigned HOST_WIDE_INT low, high;
6286 unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
6287 unsigned bits_per_word = BITS_PER_WORD;
6288
6289 /* Set sign_bit to the most significant bit of a word. */
6290 sign_bit = 1;
6291 sign_bit <<= bits_per_word - 1;
6292
6293 /* Set mask so that all bits of the word are set. We could
6294 have used 1 << BITS_PER_WORD instead of basing the
6295 calculation on sign_bit. However, on machines where
6296 HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
6297 compiler warning, even though the code would never be
6298 executed. */
6299 mask = sign_bit << 1;
6300 mask--;
6301
6302 /* Set sign_extend as any remaining bits. */
6303 sign_extend = ~mask;
6304
6305 /* Pick the lower word and sign-extend it. */
6306 low = INTVAL (value);
6307 low &= mask;
6308 if (low & sign_bit)
6309 low |= sign_extend;
6310
6311 /* Pick the higher word, shifted to the least significant
6312 bits, and sign-extend it. */
6313 high = INTVAL (value);
6314 high >>= bits_per_word - 1;
6315 high >>= 1;
6316 high &= mask;
6317 if (high & sign_bit)
6318 high |= sign_extend;
6319
6320 /* Store the words in the target machine order. */
6321 if (WORDS_BIG_ENDIAN)
6322 {
6323 *first = GEN_INT (high);
6324 *second = GEN_INT (low);
6325 }
6326 else
6327 {
6328 *first = GEN_INT (low);
6329 *second = GEN_INT (high);
6330 }
6331 }
6332 else
6333 {
6334 /* The rule for using CONST_INT for a wider mode
6335 is that we regard the value as signed.
6336 So sign-extend it. */
6337 rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
6338 if (WORDS_BIG_ENDIAN)
6339 {
6340 *first = high;
6341 *second = value;
6342 }
6343 else
6344 {
6345 *first = value;
6346 *second = high;
6347 }
6348 }
6349 }
6350 else if (GET_CODE (value) == CONST_WIDE_INT)
6351 {
6352 /* All of this is scary code and needs to be converted to
6353 properly work with any size integer. */
6354 gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
6355 if (WORDS_BIG_ENDIAN)
6356 {
6357 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
6358 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
6359 }
6360 else
6361 {
6362 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
6363 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
6364 }
6365 }
6366 else if (!CONST_DOUBLE_P (value))
6367 {
6368 if (WORDS_BIG_ENDIAN)
6369 {
6370 *first = const0_rtx;
6371 *second = value;
6372 }
6373 else
6374 {
6375 *first = value;
6376 *second = const0_rtx;
6377 }
6378 }
6379 else if (GET_MODE (value) == VOIDmode
6380 /* This is the old way we did CONST_DOUBLE integers. */
6381 || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
6382 {
6383 /* In an integer, the words are defined as most and least significant.
6384 So order them by the target's convention. */
6385 if (WORDS_BIG_ENDIAN)
6386 {
6387 *first = GEN_INT (CONST_DOUBLE_HIGH (value));
6388 *second = GEN_INT (CONST_DOUBLE_LOW (value));
6389 }
6390 else
6391 {
6392 *first = GEN_INT (CONST_DOUBLE_LOW (value));
6393 *second = GEN_INT (CONST_DOUBLE_HIGH (value));
6394 }
6395 }
6396 else
6397 {
6398 long l[2];
6399
6400 /* Note, this converts the REAL_VALUE_TYPE to the target's
6401 format, splits up the floating point double and outputs
6402 exactly 32 bits of it into each of l[0] and l[1] --
6403 not necessarily BITS_PER_WORD bits. */
6404 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (value), l);
6405
6406 /* If 32 bits is an entire word for the target, but not for the host,
6407 then sign-extend on the host so that the number will look the same
6408 way on the host that it would on the target. See for instance
6409 simplify_unary_operation. The #if is needed to avoid compiler
6410 warnings. */
6411
6412#if HOST_BITS_PER_LONG > 32
6413 if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
6414 {
6415 if (l[0] & ((long) 1 << 31))
6416 l[0] |= ((unsigned long) (-1) << 32);
6417 if (l[1] & ((long) 1 << 31))
6418 l[1] |= ((unsigned long) (-1) << 32);
6419 }
6420#endif
6421
6422 *first = GEN_INT (l[0]);
6423 *second = GEN_INT (l[1]);
6424 }
6425}
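
/* Example (illustrative, assuming 32-bit words, little-endian word order
   and a 64-bit HOST_WIDE_INT): splitting (const_int 0x100000002) gives
   *FIRST = (const_int 2) and *SECOND = (const_int 1); with
   WORDS_BIG_ENDIAN the two results are swapped.  */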
6426
6427/* Return true if X is a sign_extract or zero_extract from the least
6428 significant bit. */
6429
6430static bool
6431lsb_bitfield_op_p (rtx x)
6432{
6433 if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
6434 {
6435 machine_mode mode = GET_MODE (XEXP (x, 0));
6436 HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
6437 HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
6438 poly_int64 remaining_bits = GET_MODE_PRECISION (mode) - len;
6439
6440 return known_eq (pos, BITS_BIG_ENDIAN ? remaining_bits : 0);
6441 }
6442 return false;
6443}
6444
6445/* Strip outer address "mutations" from LOC and return a pointer to the
6446 inner value. If OUTER_CODE is nonnull, store the code of the innermost
6447 stripped expression there.
6448
6449 "Mutations" either convert between modes or apply some kind of
6450 extension, truncation or alignment. */
6451
6452rtx *
6453strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
6454{
6455 for (;;)
6456 {
6457 enum rtx_code code = GET_CODE (*loc);
6458 if (GET_RTX_CLASS (code) == RTX_UNARY)
6459 /* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
6460 used to convert between pointer sizes. */
6461 loc = &XEXP (*loc, 0);
6462 else if (lsb_bitfield_op_p (*loc))
6463 /* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
6464 acts as a combined truncation and extension. */
6465 loc = &XEXP (*loc, 0);
6466 else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
6467 /* (and ... (const_int -X)) is used to align to X bytes. */
6468 loc = &XEXP (*loc, 0);
6469 else if (code == SUBREG
6470 && !OBJECT_P (SUBREG_REG (*loc))
6471 && subreg_lowpart_p (*loc))
6472 /* (subreg (operator ...) ...) inside an address is used for mode
6473 conversion too. */
6474 loc = &SUBREG_REG (*loc);
6475 else
6476 return loc;
6477 if (outer_code)
6478 *outer_code = code;
6479 }
6480}
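
/* Example (illustrative): for an address of the form
   (and:DI (plus:DI (reg:DI 100) (const_int 24)) (const_int -8))
   this returns a pointer to the PLUS and, if OUTER_CODE is nonnull,
   stores AND in it, since the outer AND merely enforces 8-byte
   alignment.  */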
6481
6482/* Return true if CODE applies some kind of scale. The scaled value is
6483 the first operand and the scale is the second. */
6484
6485static bool
6486binary_scale_code_p (enum rtx_code code)
6487{
6488 return (code == MULT
6489 || code == ASHIFT
6490 /* Needed by ARM targets. */
6491 || code == ASHIFTRT
6492 || code == LSHIFTRT
6493 || code == ROTATE
6494 || code == ROTATERT);
6495}
6496
6497/* If *INNER can be interpreted as a base, return a pointer to the inner term
6498 (see address_info). Return null otherwise. */
6499
6500static rtx *
6501get_base_term (rtx *inner)
6502{
6503 if (GET_CODE (*inner) == LO_SUM)
6504 inner = strip_address_mutations (&XEXP (*inner, 0));
6505 if (REG_P (*inner)
6506 || MEM_P (*inner)
6507 || GET_CODE (*inner) == SUBREG
6508 || GET_CODE (*inner) == SCRATCH)
6509 return inner;
6510 return 0;
6511}
6512
6513/* If *INNER can be interpreted as an index, return a pointer to the inner term
6514 (see address_info). Return null otherwise. */
6515
6516static rtx *
6517get_index_term (rtx *inner)
6518{
6519 /* At present, only constant scales are allowed. */
6520 if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
6521 inner = strip_address_mutations (&XEXP (*inner, 0));
6522 if (REG_P (*inner)
6523 || MEM_P (*inner)
6524 || GET_CODE (*inner) == SUBREG
6525 || GET_CODE (*inner) == SCRATCH)
6526 return inner;
6527 return 0;
6528}
6529
6530/* Set the segment part of address INFO to LOC, given that INNER is the
6531 unmutated value. */
6532
6533static void
6534set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
6535{
6536 gcc_assert (!info->segment);
6537 info->segment = loc;
6538 info->segment_term = inner;
6539}
6540
6541/* Set the base part of address INFO to LOC, given that INNER is the
6542 unmutated value. */
6543
6544static void
6545set_address_base (struct address_info *info, rtx *loc, rtx *inner)
6546{
6547 gcc_assert (!info->base);
6548 info->base = loc;
6549 info->base_term = inner;
6550}
6551
6552/* Set the index part of address INFO to LOC, given that INNER is the
6553 unmutated value. */
6554
6555static void
6556set_address_index (struct address_info *info, rtx *loc, rtx *inner)
6557{
6558 gcc_assert (!info->index);
6559 info->index = loc;
6560 info->index_term = inner;
6561}
6562
6563/* Set the displacement part of address INFO to LOC, given that INNER
6564 is the constant term. */
6565
6566static void
6567set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
6568{
6569 gcc_assert (!info->disp);
6570 info->disp = loc;
6571 info->disp_term = inner;
6572}
6573
6574/* INFO->INNER describes a {PRE,POST}_{INC,DEC} address. Set up the
6575 rest of INFO accordingly. */
6576
6577static void
6578decompose_incdec_address (struct address_info *info)
6579{
6580 info->autoinc_p = true;
6581
6582 rtx *base = &XEXP (*info->inner, 0);
6583 set_address_base (info, base, base);
6584 gcc_checking_assert (info->base == info->base_term);
6585
6586 /* These addresses are only valid when the size of the addressed
6587 value is known. */
6588 gcc_checking_assert (info->mode != VOIDmode);
6589}
6590
6591/* INFO->INNER describes a {PRE,POST}_MODIFY address. Set up the rest
6592 of INFO accordingly. */
6593
6594static void
6595decompose_automod_address (struct address_info *info)
6596{
6597 info->autoinc_p = true;
6598
6599 rtx *base = &XEXP (*info->inner, 0);
6600 set_address_base (info, base, base);
6601 gcc_checking_assert (info->base == info->base_term);
6602
6603 rtx plus = XEXP (*info->inner, 1);
6604 gcc_assert (GET_CODE (plus) == PLUS);
6605
6606 info->base_term2 = &XEXP (plus, 0);
6607 gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));
6608
6609 rtx *step = &XEXP (plus, 1);
6610 rtx *inner_step = strip_address_mutations (step);
6611 if (CONSTANT_P (*inner_step))
6612 set_address_disp (info, step, inner_step);
6613 else
6614 set_address_index (info, step, inner_step);
6615}
6616
6617/* Treat *LOC as a tree of PLUS operands and store pointers to the summed
6618 values in [PTR, END). Return a pointer to the end of the used array. */
6619
6620static rtx **
6621extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
6622{
6623 rtx x = *loc;
6624 if (GET_CODE (x) == PLUS)
6625 {
6626 ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
6627 ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
6628 }
6629 else
6630 {
6631 gcc_assert (ptr != end);
6632 *ptr++ = loc;
6633 }
6634 return ptr;
6635}
6636
6637/* Evaluate the likelihood of X being a base or index value, returning
6638 positive if it is likely to be a base, negative if it is likely to be
6639 an index, and 0 if we can't tell. Make the magnitude of the return
6640 value reflect the amount of confidence we have in the answer.
6641
6642 MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1. */
6643
6644static int
6645baseness (rtx x, machine_mode mode, addr_space_t as,
6646 enum rtx_code outer_code, enum rtx_code index_code)
6647{
6648 /* Believe *_POINTER unless the address shape requires otherwise. */
6649 if (REG_P (x) && REG_POINTER (x))
6650 return 2;
6651 if (MEM_P (x) && MEM_POINTER (x))
6652 return 2;
6653
6654 if (REG_P (x) && HARD_REGISTER_P (x))
6655 {
6656 /* X is a hard register. If it only fits one of the base
6657 or index classes, choose that interpretation. */
6658 int regno = REGNO (x);
6659 bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
6660 bool index_p = REGNO_OK_FOR_INDEX_P (regno);
6661 if (base_p != index_p)
6662 return base_p ? 1 : -1;
6663 }
6664 return 0;
6665}
6666
6667/* INFO->INNER describes a normal, non-automodified address.
6668 Fill in the rest of INFO accordingly. */
6669
6670static void
6671decompose_normal_address (struct address_info *info)
6672{
6673 /* Treat the address as the sum of up to four values. */
6674 rtx *ops[4];
6675 size_t n_ops = extract_plus_operands (info->inner, ops,
6676 ops + ARRAY_SIZE (ops)) - ops;
6677
6678 /* If there is more than one component, any base component is in a PLUS. */
6679 if (n_ops > 1)
6680 info->base_outer_code = PLUS;
6681
6682 /* Try to classify each sum operand now. Leave those that could be
6683 either a base or an index in OPS. */
6684 rtx *inner_ops[4];
6685 size_t out = 0;
6686 for (size_t in = 0; in < n_ops; ++in)
6687 {
6688 rtx *loc = ops[in];
6689 rtx *inner = strip_address_mutations (loc);
6690 if (CONSTANT_P (*inner))
6691 set_address_disp (info, loc, inner);
6692 else if (GET_CODE (*inner) == UNSPEC)
6693 set_address_segment (info, loc, inner);
6694 else
6695 {
6696 /* The only other possibilities are a base or an index. */
6697 rtx *base_term = get_base_term (inner);
6698 rtx *index_term = get_index_term (inner);
6699 gcc_assert (base_term || index_term);
6700 if (!base_term)
6701 set_address_index (info, loc, index_term);
6702 else if (!index_term)
6703 set_address_base (info, loc, base_term);
6704 else
6705 {
6706 gcc_assert (base_term == index_term);
6707 ops[out] = loc;
6708 inner_ops[out] = base_term;
6709 ++out;
6710 }
6711 }
6712 }
6713
6714 /* Classify the remaining OPS members as bases and indexes. */
6715 if (out == 1)
6716 {
6717 /* If we haven't seen a base or an index yet, assume that this is
6718 the base. If we were confident that another term was the base
6719 or index, treat the remaining operand as the other kind. */
6720 if (!info->base)
6721 set_address_base (info, ops[0], inner_ops[0]);
6722 else
6723 set_address_index (info, ops[0], inner_ops[0]);
6724 }
6725 else if (out == 2)
6726 {
6727 /* In the event of a tie, assume the base comes first. */
6728 if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
6729 GET_CODE (*ops[1]))
6730 >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
6731 GET_CODE (*ops[0])))
6732 {
6733 set_address_base (info, ops[0], inner_ops[0]);
6734 set_address_index (info, ops[1], inner_ops[1]);
6735 }
6736 else
6737 {
6738 set_address_base (info, ops[1], inner_ops[1]);
6739 set_address_index (info, ops[0], inner_ops[0]);
6740 }
6741 }
6742 else
6743 gcc_assert (out == 0);
6744}
6745
6746/* Describe address *LOC in *INFO. MODE is the mode of the addressed value,
6747 or VOIDmode if not known. AS is the address space associated with LOC.
6748 OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise. */
6749
6750void
6751decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
6752 addr_space_t as, enum rtx_code outer_code)
6753{
6754 memset (info, 0, sizeof (*info));
6755 info->mode = mode;
6756 info->as = as;
6757 info->addr_outer_code = outer_code;
6758 info->outer = loc;
6759 info->inner = strip_address_mutations (loc, &outer_code);
6760 info->base_outer_code = outer_code;
6761 switch (GET_CODE (*info->inner))
6762 {
6763 case PRE_DEC:
6764 case PRE_INC:
6765 case POST_DEC:
6766 case POST_INC:
6767 decompose_incdec_address (info);
6768 break;
6769
6770 case PRE_MODIFY:
6771 case POST_MODIFY:
6772 decompose_automod_address (info);
6773 break;
6774
6775 default:
6776 decompose_normal_address (info);
6777 break;
6778 }
6779}
6780
6781/* Describe address operand LOC in INFO. */
6782
6783void
6784decompose_lea_address (struct address_info *info, rtx *loc)
6785{
6786 decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
6787}
6788
6789/* Describe the address of MEM X in INFO. */
6790
6791void
6792decompose_mem_address (struct address_info *info, rtx x)
6793{
6794 gcc_assert (MEM_P (x));
6795 decompose_address (info, &XEXP (x, 0), GET_MODE (x),
6796 MEM_ADDR_SPACE (x), MEM);
6797}
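
/* Usage sketch (illustrative; register numbers hypothetical): for a MEM
   whose address is
   (plus:SI (plus:SI (mult:SI (reg:SI 101) (const_int 4)) (reg:SI 100))
            (const_int 16))
   decompose_mem_address fills INFO so that *INFO->base_term is
   (reg:SI 100), *INFO->index_term is (reg:SI 101) with an index scale
   of 4, and *INFO->disp is (const_int 16).  */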
6798
6799/* Update INFO after a change to the address it describes. */
6800
6801void
6802update_address (struct address_info *info)
6803{
6804 decompose_address (info, info->outer, info->mode, info->as,
6805 info->addr_outer_code);
6806}
6807
6808/* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
6809 more complicated than that. */
6810
6811HOST_WIDE_INT
6812get_index_scale (const struct address_info *info)
6813{
6814 rtx index = *info->index;
6815 if (GET_CODE (index) == MULT
6816 && CONST_INT_P (XEXP (index, 1))
6817 && info->index_term == &XEXP (index, 0))
6818 return INTVAL (XEXP (index, 1));
6819
6820 if (GET_CODE (index) == ASHIFT
6821 && CONST_INT_P (XEXP (index, 1))
6822 && info->index_term == &XEXP (index, 0))
6823 return HOST_WIDE_INT_1 << INTVAL (XEXP (index, 1));
6824
6825 if (info->index == info->index_term)
6826 return 1;
6827
6828 return 0;
6829}
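
/* Examples (illustrative): an index of (mult (reg:SI 100) (const_int 8))
   or (ashift (reg:SI 100) (const_int 3)) gives a scale of 8, a bare
   register gives 1, and anything more complicated gives 0.  */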
6830
6831/* Return the "index code" of INFO, in the form required by
6832 ok_for_base_p_1. */
6833
6834enum rtx_code
6835get_index_code (const struct address_info *info)
6836{
6837 if (info->index)
6838 return GET_CODE (*info->index);
6839
6840 if (info->disp)
6841 return GET_CODE (*info->disp);
6842
6843 return SCRATCH;
6844}
6845
6846/* Return true if RTL X contains a SYMBOL_REF. */
6847
6848bool
6849contains_symbol_ref_p (const_rtx x)
6850{
6851 subrtx_iterator::array_type array;
6852 FOR_EACH_SUBRTX (iter, array, x, ALL)
6853 if (SYMBOL_REF_P (*iter))
6854 return true;
6855
6856 return false;
6857}
6858
6859/* Return true if RTL X contains a SYMBOL_REF or LABEL_REF. */
6860
6861bool
6862contains_symbolic_reference_p (const_rtx x)
6863{
6864 subrtx_iterator::array_type array;
6865 FOR_EACH_SUBRTX (iter, array, x, ALL)
6866 if (SYMBOL_REF_P (*iter) || GET_CODE (*iter) == LABEL_REF)
6867 return true;
6868
6869 return false;
6870}
6871
6872/* Return true if RTL X contains a constant pool address. */
6873
6874bool
6875contains_constant_pool_address_p (const_rtx x)
6876{
6877 subrtx_iterator::array_type array;
6878 FOR_EACH_SUBRTX (iter, array, x, ALL)
6879 if (SYMBOL_REF_P (*iter) && CONSTANT_POOL_ADDRESS_P (*iter))
6880 return true;
6881
6882 return false;
6883}
6884
6885
6886/* Return true if X contains a thread-local symbol. */
6887
6888bool
6889tls_referenced_p (const_rtx x)
6890{
6891 if (!targetm.have_tls)
6892 return false;
6893
6894 subrtx_iterator::array_type array;
6895 FOR_EACH_SUBRTX (iter, array, x, ALL)
6896 if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
6897 return true;
6898 return false;
6899}
6900
6901/* Recursively process X, which is part of INSN, and add REG_INC notes where necessary. */
6902void
6903add_auto_inc_notes (rtx_insn *insn, rtx x)
6904{
6905 enum rtx_code code = GET_CODE (x);
6906 const char *fmt;
6907 int i, j;
6908
6909 if (code == MEM && auto_inc_p (XEXP (x, 0)))
6910 {
6911 add_reg_note (insn, REG_INC, XEXP (XEXP (x, 0), 0));
6912 return;
6913 }
6914
6915 /* Scan all X sub-expressions. */
6916 fmt = GET_RTX_FORMAT (code);
6917 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6918 {
6919 if (fmt[i] == 'e')
6920 add_auto_inc_notes (insn, XEXP (x, i));
6921 else if (fmt[i] == 'E')
6922 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6923 add_auto_inc_notes (insn, XVECEXP (x, i, j));
6924 }
6925}
6926
6927/* Return true if X is a hard register variable declared with register asm. */
6928
6929bool
6930register_asm_p (const_rtx x)
6931{
6932 return (REG_P (x)
6933 && REG_EXPR (x) != NULL_TREE
6934 && HAS_DECL_ASSEMBLER_NAME_P (REG_EXPR (x))
6935 && DECL_ASSEMBLER_NAME_SET_P (REG_EXPR (x))
6936 && DECL_REGISTER (REG_EXPR (x)));
6937}
6938
6939/* Return true if, for all OP of mode OP_MODE:
6940
6941 (vec_select:RESULT_MODE OP SEL)
6942
6943 is equivalent to the highpart RESULT_MODE of OP. */
6944
6945bool
6946vec_series_highpart_p (machine_mode result_mode, machine_mode op_mode, rtx sel)
6947{
6948 int nunits;
6949 if (GET_MODE_NUNITS (op_mode).is_constant (&nunits)
6950 && targetm.can_change_mode_class (op_mode, result_mode, ALL_REGS))
6951 {
6952 int offset = BYTES_BIG_ENDIAN ? 0 : nunits - XVECLEN (sel, 0);
6953 return rtvec_series_p (XVEC (sel, 0), offset);
6954 }
6955 return false;
6956}
6957
6958/* Return true if, for all OP of mode OP_MODE:
6959
6960 (vec_select:RESULT_MODE OP SEL)
6961
6962 is equivalent to the lowpart RESULT_MODE of OP. */
6963
6964bool
6965vec_series_lowpart_p (machine_mode result_mode, machine_mode op_mode, rtx sel)
6966{
6967 int nunits;
6968 if (GET_MODE_NUNITS (op_mode).is_constant (&nunits)
6969 && targetm.can_change_mode_class (op_mode, result_mode, ALL_REGS))
6970 {
6971 int offset = BYTES_BIG_ENDIAN ? nunits - XVECLEN (sel, 0) : 0;
6972 return rtvec_series_p (XVEC (sel, 0), offset);
6973 }
6974 return false;
6975}
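
/* Example (illustrative): with OP_MODE V4SImode and RESULT_MODE V2SImode
   on a !BYTES_BIG_ENDIAN target, a selector of [0, 1] satisfies
   vec_series_lowpart_p and a selector of [2, 3] satisfies
   vec_series_highpart_p, provided the target allows the mode change for
   ALL_REGS.  */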
6976
6977/* Return true if X contains a paradoxical subreg. */
6978
6979bool
6980contains_paradoxical_subreg_p (rtx x)
6981{
6982 subrtx_var_iterator::array_type array;
6983 FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
6984 {
6985 x = *iter;
6986 if (SUBREG_P (x) && paradoxical_subreg_p (x))
6987 return true;
6988 }
6989 return false;
6990}
6991
