1 | /* Optimize by combining instructions for GNU compiler. |
2 | Copyright (C) 1987-2024 Free Software Foundation, Inc. |
3 | |
4 | This file is part of GCC. |
5 | |
6 | GCC is free software; you can redistribute it and/or modify it under |
7 | the terms of the GNU General Public License as published by the Free |
8 | Software Foundation; either version 3, or (at your option) any later |
9 | version. |
10 | |
11 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or |
13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
14 | for more details. |
15 | |
16 | You should have received a copy of the GNU General Public License |
17 | along with GCC; see the file COPYING3. If not see |
18 | <http://www.gnu.org/licenses/>. */ |
19 | |
20 | /* This module is essentially the "combiner" phase of the U. of Arizona |
21 | Portable Optimizer, but redone to work on our list-structured |
22 | representation for RTL instead of their string representation. |
23 | |
24 | The LOG_LINKS of each insn identify the most recent assignment |
25 | to each REG used in the insn. It is a list of previous insns, |
26 | each of which contains a SET for a REG that is used in this insn |
27 | and not used or set in between. LOG_LINKs never cross basic blocks. |
28 | They were set up by the preceding pass (lifetime analysis). |
29 | |
30 | We try to combine each pair of insns joined by a logical link. |
31 | We also try to combine triplets of insns A, B and C when C has |
32 | a link back to B and B has a link back to A. Likewise for a |
33 | small number of quadruplets of insns A, B, C and D for which |
34 | there's high likelihood of success. |
35 | |
36 | We check (with modified_between_p) to avoid combining in such a way |
37 | as to move a computation to a place where its value would be different. |
38 | |
39 | Combination is done by mathematically substituting the previous |
40 | insn(s) values for the regs they set into the expressions in |
41 | the later insns that refer to these regs. If the result is a valid insn |
42 | for our target machine, according to the machine description, |
43 | we install it, delete the earlier insns, and update the data flow |
44 | information (LOG_LINKS and REG_NOTES) for what we did. |
45 | |
46 | There are a few exceptions where the dataflow information isn't |
47 | completely updated (however this is only a local issue since it is |
48 | regenerated before the next pass that uses it): |
49 | |
50 | - reg_live_length is not updated |
51 | - reg_n_refs is not adjusted in the rare case when a register is |
52 | no longer required in a computation |
53 | - there are extremely rare cases (see distribute_notes) when a |
54 | REG_DEAD note is lost |
55 | - a LOG_LINKS entry that refers to an insn with multiple SETs may be |
56 | removed because there is no way to know which register it was |
57 | linking |
58 | |
59 | To simplify substitution, we combine only when the earlier insn(s) |
60 | consist of only a single assignment. To simplify updating afterward, |
61 | we never combine when a subroutine call appears in the middle. */ |
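
/* As an illustrative sketch (all register numbers here are hypothetical),
   a two-insn combination might turn

	(set (reg 100) (plus (reg 101) (const_int 4)))
	(set (reg 102) (mult (reg 100) (reg 103)))

   where (reg 100) dies in the second insn, into the single insn

	(set (reg 102) (mult (plus (reg 101) (const_int 4)) (reg 103)))

   which is installed only if the machine description recognizes it.  */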
62 | |
63 | #include "config.h" |
64 | #include "system.h" |
65 | #include "coretypes.h" |
66 | #include "backend.h" |
67 | #include "target.h" |
68 | #include "rtl.h" |
69 | #include "tree.h" |
70 | #include "cfghooks.h" |
71 | #include "predict.h" |
72 | #include "df.h" |
73 | #include "memmodel.h" |
74 | #include "tm_p.h" |
75 | #include "optabs.h" |
76 | #include "regs.h" |
77 | #include "emit-rtl.h" |
78 | #include "recog.h" |
79 | #include "cgraph.h" |
80 | #include "stor-layout.h" |
81 | #include "cfgrtl.h" |
82 | #include "cfgcleanup.h" |
83 | /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */ |
84 | #include "explow.h" |
85 | #include "insn-attr.h" |
86 | #include "rtlhooks-def.h" |
87 | #include "expr.h" |
88 | #include "tree-pass.h" |
89 | #include "valtrack.h" |
90 | #include "rtl-iter.h" |
91 | #include "print-rtl.h" |
92 | #include "function-abi.h" |
93 | #include "rtlanal.h" |
94 | |
95 | /* Number of attempts to combine instructions in this function. */ |
96 | |
97 | static int combine_attempts; |
98 | |
99 | /* Number of attempts that got as far as substitution in this function. */ |
100 | |
101 | static int combine_merges; |
102 | |
103 | /* Number of instructions combined with added SETs in this function. */ |
104 | |
static int combine_extras;
106 | |
107 | /* Number of instructions combined in this function. */ |
108 | |
109 | static int combine_successes; |
110 | |
111 | /* combine_instructions may try to replace the right hand side of the |
112 | second instruction with the value of an associated REG_EQUAL note |
113 | before throwing it at try_combine. That is problematic when there |
114 | is a REG_DEAD note for a register used in the old right hand side |
115 | and can cause distribute_notes to do wrong things. This is the |
116 | second instruction if it has been so modified, null otherwise. */ |
117 | |
118 | static rtx_insn *i2mod; |
119 | |
120 | /* When I2MOD is nonnull, this is a copy of the old right hand side. */ |
121 | |
122 | static rtx i2mod_old_rhs; |
123 | |
124 | /* When I2MOD is nonnull, this is a copy of the new right hand side. */ |
125 | |
126 | static rtx i2mod_new_rhs; |
127 | |
128 | struct reg_stat_type { |
129 | /* Record last point of death of (hard or pseudo) register n. */ |
130 | rtx_insn *last_death; |
131 | |
132 | /* Record last point of modification of (hard or pseudo) register n. */ |
133 | rtx_insn *last_set; |
134 | |
135 | /* The next group of fields allows the recording of the last value assigned |
136 | to (hard or pseudo) register n. We use this information to see if an |
137 | operation being processed is redundant given a prior operation performed |
138 | on the register. For example, an `and' with a constant is redundant if |
139 | all the zero bits are already known to be turned off. |
140 | |
141 | We use an approach similar to that used by cse, but change it in the |
142 | following ways: |
143 | |
144 | (1) We do not want to reinitialize at each label. |
145 | (2) It is useful, but not critical, to know the actual value assigned |
146 | to a register. Often just its form is helpful. |
147 | |
148 | Therefore, we maintain the following fields: |
149 | |
150 | last_set_value the last value assigned |
151 | last_set_label records the value of label_tick when the |
152 | register was assigned |
153 | last_set_table_tick records the value of label_tick when a |
154 | value using the register is assigned |
155 | last_set_invalid set to true when it is not valid |
156 | to use the value of this register in some |
157 | register's value |
158 | |
159 | To understand the usage of these tables, it is important to understand |
160 | the distinction between the value in last_set_value being valid and |
161 | the register being validly contained in some other expression in the |
162 | table. |
163 | |
164 | (The next two parameters are out of date). |
165 | |
166 | reg_stat[i].last_set_value is valid if it is nonzero, and either |
167 | reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick. |
168 | |
169 | Register I may validly appear in any expression returned for the value |
170 | of another register if reg_n_sets[i] is 1. It may also appear in the |
171 | value for register J if reg_stat[j].last_set_invalid is zero, or |
172 | reg_stat[i].last_set_label < reg_stat[j].last_set_label. |
173 | |
174 | If an expression is found in the table containing a register which may |
175 | not validly appear in an expression, the register is replaced by |
176 | something that won't match, (clobber (const_int 0)). */ |
177 | |
178 | /* Record last value assigned to (hard or pseudo) register n. */ |
179 | |
180 | rtx last_set_value; |
181 | |
182 | /* Record the value of label_tick when an expression involving register n |
183 | is placed in last_set_value. */ |
184 | |
185 | int last_set_table_tick; |
186 | |
187 | /* Record the value of label_tick when the value for register n is placed in |
188 | last_set_value. */ |
189 | |
190 | int last_set_label; |
191 | |
192 | /* These fields are maintained in parallel with last_set_value and are |
193 | used to store the mode in which the register was last set, the bits |
194 | that were known to be zero when it was last set, and the number of |
195 | sign bits copies it was known to have when it was last set. */ |
196 | |
197 | unsigned HOST_WIDE_INT last_set_nonzero_bits; |
198 | char last_set_sign_bit_copies; |
199 | ENUM_BITFIELD(machine_mode) last_set_mode : MACHINE_MODE_BITSIZE; |
200 | |
  /* Set to true if references to register n in expressions should not be
     used.  last_set_invalid is set to true when this register is being
     assigned to and last_set_table_tick == label_tick.  */
204 | |
205 | bool last_set_invalid; |
206 | |
207 | /* Some registers that are set more than once and used in more than one |
208 | basic block are nevertheless always set in similar ways. For example, |
209 | a QImode register may be loaded from memory in two places on a machine |
210 | where byte loads zero extend. |
211 | |
212 | We record in the following fields if a register has some leading bits |
213 | that are always equal to the sign bit, and what we know about the |
214 | nonzero bits of a register, specifically which bits are known to be |
215 | zero. |
216 | |
217 | If an entry is zero, it means that we don't know anything special. */ |
218 | |
219 | unsigned char sign_bit_copies; |
220 | |
221 | unsigned HOST_WIDE_INT nonzero_bits; |
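
  /* An illustrative sketch: after (set (reg 70) (zero_extend:SI (mem:QI ...)))
     on a machine where byte loads zero extend, nonzero_bits for register 70
     would be 0xff, so a later (and (reg 70) (const_int 255)) is redundant.  */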
222 | |
223 | /* Record the value of the label_tick when the last truncation |
224 | happened. The field truncated_to_mode is only valid if |
225 | truncation_label == label_tick. */ |
226 | |
227 | int truncation_label; |
228 | |
229 | /* Record the last truncation seen for this register. If truncation |
230 | is not a nop to this mode we might be able to save an explicit |
231 | truncation if we know that value already contains a truncated |
232 | value. */ |
233 | |
234 | ENUM_BITFIELD(machine_mode) truncated_to_mode : MACHINE_MODE_BITSIZE; |
235 | }; |
236 | |
237 | |
238 | static vec<reg_stat_type> reg_stat; |
239 | |
240 | /* One plus the highest pseudo for which we track REG_N_SETS. |
241 | regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once, |
242 | but during combine_split_insns new pseudos can be created. As we don't have |
243 | updated DF information in that case, it is hard to initialize the array |
244 | after growing. The combiner only cares about REG_N_SETS (regno) == 1, |
245 | so instead of growing the arrays, just assume all newly created pseudos |
246 | during combine might be set multiple times. */ |
247 | |
248 | static unsigned int reg_n_sets_max; |
249 | |
250 | /* Record the luid of the last insn that invalidated memory |
251 | (anything that writes memory, and subroutine calls, but not pushes). */ |
252 | |
253 | static int mem_last_set; |
254 | |
255 | /* Record the luid of the last CALL_INSN |
256 | so we can tell whether a potential combination crosses any calls. */ |
257 | |
258 | static int last_call_luid; |
259 | |
260 | /* When `subst' is called, this is the insn that is being modified |
261 | (by combining in a previous insn). The PATTERN of this insn |
262 | is still the old pattern partially modified and it should not be |
263 | looked at, but this may be used to examine the successors of the insn |
264 | to judge whether a simplification is valid. */ |
265 | |
266 | static rtx_insn *subst_insn; |
267 | |
268 | /* This is the lowest LUID that `subst' is currently dealing with. |
269 | get_last_value will not return a value if the register was set at or |
270 | after this LUID. If not for this mechanism, we could get confused if |
271 | I2 or I1 in try_combine were an insn that used the old value of a register |
272 | to obtain a new value. In that case, we might erroneously get the |
273 | new value of the register when we wanted the old one. */ |
274 | |
275 | static int subst_low_luid; |
276 | |
277 | /* This contains any hard registers that are used in newpat; reg_dead_at_p |
278 | must consider all these registers to be always live. */ |
279 | |
280 | static HARD_REG_SET newpat_used_regs; |
281 | |
/* This is an insn to which a LOG_LINKS entry has been added.  If this
   insn is earlier than I2 or I3, combine should rescan starting at
   that location.  */
285 | |
286 | static rtx_insn *added_links_insn; |
287 | |
288 | /* And similarly, for notes. */ |
289 | |
290 | static rtx_insn *added_notes_insn; |
291 | |
292 | /* Basic block in which we are performing combines. */ |
293 | static basic_block this_basic_block; |
294 | static bool optimize_this_for_speed_p; |
295 | |
296 | |
297 | /* Length of the currently allocated uid_insn_cost array. */ |
298 | |
299 | static int max_uid_known; |
300 | |
301 | /* The following array records the insn_cost for every insn |
302 | in the instruction stream. */ |
303 | |
304 | static int *uid_insn_cost; |
305 | |
306 | /* The following array records the LOG_LINKS for every insn in the |
307 | instruction stream as struct insn_link pointers. */ |
308 | |
309 | struct insn_link { |
310 | rtx_insn *insn; |
311 | unsigned int regno; |
312 | struct insn_link *next; |
313 | }; |
314 | |
315 | static struct insn_link **uid_log_links; |
316 | |
317 | static inline int |
318 | insn_uid_check (const_rtx insn) |
319 | { |
320 | int uid = INSN_UID (insn); |
321 | gcc_checking_assert (uid <= max_uid_known); |
322 | return uid; |
323 | } |
324 | |
325 | #define INSN_COST(INSN) (uid_insn_cost[insn_uid_check (INSN)]) |
326 | #define LOG_LINKS(INSN) (uid_log_links[insn_uid_check (INSN)]) |
327 | |
328 | #define FOR_EACH_LOG_LINK(L, INSN) \ |
329 | for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next) |
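
/* A sketch of the typical traversal (DEF_INSN and DEST are hypothetical
   names): scan the links of an insn for one that records a given def.

	struct insn_link *link;
	FOR_EACH_LOG_LINK (link, insn)
	  if (link->insn == def_insn && link->regno == REGNO (dest))
	    break;

   as find_single_use does below.  */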
330 | |
331 | /* Links for LOG_LINKS are allocated from this obstack. */ |
332 | |
333 | static struct obstack insn_link_obstack; |
334 | |
335 | /* Allocate a link. */ |
336 | |
337 | static inline struct insn_link * |
338 | alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next) |
339 | { |
340 | struct insn_link *l |
341 | = (struct insn_link *) obstack_alloc (&insn_link_obstack, |
342 | sizeof (struct insn_link)); |
343 | l->insn = insn; |
344 | l->regno = regno; |
345 | l->next = next; |
346 | return l; |
347 | } |
348 | |
349 | /* Incremented for each basic block. */ |
350 | |
351 | static int label_tick; |
352 | |
353 | /* Reset to label_tick for each extended basic block in scanning order. */ |
354 | |
355 | static int label_tick_ebb_start; |
356 | |
357 | /* Mode used to compute significance in reg_stat[].nonzero_bits. It is the |
358 | largest integer mode that can fit in HOST_BITS_PER_WIDE_INT. */ |
359 | |
360 | static scalar_int_mode nonzero_bits_mode; |
361 | |
362 | /* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can |
363 | be safely used. It is zero while computing them and after combine has |
364 | completed. This former test prevents propagating values based on |
365 | previously set values, which can be incorrect if a variable is modified |
366 | in a loop. */ |
367 | |
368 | static int nonzero_sign_valid; |
369 | |
370 | |
371 | /* Record one modification to rtl structure |
372 | to be undone by storing old_contents into *where. */ |
373 | |
374 | enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS }; |
375 | |
376 | struct undo |
377 | { |
378 | struct undo *next; |
379 | enum undo_kind kind; |
380 | union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents; |
381 | union { rtx *r; int *i; int regno; struct insn_link **l; } where; |
382 | }; |
383 | |
/* Record a bunch of changes to be undone.  The undos field chains the
   changes currently recorded; the frees field chains undo entries that
   are available for reuse.

   other_insn is nonzero if we have modified some other insn in the process
   of working on subst_insn.  It must be verified too.  */
389 | |
390 | struct undobuf |
391 | { |
392 | struct undo *undos; |
393 | struct undo *frees; |
394 | rtx_insn *other_insn; |
395 | }; |
396 | |
397 | static struct undobuf undobuf; |
398 | |
399 | /* Number of times the pseudo being substituted for |
400 | was found and replaced. */ |
401 | |
402 | static int n_occurrences; |
403 | |
404 | static rtx reg_nonzero_bits_for_combine (const_rtx, scalar_int_mode, |
405 | scalar_int_mode, |
406 | unsigned HOST_WIDE_INT *); |
407 | static rtx reg_num_sign_bit_copies_for_combine (const_rtx, scalar_int_mode, |
408 | scalar_int_mode, |
409 | unsigned int *); |
410 | static void do_SUBST (rtx *, rtx); |
411 | static void do_SUBST_INT (int *, int); |
412 | static void init_reg_last (void); |
413 | static void setup_incoming_promotions (rtx_insn *); |
414 | static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *); |
415 | static bool cant_combine_insn_p (rtx_insn *); |
416 | static bool can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *, |
417 | rtx_insn *, rtx_insn *, rtx *, rtx *); |
418 | static bool combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, |
419 | bool, bool, rtx *); |
420 | static bool contains_muldiv (rtx); |
421 | static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *, |
422 | bool *, rtx_insn *); |
423 | static void undo_all (void); |
424 | static void undo_commit (void); |
425 | static rtx *find_split_point (rtx *, rtx_insn *, bool); |
426 | static rtx subst (rtx, rtx, rtx, bool, bool, bool); |
427 | static rtx combine_simplify_rtx (rtx, machine_mode, bool, bool); |
428 | static rtx simplify_if_then_else (rtx); |
429 | static rtx simplify_set (rtx); |
430 | static rtx simplify_logical (rtx); |
431 | static rtx expand_compound_operation (rtx); |
432 | static const_rtx expand_field_assignment (const_rtx); |
433 | static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT, rtx, |
434 | unsigned HOST_WIDE_INT, bool, bool, bool); |
435 | static int get_pos_from_mask (unsigned HOST_WIDE_INT, |
436 | unsigned HOST_WIDE_INT *); |
437 | static rtx canon_reg_for_combine (rtx, rtx); |
438 | static rtx force_int_to_mode (rtx, scalar_int_mode, scalar_int_mode, |
439 | scalar_int_mode, unsigned HOST_WIDE_INT, bool); |
440 | static rtx force_to_mode (rtx, machine_mode, |
441 | unsigned HOST_WIDE_INT, bool); |
442 | static rtx if_then_else_cond (rtx, rtx *, rtx *); |
443 | static rtx known_cond (rtx, enum rtx_code, rtx, rtx); |
444 | static bool rtx_equal_for_field_assignment_p (rtx, rtx, bool = false); |
445 | static rtx make_field_assignment (rtx); |
446 | static rtx apply_distributive_law (rtx); |
447 | static rtx distribute_and_simplify_rtx (rtx, int); |
448 | static rtx simplify_and_const_int_1 (scalar_int_mode, rtx, |
449 | unsigned HOST_WIDE_INT); |
450 | static rtx simplify_and_const_int (rtx, scalar_int_mode, rtx, |
451 | unsigned HOST_WIDE_INT); |
452 | static bool merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code, |
453 | HOST_WIDE_INT, machine_mode, bool *); |
454 | static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int); |
455 | static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx, |
456 | int); |
457 | static int recog_for_combine (rtx *, rtx_insn *, rtx *); |
458 | static rtx gen_lowpart_for_combine (machine_mode, rtx); |
459 | static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode, |
460 | rtx *, rtx *); |
461 | static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *); |
462 | static void update_table_tick (rtx); |
463 | static void record_value_for_reg (rtx, rtx_insn *, rtx); |
464 | static void check_promoted_subreg (rtx_insn *, rtx); |
465 | static void record_dead_and_set_regs_1 (rtx, const_rtx, void *); |
466 | static void record_dead_and_set_regs (rtx_insn *); |
467 | static bool get_last_value_validate (rtx *, rtx_insn *, int, bool); |
468 | static rtx get_last_value (const_rtx); |
469 | static void reg_dead_at_p_1 (rtx, const_rtx, void *); |
470 | static bool reg_dead_at_p (rtx, rtx_insn *); |
471 | static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *); |
472 | static bool reg_bitfield_target_p (rtx, rtx); |
473 | static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, |
474 | rtx, rtx, rtx); |
475 | static void distribute_links (struct insn_link *); |
476 | static void mark_used_regs_combine (rtx); |
477 | static void record_promoted_value (rtx_insn *, rtx); |
478 | static bool unmentioned_reg_p (rtx, rtx); |
479 | static void record_truncated_values (rtx *, void *); |
480 | static bool reg_truncated_to_mode (machine_mode, const_rtx); |
481 | static rtx gen_lowpart_or_truncate (machine_mode, rtx); |
482 | |
483 | |
484 | /* It is not safe to use ordinary gen_lowpart in combine. |
485 | See comments in gen_lowpart_for_combine. */ |
486 | #undef RTL_HOOKS_GEN_LOWPART |
487 | #define RTL_HOOKS_GEN_LOWPART gen_lowpart_for_combine |
488 | |
489 | /* Our implementation of gen_lowpart never emits a new pseudo. */ |
490 | #undef RTL_HOOKS_GEN_LOWPART_NO_EMIT |
491 | #define RTL_HOOKS_GEN_LOWPART_NO_EMIT gen_lowpart_for_combine |
492 | |
493 | #undef RTL_HOOKS_REG_NONZERO_REG_BITS |
494 | #define RTL_HOOKS_REG_NONZERO_REG_BITS reg_nonzero_bits_for_combine |
495 | |
496 | #undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES |
497 | #define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES reg_num_sign_bit_copies_for_combine |
498 | |
499 | #undef RTL_HOOKS_REG_TRUNCATED_TO_MODE |
500 | #define RTL_HOOKS_REG_TRUNCATED_TO_MODE reg_truncated_to_mode |
501 | |
502 | static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER; |
503 | |
504 | |
505 | /* Convenience wrapper for the canonicalize_comparison target hook. |
506 | Target hooks cannot use enum rtx_code. */ |
507 | static inline void |
508 | target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1, |
509 | bool op0_preserve_value) |
510 | { |
511 | int code_int = (int)*code; |
512 | targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value); |
513 | *code = (enum rtx_code)code_int; |
514 | } |
515 | |
516 | /* Try to split PATTERN found in INSN. This returns NULL_RTX if |
517 | PATTERN cannot be split. Otherwise, it returns an insn sequence. |
518 | This is a wrapper around split_insns which ensures that the |
519 | reg_stat vector is made larger if the splitter creates a new |
520 | register. */ |
521 | |
522 | static rtx_insn * |
523 | combine_split_insns (rtx pattern, rtx_insn *insn) |
524 | { |
525 | rtx_insn *ret; |
526 | unsigned int nregs; |
527 | |
528 | ret = split_insns (pattern, insn); |
529 | nregs = max_reg_num (); |
530 | if (nregs > reg_stat.length ()) |
    reg_stat.safe_grow_cleared (nregs, true);
532 | return ret; |
533 | } |
534 | |
535 | /* This is used by find_single_use to locate an rtx in LOC that |
536 | contains exactly one use of DEST, which is typically a REG. |
537 | It returns a pointer to the innermost rtx expression |
538 | containing DEST. Appearances of DEST that are being used to |
539 | totally replace it are not counted. */ |
540 | |
541 | static rtx * |
542 | find_single_use_1 (rtx dest, rtx *loc) |
543 | { |
544 | rtx x = *loc; |
545 | enum rtx_code code = GET_CODE (x); |
546 | rtx *result = NULL; |
547 | rtx *this_result; |
548 | int i; |
549 | const char *fmt; |
550 | |
551 | switch (code) |
552 | { |
553 | case CONST: |
554 | case LABEL_REF: |
555 | case SYMBOL_REF: |
556 | CASE_CONST_ANY: |
557 | case CLOBBER: |
558 | return 0; |
559 | |
560 | case SET: |
561 | /* If the destination is anything other than PC, a REG or a SUBREG |
562 | of a REG that occupies all of the REG, the insn uses DEST if |
563 | it is mentioned in the destination or the source. Otherwise, we |
564 | need just check the source. */ |
565 | if (GET_CODE (SET_DEST (x)) != PC |
566 | && !REG_P (SET_DEST (x)) |
567 | && ! (GET_CODE (SET_DEST (x)) == SUBREG |
568 | && REG_P (SUBREG_REG (SET_DEST (x))) |
569 | && !read_modify_subreg_p (SET_DEST (x)))) |
570 | break; |
571 | |
      return find_single_use_1 (dest, &SET_SRC (x));
573 | |
574 | case MEM: |
575 | case SUBREG: |
      return find_single_use_1 (dest, &XEXP (x, 0));
577 | |
578 | default: |
579 | break; |
580 | } |
581 | |
582 | /* If it wasn't one of the common cases above, check each expression and |
583 | vector of this code. Look for a unique usage of DEST. */ |
584 | |
585 | fmt = GET_RTX_FORMAT (code); |
586 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
587 | { |
588 | if (fmt[i] == 'e') |
589 | { |
590 | if (dest == XEXP (x, i) |
591 | || (REG_P (dest) && REG_P (XEXP (x, i)) |
592 | && REGNO (dest) == REGNO (XEXP (x, i)))) |
593 | this_result = loc; |
594 | else |
            this_result = find_single_use_1 (dest, &XEXP (x, i));
596 | |
597 | if (result == NULL) |
598 | result = this_result; |
599 | else if (this_result) |
600 | /* Duplicate usage. */ |
601 | return NULL; |
602 | } |
603 | else if (fmt[i] == 'E') |
604 | { |
605 | int j; |
606 | |
607 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
608 | { |
609 | if (XVECEXP (x, i, j) == dest |
610 | || (REG_P (dest) |
611 | && REG_P (XVECEXP (x, i, j)) |
612 | && REGNO (XVECEXP (x, i, j)) == REGNO (dest))) |
613 | this_result = loc; |
614 | else |
                this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));
616 | |
617 | if (result == NULL) |
618 | result = this_result; |
619 | else if (this_result) |
620 | return NULL; |
621 | } |
622 | } |
623 | } |
624 | |
625 | return result; |
626 | } |
627 | |
628 | |
629 | /* See if DEST, produced in INSN, is used only a single time in the |
630 | sequel. If so, return a pointer to the innermost rtx expression in which |
631 | it is used. |
632 | |
633 | If PLOC is nonzero, *PLOC is set to the insn containing the single use. |
634 | |
635 | Otherwise, we find the single use by finding an insn that has a |
636 | LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST. If DEST is |
637 | only referenced once in that insn, we know that it must be the first |
638 | and last insn referencing DEST. */ |
639 | |
640 | static rtx * |
641 | find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc) |
642 | { |
643 | basic_block bb; |
644 | rtx_insn *next; |
645 | rtx *result; |
646 | struct insn_link *link; |
647 | |
648 | if (!REG_P (dest)) |
649 | return 0; |
650 | |
651 | bb = BLOCK_FOR_INSN (insn); |
652 | for (next = NEXT_INSN (insn); |
       next && BLOCK_FOR_INSN (next) == bb;
       next = NEXT_INSN (next))
655 | if (NONDEBUG_INSN_P (next) && dead_or_set_p (next, dest)) |
656 | { |
657 | FOR_EACH_LOG_LINK (link, next) |
658 | if (link->insn == insn && link->regno == REGNO (dest)) |
659 | break; |
660 | |
661 | if (link) |
662 | { |
            result = find_single_use_1 (dest, &PATTERN (next));
664 | if (ploc) |
665 | *ploc = next; |
666 | return result; |
667 | } |
668 | } |
669 | |
670 | return 0; |
671 | } |
672 | |
673 | /* Substitute NEWVAL, an rtx expression, into INTO, a place in some |
674 | insn. The substitution can be undone by undo_all. If INTO is already |
675 | set to NEWVAL, do not record this change. Because computing NEWVAL might |
676 | also call SUBST, we have to compute it before we put anything into |
677 | the undo table. */ |
678 | |
679 | static void |
680 | do_SUBST (rtx *into, rtx newval) |
681 | { |
682 | struct undo *buf; |
683 | rtx oldval = *into; |
684 | |
685 | if (oldval == newval) |
686 | return; |
687 | |
688 | /* We'd like to catch as many invalid transformations here as |
689 | possible. Unfortunately, there are way too many mode changes |
690 | that are perfectly valid, so we'd waste too much effort for |
691 | little gain doing the checks here. Focus on catching invalid |
692 | transformations involving integer constants. */ |
693 | if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT |
694 | && CONST_INT_P (newval)) |
695 | { |
696 | /* Sanity check that we're replacing oldval with a CONST_INT |
697 | that is a valid sign-extension for the original mode. */ |
698 | gcc_assert (INTVAL (newval) |
699 | == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval))); |
700 | |
701 | /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a |
702 | CONST_INT is not valid, because after the replacement, the |
703 | original mode would be gone. Unfortunately, we can't tell |
704 | when do_SUBST is called to replace the operand thereof, so we |
705 | perform this test on oldval instead, checking whether an |
706 | invalid replacement took place before we got here. */ |
707 | gcc_assert (!(GET_CODE (oldval) == SUBREG |
708 | && CONST_INT_P (SUBREG_REG (oldval)))); |
709 | gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND |
710 | && CONST_INT_P (XEXP (oldval, 0)))); |
711 | } |
712 | |
713 | if (undobuf.frees) |
714 | buf = undobuf.frees, undobuf.frees = buf->next; |
715 | else |
716 | buf = XNEW (struct undo); |
717 | |
718 | buf->kind = UNDO_RTX; |
719 | buf->where.r = into; |
720 | buf->old_contents.r = oldval; |
721 | *into = newval; |
722 | |
723 | buf->next = undobuf.undos, undobuf.undos = buf; |
724 | } |
725 | |
726 | #define SUBST(INTO, NEWVAL) do_SUBST (&(INTO), (NEWVAL)) |
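
/* A sketch of the usual idiom (the names PAT, NEW_SRC and RECOGNIZED are
   illustrative): substitutions are tentative, and the whole group is
   either committed or rolled back.

	SUBST (SET_SRC (pat), new_src);
	if (! recognized)
	  undo_all ();

   undo_all restores every *where recorded above; undo_commit makes the
   changes permanent.  */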
727 | |
728 | /* Similar to SUBST, but NEWVAL is an int expression. Note that substitution |
729 | for the value of a HOST_WIDE_INT value (including CONST_INT) is |
730 | not safe. */ |
731 | |
732 | static void |
733 | do_SUBST_INT (int *into, int newval) |
734 | { |
735 | struct undo *buf; |
736 | int oldval = *into; |
737 | |
738 | if (oldval == newval) |
739 | return; |
740 | |
741 | if (undobuf.frees) |
742 | buf = undobuf.frees, undobuf.frees = buf->next; |
743 | else |
744 | buf = XNEW (struct undo); |
745 | |
746 | buf->kind = UNDO_INT; |
747 | buf->where.i = into; |
748 | buf->old_contents.i = oldval; |
749 | *into = newval; |
750 | |
751 | buf->next = undobuf.undos, undobuf.undos = buf; |
752 | } |
753 | |
754 | #define SUBST_INT(INTO, NEWVAL) do_SUBST_INT (&(INTO), (NEWVAL)) |
755 | |
756 | /* Similar to SUBST, but just substitute the mode. This is used when |
757 | changing the mode of a pseudo-register, so that any other |
758 | references to the entry in the regno_reg_rtx array will change as |
759 | well. */ |
760 | |
761 | static void |
762 | subst_mode (int regno, machine_mode newval) |
763 | { |
764 | struct undo *buf; |
765 | rtx reg = regno_reg_rtx[regno]; |
766 | machine_mode oldval = GET_MODE (reg); |
767 | |
768 | if (oldval == newval) |
769 | return; |
770 | |
771 | if (undobuf.frees) |
772 | buf = undobuf.frees, undobuf.frees = buf->next; |
773 | else |
774 | buf = XNEW (struct undo); |
775 | |
776 | buf->kind = UNDO_MODE; |
777 | buf->where.regno = regno; |
778 | buf->old_contents.m = oldval; |
779 | adjust_reg_mode (reg, newval); |
780 | |
781 | buf->next = undobuf.undos, undobuf.undos = buf; |
782 | } |
783 | |
784 | /* Similar to SUBST, but NEWVAL is a LOG_LINKS expression. */ |
785 | |
786 | static void |
787 | do_SUBST_LINK (struct insn_link **into, struct insn_link *newval) |
788 | { |
789 | struct undo *buf; |
790 | struct insn_link * oldval = *into; |
791 | |
792 | if (oldval == newval) |
793 | return; |
794 | |
795 | if (undobuf.frees) |
796 | buf = undobuf.frees, undobuf.frees = buf->next; |
797 | else |
798 | buf = XNEW (struct undo); |
799 | |
800 | buf->kind = UNDO_LINKS; |
801 | buf->where.l = into; |
802 | buf->old_contents.l = oldval; |
803 | *into = newval; |
804 | |
805 | buf->next = undobuf.undos, undobuf.undos = buf; |
806 | } |
807 | |
808 | #define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval) |
809 | |
810 | /* Subroutine of try_combine. Determine whether the replacement patterns |
811 | NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_cost |
812 | than the original sequence I0, I1, I2, I3 and undobuf.other_insn. Note |
813 | that I0, I1 and/or NEWI2PAT may be NULL_RTX. Similarly, NEWOTHERPAT and |
814 | undobuf.other_insn may also both be NULL_RTX. Return false if the cost |
815 | of all the instructions can be estimated and the replacements are more |
816 | expensive than the original sequence. */ |
817 | |
818 | static bool |
819 | combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3, |
820 | rtx newpat, rtx newi2pat, rtx newotherpat) |
821 | { |
822 | int i0_cost, i1_cost, i2_cost, i3_cost; |
823 | int new_i2_cost, new_i3_cost; |
824 | int old_cost, new_cost; |
825 | |
826 | /* Lookup the original insn_costs. */ |
827 | i2_cost = INSN_COST (i2); |
828 | i3_cost = INSN_COST (i3); |
829 | |
830 | if (i1) |
831 | { |
832 | i1_cost = INSN_COST (i1); |
833 | if (i0) |
834 | { |
835 | i0_cost = INSN_COST (i0); |
836 | old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0 |
837 | ? i0_cost + i1_cost + i2_cost + i3_cost : 0); |
838 | } |
839 | else |
840 | { |
841 | old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0 |
842 | ? i1_cost + i2_cost + i3_cost : 0); |
843 | i0_cost = 0; |
844 | } |
845 | } |
846 | else |
847 | { |
848 | old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0; |
849 | i1_cost = i0_cost = 0; |
850 | } |
851 | |
852 | /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice; |
853 | correct that. */ |
  if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
855 | old_cost -= i1_cost; |
856 | |
857 | |
858 | /* Calculate the replacement insn_costs. */ |
  rtx tmp = PATTERN (i3);
  PATTERN (i3) = newpat;
  int tmpi = INSN_CODE (i3);
  INSN_CODE (i3) = -1;
  new_i3_cost = insn_cost (i3, optimize_this_for_speed_p);
  PATTERN (i3) = tmp;
  INSN_CODE (i3) = tmpi;
866 | if (newi2pat) |
867 | { |
      tmp = PATTERN (i2);
      PATTERN (i2) = newi2pat;
      tmpi = INSN_CODE (i2);
      INSN_CODE (i2) = -1;
      new_i2_cost = insn_cost (i2, optimize_this_for_speed_p);
      PATTERN (i2) = tmp;
874 | INSN_CODE (i2) = tmpi; |
875 | new_cost = (new_i2_cost > 0 && new_i3_cost > 0) |
876 | ? new_i2_cost + new_i3_cost : 0; |
877 | } |
878 | else |
879 | { |
880 | new_cost = new_i3_cost; |
881 | new_i2_cost = 0; |
882 | } |
883 | |
884 | if (undobuf.other_insn) |
885 | { |
886 | int old_other_cost, new_other_cost; |
887 | |
888 | old_other_cost = INSN_COST (undobuf.other_insn); |
      tmp = PATTERN (undobuf.other_insn);
      PATTERN (undobuf.other_insn) = newotherpat;
      tmpi = INSN_CODE (undobuf.other_insn);
      INSN_CODE (undobuf.other_insn) = -1;
      new_other_cost = insn_cost (undobuf.other_insn,
                                  optimize_this_for_speed_p);
      PATTERN (undobuf.other_insn) = tmp;
      INSN_CODE (undobuf.other_insn) = tmpi;
897 | if (old_other_cost > 0 && new_other_cost > 0) |
898 | { |
899 | old_cost += old_other_cost; |
900 | new_cost += new_other_cost; |
901 | } |
902 | else |
903 | old_cost = 0; |
904 | } |
905 | |
906 | /* Disallow this combination if both new_cost and old_cost are greater than |
907 | zero, and new_cost is greater than old cost. */ |
908 | bool reject = old_cost > 0 && new_cost > old_cost; |
909 | |
910 | if (dump_file) |
911 | { |
      fprintf (dump_file, "%s combination of insns ",
               reject ? "rejecting" : "allowing");
      if (i0)
        fprintf (dump_file, "%d, ", INSN_UID (i0));
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
        fprintf (dump_file, "%d, ", INSN_UID (i1));
      fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));

      fprintf (dump_file, "original costs ");
      if (i0)
        fprintf (dump_file, "%d + ", i0_cost);
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
        fprintf (dump_file, "%d + ", i1_cost);
      fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);

      if (newi2pat)
        fprintf (dump_file, "replacement costs %d + %d = %d\n",
                 new_i2_cost, new_i3_cost, new_cost);
      else
        fprintf (dump_file, "replacement cost %d\n", new_cost);
932 | } |
933 | |
934 | if (reject) |
935 | return false; |
936 | |
937 | /* Update the uid_insn_cost array with the replacement costs. */ |
938 | INSN_COST (i2) = new_i2_cost; |
939 | INSN_COST (i3) = new_i3_cost; |
940 | if (i1) |
941 | { |
942 | INSN_COST (i1) = 0; |
943 | if (i0) |
944 | INSN_COST (i0) = 0; |
945 | } |
946 | |
947 | return true; |
948 | } |
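
/* Worked example with hypothetical costs: for I2 and I3 with insn_cost
   4 each and no NEWI2PAT, old_cost = 8; a NEWPAT of cost 6 gives
   new_cost = 6 <= 8 and the combination is allowed, while a NEWPAT of
   cost 10 would be rejected.  */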
949 | |
950 | |
951 | /* Delete any insns that copy a register to itself. |
952 | Return true if the CFG was changed. */ |
953 | |
954 | static bool |
955 | delete_noop_moves (void) |
956 | { |
957 | rtx_insn *insn, *next; |
958 | basic_block bb; |
959 | |
960 | bool edges_deleted = false; |
961 | |
962 | FOR_EACH_BB_FN (bb, cfun) |
963 | { |
964 | for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next) |
965 | { |
966 | next = NEXT_INSN (insn); |
967 | if (INSN_P (insn) && noop_move_p (insn)) |
968 | { |
969 | if (dump_file) |
                fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));
971 | |
972 | edges_deleted |= delete_insn_and_edges (insn); |
973 | } |
974 | } |
975 | } |
976 | |
977 | return edges_deleted; |
978 | } |
979 | |
980 | |
981 | /* Return false if we do not want to (or cannot) combine DEF. */ |
982 | static bool |
983 | can_combine_def_p (df_ref def) |
984 | { |
985 | /* Do not consider if it is pre/post modification in MEM. */ |
986 | if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY) |
987 | return false; |
988 | |
989 | unsigned int regno = DF_REF_REGNO (def); |
990 | |
991 | /* Do not combine frame pointer adjustments. */ |
992 | if ((regno == FRAME_POINTER_REGNUM |
993 | && (!reload_completed || frame_pointer_needed)) |
994 | || (!HARD_FRAME_POINTER_IS_FRAME_POINTER |
995 | && regno == HARD_FRAME_POINTER_REGNUM |
996 | && (!reload_completed || frame_pointer_needed)) |
997 | || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM |
998 | && regno == ARG_POINTER_REGNUM && fixed_regs[regno])) |
999 | return false; |
1000 | |
1001 | return true; |
1002 | } |
1003 | |
1004 | /* Return false if we do not want to (or cannot) combine USE. */ |
1005 | static bool |
1006 | can_combine_use_p (df_ref use) |
1007 | { |
1008 | /* Do not consider the usage of the stack pointer by function call. */ |
1009 | if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE) |
1010 | return false; |
1011 | |
1012 | return true; |
1013 | } |
1014 | |
1015 | /* Fill in log links field for all insns. */ |
1016 | |
1017 | static void |
1018 | create_log_links (void) |
1019 | { |
1020 | basic_block bb; |
1021 | rtx_insn **next_use; |
1022 | rtx_insn *insn; |
1023 | df_ref def, use; |
1024 | |
1025 | next_use = XCNEWVEC (rtx_insn *, max_reg_num ()); |
1026 | |
1027 | /* Pass through each block from the end, recording the uses of each |
1028 | register and establishing log links when def is encountered. |
1029 | Note that we do not clear next_use array in order to save time, |
1030 | so we have to test whether the use is in the same basic block as def. |
1031 | |
     There are a few cases below when we do not consider the definition or
     usage -- these are taken from what the original flow.c did.  Don't ask
     me why it is done this way; I don't know and if it works, I don't want
     to know.  */
1035 | |
1036 | FOR_EACH_BB_FN (bb, cfun) |
1037 | { |
1038 | FOR_BB_INSNS_REVERSE (bb, insn) |
1039 | { |
1040 | if (!NONDEBUG_INSN_P (insn)) |
1041 | continue; |
1042 | |
1043 | /* Log links are created only once. */ |
1044 | gcc_assert (!LOG_LINKS (insn)); |
1045 | |
1046 | FOR_EACH_INSN_DEF (def, insn) |
1047 | { |
1048 | unsigned int regno = DF_REF_REGNO (def); |
1049 | rtx_insn *use_insn; |
1050 | |
1051 | if (!next_use[regno]) |
1052 | continue; |
1053 | |
1054 | if (!can_combine_def_p (def)) |
1055 | continue; |
1056 | |
1057 | use_insn = next_use[regno]; |
1058 | next_use[regno] = NULL; |
1059 | |
          if (BLOCK_FOR_INSN (use_insn) != bb)
1061 | continue; |
1062 | |
1063 | /* flow.c claimed: |
1064 | |
1065 | We don't build a LOG_LINK for hard registers contained |
1066 | in ASM_OPERANDs. If these registers get replaced, |
1067 | we might wind up changing the semantics of the insn, |
1068 | even if reload can make what appear to be valid |
1069 | assignments later. */ |
1070 | if (regno < FIRST_PSEUDO_REGISTER |
              && asm_noperands (PATTERN (use_insn)) >= 0)
1072 | continue; |
1073 | |
1074 | /* Don't add duplicate links between instructions. */ |
1075 | struct insn_link *links; |
1076 | FOR_EACH_LOG_LINK (links, use_insn) |
1077 | if (insn == links->insn && regno == links->regno) |
1078 | break; |
1079 | |
1080 | if (!links) |
1081 | LOG_LINKS (use_insn) |
1082 | = alloc_insn_link (insn, regno, LOG_LINKS (use_insn)); |
1083 | } |
1084 | |
1085 | FOR_EACH_INSN_USE (use, insn) |
1086 | if (can_combine_use_p (use)) |
1087 | next_use[DF_REF_REGNO (use)] = insn; |
1088 | } |
1089 | } |
1090 | |
  free (next_use);
1092 | } |
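
/* For instance (insn UIDs and register numbers are hypothetical), within
   one basic block

	insn 10: (set (reg 70) (mem:SI ...))
	insn 11: (set (reg 71) (plus:SI (reg 70) (const_int 1)))

   gives insn 11 a log link back to insn 10 for register 70, provided no
   insn in between sets or uses register 70.  */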
1093 | |
1094 | /* Walk the LOG_LINKS of insn B to see if we find a reference to A. Return |
1095 | true if we found a LOG_LINK that proves that A feeds B. This only works |
1096 | if there are no instructions between A and B which could have a link |
1097 | depending on A, since in that case we would not record a link for B. */ |
1098 | |
1099 | static bool |
1100 | insn_a_feeds_b (rtx_insn *a, rtx_insn *b) |
1101 | { |
1102 | struct insn_link *links; |
1103 | FOR_EACH_LOG_LINK (links, b) |
1104 | if (links->insn == a) |
1105 | return true; |
1106 | return false; |
1107 | } |
1108 | |
1109 | /* Main entry point for combiner. F is the first insn of the function. |
1110 | NREGS is the first unused pseudo-reg number. |
1111 | |
   Return true if the CFG was changed (e.g. if the combiner has
1113 | turned an indirect jump instruction into a direct jump). */ |
1114 | static bool |
1115 | combine_instructions (rtx_insn *f, unsigned int nregs) |
1116 | { |
1117 | rtx_insn *insn, *next; |
1118 | struct insn_link *links, *nextlinks; |
1119 | rtx_insn *first; |
1120 | basic_block last_bb; |
1121 | |
1122 | bool new_direct_jump_p = false; |
1123 | |
1124 | for (first = f; first && !NONDEBUG_INSN_P (first); ) |
    first = NEXT_INSN (first);
1126 | if (!first) |
1127 | return false; |
1128 | |
1129 | combine_attempts = 0; |
1130 | combine_merges = 0; |
1131 | combine_extras = 0; |
1132 | combine_successes = 0; |
1133 | |
1134 | rtl_hooks = combine_rtl_hooks; |
1135 | |
  reg_stat.safe_grow_cleared (nregs, true);
1137 | |
1138 | init_recog_no_volatile (); |
1139 | |
1140 | /* Allocate array for insn info. */ |
1141 | max_uid_known = get_max_uid (); |
1142 | uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1); |
1143 | uid_insn_cost = XCNEWVEC (int, max_uid_known + 1); |
1144 | gcc_obstack_init (&insn_link_obstack); |
1145 | |
  nonzero_bits_mode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();
1147 | |
1148 | /* Don't use reg_stat[].nonzero_bits when computing it. This can cause |
1149 | problems when, for example, we have j <<= 1 in a loop. */ |
1150 | |
1151 | nonzero_sign_valid = 0; |
1152 | label_tick = label_tick_ebb_start = 1; |
1153 | |
1154 | /* Scan all SETs and see if we can deduce anything about what |
1155 | bits are known to be zero for some registers and how many copies |
1156 | of the sign bit are known to exist for those registers. |
1157 | |
1158 | Also set any known values so that we can use it while searching |
1159 | for what bits are known to be set. */ |
1160 | |
1161 | setup_incoming_promotions (first); |
1162 | /* Allow the entry block and the first block to fall into the same EBB. |
1163 | Conceptually the incoming promotions are assigned to the entry block. */ |
1164 | last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun); |
1165 | |
1166 | create_log_links (); |
1167 | FOR_EACH_BB_FN (this_basic_block, cfun) |
1168 | { |
1169 | optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block); |
1170 | last_call_luid = 0; |
1171 | mem_last_set = -1; |
1172 | |
1173 | label_tick++; |
      if (!single_pred_p (this_basic_block)
          || single_pred (this_basic_block) != last_bb)
1176 | label_tick_ebb_start = label_tick; |
1177 | last_bb = this_basic_block; |
1178 | |
1179 | FOR_BB_INSNS (this_basic_block, insn) |
1180 | if (INSN_P (insn) && BLOCK_FOR_INSN (insn)) |
1181 | { |
1182 | rtx links; |
1183 | |
1184 | subst_low_luid = DF_INSN_LUID (insn); |
1185 | subst_insn = insn; |
1186 | |
1187 | note_stores (insn, set_nonzero_bits_and_sign_copies, insn); |
1188 | record_dead_and_set_regs (insn); |
1189 | |
1190 | if (AUTO_INC_DEC) |
1191 | for (links = REG_NOTES (insn); links; links = XEXP (links, 1)) |
1192 | if (REG_NOTE_KIND (links) == REG_INC) |
1193 | set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX, |
1194 | insn); |
1195 | |
1196 | /* Record the current insn_cost of this instruction. */ |
1197 | INSN_COST (insn) = insn_cost (insn, optimize_this_for_speed_p); |
1198 | if (dump_file) |
1199 | { |
            fprintf (dump_file, "insn_cost %d for ", INSN_COST (insn));
1201 | dump_insn_slim (dump_file, insn); |
1202 | } |
1203 | } |
1204 | } |
1205 | |
1206 | nonzero_sign_valid = 1; |
1207 | |
1208 | /* Now scan all the insns in forward order. */ |
1209 | label_tick = label_tick_ebb_start = 1; |
1210 | init_reg_last (); |
1211 | setup_incoming_promotions (first); |
1212 | last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun); |
1213 | int max_combine = param_max_combine_insns; |
1214 | |
1215 | FOR_EACH_BB_FN (this_basic_block, cfun) |
1216 | { |
1217 | rtx_insn *last_combined_insn = NULL; |
1218 | |
1219 | /* Ignore instruction combination in basic blocks that are going to |
1220 | be removed as unreachable anyway. See PR82386. */ |
1221 | if (EDGE_COUNT (this_basic_block->preds) == 0) |
1222 | continue; |
1223 | |
1224 | optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block); |
1225 | last_call_luid = 0; |
1226 | mem_last_set = -1; |
1227 | |
1228 | label_tick++; |
      if (!single_pred_p (this_basic_block)
          || single_pred (this_basic_block) != last_bb)
1231 | label_tick_ebb_start = label_tick; |
1232 | last_bb = this_basic_block; |
1233 | |
1234 | rtl_profile_for_bb (this_basic_block); |
1235 | for (insn = BB_HEAD (this_basic_block); |
1236 | insn != NEXT_INSN (BB_END (this_basic_block)); |
1237 | insn = next ? next : NEXT_INSN (insn)) |
1238 | { |
1239 | next = 0; |
1240 | if (!NONDEBUG_INSN_P (insn)) |
1241 | continue; |
1242 | |
1243 | while (last_combined_insn |
1244 | && (!NONDEBUG_INSN_P (last_combined_insn) |
1245 | || last_combined_insn->deleted ())) |
            last_combined_insn = PREV_INSN (last_combined_insn);
1247 | if (last_combined_insn == NULL_RTX |
              || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
1249 | || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn)) |
1250 | last_combined_insn = insn; |
1251 | |
1252 | /* See if we know about function return values before this |
1253 | insn based upon SUBREG flags. */ |
1254 | check_promoted_subreg (insn, PATTERN (insn)); |
1255 | |
1256 | /* See if we can find hardregs and subreg of pseudos in |
1257 | narrower modes. This could help turning TRUNCATEs |
1258 | into SUBREGs. */ |
1259 | note_uses (&PATTERN (insn), record_truncated_values, NULL); |
1260 | |
1261 | /* Try this insn with each insn it links back to. */ |
1262 | |
1263 | FOR_EACH_LOG_LINK (links, insn) |
1264 | if ((next = try_combine (insn, links->insn, NULL, |
1265 | NULL, &new_direct_jump_p, |
1266 | last_combined_insn)) != 0) |
1267 | { |
              statistics_counter_event (cfun, "two-insn combine", 1);
1269 | goto retry; |
1270 | } |
1271 | |
1272 | /* Try each sequence of three linked insns ending with this one. */ |
1273 | |
1274 | if (max_combine >= 3) |
1275 | FOR_EACH_LOG_LINK (links, insn) |
1276 | { |
1277 | rtx_insn *link = links->insn; |
1278 | |
1279 | /* If the linked insn has been replaced by a note, then there |
1280 | is no point in pursuing this chain any further. */ |
1281 | if (NOTE_P (link)) |
1282 | continue; |
1283 | |
1284 | FOR_EACH_LOG_LINK (nextlinks, link) |
1285 | if ((next = try_combine (insn, link, nextlinks->insn, |
1286 | NULL, &new_direct_jump_p, |
1287 | last_combined_insn)) != 0) |
1288 | { |
                statistics_counter_event (cfun, "three-insn combine", 1);
1290 | goto retry; |
1291 | } |
1292 | } |
1293 | |
1294 | /* Try combining an insn with two different insns whose results it |
1295 | uses. */ |
1296 | if (max_combine >= 3) |
1297 | FOR_EACH_LOG_LINK (links, insn) |
1298 | for (nextlinks = links->next; nextlinks; |
1299 | nextlinks = nextlinks->next) |
              if ((next = try_combine (insn, links->insn,
                                       nextlinks->insn, NULL,
                                       &new_direct_jump_p,
                                       last_combined_insn)) != 0)
                {
                  statistics_counter_event (cfun, "three-insn combine", 1);
                  goto retry;
                }
1309 | |
1310 | /* Try four-instruction combinations. */ |
1311 | if (max_combine >= 4) |
1312 | FOR_EACH_LOG_LINK (links, insn) |
1313 | { |
1314 | struct insn_link *next1; |
1315 | rtx_insn *link = links->insn; |
1316 | |
1317 | /* If the linked insn has been replaced by a note, then there |
1318 | is no point in pursuing this chain any further. */ |
1319 | if (NOTE_P (link)) |
1320 | continue; |
1321 | |
1322 | FOR_EACH_LOG_LINK (next1, link) |
1323 | { |
1324 | rtx_insn *link1 = next1->insn; |
1325 | if (NOTE_P (link1)) |
1326 | continue; |
1327 | /* I0 -> I1 -> I2 -> I3. */ |
1328 | FOR_EACH_LOG_LINK (nextlinks, link1) |
1329 | if ((next = try_combine (insn, link, link1, |
1330 | nextlinks->insn, |
1331 | &new_direct_jump_p, |
1332 | last_combined_insn)) != 0) |
1333 | { |
                    statistics_counter_event (cfun, "four-insn combine", 1);
1335 | goto retry; |
1336 | } |
1337 | /* I0, I1 -> I2, I2 -> I3. */ |
1338 | for (nextlinks = next1->next; nextlinks; |
1339 | nextlinks = nextlinks->next) |
1340 | if ((next = try_combine (insn, link, link1, |
1341 | nextlinks->insn, |
1342 | &new_direct_jump_p, |
1343 | last_combined_insn)) != 0) |
1344 | { |
                    statistics_counter_event (cfun, "four-insn combine", 1);
1346 | goto retry; |
1347 | } |
1348 | } |
1349 | |
1350 | for (next1 = links->next; next1; next1 = next1->next) |
1351 | { |
1352 | rtx_insn *link1 = next1->insn; |
1353 | if (NOTE_P (link1)) |
1354 | continue; |
1355 | /* I0 -> I2; I1, I2 -> I3. */ |
1356 | FOR_EACH_LOG_LINK (nextlinks, link) |
1357 | if ((next = try_combine (insn, link, link1, |
1358 | nextlinks->insn, |
1359 | &new_direct_jump_p, |
1360 | last_combined_insn)) != 0) |
1361 | { |
                    statistics_counter_event (cfun, "four-insn combine", 1);
1363 | goto retry; |
1364 | } |
1365 | /* I0 -> I1; I1, I2 -> I3. */ |
1366 | FOR_EACH_LOG_LINK (nextlinks, link1) |
1367 | if ((next = try_combine (insn, link, link1, |
1368 | nextlinks->insn, |
1369 | &new_direct_jump_p, |
1370 | last_combined_insn)) != 0) |
1371 | { |
                    statistics_counter_event (cfun, "four-insn combine", 1);
1373 | goto retry; |
1374 | } |
1375 | } |
1376 | } |
1377 | |
1378 | /* Try this insn with each REG_EQUAL note it links back to. */ |
1379 | FOR_EACH_LOG_LINK (links, insn) |
1380 | { |
1381 | rtx set, note; |
1382 | rtx_insn *temp = links->insn; |
            if ((set = single_set (temp)) != 0
1384 | && (note = find_reg_equal_equiv_note (temp)) != 0 |
1385 | && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST |
1386 | && ! side_effects_p (SET_SRC (set)) |
                /* Avoid using a register that may already have been
                   marked dead by an earlier instruction.  */
1389 | && ! unmentioned_reg_p (note, SET_SRC (set)) |
1390 | && (GET_MODE (note) == VOIDmode |
1391 | ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set))) |
1392 | : (GET_MODE (SET_DEST (set)) == GET_MODE (note) |
1393 | && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT |
1394 | || (GET_MODE (XEXP (SET_DEST (set), 0)) |
1395 | == GET_MODE (note)))))) |
1396 | { |
1397 | /* Temporarily replace the set's source with the |
1398 | contents of the REG_EQUAL note. The insn will |
1399 | be deleted or recognized by try_combine. */ |
1400 | rtx orig_src = SET_SRC (set); |
1401 | rtx orig_dest = SET_DEST (set); |
1402 | if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT) |
1403 | SET_DEST (set) = XEXP (SET_DEST (set), 0); |
1404 | SET_SRC (set) = note; |
1405 | i2mod = temp; |
1406 | i2mod_old_rhs = copy_rtx (orig_src); |
1407 | i2mod_new_rhs = copy_rtx (note); |
1408 | next = try_combine (insn, i2mod, NULL, NULL, |
1409 | &new_direct_jump_p, |
1410 | last_combined_insn); |
1411 | i2mod = NULL; |
1412 | if (next) |
1413 | { |
                    statistics_counter_event (cfun, "insn-with-note combine", 1);
1415 | goto retry; |
1416 | } |
1417 | INSN_CODE (temp) = -1; |
1418 | SET_SRC (set) = orig_src; |
1419 | SET_DEST (set) = orig_dest; |
1420 | } |
1421 | } |
1422 | |
1423 | if (!NOTE_P (insn)) |
1424 | record_dead_and_set_regs (insn); |
1425 | |
1426 | retry: |
1427 | ; |
1428 | } |
1429 | } |
1430 | |
1431 | default_rtl_profile (); |
1432 | clear_bb_flags (); |
1433 | |
1434 | if (purge_all_dead_edges ()) |
1435 | new_direct_jump_p = true; |
1436 | if (delete_noop_moves ()) |
1437 | new_direct_jump_p = true; |
1438 | |
1439 | /* Clean up. */ |
1440 | obstack_free (&insn_link_obstack, NULL); |
  free (uid_log_links);
  free (uid_insn_cost);
1443 | reg_stat.release (); |
1444 | |
1445 | { |
1446 | struct undo *undo, *next; |
1447 | for (undo = undobuf.frees; undo; undo = next) |
1448 | { |
1449 | next = undo->next; |
        free (undo);
1451 | } |
1452 | undobuf.frees = 0; |
1453 | } |
1454 | |
  statistics_counter_event (cfun, "attempts", combine_attempts);
  statistics_counter_event (cfun, "merges", combine_merges);
  statistics_counter_event (cfun, "extras", combine_extras);
  statistics_counter_event (cfun, "successes", combine_successes);
1459 | |
1460 | nonzero_sign_valid = 0; |
1461 | rtl_hooks = general_rtl_hooks; |
1462 | |
1463 | /* Make recognizer allow volatile MEMs again. */ |
1464 | init_recog (); |
1465 | |
1466 | return new_direct_jump_p; |
1467 | } |
1468 | |
1469 | /* Wipe the last_xxx fields of reg_stat in preparation for another pass. */ |
1470 | |
1471 | static void |
1472 | init_reg_last (void) |
1473 | { |
1474 | unsigned int i; |
1475 | reg_stat_type *p; |
1476 | |
1477 | FOR_EACH_VEC_ELT (reg_stat, i, p) |
    memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
1479 | } |
1480 | |
1481 | /* Set up any promoted values for incoming argument registers. */ |
1482 | |
1483 | static void |
1484 | setup_incoming_promotions (rtx_insn *first) |
1485 | { |
1486 | tree arg; |
1487 | bool strictly_local = false; |
1488 | |
1489 | for (arg = DECL_ARGUMENTS (current_function_decl); arg; |
1490 | arg = DECL_CHAIN (arg)) |
1491 | { |
1492 | rtx x, reg = DECL_INCOMING_RTL (arg); |
1493 | int uns1, uns3; |
1494 | machine_mode mode1, mode2, mode3, mode4; |
1495 | |
1496 | /* Only continue if the incoming argument is in a register. */ |
1497 | if (!REG_P (reg)) |
1498 | continue; |
1499 | |
1500 | /* Determine, if possible, whether all call sites of the current |
1501 | function lie within the current compilation unit. (This does |
1502 | take into account the exporting of a function via taking its |
1503 | address, and so forth.) */ |
1504 | strictly_local |
        = cgraph_node::local_info_node (current_function_decl)->local;
1506 | |
1507 | /* The mode and signedness of the argument before any promotions happen |
1508 | (equal to the mode of the pseudo holding it at that stage). */ |
1509 | mode1 = TYPE_MODE (TREE_TYPE (arg)); |
1510 | uns1 = TYPE_UNSIGNED (TREE_TYPE (arg)); |
1511 | |
1512 | /* The mode and signedness of the argument after any source language and |
1513 | TARGET_PROMOTE_PROTOTYPES-driven promotions. */ |
1514 | mode2 = TYPE_MODE (DECL_ARG_TYPE (arg)); |
1515 | uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg)); |
1516 | |
1517 | /* The mode and signedness of the argument as it is actually passed, |
1518 | see assign_parm_setup_reg in function.cc. */ |
1519 | mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3, |
1520 | TREE_TYPE (cfun->decl), 0); |
1521 | |
1522 | /* The mode of the register in which the argument is being passed. */ |
1523 | mode4 = GET_MODE (reg); |
1524 | |
1525 | /* Eliminate sign extensions in the callee when: |
1526 | (a) A mode promotion has occurred; */ |
1527 | if (mode1 == mode3) |
1528 | continue; |
1529 | /* (b) The mode of the register is the same as the mode of |
1530 | the argument as it is passed; */ |
1531 | if (mode3 != mode4) |
1532 | continue; |
1533 | /* (c) There's no language level extension; */ |
1534 | if (mode1 == mode2) |
1535 | ; |
1536 | /* (c.1) All callers are from the current compilation unit. If that's |
1537 | the case we don't have to rely on an ABI, we only have to know |
1538 | what we're generating right now, and we know that we will do the |
1539 | mode1 to mode2 promotion with the given sign. */ |
1540 | else if (!strictly_local) |
1541 | continue; |
1542 | /* (c.2) The combination of the two promotions is useful. This is |
1543 | true when the signs match, or if the first promotion is unsigned. |
	 In the latter case, (sign_extend (zero_extend x)) is the same as
1545 | (zero_extend (zero_extend x)), so make sure to force UNS3 true. */ |
1546 | else if (uns1) |
1547 | uns3 = true; |
1548 | else if (uns3) |
1549 | continue; |
1550 | |
1551 | /* Record that the value was promoted from mode1 to mode3, |
1552 | so that any sign extension at the head of the current |
1553 | function may be eliminated. */ |
1554 | x = gen_rtx_CLOBBER (mode1, const0_rtx); |
1555 | x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x); |
1556 | record_value_for_reg (reg, first, x); |
1557 | } |
1558 | } |
1559 | |
1560 | /* If MODE has a precision lower than PREC and SRC is a non-negative constant |
1561 | that would appear negative in MODE, sign-extend SRC for use in nonzero_bits |
1562 | because some machines (maybe most) will actually do the sign-extension and |
1563 | this is the conservative approach. |
1564 | |
1565 | ??? For 2.5, try to tighten up the MD files in this regard instead of this |
1566 | kludge. */ |
1567 | |
1568 | static rtx |
1569 | sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec) |
1570 | { |
1571 | scalar_int_mode int_mode; |
1572 | if (CONST_INT_P (src) |
1573 | && is_a <scalar_int_mode> (m: mode, result: &int_mode) |
1574 | && GET_MODE_PRECISION (mode: int_mode) < prec |
1575 | && INTVAL (src) > 0 |
1576 | && val_signbit_known_set_p (int_mode, INTVAL (src))) |
1577 | src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (int_mode)); |
1578 | |
1579 | return src; |
1580 | } |
1581 | |
1582 | /* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists) |
1583 | and SET. */ |
1584 | |
1585 | static void |
1586 | update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set, |
1587 | rtx x) |
1588 | { |
1589 | rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX; |
1590 | unsigned HOST_WIDE_INT bits = 0; |
1591 | rtx reg_equal = NULL, src = SET_SRC (set); |
1592 | unsigned int num = 0; |
1593 | |
1594 | if (reg_equal_note) |
1595 | reg_equal = XEXP (reg_equal_note, 0); |
1596 | |
1597 | if (SHORT_IMMEDIATES_SIGN_EXTEND) |
1598 | { |
1599 | src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD); |
1600 | if (reg_equal) |
	reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x),
					   BITS_PER_WORD);
1602 | } |
1603 | |
1604 | /* Don't call nonzero_bits if it cannot change anything. */ |
1605 | if (rsp->nonzero_bits != HOST_WIDE_INT_M1U) |
1606 | { |
1607 | machine_mode mode = GET_MODE (x); |
1608 | if (GET_MODE_CLASS (mode) == MODE_INT |
1609 | && HWI_COMPUTABLE_MODE_P (mode)) |
1610 | mode = nonzero_bits_mode; |
1611 | bits = nonzero_bits (src, mode); |
1612 | if (reg_equal && bits) |
1613 | bits &= nonzero_bits (reg_equal, mode); |
1614 | rsp->nonzero_bits |= bits; |
1615 | } |
1616 | |
1617 | /* Don't call num_sign_bit_copies if it cannot change anything. */ |
1618 | if (rsp->sign_bit_copies != 1) |
1619 | { |
1620 | num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x)); |
1621 | if (reg_equal && maybe_ne (a: num, b: GET_MODE_PRECISION (GET_MODE (x)))) |
1622 | { |
1623 | unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x)); |
1624 | if (num == 0 || numeq > num) |
1625 | num = numeq; |
1626 | } |
1627 | if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies) |
1628 | rsp->sign_bit_copies = num; |
1629 | } |
1630 | } |
1631 | |
1632 | /* Called via note_stores. If X is a pseudo that is narrower than |
1633 | HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero. |
1634 | |
1635 | If we are setting only a portion of X and we can't figure out what |
1636 | portion, assume all bits will be used since we don't know what will |
1637 | be happening. |
1638 | |
1639 | Similarly, set how many bits of X are known to be copies of the sign bit |
1640 | at all locations in the function. This is the smallest number implied |
1641 | by any set of X. */ |
1642 | |
1643 | static void |
1644 | set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data) |
1645 | { |
1646 | rtx_insn *insn = (rtx_insn *) data; |
1647 | scalar_int_mode mode; |
1648 | |
1649 | if (REG_P (x) |
1650 | && REGNO (x) >= FIRST_PSEUDO_REGISTER |
      /* If this register is undefined at the start of the function, we
	 can't say what its contents were.  */
1653 | && ! REGNO_REG_SET_P |
1654 | (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x)) |
1655 | && is_a <scalar_int_mode> (GET_MODE (x), result: &mode) |
1656 | && HWI_COMPUTABLE_MODE_P (mode)) |
1657 | { |
1658 | reg_stat_type *rsp = ®_stat[REGNO (x)]; |
1659 | |
1660 | if (set == 0 || GET_CODE (set) == CLOBBER) |
1661 | { |
1662 | rsp->nonzero_bits = GET_MODE_MASK (mode); |
1663 | rsp->sign_bit_copies = 1; |
1664 | return; |
1665 | } |
1666 | |
1667 | /* If this register is being initialized using itself, and the |
1668 | register is uninitialized in this basic block, and there are |
1669 | no LOG_LINKS which set the register, then part of the |
1670 | register is uninitialized. In that case we can't assume |
1671 | anything about the number of nonzero bits. |
1672 | |
1673 | ??? We could do better if we checked this in |
1674 | reg_{nonzero_bits,num_sign_bit_copies}_for_combine. Then we |
1675 | could avoid making assumptions about the insn which initially |
1676 | sets the register, while still using the information in other |
1677 | insns. We would have to be careful to check every insn |
1678 | involved in the combination. */ |
1679 | |
1680 | if (insn |
1681 | && reg_referenced_p (x, PATTERN (insn)) |
1682 | && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)), |
1683 | REGNO (x))) |
1684 | { |
1685 | struct insn_link *link; |
1686 | |
1687 | FOR_EACH_LOG_LINK (link, insn) |
1688 | if (dead_or_set_p (link->insn, x)) |
1689 | break; |
1690 | if (!link) |
1691 | { |
1692 | rsp->nonzero_bits = GET_MODE_MASK (mode); |
1693 | rsp->sign_bit_copies = 1; |
1694 | return; |
1695 | } |
1696 | } |
1697 | |
1698 | /* If this is a complex assignment, see if we can convert it into a |
1699 | simple assignment. */ |
1700 | set = expand_field_assignment (set); |
1701 | |
1702 | /* If this is a simple assignment, or we have a paradoxical SUBREG, |
1703 | set what we know about X. */ |
1704 | |
1705 | if (SET_DEST (set) == x |
1706 | || (paradoxical_subreg_p (SET_DEST (set)) |
1707 | && SUBREG_REG (SET_DEST (set)) == x)) |
1708 | update_rsp_from_reg_equal (rsp, insn, set, x); |
1709 | else |
1710 | { |
1711 | rsp->nonzero_bits = GET_MODE_MASK (mode); |
1712 | rsp->sign_bit_copies = 1; |
1713 | } |
1714 | } |
1715 | } |
1716 | |
1717 | /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are |
1718 | optionally insns that were previously combined into I3 or that will be |
1719 | combined into the merger of INSN and I3. The order is PRED, PRED2, |
1720 | INSN, SUCC, SUCC2, I3. |
1721 | |
1722 | Return false if the combination is not allowed for any reason. |
1723 | |
1724 | If the combination is allowed, *PDEST will be set to the single |
1725 | destination of INSN and *PSRC to the single source, and this function |
1726 | will return true. */ |
1727 | |
1728 | static bool |
1729 | can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED, |
1730 | rtx_insn *pred2 ATTRIBUTE_UNUSED, rtx_insn *succ, rtx_insn *succ2, |
1731 | rtx *pdest, rtx *psrc) |
1732 | { |
1733 | int i; |
1734 | const_rtx set = 0; |
1735 | rtx src, dest; |
1736 | rtx_insn *p; |
1737 | rtx link; |
1738 | bool all_adjacent = true; |
1739 | bool (*is_volatile_p) (const_rtx); |
1740 | |
1741 | if (succ) |
1742 | { |
1743 | if (succ2) |
1744 | { |
1745 | if (next_active_insn (succ2) != i3) |
1746 | all_adjacent = false; |
1747 | if (next_active_insn (succ) != succ2) |
1748 | all_adjacent = false; |
1749 | } |
1750 | else if (next_active_insn (succ) != i3) |
1751 | all_adjacent = false; |
1752 | if (next_active_insn (insn) != succ) |
1753 | all_adjacent = false; |
1754 | } |
1755 | else if (next_active_insn (insn) != i3) |
1756 | all_adjacent = false; |
1757 | |
1758 | /* Can combine only if previous insn is a SET of a REG or a SUBREG, |
1759 | or a PARALLEL consisting of such a SET and CLOBBERs. |
1760 | |
1761 | If INSN has CLOBBER parallel parts, ignore them for our processing. |
1762 | By definition, these happen during the execution of the insn. When it |
1763 | is merged with another insn, all bets are off. If they are, in fact, |
1764 | needed and aren't also supplied in I3, they may be added by |
1765 | recog_for_combine. Otherwise, it won't match. |
1766 | |
1767 | We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED |
1768 | note. |
1769 | |
1770 | Get the source and destination of INSN. If more than one, can't |
1771 | combine. */ |
1772 | |
1773 | if (GET_CODE (PATTERN (insn)) == SET) |
1774 | set = PATTERN (insn); |
1775 | else if (GET_CODE (PATTERN (insn)) == PARALLEL |
1776 | && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET) |
1777 | { |
1778 | for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++) |
1779 | { |
1780 | rtx elt = XVECEXP (PATTERN (insn), 0, i); |
1781 | |
1782 | switch (GET_CODE (elt)) |
1783 | { |
1784 | /* This is important to combine floating point insns |
1785 | for the SH4 port. */ |
1786 | case USE: |
1787 | /* Combining an isolated USE doesn't make sense. |
1788 | We depend here on combinable_i3pat to reject them. */ |
1789 | /* The code below this loop only verifies that the inputs of |
1790 | the SET in INSN do not change. We call reg_set_between_p |
1791 | to verify that the REG in the USE does not change between |
1792 | I3 and INSN. |
1793 | If the USE in INSN was for a pseudo register, the matching |
1794 | insn pattern will likely match any register; combining this |
1795 | with any other USE would only be safe if we knew that the |
1796 | used registers have identical values, or if there was |
1797 | something to tell them apart, e.g. different modes. For |
1798 | now, we forgo such complicated tests and simply disallow |
1799 | combining of USES of pseudo registers with any other USE. */ |
1800 | if (REG_P (XEXP (elt, 0)) |
1801 | && GET_CODE (PATTERN (i3)) == PARALLEL) |
1802 | { |
		rtx i3pat = PATTERN (i3);
1804 | int i = XVECLEN (i3pat, 0) - 1; |
1805 | unsigned int regno = REGNO (XEXP (elt, 0)); |
1806 | |
1807 | do |
1808 | { |
1809 | rtx i3elt = XVECEXP (i3pat, 0, i); |
1810 | |
1811 | if (GET_CODE (i3elt) == USE |
1812 | && REG_P (XEXP (i3elt, 0)) |
1813 | && (REGNO (XEXP (i3elt, 0)) == regno |
1814 | ? reg_set_between_p (XEXP (elt, 0), |
1815 | PREV_INSN (insn), i3) |
1816 | : regno >= FIRST_PSEUDO_REGISTER)) |
1817 | return false; |
1818 | } |
1819 | while (--i >= 0); |
1820 | } |
1821 | break; |
1822 | |
1823 | /* We can ignore CLOBBERs. */ |
1824 | case CLOBBER: |
1825 | break; |
1826 | |
1827 | case SET: |
1828 | /* Ignore SETs whose result isn't used but not those that |
1829 | have side-effects. */ |
1830 | if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt)) |
1831 | && insn_nothrow_p (insn) |
1832 | && !side_effects_p (elt)) |
1833 | break; |
1834 | |
1835 | /* If we have already found a SET, this is a second one and |
1836 | so we cannot combine with this insn. */ |
1837 | if (set) |
1838 | return false; |
1839 | |
1840 | set = elt; |
1841 | break; |
1842 | |
1843 | default: |
1844 | /* Anything else means we can't combine. */ |
1845 | return false; |
1846 | } |
1847 | } |
1848 | |
1849 | if (set == 0 |
1850 | /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs, |
1851 | so don't do anything with it. */ |
1852 | || GET_CODE (SET_SRC (set)) == ASM_OPERANDS) |
1853 | return false; |
1854 | } |
1855 | else |
1856 | return false; |
1857 | |
1858 | if (set == 0) |
1859 | return false; |
1860 | |
1861 | /* The simplification in expand_field_assignment may call back to |
1862 | get_last_value, so set safe guard here. */ |
1863 | subst_low_luid = DF_INSN_LUID (insn); |
1864 | |
1865 | set = expand_field_assignment (set); |
1866 | src = SET_SRC (set), dest = SET_DEST (set); |
1867 | |
  /* Do not eliminate a user-specified register if it appears in an asm
     input, because doing so could break the register asm usage described
     in the GCC manual.  Be aware that this may cover more cases than we
     expect, but that should be harmless.  */
1873 | if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest) |
1874 | && extract_asm_operands (PATTERN (insn: i3))) |
1875 | return false; |
1876 | |
1877 | /* Don't eliminate a store in the stack pointer. */ |
1878 | if (dest == stack_pointer_rtx |
1879 | /* Don't combine with an insn that sets a register to itself if it has |
1880 | a REG_EQUAL note. This may be part of a LIBCALL sequence. */ |
1881 | || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX)) |
1882 | /* Can't merge an ASM_OPERANDS. */ |
1883 | || GET_CODE (src) == ASM_OPERANDS |
1884 | /* Can't merge a function call. */ |
1885 | || GET_CODE (src) == CALL |
1886 | /* Don't eliminate a function call argument. */ |
1887 | || (CALL_P (i3) |
1888 | && (find_reg_fusage (i3, USE, dest) |
1889 | || (REG_P (dest) |
1890 | && REGNO (dest) < FIRST_PSEUDO_REGISTER |
1891 | && global_regs[REGNO (dest)]))) |
1892 | /* Don't substitute into an incremented register. */ |
1893 | || FIND_REG_INC_NOTE (i3, dest) |
1894 | || (succ && FIND_REG_INC_NOTE (succ, dest)) |
1895 | || (succ2 && FIND_REG_INC_NOTE (succ2, dest)) |
1896 | /* Don't substitute into a non-local goto, this confuses CFG. */ |
1897 | || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX)) |
1898 | /* Make sure that DEST is not used after INSN but before SUCC, or |
1899 | after SUCC and before SUCC2, or after SUCC2 but before I3. */ |
1900 | || (!all_adjacent |
1901 | && ((succ2 |
1902 | && (reg_used_between_p (dest, succ2, i3) |
1903 | || reg_used_between_p (dest, succ, succ2))) |
1904 | || (!succ2 && succ && reg_used_between_p (dest, succ, i3)) |
1905 | || (!succ2 && !succ && reg_used_between_p (dest, insn, i3)) |
1906 | || (succ |
1907 | /* SUCC and SUCC2 can be split halves from a PARALLEL; in |
1908 | that case SUCC is not in the insn stream, so use SUCC2 |
1909 | instead for this test. */ |
1910 | && reg_used_between_p (dest, insn, |
1911 | succ2 |
1912 | && INSN_UID (insn: succ) == INSN_UID (insn: succ2) |
1913 | ? succ2 : succ)))) |
1914 | /* Make sure that the value that is to be substituted for the register |
1915 | does not use any registers whose values alter in between. However, |
     if the insns are adjacent, a use can't cross a set even though we
1917 | think it might (this can happen for a sequence of insns each setting |
1918 | the same destination; last_set of that register might point to |
1919 | a NOTE). If INSN has a REG_EQUIV note, the register is always |
1920 | equivalent to the memory so the substitution is valid even if there |
1921 | are intervening stores. Also, don't move a volatile asm or |
1922 | UNSPEC_VOLATILE across any other insns. */ |
1923 | || (! all_adjacent |
1924 | && (((!MEM_P (src) |
1925 | || ! find_reg_note (insn, REG_EQUIV, src)) |
1926 | && modified_between_p (src, insn, i3)) |
1927 | || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src)) |
1928 | || GET_CODE (src) == UNSPEC_VOLATILE)) |
1929 | /* Don't combine across a CALL_INSN, because that would possibly |
1930 | change whether the life span of some REGs crosses calls or not, |
1931 | and it is a pain to update that information. |
1932 | Exception: if source is a constant, moving it later can't hurt. |
1933 | Accept that as a special case. */ |
1934 | || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src))) |
1935 | return false; |
1936 | |
1937 | /* DEST must be a REG. */ |
1938 | if (REG_P (dest)) |
1939 | { |
1940 | /* If register alignment is being enforced for multi-word items in all |
1941 | cases except for parameters, it is possible to have a register copy |
1942 | insn referencing a hard register that is not allowed to contain the |
1943 | mode being copied and which would not be valid as an operand of most |
1944 | insns. Eliminate this problem by not combining with such an insn. |
1945 | |
1946 | Also, on some machines we don't want to extend the life of a hard |
1947 | register. */ |
1948 | |
1949 | if (REG_P (src) |
1950 | && ((REGNO (dest) < FIRST_PSEUDO_REGISTER |
1951 | && !targetm.hard_regno_mode_ok (REGNO (dest), GET_MODE (dest))) |
1952 | /* Don't extend the life of a hard register unless it is |
1953 | user variable (if we have few registers) or it can't |
1954 | fit into the desired register (meaning something special |
1955 | is going on). |
1956 | Also avoid substituting a return register into I3, because |
1957 | reload can't handle a conflict with constraints of other |
1958 | inputs. */ |
1959 | || (REGNO (src) < FIRST_PSEUDO_REGISTER |
1960 | && !targetm.hard_regno_mode_ok (REGNO (src), |
1961 | GET_MODE (src))))) |
1962 | return false; |
1963 | } |
1964 | else |
1965 | return false; |
1966 | |
1967 | |
1968 | if (GET_CODE (PATTERN (i3)) == PARALLEL) |
1969 | for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--) |
1970 | if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER) |
1971 | { |
1972 | rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0); |
1973 | |
1974 | /* If the clobber represents an earlyclobber operand, we must not |
1975 | substitute an expression containing the clobbered register. |
1976 | As we do not analyze the constraint strings here, we have to |
1977 | make the conservative assumption. However, if the register is |
1978 | a fixed hard reg, the clobber cannot represent any operand; |
1979 | we leave it up to the machine description to either accept or |
1980 | reject use-and-clobber patterns. */ |
1981 | if (!REG_P (reg) |
1982 | || REGNO (reg) >= FIRST_PSEUDO_REGISTER |
1983 | || !fixed_regs[REGNO (reg)]) |
1984 | if (reg_overlap_mentioned_p (reg, src)) |
1985 | return false; |
1986 | } |
1987 | |
1988 | /* If INSN contains anything volatile, or is an `asm' (whether volatile |
     or not), reject, unless nothing volatile comes between it and I3.  */
1990 | |
1991 | if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src)) |
1992 | { |
1993 | /* Make sure neither succ nor succ2 contains a volatile reference. */ |
1994 | if (succ2 != 0 && volatile_refs_p (PATTERN (insn: succ2))) |
1995 | return false; |
1996 | if (succ != 0 && volatile_refs_p (PATTERN (insn: succ))) |
1997 | return false; |
1998 | /* We'll check insns between INSN and I3 below. */ |
1999 | } |
2000 | |
2001 | /* If INSN is an asm, and DEST is a hard register, reject, since it has |
2002 | to be an explicit register variable, and was chosen for a reason. */ |
2003 | |
2004 | if (GET_CODE (src) == ASM_OPERANDS |
2005 | && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER) |
2006 | return false; |
2007 | |
2008 | /* If INSN contains volatile references (specifically volatile MEMs), |
2009 | we cannot combine across any other volatile references. |
2010 | Even if INSN doesn't contain volatile references, any intervening |
2011 | volatile insn might affect machine state. */ |
2012 | |
2013 | is_volatile_p = volatile_refs_p (PATTERN (insn)) |
2014 | ? volatile_refs_p |
2015 | : volatile_insn_p; |
2016 | |
  for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
    if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
2019 | return false; |
2020 | |
2021 | /* If INSN contains an autoincrement or autodecrement, make sure that |
2022 | register is not used between there and I3, and not already used in |
2023 | I3 either. Neither must it be used in PRED or SUCC, if they exist. |
2024 | Also insist that I3 not be a jump if using LRA; if it were one |
2025 | and the incremented register were spilled, we would lose. |
2026 | Reload handles this correctly. */ |
2027 | |
2028 | if (AUTO_INC_DEC) |
2029 | for (link = REG_NOTES (insn); link; link = XEXP (link, 1)) |
2030 | if (REG_NOTE_KIND (link) == REG_INC |
2031 | && ((JUMP_P (i3) && targetm.lra_p ()) |
2032 | || reg_used_between_p (XEXP (link, 0), insn, i3) |
2033 | || (pred != NULL_RTX |
2034 | && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (insn: pred))) |
2035 | || (pred2 != NULL_RTX |
2036 | && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (insn: pred2))) |
2037 | || (succ != NULL_RTX |
2038 | && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (insn: succ))) |
2039 | || (succ2 != NULL_RTX |
2040 | && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (insn: succ2))) |
2041 | || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (insn: i3)))) |
2042 | return false; |
2043 | |
2044 | /* If we get here, we have passed all the tests and the combination is |
2045 | to be allowed. */ |
2046 | |
2047 | *pdest = dest; |
2048 | *psrc = src; |
2049 | |
2050 | return true; |
2051 | } |
2052 | |
2053 | /* LOC is the location within I3 that contains its pattern or the component |
2054 | of a PARALLEL of the pattern. We validate that it is valid for combining. |
2055 | |
2056 | One problem is if I3 modifies its output, as opposed to replacing it |
2057 | entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as |
2058 | doing so would produce an insn that is not equivalent to the original insns. |
2059 | |
2060 | Consider: |
2061 | |
2062 | (set (reg:DI 101) (reg:DI 100)) |
2063 | (set (subreg:SI (reg:DI 101) 0) <foo>) |
2064 | |
2065 | This is NOT equivalent to: |
2066 | |
2067 | (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>) |
2068 | (set (reg:DI 101) (reg:DI 100))]) |
2069 | |
2070 | Not only does this modify 100 (in which case it might still be valid |
2071 | if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100. |
2072 | |
2073 | We can also run into a problem if I2 sets a register that I1 |
2074 | uses and I1 gets directly substituted into I3 (not via I2). In that |
2075 | case, we would be getting the wrong value of I2DEST into I3, so we |
2076 | must reject the combination. This case occurs when I2 and I1 both |
2077 | feed into I3, rather than when I1 feeds into I2, which feeds into I3. |
2078 | If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source |
2079 | of a SET must prevent combination from occurring. The same situation |
2080 | can occur for I0, in which case I0_NOT_IN_SRC is set. |
2081 | |
2082 | Before doing the above check, we first try to expand a field assignment |
2083 | into a set of logical operations. |
2084 | |
2085 | If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which |
2086 | we place a register that is both set and used within I3. If more than one |
2087 | such register is detected, we fail. |
2088 | |
2089 | Return true if the combination is valid, false otherwise. */ |
2090 | |
2091 | static bool |
2092 | combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest, |
2093 | bool i1_not_in_src, bool i0_not_in_src, rtx *pi3dest_killed) |
2094 | { |
2095 | rtx x = *loc; |
2096 | |
2097 | if (GET_CODE (x) == SET) |
2098 | { |
      rtx set = x;
2100 | rtx dest = SET_DEST (set); |
2101 | rtx src = SET_SRC (set); |
2102 | rtx inner_dest = dest; |
2103 | rtx subdest; |
2104 | |
2105 | while (GET_CODE (inner_dest) == STRICT_LOW_PART |
2106 | || GET_CODE (inner_dest) == SUBREG |
2107 | || GET_CODE (inner_dest) == ZERO_EXTRACT) |
2108 | inner_dest = XEXP (inner_dest, 0); |
2109 | |
2110 | /* Check for the case where I3 modifies its output, as discussed |
2111 | above. We don't want to prevent pseudos from being combined |
2112 | into the address of a MEM, so only prevent the combination if |
2113 | i1 or i2 set the same MEM. */ |
2114 | if ((inner_dest != dest && |
2115 | (!MEM_P (inner_dest) |
2116 | || rtx_equal_p (i2dest, inner_dest) |
2117 | || (i1dest && rtx_equal_p (i1dest, inner_dest)) |
2118 | || (i0dest && rtx_equal_p (i0dest, inner_dest))) |
2119 | && (reg_overlap_mentioned_p (i2dest, inner_dest) |
2120 | || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest)) |
2121 | || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest)))) |
2122 | |
2123 | /* This is the same test done in can_combine_p except we can't test |
2124 | all_adjacent; we don't have to, since this instruction will stay |
2125 | in place, thus we are not considering increasing the lifetime of |
2126 | INNER_DEST. |
2127 | |
2128 | Also, if this insn sets a function argument, combining it with |
2129 | something that might need a spill could clobber a previous |
2130 | function argument; the all_adjacent test in can_combine_p also |
2131 | checks this; here, we do a more specific test for this case. */ |
2132 | |
2133 | || (REG_P (inner_dest) |
2134 | && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER |
2135 | && !targetm.hard_regno_mode_ok (REGNO (inner_dest), |
2136 | GET_MODE (inner_dest))) |
2137 | || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src)) |
2138 | || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src))) |
2139 | return false; |
2140 | |
2141 | /* If DEST is used in I3, it is being killed in this insn, so |
2142 | record that for later. We have to consider paradoxical |
2143 | subregs here, since they kill the whole register, but we |
2144 | ignore partial subregs, STRICT_LOW_PART, etc. |
2145 | Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the |
2146 | STACK_POINTER_REGNUM, since these are always considered to be |
2147 | live. Similarly for ARG_POINTER_REGNUM if it is fixed. */ |
2148 | subdest = dest; |
2149 | if (GET_CODE (subdest) == SUBREG && !partial_subreg_p (x: subdest)) |
2150 | subdest = SUBREG_REG (subdest); |
2151 | if (pi3dest_killed |
2152 | && REG_P (subdest) |
2153 | && reg_referenced_p (subdest, PATTERN (insn: i3)) |
2154 | && REGNO (subdest) != FRAME_POINTER_REGNUM |
2155 | && (HARD_FRAME_POINTER_IS_FRAME_POINTER |
2156 | || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM) |
2157 | && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM |
2158 | || (REGNO (subdest) != ARG_POINTER_REGNUM |
2159 | || ! fixed_regs [REGNO (subdest)])) |
2160 | && REGNO (subdest) != STACK_POINTER_REGNUM) |
2161 | { |
2162 | if (*pi3dest_killed) |
2163 | return false; |
2164 | |
2165 | *pi3dest_killed = subdest; |
2166 | } |
2167 | } |
2168 | |
2169 | else if (GET_CODE (x) == PARALLEL) |
2170 | { |
2171 | int i; |
2172 | |
2173 | for (i = 0; i < XVECLEN (x, 0); i++) |
	if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2175 | i1_not_in_src, i0_not_in_src, pi3dest_killed)) |
2176 | return false; |
2177 | } |
2178 | |
2179 | return true; |
2180 | } |
2181 | |
2182 | /* Return true if X is an arithmetic expression that contains a multiplication |
2183 | and division. We don't count multiplications by powers of two here. */ |
2184 | |
2185 | static bool |
2186 | contains_muldiv (rtx x) |
2187 | { |
2188 | switch (GET_CODE (x)) |
2189 | { |
2190 | case MOD: case DIV: case UMOD: case UDIV: |
2191 | return true; |
2192 | |
2193 | case MULT: |
2194 | return ! (CONST_INT_P (XEXP (x, 1)) |
2195 | && pow2p_hwi (UINTVAL (XEXP (x, 1)))); |
2196 | default: |
2197 | if (BINARY_P (x)) |
2198 | return contains_muldiv (XEXP (x, 0)) |
2199 | || contains_muldiv (XEXP (x, 1)); |
2200 | |
2201 | if (UNARY_P (x)) |
2202 | return contains_muldiv (XEXP (x, 0)); |
2203 | |
2204 | return false; |
2205 | } |
2206 | } |
2207 | |
2208 | /* Determine whether INSN can be used in a combination. Return true if |
2209 | not. This is used in try_combine to detect early some cases where we |
2210 | can't perform combinations. */ |
2211 | |
2212 | static bool |
2213 | cant_combine_insn_p (rtx_insn *insn) |
2214 | { |
2215 | rtx set; |
2216 | rtx src, dest; |
2217 | |
2218 | /* If this isn't really an insn, we can't do anything. |
2219 | This can occur when flow deletes an insn that it has merged into an |
2220 | auto-increment address. */ |
2221 | if (!NONDEBUG_INSN_P (insn)) |
2222 | return true; |
2223 | |
2224 | /* Never combine loads and stores involving hard regs that are likely |
2225 | to be spilled. The register allocator can usually handle such |
2226 | reg-reg moves by tying. If we allow the combiner to make |
2227 | substitutions of likely-spilled regs, reload might die. |
2228 | As an exception, we allow combinations involving fixed regs; these are |
2229 | not available to the register allocator so there's no risk involved. */ |
2230 | |
2231 | set = single_set (insn); |
2232 | if (! set) |
2233 | return false; |
2234 | src = SET_SRC (set); |
2235 | dest = SET_DEST (set); |
2236 | if (GET_CODE (src) == SUBREG) |
2237 | src = SUBREG_REG (src); |
2238 | if (GET_CODE (dest) == SUBREG) |
2239 | dest = SUBREG_REG (dest); |
2240 | if (REG_P (src) && REG_P (dest) |
2241 | && ((HARD_REGISTER_P (src) |
2242 | && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src)) |
2243 | #ifdef LEAF_REGISTERS |
2244 | && ! LEAF_REGISTERS [REGNO (src)]) |
2245 | #else |
2246 | ) |
2247 | #endif |
2248 | || (HARD_REGISTER_P (dest) |
2249 | && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest)) |
2250 | && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest)))))) |
2251 | return true; |
2252 | |
2253 | return false; |
2254 | } |
2255 | |
2256 | struct likely_spilled_retval_info |
2257 | { |
2258 | unsigned regno, nregs; |
2259 | unsigned mask; |
2260 | }; |
2261 | |
2262 | /* Called via note_stores by likely_spilled_retval_p. Remove from info->mask |
2263 | hard registers that are known to be written to / clobbered in full. */ |
2264 | static void |
2265 | likely_spilled_retval_1 (rtx x, const_rtx set, void *data) |
2266 | { |
2267 | struct likely_spilled_retval_info *const info = |
2268 | (struct likely_spilled_retval_info *) data; |
2269 | unsigned regno, nregs; |
2270 | unsigned new_mask; |
2271 | |
2272 | if (!REG_P (XEXP (set, 0))) |
2273 | return; |
2274 | regno = REGNO (x); |
2275 | if (regno >= info->regno + info->nregs) |
2276 | return; |
2277 | nregs = REG_NREGS (x); |
2278 | if (regno + nregs <= info->regno) |
2279 | return; |
2280 | new_mask = (2U << (nregs - 1)) - 1; |
2281 | if (regno < info->regno) |
2282 | new_mask >>= info->regno - regno; |
2283 | else |
2284 | new_mask <<= regno - info->regno; |
2285 | info->mask &= ~new_mask; |
2286 | } |
2287 | |
2288 | /* Return true iff part of the return value is live during INSN, and |
2289 | it is likely spilled. This can happen when more than one insn is needed |
   to copy the return value, e.g. when we consider combining into the
2291 | second copy insn for a complex value. */ |
2292 | |
2293 | static bool |
2294 | likely_spilled_retval_p (rtx_insn *insn) |
2295 | { |
2296 | rtx_insn *use = BB_END (this_basic_block); |
2297 | rtx reg; |
2298 | rtx_insn *p; |
2299 | unsigned regno, nregs; |
2300 | /* We assume here that no machine mode needs more than |
2301 | 32 hard registers when the value overlaps with a register |
2302 | for which TARGET_FUNCTION_VALUE_REGNO_P is true. */ |
2303 | unsigned mask; |
2304 | struct likely_spilled_retval_info info; |
2305 | |
2306 | if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use) |
2307 | return false; |
2308 | reg = XEXP (PATTERN (use), 0); |
2309 | if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg))) |
2310 | return false; |
2311 | regno = REGNO (reg); |
2312 | nregs = REG_NREGS (reg); |
2313 | if (nregs == 1) |
2314 | return false; |
2315 | mask = (2U << (nregs - 1)) - 1; |
2316 | |
2317 | /* Disregard parts of the return value that are set later. */ |
2318 | info.regno = regno; |
2319 | info.nregs = nregs; |
2320 | info.mask = mask; |
  for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
2322 | if (INSN_P (p)) |
2323 | note_stores (p, likely_spilled_retval_1, &info); |
2324 | mask = info.mask; |
2325 | |
2326 | /* Check if any of the (probably) live return value registers is |
2327 | likely spilled. */ |
  nregs--;
2329 | do |
2330 | { |
2331 | if ((mask & 1 << nregs) |
2332 | && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs))) |
2333 | return true; |
2334 | } while (nregs--); |
2335 | return false; |
2336 | } |
2337 | |
2338 | /* Adjust INSN after we made a change to its destination. |
2339 | |
2340 | Changing the destination can invalidate notes that say something about |
2341 | the results of the insn and a LOG_LINK pointing to the insn. */ |
2342 | |
2343 | static void |
2344 | adjust_for_new_dest (rtx_insn *insn) |
2345 | { |
2346 | /* For notes, be conservative and simply remove them. */ |
2347 | remove_reg_equal_equiv_notes (insn, true); |
2348 | |
2349 | /* The new insn will have a destination that was previously the destination |
2350 | of an insn just above it. Call distribute_links to make a LOG_LINK from |
2351 | the next use of that destination. */ |
2352 | |
2353 | rtx set = single_set (insn); |
2354 | gcc_assert (set); |
2355 | |
2356 | rtx reg = SET_DEST (set); |
2357 | |
2358 | while (GET_CODE (reg) == ZERO_EXTRACT |
2359 | || GET_CODE (reg) == STRICT_LOW_PART |
2360 | || GET_CODE (reg) == SUBREG) |
2361 | reg = XEXP (reg, 0); |
2362 | gcc_assert (REG_P (reg)); |
2363 | |
2364 | distribute_links (alloc_insn_link (insn, REGNO (reg), NULL)); |
2365 | |
2366 | df_insn_rescan (insn); |
2367 | } |
2368 | |
2369 | /* Return TRUE if combine can reuse reg X in mode MODE. |
   ADDED_SETS is true if the original set is still required.  */
2371 | static bool |
2372 | can_change_dest_mode (rtx x, bool added_sets, machine_mode mode) |
2373 | { |
2374 | unsigned int regno; |
2375 | |
2376 | if (!REG_P (x)) |
2377 | return false; |
2378 | |
2379 | /* Don't change between modes with different underlying register sizes, |
2380 | since this could lead to invalid subregs. */ |
2381 | if (maybe_ne (REGMODE_NATURAL_SIZE (mode), |
2382 | REGMODE_NATURAL_SIZE (GET_MODE (x)))) |
2383 | return false; |
2384 | |
2385 | regno = REGNO (x); |
2386 | /* Allow hard registers if the new mode is legal, and occupies no more |
2387 | registers than the old mode. */ |
2388 | if (regno < FIRST_PSEUDO_REGISTER) |
2389 | return (targetm.hard_regno_mode_ok (regno, mode) |
2390 | && REG_NREGS (x) >= hard_regno_nregs (regno, mode)); |
2391 | |
2392 | /* Or a pseudo that is only used once. */ |
2393 | return (regno < reg_n_sets_max |
2394 | && REG_N_SETS (regno) == 1 |
2395 | && !added_sets |
2396 | && !REG_USERVAR_P (x)); |
2397 | } |
2398 | |
2399 | |
2400 | /* Check whether X, the destination of a set, refers to part of |
2401 | the register specified by REG. */ |
2402 | |
2403 | static bool |
2404 | reg_subword_p (rtx x, rtx reg) |
2405 | { |
2406 | /* Check that reg is an integer mode register. */ |
2407 | if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT) |
2408 | return false; |
2409 | |
2410 | if (GET_CODE (x) == STRICT_LOW_PART |
2411 | || GET_CODE (x) == ZERO_EXTRACT) |
2412 | x = XEXP (x, 0); |
2413 | |
2414 | return GET_CODE (x) == SUBREG |
2415 | && !paradoxical_subreg_p (x) |
2416 | && SUBREG_REG (x) == reg |
2417 | && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT; |
2418 | } |
2419 | |
2420 | /* Return whether PAT is a PARALLEL of exactly N register SETs followed |
2421 | by an arbitrary number of CLOBBERs. */ |
2422 | static bool |
2423 | is_parallel_of_n_reg_sets (rtx pat, int n) |
2424 | { |
2425 | if (GET_CODE (pat) != PARALLEL) |
2426 | return false; |
2427 | |
2428 | int len = XVECLEN (pat, 0); |
2429 | if (len < n) |
2430 | return false; |
2431 | |
2432 | int i; |
2433 | for (i = 0; i < n; i++) |
2434 | if (GET_CODE (XVECEXP (pat, 0, i)) != SET |
2435 | || !REG_P (SET_DEST (XVECEXP (pat, 0, i)))) |
2436 | return false; |
2437 | for ( ; i < len; i++) |
2438 | switch (GET_CODE (XVECEXP (pat, 0, i))) |
2439 | { |
2440 | case CLOBBER: |
2441 | if (XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx) |
2442 | return false; |
2443 | break; |
2444 | default: |
2445 | return false; |
2446 | } |
2447 | return true; |
2448 | } |
2449 | |
2450 | /* Return whether INSN, a PARALLEL of N register SETs (and maybe some |
2451 | CLOBBERs), can be split into individual SETs in that order, without |
2452 | changing semantics. */ |
2453 | static bool |
2454 | can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n) |
2455 | { |
2456 | if (!insn_nothrow_p (insn)) |
2457 | return false; |
2458 | |
2459 | rtx pat = PATTERN (insn); |
2460 | |
2461 | int i, j; |
2462 | for (i = 0; i < n; i++) |
2463 | { |
2464 | if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i)))) |
2465 | return false; |
2466 | |
2467 | rtx reg = SET_DEST (XVECEXP (pat, 0, i)); |
2468 | |
2469 | for (j = i + 1; j < n; j++) |
2470 | if (reg_referenced_p (reg, XVECEXP (pat, 0, j))) |
2471 | return false; |
2472 | } |
2473 | |
2474 | return true; |
2475 | } |
2476 | |
2477 | /* Return whether X is just a single_set, with the source |
2478 | a general_operand. */ |
2479 | static bool |
2480 | is_just_move (rtx_insn *x) |
2481 | { |
  rtx set = single_set (x);
2483 | if (!set) |
2484 | return false; |
2485 | |
2486 | return general_operand (SET_SRC (set), VOIDmode); |
2487 | } |
2488 | |
2489 | /* Callback function to count autoincs. */ |
2490 | |
2491 | static int |
2492 | count_auto_inc (rtx, rtx, rtx, rtx, rtx, void *arg) |
2493 | { |
2494 | (*((int *) arg))++; |
2495 | |
2496 | return 0; |
2497 | } |
2498 | |
2499 | /* Try to combine the insns I0, I1 and I2 into I3. |
2500 | Here I0, I1 and I2 appear earlier than I3. |
2501 | I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into |
2502 | I3. |
2503 | |
2504 | If we are combining more than two insns and the resulting insn is not |
2505 | recognized, try splitting it into two insns. If that happens, I2 and I3 |
2506 | are retained and I1/I0 are pseudo-deleted by turning them into a NOTE. |
2507 | Otherwise, I0, I1 and I2 are pseudo-deleted. |
2508 | |
2509 | Return 0 if the combination does not work. Then nothing is changed. |
2510 | If we did the combination, return the insn at which combine should |
2511 | resume scanning. |
2512 | |
2513 | Set NEW_DIRECT_JUMP_P to true if try_combine creates a |
2514 | new direct jump instruction. |
2515 | |
2516 | LAST_COMBINED_INSN is either I3, or some insn after I3 that has |
2517 | been I3 passed to an earlier try_combine within the same basic |
2518 | block. */ |
2519 | |
2520 | static rtx_insn * |
2521 | try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0, |
2522 | bool *new_direct_jump_p, rtx_insn *last_combined_insn) |
2523 | { |
2524 | /* New patterns for I3 and I2, respectively. */ |
2525 | rtx newpat, newi2pat = 0; |
2526 | rtvec newpat_vec_with_clobbers = 0; |
2527 | bool substed_i2 = false, substed_i1 = false, substed_i0 = false; |
2528 | /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not |
2529 | dead. */ |
2530 | bool added_sets_0, added_sets_1, added_sets_2; |
2531 | /* Total number of SETs to put into I3. */ |
2532 | int total_sets; |
2533 | /* Nonzero if I2's or I1's body now appears in I3. */ |
2534 | int i2_is_used = 0, i1_is_used = 0; |
2535 | /* INSN_CODEs for new I3, new I2, and user of condition code. */ |
2536 | int insn_code_number, i2_code_number = 0, other_code_number = 0; |
2537 | /* Contains I3 if the destination of I3 is used in its source, which means |
2538 | that the old life of I3 is being killed. If that usage is placed into |
2539 | I2 and not in I3, a REG_DEAD note must be made. */ |
2540 | rtx i3dest_killed = 0; |
2541 | /* SET_DEST and SET_SRC of I2, I1 and I0. */ |
2542 | rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0; |
2543 | /* Copy of SET_SRC of I1 and I0, if needed. */ |
2544 | rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0; |
2545 | /* Set if I2DEST was reused as a scratch register. */ |
2546 | bool i2scratch = false; |
2547 | /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases. */ |
2548 | rtx i0pat = 0, i1pat = 0, i2pat = 0; |
  /* Indicates if I2DEST or I1DEST is in I2SRC or I1SRC.  */
  bool i2dest_in_i2src = false, i1dest_in_i1src = false;
  bool i2dest_in_i1src = false, i0dest_in_i0src = false;
  bool i1dest_in_i0src = false, i2dest_in_i0src = false;
2553 | bool i2dest_killed = false, i1dest_killed = false, i0dest_killed = false; |
2554 | bool i1_feeds_i2_n = false, i0_feeds_i2_n = false, i0_feeds_i1_n = false; |
2555 | /* Notes that must be added to REG_NOTES in I3 and I2. */ |
2556 | rtx new_i3_notes, new_i2_notes; |
2557 | /* Notes that we substituted I3 into I2 instead of the normal case. */ |
2558 | bool i3_subst_into_i2 = false; |
2559 | /* Notes that I1, I2 or I3 is a MULT operation. */ |
2560 | bool have_mult = false; |
2561 | bool swap_i2i3 = false; |
2562 | bool split_i2i3 = false; |
2563 | bool changed_i3_dest = false; |
2564 | bool i2_was_move = false, i3_was_move = false; |
2565 | int n_auto_inc = 0; |
2566 | |
2567 | int maxreg; |
2568 | rtx_insn *temp_insn; |
2569 | rtx temp_expr; |
2570 | struct insn_link *link; |
2571 | rtx other_pat = 0; |
2572 | rtx new_other_notes; |
2573 | int i; |
2574 | scalar_int_mode dest_mode, temp_mode; |
2575 | bool has_non_call_exception = false; |
2576 | |
2577 | /* Immediately return if any of I0,I1,I2 are the same insn (I3 can |
2578 | never be). */ |
2579 | if (i1 == i2 || i0 == i2 || (i0 && i0 == i1)) |
2580 | return 0; |
2581 | |
2582 | /* Only try four-insn combinations when there's high likelihood of |
2583 | success. Look for simple insns, such as loads of constants or |
2584 | binary operations involving a constant. */ |
2585 | if (i0) |
2586 | { |
2587 | int i; |
2588 | int ngood = 0; |
2589 | int nshift = 0; |
2590 | rtx set0, set3; |
2591 | |
2592 | if (!flag_expensive_optimizations) |
2593 | return 0; |
2594 | |
2595 | for (i = 0; i < 4; i++) |
2596 | { |
2597 | rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3; |
2598 | rtx set = single_set (insn); |
2599 | rtx src; |
2600 | if (!set) |
2601 | continue; |
2602 | src = SET_SRC (set); |
2603 | if (CONSTANT_P (src)) |
2604 | { |
2605 | ngood += 2; |
2606 | break; |
2607 | } |
2608 | else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1))) |
2609 | ngood++; |
2610 | else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT |
2611 | || GET_CODE (src) == LSHIFTRT) |
2612 | nshift++; |
2613 | } |
2614 | |
2615 | /* If I0 loads a memory and I3 sets the same memory, then I1 and I2 |
2616 | are likely manipulating its value. Ideally we'll be able to combine |
2617 | all four insns into a bitfield insertion of some kind. |
2618 | |
2619 | Note the source in I0 might be inside a sign/zero extension and the |
2620 | memory modes in I0 and I3 might be different. So extract the address |
2621 | from the destination of I3 and search for it in the source of I0. |
2622 | |
2623 | In the event that there's a match but the source/dest do not actually |
2624 | refer to the same memory, the worst that happens is we try some |
2625 | combinations that we wouldn't have otherwise. */ |
      if ((set0 = single_set (i0))
2627 | /* Ensure the source of SET0 is a MEM, possibly buried inside |
2628 | an extension. */ |
2629 | && (GET_CODE (SET_SRC (set0)) == MEM |
2630 | || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND |
2631 | || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND) |
2632 | && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM)) |
2633 | && (set3 = single_set (insn: i3)) |
2634 | /* Ensure the destination of SET3 is a MEM. */ |
2635 | && GET_CODE (SET_DEST (set3)) == MEM |
2636 | /* Would it be better to extract the base address for the MEM |
2637 | in SET3 and look for that? I don't have cases where it matters |
2638 | but I could envision such cases. */ |
2639 | && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0))) |
2640 | ngood += 2; |
2641 | |
2642 | if (ngood < 2 && nshift < 2) |
2643 | return 0; |
2644 | } |
2645 | |
2646 | /* Exit early if one of the insns involved can't be used for |
2647 | combinations. */ |
2648 | if (CALL_P (i2) |
2649 | || (i1 && CALL_P (i1)) |
2650 | || (i0 && CALL_P (i0)) |
      || cant_combine_insn_p (i3)
      || cant_combine_insn_p (i2)
      || (i1 && cant_combine_insn_p (i1))
      || (i0 && cant_combine_insn_p (i0))
      || likely_spilled_retval_p (i3))
2656 | return 0; |
2657 | |
2658 | combine_attempts++; |
2659 | undobuf.other_insn = 0; |
2660 | |
2661 | /* Reset the hard register usage information. */ |
2662 | CLEAR_HARD_REG_SET (set&: newpat_used_regs); |
2663 | |
2664 | if (dump_file && (dump_flags & TDF_DETAILS)) |
2665 | { |
2666 | if (i0) |
	fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
		 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
      else if (i1)
	fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
		 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
      else
	fprintf (dump_file, "\nTrying %d -> %d:\n",
		 INSN_UID (i2), INSN_UID (i3));
2675 | |
2676 | if (i0) |
2677 | dump_insn_slim (dump_file, i0); |
2678 | if (i1) |
2679 | dump_insn_slim (dump_file, i1); |
2680 | dump_insn_slim (dump_file, i2); |
2681 | dump_insn_slim (dump_file, i3); |
2682 | } |
2683 | |
2684 | /* If multiple insns feed into one of I2 or I3, they can be in any |
2685 | order. To simplify the code below, reorder them in sequence. */ |
2686 | if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2)) |
2687 | std::swap (a&: i0, b&: i2); |
2688 | if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1)) |
2689 | std::swap (a&: i0, b&: i1); |
2690 | if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2)) |
2691 | std::swap (a&: i1, b&: i2); |
2692 | |
2693 | added_links_insn = 0; |
2694 | added_notes_insn = 0; |
2695 | |
2696 | /* First check for one important special case that the code below will |
2697 | not handle. Namely, the case where I1 is zero, I2 is a PARALLEL |
2698 | and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case, |
2699 | we may be able to replace that destination with the destination of I3. |
2700 | This occurs in the common code where we compute both a quotient and |
2701 | remainder into a structure, in which case we want to do the computation |
2702 | directly into the structure to avoid register-register copies. |
2703 | |
2704 | Note that this case handles both multiple sets in I2 and also cases |
2705 | where I2 has a number of CLOBBERs inside the PARALLEL. |
2706 | |
2707 | We make very conservative checks below and only try to handle the |
2708 | most common cases of this. For example, we only handle the case |
2709 | where I2 and I3 are adjacent to avoid making difficult register |
2710 | usage tests. */ |
2711 | |
2712 | if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET |
2713 | && REG_P (SET_SRC (PATTERN (i3))) |
2714 | && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER |
2715 | && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3))) |
2716 | && GET_CODE (PATTERN (i2)) == PARALLEL |
2717 | && ! side_effects_p (SET_DEST (PATTERN (i3))) |
2718 | /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code |
2719 | below would need to check what is inside (and reg_overlap_mentioned_p |
2720 | doesn't support those codes anyway). Don't allow those destinations; |
2721 | the resulting insn isn't likely to be recognized anyway. */ |
2722 | && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT |
2723 | && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART |
2724 | && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)), |
2725 | SET_DEST (PATTERN (i3))) |
2726 | && next_active_insn (i2) == i3) |
2727 | { |
      rtx p2 = PATTERN (i2);
2729 | |
2730 | /* Make sure that the destination of I3, |
2731 | which we are going to substitute into one output of I2, |
2732 | is not used within another output of I2. We must avoid making this: |
2733 | (parallel [(set (mem (reg 69)) ...) |
2734 | (set (reg 69) ...)]) |
2735 | which is not well-defined as to order of actions. |
2736 | (Besides, reload can't handle output reloads for this.) |
2737 | |
2738 | The problem can also happen if the dest of I3 is a memory ref, |
2739 | if another dest in I2 is an indirect memory ref. |
2740 | |
2741 | Neither can this PARALLEL be an asm. We do not allow combining |
2742 | that usually (see can_combine_p), so do not here either. */ |
2743 | bool ok = true; |
2744 | for (i = 0; ok && i < XVECLEN (p2, 0); i++) |
2745 | { |
2746 | if ((GET_CODE (XVECEXP (p2, 0, i)) == SET |
2747 | || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER) |
2748 | && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)), |
2749 | SET_DEST (XVECEXP (p2, 0, i)))) |
2750 | ok = false; |
2751 | else if (GET_CODE (XVECEXP (p2, 0, i)) == SET |
2752 | && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS) |
2753 | ok = false; |
2754 | } |
2755 | |
2756 | if (ok) |
2757 | for (i = 0; i < XVECLEN (p2, 0); i++) |
2758 | if (GET_CODE (XVECEXP (p2, 0, i)) == SET |
2759 | && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3))) |
2760 | { |
2761 | combine_merges++; |
2762 | |
2763 | subst_insn = i3; |
2764 | subst_low_luid = DF_INSN_LUID (i2); |
2765 | |
2766 | added_sets_2 = added_sets_1 = added_sets_0 = false; |
2767 | i2src = SET_SRC (XVECEXP (p2, 0, i)); |
2768 | i2dest = SET_DEST (XVECEXP (p2, 0, i)); |
2769 | i2dest_killed = dead_or_set_p (i2, i2dest); |
2770 | |
2771 | /* Replace the dest in I2 with our dest and make the resulting |
2772 | insn the new pattern for I3. Then skip to where we validate |
2773 | the pattern. Everything was set up above. */ |
2774 | SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3))); |
2775 | newpat = p2; |
2776 | i3_subst_into_i2 = true; |
2777 | goto validate_replacement; |
2778 | } |
2779 | } |
2780 | |
2781 | /* If I2 is setting a pseudo to a constant and I3 is setting some |
2782 | sub-part of it to another constant, merge them by making a new |
2783 | constant. */ |
2784 | if (i1 == 0 |
2785 | && (temp_expr = single_set (insn: i2)) != 0 |
2786 | && is_a <scalar_int_mode> (GET_MODE (SET_DEST (temp_expr)), result: &temp_mode) |
2787 | && CONST_SCALAR_INT_P (SET_SRC (temp_expr)) |
2788 | && GET_CODE (PATTERN (i3)) == SET |
2789 | && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3))) |
2790 | && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr))) |
2791 | { |
2792 | rtx dest = SET_DEST (PATTERN (i3)); |
2793 | rtx temp_dest = SET_DEST (temp_expr); |
2794 | int offset = -1; |
2795 | int width = 0; |
2796 | |
2797 | if (GET_CODE (dest) == ZERO_EXTRACT) |
2798 | { |
2799 | if (CONST_INT_P (XEXP (dest, 1)) |
2800 | && CONST_INT_P (XEXP (dest, 2)) |
2801 | && is_a <scalar_int_mode> (GET_MODE (XEXP (dest, 0)), |
2802 | result: &dest_mode)) |
2803 | { |
2804 | width = INTVAL (XEXP (dest, 1)); |
2805 | offset = INTVAL (XEXP (dest, 2)); |
2806 | dest = XEXP (dest, 0); |
2807 | if (BITS_BIG_ENDIAN) |
	    offset = GET_MODE_PRECISION (dest_mode) - width - offset;
2809 | } |
2810 | } |
2811 | else |
2812 | { |
2813 | if (GET_CODE (dest) == STRICT_LOW_PART) |
2814 | dest = XEXP (dest, 0); |
	  if (is_a <scalar_int_mode> (GET_MODE (dest), &dest_mode))
	    {
	      width = GET_MODE_PRECISION (dest_mode);
2818 | offset = 0; |
2819 | } |
2820 | } |
2821 | |
2822 | if (offset >= 0) |
2823 | { |
2824 | /* If this is the low part, we're done. */ |
2825 | if (subreg_lowpart_p (dest)) |
2826 | ; |
2827 | /* Handle the case where inner is twice the size of outer. */ |
	  else if (GET_MODE_PRECISION (temp_mode)
		   == 2 * GET_MODE_PRECISION (dest_mode))
	    offset += GET_MODE_PRECISION (dest_mode);
2831 | /* Otherwise give up for now. */ |
2832 | else |
2833 | offset = -1; |
2834 | } |
2835 | |
2836 | if (offset >= 0) |
2837 | { |
2838 | rtx inner = SET_SRC (PATTERN (i3)); |
2839 | rtx outer = SET_SRC (temp_expr); |
2840 | |
	  wide_int o = wi::insert (rtx_mode_t (outer, temp_mode),
				   rtx_mode_t (inner, dest_mode),
				   offset, width);
2844 | |
2845 | combine_merges++; |
2846 | subst_insn = i3; |
2847 | subst_low_luid = DF_INSN_LUID (i2); |
2848 | added_sets_2 = added_sets_1 = added_sets_0 = false; |
2849 | i2dest = temp_dest; |
2850 | i2dest_killed = dead_or_set_p (i2, i2dest); |
2851 | |
2852 | /* Replace the source in I2 with the new constant and make the |
2853 | resulting insn the new pattern for I3. Then skip to where we |
2854 | validate the pattern. Everything was set up above. */ |
2855 | SUBST (SET_SRC (temp_expr), |
2856 | immed_wide_int_const (o, temp_mode)); |
2857 | |
	  newpat = PATTERN (i2);
2859 | |
2860 | /* The dest of I3 has been replaced with the dest of I2. */ |
2861 | changed_i3_dest = true; |
2862 | goto validate_replacement; |
2863 | } |
2864 | } |
2865 | |
2866 | /* If we have no I1 and I2 looks like: |
2867 | (parallel [(set (reg:CC X) (compare:CC OP (const_int 0))) |
2868 | (set Y OP)]) |
2869 | make up a dummy I1 that is |
2870 | (set Y OP) |
2871 | and change I2 to be |
2872 | (set (reg:CC X) (compare:CC Y (const_int 0))) |
2873 | |
2874 | (We can ignore any trailing CLOBBERs.) |
2875 | |
2876 | This undoes a previous combination and allows us to match a branch-and- |
2877 | decrement insn. */ |
2878 | |
2879 | if (i1 == 0 |
2880 | && is_parallel_of_n_reg_sets (pat: PATTERN (insn: i2), n: 2) |
2881 | && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)))) |
2882 | == MODE_CC) |
2883 | && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE |
2884 | && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx |
2885 | && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0), |
2886 | SET_SRC (XVECEXP (PATTERN (i2), 0, 1))) |
2887 | && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3) |
2888 | && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3)) |
2889 | { |
2890 | /* We make I1 with the same INSN_UID as I2. This gives it |
2891 | the same DF_INSN_LUID for value tracking. Our fake I1 will |
2892 | never appear in the insn stream so giving it the same INSN_UID |
2893 | as I2 will not cause a problem. */ |
2894 | |
      i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
			 XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
			 -1, NULL_RTX);
      INSN_UID (i1) = INSN_UID (i2);
2899 | |
2900 | SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0)); |
2901 | SUBST (XEXP (SET_SRC (PATTERN (i2)), 0), |
2902 | SET_DEST (PATTERN (i1))); |
2903 | unsigned int regno = REGNO (SET_DEST (PATTERN (i1))); |
2904 | SUBST_LINK (LOG_LINKS (i2), |
2905 | alloc_insn_link (i1, regno, LOG_LINKS (i2))); |
2906 | } |
2907 | |
2908 | /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs), |
2909 | make those two SETs separate I1 and I2 insns, and make an I0 that is |
2910 | the original I1. */ |
2911 | if (i0 == 0 |
2912 | && is_parallel_of_n_reg_sets (pat: PATTERN (insn: i2), n: 2) |
2913 | && can_split_parallel_of_n_reg_sets (insn: i2, n: 2) |
2914 | && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3) |
2915 | && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3) |
2916 | && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3) |
2917 | && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3)) |
2918 | { |
2919 | /* If there is no I1, there is no I0 either. */ |
2920 | i0 = i1; |
2921 | |
2922 | /* We make I1 with the same INSN_UID as I2. This gives it |
2923 | the same DF_INSN_LUID for value tracking. Our fake I1 will |
2924 | never appear in the insn stream so giving it the same INSN_UID |
2925 | as I2 will not cause a problem. */ |
2926 | |
      i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
			 XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
			 -1, NULL_RTX);
      INSN_UID (i1) = INSN_UID (i2);
2931 | |
2932 | SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1)); |
2933 | } |
2934 | |
2935 | /* Verify that I2 and maybe I1 and I0 can be combined into I3. */ |
  if (!can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Can't combine i2 into i3\n");
      undo_all ();
      return 0;
    }
  if (i1 && !can_combine_p (i1, i3, i0, NULL, i2, NULL, &i1dest, &i1src))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Can't combine i1 into i3\n");
      undo_all ();
      return 0;
    }
  if (i0 && !can_combine_p (i0, i3, NULL, NULL, i1, i2, &i0dest, &i0src))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Can't combine i0 into i3\n");
      undo_all ();
      return 0;
    }
2957 | |
2958 | /* With non-call exceptions we can end up trying to combine multiple |
2959 | insns with possible EH side effects. Make sure we can combine |
2960 | that to a single insn which means there must be at most one insn |
2961 | in the combination with an EH side effect. */ |
2962 | if (cfun->can_throw_non_call_exceptions) |
2963 | { |
2964 | if (find_reg_note (i3, REG_EH_REGION, NULL_RTX) |
2965 | || find_reg_note (i2, REG_EH_REGION, NULL_RTX) |
2966 | || (i1 && find_reg_note (i1, REG_EH_REGION, NULL_RTX)) |
2967 | || (i0 && find_reg_note (i0, REG_EH_REGION, NULL_RTX))) |
2968 | { |
2969 | has_non_call_exception = true; |
2970 | if (insn_could_throw_p (i3) |
2971 | + insn_could_throw_p (i2) |
2972 | + (i1 ? insn_could_throw_p (i1) : 0) |
2973 | + (i0 ? insn_could_throw_p (i0) : 0) > 1) |
2974 | { |
2975 | if (dump_file && (dump_flags & TDF_DETAILS)) |
fprintf (dump_file, "Can't combine multiple insns with EH "
"side-effects\n");
2978 | undo_all (); |
2979 | return 0; |
2980 | } |
2981 | } |
2982 | } |
2983 | |
2984 | /* Record whether i2 and i3 are trivial moves. */ |
i2_was_move = is_just_move (i2);
i3_was_move = is_just_move (i3);
2987 | |
2988 | /* Record whether I2DEST is used in I2SRC and similarly for the other |
2989 | cases. Knowing this will help in register status updating below. */ |
2990 | i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src); |
2991 | i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src); |
2992 | i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src); |
2993 | i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src); |
2994 | i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src); |
2995 | i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src); |
2996 | i2dest_killed = dead_or_set_p (i2, i2dest); |
2997 | i1dest_killed = i1 && dead_or_set_p (i1, i1dest); |
2998 | i0dest_killed = i0 && dead_or_set_p (i0, i0dest); |
2999 | |
3000 | /* For the earlier insns, determine which of the subsequent ones they |
3001 | feed. */ |
i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
: (!reg_overlap_mentioned_p (i1dest, i0dest)
&& reg_overlap_mentioned_p (i0dest, i2src))));
3007 | |
3008 | /* Ensure that I3's pattern can be the destination of combines. */ |
if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
i1 && i2dest_in_i1src && !i1_feeds_i2_n,
i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
|| (i1dest_in_i0src && !i0_feeds_i1_n)),
&i3dest_killed))
3014 | { |
3015 | undo_all (); |
3016 | return 0; |
3017 | } |
3018 | |
3019 | /* See if any of the insns is a MULT operation. Unless one is, we will |
3020 | reject a combination that is, since it must be slower. Be conservative |
3021 | here. */ |
3022 | if (GET_CODE (i2src) == MULT |
3023 | || (i1 != 0 && GET_CODE (i1src) == MULT) |
3024 | || (i0 != 0 && GET_CODE (i0src) == MULT) |
3025 | || (GET_CODE (PATTERN (i3)) == SET |
3026 | && GET_CODE (SET_SRC (PATTERN (i3))) == MULT)) |
3027 | have_mult = true; |
3028 | |
3029 | /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd. |
3030 | We used to do this EXCEPT in one case: I3 has a post-inc in an |
3031 | output operand. However, that exception can give rise to insns like |
3032 | mov r3,(r3)+ |
3033 | which is a famous insn on the PDP-11 where the value of r3 used as the |
3034 | source was model-dependent. Avoid this sort of thing. */ |
3035 | |
3036 | #if 0 |
3037 | if (!(GET_CODE (PATTERN (i3)) == SET |
3038 | && REG_P (SET_SRC (PATTERN (i3))) |
3039 | && MEM_P (SET_DEST (PATTERN (i3))) |
3040 | && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC |
3041 | || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC))) |
3042 | /* It's not the exception. */ |
3043 | #endif |
3044 | if (AUTO_INC_DEC) |
3045 | { |
3046 | rtx link; |
3047 | for (link = REG_NOTES (i3); link; link = XEXP (link, 1)) |
3048 | if (REG_NOTE_KIND (link) == REG_INC |
&& (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
|| (i1 != 0
&& reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1))))
3052 | { |
3053 | undo_all (); |
3054 | return 0; |
3055 | } |
3056 | } |
3057 | |
3058 | /* See if the SETs in I1 or I2 need to be kept around in the merged |
3059 | instruction: whenever the value set there is still needed past I3. |
3060 | For the SET in I2, this is easy: we see if I2DEST dies or is set in I3. |
3061 | |
3062 | For the SET in I1, we have two cases: if I1 and I2 independently feed |
3063 | into I3, the set in I1 needs to be kept around unless I1DEST dies |
3064 | or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set |
3065 | in I1 needs to be kept around unless I1DEST dies or is set in either |
3066 | I2 or I3. The same considerations apply to I0. */ |
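
/* Hypothetical example for ADDED_SETS_2: if I2 is
     (set (reg 95) (plus (reg 96) (reg 97)))
and (reg 95) is used by I3 but also by some later insn (so it neither
dies nor is set in I3), the combined pattern must keep a copy of I2's
SET in a PARALLEL, or the later use would see a stale value.  */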
3067 | |
3068 | added_sets_2 = !dead_or_set_p (i3, i2dest); |
3069 | |
3070 | if (i1) |
3071 | added_sets_1 = !(dead_or_set_p (i3, i1dest) |
3072 | || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest))); |
3073 | else |
3074 | added_sets_1 = false; |
3075 | |
3076 | if (i0) |
3077 | added_sets_0 = !(dead_or_set_p (i3, i0dest) |
3078 | || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)) |
3079 | || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n)) |
3080 | && dead_or_set_p (i2, i0dest))); |
3081 | else |
3082 | added_sets_0 = false; |
3083 | |
3084 | /* We are about to copy insns for the case where they need to be kept |
3085 | around. Check that they can be copied in the merged instruction. */ |
3086 | |
3087 | if (targetm.cannot_copy_insn_p |
3088 | && ((added_sets_2 && targetm.cannot_copy_insn_p (i2)) |
3089 | || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1)) |
3090 | || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0)))) |
3091 | { |
3092 | undo_all (); |
3093 | return 0; |
3094 | } |
3095 | |
3096 | /* We cannot safely duplicate volatile references in any case. */ |
3097 | |
if ((added_sets_2 && volatile_refs_p (PATTERN (i2)))
|| (added_sets_1 && volatile_refs_p (PATTERN (i1)))
|| (added_sets_0 && volatile_refs_p (PATTERN (i0))))
3101 | { |
3102 | undo_all (); |
3103 | return 0; |
3104 | } |
3105 | |
3106 | /* Count how many auto_inc expressions there were in the original insns; |
3107 | we need to have the same number in the resulting patterns. */ |
3108 | |
3109 | if (i0) |
for_each_inc_dec (PATTERN (i0), count_auto_inc, &n_auto_inc);
if (i1)
for_each_inc_dec (PATTERN (i1), count_auto_inc, &n_auto_inc);
for_each_inc_dec (PATTERN (i2), count_auto_inc, &n_auto_inc);
for_each_inc_dec (PATTERN (i3), count_auto_inc, &n_auto_inc);
3115 | |
3116 | /* If the set in I2 needs to be kept around, we must make a copy of |
3117 | PATTERN (I2), so that when we substitute I1SRC for I1DEST in |
3118 | PATTERN (I2), we are only substituting for the original I1DEST, not into |
3119 | an already-substituted copy. This also prevents making self-referential |
3120 | rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to |
3121 | I2DEST. */ |
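
/* Hypothetical illustration: with I1 = (set (reg 80) (reg 81)) and
I2 = (set (reg 82) (not (reg 80))), the kept copy of I2's SET must be a
fresh rtx; substituting I1SRC for I1DEST into a shared PATTERN (I2)
could otherwise rewrite the preserved copy as well, or build rtx that
contain themselves.  */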
3122 | |
3123 | if (added_sets_2) |
3124 | { |
3125 | if (GET_CODE (PATTERN (i2)) == PARALLEL) |
3126 | i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src)); |
3127 | else |
i2pat = copy_rtx (PATTERN (i2));
3129 | } |
3130 | |
3131 | if (added_sets_1) |
3132 | { |
3133 | if (GET_CODE (PATTERN (i1)) == PARALLEL) |
3134 | i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src)); |
3135 | else |
i1pat = copy_rtx (PATTERN (i1));
3137 | } |
3138 | |
3139 | if (added_sets_0) |
3140 | { |
3141 | if (GET_CODE (PATTERN (i0)) == PARALLEL) |
3142 | i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src)); |
3143 | else |
i0pat = copy_rtx (PATTERN (i0));
3145 | } |
3146 | |
3147 | combine_merges++; |
3148 | |
3149 | /* Substitute in the latest insn for the regs set by the earlier ones. */ |
3150 | |
3151 | maxreg = max_reg_num (); |
3152 | |
3153 | subst_insn = i3; |
3154 | |
3155 | /* Many machines have insns that can both perform an |
3156 | arithmetic operation and set the condition code. These operations will |
3157 | be represented as a PARALLEL with the first element of the vector |
3158 | being a COMPARE of an arithmetic operation with the constant zero. |
3159 | The second element of the vector will set some pseudo to the result |
3160 | of the same arithmetic operation. If we simplify the COMPARE, we won't |
3161 | match such a pattern and so will generate an extra insn. Here we test |
3162 | for this case, where both the comparison and the operation result are |
3163 | needed, and make the PARALLEL by just replacing I2DEST in I3SRC with |
3164 | I2SRC. Later we will make the PARALLEL that contains I2. */ |
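
/* Sketch of the shape we aim for (hypothetical operands):
     I2: (set (reg 99) (plus (reg 98) (const_int -1)))
     I3: (set (reg:CC CC) (compare:CC (reg 99) (const_int 0)))
should become the single recognizable PARALLEL
     (parallel [(set (reg:CC CC)
		     (compare:CC (plus (reg 98) (const_int -1))
				 (const_int 0)))
		(set (reg 99) (plus (reg 98) (const_int -1)))]).  */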
3165 | |
3166 | if (i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET |
3167 | && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE |
3168 | && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1)) |
3169 | && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest)) |
3170 | { |
3171 | rtx newpat_dest; |
3172 | rtx *cc_use_loc = NULL; |
3173 | rtx_insn *cc_use_insn = NULL; |
3174 | rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1); |
3175 | machine_mode compare_mode, orig_compare_mode; |
3176 | enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN; |
3177 | scalar_int_mode mode; |
3178 | |
newpat = PATTERN (i3);
3180 | newpat_dest = SET_DEST (newpat); |
3181 | compare_mode = orig_compare_mode = GET_MODE (newpat_dest); |
3182 | |
3183 | if (undobuf.other_insn == 0 |
&& (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
&cc_use_insn)))
3186 | { |
3187 | compare_code = orig_compare_code = GET_CODE (*cc_use_loc); |
if (is_a <scalar_int_mode> (GET_MODE (i2dest), &mode))
compare_code = simplify_compare_const (compare_code, mode,
&op0, &op1);
target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
3192 | } |
3193 | |
3194 | /* Do the rest only if op1 is const0_rtx, which may be the |
3195 | result of simplification. */ |
3196 | if (op1 == const0_rtx) |
3197 | { |
3198 | /* If a single use of the CC is found, prepare to modify it |
3199 | when SELECT_CC_MODE returns a new CC-class mode, or when |
3200 | the above simplify_compare_const() returned a new comparison |
3201 | operator. undobuf.other_insn is assigned the CC use insn |
3202 | when modifying it. */ |
3203 | if (cc_use_loc) |
3204 | { |
3205 | #ifdef SELECT_CC_MODE |
3206 | machine_mode new_mode |
3207 | = SELECT_CC_MODE (compare_code, op0, op1); |
3208 | if (new_mode != orig_compare_mode |
3209 | && can_change_dest_mode (SET_DEST (newpat), |
added_sets_2, new_mode))
3211 | { |
3212 | unsigned int regno = REGNO (newpat_dest); |
3213 | compare_mode = new_mode; |
3214 | if (regno < FIRST_PSEUDO_REGISTER) |
3215 | newpat_dest = gen_rtx_REG (compare_mode, regno); |
3216 | else |
3217 | { |
subst_mode (regno, compare_mode);
3219 | newpat_dest = regno_reg_rtx[regno]; |
3220 | } |
3221 | } |
3222 | #endif |
3223 | /* Cases for modifying the CC-using comparison. */ |
3224 | if (compare_code != orig_compare_code |
3225 | && COMPARISON_P (*cc_use_loc)) |
3226 | { |
3227 | /* Replace cc_use_loc with entire new RTX. */ |
3228 | SUBST (*cc_use_loc, |
3229 | gen_rtx_fmt_ee (compare_code, GET_MODE (*cc_use_loc), |
3230 | newpat_dest, const0_rtx)); |
3231 | undobuf.other_insn = cc_use_insn; |
3232 | } |
3233 | else if (compare_mode != orig_compare_mode) |
3234 | { |
3235 | subrtx_ptr_iterator::array_type array; |
3236 | |
3237 | /* Just replace the CC reg with a new mode. */ |
3238 | FOR_EACH_SUBRTX_PTR (iter, array, cc_use_loc, NONCONST) |
3239 | { |
3240 | rtx *loc = *iter; |
3241 | if (REG_P (*loc) |
3242 | && REGNO (*loc) == REGNO (newpat_dest)) |
3243 | { |
3244 | SUBST (*loc, newpat_dest); |
3245 | iter.skip_subrtxes (); |
3246 | } |
3247 | } |
3248 | undobuf.other_insn = cc_use_insn; |
3249 | } |
3250 | } |
3251 | |
3252 | /* Now we modify the current newpat: |
3253 | First, SET_DEST(newpat) is updated if the CC mode has been |
3254 | altered. For targets without SELECT_CC_MODE, this should be |
3255 | optimized away. */ |
3256 | if (compare_mode != orig_compare_mode) |
3257 | SUBST (SET_DEST (newpat), newpat_dest); |
3258 | /* This is always done to propagate i2src into newpat. */ |
3259 | SUBST (SET_SRC (newpat), |
3260 | gen_rtx_COMPARE (compare_mode, op0, op1)); |
3261 | /* Create new version of i2pat if needed; the below PARALLEL |
3262 | creation needs this to work correctly. */ |
3263 | if (! rtx_equal_p (i2src, op0)) |
3264 | i2pat = gen_rtx_SET (i2dest, op0); |
3265 | i2_is_used = 1; |
3266 | } |
3267 | } |
3268 | |
3269 | if (i2_is_used == 0) |
3270 | { |
3271 | /* It is possible that the source of I2 or I1 may be performing |
3272 | an unneeded operation, such as a ZERO_EXTEND of something |
3273 | that is known to have the high part zero. Handle that case |
3274 | by letting subst look at the inner insns. |
3275 | |
3276 | Another way to do this would be to have a function that tries |
3277 | to simplify a single insn instead of merging two or more |
3278 | insns. We don't do this because of the potential of infinite |
3279 | loops and because of the potential extra memory required. |
3280 | However, doing it the way we are is a bit of a kludge and |
3281 | doesn't catch all cases. |
3282 | |
3283 | But only do this if -fexpensive-optimizations since it slows |
3284 | things down and doesn't usually win. |
3285 | |
3286 | This is not done in the COMPARE case above because the |
3287 | unmodified I2PAT is used in the PARALLEL and so a pattern |
3288 | with a modified I2SRC would not match. */ |
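
/* Hypothetical example of such a simplification: an I2SRC like
     (zero_extend:DI (and:SI (reg:SI 30) (const_int 255)))
may be rewritten by subst into a cheaper equivalent form, since the
high bits are already known to be zero.  */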
3289 | |
3290 | if (flag_expensive_optimizations) |
3291 | { |
3292 | /* Pass pc_rtx so no substitutions are done, just |
3293 | simplifications. */ |
3294 | if (i1) |
3295 | { |
3296 | subst_low_luid = DF_INSN_LUID (i1); |
3297 | i1src = subst (i1src, pc_rtx, pc_rtx, false, false, false); |
3298 | } |
3299 | |
3300 | subst_low_luid = DF_INSN_LUID (i2); |
3301 | i2src = subst (i2src, pc_rtx, pc_rtx, false, false, false); |
3302 | } |
3303 | |
3304 | n_occurrences = 0; /* `subst' counts here */ |
3305 | subst_low_luid = DF_INSN_LUID (i2); |
3306 | |
3307 | /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique |
3308 | copy of I2SRC each time we substitute it, in order to avoid creating |
3309 | self-referential RTL when we will be substituting I1SRC for I1DEST |
3310 | later. Likewise if I0 feeds into I2, either directly or indirectly |
3311 | through I1, and I0DEST is in I0SRC. */ |
newpat = subst (PATTERN (i3), i2dest, i2src, false, false,
3313 | (i1_feeds_i2_n && i1dest_in_i1src) |
3314 | || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n)) |
3315 | && i0dest_in_i0src)); |
3316 | substed_i2 = true; |
3317 | |
3318 | /* Record whether I2's body now appears within I3's body. */ |
3319 | i2_is_used = n_occurrences; |
3320 | } |
3321 | |
3322 | /* If we already got a failure, don't try to do more. Otherwise, try to |
3323 | substitute I1 if we have it. */ |
3324 | |
3325 | if (i1 && GET_CODE (newpat) != CLOBBER) |
3326 | { |
3327 | /* Before we can do this substitution, we must redo the test done |
3328 | above (see detailed comments there) that ensures I1DEST isn't |
3329 | mentioned in any SETs in NEWPAT that are field assignments. */ |
if (!combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
false, false, 0))
3332 | { |
3333 | undo_all (); |
3334 | return 0; |
3335 | } |
3336 | |
3337 | n_occurrences = 0; |
3338 | subst_low_luid = DF_INSN_LUID (i1); |
3339 | |
3340 | /* If the following substitution will modify I1SRC, make a copy of it |
3341 | for the case where it is substituted for I1DEST in I2PAT later. */ |
3342 | if (added_sets_2 && i1_feeds_i2_n) |
3343 | i1src_copy = copy_rtx (i1src); |
3344 | |
3345 | /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique |
3346 | copy of I1SRC each time we substitute it, in order to avoid creating |
3347 | self-referential RTL when we will be substituting I0SRC for I0DEST |
3348 | later. */ |
3349 | newpat = subst (newpat, i1dest, i1src, false, false, |
3350 | i0_feeds_i1_n && i0dest_in_i0src); |
3351 | substed_i1 = true; |
3352 | |
3353 | /* Record whether I1's body now appears within I3's body. */ |
3354 | i1_is_used = n_occurrences; |
3355 | } |
3356 | |
3357 | /* Likewise for I0 if we have it. */ |
3358 | |
3359 | if (i0 && GET_CODE (newpat) != CLOBBER) |
3360 | { |
if (!combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
false, false, 0))
3363 | { |
3364 | undo_all (); |
3365 | return 0; |
3366 | } |
3367 | |
3368 | /* If the following substitution will modify I0SRC, make a copy of it |
3369 | for the case where it is substituted for I0DEST in I1PAT later. */ |
3370 | if (added_sets_1 && i0_feeds_i1_n) |
3371 | i0src_copy = copy_rtx (i0src); |
3372 | /* And a copy for I0DEST in I2PAT substitution. */ |
3373 | if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n) |
3374 | || (i0_feeds_i2_n))) |
3375 | i0src_copy2 = copy_rtx (i0src); |
3376 | |
3377 | n_occurrences = 0; |
3378 | subst_low_luid = DF_INSN_LUID (i0); |
3379 | newpat = subst (newpat, i0dest, i0src, false, false, false); |
3380 | substed_i0 = true; |
3381 | } |
3382 | |
3383 | if (n_auto_inc) |
3384 | { |
3385 | int new_n_auto_inc = 0; |
for_each_inc_dec (newpat, count_auto_inc, &new_n_auto_inc);
3387 | |
3388 | if (n_auto_inc != new_n_auto_inc) |
3389 | { |
3390 | if (dump_file && (dump_flags & TDF_DETAILS)) |
fprintf (dump_file, "Number of auto_inc expressions changed\n");
3392 | undo_all (); |
3393 | return 0; |
3394 | } |
3395 | } |
3396 | |
3397 | /* Fail if an autoincrement side-effect has been duplicated. Be careful |
3398 | to count all the ways that I2SRC and I1SRC can be used. */ |
3399 | if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0 |
3400 | && i2_is_used + added_sets_2 > 1) |
3401 | || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0 |
3402 | && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n) > 1)) |
3403 | || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0 |
3404 | && (n_occurrences + added_sets_0 |
3405 | + (added_sets_1 && i0_feeds_i1_n) |
3406 | + (added_sets_2 && i0_feeds_i2_n) > 1)) |
3407 | /* Fail if we tried to make a new register. */ |
3408 | || max_reg_num () != maxreg |
3409 | /* Fail if we couldn't do something and have a CLOBBER. */ |
3410 | || GET_CODE (newpat) == CLOBBER |
3411 | /* Fail if this new pattern is a MULT and we didn't have one before |
3412 | at the outer level. */ |
3413 | || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT |
3414 | && ! have_mult)) |
3415 | { |
3416 | undo_all (); |
3417 | return 0; |
3418 | } |
3419 | |
3420 | /* If the actions of the earlier insns must be kept |
3421 | in addition to substituting them into the latest one, |
3422 | we must make a new PARALLEL for the latest insn |
to hold the additional SETs. */
3424 | |
3425 | if (added_sets_0 || added_sets_1 || added_sets_2) |
3426 | { |
int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3428 | combine_extras++; |
3429 | |
3430 | if (GET_CODE (newpat) == PARALLEL) |
3431 | { |
3432 | rtvec old = XVEC (newpat, 0); |
3433 | total_sets = XVECLEN (newpat, 0) + extra_sets; |
3434 | newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets)); |
memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
sizeof (old->elem[0]) * old->num_elem);
3437 | } |
3438 | else |
3439 | { |
3440 | rtx old = newpat; |
3441 | total_sets = 1 + extra_sets; |
3442 | newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets)); |
3443 | XVECEXP (newpat, 0, 0) = old; |
3444 | } |
3445 | |
3446 | if (added_sets_0) |
3447 | XVECEXP (newpat, 0, --total_sets) = i0pat; |
3448 | |
3449 | if (added_sets_1) |
3450 | { |
3451 | rtx t = i1pat; |
3452 | if (i0_feeds_i1_n) |
3453 | t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, |
3454 | false, false, false); |
3455 | |
3456 | XVECEXP (newpat, 0, --total_sets) = t; |
3457 | } |
3458 | if (added_sets_2) |
3459 | { |
3460 | rtx t = i2pat; |
3461 | if (i1_feeds_i2_n) |
3462 | t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, false, false, |
3463 | i0_feeds_i1_n && i0dest_in_i0src); |
3464 | if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n) |
3465 | t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, |
3466 | false, false, false); |
3467 | |
3468 | XVECEXP (newpat, 0, --total_sets) = t; |
3469 | } |
3470 | } |
3471 | |
3472 | validate_replacement: |
3473 | |
3474 | /* Note which hard regs this insn has as inputs. */ |
3475 | mark_used_regs_combine (newpat); |
3476 | |
3477 | /* If recog_for_combine fails, it strips existing clobbers. If we'll |
3478 | consider splitting this pattern, we might need these clobbers. */ |
3479 | if (i1 && GET_CODE (newpat) == PARALLEL |
3480 | && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER) |
3481 | { |
3482 | int len = XVECLEN (newpat, 0); |
3483 | |
3484 | newpat_vec_with_clobbers = rtvec_alloc (len); |
3485 | for (i = 0; i < len; i++) |
3486 | RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i); |
3487 | } |
3488 | |
3489 | /* We have recognized nothing yet. */ |
3490 | insn_code_number = -1; |
3491 | |
3492 | /* See if this is a PARALLEL of two SETs where one SET's destination is |
3493 | a register that is unused and this isn't marked as an instruction that |
3494 | might trap in an EH region. In that case, we just need the other SET. |
3495 | We prefer this over the PARALLEL. |
3496 | |
3497 | This can occur when simplifying a divmod insn. We *must* test for this |
3498 | case here because the code below that splits two independent SETs doesn't |
3499 | handle this case correctly when it updates the register status. |
3500 | |
3501 | It's pointless doing this if we originally had two sets, one from |
3502 | i3, and one from i2. Combining then splitting the parallel results |
3503 | in the original i2 again plus an invalid insn (which we delete). |
3504 | The net effect is only to move instructions around, which makes |
3505 | debug info less accurate. |
3506 | |
3507 | If the remaining SET came from I2 its destination should not be used |
3508 | between I2 and I3. See PR82024. */ |
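
/* Hypothetical divmod illustration: if NEWPAT is
     (parallel [(set (reg 70) (div (reg 71) (reg 72)))
		(set (reg 73) (mod (reg 71) (reg 72)))])
and I3 has a REG_UNUSED note for (reg 73), we try to recognize the
first SET on its own.  */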
3509 | |
3510 | if (!(added_sets_2 && i1 == 0) |
&& is_parallel_of_n_reg_sets (newpat, 2)
3512 | && asm_noperands (newpat) < 0) |
3513 | { |
3514 | rtx set0 = XVECEXP (newpat, 0, 0); |
3515 | rtx set1 = XVECEXP (newpat, 0, 1); |
3516 | rtx oldpat = newpat; |
3517 | |
3518 | if (((REG_P (SET_DEST (set1)) |
3519 | && find_reg_note (i3, REG_UNUSED, SET_DEST (set1))) |
3520 | || (GET_CODE (SET_DEST (set1)) == SUBREG |
3521 | && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1))))) |
3522 | && insn_nothrow_p (i3) |
3523 | && !side_effects_p (SET_SRC (set1))) |
3524 | { |
3525 | newpat = set0; |
3526 | insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes); |
3527 | } |
3528 | |
3529 | else if (((REG_P (SET_DEST (set0)) |
3530 | && find_reg_note (i3, REG_UNUSED, SET_DEST (set0))) |
3531 | || (GET_CODE (SET_DEST (set0)) == SUBREG |
3532 | && find_reg_note (i3, REG_UNUSED, |
3533 | SUBREG_REG (SET_DEST (set0))))) |
3534 | && insn_nothrow_p (i3) |
3535 | && !side_effects_p (SET_SRC (set0))) |
3536 | { |
3537 | rtx dest = SET_DEST (set1); |
3538 | if (GET_CODE (dest) == SUBREG) |
3539 | dest = SUBREG_REG (dest); |
3540 | if (!reg_used_between_p (dest, i2, i3)) |
3541 | { |
3542 | newpat = set1; |
3543 | insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes); |
3544 | |
3545 | if (insn_code_number >= 0) |
3546 | changed_i3_dest = true; |
3547 | } |
3548 | } |
3549 | |
3550 | if (insn_code_number < 0) |
3551 | newpat = oldpat; |
3552 | } |
3553 | |
3554 | /* Is the result of combination a valid instruction? */ |
3555 | if (insn_code_number < 0) |
3556 | insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes); |
3557 | |
3558 | /* If we were combining three insns and the result is a simple SET |
3559 | with no ASM_OPERANDS that wasn't recognized, try to split it into two |
3560 | insns. There are two ways to do this. It can be split using a |
3561 | machine-specific method (like when you have an addition of a large |
3562 | constant) or by combine in the function find_split_point. */ |
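
/* For instance (hypothetically), a machine description might split
     (set (reg 60) (plus (reg 61) (const_int 0x12345)))
into two PLUS insns with smaller constants, while find_split_point
handles the generic cases below.  */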
3563 | |
3564 | if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET |
3565 | && asm_noperands (newpat) < 0) |
3566 | { |
3567 | rtx parallel, *split; |
3568 | rtx_insn *m_split_insn; |
3569 | |
3570 | /* See if the MD file can split NEWPAT. If it can't, see if letting it |
3571 | use I2DEST as a scratch register will help. In the latter case, |
3572 | convert I2DEST to the mode of the source of NEWPAT if we can. */ |
3573 | |
m_split_insn = combine_split_insns (newpat, i3);
3575 | |
3576 | /* We can only use I2DEST as a scratch reg if it doesn't overlap any |
3577 | inputs of NEWPAT. */ |
3578 | |
3579 | /* ??? If I2DEST is not safe, and I1DEST exists, then it would be |
3580 | possible to try that as a scratch reg. This would require adding |
3581 | more code to make it work though. */ |
3582 | |
3583 | if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat)) |
3584 | { |
3585 | machine_mode new_mode = GET_MODE (SET_DEST (newpat)); |
3586 | |
3587 | /* ??? Reusing i2dest without resetting the reg_stat entry for it |
3588 | (temporarily, until we are committed to this instruction |
3589 | combination) does not work: for example, any call to nonzero_bits |
3590 | on the register (from a splitter in the MD file, for example) |
3591 | will get the old information, which is invalid. |
3592 | |
3593 | Since nowadays we can create registers during combine just fine, |
3594 | we should just create a new one here, not reuse i2dest. */ |
3595 | |
3596 | /* First try to split using the original register as a |
3597 | scratch register. */ |
3598 | parallel = gen_rtx_PARALLEL (VOIDmode, |
3599 | gen_rtvec (2, newpat, |
3600 | gen_rtx_CLOBBER (VOIDmode, |
3601 | i2dest))); |
m_split_insn = combine_split_insns (parallel, i3);
3603 | |
3604 | /* If that didn't work, try changing the mode of I2DEST if |
3605 | we can. */ |
3606 | if (m_split_insn == 0 |
3607 | && new_mode != GET_MODE (i2dest) |
3608 | && new_mode != VOIDmode |
&& can_change_dest_mode (i2dest, added_sets_2, new_mode))
3610 | { |
3611 | machine_mode old_mode = GET_MODE (i2dest); |
3612 | rtx ni2dest; |
3613 | |
3614 | if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER) |
3615 | ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest)); |
3616 | else |
3617 | { |
subst_mode (REGNO (i2dest), new_mode);
3619 | ni2dest = regno_reg_rtx[REGNO (i2dest)]; |
3620 | } |
3621 | |
3622 | parallel = (gen_rtx_PARALLEL |
3623 | (VOIDmode, |
3624 | gen_rtvec (2, newpat, |
3625 | gen_rtx_CLOBBER (VOIDmode, |
3626 | ni2dest)))); |
m_split_insn = combine_split_insns (parallel, i3);
3628 | |
3629 | if (m_split_insn == 0 |
3630 | && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER) |
3631 | { |
3632 | struct undo *buf; |
3633 | |
3634 | adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode); |
3635 | buf = undobuf.undos; |
3636 | undobuf.undos = buf->next; |
3637 | buf->next = undobuf.frees; |
3638 | undobuf.frees = buf; |
3639 | } |
3640 | } |
3641 | |
3642 | i2scratch = m_split_insn != 0; |
3643 | } |
3644 | |
3645 | /* If recog_for_combine has discarded clobbers, try to use them |
3646 | again for the split. */ |
3647 | if (m_split_insn == 0 && newpat_vec_with_clobbers) |
3648 | { |
3649 | parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers); |
m_split_insn = combine_split_insns (parallel, i3);
}

if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
{
rtx m_split_pat = PATTERN (m_split_insn);
insn_code_number = recog_for_combine (&m_split_pat, i3, &new_i3_notes);
if (insn_code_number >= 0)
newpat = m_split_pat;
}
else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
&& (next_nonnote_nondebug_insn (i2) == i3
|| !modified_between_p (PATTERN (m_split_insn), i2, i3)))
{
rtx i2set, i3set;
rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
newi2pat = PATTERN (m_split_insn);

i3set = single_set (NEXT_INSN (m_split_insn));
i2set = single_set (m_split_insn);
3670 | |
3671 | i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes); |
3672 | |
3673 | /* If I2 or I3 has multiple SETs, we won't know how to track |
3674 | register status, so don't use these insns. If I2's destination |
3675 | is used between I2 and I3, we also can't use these insns. */ |
3676 | |
3677 | if (i2_code_number >= 0 && i2set && i3set |
3678 | && (next_nonnote_nondebug_insn (i2) == i3 |
3679 | || ! reg_used_between_p (SET_DEST (i2set), i2, i3))) |
3680 | insn_code_number = recog_for_combine (&newi3pat, i3, |
3681 | &new_i3_notes); |
3682 | if (insn_code_number >= 0) |
3683 | newpat = newi3pat; |
3684 | |
3685 | /* It is possible that both insns now set the destination of I3. |
3686 | If so, we must show an extra use of it. */ |
3687 | |
3688 | if (insn_code_number >= 0) |
3689 | { |
3690 | rtx new_i3_dest = SET_DEST (i3set); |
3691 | rtx new_i2_dest = SET_DEST (i2set); |
3692 | |
3693 | while (GET_CODE (new_i3_dest) == ZERO_EXTRACT |
3694 | || GET_CODE (new_i3_dest) == STRICT_LOW_PART |
3695 | || GET_CODE (new_i3_dest) == SUBREG) |
3696 | new_i3_dest = XEXP (new_i3_dest, 0); |
3697 | |
3698 | while (GET_CODE (new_i2_dest) == ZERO_EXTRACT |
3699 | || GET_CODE (new_i2_dest) == STRICT_LOW_PART |
3700 | || GET_CODE (new_i2_dest) == SUBREG) |
3701 | new_i2_dest = XEXP (new_i2_dest, 0); |
3702 | |
3703 | if (REG_P (new_i3_dest) |
3704 | && REG_P (new_i2_dest) |
3705 | && REGNO (new_i3_dest) == REGNO (new_i2_dest) |
3706 | && REGNO (new_i2_dest) < reg_n_sets_max) |
3707 | INC_REG_N_SETS (REGNO (new_i2_dest), 1); |
3708 | } |
3709 | } |
3710 | |
3711 | /* If we can split it and use I2DEST, go ahead and see if that |
3712 | helps things be recognized. Verify that none of the registers |
3713 | are set between I2 and I3. */ |
3714 | if (insn_code_number < 0 |
3715 | && (split = find_split_point (&newpat, i3, false)) != 0 |
3716 | /* We need I2DEST in the proper mode. If it is a hard register |
3717 | or the only use of a pseudo, we can change its mode. |
3718 | Make sure we don't change a hard register to have a mode that |
3719 | isn't valid for it, or change the number of registers. */ |
3720 | && (GET_MODE (*split) == GET_MODE (i2dest) |
3721 | || GET_MODE (*split) == VOIDmode |
|| can_change_dest_mode (i2dest, added_sets_2,
3723 | GET_MODE (*split))) |
3724 | && (next_nonnote_nondebug_insn (i2) == i3 |
3725 | || !modified_between_p (*split, i2, i3)) |
3726 | /* We can't overwrite I2DEST if its value is still used by |
3727 | NEWPAT. */ |
3728 | && ! reg_referenced_p (i2dest, newpat) |
3729 | /* We should not split a possibly trapping part when we |
3730 | care about non-call EH and have REG_EH_REGION notes |
3731 | to distribute. */ |
3732 | && ! (cfun->can_throw_non_call_exceptions |
3733 | && has_non_call_exception |
3734 | && may_trap_p (*split))) |
3735 | { |
3736 | rtx newdest = i2dest; |
3737 | enum rtx_code split_code = GET_CODE (*split); |
3738 | machine_mode split_mode = GET_MODE (*split); |
3739 | bool subst_done = false; |
3740 | newi2pat = NULL_RTX; |
3741 | |
3742 | i2scratch = true; |
3743 | |
3744 | /* *SPLIT may be part of I2SRC, so make sure we have the |
3745 | original expression around for later debug processing. |
3746 | We should not need I2SRC any more in other cases. */ |
3747 | if (MAY_HAVE_DEBUG_BIND_INSNS) |
3748 | i2src = copy_rtx (i2src); |
3749 | else |
3750 | i2src = NULL; |
3751 | |
3752 | /* Get NEWDEST as a register in the proper mode. We have already |
3753 | validated that we can do this. */ |
3754 | if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode) |
3755 | { |
3756 | if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER) |
3757 | newdest = gen_rtx_REG (split_mode, REGNO (i2dest)); |
3758 | else |
3759 | { |
subst_mode (REGNO (i2dest), split_mode);
3761 | newdest = regno_reg_rtx[REGNO (i2dest)]; |
3762 | } |
3763 | } |
3764 | |
3765 | /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to |
3766 | an ASHIFT. This can occur if it was inside a PLUS and hence |
3767 | appeared to be a memory address. This is a kludge. */ |
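/* E.g. (hypothetically) (mult (reg 50) (const_int 8)) becomes
(ashift (reg 50) (const_int 3)).  */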
3768 | if (split_code == MULT |
3769 | && CONST_INT_P (XEXP (*split, 1)) |
3770 | && INTVAL (XEXP (*split, 1)) > 0 |
3771 | && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0) |
3772 | { |
3773 | rtx i_rtx = gen_int_shift_amount (split_mode, i); |
3774 | SUBST (*split, gen_rtx_ASHIFT (split_mode, |
3775 | XEXP (*split, 0), i_rtx)); |
3776 | /* Update split_code because we may not have a multiply |
3777 | anymore. */ |
3778 | split_code = GET_CODE (*split); |
3779 | } |
3780 | |
3781 | /* Similarly for (plus (mult FOO (const_int pow2))). */ |
3782 | if (split_code == PLUS |
3783 | && GET_CODE (XEXP (*split, 0)) == MULT |
3784 | && CONST_INT_P (XEXP (XEXP (*split, 0), 1)) |
3785 | && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0 |
3786 | && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0) |
3787 | { |
3788 | rtx nsplit = XEXP (*split, 0); |
3789 | rtx i_rtx = gen_int_shift_amount (GET_MODE (nsplit), i); |
3790 | SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit), |
3791 | XEXP (nsplit, 0), |
3792 | i_rtx)); |
3793 | /* Update split_code because we may not have a multiply |
3794 | anymore. */ |
3795 | split_code = GET_CODE (*split); |
3796 | } |
3797 | |
3798 | #ifdef INSN_SCHEDULING |
3799 | /* If *SPLIT is a paradoxical SUBREG, when we split it, it should |
3800 | be written as a ZERO_EXTEND. */ |
3801 | if (split_code == SUBREG && MEM_P (SUBREG_REG (*split))) |
3802 | { |
3803 | /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's |
3804 | what it really is. */ |
3805 | if (load_extend_op (GET_MODE (SUBREG_REG (*split))) |
3806 | == SIGN_EXTEND) |
3807 | SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode, |
3808 | SUBREG_REG (*split))); |
3809 | else |
3810 | SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode, |
3811 | SUBREG_REG (*split))); |
3812 | } |
3813 | #endif |
3814 | |
3815 | /* Attempt to split binary operators using arithmetic identities. */ |
3816 | if (BINARY_P (SET_SRC (newpat)) |
3817 | && split_mode == GET_MODE (SET_SRC (newpat)) |
3818 | && ! side_effects_p (SET_SRC (newpat))) |
3819 | { |
3820 | rtx setsrc = SET_SRC (newpat); |
3821 | machine_mode mode = GET_MODE (setsrc); |
3822 | enum rtx_code code = GET_CODE (setsrc); |
3823 | rtx src_op0 = XEXP (setsrc, 0); |
3824 | rtx src_op1 = XEXP (setsrc, 1); |
3825 | |
3826 | /* Split "X = Y op Y" as "Z = Y; X = Z op Z". */ |
3827 | if (rtx_equal_p (src_op0, src_op1)) |
3828 | { |
3829 | newi2pat = gen_rtx_SET (newdest, src_op0); |
3830 | SUBST (XEXP (setsrc, 0), newdest); |
3831 | SUBST (XEXP (setsrc, 1), newdest); |
3832 | subst_done = true; |
3833 | } |
3834 | /* Split "((P op Q) op R) op S" where op is PLUS or MULT. */ |
3835 | else if ((code == PLUS || code == MULT) |
3836 | && GET_CODE (src_op0) == code |
3837 | && GET_CODE (XEXP (src_op0, 0)) == code |
3838 | && (INTEGRAL_MODE_P (mode) |
3839 | || (FLOAT_MODE_P (mode) |
3840 | && flag_unsafe_math_optimizations))) |
3841 | { |
3842 | rtx p = XEXP (XEXP (src_op0, 0), 0); |
3843 | rtx q = XEXP (XEXP (src_op0, 0), 1); |
3844 | rtx r = XEXP (src_op0, 1); |
3845 | rtx s = src_op1; |
3846 | |
3847 | /* Split both "((X op Y) op X) op Y" and |
3848 | "((X op Y) op Y) op X" as "T op T" where T is |
3849 | "X op Y". */ |
3850 | if ((rtx_equal_p (p,r) && rtx_equal_p (q,s)) |
3851 | || (rtx_equal_p (p,s) && rtx_equal_p (q,r))) |
3852 | { |
3853 | newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0)); |
3854 | SUBST (XEXP (setsrc, 0), newdest); |
3855 | SUBST (XEXP (setsrc, 1), newdest); |
3856 | subst_done = true; |
3857 | } |
3858 | /* Split "((X op X) op Y) op Y)" as "T op T" where |
3859 | T is "X op Y". */ |
3860 | else if (rtx_equal_p (p,q) && rtx_equal_p (r,s)) |
3861 | { |
rtx tmp = simplify_gen_binary (code, mode, p, r);
3863 | newi2pat = gen_rtx_SET (newdest, tmp); |
3864 | SUBST (XEXP (setsrc, 0), newdest); |
3865 | SUBST (XEXP (setsrc, 1), newdest); |
3866 | subst_done = true; |
3867 | } |
3868 | } |
3869 | } |
3870 | |
3871 | if (!subst_done) |
3872 | { |
3873 | newi2pat = gen_rtx_SET (newdest, *split); |
3874 | SUBST (*split, newdest); |
3875 | } |
3876 | |
3877 | i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes); |
3878 | |
3879 | /* recog_for_combine might have added CLOBBERs to newi2pat. |
3880 | Make sure NEWPAT does not depend on the clobbered regs. */ |
3881 | if (GET_CODE (newi2pat) == PARALLEL) |
3882 | for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--) |
3883 | if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER) |
3884 | { |
3885 | rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0); |
3886 | if (reg_overlap_mentioned_p (reg, newpat)) |
3887 | { |
3888 | undo_all (); |
3889 | return 0; |
3890 | } |
3891 | } |
3892 | |
3893 | /* If the split point was a MULT and we didn't have one before, |
3894 | don't use one now. */ |
3895 | if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult)) |
3896 | insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes); |
3897 | } |
3898 | } |
3899 | |
3900 | /* Check for a case where we loaded from memory in a narrow mode and |
3901 | then sign extended it, but we need both registers. In that case, |
3902 | we have a PARALLEL with both loads from the same memory location. |
3903 | We can split this into a load from memory followed by a register-register |
3904 | copy. This saves at least one insn, more if register allocation can |
3905 | eliminate the copy. |
3906 | |
3907 | We cannot do this if the destination of the first assignment is a |
3908 | condition code register. We eliminate this case by making sure |
3909 | the SET_DEST and SET_SRC have the same mode. |
3910 | |
3911 | We cannot do this if the destination of the second assignment is |
3912 | a register that we have already assumed is zero-extended. Similarly |
3913 | for a SUBREG of such a register. */ |
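
/* Illustrative shape (hypothetical registers and memory M):
     (parallel [(set (reg:DI 40) (sign_extend:DI (mem:SI M)))
		(set (reg:SI 41) (mem:SI M))])
becomes the load (set (reg:DI 40) (sign_extend:DI (mem:SI M))) at I2,
followed at I3 by a copy of the low part of (reg:DI 40) into
(reg:SI 41).  */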
3914 | |
3915 | else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0 |
3916 | && GET_CODE (newpat) == PARALLEL |
3917 | && XVECLEN (newpat, 0) == 2 |
3918 | && GET_CODE (XVECEXP (newpat, 0, 0)) == SET |
3919 | && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND |
3920 | && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0))) |
3921 | == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0)))) |
3922 | && GET_CODE (XVECEXP (newpat, 0, 1)) == SET |
3923 | && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)), |
3924 | XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0)) |
3925 | && !modified_between_p (SET_SRC (XVECEXP (newpat, 0, 1)), i2, i3) |
3926 | && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT |
3927 | && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART |
3928 | && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)), |
3929 | (REG_P (temp_expr) |
3930 | && reg_stat[REGNO (temp_expr)].nonzero_bits != 0 |
3931 | && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)), |
3932 | BITS_PER_WORD) |
3933 | && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)), |
3934 | HOST_BITS_PER_INT) |
3935 | && (reg_stat[REGNO (temp_expr)].nonzero_bits |
3936 | != GET_MODE_MASK (word_mode)))) |
3937 | && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG |
3938 | && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))), |
3939 | (REG_P (temp_expr) |
3940 | && reg_stat[REGNO (temp_expr)].nonzero_bits != 0 |
3941 | && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)), |
3942 | BITS_PER_WORD) |
3943 | && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)), |
3944 | HOST_BITS_PER_INT) |
3945 | && (reg_stat[REGNO (temp_expr)].nonzero_bits |
3946 | != GET_MODE_MASK (word_mode))))) |
3947 | && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)), |
3948 | SET_SRC (XVECEXP (newpat, 0, 1))) |
3949 | && ! find_reg_note (i3, REG_UNUSED, |
3950 | SET_DEST (XVECEXP (newpat, 0, 0)))) |
3951 | { |
3952 | rtx ni2dest; |
3953 | |
3954 | newi2pat = XVECEXP (newpat, 0, 0); |
3955 | ni2dest = SET_DEST (XVECEXP (newpat, 0, 0)); |
3956 | newpat = XVECEXP (newpat, 0, 1); |
3957 | SUBST (SET_SRC (newpat), |
3958 | gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest)); |
3959 | i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes); |
3960 | |
3961 | if (i2_code_number >= 0) |
3962 | insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes); |
3963 | |
3964 | if (insn_code_number >= 0) |
swap_i2i3 = true;
3966 | } |
3967 | |
3968 | /* Similarly, check for a case where we have a PARALLEL of two independent |
3969 | SETs but we started with three insns. In this case, we can do the sets |
3970 | as two separate insns. This case occurs when some SET allows two |
3971 | other insns to combine, but the destination of that SET is still live. |
3972 | |
3973 | Also do this if we started with two insns and (at least) one of the |
3974 | resulting sets is a noop; this noop will be deleted later. |
3975 | |
3976 | Also do this if we started with two insns neither of which was a simple |
3977 | move. */ |
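
/* Hypothetical shape: NEWPAT is
     (parallel [(set (reg 20) (plus (reg 21) (reg 22)))
		(set (reg 23) (minus (reg 21) (reg 22)))])
with both destinations still live; one SET is moved up to I2 as
NEWI2PAT and the other stays in I3, subject to the ordering checks
below.  */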
3978 | |
3979 | else if (insn_code_number < 0 && asm_noperands (newpat) < 0 |
3980 | && GET_CODE (newpat) == PARALLEL |
3981 | && XVECLEN (newpat, 0) == 2 |
3982 | && GET_CODE (XVECEXP (newpat, 0, 0)) == SET |
3983 | && GET_CODE (XVECEXP (newpat, 0, 1)) == SET |
3984 | && (i1 |
3985 | || set_noop_p (XVECEXP (newpat, 0, 0)) |
3986 | || set_noop_p (XVECEXP (newpat, 0, 1)) |
3987 | || (!i2_was_move && !i3_was_move)) |
3988 | && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT |
3989 | && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART |
3990 | && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT |
3991 | && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART |
3992 | && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)), |
3993 | XVECEXP (newpat, 0, 0)) |
3994 | && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)), |
3995 | XVECEXP (newpat, 0, 1)) |
3996 | && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0))) |
3997 | && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1))))) |
3998 | { |
3999 | rtx set0 = XVECEXP (newpat, 0, 0); |
4000 | rtx set1 = XVECEXP (newpat, 0, 1); |
4001 | |
4002 | /* Normally, it doesn't matter which of the two is done first, but |
4003 | one which uses any regs/memory set in between i2 and i3 can't |
4004 | be first. The PARALLEL might also have been pre-existing in i3, |
4005 | so we need to make sure that we won't wrongly hoist a SET to i2 |
4006 | that would conflict with a death note present in there, or would |
4007 | have its dest modified between i2 and i3. */ |
4008 | if (!modified_between_p (SET_SRC (set1), i2, i3) |
4009 | && !(REG_P (SET_DEST (set1)) |
4010 | && find_reg_note (i2, REG_DEAD, SET_DEST (set1))) |
4011 | && !(GET_CODE (SET_DEST (set1)) == SUBREG |
4012 | && find_reg_note (i2, REG_DEAD, |
4013 | SUBREG_REG (SET_DEST (set1)))) |
4014 | && !modified_between_p (SET_DEST (set1), i2, i3) |
4015 | /* If I3 is a jump, ensure that set0 is a jump so that |
4016 | we do not create invalid RTL. */ |
4017 | && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx) |
4018 | ) |
4019 | { |
4020 | newi2pat = set1; |
4021 | newpat = set0; |
4022 | } |
4023 | else if (!modified_between_p (SET_SRC (set0), i2, i3) |
4024 | && !(REG_P (SET_DEST (set0)) |
4025 | && find_reg_note (i2, REG_DEAD, SET_DEST (set0))) |
4026 | && !(GET_CODE (SET_DEST (set0)) == SUBREG |
4027 | && find_reg_note (i2, REG_DEAD, |
4028 | SUBREG_REG (SET_DEST (set0)))) |
4029 | && !modified_between_p (SET_DEST (set0), i2, i3) |
4030 | /* If I3 is a jump, ensure that set1 is a jump so that |
4031 | we do not create invalid RTL. */ |
4032 | && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx) |
4033 | ) |
4034 | { |
4035 | newi2pat = set0; |
4036 | newpat = set1; |
4037 | } |
4038 | else |
4039 | { |
4040 | undo_all (); |
4041 | return 0; |
4042 | } |
4043 | |
4044 | i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes); |
4045 | |
4046 | if (i2_code_number >= 0) |
4047 | { |
4048 | /* recog_for_combine might have added CLOBBERs to newi2pat. |
4049 | Make sure NEWPAT does not depend on the clobbered regs. */ |
4050 | if (GET_CODE (newi2pat) == PARALLEL) |
4051 | { |
4052 | for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--) |
4053 | if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER) |
4054 | { |
4055 | rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0); |
4056 | if (reg_overlap_mentioned_p (reg, newpat)) |
4057 | { |
4058 | undo_all (); |
4059 | return 0; |
4060 | } |
4061 | } |
4062 | } |
4063 | |
4064 | insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes); |
4065 | |
4066 | /* Likewise, recog_for_combine might have added clobbers to NEWPAT. |
4067 | Checking that the SET0's SET_DEST and SET1's SET_DEST aren't |
4068 | mentioned/clobbered, ensures NEWI2PAT's SET_DEST is live. */ |
4069 | if (insn_code_number >= 0 && GET_CODE (newpat) == PARALLEL) |
4070 | { |
4071 | for (i = XVECLEN (newpat, 0) - 1; i >= 0; i--) |
4072 | if (GET_CODE (XVECEXP (newpat, 0, i)) == CLOBBER) |
4073 | { |
4074 | rtx reg = XEXP (XVECEXP (newpat, 0, i), 0); |
4075 | if (reg_overlap_mentioned_p (reg, SET_DEST (set0)) |
4076 | || reg_overlap_mentioned_p (reg, SET_DEST (set1))) |
4077 | { |
4078 | undo_all (); |
4079 | return 0; |
4080 | } |
4081 | } |
4082 | } |
4083 | |
4084 | if (insn_code_number >= 0) |
4085 | split_i2i3 = true; |
4086 | } |
4087 | } |
4088 | |
4089 | /* If it still isn't recognized, fail and change things back the way they |
4090 | were. */ |
4091 | if ((insn_code_number < 0 |
4092 | /* Is the result a reasonable ASM_OPERANDS? */ |
4093 | && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2))) |
4094 | { |
4095 | undo_all (); |
4096 | return 0; |
4097 | } |
4098 | |
4099 | /* If we had to change another insn, make sure it is valid also. */ |
4100 | if (undobuf.other_insn) |
4101 | { |
CLEAR_HARD_REG_SET (newpat_used_regs);

other_pat = PATTERN (undobuf.other_insn);
4105 | other_code_number = recog_for_combine (&other_pat, undobuf.other_insn, |
4106 | &new_other_notes); |
4107 | |
4108 | if (other_code_number < 0 && ! check_asm_operands (other_pat)) |
4109 | { |
4110 | undo_all (); |
4111 | return 0; |
4112 | } |
4113 | } |
4114 | |
4115 | /* Only allow this combination if insn_cost reports that the |
4116 | replacement instructions are cheaper than the originals. */ |
if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
4118 | { |
4119 | undo_all (); |
4120 | return 0; |
4121 | } |
4122 | |
4123 | if (MAY_HAVE_DEBUG_BIND_INSNS) |
4124 | { |
4125 | struct undo *undo; |
4126 | |
4127 | for (undo = undobuf.undos; undo; undo = undo->next) |
4128 | if (undo->kind == UNDO_MODE) |
4129 | { |
4130 | rtx reg = regno_reg_rtx[undo->where.regno]; |
4131 | machine_mode new_mode = GET_MODE (reg); |
4132 | machine_mode old_mode = undo->old_contents.m; |
4133 | |
4134 | /* Temporarily revert mode back. */ |
4135 | adjust_reg_mode (reg, old_mode); |
4136 | |
4137 | if (reg == i2dest && i2scratch) |
4138 | { |
4139 | /* If we used i2dest as a scratch register with a |
4140 | different mode, substitute it for the original |
4141 | i2src while its original mode is temporarily |
4142 | restored, and then clear i2scratch so that we don't |
4143 | do it again later. */ |
4144 | propagate_for_debug (i2, last_combined_insn, reg, i2src, |
4145 | this_basic_block); |
4146 | i2scratch = false; |
4147 | /* Put back the new mode. */ |
4148 | adjust_reg_mode (reg, new_mode); |
4149 | } |
4150 | else |
4151 | { |
4152 | rtx tempreg = gen_raw_REG (old_mode, REGNO (reg)); |
4153 | rtx_insn *first, *last; |
4154 | |
4155 | if (reg == i2dest) |
4156 | { |
4157 | first = i2; |
4158 | last = last_combined_insn; |
4159 | } |
4160 | else |
4161 | { |
4162 | first = i3; |
4163 | last = undobuf.other_insn; |
4164 | gcc_assert (last); |
4165 | if (DF_INSN_LUID (last) |
4166 | < DF_INSN_LUID (last_combined_insn)) |
4167 | last = last_combined_insn; |
4168 | } |
4169 | |
4170 | /* We're dealing with a reg that changed mode but not |
4171 | meaning, so we want to turn it into a subreg for |
4172 | the new mode. However, because of REG sharing and |
4173 | because its mode had already changed, we have to do |
4174 | it in two steps. First, replace any debug uses of |
4175 | reg, with its original mode temporarily restored, |
4176 | with this copy we have created; then, replace the |
4177 | copy with the SUBREG of the original shared reg, |
4178 | once again changed to the new mode. */ |
4179 | propagate_for_debug (first, last, reg, tempreg, |
4180 | this_basic_block); |
4181 | adjust_reg_mode (reg, new_mode); |
4182 | propagate_for_debug (first, last, tempreg, |
lowpart_subreg (old_mode, reg, new_mode),
4184 | this_basic_block); |
4185 | } |
4186 | } |
4187 | } |
4188 | |
4189 | /* If we will be able to accept this, we have made a |
4190 | change to the destination of I3. This requires us to |
4191 | do a few adjustments. */ |
4192 | |
4193 | if (changed_i3_dest) |
4194 | { |
PATTERN (i3) = newpat;
adjust_for_new_dest (i3);
4197 | } |
4198 | |
4199 | /* We now know that we can do this combination. Merge the insns and |
4200 | update the status of registers and LOG_LINKS. */ |
4201 | |
4202 | if (undobuf.other_insn) |
4203 | { |
4204 | rtx note, next; |
4205 | |
PATTERN (undobuf.other_insn) = other_pat;
4207 | |
4208 | /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED, |
4209 | ensure that they are still valid. Then add any non-duplicate |
4210 | notes added by recog_for_combine. */ |
4211 | for (note = REG_NOTES (undobuf.other_insn); note; note = next) |
4212 | { |
4213 | next = XEXP (note, 1); |
4214 | |
if ((REG_NOTE_KIND (note) == REG_DEAD
&& !reg_referenced_p (XEXP (note, 0),
PATTERN (undobuf.other_insn)))
|| (REG_NOTE_KIND (note) == REG_UNUSED
&& !reg_set_p (XEXP (note, 0),
PATTERN (undobuf.other_insn)))
/* Simply drop the REG_EQUAL/REG_EQUIV note, since it may no
longer be valid for other_insn.  It may be possible to record
that the CC register is changed and only discard those notes,
but in practice it's unnecessary complication and doesn't
give any meaningful improvement.

See PR78559. */
|| REG_NOTE_KIND (note) == REG_EQUAL
|| REG_NOTE_KIND (note) == REG_EQUIV)
4230 | remove_note (undobuf.other_insn, note); |
4231 | } |
4232 | |
4233 | distribute_notes (new_other_notes, undobuf.other_insn, |
4234 | undobuf.other_insn, NULL, NULL_RTX, NULL_RTX, |
4235 | NULL_RTX); |
4236 | } |
4237 | |
4238 | if (swap_i2i3) |
4239 | { |
4240 | /* I3 now uses what used to be its destination and which is now |
4241 | I2's destination. This requires us to do a few adjustments. */ |
PATTERN (i3) = newpat;
adjust_for_new_dest (i3);
4244 | } |
4245 | |
4246 | if (swap_i2i3 || split_i2i3) |
4247 | { |
4248 | /* We might need a LOG_LINK from I3 to I2. But then we used to |
4249 | have one, so we still will. |
4250 | |
4251 | However, some later insn might be using I2's dest and have |
4252 | a LOG_LINK pointing at I3. We should change it to point at |
4253 | I2 instead. */ |
4254 | |
4255 | /* newi2pat is usually a SET here; however, recog_for_combine might |
4256 | have added some clobbers. */ |
4257 | rtx x = newi2pat; |
4258 | if (GET_CODE (x) == PARALLEL) |
4259 | x = XVECEXP (newi2pat, 0, 0); |
4260 | |
4261 | if (REG_P (SET_DEST (x)) |
4262 | || (GET_CODE (SET_DEST (x)) == SUBREG |
4263 | && REG_P (SUBREG_REG (SET_DEST (x))))) |
4264 | { |
4265 | unsigned int regno = reg_or_subregno (SET_DEST (x)); |
4266 | |
4267 | bool done = false; |
for (rtx_insn *insn = NEXT_INSN (i3);
4269 | !done |
4270 | && insn |
4271 | && INSN_P (insn) |
4272 | && BLOCK_FOR_INSN (insn) == this_basic_block; |
4273 | insn = NEXT_INSN (insn)) |
4274 | { |
4275 | if (DEBUG_INSN_P (insn)) |
4276 | continue; |
4277 | struct insn_link *link; |
4278 | FOR_EACH_LOG_LINK (link, insn) |
4279 | if (link->insn == i3 && link->regno == regno) |
4280 | { |
4281 | link->insn = i2; |
4282 | done = true; |
4283 | break; |
4284 | } |
4285 | } |
4286 | } |
4287 | } |
4288 | |
4289 | { |
4290 | rtx i3notes, i2notes, i1notes = 0, i0notes = 0; |
4291 | struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0; |
4292 | rtx midnotes = 0; |
4293 | int from_luid; |
4294 | /* Compute which registers we expect to eliminate. newi2pat may be setting |
4295 | either i3dest or i2dest, so we must check it. */ |
4296 | rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat)) |
4297 | || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src |
4298 | || !i2dest_killed |
4299 | ? 0 : i2dest); |
4300 | /* For i1, we need to compute both local elimination and global |
4301 | elimination information with respect to newi2pat because i1dest |
4302 | may be the same as i3dest, in which case newi2pat may be setting |
4303 | i1dest. Global information is used when distributing REG_DEAD |
4304 | note for i2 and i3, in which case it does matter if newi2pat sets |
4305 | i1dest or not. |
4306 | |
4307 | Local information is used when distributing REG_DEAD note for i1, |
4308 | in which case it doesn't matter if newi2pat sets i1dest or not. |
4309 | See PR62151, if we have four insns combination: |
4310 | i0: r0 <- i0src |
4311 | i1: r1 <- i1src (using r0) |
4312 | REG_DEAD (r0) |
4313 | i2: r0 <- i2src (using r1) |
4314 | i3: r3 <- i3src (using r0) |
4315 | ix: using r0 |
4316 | From i1's point of view, r0 is eliminated, no matter if it is set |
4317 | by newi2pat or not. In other words, REG_DEAD info for r0 in i1 |
4318 | should be discarded. |
4319 | |
4320 | Note local information only affects cases in forms like "I1->I2->I3", |
4321 | "I0->I1->I2->I3" or "I0&I1->I2, I2->I3". For other cases like |
4322 | "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or |
4323 | i0dest anyway. */ |
4324 | rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src |
4325 | || !i1dest_killed |
4326 | ? 0 : i1dest); |
4327 | rtx elim_i1 = (local_elim_i1 == 0 |
4328 | || (newi2pat && reg_set_p (i1dest, newi2pat)) |
4329 | ? 0 : i1dest); |
4330 | /* Same case as i1. */ |
4331 | rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed |
4332 | ? 0 : i0dest); |
4333 | rtx elim_i0 = (local_elim_i0 == 0 |
4334 | || (newi2pat && reg_set_p (i0dest, newi2pat)) |
4335 | ? 0 : i0dest); |
4336 | |
4337 | /* Get the old REG_NOTES and LOG_LINKS from all our insns and |
4338 | clear them. */ |
4339 | i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3); |
4340 | i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2); |
4341 | if (i1) |
4342 | i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1); |
4343 | if (i0) |
4344 | i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0); |
4345 | |
4346 | /* Ensure that we do not have something that should not be shared but |
4347 | occurs multiple times in the new insns. Check this by first |
resetting all the `used' flags and then copying anything that is shared.  */
4349 | |
4350 | reset_used_flags (i3notes); |
4351 | reset_used_flags (i2notes); |
4352 | reset_used_flags (i1notes); |
4353 | reset_used_flags (i0notes); |
4354 | reset_used_flags (newpat); |
4355 | reset_used_flags (newi2pat); |
4356 | if (undobuf.other_insn) |
reset_used_flags (PATTERN (undobuf.other_insn));
4358 | |
4359 | i3notes = copy_rtx_if_shared (i3notes); |
4360 | i2notes = copy_rtx_if_shared (i2notes); |
4361 | i1notes = copy_rtx_if_shared (i1notes); |
4362 | i0notes = copy_rtx_if_shared (i0notes); |
4363 | newpat = copy_rtx_if_shared (newpat); |
4364 | newi2pat = copy_rtx_if_shared (newi2pat); |
4365 | if (undobuf.other_insn) |
reset_used_flags (PATTERN (undobuf.other_insn));
4367 | |
4368 | INSN_CODE (i3) = insn_code_number; |
PATTERN (i3) = newpat;
4370 | |
4371 | if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3)) |
4372 | { |
4373 | for (rtx link = CALL_INSN_FUNCTION_USAGE (i3); link; |
4374 | link = XEXP (link, 1)) |
4375 | { |
4376 | if (substed_i2) |
4377 | { |
4378 | /* I2SRC must still be meaningful at this point. Some |
4379 | splitting operations can invalidate I2SRC, but those |
4380 | operations do not apply to calls. */ |
4381 | gcc_assert (i2src); |
4382 | XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0), |
4383 | i2dest, i2src); |
4384 | } |
4385 | if (substed_i1) |
4386 | XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0), |
4387 | i1dest, i1src); |
4388 | if (substed_i0) |
4389 | XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0), |
4390 | i0dest, i0src); |
4391 | } |
4392 | } |
4393 | |
4394 | if (undobuf.other_insn) |
4395 | INSN_CODE (undobuf.other_insn) = other_code_number; |
4396 | |
4397 | /* We had one special case above where I2 had more than one set and |
4398 | we replaced a destination of one of those sets with the destination |
4399 | of I3. In that case, we have to update LOG_LINKS of insns later |
4400 | in this basic block. Note that this (expensive) case is rare. |
4401 | |
4402 | Also, in this case, we must pretend that all REG_NOTEs for I2 |
4403 | actually came from I3, so that REG_UNUSED notes from I2 will be |
4404 | properly handled. */ |
4405 | |
4406 | if (i3_subst_into_i2) |
4407 | { |
4408 | for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++) |
4409 | if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET |
4410 | || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER) |
4411 | && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i))) |
4412 | && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest |
4413 | && ! find_reg_note (i2, REG_UNUSED, |
4414 | SET_DEST (XVECEXP (PATTERN (i2), 0, i)))) |
for (temp_insn = NEXT_INSN (i2);
4416 | temp_insn |
4417 | && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun) |
4418 | || BB_HEAD (this_basic_block) != temp_insn); |
temp_insn = NEXT_INSN (temp_insn))
4420 | if (temp_insn != i3 && NONDEBUG_INSN_P (temp_insn)) |
4421 | FOR_EACH_LOG_LINK (link, temp_insn) |
4422 | if (link->insn == i2) |
4423 | link->insn = i3; |
4424 | |
4425 | if (i3notes) |
4426 | { |
4427 | rtx link = i3notes; |
4428 | while (XEXP (link, 1)) |
4429 | link = XEXP (link, 1); |
4430 | XEXP (link, 1) = i2notes; |
4431 | } |
4432 | else |
4433 | i3notes = i2notes; |
4434 | i2notes = 0; |
4435 | } |
4436 | |
4437 | LOG_LINKS (i3) = NULL; |
4438 | REG_NOTES (i3) = 0; |
4439 | LOG_LINKS (i2) = NULL; |
4440 | REG_NOTES (i2) = 0; |
4441 | |
4442 | if (newi2pat) |
4443 | { |
4444 | if (MAY_HAVE_DEBUG_BIND_INSNS && i2scratch) |
4445 | propagate_for_debug (i2, last_combined_insn, i2dest, i2src, |
4446 | this_basic_block); |
4447 | INSN_CODE (i2) = i2_code_number; |
PATTERN (i2) = newi2pat;
4449 | } |
4450 | else |
4451 | { |
4452 | if (MAY_HAVE_DEBUG_BIND_INSNS && i2src) |
4453 | propagate_for_debug (i2, last_combined_insn, i2dest, i2src, |
4454 | this_basic_block); |
4455 | SET_INSN_DELETED (i2); |
4456 | } |
4457 | |
4458 | if (i1) |
4459 | { |
4460 | LOG_LINKS (i1) = NULL; |
4461 | REG_NOTES (i1) = 0; |
4462 | if (MAY_HAVE_DEBUG_BIND_INSNS) |
4463 | propagate_for_debug (i1, last_combined_insn, i1dest, i1src, |
4464 | this_basic_block); |
4465 | SET_INSN_DELETED (i1); |
4466 | } |
4467 | |
4468 | if (i0) |
4469 | { |
4470 | LOG_LINKS (i0) = NULL; |
4471 | REG_NOTES (i0) = 0; |
4472 | if (MAY_HAVE_DEBUG_BIND_INSNS) |
4473 | propagate_for_debug (i0, last_combined_insn, i0dest, i0src, |
4474 | this_basic_block); |
4475 | SET_INSN_DELETED (i0); |
4476 | } |
4477 | |
4478 | /* Get death notes for everything that is now used in either I3 or |
4479 | I2 and used to die in a previous insn. If we built two new |
4480 | patterns, move from I1 to I2 then I2 to I3 so that we get the |
4481 | proper movement on registers that I2 modifies. */ |
4482 | |
4483 | if (i0) |
4484 | from_luid = DF_INSN_LUID (i0); |
4485 | else if (i1) |
4486 | from_luid = DF_INSN_LUID (i1); |
4487 | else |
4488 | from_luid = DF_INSN_LUID (i2); |
4489 | if (newi2pat) |
4490 | move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes); |
4491 | move_deaths (newpat, newi2pat, from_luid, i3, &midnotes); |
4492 | |
4493 | /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */ |
4494 | if (i3notes) |
4495 | distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL, |
4496 | elim_i2, elim_i1, elim_i0); |
4497 | if (i2notes) |
4498 | distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL, |
4499 | elim_i2, elim_i1, elim_i0); |
4500 | if (i1notes) |
4501 | distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL, |
4502 | elim_i2, local_elim_i1, local_elim_i0); |
4503 | if (i0notes) |
4504 | distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL, |
4505 | elim_i2, elim_i1, local_elim_i0); |
4506 | if (midnotes) |
4507 | distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL, |
4508 | elim_i2, elim_i1, elim_i0); |
4509 | |
4510 | /* Distribute any notes added to I2 or I3 by recog_for_combine. We |
4511 | know these are REG_UNUSED and want them to go to the desired insn, |
4512 | so we always pass it as i3. */ |
4513 | |
4514 | if (newi2pat && new_i2_notes) |
4515 | distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX, |
4516 | NULL_RTX); |
4517 | |
4518 | if (new_i3_notes) |
4519 | distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX, |
4520 | NULL_RTX); |
4521 | |
4522 | /* If I3DEST was used in I3SRC, it really died in I3. We may need to |
4523 | put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets |
4524 | I3DEST, the death must be somewhere before I2, not I3. If we passed I3 |
4525 | in that case, it might delete I2. Similarly for I2 and I1. |
4526 | Show an additional death due to the REG_DEAD note we make here. If |
4527 | we discard it in distribute_notes, we will decrement it again. */ |
4528 | |
4529 | if (i3dest_killed) |
4530 | { |
4531 | rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX); |
4532 | if (newi2pat && reg_set_p (i3dest_killed, newi2pat)) |
4533 | distribute_notes (new_note, NULL, i2, NULL, elim_i2, |
4534 | elim_i1, elim_i0); |
4535 | else |
4536 | distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL, |
4537 | elim_i2, elim_i1, elim_i0); |
4538 | } |
4539 | |
4540 | if (i2dest_in_i2src) |
4541 | { |
4542 | rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX); |
4543 | if (newi2pat && reg_set_p (i2dest, newi2pat)) |
4544 | distribute_notes (new_note, NULL, i2, NULL, NULL_RTX, |
4545 | NULL_RTX, NULL_RTX); |
4546 | else |
4547 | distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL, |
4548 | NULL_RTX, NULL_RTX, NULL_RTX); |
4549 | } |
4550 | |
4551 | if (i1dest_in_i1src) |
4552 | { |
4553 | rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX); |
4554 | if (newi2pat && reg_set_p (i1dest, newi2pat)) |
4555 | distribute_notes (new_note, NULL, i2, NULL, NULL_RTX, |
4556 | NULL_RTX, NULL_RTX); |
4557 | else |
4558 | distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL, |
4559 | NULL_RTX, NULL_RTX, NULL_RTX); |
4560 | } |
4561 | |
4562 | if (i0dest_in_i0src) |
4563 | { |
4564 | rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX); |
4565 | if (newi2pat && reg_set_p (i0dest, newi2pat)) |
4566 | distribute_notes (new_note, NULL, i2, NULL, NULL_RTX, |
4567 | NULL_RTX, NULL_RTX); |
4568 | else |
4569 | distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL, |
4570 | NULL_RTX, NULL_RTX, NULL_RTX); |
4571 | } |
4572 | |
4573 | distribute_links (i3links); |
4574 | distribute_links (i2links); |
4575 | distribute_links (i1links); |
4576 | distribute_links (i0links); |
4577 | |
4578 | if (REG_P (i2dest)) |
4579 | { |
4580 | struct insn_link *link; |
4581 | rtx_insn *i2_insn = 0; |
4582 | rtx i2_val = 0, set; |
4583 | |
4584 | /* The insn that used to set this register doesn't exist, and |
4585 | this life of the register may not exist either. See if one of |
4586 | I3's links points to an insn that sets I2DEST. If it does, |
4587 | that is now the last known value for I2DEST. If we don't update |
4588 | this and I2 set the register to a value that depended on its old |
contents, we will get confused.  If this insn is used, things
will be set correctly in combine_instructions.  */
4591 | FOR_EACH_LOG_LINK (link, i3) |
if ((set = single_set (link->insn)) != 0
4593 | && rtx_equal_p (i2dest, SET_DEST (set))) |
4594 | i2_insn = link->insn, i2_val = SET_SRC (set); |
4595 | |
4596 | record_value_for_reg (i2dest, i2_insn, i2_val); |
4597 | |
4598 | /* If the reg formerly set in I2 died only once and that was in I3, |
4599 | zero its use count so it won't make `reload' do any work. */ |
4600 | if (! added_sets_2 |
4601 | && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat)) |
4602 | && ! i2dest_in_i2src |
4603 | && REGNO (i2dest) < reg_n_sets_max) |
4604 | INC_REG_N_SETS (REGNO (i2dest), -1); |
4605 | } |
4606 | |
4607 | if (i1 && REG_P (i1dest)) |
4608 | { |
4609 | struct insn_link *link; |
4610 | rtx_insn *i1_insn = 0; |
4611 | rtx i1_val = 0, set; |
4612 | |
4613 | FOR_EACH_LOG_LINK (link, i3) |
if ((set = single_set (link->insn)) != 0
4615 | && rtx_equal_p (i1dest, SET_DEST (set))) |
4616 | i1_insn = link->insn, i1_val = SET_SRC (set); |
4617 | |
4618 | record_value_for_reg (i1dest, i1_insn, i1_val); |
4619 | |
4620 | if (! added_sets_1 |
4621 | && ! i1dest_in_i1src |
4622 | && REGNO (i1dest) < reg_n_sets_max) |
4623 | INC_REG_N_SETS (REGNO (i1dest), -1); |
4624 | } |
4625 | |
4626 | if (i0 && REG_P (i0dest)) |
4627 | { |
4628 | struct insn_link *link; |
4629 | rtx_insn *i0_insn = 0; |
4630 | rtx i0_val = 0, set; |
4631 | |
4632 | FOR_EACH_LOG_LINK (link, i3) |
if ((set = single_set (link->insn)) != 0
4634 | && rtx_equal_p (i0dest, SET_DEST (set))) |
4635 | i0_insn = link->insn, i0_val = SET_SRC (set); |
4636 | |
4637 | record_value_for_reg (i0dest, i0_insn, i0_val); |
4638 | |
4639 | if (! added_sets_0 |
4640 | && ! i0dest_in_i0src |
4641 | && REGNO (i0dest) < reg_n_sets_max) |
4642 | INC_REG_N_SETS (REGNO (i0dest), -1); |
4643 | } |
4644 | |
4645 | /* Update reg_stat[].nonzero_bits et al for any changes that may have |
4646 | been made to this insn. The order is important, because newi2pat |
4647 | can affect nonzero_bits of newpat. */ |
4648 | if (newi2pat) |
4649 | note_pattern_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL); |
4650 | note_pattern_stores (newpat, set_nonzero_bits_and_sign_copies, NULL); |
4651 | } |
4652 | |
4653 | if (undobuf.other_insn != NULL_RTX) |
4654 | { |
4655 | if (dump_file) |
4656 | { |
fprintf (dump_file, "modifying other_insn ");
4658 | dump_insn_slim (dump_file, undobuf.other_insn); |
4659 | } |
4660 | df_insn_rescan (undobuf.other_insn); |
4661 | } |
4662 | |
4663 | if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED))) |
4664 | { |
4665 | if (dump_file) |
4666 | { |
fprintf (dump_file, "modifying insn i0 ");
4668 | dump_insn_slim (dump_file, i0); |
4669 | } |
4670 | df_insn_rescan (i0); |
4671 | } |
4672 | |
4673 | if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED))) |
4674 | { |
4675 | if (dump_file) |
4676 | { |
fprintf (dump_file, "modifying insn i1 ");
4678 | dump_insn_slim (dump_file, i1); |
4679 | } |
4680 | df_insn_rescan (i1); |
4681 | } |
4682 | |
4683 | if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED))) |
4684 | { |
4685 | if (dump_file) |
4686 | { |
fprintf (dump_file, "modifying insn i2 ");
4688 | dump_insn_slim (dump_file, i2); |
4689 | } |
4690 | df_insn_rescan (i2); |
4691 | } |
4692 | |
4693 | if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED))) |
4694 | { |
4695 | if (dump_file) |
4696 | { |
fprintf (dump_file, "modifying insn i3 ");
4698 | dump_insn_slim (dump_file, i3); |
4699 | } |
4700 | df_insn_rescan (i3); |
4701 | } |
4702 | |
4703 | /* Set new_direct_jump_p if a new return or simple jump instruction |
4704 | has been created. Adjust the CFG accordingly. */ |
4705 | if (returnjump_p (i3) || any_uncondjump_p (i3)) |
4706 | { |
4707 | *new_direct_jump_p = 1; |
mark_jump_label (PATTERN (i3), i3, 0);
4709 | update_cfg_for_uncondjump (i3); |
4710 | } |
4711 | |
4712 | if (undobuf.other_insn != NULL_RTX |
4713 | && (returnjump_p (undobuf.other_insn) |
4714 | || any_uncondjump_p (undobuf.other_insn))) |
4715 | { |
4716 | *new_direct_jump_p = 1; |
4717 | update_cfg_for_uncondjump (undobuf.other_insn); |
4718 | } |
4719 | |
4720 | if (GET_CODE (PATTERN (i3)) == TRAP_IF |
4721 | && XEXP (PATTERN (i3), 0) == const1_rtx) |
4722 | { |
basic_block bb = BLOCK_FOR_INSN (i3);
4724 | gcc_assert (bb); |
4725 | remove_edge (split_block (bb, i3)); |
4726 | emit_barrier_after_bb (bb); |
4727 | *new_direct_jump_p = 1; |
4728 | } |
4729 | |
4730 | if (undobuf.other_insn |
4731 | && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF |
4732 | && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx) |
4733 | { |
basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn);
4735 | gcc_assert (bb); |
4736 | remove_edge (split_block (bb, undobuf.other_insn)); |
4737 | emit_barrier_after_bb (bb); |
4738 | *new_direct_jump_p = 1; |
4739 | } |
4740 | |
4741 | /* A noop might also need cleaning up of CFG, if it comes from the |
4742 | simplification of a jump. */ |
4743 | if (JUMP_P (i3) |
4744 | && GET_CODE (newpat) == SET |
4745 | && SET_SRC (newpat) == pc_rtx |
4746 | && SET_DEST (newpat) == pc_rtx) |
4747 | { |
4748 | *new_direct_jump_p = 1; |
4749 | update_cfg_for_uncondjump (i3); |
4750 | } |
4751 | |
4752 | if (undobuf.other_insn != NULL_RTX |
4753 | && JUMP_P (undobuf.other_insn) |
4754 | && GET_CODE (PATTERN (undobuf.other_insn)) == SET |
4755 | && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx |
4756 | && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx) |
4757 | { |
4758 | *new_direct_jump_p = 1; |
4759 | update_cfg_for_uncondjump (undobuf.other_insn); |
4760 | } |
4761 | |
4762 | combine_successes++; |
4763 | undo_commit (); |
4764 | |
4765 | rtx_insn *ret = newi2pat ? i2 : i3; |
4766 | if (added_links_insn && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (ret)) |
4767 | ret = added_links_insn; |
4768 | if (added_notes_insn && DF_INSN_LUID (added_notes_insn) < DF_INSN_LUID (ret)) |
4769 | ret = added_notes_insn; |
4770 | |
4771 | return ret; |
4772 | } |
4773 | |
4774 | /* Get a marker for undoing to the current state. */ |
4775 | |
4776 | static void * |
4777 | get_undo_marker (void) |
4778 | { |
4779 | return undobuf.undos; |
4780 | } |
4781 | |
4782 | /* Undo the modifications up to the marker. */ |
4783 | |
4784 | static void |
4785 | undo_to_marker (void *marker) |
4786 | { |
4787 | struct undo *undo, *next; |
4788 | |
4789 | for (undo = undobuf.undos; undo != marker; undo = next) |
4790 | { |
4791 | gcc_assert (undo); |
4792 | |
4793 | next = undo->next; |
4794 | switch (undo->kind) |
4795 | { |
4796 | case UNDO_RTX: |
4797 | *undo->where.r = undo->old_contents.r; |
4798 | break; |
4799 | case UNDO_INT: |
4800 | *undo->where.i = undo->old_contents.i; |
4801 | break; |
4802 | case UNDO_MODE: |
4803 | adjust_reg_mode (regno_reg_rtx[undo->where.regno], |
4804 | undo->old_contents.m); |
4805 | break; |
4806 | case UNDO_LINKS: |
4807 | *undo->where.l = undo->old_contents.l; |
4808 | break; |
4809 | default: |
4810 | gcc_unreachable (); |
4811 | } |
4812 | |
4813 | undo->next = undobuf.frees; |
4814 | undobuf.frees = undo; |
4815 | } |
4816 | |
4817 | undobuf.undos = (struct undo *) marker; |
4818 | } |
4819 | |
4820 | /* Undo all the modifications recorded in undobuf. */ |
4821 | |
4822 | static void |
4823 | undo_all (void) |
4824 | { |
undo_to_marker (0);
4826 | } |
4827 | |
4828 | /* We've committed to accepting the changes we made. Move all |
4829 | of the undos to the free list. */ |
4830 | |
4831 | static void |
4832 | undo_commit (void) |
4833 | { |
4834 | struct undo *undo, *next; |
4835 | |
4836 | for (undo = undobuf.undos; undo; undo = next) |
4837 | { |
4838 | next = undo->next; |
4839 | undo->next = undobuf.frees; |
4840 | undobuf.frees = undo; |
4841 | } |
4842 | undobuf.undos = 0; |
4843 | } |
4844 | |
4845 | /* Find the innermost point within the rtx at LOC, possibly LOC itself, |
4846 | where we have an arithmetic expression and return that point. LOC will |
4847 | be inside INSN. |
4848 | |
4849 | try_combine will call this function to see if an insn can be split into |
4850 | two insns. */ |
4851 | |
4852 | static rtx * |
4853 | find_split_point (rtx *loc, rtx_insn *insn, bool set_src) |
4854 | { |
4855 | rtx x = *loc; |
4856 | enum rtx_code code = GET_CODE (x); |
4857 | rtx *split; |
4858 | unsigned HOST_WIDE_INT len = 0; |
4859 | HOST_WIDE_INT pos = 0; |
4860 | bool unsignedp = false; |
4861 | rtx inner = NULL_RTX; |
4862 | scalar_int_mode mode, inner_mode; |
4863 | |
4864 | /* First special-case some codes. */ |
4865 | switch (code) |
4866 | { |
4867 | case SUBREG: |
4868 | #ifdef INSN_SCHEDULING |
4869 | /* If we are making a paradoxical SUBREG invalid, it becomes a split |
4870 | point. */ |
4871 | if (MEM_P (SUBREG_REG (x))) |
4872 | return loc; |
4873 | #endif |
return find_split_point (&SUBREG_REG (x), insn, false);
4875 | |
4876 | case MEM: |
4877 | /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it |
4878 | using LO_SUM and HIGH. */ |
4879 | if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST |
4880 | || GET_CODE (XEXP (x, 0)) == SYMBOL_REF)) |
4881 | { |
machine_mode address_mode = get_address_mode (x);
4883 | |
4884 | SUBST (XEXP (x, 0), |
4885 | gen_rtx_LO_SUM (address_mode, |
4886 | gen_rtx_HIGH (address_mode, XEXP (x, 0)), |
4887 | XEXP (x, 0))); |
4888 | return &XEXP (XEXP (x, 0), 0); |
4889 | } |
4890 | |
4891 | /* If we have a PLUS whose second operand is a constant and the |
4892 | address is not valid, perhaps we can split it up using |
4893 | the machine-specific way to split large constants. We use |
4894 | the first pseudo-reg (one of the virtual regs) as a placeholder; |
4895 | it will not remain in the result. */ |
4896 | if (GET_CODE (XEXP (x, 0)) == PLUS |
4897 | && CONST_INT_P (XEXP (XEXP (x, 0), 1)) |
4898 | && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0), |
4899 | MEM_ADDR_SPACE (x))) |
4900 | { |
4901 | rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER]; |
rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
subst_insn);
4904 | |
4905 | /* This should have produced two insns, each of which sets our |
4906 | placeholder. If the source of the second is a valid address, |
4907 | we can put both sources together and make a split point |
4908 | in the middle. */ |
4909 | |
4910 | if (seq |
&& NEXT_INSN (seq) != NULL_RTX
&& NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
4913 | && NONJUMP_INSN_P (seq) |
4914 | && GET_CODE (PATTERN (seq)) == SET |
4915 | && SET_DEST (PATTERN (seq)) == reg |
4916 | && ! reg_mentioned_p (reg, |
4917 | SET_SRC (PATTERN (seq))) |
4918 | && NONJUMP_INSN_P (NEXT_INSN (seq)) |
4919 | && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET |
4920 | && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg |
4921 | && memory_address_addr_space_p |
4922 | (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))), |
4923 | MEM_ADDR_SPACE (x))) |
4924 | { |
4925 | rtx src1 = SET_SRC (PATTERN (seq)); |
4926 | rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq))); |
4927 | |
4928 | /* Replace the placeholder in SRC2 with SRC1. If we can |
4929 | find where in SRC2 it was placed, that can become our |
4930 | split point and we can replace this address with SRC2. |
4931 | Just try two obvious places. */ |
4932 | |
4933 | src2 = replace_rtx (src2, reg, src1); |
4934 | split = 0; |
4935 | if (XEXP (src2, 0) == src1) |
4936 | split = &XEXP (src2, 0); |
4937 | else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e' |
4938 | && XEXP (XEXP (src2, 0), 0) == src1) |
4939 | split = &XEXP (XEXP (src2, 0), 0); |
4940 | |
4941 | if (split) |
4942 | { |
4943 | SUBST (XEXP (x, 0), src2); |
4944 | return split; |
4945 | } |
4946 | } |
4947 | |
/* If that didn't work and we have a nested plus, like
((REG1 * CONST1) + REG2) + CONST2, where (REG1 + REG2) + CONST2
is a valid address, try to split (REG1 * CONST1).  */
4951 | if (GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS |
4952 | && !OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 0)) |
4953 | && OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 1)) |
4954 | && ! (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SUBREG |
4955 | && OBJECT_P (SUBREG_REG (XEXP (XEXP (XEXP (x, 0), |
4956 | 0), 0))))) |
4957 | { |
4958 | rtx tem = XEXP (XEXP (XEXP (x, 0), 0), 0); |
4959 | XEXP (XEXP (XEXP (x, 0), 0), 0) = reg; |
4960 | if (memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0), |
4961 | MEM_ADDR_SPACE (x))) |
4962 | { |
4963 | XEXP (XEXP (XEXP (x, 0), 0), 0) = tem; |
4964 | return &XEXP (XEXP (XEXP (x, 0), 0), 0); |
4965 | } |
4966 | XEXP (XEXP (XEXP (x, 0), 0), 0) = tem; |
4967 | } |
4968 | else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS |
4969 | && OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 0)) |
4970 | && !OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 1)) |
4971 | && ! (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == SUBREG |
4972 | && OBJECT_P (SUBREG_REG (XEXP (XEXP (XEXP (x, 0), |
4973 | 0), 1))))) |
4974 | { |
4975 | rtx tem = XEXP (XEXP (XEXP (x, 0), 0), 1); |
4976 | XEXP (XEXP (XEXP (x, 0), 0), 1) = reg; |
4977 | if (memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0), |
4978 | MEM_ADDR_SPACE (x))) |
4979 | { |
4980 | XEXP (XEXP (XEXP (x, 0), 0), 1) = tem; |
4981 | return &XEXP (XEXP (XEXP (x, 0), 0), 1); |
4982 | } |
4983 | XEXP (XEXP (XEXP (x, 0), 0), 1) = tem; |
4984 | } |
4985 | |
4986 | /* If that didn't work, perhaps the first operand is complex and |
4987 | needs to be computed separately, so make a split point there. |
4988 | This will occur on machines that just support REG + CONST |
4989 | and have a constant moved through some previous computation. */ |
4990 | if (!OBJECT_P (XEXP (XEXP (x, 0), 0)) |
4991 | && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG |
4992 | && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0))))) |
4993 | return &XEXP (XEXP (x, 0), 0); |
4994 | } |
4995 | |
4996 | /* If we have a PLUS whose first operand is complex, try computing it |
4997 | separately by making a split there. */ |
4998 | if (GET_CODE (XEXP (x, 0)) == PLUS |
4999 | && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0), |
5000 | MEM_ADDR_SPACE (x)) |
5001 | && ! OBJECT_P (XEXP (XEXP (x, 0), 0)) |
5002 | && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG |
5003 | && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0))))) |
5004 | return &XEXP (XEXP (x, 0), 0); |
5005 | break; |
5006 | |
5007 | case SET: |
5008 | /* See if we can split SET_SRC as it stands. */ |
split = find_split_point (&SET_SRC (x), insn, true);
5010 | if (split && split != &SET_SRC (x)) |
5011 | return split; |
5012 | |
5013 | /* See if we can split SET_DEST as it stands. */ |
split = find_split_point (&SET_DEST (x), insn, false);
5015 | if (split && split != &SET_DEST (x)) |
5016 | return split; |
5017 | |
5018 | /* See if this is a bitfield assignment with everything constant. If |
5019 | so, this is an IOR of an AND, so split it into that. */ |
5020 | if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT |
&& is_a <scalar_int_mode> (GET_MODE (XEXP (SET_DEST (x), 0)),
&inner_mode)
&& HWI_COMPUTABLE_MODE_P (inner_mode)
5024 | && CONST_INT_P (XEXP (SET_DEST (x), 1)) |
5025 | && CONST_INT_P (XEXP (SET_DEST (x), 2)) |
5026 | && CONST_INT_P (SET_SRC (x)) |
5027 | && ((INTVAL (XEXP (SET_DEST (x), 1)) |
5028 | + INTVAL (XEXP (SET_DEST (x), 2))) |
<= GET_MODE_PRECISION (inner_mode))
5030 | && ! side_effects_p (XEXP (SET_DEST (x), 0))) |
5031 | { |
5032 | HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2)); |
5033 | unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1)); |
5034 | rtx dest = XEXP (SET_DEST (x), 0); |
5035 | unsigned HOST_WIDE_INT mask = (HOST_WIDE_INT_1U << len) - 1; |
5036 | unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x)) & mask; |
5037 | rtx or_mask; |
5038 | |
5039 | if (BITS_BIG_ENDIAN) |
pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5041 | |
5042 | or_mask = gen_int_mode (src << pos, inner_mode); |
5043 | if (src == mask) |
5044 | SUBST (SET_SRC (x), |
5045 | simplify_gen_binary (IOR, inner_mode, dest, or_mask)); |
5046 | else |
5047 | { |
5048 | rtx negmask = gen_int_mode (~(mask << pos), inner_mode); |
5049 | SUBST (SET_SRC (x), |
5050 | simplify_gen_binary (IOR, inner_mode, |
5051 | simplify_gen_binary (AND, inner_mode, |
5052 | dest, negmask), |
5053 | or_mask)); |
5054 | } |
5055 | |
5056 | SUBST (SET_DEST (x), dest); |
5057 | |
split = find_split_point (&SET_SRC (x), insn, true);
5059 | if (split && split != &SET_SRC (x)) |
5060 | return split; |
5061 | } |
5062 | |
5063 | /* Otherwise, see if this is an operation that we can split into two. |
5064 | If so, try to split that. */ |
5065 | code = GET_CODE (SET_SRC (x)); |
5066 | |
5067 | switch (code) |
5068 | { |
5069 | case AND: |
5070 | /* If we are AND'ing with a large constant that is only a single |
5071 | bit and the result is only being used in a context where we |
5072 | need to know if it is zero or nonzero, replace it with a bit |
5073 | extraction. This will avoid the large constant, which might |
5074 | have taken more than one insn to make. If the constant were |
5075 | not a valid argument to the AND but took only one insn to make, |
5076 | this is no worse, but if it took more than one insn, it will |
5077 | be better. */ |
5078 | |
5079 | if (CONST_INT_P (XEXP (SET_SRC (x), 1)) |
5080 | && REG_P (XEXP (SET_SRC (x), 0)) |
5081 | && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7 |
5082 | && REG_P (SET_DEST (x)) |
5083 | && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0 |
5084 | && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE) |
5085 | && XEXP (*split, 0) == SET_DEST (x) |
5086 | && XEXP (*split, 1) == const0_rtx) |
5087 | { |
rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
XEXP (SET_SRC (x), 0),
pos, NULL_RTX, 1,
true, false, false);
5092 | if (extraction != 0) |
5093 | { |
5094 | SUBST (SET_SRC (x), extraction); |
return find_split_point (loc, insn, false);
5096 | } |
5097 | } |
5098 | break; |
5099 | |
5100 | case NE: |
/* If STORE_FLAG_VALUE is -1 and this is (NE X 0) where only one bit
of X is known to be on, this can be converted into a NEG of a shift.  */
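/* For example (illustrative): if only bit 3 of X can be nonzero,
(ne X 0) == (neg (lshiftrt X 3)), since X >> 3 is 0 or 1 and its
negation is 0 or -1 == STORE_FLAG_VALUE.  */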
5103 | if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx |
5104 | && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0)) |
&& ((pos = exact_log2 (nonzero_bits (XEXP (SET_SRC (x), 0),
5106 | GET_MODE (XEXP (SET_SRC (x), |
5107 | 0))))) >= 1)) |
5108 | { |
5109 | machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0)); |
5110 | rtx pos_rtx = gen_int_shift_amount (mode, pos); |
5111 | SUBST (SET_SRC (x), |
5112 | gen_rtx_NEG (mode, |
5113 | gen_rtx_LSHIFTRT (mode, |
5114 | XEXP (SET_SRC (x), 0), |
5115 | pos_rtx))); |
5116 | |
split = find_split_point (&SET_SRC (x), insn, true);
5118 | if (split && split != &SET_SRC (x)) |
5119 | return split; |
5120 | } |
5121 | break; |
5122 | |
5123 | case SIGN_EXTEND: |
5124 | inner = XEXP (SET_SRC (x), 0); |
5125 | |
5126 | /* We can't optimize if either mode is a partial integer |
5127 | mode as we don't know how many bits are significant |
5128 | in those modes. */ |
if (!is_int_mode (GET_MODE (inner), &inner_mode)
5130 | || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT) |
5131 | break; |
5132 | |
5133 | pos = 0; |
len = GET_MODE_PRECISION (inner_mode);
5135 | unsignedp = false; |
5136 | break; |
5137 | |
5138 | case SIGN_EXTRACT: |
5139 | case ZERO_EXTRACT: |
if (is_a <scalar_int_mode> (GET_MODE (XEXP (SET_SRC (x), 0)),
&inner_mode)
5142 | && CONST_INT_P (XEXP (SET_SRC (x), 1)) |
5143 | && CONST_INT_P (XEXP (SET_SRC (x), 2))) |
5144 | { |
5145 | inner = XEXP (SET_SRC (x), 0); |
5146 | len = INTVAL (XEXP (SET_SRC (x), 1)); |
5147 | pos = INTVAL (XEXP (SET_SRC (x), 2)); |
5148 | |
5149 | if (BITS_BIG_ENDIAN) |
pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5151 | unsignedp = (code == ZERO_EXTRACT); |
5152 | } |
5153 | break; |
5154 | |
5155 | default: |
5156 | break; |
5157 | } |
5158 | |
5159 | if (len |
&& known_subrange_p (pos, len,
0, GET_MODE_PRECISION (GET_MODE (inner)))
&& is_a <scalar_int_mode> (GET_MODE (SET_SRC (x)), &mode))
5163 | { |
5164 | /* For unsigned, we have a choice of a shift followed by an |
5165 | AND or two shifts. Use two shifts for field sizes where the |
5166 | constant might be too large. We assume here that we can |
5167 | always at least get 8-bit constants in an AND insn, which is |
5168 | true for every current RISC. */ |
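
/* Concretely (illustrative, 32-bit mode): an unsigned 8-bit field at
bit 8 becomes (x >> 8) & 0xff, while a wider or signed field uses
two shifts, (x << (32 - len - pos)) >> (32 - len), where the final
shift is arithmetic for sign extension and logical otherwise.  */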
5169 | |
5170 | if (unsignedp && len <= 8) |
5171 | { |
5172 | unsigned HOST_WIDE_INT mask |
5173 | = (HOST_WIDE_INT_1U << len) - 1; |
5174 | rtx pos_rtx = gen_int_shift_amount (mode, pos); |
5175 | SUBST (SET_SRC (x), |
5176 | gen_rtx_AND (mode, |
5177 | gen_rtx_LSHIFTRT |
5178 | (mode, gen_lowpart (mode, inner), pos_rtx), |
5179 | gen_int_mode (mask, mode))); |
5180 | |
split = find_split_point (&SET_SRC (x), insn, true);
5182 | if (split && split != &SET_SRC (x)) |
5183 | return split; |
5184 | } |
5185 | else |
5186 | { |
5187 | int left_bits = GET_MODE_PRECISION (mode) - len - pos; |
5188 | int right_bits = GET_MODE_PRECISION (mode) - len; |
5189 | SUBST (SET_SRC (x), |
5190 | gen_rtx_fmt_ee |
5191 | (unsignedp ? LSHIFTRT : ASHIFTRT, mode, |
5192 | gen_rtx_ASHIFT (mode, |
5193 | gen_lowpart (mode, inner), |
5194 | gen_int_shift_amount (mode, left_bits)), |
5195 | gen_int_shift_amount (mode, right_bits))); |
5196 | |
split = find_split_point (&SET_SRC (x), insn, true);
5198 | if (split && split != &SET_SRC (x)) |
5199 | return split; |
5200 | } |
5201 | } |
5202 | |
5203 | /* See if this is a simple operation with a constant as the second |
5204 | operand. It might be that this constant is out of range and hence |
5205 | could be used as a split point. */ |
5206 | if (BINARY_P (SET_SRC (x)) |
5207 | && CONSTANT_P (XEXP (SET_SRC (x), 1)) |
5208 | && (OBJECT_P (XEXP (SET_SRC (x), 0)) |
5209 | || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG |
5210 | && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0)))))) |
5211 | return &XEXP (SET_SRC (x), 1); |
5212 | |
5213 | /* Finally, see if this is a simple operation with its first operand |
5214 | not in a register. The operation might require this operand in a |
5215 | register, so return it as a split point. We can always do this |
5216 | because if the first operand were another operation, we would have |
5217 | already found it as a split point. */ |
5218 | if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x))) |
5219 | && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode)) |
5220 | return &XEXP (SET_SRC (x), 0); |
5221 | |
5222 | return 0; |
5223 | |
5224 | case AND: |
5225 | case IOR: |
5226 | /* We write NOR as (and (not A) (not B)), but if we don't have a NOR, |
5227 | it is better to write this as (not (ior A B)) so we can split it. |
5228 | Similarly for IOR. */ |
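/* That is, by De Morgan:
     (and (not A) (not B)) -> (not (ior A B))
     (ior (not A) (not B)) -> (not (and A B))  */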
5229 | if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT) |
5230 | { |
5231 | SUBST (*loc, |
5232 | gen_rtx_NOT (GET_MODE (x), |
5233 | gen_rtx_fmt_ee (code == IOR ? AND : IOR, |
5234 | GET_MODE (x), |
5235 | XEXP (XEXP (x, 0), 0), |
5236 | XEXP (XEXP (x, 1), 0)))); |
5237 | return find_split_point (loc, insn, set_src); |
5238 | } |
5239 | |
5240 | /* Many RISC machines have a large set of logical insns. If the |
5241 | second operand is a NOT, put it first so we will try to split the |
5242 | other operand first. */ |
5243 | if (GET_CODE (XEXP (x, 1)) == NOT) |
5244 | { |
5245 | rtx tem = XEXP (x, 0); |
5246 | SUBST (XEXP (x, 0), XEXP (x, 1)); |
5247 | SUBST (XEXP (x, 1), tem); |
5248 | } |
5249 | break; |
5250 | |
5251 | case PLUS: |
5252 | case MINUS: |
5253 | /* Canonicalization can produce (minus A (mult B C)), where C is a |
5254 | constant. It may be better to try splitting (plus (mult B -C) A) |
5255 | instead if this isn't a multiply by a power of two. */ |
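/* E.g. (minus A (mult B 3)) is rewritten below as
(plus (mult B -3) A), which may then split at the multiply.  */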
5256 | if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT |
5257 | && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT |
5258 | && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1)))) |
5259 | { |
5260 | machine_mode mode = GET_MODE (x); |
5261 | unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1)); |
5262 | HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode); |
5263 | SUBST (*loc, gen_rtx_PLUS (mode, |
5264 | gen_rtx_MULT (mode, |
5265 | XEXP (XEXP (x, 1), 0), |
5266 | gen_int_mode (other_int, |
5267 | mode)), |
5268 | XEXP (x, 0))); |
5269 | return find_split_point (loc, insn, set_src); |
5270 | } |
5271 | |
5272 | /* Split at a multiply-accumulate instruction. However if this is |
5273 | the SET_SRC, we likely do not have such an instruction and it's |
5274 | worthless to try this split. */ |
5275 | if (!set_src |
5276 | && (GET_CODE (XEXP (x, 0)) == MULT |
5277 | || (GET_CODE (XEXP (x, 0)) == ASHIFT |
5278 | && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))) |
5279 | return loc; |
5280 | |
5281 | default: |
5282 | break; |
5283 | } |
5284 | |
5285 | /* Otherwise, select our actions depending on our rtx class. */ |
5286 | switch (GET_RTX_CLASS (code)) |
5287 | { |
5288 | case RTX_BITFIELD_OPS: /* This is ZERO_EXTRACT and SIGN_EXTRACT. */ |
5289 | case RTX_TERNARY: |
split = find_split_point (&XEXP (x, 2), insn, false);
5291 | if (split) |
5292 | return split; |
5293 | /* fall through */ |
5294 | case RTX_BIN_ARITH: |
5295 | case RTX_COMM_ARITH: |
5296 | case RTX_COMPARE: |
5297 | case RTX_COMM_COMPARE: |
split = find_split_point (&XEXP (x, 1), insn, false);
5299 | if (split) |
5300 | return split; |
5301 | /* fall through */ |
5302 | case RTX_UNARY: |
5303 | /* Some machines have (and (shift ...) ...) insns. If X is not |
5304 | an AND, but XEXP (X, 0) is, use it as our split point. */ |
5305 | if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND) |
5306 | return &XEXP (x, 0); |
5307 | |
split = find_split_point (&XEXP (x, 0), insn, false);
5309 | if (split) |
5310 | return split; |
5311 | return loc; |
5312 | |
5313 | default: |
5314 | /* Otherwise, we don't have a split point. */ |
5315 | return 0; |
5316 | } |
5317 | } |
5318 | |
5319 | /* Throughout X, replace FROM with TO, and return the result. |
5320 | The result is TO if X is FROM; |
5321 | otherwise the result is X, but its contents may have been modified. |
5322 | If they were modified, a record was made in undobuf so that |
5323 | undo_all will (among other things) return X to its original state. |
5324 | |
5325 | If the number of changes necessary is too much to record to undo, |
5326 | the excess changes are not made, so the result is invalid. |
5327 | The changes already made can still be undone. |
undobuf.num_undo is incremented for such changes, so by testing that,
5329 | the caller can tell whether the result is valid. |
5330 | |
5331 | `n_occurrences' is incremented each time FROM is replaced. |
5332 | |
5333 | IN_DEST is true if we are processing the SET_DEST of a SET. |
5334 | |
5335 | IN_COND is true if we are at the top level of a condition. |
5336 | |
5337 | UNIQUE_COPY is true if each substitution must be unique. We do this |
5338 | by copying if `n_occurrences' is nonzero. */ |
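
/* For example (illustrative): when try_combine substitutes
   FROM = (reg 100) with TO = (plus (reg 101) (const_int 4)) throughout
   a pattern, any subexpression that cannot survive the replacement is
   signalled by returning (clobber (const_int 0)) -- a poison value the
   callers test for -- rather than a partially substituted expression.  */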
5339 | |
5340 | static rtx |
5341 | subst (rtx x, rtx from, rtx to, bool in_dest, bool in_cond, bool unique_copy) |
5342 | { |
5343 | enum rtx_code code = GET_CODE (x); |
5344 | machine_mode op0_mode = VOIDmode; |
5345 | const char *fmt; |
5346 | int len, i; |
5347 | rtx new_rtx; |
5348 | |
5349 | /* Two expressions are equal if they are identical copies of a shared |
5350 | RTX or if they are both registers with the same register number |
5351 | and mode. */ |
5352 | |
5353 | #define COMBINE_RTX_EQUAL_P(X,Y) \ |
5354 | ((X) == (Y) \ |
5355 | || (REG_P (X) && REG_P (Y) \ |
5356 | && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y))) |
5357 | |
5358 | /* Do not substitute into clobbers of regs -- this will never result in |
5359 | valid RTL. */ |
5360 | if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0))) |
5361 | return x; |
5362 | |
5363 | if (! in_dest && COMBINE_RTX_EQUAL_P (x, from)) |
5364 | { |
5365 | n_occurrences++; |
5366 | return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to); |
5367 | } |
5368 | |
5369 | /* If X and FROM are the same register but different modes, they |
5370 | will not have been seen as equal above. However, the log links code |
5371 | will make a LOG_LINKS entry for that case. If we do nothing, we |
5372 | will try to rerecognize our original insn and, when it succeeds, |
5373 | we will delete the feeding insn, which is incorrect. |
5374 | |
5375 | So force this insn not to match in this (rare) case. */ |
5376 | if (! in_dest && code == REG && REG_P (from) |
5377 | && reg_overlap_mentioned_p (x, from)) |
5378 | return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx); |
5379 | |
5380 | /* If this is an object, we are done unless it is a MEM or LO_SUM, both |
5381 | of which may contain things that can be combined. */ |
5382 | if (code != MEM && code != LO_SUM && OBJECT_P (x)) |
5383 | return x; |
5384 | |
5385 | /* It is possible to have a subexpression appear twice in the insn. |
5386 | Suppose that FROM is a register that appears within TO. |
5387 | Then, after that subexpression has been scanned once by `subst', |
5388 | the second time it is scanned, TO may be found. If we were |
5389 | to scan TO here, we would find FROM within it and create a |
5390 | self-referent rtl structure which is completely wrong. */ |
5391 | if (COMBINE_RTX_EQUAL_P (x, to)) |
5392 | return to; |
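
/* E.g. (illustrative) FROM = (reg 100), TO = (plus (reg 100)
   (const_int 4)): once TO has been installed, scanning it again would
   substitute into its own (reg 100) and tie the structure into a
   loop, so return it unscanned.  */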
5393 | |
5394 | /* Parallel asm_operands need special attention because all of the |
5395 | inputs are shared across the arms. Furthermore, unsharing the |
5396 | rtl results in recognition failures. Failure to handle this case |
5397 | specially can result in circular rtl. |
5398 | |
5399 | Solve this by doing a normal pass across the first entry of the |
5400 | parallel, and only processing the SET_DESTs of the subsequent |
5401 | entries. Ug. */ |
5402 | |
5403 | if (code == PARALLEL |
5404 | && GET_CODE (XVECEXP (x, 0, 0)) == SET |
5405 | && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS) |
5406 | { |
new_rtx = subst (XVECEXP (x, 0, 0), from, to, false, false, unique_copy);
5408 | |
5409 | /* If this substitution failed, this whole thing fails. */ |
5410 | if (GET_CODE (new_rtx) == CLOBBER |
5411 | && XEXP (new_rtx, 0) == const0_rtx) |
5412 | return new_rtx; |
5413 | |
5414 | SUBST (XVECEXP (x, 0, 0), new_rtx); |
5415 | |
5416 | for (i = XVECLEN (x, 0) - 1; i >= 1; i--) |
5417 | { |
5418 | rtx dest = SET_DEST (XVECEXP (x, 0, i)); |
5419 | |
5420 | if (!REG_P (dest) && GET_CODE (dest) != PC) |
5421 | { |
new_rtx = subst (dest, from, to, false, false, unique_copy);
5423 | |
5424 | /* If this substitution failed, this whole thing fails. */ |
5425 | if (GET_CODE (new_rtx) == CLOBBER |
5426 | && XEXP (new_rtx, 0) == const0_rtx) |
5427 | return new_rtx; |
5428 | |
5429 | SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx); |
5430 | } |
5431 | } |
5432 | } |
5433 | else |
5434 | { |
5435 | len = GET_RTX_LENGTH (code); |
5436 | fmt = GET_RTX_FORMAT (code); |
5437 | |
5438 | /* We don't need to process a SET_DEST that is a register or PC, so |
5439 | set up to skip this common case. All other cases where we want |
5440 | to suppress replacing something inside a SET_SRC are handled via |
5441 | the IN_DEST operand. */ |
5442 | if (code == SET |
5443 | && (REG_P (SET_DEST (x)) |
5444 | || GET_CODE (SET_DEST (x)) == PC)) |
fmt = "ie";
5446 | |
5447 | /* Trying to simplify the operands of a widening MULT is not likely |
5448 | to create RTL matching a machine insn. */ |
5449 | if (code == MULT |
5450 | && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND |
5451 | || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND) |
5452 | && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND |
5453 | || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND) |
5454 | && REG_P (XEXP (XEXP (x, 0), 0)) |
5455 | && REG_P (XEXP (XEXP (x, 1), 0)) |
5456 | && from == to) |
return x;

5460 | /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a |
5461 | constant. */ |
5462 | if (fmt[0] == 'e') |
5463 | op0_mode = GET_MODE (XEXP (x, 0)); |
5464 | |
5465 | for (i = 0; i < len; i++) |
5466 | { |
5467 | if (fmt[i] == 'E') |
5468 | { |
5469 | int j; |
5470 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
5471 | { |
5472 | if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from)) |
5473 | { |
5474 | new_rtx = (unique_copy && n_occurrences |
5475 | ? copy_rtx (to) : to); |
5476 | n_occurrences++; |
5477 | } |
5478 | else |
5479 | { |
5480 | new_rtx = subst (XVECEXP (x, i, j), from, to, |
false, false, unique_copy);
5482 | |
5483 | /* If this substitution failed, this whole thing |
5484 | fails. */ |
5485 | if (GET_CODE (new_rtx) == CLOBBER |
5486 | && XEXP (new_rtx, 0) == const0_rtx) |
5487 | return new_rtx; |
5488 | } |
5489 | |
5490 | SUBST (XVECEXP (x, i, j), new_rtx); |
5491 | } |
5492 | } |
5493 | else if (fmt[i] == 'e') |
5494 | { |
5495 | /* If this is a register being set, ignore it. */ |
5496 | new_rtx = XEXP (x, i); |
5497 | if (in_dest |
5498 | && i == 0 |
5499 | && (((code == SUBREG || code == ZERO_EXTRACT) |
5500 | && REG_P (new_rtx)) |
5501 | || code == STRICT_LOW_PART)) |
5502 | ; |
5503 | |
5504 | else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from)) |
5505 | { |
5506 | /* In general, don't install a subreg involving two |
5507 | modes not tieable. It can worsen register |
5508 | allocation, and can even make invalid reload |
5509 | insns, since the reg inside may need to be copied |
5510 | from in the outside mode, and that may be invalid |
5511 | if it is an fp reg copied in integer mode. |
5512 | |
5513 | We allow an exception to this: It is valid if |
5514 | it is inside another SUBREG and the mode of that |
5515 | SUBREG and the mode of the inside of TO is |
5516 | tieable. */ |
5517 | |
5518 | if (GET_CODE (to) == SUBREG |
5519 | && !targetm.modes_tieable_p (GET_MODE (to), |
5520 | GET_MODE (SUBREG_REG (to))) |
5521 | && ! (code == SUBREG |
5522 | && (targetm.modes_tieable_p |
5523 | (GET_MODE (x), GET_MODE (SUBREG_REG (to)))))) |
5524 | return gen_rtx_CLOBBER (VOIDmode, const0_rtx); |
5525 | |
5526 | if (code == SUBREG |
5527 | && REG_P (to) |
5528 | && REGNO (to) < FIRST_PSEUDO_REGISTER |
5529 | && simplify_subreg_regno (REGNO (to), GET_MODE (to), |
5530 | SUBREG_BYTE (x), |
5531 | GET_MODE (x)) < 0) |
5532 | return gen_rtx_CLOBBER (VOIDmode, const0_rtx); |
5533 | |
5534 | new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to); |
5535 | n_occurrences++; |
5536 | } |
5537 | else |
5538 | /* If we are in a SET_DEST, suppress most cases unless we |
5539 | have gone inside a MEM, in which case we want to |
5540 | simplify the address. We assume here that things that |
5541 | are actually part of the destination have their inner |
5542 | parts in the first expression. This is true for SUBREG, |
5543 | STRICT_LOW_PART, and ZERO_EXTRACT, which are the only |
5544 | things aside from REG and MEM that should appear in a |
5545 | SET_DEST. */ |
5546 | new_rtx = subst (XEXP (x, i), from, to, |
(((in_dest
5548 | && (code == SUBREG || code == STRICT_LOW_PART |
5549 | || code == ZERO_EXTRACT)) |
5550 | || code == SET) |
5551 | && i == 0), |
code == IF_THEN_ELSE && i == 0,
5553 | unique_copy); |
5554 | |
5555 | /* If we found that we will have to reject this combination, |
5556 | indicate that by returning the CLOBBER ourselves, rather than |
5557 | an expression containing it. This will speed things up as |
5558 | well as prevent accidents where two CLOBBERs are considered |
5559 | to be equal, thus producing an incorrect simplification. */ |
5560 | |
5561 | if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx) |
5562 | return new_rtx; |
5563 | |
5564 | if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx)) |
5565 | { |
5566 | machine_mode mode = GET_MODE (x); |
5567 | |
x = simplify_subreg (GET_MODE (x), new_rtx,
5569 | GET_MODE (SUBREG_REG (x)), |
5570 | SUBREG_BYTE (x)); |
5571 | if (! x) |
5572 | x = gen_rtx_CLOBBER (mode, const0_rtx); |
5573 | } |
5574 | else if (CONST_SCALAR_INT_P (new_rtx) |
5575 | && (GET_CODE (x) == ZERO_EXTEND |
5576 | || GET_CODE (x) == SIGN_EXTEND |
5577 | || GET_CODE (x) == FLOAT |
5578 | || GET_CODE (x) == UNSIGNED_FLOAT)) |
5579 | { |
5580 | x = simplify_unary_operation (GET_CODE (x), GET_MODE (x), |
new_rtx,
5582 | GET_MODE (XEXP (x, 0))); |
5583 | if (!x) |
5584 | return gen_rtx_CLOBBER (VOIDmode, const0_rtx); |
5585 | } |
5586 | /* CONST_INTs shouldn't be substituted into PRE_DEC, PRE_MODIFY |
5587 | etc. arguments, otherwise we can ICE before trying to recog |
5588 | it. See PR104446. */ |
5589 | else if (CONST_SCALAR_INT_P (new_rtx) |
5590 | && GET_RTX_CLASS (GET_CODE (x)) == RTX_AUTOINC) |
5591 | return gen_rtx_CLOBBER (VOIDmode, const0_rtx); |
5592 | else |
5593 | SUBST (XEXP (x, i), new_rtx); |
5594 | } |
5595 | } |
5596 | } |
5597 | |
5598 | /* Check if we are loading something from the constant pool via float |
extension; in this case we would undo the compress_float_constant
optimization and degrade the constant load into an immediate value.  */
5601 | if (GET_CODE (x) == FLOAT_EXTEND |
5602 | && MEM_P (XEXP (x, 0)) |
5603 | && MEM_READONLY_P (XEXP (x, 0))) |
5604 | { |
5605 | rtx tmp = avoid_constant_pool_reference (x); |
5606 | if (x != tmp) |
5607 | return x; |
5608 | } |
5609 | |
5610 | /* Try to simplify X. If the simplification changed the code, it is likely |
5611 | that further simplification will help, so loop, but limit the number |
5612 | of repetitions that will be performed. */ |
5613 | |
5614 | for (i = 0; i < 4; i++) |
5615 | { |
5616 | /* If X is sufficiently simple, don't bother trying to do anything |
5617 | with it. */ |
5618 | if (code != CONST_INT && code != REG && code != CLOBBER) |
5619 | x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond); |
5620 | |
5621 | if (GET_CODE (x) == code) |
5622 | break; |
5623 | |
5624 | code = GET_CODE (x); |
5625 | |
5626 | /* We no longer know the original mode of operand 0 since we |
have changed the form of X.  */
5628 | op0_mode = VOIDmode; |
5629 | } |
5630 | |
5631 | return x; |
5632 | } |
5633 | |
5634 | /* If X is a commutative operation whose operands are not in the canonical |
5635 | order, use substitutions to swap them. */ |
5636 | |
5637 | static void |
5638 | maybe_swap_commutative_operands (rtx x) |
5639 | { |
5640 | if (COMMUTATIVE_ARITH_P (x) |
5641 | && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1))) |
5642 | { |
5643 | rtx temp = XEXP (x, 0); |
5644 | SUBST (XEXP (x, 0), XEXP (x, 1)); |
5645 | SUBST (XEXP (x, 1), temp); |
5646 | } |
5647 | |
5648 | unsigned n_elts = 0; |
5649 | if (GET_CODE (x) == VEC_MERGE |
5650 | && CONST_INT_P (XEXP (x, 2)) |
&& GET_MODE_NUNITS (GET_MODE (x)).is_constant (&n_elts)
5652 | && (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)) |
/* If the two operands have the same precedence, the first
bit of the mask selects the first operand.  */
5655 | || (!swap_commutative_operands_p (XEXP (x, 1), XEXP (x, 0)) |
5656 | && !(UINTVAL (XEXP (x, 2)) & 1)))) |
5657 | { |
5658 | rtx temp = XEXP (x, 0); |
5659 | unsigned HOST_WIDE_INT sel = UINTVAL (XEXP (x, 2)); |
5660 | unsigned HOST_WIDE_INT mask = HOST_WIDE_INT_1U; |
5661 | if (n_elts == HOST_BITS_PER_WIDE_INT) |
5662 | mask = -1; |
5663 | else |
5664 | mask = (HOST_WIDE_INT_1U << n_elts) - 1; |
5665 | SUBST (XEXP (x, 0), XEXP (x, 1)); |
5666 | SUBST (XEXP (x, 1), temp); |
5667 | SUBST (XEXP (x, 2), GEN_INT (~sel & mask)); |
5668 | } |
5669 | } |
5670 | |
5671 | /* Simplify X, a piece of RTL. We just operate on the expression at the |
5672 | outer level; call `subst' to simplify recursively. Return the new |
5673 | expression. |
5674 | |
5675 | OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is true |
5676 | if we are inside a SET_DEST. IN_COND is true if we are at the top level |
5677 | of a condition. */ |
5678 | |
5679 | static rtx |
5680 | combine_simplify_rtx (rtx x, machine_mode op0_mode, bool in_dest, bool in_cond) |
5681 | { |
5682 | enum rtx_code code = GET_CODE (x); |
5683 | machine_mode mode = GET_MODE (x); |
5684 | scalar_int_mode int_mode; |
5685 | rtx temp; |
5686 | int i; |
5687 | |
5688 | /* If this is a commutative operation, put a constant last and a complex |
5689 | expression first. We don't need to do this for comparisons here. */ |
5690 | maybe_swap_commutative_operands (x); |
5691 | |
5692 | /* Try to fold this expression in case we have constants that weren't |
5693 | present before. */ |
5694 | temp = 0; |
5695 | switch (GET_RTX_CLASS (code)) |
5696 | { |
5697 | case RTX_UNARY: |
5698 | if (op0_mode == VOIDmode) |
5699 | op0_mode = GET_MODE (XEXP (x, 0)); |
temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
5701 | break; |
5702 | case RTX_COMPARE: |
5703 | case RTX_COMM_COMPARE: |
5704 | { |
5705 | machine_mode cmp_mode = GET_MODE (XEXP (x, 0)); |
5706 | if (cmp_mode == VOIDmode) |
5707 | { |
5708 | cmp_mode = GET_MODE (XEXP (x, 1)); |
5709 | if (cmp_mode == VOIDmode) |
5710 | cmp_mode = op0_mode; |
5711 | } |
temp = simplify_relational_operation (code, mode, cmp_mode,
5713 | XEXP (x, 0), XEXP (x, 1)); |
5714 | } |
5715 | break; |
5716 | case RTX_COMM_ARITH: |
5717 | case RTX_BIN_ARITH: |
5718 | temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1)); |
5719 | break; |
5720 | case RTX_BITFIELD_OPS: |
5721 | case RTX_TERNARY: |
5722 | temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0), |
5723 | XEXP (x, 1), XEXP (x, 2)); |
5724 | break; |
5725 | default: |
5726 | break; |
5727 | } |
5728 | |
5729 | if (temp) |
5730 | { |
5731 | x = temp; |
5732 | code = GET_CODE (temp); |
5733 | op0_mode = VOIDmode; |
5734 | mode = GET_MODE (temp); |
5735 | } |
5736 | |
5737 | /* If this is a simple operation applied to an IF_THEN_ELSE, try |
5738 | applying it to the arms of the IF_THEN_ELSE. This often simplifies |
5739 | things. Check for cases where both arms are testing the same |
5740 | condition. |
5741 | |
5742 | Don't do anything if all operands are very simple. */ |
5743 | |
5744 | if ((BINARY_P (x) |
5745 | && ((!OBJECT_P (XEXP (x, 0)) |
5746 | && ! (GET_CODE (XEXP (x, 0)) == SUBREG |
5747 | && OBJECT_P (SUBREG_REG (XEXP (x, 0))))) |
5748 | || (!OBJECT_P (XEXP (x, 1)) |
5749 | && ! (GET_CODE (XEXP (x, 1)) == SUBREG |
5750 | && OBJECT_P (SUBREG_REG (XEXP (x, 1))))))) |
5751 | || (UNARY_P (x) |
5752 | && (!OBJECT_P (XEXP (x, 0)) |
5753 | && ! (GET_CODE (XEXP (x, 0)) == SUBREG |
5754 | && OBJECT_P (SUBREG_REG (XEXP (x, 0))))))) |
5755 | { |
5756 | rtx cond, true_rtx, false_rtx; |
5757 | |
5758 | cond = if_then_else_cond (x, &true_rtx, &false_rtx); |
5759 | if (cond != 0 |
5760 | /* If everything is a comparison, what we have is highly unlikely |
5761 | to be simpler, so don't use it. */ |
5762 | && ! (COMPARISON_P (x) |
5763 | && (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx))) |
5764 | /* Similarly, if we end up with one of the expressions the same |
5765 | as the original, it is certainly not simpler. */ |
5766 | && ! rtx_equal_p (x, true_rtx) |
5767 | && ! rtx_equal_p (x, false_rtx)) |
5768 | { |
5769 | rtx cop1 = const0_rtx; |
5770 | enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1); |
5771 | |
5772 | if (cond_code == NE && COMPARISON_P (cond)) |
5773 | return x; |
5774 | |
5775 | /* Simplify the alternative arms; this may collapse the true and |
5776 | false arms to store-flag values. Be careful to use copy_rtx |
5777 | here since true_rtx or false_rtx might share RTL with x as a |
5778 | result of the if_then_else_cond call above. */ |
true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx,
false, false, false);
false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx,
false, false, false);
5783 | |
5784 | /* If true_rtx and false_rtx are not general_operands, an if_then_else |
5785 | is unlikely to be simpler. */ |
5786 | if (general_operand (true_rtx, VOIDmode) |
5787 | && general_operand (false_rtx, VOIDmode)) |
5788 | { |
5789 | enum rtx_code reversed; |
5790 | |
5791 | /* Restarting if we generate a store-flag expression will cause |
5792 | us to loop. Just drop through in this case. */ |
5793 | |
5794 | /* If the result values are STORE_FLAG_VALUE and zero, we can |
5795 | just make the comparison operation. */ |
5796 | if (true_rtx == const_true_rtx && false_rtx == const0_rtx) |
x = simplify_gen_relational (cond_code, mode, VOIDmode,
cond, cop1);
5799 | else if (true_rtx == const0_rtx && false_rtx == const_true_rtx |
5800 | && ((reversed = reversed_comparison_code_parts |
5801 | (cond_code, cond, cop1, NULL)) |
5802 | != UNKNOWN)) |
x = simplify_gen_relational (reversed, mode, VOIDmode,
cond, cop1);
5805 | |
5806 | /* Likewise, we can make the negate of a comparison operation |
5807 | if the result values are - STORE_FLAG_VALUE and zero. */ |
5808 | else if (CONST_INT_P (true_rtx) |
5809 | && INTVAL (true_rtx) == - STORE_FLAG_VALUE |
5810 | && false_rtx == const0_rtx) |
x = simplify_gen_unary (NEG, mode,
simplify_gen_relational (cond_code,
mode, VOIDmode,
cond, cop1),
mode);
5816 | else if (CONST_INT_P (false_rtx) |
5817 | && INTVAL (false_rtx) == - STORE_FLAG_VALUE |
5818 | && true_rtx == const0_rtx |
5819 | && ((reversed = reversed_comparison_code_parts |
5820 | (cond_code, cond, cop1, NULL)) |
5821 | != UNKNOWN)) |
x = simplify_gen_unary (NEG, mode,
simplify_gen_relational (reversed,
mode, VOIDmode,
cond, cop1),
mode);
5827 | |
5828 | code = GET_CODE (x); |
5829 | op0_mode = VOIDmode; |
5830 | } |
5831 | } |
5832 | } |
5833 | |
5834 | /* First see if we can apply the inverse distributive law. */ |
5835 | if (code == PLUS || code == MINUS |
5836 | || code == AND || code == IOR || code == XOR) |
5837 | { |
5838 | x = apply_distributive_law (x); |
5839 | code = GET_CODE (x); |
5840 | op0_mode = VOIDmode; |
5841 | } |
5842 | |
5843 | /* If CODE is an associative operation not otherwise handled, see if we |
5844 | can associate some operands. This can win if they are constants or |
5845 | if they are logically related (i.e. (a & b) & a). */ |
5846 | if ((code == PLUS || code == MINUS || code == MULT || code == DIV |
5847 | || code == AND || code == IOR || code == XOR |
5848 | || code == SMAX || code == SMIN || code == UMAX || code == UMIN) |
5849 | && ((INTEGRAL_MODE_P (mode) && code != DIV) |
5850 | || (flag_associative_math && FLOAT_MODE_P (mode)))) |
5851 | { |
5852 | if (GET_CODE (XEXP (x, 0)) == code) |
5853 | { |
5854 | rtx other = XEXP (XEXP (x, 0), 0); |
5855 | rtx inner_op0 = XEXP (XEXP (x, 0), 1); |
5856 | rtx inner_op1 = XEXP (x, 1); |
5857 | rtx inner; |
5858 | |
	  /* If this is a commutative operation, make sure we pass any
	     constant operand as the second one.  */
5861 | if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x)) |
	    std::swap (inner_op0, inner_op1);
	  inner = simplify_binary_operation (code == MINUS ? PLUS
					     : code == DIV ? MULT
					     : code,
					     mode, inner_op0, inner_op1);
5867 | |
5868 | /* For commutative operations, try the other pair if that one |
5869 | didn't simplify. */ |
5870 | if (inner == 0 && COMMUTATIVE_ARITH_P (x)) |
5871 | { |
5872 | other = XEXP (XEXP (x, 0), 1); |
5873 | inner = simplify_binary_operation (code, mode, |
5874 | XEXP (XEXP (x, 0), 0), |
5875 | XEXP (x, 1)); |
5876 | } |
5877 | |
5878 | if (inner) |
	    return simplify_gen_binary (code, mode, other, inner);
5880 | } |
5881 | } |
5882 | |
5883 | /* A little bit of algebraic simplification here. */ |
5884 | switch (code) |
5885 | { |
5886 | case MEM: |
5887 | /* Ensure that our address has any ASHIFTs converted to MULT in case |
5888 | address-recognizing predicates are called later. */ |
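      /* E.g. an address containing (ashift I (const_int 2)) is rewritten
	 to use (mult I (const_int 4)), the canonical form inside a MEM.  */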
5889 | temp = make_compound_operation (XEXP (x, 0), MEM); |
5890 | SUBST (XEXP (x, 0), temp); |
5891 | break; |
5892 | |
5893 | case SUBREG: |
5894 | if (op0_mode == VOIDmode) |
5895 | op0_mode = GET_MODE (SUBREG_REG (x)); |
5896 | |
5897 | /* See if this can be moved to simplify_subreg. */ |
5898 | if (CONSTANT_P (SUBREG_REG (x)) |
5899 | && known_eq (subreg_lowpart_offset (mode, op0_mode), SUBREG_BYTE (x)) |
5900 | /* Don't call gen_lowpart if the inner mode |
5901 | is VOIDmode and we cannot simplify it, as SUBREG without |
5902 | inner mode is invalid. */ |
5903 | && (GET_MODE (SUBREG_REG (x)) != VOIDmode |
5904 | || gen_lowpart_common (mode, SUBREG_REG (x)))) |
5905 | return gen_lowpart (mode, SUBREG_REG (x)); |
5906 | |
5907 | if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC) |
5908 | break; |
5909 | { |
5910 | rtx temp; |
	temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
				SUBREG_BYTE (x));
5913 | if (temp) |
5914 | return temp; |
5915 | |
5916 | /* If op is known to have all lower bits zero, the result is zero. */ |
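	/* For example, (subreg:QI (ashift:SI R (const_int 8)) 0) on a
	   little-endian target is (const_int 0), because the low eight
	   bits of the shifted value are known to be zero.  */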
5917 | scalar_int_mode int_mode, int_op0_mode; |
5918 | if (!in_dest |
	    && is_a <scalar_int_mode> (mode, &int_mode)
	    && is_a <scalar_int_mode> (op0_mode, &int_op0_mode)
	    && (GET_MODE_PRECISION (int_mode)
		< GET_MODE_PRECISION (int_op0_mode))
	    && known_eq (subreg_lowpart_offset (int_mode, int_op0_mode),
			 SUBREG_BYTE (x))
	    && HWI_COMPUTABLE_MODE_P (int_op0_mode)
5926 | && ((nonzero_bits (SUBREG_REG (x), int_op0_mode) |
5927 | & GET_MODE_MASK (int_mode)) == 0) |
5928 | && !side_effects_p (SUBREG_REG (x))) |
5929 | return CONST0_RTX (int_mode); |
5930 | } |
5931 | |
5932 | /* Don't change the mode of the MEM if that would change the meaning |
5933 | of the address. */ |
5934 | if (MEM_P (SUBREG_REG (x)) |
5935 | && (MEM_VOLATILE_P (SUBREG_REG (x)) |
5936 | || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0), |
5937 | MEM_ADDR_SPACE (SUBREG_REG (x))))) |
5938 | return gen_rtx_CLOBBER (mode, const0_rtx); |
5939 | |
5940 | /* Note that we cannot do any narrowing for non-constants since |
5941 | we might have been counting on using the fact that some bits were |
5942 | zero. We now do this in the SET. */ |
5943 | |
5944 | break; |
5945 | |
5946 | case NEG: |
5947 | temp = expand_compound_operation (XEXP (x, 0)); |
5948 | |
5949 | /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be |
5950 | replaced by (lshiftrt X C). This will convert |
5951 | (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y). */ |
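      /* Concretely, in SImode (neg (ashiftrt X (const_int 31))) is 1 when
	 X is negative and 0 otherwise, which is exactly
	 (lshiftrt X (const_int 31)).  */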
5952 | |
5953 | if (GET_CODE (temp) == ASHIFTRT |
5954 | && CONST_INT_P (XEXP (temp, 1)) |
5955 | && INTVAL (XEXP (temp, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1) |
5956 | return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0), |
5957 | INTVAL (XEXP (temp, 1))); |
5958 | |
5959 | /* If X has only a single bit that might be nonzero, say, bit I, convert |
5960 | (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of |
5961 | MODE minus 1. This will convert (neg (zero_extract X 1 Y)) to |
5962 | (sign_extract X 1 Y). But only do this if TEMP isn't a register |
5963 | or a SUBREG of one since we'd be making the expression more |
5964 | complex if it was just a register. */ |
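      /* For instance, if only bit 3 of X can be nonzero, (neg X) in SImode
	 becomes (ashiftrt (ashift X (const_int 28)) (const_int 28)): the
	 left shift moves bit 3 into the sign bit and the arithmetic right
	 shift replicates it, yielding 0 or -8, i.e. -X.  */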
5965 | |
5966 | if (!REG_P (temp) |
5967 | && ! (GET_CODE (temp) == SUBREG |
5968 | && REG_P (SUBREG_REG (temp))) |
	  && is_a <scalar_int_mode> (mode, &int_mode)
	  && (i = exact_log2 (nonzero_bits (temp, int_mode))) >= 0)
5971 | { |
5972 | rtx temp1 = simplify_shift_const |
5973 | (NULL_RTX, ASHIFTRT, int_mode, |
5974 | simplify_shift_const (NULL_RTX, ASHIFT, int_mode, temp, |
					  GET_MODE_PRECISION (int_mode) - 1 - i),
	     GET_MODE_PRECISION (int_mode) - 1 - i);
5977 | |
5978 | /* If all we did was surround TEMP with the two shifts, we |
5979 | haven't improved anything, so don't use it. Otherwise, |
5980 | we are better off with TEMP1. */ |
5981 | if (GET_CODE (temp1) != ASHIFTRT |
5982 | || GET_CODE (XEXP (temp1, 0)) != ASHIFT |
5983 | || XEXP (XEXP (temp1, 0), 0) != temp) |
5984 | return temp1; |
5985 | } |
5986 | break; |
5987 | |
5988 | case TRUNCATE: |
5989 | /* We can't handle truncation to a partial integer mode here |
5990 | because we don't know the real bitsize of the partial |
5991 | integer mode. */ |
5992 | if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT) |
5993 | break; |
5994 | |
5995 | if (HWI_COMPUTABLE_MODE_P (mode)) |
5996 | SUBST (XEXP (x, 0), |
5997 | force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)), |
5998 | GET_MODE_MASK (mode), false)); |
5999 | |
6000 | /* We can truncate a constant value and return it. */ |
6001 | { |
6002 | poly_int64 c; |
	  if (poly_int_rtx_p (XEXP (x, 0), &c))
6004 | return gen_int_mode (c, mode); |
6005 | } |
6006 | |
6007 | /* Similarly to what we do in simplify-rtx.cc, a truncate of a register |
6008 | whose value is a comparison can be replaced with a subreg if |
6009 | STORE_FLAG_VALUE permits. */ |
6010 | if (HWI_COMPUTABLE_MODE_P (mode) |
6011 | && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0 |
6012 | && (temp = get_last_value (XEXP (x, 0))) |
6013 | && COMPARISON_P (temp) |
6014 | && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (XEXP (x, 0)))) |
6015 | return gen_lowpart (mode, XEXP (x, 0)); |
6016 | break; |
6017 | |
6018 | case CONST: |
6019 | /* (const (const X)) can become (const X). Do it this way rather than |
6020 | returning the inner CONST since CONST can be shared with a |
6021 | REG_EQUAL note. */ |
6022 | if (GET_CODE (XEXP (x, 0)) == CONST) |
6023 | SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0)); |
6024 | break; |
6025 | |
6026 | case LO_SUM: |
6027 | /* Convert (lo_sum (high FOO) FOO) to FOO. This is necessary so we |
6028 | can add in an offset. find_split_point will split this address up |
6029 | again if it doesn't match. */ |
6030 | if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH |
6031 | && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1))) |
6032 | return XEXP (x, 1); |
6033 | break; |
6034 | |
6035 | case PLUS: |
6036 | /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>) |
6037 | when c is (const_int (pow2 + 1) / 2) is a sign extension of a |
6038 | bit-field and can be replaced by either a sign_extend or a |
6039 | sign_extract. The `and' may be a zero_extend and the two |
6040 | <c>, -<c> constants may be reversed. */ |
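      /* A concrete instance: (plus (xor (and X (const_int 255))
	 (const_int 128)) (const_int -128)) sign-extends the low byte of X,
	 since ((x & 0xff) ^ 0x80) - 0x80 flips the byte's sign bit and
	 then subtracts it back out.  */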
6041 | if (GET_CODE (XEXP (x, 0)) == XOR |
	  && is_a <scalar_int_mode> (mode, &int_mode)
6043 | && CONST_INT_P (XEXP (x, 1)) |
6044 | && CONST_INT_P (XEXP (XEXP (x, 0), 1)) |
6045 | && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1)) |
6046 | && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0 |
6047 | || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0) |
	  && HWI_COMPUTABLE_MODE_P (int_mode)
6049 | && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND |
6050 | && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1)) |
6051 | && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)) |
6052 | == (HOST_WIDE_INT_1U << (i + 1)) - 1)) |
6053 | || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND |
6054 | && known_eq ((GET_MODE_PRECISION |
6055 | (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))), |
6056 | (unsigned int) i + 1)))) |
6057 | return simplify_shift_const |
6058 | (NULL_RTX, ASHIFTRT, int_mode, |
6059 | simplify_shift_const (NULL_RTX, ASHIFT, int_mode, |
6060 | XEXP (XEXP (XEXP (x, 0), 0), 0), |
				 GET_MODE_PRECISION (int_mode) - (i + 1)),
	   GET_MODE_PRECISION (int_mode) - (i + 1));
6063 | |
6064 | /* If only the low-order bit of X is possibly nonzero, (plus x -1) |
6065 | can become (ashiftrt (ashift (xor x 1) C) C) where C is |
6066 | the bitsize of the mode - 1. This allows simplification of |
6067 | "a = (b & 8) == 0;" */ |
6068 | if (XEXP (x, 1) == constm1_rtx |
6069 | && !REG_P (XEXP (x, 0)) |
6070 | && ! (GET_CODE (XEXP (x, 0)) == SUBREG |
6071 | && REG_P (SUBREG_REG (XEXP (x, 0)))) |
	  && is_a <scalar_int_mode> (mode, &int_mode)
6073 | && nonzero_bits (XEXP (x, 0), int_mode) == 1) |
6074 | return simplify_shift_const |
6075 | (NULL_RTX, ASHIFTRT, int_mode, |
6076 | simplify_shift_const (NULL_RTX, ASHIFT, int_mode, |
6077 | gen_rtx_XOR (int_mode, XEXP (x, 0), |
6078 | const1_rtx), |
				 GET_MODE_PRECISION (int_mode) - 1),
	   GET_MODE_PRECISION (int_mode) - 1);
6081 | |
6082 | /* If we are adding two things that have no bits in common, convert |
6083 | the addition into an IOR. This will often be further simplified, |
6084 | for example in cases like ((a & 1) + (a & 2)), which can |
6085 | become a & 3. */ |
6086 | |
6087 | if (HWI_COMPUTABLE_MODE_P (mode) |
6088 | && (nonzero_bits (XEXP (x, 0), mode) |
6089 | & nonzero_bits (XEXP (x, 1), mode)) == 0) |
6090 | { |
6091 | /* Try to simplify the expression further. */ |
	  rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
	  temp = combine_simplify_rtx (tor, VOIDmode, in_dest, false);
6094 | |
6095 | /* If we could, great. If not, do not go ahead with the IOR |
6096 | replacement, since PLUS appears in many special purpose |
6097 | address arithmetic instructions. */ |
6098 | if (GET_CODE (temp) != CLOBBER |
6099 | && (GET_CODE (temp) != IOR |
6100 | || ((XEXP (temp, 0) != XEXP (x, 0) |
6101 | || XEXP (temp, 1) != XEXP (x, 1)) |
6102 | && (XEXP (temp, 0) != XEXP (x, 1) |
6103 | || XEXP (temp, 1) != XEXP (x, 0))))) |
6104 | return temp; |
6105 | } |
6106 | |
6107 | /* Canonicalize x + x into x << 1. */ |
6108 | if (GET_MODE_CLASS (mode) == MODE_INT |
6109 | && rtx_equal_p (XEXP (x, 0), XEXP (x, 1)) |
6110 | && !side_effects_p (XEXP (x, 0))) |
	return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);
6112 | |
6113 | break; |
6114 | |
6115 | case MINUS: |
6116 | /* (minus <foo> (and <foo> (const_int -pow2))) becomes |
6117 | (and <foo> (const_int pow2-1)) */ |
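      /* E.g. (minus X (and X (const_int -8))) is (and X (const_int 7)):
	 the AND clears the low three bits of X, so subtracting it from X
	 leaves exactly those bits.  */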
      if (is_a <scalar_int_mode> (mode, &int_mode)
6119 | && GET_CODE (XEXP (x, 1)) == AND |
6120 | && CONST_INT_P (XEXP (XEXP (x, 1), 1)) |
	  && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
6122 | && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0))) |
6123 | return simplify_and_const_int (NULL_RTX, int_mode, XEXP (x, 0), |
6124 | -INTVAL (XEXP (XEXP (x, 1), 1)) - 1); |
6125 | break; |
6126 | |
6127 | case MULT: |
6128 | /* If we have (mult (plus A B) C), apply the distributive law and then |
6129 | the inverse distributive law to see if things simplify. This |
6130 | occurs mostly in addresses, often when unrolling loops. */ |
6131 | |
6132 | if (GET_CODE (XEXP (x, 0)) == PLUS) |
6133 | { |
6134 | rtx result = distribute_and_simplify_rtx (x, 0); |
6135 | if (result) |
6136 | return result; |
6137 | } |
6138 | |
6139 | /* Try simplify a*(b/c) as (a*b)/c. */ |
6140 | if (FLOAT_MODE_P (mode) && flag_associative_math |
6141 | && GET_CODE (XEXP (x, 0)) == DIV) |
6142 | { |
	  rtx tem = simplify_binary_operation (MULT, mode,
6144 | XEXP (XEXP (x, 0), 0), |
6145 | XEXP (x, 1)); |
6146 | if (tem) |
	    return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
6148 | } |
6149 | break; |
6150 | |
6151 | case UDIV: |
6152 | /* If this is a divide by a power of two, treat it as a shift if |
6153 | its first operand is a shift. */ |
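      /* E.g. (udiv (ashift X (const_int 3)) (const_int 8)) is rewritten
	 as a logical right shift,
	 (lshiftrt (ashift X (const_int 3)) (const_int 3)), which the
	 shift simplifier can usually fold further.  */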
      if (is_a <scalar_int_mode> (mode, &int_mode)
6155 | && CONST_INT_P (XEXP (x, 1)) |
6156 | && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0 |
6157 | && (GET_CODE (XEXP (x, 0)) == ASHIFT |
6158 | || GET_CODE (XEXP (x, 0)) == LSHIFTRT |
6159 | || GET_CODE (XEXP (x, 0)) == ASHIFTRT |
6160 | || GET_CODE (XEXP (x, 0)) == ROTATE |
6161 | || GET_CODE (XEXP (x, 0)) == ROTATERT)) |
6162 | return simplify_shift_const (NULL_RTX, LSHIFTRT, int_mode, |
6163 | XEXP (x, 0), i); |
6164 | break; |
6165 | |
6166 | case EQ: case NE: |
6167 | case GT: case GTU: case GE: case GEU: |
6168 | case LT: case LTU: case LE: case LEU: |
6169 | case UNEQ: case LTGT: |
6170 | case UNGT: case UNGE: |
6171 | case UNLT: case UNLE: |
6172 | case UNORDERED: case ORDERED: |
6173 | /* If the first operand is a condition code, we can't do anything |
6174 | with it. */ |
6175 | if (GET_CODE (XEXP (x, 0)) == COMPARE |
6176 | || GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC) |
6177 | { |
6178 | rtx op0 = XEXP (x, 0); |
6179 | rtx op1 = XEXP (x, 1); |
6180 | enum rtx_code new_code; |
6181 | |
6182 | if (GET_CODE (op0) == COMPARE) |
6183 | op1 = XEXP (op0, 1), op0 = XEXP (op0, 0); |
6184 | |
6185 | /* Simplify our comparison, if possible. */ |
6186 | new_code = simplify_comparison (code, &op0, &op1); |
6187 | |
6188 | /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X |
6189 | if only the low-order bit is possibly nonzero in X (such as when |
6190 | X is a ZERO_EXTRACT of one bit). Similarly, we can convert EQ to |
6191 | (xor X 1) or (minus 1 X); we use the former. Finally, if X is |
6192 | known to be either 0 or -1, NE becomes a NEG and EQ becomes |
6193 | (plus X 1). |
6194 | |
6195 | Remove any ZERO_EXTRACT we made when thinking this was a |
6196 | comparison. It may now be simpler to use, e.g., an AND. If a |
6197 | ZERO_EXTRACT is indeed appropriate, it will be placed back by |
6198 | the call to make_compound_operation in the SET case. |
6199 | |
6200 | Don't apply these optimizations if the caller would |
6201 | prefer a comparison rather than a value. |
6202 | E.g., for the condition in an IF_THEN_ELSE most targets need |
6203 | an explicit comparison. */ |
6204 | |
6205 | if (in_cond) |
6206 | ; |
6207 | |
6208 | else if (STORE_FLAG_VALUE == 1 |
6209 | && new_code == NE |
		 && is_int_mode (mode, &int_mode)
6211 | && op1 == const0_rtx |
6212 | && int_mode == GET_MODE (op0) |
6213 | && nonzero_bits (op0, int_mode) == 1) |
6214 | return gen_lowpart (int_mode, |
6215 | expand_compound_operation (op0)); |
6216 | |
6217 | else if (STORE_FLAG_VALUE == 1 |
6218 | && new_code == NE |
		 && is_int_mode (mode, &int_mode)
6220 | && op1 == const0_rtx |
6221 | && int_mode == GET_MODE (op0) |
6222 | && (num_sign_bit_copies (op0, int_mode) |
		     == GET_MODE_PRECISION (int_mode)))
6224 | { |
6225 | op0 = expand_compound_operation (op0); |
	      return simplify_gen_unary (NEG, int_mode,
					 gen_lowpart (int_mode, op0),
					 int_mode);
6229 | } |
6230 | |
6231 | else if (STORE_FLAG_VALUE == 1 |
6232 | && new_code == EQ |
		 && is_int_mode (mode, &int_mode)
6234 | && op1 == const0_rtx |
6235 | && int_mode == GET_MODE (op0) |
6236 | && nonzero_bits (op0, int_mode) == 1) |
6237 | { |
6238 | op0 = expand_compound_operation (op0); |
	      return simplify_gen_binary (XOR, int_mode,
6240 | gen_lowpart (int_mode, op0), |
6241 | const1_rtx); |
6242 | } |
6243 | |
6244 | else if (STORE_FLAG_VALUE == 1 |
6245 | && new_code == EQ |
		 && is_int_mode (mode, &int_mode)
6247 | && op1 == const0_rtx |
6248 | && int_mode == GET_MODE (op0) |
6249 | && (num_sign_bit_copies (op0, int_mode) |
		     == GET_MODE_PRECISION (int_mode)))
6251 | { |
6252 | op0 = expand_compound_operation (op0); |
6253 | return plus_constant (int_mode, gen_lowpart (int_mode, op0), 1); |
6254 | } |
6255 | |
6256 | /* If STORE_FLAG_VALUE is -1, we have cases similar to |
6257 | those above. */ |
6258 | if (in_cond) |
6259 | ; |
6260 | |
6261 | else if (STORE_FLAG_VALUE == -1 |
6262 | && new_code == NE |
		 && is_int_mode (mode, &int_mode)
6264 | && op1 == const0_rtx |
6265 | && int_mode == GET_MODE (op0) |
6266 | && (num_sign_bit_copies (op0, int_mode) |
		     == GET_MODE_PRECISION (int_mode)))
6268 | return gen_lowpart (int_mode, expand_compound_operation (op0)); |
6269 | |
6270 | else if (STORE_FLAG_VALUE == -1 |
6271 | && new_code == NE |
		 && is_int_mode (mode, &int_mode)
6273 | && op1 == const0_rtx |
6274 | && int_mode == GET_MODE (op0) |
6275 | && nonzero_bits (op0, int_mode) == 1) |
6276 | { |
6277 | op0 = expand_compound_operation (op0); |
	      return simplify_gen_unary (NEG, int_mode,
					 gen_lowpart (int_mode, op0),
					 int_mode);
6281 | } |
6282 | |
6283 | else if (STORE_FLAG_VALUE == -1 |
6284 | && new_code == EQ |
		 && is_int_mode (mode, &int_mode)
6286 | && op1 == const0_rtx |
6287 | && int_mode == GET_MODE (op0) |
6288 | && (num_sign_bit_copies (op0, int_mode) |
		     == GET_MODE_PRECISION (int_mode)))
6290 | { |
6291 | op0 = expand_compound_operation (op0); |
	      return simplify_gen_unary (NOT, int_mode,
					 gen_lowpart (int_mode, op0),
					 int_mode);
6295 | } |
6296 | |
6297 | /* If X is 0/1, (eq X 0) is X-1. */ |
6298 | else if (STORE_FLAG_VALUE == -1 |
6299 | && new_code == EQ |
		 && is_int_mode (mode, &int_mode)
6301 | && op1 == const0_rtx |
6302 | && int_mode == GET_MODE (op0) |
6303 | && nonzero_bits (op0, int_mode) == 1) |
6304 | { |
6305 | op0 = expand_compound_operation (op0); |
6306 | return plus_constant (int_mode, gen_lowpart (int_mode, op0), -1); |
6307 | } |
6308 | |
6309 | /* If STORE_FLAG_VALUE says to just test the sign bit and X has just |
6310 | one bit that might be nonzero, we can convert (ne x 0) to |
6311 | (ashift x c) where C puts the bit in the sign bit. Remove any |
6312 | AND with STORE_FLAG_VALUE when we are done, since we are only |
6313 | going to test the sign bit. */ |
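	  /* For example, if STORE_FLAG_VALUE is the SImode sign bit and
	     only bit 3 of X can be nonzero, (ne X 0) becomes
	     (ashift X (const_int 28)), which moves that bit into the
	     sign bit.  */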
6314 | if (new_code == NE |
	      && is_int_mode (mode, &int_mode)
	      && HWI_COMPUTABLE_MODE_P (int_mode)
6317 | && val_signbit_p (int_mode, STORE_FLAG_VALUE) |
6318 | && op1 == const0_rtx |
6319 | && int_mode == GET_MODE (op0) |
	      && (i = exact_log2 (nonzero_bits (op0, int_mode))) >= 0)
6321 | { |
6322 | x = simplify_shift_const (NULL_RTX, ASHIFT, int_mode, |
6323 | expand_compound_operation (op0), |
					GET_MODE_PRECISION (int_mode) - 1 - i);
6325 | if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx) |
6326 | return XEXP (x, 0); |
6327 | else |
6328 | return x; |
6329 | } |
6330 | |
6331 | /* If the code changed, return a whole new comparison. |
6332 | We also need to avoid using SUBST in cases where |
6333 | simplify_comparison has widened a comparison with a CONST_INT, |
6334 | since in that case the wider CONST_INT may fail the sanity |
6335 | checks in do_SUBST. */ |
6336 | if (new_code != code |
6337 | || (CONST_INT_P (op1) |
6338 | && GET_MODE (op0) != GET_MODE (XEXP (x, 0)) |
6339 | && GET_MODE (op0) != GET_MODE (XEXP (x, 1)))) |
6340 | return gen_rtx_fmt_ee (new_code, mode, op0, op1); |
6341 | |
6342 | /* Otherwise, keep this operation, but maybe change its operands. |
6343 | This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR). */ |
6344 | SUBST (XEXP (x, 0), op0); |
6345 | SUBST (XEXP (x, 1), op1); |
6346 | } |
6347 | break; |
6348 | |
6349 | case IF_THEN_ELSE: |
6350 | return simplify_if_then_else (x); |
6351 | |
6352 | case ZERO_EXTRACT: |
6353 | case SIGN_EXTRACT: |
6354 | case ZERO_EXTEND: |
6355 | case SIGN_EXTEND: |
6356 | /* If we are processing SET_DEST, we are done. */ |
6357 | if (in_dest) |
6358 | return x; |
6359 | |
6360 | return expand_compound_operation (x); |
6361 | |
6362 | case SET: |
6363 | return simplify_set (x); |
6364 | |
6365 | case AND: |
6366 | case IOR: |
6367 | return simplify_logical (x); |
6368 | |
6369 | case ASHIFT: |
6370 | case LSHIFTRT: |
6371 | case ASHIFTRT: |
6372 | case ROTATE: |
6373 | case ROTATERT: |
6374 | /* If this is a shift by a constant amount, simplify it. */ |
6375 | if (CONST_INT_P (XEXP (x, 1))) |
6376 | return simplify_shift_const (x, code, mode, XEXP (x, 0), |
6377 | INTVAL (XEXP (x, 1))); |
6378 | |
6379 | else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1))) |
6380 | SUBST (XEXP (x, 1), |
6381 | force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)), |
6382 | (HOST_WIDE_INT_1U |
6383 | << exact_log2 (GET_MODE_UNIT_BITSIZE |
6384 | (GET_MODE (x)))) - 1, false)); |
6385 | break; |
6386 | case VEC_SELECT: |
6387 | { |
6388 | rtx trueop0 = XEXP (x, 0); |
6389 | mode = GET_MODE (trueop0); |
6390 | rtx trueop1 = XEXP (x, 1); |
6391 | /* If we select a low-part subreg, return that. */ |
	if (vec_series_lowpart_p (GET_MODE (x), mode, trueop1))
	  {
	    rtx new_rtx = lowpart_subreg (GET_MODE (x), trueop0, mode);
6395 | if (new_rtx != NULL_RTX) |
6396 | return new_rtx; |
6397 | } |
6398 | } |
6399 | |
6400 | default: |
6401 | break; |
6402 | } |
6403 | |
6404 | return x; |
6405 | } |
6406 | |
6407 | /* Simplify X, an IF_THEN_ELSE expression. Return the new expression. */ |
6408 | |
6409 | static rtx |
6410 | simplify_if_then_else (rtx x) |
6411 | { |
6412 | machine_mode mode = GET_MODE (x); |
6413 | rtx cond = XEXP (x, 0); |
6414 | rtx true_rtx = XEXP (x, 1); |
6415 | rtx false_rtx = XEXP (x, 2); |
6416 | enum rtx_code true_code = GET_CODE (cond); |
6417 | bool comparison_p = COMPARISON_P (cond); |
6418 | rtx temp; |
6419 | int i; |
6420 | enum rtx_code false_code; |
6421 | rtx reversed; |
6422 | scalar_int_mode int_mode, inner_mode; |
6423 | |
6424 | /* Simplify storing of the truth value. */ |
6425 | if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx) |
    return simplify_gen_relational (true_code, mode, VOIDmode,
6427 | XEXP (cond, 0), XEXP (cond, 1)); |
6428 | |
6429 | /* Also when the truth value has to be reversed. */ |
6430 | if (comparison_p |
6431 | && true_rtx == const0_rtx && false_rtx == const_true_rtx |
6432 | && (reversed = reversed_comparison (cond, mode))) |
6433 | return reversed; |
6434 | |
6435 | /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used |
6436 | in it is being compared against certain values. Get the true and false |
6437 | comparisons and see if that says anything about the value of each arm. */ |
6438 | |
6439 | if (comparison_p |
6440 | && ((false_code = reversed_comparison_code (cond, NULL)) |
6441 | != UNKNOWN) |
6442 | && REG_P (XEXP (cond, 0))) |
6443 | { |
6444 | HOST_WIDE_INT nzb; |
6445 | rtx from = XEXP (cond, 0); |
6446 | rtx true_val = XEXP (cond, 1); |
6447 | rtx false_val = true_val; |
6448 | bool swapped = false; |
6449 | |
6450 | /* If FALSE_CODE is EQ, swap the codes and arms. */ |
6451 | |
6452 | if (false_code == EQ) |
6453 | { |
6454 | swapped = true, true_code = EQ, false_code = NE; |
	  std::swap (true_rtx, false_rtx);
6456 | } |
6457 | |
6458 | scalar_int_mode from_mode; |
      if (is_a <scalar_int_mode> (GET_MODE (from), &from_mode))
6460 | { |
6461 | /* If we are comparing against zero and the expression being |
6462 | tested has only a single bit that might be nonzero, that is |
6463 | its value when it is not equal to zero. Similarly if it is |
6464 | known to be -1 or 0. */ |
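	  /* For instance, if the condition is (eq R (const_int 0)) and
	     only bit 2 of R can be set, then R must be 4 in the false
	     arm, and uses of R there can be replaced by 4.  */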
6465 | if (true_code == EQ |
6466 | && true_val == const0_rtx |
	      && pow2p_hwi (nzb = nonzero_bits (from, from_mode)))
6468 | { |
6469 | false_code = EQ; |
6470 | false_val = gen_int_mode (nzb, from_mode); |
6471 | } |
6472 | else if (true_code == EQ |
6473 | && true_val == const0_rtx |
6474 | && (num_sign_bit_copies (from, from_mode) |
		       == GET_MODE_PRECISION (from_mode)))
6476 | { |
6477 | false_code = EQ; |
6478 | false_val = constm1_rtx; |
6479 | } |
6480 | } |
6481 | |
6482 | /* Now simplify an arm if we know the value of the register in the |
6483 | branch and it is used in the arm. Be careful due to the potential |
6484 | of locally-shared RTL. */ |
6485 | |
6486 | if (reg_mentioned_p (from, true_rtx)) |
	true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
				      from, true_val),
			  pc_rtx, pc_rtx, false, false, false);
6490 | if (reg_mentioned_p (from, false_rtx)) |
	false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
				       from, false_val),
			   pc_rtx, pc_rtx, false, false, false);
6494 | |
6495 | SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx); |
6496 | SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx); |
6497 | |
6498 | true_rtx = XEXP (x, 1); |
6499 | false_rtx = XEXP (x, 2); |
6500 | true_code = GET_CODE (cond); |
6501 | } |
6502 | |
6503 | /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be |
6504 | reversed, do so to avoid needing two sets of patterns for |
6505 | subtract-and-branch insns. Similarly if we have a constant in the true |
6506 | arm, the false arm is the same as the first operand of the comparison, or |
6507 | the false arm is more complicated than the true arm. */ |
6508 | |
6509 | if (comparison_p |
6510 | && reversed_comparison_code (cond, NULL) != UNKNOWN |
6511 | && (true_rtx == pc_rtx |
6512 | || (CONSTANT_P (true_rtx) |
6513 | && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx) |
6514 | || true_rtx == const0_rtx |
6515 | || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx)) |
6516 | || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx)) |
6517 | && !OBJECT_P (false_rtx)) |
6518 | || reg_mentioned_p (true_rtx, false_rtx) |
6519 | || rtx_equal_p (false_rtx, XEXP (cond, 0)))) |
6520 | { |
6521 | SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond))); |
6522 | SUBST (XEXP (x, 1), false_rtx); |
6523 | SUBST (XEXP (x, 2), true_rtx); |
6524 | |
      std::swap (true_rtx, false_rtx);
6526 | cond = XEXP (x, 0); |
6527 | |
6528 | /* It is possible that the conditional has been simplified out. */ |
6529 | true_code = GET_CODE (cond); |
6530 | comparison_p = COMPARISON_P (cond); |
6531 | } |
6532 | |
6533 | /* If the two arms are identical, we don't need the comparison. */ |
6534 | |
6535 | if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond)) |
6536 | return true_rtx; |
6537 | |
6538 | /* Convert a == b ? b : a to "a". */ |
6539 | if (true_code == EQ && ! side_effects_p (cond) |
6540 | && !HONOR_NANS (mode) |
6541 | && rtx_equal_p (XEXP (cond, 0), false_rtx) |
6542 | && rtx_equal_p (XEXP (cond, 1), true_rtx)) |
6543 | return false_rtx; |
6544 | else if (true_code == NE && ! side_effects_p (cond) |
6545 | && !HONOR_NANS (mode) |
6546 | && rtx_equal_p (XEXP (cond, 0), true_rtx) |
6547 | && rtx_equal_p (XEXP (cond, 1), false_rtx)) |
6548 | return true_rtx; |
6549 | |
6550 | /* Look for cases where we have (abs x) or (neg (abs X)). */ |
6551 | |
6552 | if (GET_MODE_CLASS (mode) == MODE_INT |
6553 | && comparison_p |
6554 | && XEXP (cond, 1) == const0_rtx |
6555 | && GET_CODE (false_rtx) == NEG |
6556 | && rtx_equal_p (true_rtx, XEXP (false_rtx, 0)) |
6557 | && rtx_equal_p (true_rtx, XEXP (cond, 0)) |
6558 | && ! side_effects_p (true_rtx)) |
6559 | switch (true_code) |
6560 | { |
6561 | case GT: |
6562 | case GE: |
	return simplify_gen_unary (ABS, mode, true_rtx, mode);
6564 | case LT: |
6565 | case LE: |
6566 | return |
	  simplify_gen_unary (NEG, mode,
			      simplify_gen_unary (ABS, mode, true_rtx, mode),
			      mode);
6570 | default: |
6571 | break; |
6572 | } |
6573 | |
6574 | /* Look for MIN or MAX. */ |
6575 | |
6576 | if ((! FLOAT_MODE_P (mode) |
6577 | || (flag_unsafe_math_optimizations |
6578 | && !HONOR_NANS (mode) |
6579 | && !HONOR_SIGNED_ZEROS (mode))) |
6580 | && comparison_p |
6581 | && rtx_equal_p (XEXP (cond, 0), true_rtx) |
6582 | && rtx_equal_p (XEXP (cond, 1), false_rtx) |
6583 | && ! side_effects_p (cond)) |
6584 | switch (true_code) |
6585 | { |
6586 | case GE: |
6587 | case GT: |
	return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
      case LE:
      case LT:
	return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
      case GEU:
      case GTU:
	return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
      case LEU:
      case LTU:
	return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
6598 | default: |
6599 | break; |
6600 | } |
6601 | |
6602 | /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its |
6603 | second operand is zero, this can be done as (OP Z (mult COND C2)) where |
6604 | C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or |
6605 | SIGN_EXTEND as long as Z is already extended (so we don't destroy it). |
6606 | We can do this kind of thing in some cases when STORE_FLAG_VALUE is |
     neither 1 nor -1, but it isn't worth checking for.  */
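  /* For example, with STORE_FLAG_VALUE == 1,
     (if_then_else COND (plus Z (const_int 4)) Z) can become
     (plus Z (mult COND (const_int 4))), since COND evaluates to 1 or 0.  */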
6608 | |
6609 | if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1) |
6610 | && comparison_p |
      && is_int_mode (mode, &int_mode)
6612 | && ! side_effects_p (x)) |
6613 | { |
6614 | rtx t = make_compound_operation (true_rtx, SET); |
6615 | rtx f = make_compound_operation (false_rtx, SET); |
6616 | rtx cond_op0 = XEXP (cond, 0); |
6617 | rtx cond_op1 = XEXP (cond, 1); |
6618 | enum rtx_code op = UNKNOWN, extend_op = UNKNOWN; |
6619 | scalar_int_mode m = int_mode; |
6620 | rtx z = 0, c1 = NULL_RTX; |
6621 | |
6622 | if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS |
6623 | || GET_CODE (t) == IOR || GET_CODE (t) == XOR |
6624 | || GET_CODE (t) == ASHIFT |
6625 | || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT) |
6626 | && rtx_equal_p (XEXP (t, 0), f)) |
6627 | c1 = XEXP (t, 1), op = GET_CODE (t), z = f; |
6628 | |
6629 | /* If an identity-zero op is commutative, check whether there |
6630 | would be a match if we swapped the operands. */ |
6631 | else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR |
6632 | || GET_CODE (t) == XOR) |
6633 | && rtx_equal_p (XEXP (t, 1), f)) |
6634 | c1 = XEXP (t, 0), op = GET_CODE (t), z = f; |
6635 | else if (GET_CODE (t) == SIGN_EXTEND |
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6637 | && (GET_CODE (XEXP (t, 0)) == PLUS |
6638 | || GET_CODE (XEXP (t, 0)) == MINUS |
6639 | || GET_CODE (XEXP (t, 0)) == IOR |
6640 | || GET_CODE (XEXP (t, 0)) == XOR |
6641 | || GET_CODE (XEXP (t, 0)) == ASHIFT |
6642 | || GET_CODE (XEXP (t, 0)) == LSHIFTRT |
6643 | || GET_CODE (XEXP (t, 0)) == ASHIFTRT) |
6644 | && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG |
6645 | && subreg_lowpart_p (XEXP (XEXP (t, 0), 0)) |
6646 | && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f) |
6647 | && (num_sign_bit_copies (f, GET_MODE (f)) |
6648 | > (unsigned int) |
		      (GET_MODE_PRECISION (int_mode)
		       - GET_MODE_PRECISION (inner_mode))))
6651 | { |
6652 | c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0)); |
6653 | extend_op = SIGN_EXTEND; |
6654 | m = inner_mode; |
6655 | } |
6656 | else if (GET_CODE (t) == SIGN_EXTEND |
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6658 | && (GET_CODE (XEXP (t, 0)) == PLUS |
6659 | || GET_CODE (XEXP (t, 0)) == IOR |
6660 | || GET_CODE (XEXP (t, 0)) == XOR) |
6661 | && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG |
6662 | && subreg_lowpart_p (XEXP (XEXP (t, 0), 1)) |
6663 | && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f) |
6664 | && (num_sign_bit_copies (f, GET_MODE (f)) |
6665 | > (unsigned int) |
		      (GET_MODE_PRECISION (int_mode)
		       - GET_MODE_PRECISION (inner_mode))))
6668 | { |
6669 | c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0)); |
6670 | extend_op = SIGN_EXTEND; |
6671 | m = inner_mode; |
6672 | } |
6673 | else if (GET_CODE (t) == ZERO_EXTEND |
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6675 | && (GET_CODE (XEXP (t, 0)) == PLUS |
6676 | || GET_CODE (XEXP (t, 0)) == MINUS |
6677 | || GET_CODE (XEXP (t, 0)) == IOR |
6678 | || GET_CODE (XEXP (t, 0)) == XOR |
6679 | || GET_CODE (XEXP (t, 0)) == ASHIFT |
6680 | || GET_CODE (XEXP (t, 0)) == LSHIFTRT |
6681 | || GET_CODE (XEXP (t, 0)) == ASHIFTRT) |
6682 | && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG |
	       && HWI_COMPUTABLE_MODE_P (int_mode)
6684 | && subreg_lowpart_p (XEXP (XEXP (t, 0), 0)) |
6685 | && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f) |
6686 | && ((nonzero_bits (f, GET_MODE (f)) |
6687 | & ~GET_MODE_MASK (inner_mode)) |
6688 | == 0)) |
6689 | { |
6690 | c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0)); |
6691 | extend_op = ZERO_EXTEND; |
6692 | m = inner_mode; |
6693 | } |
6694 | else if (GET_CODE (t) == ZERO_EXTEND |
	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6696 | && (GET_CODE (XEXP (t, 0)) == PLUS |
6697 | || GET_CODE (XEXP (t, 0)) == IOR |
6698 | || GET_CODE (XEXP (t, 0)) == XOR) |
6699 | && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG |
	       && HWI_COMPUTABLE_MODE_P (int_mode)
6701 | && subreg_lowpart_p (XEXP (XEXP (t, 0), 1)) |
6702 | && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f) |
6703 | && ((nonzero_bits (f, GET_MODE (f)) |
6704 | & ~GET_MODE_MASK (inner_mode)) |
6705 | == 0)) |
6706 | { |
6707 | c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0)); |
6708 | extend_op = ZERO_EXTEND; |
6709 | m = inner_mode; |
6710 | } |
6711 | |
6712 | if (z) |
6713 | { |
6714 | machine_mode cm = m; |
6715 | if ((op == ASHIFT || op == LSHIFTRT || op == ASHIFTRT) |
6716 | && GET_MODE (c1) != VOIDmode) |
6717 | cm = GET_MODE (c1); |
	  temp = subst (simplify_gen_relational (true_code, cm, VOIDmode,
						 cond_op0, cond_op1),
			pc_rtx, pc_rtx, false, false, false);
	  temp = simplify_gen_binary (MULT, cm, temp,
				      simplify_gen_binary (MULT, cm, c1,
							   const_true_rtx));
	  temp = subst (temp, pc_rtx, pc_rtx, false, false, false);
	  temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);

	  if (extend_op != UNKNOWN)
	    temp = simplify_gen_unary (extend_op, int_mode, temp, m);
6729 | |
6730 | return temp; |
6731 | } |
6732 | } |
6733 | |
6734 | /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or |
6735 | 1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the |
6736 | negation of a single bit, we can convert this operation to a shift. We |
6737 | can actually do this more generally, but it doesn't seem worth it. */ |
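  /* E.g. if A is known to be 0 or 1,
     (if_then_else (ne A (const_int 0)) (const_int 8) (const_int 0))
     becomes (ashift A (const_int 3)).  */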
6738 | |
6739 | if (true_code == NE |
      && is_a <scalar_int_mode> (mode, &int_mode)
6741 | && XEXP (cond, 1) == const0_rtx |
6742 | && false_rtx == const0_rtx |
6743 | && CONST_INT_P (true_rtx) |
6744 | && ((nonzero_bits (XEXP (cond, 0), int_mode) == 1 |
6745 | && (i = exact_log2 (UINTVAL (true_rtx))) >= 0) |
6746 | || ((num_sign_bit_copies (XEXP (cond, 0), int_mode) |
	       == GET_MODE_PRECISION (int_mode))
	      && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
6749 | return |
6750 | simplify_shift_const (NULL_RTX, ASHIFT, int_mode, |
6751 | gen_lowpart (int_mode, XEXP (cond, 0)), i); |
6752 | |
6753 | /* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only |
6754 | non-zero bit in A is C1. */ |
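  /* E.g. if A can only be 0 or 4 and C1 is 4, the whole IF_THEN_ELSE is
     simply A, zero-extended if A is narrower than the result mode.  */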
6755 | if (true_code == NE && XEXP (cond, 1) == const0_rtx |
6756 | && false_rtx == const0_rtx && CONST_INT_P (true_rtx) |
      && is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (GET_MODE (XEXP (cond, 0)), &inner_mode)
6759 | && (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode)) |
6760 | == nonzero_bits (XEXP (cond, 0), inner_mode) |
6761 | && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))) >= 0) |
6762 | { |
6763 | rtx val = XEXP (cond, 0); |
6764 | if (inner_mode == int_mode) |
6765 | return val; |
      else if (GET_MODE_PRECISION (inner_mode) < GET_MODE_PRECISION (int_mode))
	return simplify_gen_unary (ZERO_EXTEND, int_mode, val, inner_mode);
6768 | } |
6769 | |
6770 | return x; |
6771 | } |
6772 | |
6773 | /* Simplify X, a SET expression. Return the new expression. */ |
6774 | |
6775 | static rtx |
6776 | simplify_set (rtx x) |
6777 | { |
6778 | rtx src = SET_SRC (x); |
6779 | rtx dest = SET_DEST (x); |
6780 | machine_mode mode |
6781 | = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest); |
6782 | rtx_insn *other_insn; |
6783 | rtx *cc_use; |
6784 | scalar_int_mode int_mode; |
6785 | |
6786 | /* (set (pc) (return)) gets written as (return). */ |
6787 | if (GET_CODE (dest) == PC && ANY_RETURN_P (src)) |
6788 | return src; |
6789 | |
6790 | /* Now that we know for sure which bits of SRC we are using, see if we can |
6791 | simplify the expression for the object knowing that we only need the |
6792 | low-order bits. */ |
6793 | |
6794 | if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode)) |
6795 | { |
6796 | src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, false); |
6797 | SUBST (SET_SRC (x), src); |
6798 | } |
6799 | |
6800 | /* If the source is a COMPARE, look for the use of the comparison result |
6801 | and try to simplify it unless we already have used undobuf.other_insn. */ |
6802 | if ((GET_MODE_CLASS (mode) == MODE_CC || GET_CODE (src) == COMPARE) |
      && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
6804 | && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn) |
6805 | && COMPARISON_P (*cc_use) |
6806 | && rtx_equal_p (XEXP (*cc_use, 0), dest)) |
6807 | { |
6808 | enum rtx_code old_code = GET_CODE (*cc_use); |
6809 | enum rtx_code new_code; |
6810 | rtx op0, op1, tmp; |
6811 | bool other_changed = false; |
6812 | rtx inner_compare = NULL_RTX; |
6813 | machine_mode compare_mode = GET_MODE (dest); |
6814 | |
6815 | if (GET_CODE (src) == COMPARE) |
6816 | { |
6817 | op0 = XEXP (src, 0), op1 = XEXP (src, 1); |
6818 | if (GET_CODE (op0) == COMPARE && op1 == const0_rtx) |
6819 | { |
6820 | inner_compare = op0; |
6821 | op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1); |
6822 | } |
6823 | } |
6824 | else |
6825 | op0 = src, op1 = CONST0_RTX (GET_MODE (src)); |
6826 | |
      tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
6828 | op0, op1); |
6829 | if (!tmp) |
6830 | new_code = old_code; |
6831 | else if (!CONSTANT_P (tmp)) |
6832 | { |
6833 | new_code = GET_CODE (tmp); |
6834 | op0 = XEXP (tmp, 0); |
6835 | op1 = XEXP (tmp, 1); |
6836 | } |
6837 | else |
6838 | { |
	  rtx pat = PATTERN (other_insn);
6840 | undobuf.other_insn = other_insn; |
6841 | SUBST (*cc_use, tmp); |
6842 | |
6843 | /* Attempt to simplify CC user. */ |
6844 | if (GET_CODE (pat) == SET) |
6845 | { |
6846 | rtx new_rtx = simplify_rtx (SET_SRC (pat)); |
6847 | if (new_rtx != NULL_RTX) |
6848 | SUBST (SET_SRC (pat), new_rtx); |
6849 | } |
6850 | |
6851 | /* Convert X into a no-op move. */ |
6852 | SUBST (SET_DEST (x), pc_rtx); |
6853 | SUBST (SET_SRC (x), pc_rtx); |
6854 | return x; |
6855 | } |
6856 | |
6857 | /* Simplify our comparison, if possible. */ |
6858 | new_code = simplify_comparison (new_code, &op0, &op1); |
6859 | |
6860 | #ifdef SELECT_CC_MODE |
6861 | /* If this machine has CC modes other than CCmode, check to see if we |
6862 | need to use a different CC mode here. */ |
6863 | if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC) |
6864 | compare_mode = GET_MODE (op0); |
6865 | else if (inner_compare |
6866 | && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC |
6867 | && new_code == old_code |
6868 | && op0 == XEXP (inner_compare, 0) |
6869 | && op1 == XEXP (inner_compare, 1)) |
6870 | compare_mode = GET_MODE (inner_compare); |
6871 | else |
6872 | compare_mode = SELECT_CC_MODE (new_code, op0, op1); |
6873 | |
6874 | /* If the mode changed, we have to change SET_DEST, the mode in the |
6875 | compare, and the mode in the place SET_DEST is used. If SET_DEST is |
6876 | a hard register, just build new versions with the proper mode. If it |
	 is a pseudo, we lose unless it is the only time we set the pseudo, in
6878 | which case we can safely change its mode. */ |
6879 | if (compare_mode != GET_MODE (dest)) |
6880 | { |
	  if (can_change_dest_mode (dest, 0, compare_mode))
6882 | { |
6883 | unsigned int regno = REGNO (dest); |
6884 | rtx new_dest; |
6885 | |
6886 | if (regno < FIRST_PSEUDO_REGISTER) |
6887 | new_dest = gen_rtx_REG (compare_mode, regno); |
6888 | else |
6889 | { |
		  subst_mode (regno, compare_mode);
6891 | new_dest = regno_reg_rtx[regno]; |
6892 | } |
6893 | |
6894 | SUBST (SET_DEST (x), new_dest); |
6895 | SUBST (XEXP (*cc_use, 0), new_dest); |
6896 | other_changed = true; |
6897 | |
6898 | dest = new_dest; |
6899 | } |
6900 | } |
6901 | #endif /* SELECT_CC_MODE */ |
6902 | |
6903 | /* If the code changed, we have to build a new comparison in |
6904 | undobuf.other_insn. */ |
6905 | if (new_code != old_code) |
6906 | { |
6907 | bool other_changed_previously = other_changed; |
6908 | unsigned HOST_WIDE_INT mask; |
6909 | rtx old_cc_use = *cc_use; |
6910 | |
6911 | SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use), |
6912 | dest, const0_rtx)); |
6913 | other_changed = true; |
6914 | |
6915 | /* If the only change we made was to change an EQ into an NE or |
6916 | vice versa, OP0 has only one bit that might be nonzero, and OP1 |
6917 | is zero, check if changing the user of the condition code will |
6918 | produce a valid insn. If it won't, we can keep the original code |
6919 | in that insn by surrounding our operation with an XOR. */ |
6920 | |
6921 | if (((old_code == NE && new_code == EQ) |
6922 | || (old_code == EQ && new_code == NE)) |
6923 | && ! other_changed_previously && op1 == const0_rtx |
6924 | && HWI_COMPUTABLE_MODE_P (GET_MODE (op0)) |
	      && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
6926 | { |
	      rtx pat = PATTERN (other_insn), note = 0;
6928 | |
6929 | if ((recog_for_combine (&pat, other_insn, ¬e) < 0 |
6930 | && ! check_asm_operands (pat))) |
6931 | { |
6932 | *cc_use = old_cc_use; |
6933 | other_changed = false; |
6934 | |
		  op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
					     gen_int_mode (mask,
							   GET_MODE (op0)));
6938 | } |
6939 | } |
6940 | } |
6941 | |
6942 | if (other_changed) |
6943 | undobuf.other_insn = other_insn; |
6944 | |
6945 | /* Don't generate a compare of a CC with 0, just use that CC. */ |
6946 | if (GET_MODE (op0) == compare_mode && op1 == const0_rtx) |
6947 | { |
6948 | SUBST (SET_SRC (x), op0); |
6949 | src = SET_SRC (x); |
6950 | } |
6951 | /* Otherwise, if we didn't previously have the same COMPARE we |
6952 | want, create it from scratch. */ |
6953 | else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode |
6954 | || XEXP (src, 0) != op0 || XEXP (src, 1) != op1) |
6955 | { |
6956 | SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1)); |
6957 | src = SET_SRC (x); |
6958 | } |
6959 | } |
6960 | else |
6961 | { |
6962 | /* Get SET_SRC in a form where we have placed back any |
6963 | compound expressions. Then do the checks below. */ |
6964 | src = make_compound_operation (src, SET); |
6965 | SUBST (SET_SRC (x), src); |
6966 | } |
6967 | |
6968 | /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation, |
6969 | and X being a REG or (subreg (reg)), we may be able to convert this to |
6970 | (set (subreg:m2 x) (op)). |
6971 | |
6972 | We can always do this if M1 is narrower than M2 because that means that |
6973 | we only care about the low bits of the result. |
6974 | |
6975 | However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot |
6976 | perform a narrower operation than requested since the high-order bits will |
   be undefined.  On machines where it is defined, this transformation is safe
6978 | as long as M1 and M2 have the same number of words. */ |
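/* E.g. on a 64-bit target, (set R:SI (subreg:SI (plus:DI A B) 0)) can
   become (set (subreg:DI R) (plus:DI A B)); only the low part of the
   sum is needed, so performing the wider addition is equivalent.  */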
6979 | |
6980 | if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src) |
6981 | && !OBJECT_P (SUBREG_REG (src)) |
6982 | && (known_equal_after_align_up |
	  (GET_MODE_SIZE (GET_MODE (src)),
	   GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))),
	   UNITS_PER_WORD))
      && (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src))
6987 | && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER |
6988 | && !REG_CAN_CHANGE_MODE_P (REGNO (dest), |
6989 | GET_MODE (SUBREG_REG (src)), |
6990 | GET_MODE (src))) |
6991 | && (REG_P (dest) |
6992 | || (GET_CODE (dest) == SUBREG |
6993 | && REG_P (SUBREG_REG (dest))))) |
6994 | { |
6995 | SUBST (SET_DEST (x), |
6996 | gen_lowpart (GET_MODE (SUBREG_REG (src)), |
6997 | dest)); |
6998 | SUBST (SET_SRC (x), SUBREG_REG (src)); |
6999 | |
7000 | src = SET_SRC (x), dest = SET_DEST (x); |
7001 | } |
7002 | |
7003 | /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this |
7004 | would require a paradoxical subreg. Replace the subreg with a |
7005 | zero_extend to avoid the reload that would otherwise be required. |
7006 | Don't do this unless we have a scalar integer mode, otherwise the |
7007 | transformation is incorrect. */ |
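/* E.g. if the target's load_extend_op for QImode is ZERO_EXTEND,
   (subreg:SI (mem:QI ADDR) 0) is replaced by
   (zero_extend:SI (mem:QI ADDR)).  */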
7008 | |
7009 | enum rtx_code extend_op; |
  if (paradoxical_subreg_p (src)
7011 | && MEM_P (SUBREG_REG (src)) |
7012 | && SCALAR_INT_MODE_P (GET_MODE (src)) |
7013 | && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN) |
7014 | { |
7015 | SUBST (SET_SRC (x), |
7016 | gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src))); |
7017 | |
7018 | src = SET_SRC (x); |
7019 | } |
7020 | |
7021 | /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we |
7022 | are comparing an item known to be 0 or -1 against 0, use a logical |
7023 | operation instead. Check for one of the arms being an IOR of the other |
7024 | arm with some value. We compute three terms to be IOR'ed together. In |
7025 | practice, at most two will be nonzero. Then we do the IOR's. */ |
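  /* For instance, if M is known to be 0 or -1,
     (if_then_else (ne M (const_int 0)) (ior A B) A) becomes
     (ior A (and M B)): term1 is A, term2 is (and M B), and term3,
     (and (not M) (const_int 0)), drops out.  */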
7026 | |
7027 | if (GET_CODE (dest) != PC |
7028 | && GET_CODE (src) == IF_THEN_ELSE |
      && is_int_mode (GET_MODE (src), &int_mode)
7030 | && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE) |
7031 | && XEXP (XEXP (src, 0), 1) == const0_rtx |
7032 | && int_mode == GET_MODE (XEXP (XEXP (src, 0), 0)) |
7033 | && (!HAVE_conditional_move |
	  || ! can_conditionally_move_p (int_mode))
7035 | && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), int_mode) |
	  == GET_MODE_PRECISION (int_mode))
7037 | && ! side_effects_p (src)) |
7038 | { |
7039 | rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE |
7040 | ? XEXP (src, 1) : XEXP (src, 2)); |
7041 | rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE |
7042 | ? XEXP (src, 2) : XEXP (src, 1)); |
7043 | rtx term1 = const0_rtx, term2, term3; |
7044 | |
7045 | if (GET_CODE (true_rtx) == IOR |
7046 | && rtx_equal_p (XEXP (true_rtx, 0), false_rtx)) |
7047 | term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx; |
7048 | else if (GET_CODE (true_rtx) == IOR |
7049 | && rtx_equal_p (XEXP (true_rtx, 1), false_rtx)) |
7050 | term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx; |
7051 | else if (GET_CODE (false_rtx) == IOR |
7052 | && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)) |
7053 | term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx; |
7054 | else if (GET_CODE (false_rtx) == IOR |
7055 | && rtx_equal_p (XEXP (false_rtx, 1), true_rtx)) |
7056 | term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx; |
7057 | |
      term2 = simplify_gen_binary (AND, int_mode,
				   XEXP (XEXP (src, 0), 0), true_rtx);
      term3 = simplify_gen_binary (AND, int_mode,
				   simplify_gen_unary (NOT, int_mode,
						       XEXP (XEXP (src, 0), 0),
						       int_mode),
				   false_rtx);
7065 | |
7066 | SUBST (SET_SRC (x), |
7067 | simplify_gen_binary (IOR, int_mode, |
7068 | simplify_gen_binary (IOR, int_mode, |
7069 | term1, term2), |
7070 | term3)); |
7071 | |
7072 | src = SET_SRC (x); |
7073 | } |
7074 | |
7075 | /* If either SRC or DEST is a CLOBBER of (const_int 0), make this |
7076 | whole thing fail. */ |
7077 | if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx) |
7078 | return src; |
7079 | else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx) |
7080 | return dest; |
7081 | else |
7082 | /* Convert this into a field assignment operation, if possible. */ |
7083 | return make_field_assignment (x); |
7084 | } |
7085 | |
/* Simplify X, an AND, IOR, or XOR operation, and return the simplified
7087 | result. */ |
7088 | |
7089 | static rtx |
7090 | simplify_logical (rtx x) |
7091 | { |
7092 | rtx op0 = XEXP (x, 0); |
7093 | rtx op1 = XEXP (x, 1); |
7094 | scalar_int_mode mode; |
7095 | |
7096 | switch (GET_CODE (x)) |
7097 | { |
7098 | case AND: |
7099 | /* We can call simplify_and_const_int only if we don't lose |
7100 | any (sign) bits when converting INTVAL (op1) to |
7101 | "unsigned HOST_WIDE_INT". */ |
      if (is_a <scalar_int_mode> (GET_MODE (x), &mode)
7103 | && CONST_INT_P (op1) |
7104 | && (HWI_COMPUTABLE_MODE_P (mode) |
7105 | || INTVAL (op1) > 0)) |
7106 | { |
7107 | x = simplify_and_const_int (x, mode, op0, INTVAL (op1)); |
7108 | if (GET_CODE (x) != AND) |
7109 | return x; |
7110 | |
7111 | op0 = XEXP (x, 0); |
7112 | op1 = XEXP (x, 1); |
7113 | } |
7114 | |
7115 | /* If we have any of (and (ior A B) C) or (and (xor A B) C), |
7116 | apply the distributive law and then the inverse distributive |
7117 | law to see if things simplify. */ |
7118 | if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR) |
7119 | { |
7120 | rtx result = distribute_and_simplify_rtx (x, 0); |
7121 | if (result) |
7122 | return result; |
7123 | } |
7124 | if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR) |
7125 | { |
7126 | rtx result = distribute_and_simplify_rtx (x, 1); |
7127 | if (result) |
7128 | return result; |
7129 | } |
7130 | break; |
7131 | |
7132 | case IOR: |
7133 | /* If we have (ior (and A B) C), apply the distributive law and then |
7134 | the inverse distributive law to see if things simplify. */ |
7135 | |
7136 | if (GET_CODE (op0) == AND) |
7137 | { |
7138 | rtx result = distribute_and_simplify_rtx (x, 0); |
7139 | if (result) |
7140 | return result; |
7141 | } |
7142 | |
7143 | if (GET_CODE (op1) == AND) |
7144 | { |
7145 | rtx result = distribute_and_simplify_rtx (x, 1); |
7146 | if (result) |
7147 | return result; |
7148 | } |
7149 | break; |
7150 | |
7151 | default: |
7152 | gcc_unreachable (); |
7153 | } |
7154 | |
7155 | return x; |
7156 | } |
7157 | |
7158 | /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound |
7159 | operations" because they can be replaced with two more basic operations. |
7160 | ZERO_EXTEND is also considered "compound" because it can be replaced with |
7161 | an AND operation, which is simpler, though only one operation. |
7162 | |
7163 | The function expand_compound_operation is called with an rtx expression |
7164 | and will convert it to the appropriate shifts and AND operations, |
7165 | simplifying at each stage. |
7166 | |
7167 | The function make_compound_operation is called to convert an expression |
7168 | consisting of shifts and ANDs into the equivalent compound expression. |
7169 | It is the inverse of this function, loosely speaking. */ |
7170 | |
7171 | static rtx |
7172 | expand_compound_operation (rtx x) |
7173 | { |
7174 | unsigned HOST_WIDE_INT pos = 0, len; |
7175 | bool unsignedp = false; |
7176 | unsigned int modewidth; |
7177 | rtx tem; |
7178 | scalar_int_mode inner_mode; |
7179 | |
7180 | switch (GET_CODE (x)) |
7181 | { |
7182 | case ZERO_EXTEND: |
7183 | unsignedp = true; |
7184 | /* FALLTHRU */ |
7185 | case SIGN_EXTEND: |
7186 | /* We can't necessarily use a const_int for a multiword mode; |
7187 | it depends on implicitly extending the value. |
7188 | Since we don't know the right way to extend it, |
7189 | we can't tell whether the implicit way is right. |
7190 | |
7191 | Even for a mode that is no wider than a const_int, |
7192 | we can't win, because we need to sign extend one of its bits through |
7193 | the rest of it, and we don't know which bit. */ |
7194 | if (CONST_INT_P (XEXP (x, 0))) |
7195 | return x; |
7196 | |
7197 | /* Reject modes that aren't scalar integers because turning vector |
7198 | or complex modes into shifts causes problems. */ |
      if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7200 | return x; |
7201 | |
7202 | /* Return if (subreg:MODE FROM 0) is not a safe replacement for |
7203 | (zero_extend:MODE FROM) or (sign_extend:MODE FROM). It is for any MEM |
7204 | because (SUBREG (MEM...)) is guaranteed to cause the MEM to be |
7205 | reloaded. If not for that, MEM's would very rarely be safe. |
7206 | |
7207 | Reject modes bigger than a word, because we might not be able |
7208 | to reference a two-register group starting with an arbitrary register |
7209 | (and currently gen_lowpart might crash for a SUBREG). */ |
7210 | |
      if (GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
7212 | return x; |
7213 | |
      len = GET_MODE_PRECISION (inner_mode);
7215 | /* If the inner object has VOIDmode (the only way this can happen |
7216 | is if it is an ASM_OPERANDS), we can't do anything since we don't |
7217 | know how much masking to do. */ |
7218 | if (len == 0) |
7219 | return x; |
7220 | |
7221 | break; |
7222 | |
7223 | case ZERO_EXTRACT: |
7224 | unsignedp = true; |
7225 | |
7226 | /* fall through */ |
7227 | |
7228 | case SIGN_EXTRACT: |
7229 | /* If the operand is a CLOBBER, just return it. */ |
7230 | if (GET_CODE (XEXP (x, 0)) == CLOBBER) |
7231 | return XEXP (x, 0); |
7232 | |
7233 | if (!CONST_INT_P (XEXP (x, 1)) |
7234 | || !CONST_INT_P (XEXP (x, 2))) |
7235 | return x; |
7236 | |
7237 | /* Reject modes that aren't scalar integers because turning vector |
7238 | or complex modes into shifts causes problems. */ |
      if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7240 | return x; |
7241 | |
7242 | len = INTVAL (XEXP (x, 1)); |
7243 | pos = INTVAL (XEXP (x, 2)); |
7244 | |
7245 | /* This should stay within the object being extracted, fail otherwise. */ |
      if (len + pos > GET_MODE_PRECISION (inner_mode))
7247 | return x; |
7248 | |
7249 | if (BITS_BIG_ENDIAN) |
	pos = GET_MODE_PRECISION (inner_mode) - len - pos;
7251 | |
7252 | break; |
7253 | |
7254 | default: |
7255 | return x; |
7256 | } |
7257 | |
7258 | /* We've rejected non-scalar operations by now. */ |
7259 | scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (x)); |
7260 | |
7261 | /* Convert sign extension to zero extension, if we know that the high |
7262 | bit is not set, as this is easier to optimize. It will be converted |
     back to a cheaper alternative in make_extraction.  */
7264 | if (GET_CODE (x) == SIGN_EXTEND |
7265 | && HWI_COMPUTABLE_MODE_P (mode) |
7266 | && ((nonzero_bits (XEXP (x, 0), inner_mode) |
7267 | & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (inner_mode)) >> 1)) |
7268 | == 0)) |
7269 | { |
7270 | rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0)); |
      rtx temp2 = expand_compound_operation (temp);
7272 | |
7273 | /* Make sure this is a profitable operation. */ |
      if (set_src_cost (x, mode, optimize_this_for_speed_p)
	  > set_src_cost (temp2, mode, optimize_this_for_speed_p))
	return temp2;
      else if (set_src_cost (x, mode, optimize_this_for_speed_p)
	       > set_src_cost (temp, mode, optimize_this_for_speed_p))
7279 | return temp; |
7280 | else |
7281 | return x; |
7282 | } |
7283 | |
7284 | /* We can optimize some special cases of ZERO_EXTEND. */ |
7285 | if (GET_CODE (x) == ZERO_EXTEND) |
7286 | { |
7287 | /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we |
7288 | know that the last value didn't have any inappropriate bits |
7289 | set. */ |
7290 | if (GET_CODE (XEXP (x, 0)) == TRUNCATE |
7291 | && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode |
7292 | && HWI_COMPUTABLE_MODE_P (mode) |
7293 | && (nonzero_bits (XEXP (XEXP (x, 0), 0), mode) |
7294 | & ~GET_MODE_MASK (inner_mode)) == 0) |
7295 | return XEXP (XEXP (x, 0), 0); |
7296 | |
7297 | /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */ |
7298 | if (GET_CODE (XEXP (x, 0)) == SUBREG |
7299 | && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode |
7300 | && subreg_lowpart_p (XEXP (x, 0)) |
7301 | && HWI_COMPUTABLE_MODE_P (mode) |
7302 | && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), mode) |
7303 | & ~GET_MODE_MASK (inner_mode)) == 0) |
7304 | return SUBREG_REG (XEXP (x, 0)); |
7305 | |
7306 | /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo |
7307 | is a comparison and STORE_FLAG_VALUE permits. This is like |
7308 | the first case, but it works even when MODE is larger |
7309 | than HOST_WIDE_INT. */ |
7310 | if (GET_CODE (XEXP (x, 0)) == TRUNCATE |
7311 | && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode |
7312 | && COMPARISON_P (XEXP (XEXP (x, 0), 0)) |
	  && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7314 | && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0) |
7315 | return XEXP (XEXP (x, 0), 0); |
7316 | |
7317 | /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */ |
7318 | if (GET_CODE (XEXP (x, 0)) == SUBREG |
7319 | && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode |
7320 | && subreg_lowpart_p (XEXP (x, 0)) |
7321 | && COMPARISON_P (SUBREG_REG (XEXP (x, 0))) |
	  && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7323 | && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0) |
7324 | return SUBREG_REG (XEXP (x, 0)); |
7325 | |
7326 | } |
7327 | |
7328 | /* If we reach here, we want to return a pair of shifts. The inner |
7329 | shift is a left shift of BITSIZE - POS - LEN bits. The outer |
7330 | shift is a right shift of BITSIZE - LEN bits. It is arithmetic or |
7331 | logical depending on the value of UNSIGNEDP. |
7332 | |
7333 | If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be |
7334 | converted into an AND of a shift. |
7335 | |
7336 | We must check for the case where the left shift would have a negative |
7337 | count. This can happen in a case like (x >> 31) & 255 on machines |
7338 | that can't shift by a constant. On those machines, we would first |
7339 | combine the shift with the AND to produce a variable-position |
7340 | extraction. Then the constant of 31 would be substituted in |
7341 | to produce such a position. */ |
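
  /* As an illustrative sketch (the numbers are hypothetical): extracting
     an 8-bit signed field at bit 4 of a 32-bit value R becomes

	(ashiftrt:SI (ashift:SI R (const_int 20)) (const_int 24))

     since BITSIZE - POS - LEN = 32 - 4 - 8 = 20 and BITSIZE - LEN = 24.  */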
7342 | |
7343 | modewidth = GET_MODE_PRECISION (mode); |
7344 | if (modewidth >= pos + len) |
7345 | { |
7346 | tem = gen_lowpart (mode, XEXP (x, 0)); |
7347 | if (!tem || GET_CODE (tem) == CLOBBER) |
7348 | return x; |
7349 | tem = simplify_shift_const (NULL_RTX, ASHIFT, mode, |
7350 | tem, modewidth - pos - len); |
7351 | tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT, |
7352 | mode, tem, modewidth - len); |
7353 | } |
7354 | else if (unsignedp && len < HOST_BITS_PER_WIDE_INT) |
7355 | { |
7356 | tem = simplify_shift_const (NULL_RTX, LSHIFTRT, inner_mode, |
7357 | XEXP (x, 0), pos); |
7358 | tem = gen_lowpart (mode, tem); |
7359 | if (!tem || GET_CODE (tem) == CLOBBER) |
7360 | return x; |
7361 | tem = simplify_and_const_int (NULL_RTX, mode, tem, |
7362 | (HOST_WIDE_INT_1U << len) - 1); |
7363 | } |
7364 | else |
7365 | /* Any other cases we can't handle. */ |
7366 | return x; |
7367 | |
7368 | /* If we couldn't do this for some reason, return the original |
7369 | expression. */ |
7370 | if (GET_CODE (tem) == CLOBBER) |
7371 | return x; |
7372 | |
7373 | return tem; |
7374 | } |
7375 | |
7376 | /* X is a SET which contains an assignment of one object into |
7377 | a part of another (such as a bit-field assignment, STRICT_LOW_PART, |
7378 | or certain SUBREGS). If possible, convert it into a series of |
7379 | logical operations. |
7380 | |
7381 | We half-heartedly support variable positions, but do not at all |
7382 | support variable lengths. */ |
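
/* For instance (a hypothetical example), an assignment such as

     (set (zero_extract:SI R (const_int 8) (const_int 4)) Y)

   can be rewritten as

     (set R (ior:SI (and:SI R (not:SI (ashift:SI (const_int 255)
						 (const_int 4))))
		    (ashift:SI (and:SI Y (const_int 255))
			       (const_int 4))))

   which is the CLEARED/MASKED combination built below.  */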
7383 | |
7384 | static const_rtx |
7385 | expand_field_assignment (const_rtx x) |
7386 | { |
7387 | rtx inner; |
7388 | rtx pos; /* Always counts from low bit. */ |
7389 | int len, inner_len; |
7390 | rtx mask, cleared, masked; |
7391 | scalar_int_mode compute_mode; |
7392 | |
7393 | /* Loop until we find something we can't simplify. */ |
7394 | while (1) |
7395 | { |
7396 | if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART |
7397 | && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG) |
7398 | { |
7399 | rtx x0 = XEXP (SET_DEST (x), 0); |
	  if (!GET_MODE_PRECISION (GET_MODE (x0)).is_constant (&len))
7401 | break; |
7402 | inner = SUBREG_REG (XEXP (SET_DEST (x), 0)); |
7403 | pos = gen_int_mode (subreg_lsb (XEXP (SET_DEST (x), 0)), |
7404 | MAX_MODE_INT); |
7405 | } |
7406 | else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT |
7407 | && CONST_INT_P (XEXP (SET_DEST (x), 1))) |
7408 | { |
7409 | inner = XEXP (SET_DEST (x), 0); |
	  if (!GET_MODE_PRECISION (GET_MODE (inner)).is_constant (&inner_len))
7411 | break; |
7412 | |
7413 | len = INTVAL (XEXP (SET_DEST (x), 1)); |
7414 | pos = XEXP (SET_DEST (x), 2); |
7415 | |
7416 | /* A constant position should stay within the width of INNER. */ |
7417 | if (CONST_INT_P (pos) && INTVAL (pos) + len > inner_len) |
7418 | break; |
7419 | |
7420 | if (BITS_BIG_ENDIAN) |
7421 | { |
7422 | if (CONST_INT_P (pos)) |
7423 | pos = GEN_INT (inner_len - len - INTVAL (pos)); |
7424 | else if (GET_CODE (pos) == MINUS |
7425 | && CONST_INT_P (XEXP (pos, 1)) |
7426 | && INTVAL (XEXP (pos, 1)) == inner_len - len) |
7427 | /* If position is ADJUST - X, new position is X. */ |
7428 | pos = XEXP (pos, 0); |
7429 | else |
		pos = simplify_gen_binary (MINUS, GET_MODE (pos),
					   gen_int_mode (inner_len - len,
							 GET_MODE (pos)),
					   pos);
7434 | } |
7435 | } |
7436 | |
7437 | /* If the destination is a subreg that overwrites the whole of the inner |
7438 | register, we can move the subreg to the source. */ |
7439 | else if (GET_CODE (SET_DEST (x)) == SUBREG |
7440 | /* We need SUBREGs to compute nonzero_bits properly. */ |
7441 | && nonzero_sign_valid |
7442 | && !read_modify_subreg_p (SET_DEST (x))) |
7443 | { |
7444 | x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)), |
7445 | gen_lowpart |
7446 | (GET_MODE (SUBREG_REG (SET_DEST (x))), |
7447 | SET_SRC (x))); |
7448 | continue; |
7449 | } |
7450 | else |
7451 | break; |
7452 | |
7453 | while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner)) |
7454 | inner = SUBREG_REG (inner); |
7455 | |
      /* Don't attempt bitwise arithmetic on non-scalar integer modes.  */
      if (!is_a <scalar_int_mode> (GET_MODE (inner), &compute_mode))
7458 | { |
7459 | /* Don't do anything for vector or complex integral types. */ |
7460 | if (! FLOAT_MODE_P (GET_MODE (inner))) |
7461 | break; |
7462 | |
7463 | /* Try to find an integral mode to pun with. */ |
	  if (!int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (inner)), 0)
	       .exists (&compute_mode))
7466 | break; |
7467 | |
7468 | inner = gen_lowpart (compute_mode, inner); |
7469 | } |
7470 | |
7471 | /* Compute a mask of LEN bits, if we can do this on the host machine. */ |
7472 | if (len >= HOST_BITS_PER_WIDE_INT) |
7473 | break; |
7474 | |
7475 | /* Don't try to compute in too wide unsupported modes. */ |
7476 | if (!targetm.scalar_mode_supported_p (compute_mode)) |
7477 | break; |
7478 | |
7479 | /* gen_lowpart_for_combine returns CLOBBER on failure. */ |
7480 | rtx lowpart = gen_lowpart (compute_mode, SET_SRC (x)); |
7481 | if (GET_CODE (lowpart) == CLOBBER) |
7482 | break; |
7483 | |
7484 | /* Now compute the equivalent expression. Make a copy of INNER |
7485 | for the SET_DEST in case it is a MEM into which we will substitute; |
7486 | we don't want shared RTL in that case. */ |
7487 | mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1, |
7488 | compute_mode); |
      cleared = simplify_gen_binary (AND, compute_mode,
				     simplify_gen_unary (NOT, compute_mode,
				       simplify_gen_binary (ASHIFT,
							    compute_mode,
							    mask, pos),
				       compute_mode),
				     inner);
      masked = simplify_gen_binary (ASHIFT, compute_mode,
				    simplify_gen_binary (AND, compute_mode,
							 lowpart, mask),
				    pos);
7500 | |
7501 | x = gen_rtx_SET (copy_rtx (inner), |
7502 | simplify_gen_binary (IOR, compute_mode, |
7503 | cleared, masked)); |
7504 | } |
7505 | |
7506 | return x; |
7507 | } |
7508 | |
7509 | /* Return an RTX for a reference to LEN bits of INNER. If POS_RTX is nonzero, |
7510 | it is an RTX that represents the (variable) starting position; otherwise, |
7511 | POS is the (constant) starting bit position. Both are counted from the LSB. |
7512 | |
   UNSIGNEDP is true for an unsigned reference and false for a signed one.
7514 | |
7515 | IN_DEST is true if this is a reference in the destination of a SET. |
   This is used when a ZERO_ or SIGN_EXTRACT isn't needed.  If true,
   a STRICT_LOW_PART will be used; if false, ZERO_EXTEND or SIGN_EXTEND
   will be used.
7519 | |
7520 | IN_COMPARE is true if we are in a COMPARE. This means that a |
7521 | ZERO_EXTRACT should be built even for bits starting at bit 0. |
7522 | |
   MODE is the desired mode of the result (if IN_DEST is false).
7524 | |
7525 | The result is an RTX for the extraction or NULL_RTX if the target |
7526 | can't handle it. */ |
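
/* For example (hypothetical operands), asking for 8 unsigned bits at
   bit 4 of a 32-bit register R, outside a SET_DEST and a COMPARE, can
   yield

     (zero_extract:SI R (const_int 8) (const_int 4))

   while the same request at bit 0 is often rejected (returning 0)
   because a plain AND already expresses it; see the checks below.  */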
7527 | |
7528 | static rtx |
make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
7530 | rtx pos_rtx, unsigned HOST_WIDE_INT len, bool unsignedp, |
7531 | bool in_dest, bool in_compare) |
7532 | { |
7533 | /* This mode describes the size of the storage area |
7534 | to fetch the overall value from. Within that, we |
7535 | ignore the POS lowest bits, etc. */ |
7536 | machine_mode is_mode = GET_MODE (inner); |
7537 | machine_mode inner_mode; |
7538 | scalar_int_mode wanted_inner_mode; |
7539 | scalar_int_mode wanted_inner_reg_mode = word_mode; |
7540 | scalar_int_mode pos_mode = word_mode; |
  machine_mode extraction_mode = word_mode;
7542 | rtx new_rtx = 0; |
7543 | rtx orig_pos_rtx = pos_rtx; |
7544 | HOST_WIDE_INT orig_pos; |
7545 | |
7546 | if (pos_rtx && CONST_INT_P (pos_rtx)) |
7547 | pos = INTVAL (pos_rtx), pos_rtx = 0; |
7548 | |
7549 | if (GET_CODE (inner) == SUBREG |
7550 | && subreg_lowpart_p (inner) |
      && (paradoxical_subreg_p (inner)
	  /* If trying or potentially trying to extract
7553 | bits outside of is_mode, don't look through |
7554 | non-paradoxical SUBREGs. See PR82192. */ |
7555 | || (pos_rtx == NULL_RTX |
7556 | && known_le (pos + len, GET_MODE_PRECISION (is_mode))))) |
7557 | { |
7558 | /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...), |
7559 | consider just the QI as the memory to extract from. |
7560 | The subreg adds or removes high bits; its mode is |
7561 | irrelevant to the meaning of this extraction, |
7562 | since POS and LEN count from the lsb. */ |
7563 | if (MEM_P (SUBREG_REG (inner))) |
7564 | is_mode = GET_MODE (SUBREG_REG (inner)); |
7565 | inner = SUBREG_REG (inner); |
7566 | } |
7567 | else if (GET_CODE (inner) == ASHIFT |
7568 | && CONST_INT_P (XEXP (inner, 1)) |
7569 | && pos_rtx == 0 && pos == 0 |
7570 | && len > UINTVAL (XEXP (inner, 1))) |
7571 | { |
7572 | /* We're extracting the least significant bits of an rtx |
7573 | (ashift X (const_int C)), where LEN > C. Extract the |
7574 | least significant (LEN - C) bits of X, giving an rtx |
7575 | whose mode is MODE, then shift it left C times. */ |
7576 | new_rtx = make_extraction (mode, XEXP (inner, 0), |
				 0, 0, len - INTVAL (XEXP (inner, 1)),
7578 | unsignedp, in_dest, in_compare); |
7579 | if (new_rtx != 0) |
7580 | return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1)); |
7581 | } |
7582 | else if (GET_CODE (inner) == MULT |
7583 | && CONST_INT_P (XEXP (inner, 1)) |
7584 | && pos_rtx == 0 && pos == 0) |
7585 | { |
7586 | /* We're extracting the least significant bits of an rtx |
7587 | (mult X (const_int 2^C)), where LEN > C. Extract the |
7588 | least significant (LEN - C) bits of X, giving an rtx |
7589 | whose mode is MODE, then multiply it by 2^C. */ |
7590 | const HOST_WIDE_INT shift_amt = exact_log2 (INTVAL (XEXP (inner, 1))); |
7591 | if (IN_RANGE (shift_amt, 1, len - 1)) |
7592 | { |
7593 | new_rtx = make_extraction (mode, XEXP (inner, 0), |
				     0, 0, len - shift_amt,
7595 | unsignedp, in_dest, in_compare); |
7596 | if (new_rtx) |
7597 | return gen_rtx_MULT (mode, new_rtx, XEXP (inner, 1)); |
7598 | } |
7599 | } |
7600 | else if (GET_CODE (inner) == TRUNCATE |
	   /* If trying or potentially trying to extract
7602 | bits outside of is_mode, don't look through |
7603 | TRUNCATE. See PR82192. */ |
7604 | && pos_rtx == NULL_RTX |
7605 | && known_le (pos + len, GET_MODE_PRECISION (is_mode))) |
7606 | inner = XEXP (inner, 0); |
7607 | |
7608 | inner_mode = GET_MODE (inner); |
7609 | |
7610 | /* See if this can be done without an extraction. We never can if the |
7611 | width of the field is not the same as that of some integer mode. For |
7612 | registers, we can only avoid the extraction if the position is at the |
7613 | low-order bit and this is either not in the destination or we have the |
7614 | appropriate STRICT_LOW_PART operation available. |
7615 | |
7616 | For MEM, we can avoid an extract if the field starts on an appropriate |
7617 | boundary and we can change the mode of the memory reference. */ |
7618 | |
7619 | scalar_int_mode tmode; |
  if (int_mode_for_size (len, 1).exists (&tmode)
7621 | && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0 |
7622 | && !MEM_P (inner) |
7623 | && (pos == 0 || REG_P (inner)) |
7624 | && (inner_mode == tmode |
7625 | || !REG_P (inner) |
7626 | || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode) |
7627 | || reg_truncated_to_mode (tmode, inner)) |
7628 | && (! in_dest |
7629 | || (REG_P (inner) |
7630 | && have_insn_for (STRICT_LOW_PART, tmode)))) |
7631 | || (MEM_P (inner) && pos_rtx == 0 |
7632 | && (pos |
7633 | % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode) |
7634 | : BITS_PER_UNIT)) == 0 |
7635 | /* We can't do this if we are widening INNER_MODE (it |
7636 | may not be aligned, for one thing). */ |
	      && !paradoxical_subreg_p (tmode, inner_mode)
7638 | && known_le (pos + len, GET_MODE_PRECISION (is_mode)) |
7639 | && (inner_mode == tmode |
7640 | || (! mode_dependent_address_p (XEXP (inner, 0), |
7641 | MEM_ADDR_SPACE (inner)) |
7642 | && ! MEM_VOLATILE_P (inner)))))) |
7643 | { |
7644 | /* If INNER is a MEM, make a new MEM that encompasses just the desired |
7645 | field. If the original and current mode are the same, we need not |
7646 | adjust the offset. Otherwise, we do if bytes big endian. |
7647 | |
7648 | If INNER is not a MEM, get a piece consisting of just the field |
7649 | of interest (in this case POS % BITS_PER_WORD must be 0). */ |
7650 | |
7651 | if (MEM_P (inner)) |
7652 | { |
7653 | poly_int64 offset; |
7654 | |
7655 | /* POS counts from lsb, but make OFFSET count in memory order. */ |
7656 | if (BYTES_BIG_ENDIAN) |
7657 | offset = bits_to_bytes_round_down (GET_MODE_PRECISION (is_mode) |
7658 | - len - pos); |
7659 | else |
7660 | offset = pos / BITS_PER_UNIT; |
7661 | |
7662 | new_rtx = adjust_address_nv (inner, tmode, offset); |
7663 | } |
7664 | else if (REG_P (inner)) |
7665 | { |
7666 | if (tmode != inner_mode) |
7667 | { |
7668 | /* We can't call gen_lowpart in a DEST since we |
7669 | always want a SUBREG (see below) and it would sometimes |
7670 | return a new hard register. */ |
7671 | if (pos || in_dest) |
7672 | { |
7673 | poly_uint64 offset |
		= subreg_offset_from_lsb (tmode, inner_mode, pos);
7675 | |
7676 | /* Avoid creating invalid subregs, for example when |
7677 | simplifying (x>>32)&255. */ |
7678 | if (!validate_subreg (tmode, inner_mode, inner, offset)) |
7679 | return NULL_RTX; |
7680 | |
7681 | new_rtx = gen_rtx_SUBREG (tmode, inner, offset); |
7682 | } |
7683 | else |
7684 | new_rtx = gen_lowpart (tmode, inner); |
7685 | } |
7686 | else |
7687 | new_rtx = inner; |
7688 | } |
7689 | else |
7690 | new_rtx = force_to_mode (inner, tmode, |
7691 | len >= HOST_BITS_PER_WIDE_INT |
7692 | ? HOST_WIDE_INT_M1U |
7693 | : (HOST_WIDE_INT_1U << len) - 1, false); |
7694 | |
7695 | /* If this extraction is going into the destination of a SET, |
7696 | make a STRICT_LOW_PART unless we made a MEM. */ |
7697 | |
7698 | if (in_dest) |
7699 | return (MEM_P (new_rtx) ? new_rtx |
7700 | : (GET_CODE (new_rtx) != SUBREG |
7701 | ? gen_rtx_CLOBBER (tmode, const0_rtx) |
7702 | : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx))); |
7703 | |
7704 | if (mode == tmode) |
7705 | return new_rtx; |
7706 | |
7707 | if (CONST_SCALAR_INT_P (new_rtx)) |
	return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
					 mode, new_rtx, tmode);
7710 | |
7711 | /* If we know that no extraneous bits are set, and that the high |
7712 | bit is not set, convert the extraction to the cheaper of |
	 sign and zero extension, which are equivalent in these cases.  */
7714 | if (flag_expensive_optimizations |
	  && (HWI_COMPUTABLE_MODE_P (tmode)
7716 | && ((nonzero_bits (new_rtx, tmode) |
7717 | & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1)) |
7718 | == 0))) |
7719 | { |
7720 | rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx); |
7721 | rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx); |
7722 | |
7723 | /* Prefer ZERO_EXTENSION, since it gives more information to |
7724 | backends. */ |
	  if (set_src_cost (temp, mode, optimize_this_for_speed_p)
	      <= set_src_cost (temp1, mode, optimize_this_for_speed_p))
7727 | return temp; |
7728 | return temp1; |
7729 | } |
7730 | |
7731 | /* Otherwise, sign- or zero-extend unless we already are in the |
7732 | proper mode. */ |
7733 | |
7734 | return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND, |
7735 | mode, new_rtx)); |
7736 | } |
7737 | |
7738 | /* Unless this is a COMPARE or we have a funny memory reference, |
7739 | don't do anything with zero-extending field extracts starting at |
7740 | the low-order bit since they are simple AND operations. */ |
7741 | if (pos_rtx == 0 && pos == 0 && ! in_dest |
7742 | && ! in_compare && unsignedp) |
7743 | return 0; |
7744 | |
  /* If INNER is a MEM, reject this if we would be spanning bytes or
     if the position is not a constant and the length is not 1.  In all
7747 | other cases, we would only be going outside our object in cases when |
7748 | an original shift would have been undefined. */ |
7749 | if (MEM_P (inner) |
7750 | && ((pos_rtx == 0 && maybe_gt (pos + len, GET_MODE_PRECISION (is_mode))) |
7751 | || (pos_rtx != 0 && len != 1))) |
7752 | return 0; |
7753 | |
7754 | enum extraction_pattern pattern = (in_dest ? EP_insv |
7755 | : unsignedp ? EP_extzv : EP_extv); |
7756 | |
7757 | /* If INNER is not from memory, we want it to have the mode of a register |
7758 | extraction pattern's structure operand, or word_mode if there is no |
7759 | such pattern. The same applies to extraction_mode and pos_mode |
7760 | and their respective operands. |
7761 | |
7762 | For memory, assume that the desired extraction_mode and pos_mode |
7763 | are the same as for a register operation, since at present we don't |
7764 | have named patterns for aligned memory structures. */ |
7765 | class extraction_insn insn; |
7766 | unsigned int inner_size; |
  if (GET_MODE_BITSIZE (inner_mode).is_constant (&inner_size)
7768 | && get_best_reg_extraction_insn (&insn, pattern, inner_size, mode)) |
7769 | { |
7770 | wanted_inner_reg_mode = insn.struct_mode.require (); |
7771 | pos_mode = insn.pos_mode; |
7772 | extraction_mode = insn.field_mode; |
7773 | } |
7774 | |
7775 | /* Never narrow an object, since that might not be safe. */ |
7776 | |
7777 | if (mode != VOIDmode |
      && partial_subreg_p (extraction_mode, mode))
7779 | extraction_mode = mode; |
7780 | |
7781 | /* Punt if len is too large for extraction_mode. */ |
7782 | if (maybe_gt (len, GET_MODE_PRECISION (extraction_mode))) |
7783 | return NULL_RTX; |
7784 | |
7785 | if (!MEM_P (inner)) |
7786 | wanted_inner_mode = wanted_inner_reg_mode; |
7787 | else |
7788 | { |
7789 | /* Be careful not to go beyond the extracted object and maintain the |
7790 | natural alignment of the memory. */ |
      wanted_inner_mode = smallest_int_mode_for_size (len);
      while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
	     > GET_MODE_BITSIZE (wanted_inner_mode))
	wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode).require ();
7795 | } |
7796 | |
7797 | orig_pos = pos; |
7798 | |
7799 | if (BITS_BIG_ENDIAN) |
7800 | { |
7801 | /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to |
7802 | BITS_BIG_ENDIAN style. If position is constant, compute new |
7803 | position. Otherwise, build subtraction. |
7804 | Note that POS is relative to the mode of the original argument. |
7805 | If it's a MEM we need to recompute POS relative to that. |
7806 | However, if we're extracting from (or inserting into) a register, |
7807 | we want to recompute POS relative to wanted_inner_mode. */ |
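      /* Sketch with hypothetical numbers: in a 32-bit
	 wanted_inner_mode, LEN 8 at little-endian POS 4 becomes
	 big-endian POS = 32 - 8 - 4 = 20.  */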
7808 | int width; |
7809 | if (!MEM_P (inner)) |
	width = GET_MODE_BITSIZE (wanted_inner_mode);
      else if (!GET_MODE_BITSIZE (is_mode).is_constant (&width))
7812 | return NULL_RTX; |
7813 | |
7814 | if (pos_rtx == 0) |
7815 | pos = width - len - pos; |
7816 | else |
7817 | pos_rtx |
7818 | = gen_rtx_MINUS (GET_MODE (pos_rtx), |
7819 | gen_int_mode (width - len, GET_MODE (pos_rtx)), |
7820 | pos_rtx); |
7821 | /* POS may be less than 0 now, but we check for that below. |
7822 | Note that it can only be less than 0 if !MEM_P (inner). */ |
7823 | } |
7824 | |
7825 | /* If INNER has a wider mode, and this is a constant extraction, try to |
7826 | make it smaller and adjust the byte to point to the byte containing |
7827 | the value. */ |
7828 | if (wanted_inner_mode != VOIDmode |
7829 | && inner_mode != wanted_inner_mode |
7830 | && ! pos_rtx |
      && partial_subreg_p (wanted_inner_mode, is_mode)
7832 | && MEM_P (inner) |
7833 | && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner)) |
7834 | && ! MEM_VOLATILE_P (inner)) |
7835 | { |
7836 | poly_int64 offset = 0; |
7837 | |
7838 | /* The computations below will be correct if the machine is big |
7839 | endian in both bits and bytes or little endian in bits and bytes. |
7840 | If it is mixed, we must adjust. */ |
7841 | |
7842 | /* If bytes are big endian and we had a paradoxical SUBREG, we must |
7843 | adjust OFFSET to compensate. */ |
7844 | if (BYTES_BIG_ENDIAN |
	  && paradoxical_subreg_p (is_mode, inner_mode))
	offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
7847 | |
7848 | /* We can now move to the desired byte. */ |
      offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
		* GET_MODE_SIZE (wanted_inner_mode);
      pos %= GET_MODE_BITSIZE (wanted_inner_mode);
7852 | |
7853 | if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN |
7854 | && is_mode != wanted_inner_mode) |
	offset = (GET_MODE_SIZE (is_mode)
		  - GET_MODE_SIZE (wanted_inner_mode) - offset);
7857 | |
7858 | inner = adjust_address_nv (inner, wanted_inner_mode, offset); |
7859 | } |
7860 | |
7861 | /* If INNER is not memory, get it into the proper mode. If we are changing |
7862 | its mode, POS must be a constant and smaller than the size of the new |
7863 | mode. */ |
7864 | else if (!MEM_P (inner)) |
7865 | { |
      /* On the LHS, don't create paradoxical subregs implicitly truncating
7867 | the register unless TARGET_TRULY_NOOP_TRUNCATION. */ |
7868 | if (in_dest |
7869 | && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner), |
7870 | wanted_inner_mode)) |
7871 | return NULL_RTX; |
7872 | |
7873 | if (GET_MODE (inner) != wanted_inner_mode |
7874 | && (pos_rtx != 0 |
	      || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
7876 | return NULL_RTX; |
7877 | |
7878 | if (orig_pos < 0) |
7879 | return NULL_RTX; |
7880 | |
7881 | inner = force_to_mode (inner, wanted_inner_mode, |
7882 | pos_rtx |
7883 | || len + orig_pos >= HOST_BITS_PER_WIDE_INT |
7884 | ? HOST_WIDE_INT_M1U |
7885 | : (((HOST_WIDE_INT_1U << len) - 1) |
7886 | << orig_pos), false); |
7887 | } |
7888 | |
7889 | /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we |
7890 | have to zero extend. Otherwise, we can just use a SUBREG. |
7891 | |
7892 | We dealt with constant rtxes earlier, so pos_rtx cannot |
7893 | have VOIDmode at this point. */ |
7894 | if (pos_rtx != 0 |
      && (GET_MODE_SIZE (pos_mode)
	  > GET_MODE_SIZE (as_a <scalar_int_mode> (GET_MODE (pos_rtx)))))
7897 | { |
      rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
7899 | GET_MODE (pos_rtx)); |
7900 | |
7901 | /* If we know that no extraneous bits are set, and that the high |
	 bit is not set, convert the extraction to the cheaper of
	 SIGN_EXTEND and ZERO_EXTEND, which are equivalent in these
	 cases.  */
7905 | if (flag_expensive_optimizations |
7906 | && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx)) |
7907 | && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx)) |
7908 | & ~(((unsigned HOST_WIDE_INT) |
7909 | GET_MODE_MASK (GET_MODE (pos_rtx))) |
7910 | >> 1)) |
7911 | == 0))) |
7912 | { |
	  rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
7914 | GET_MODE (pos_rtx)); |
7915 | |
7916 | /* Prefer ZERO_EXTENSION, since it gives more information to |
7917 | backends. */ |
	  if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
	      < set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
7920 | temp = temp1; |
7921 | } |
7922 | pos_rtx = temp; |
7923 | } |
7924 | |
7925 | /* Make POS_RTX unless we already have it and it is correct. If we don't |
7926 | have a POS_RTX but we do have an ORIG_POS_RTX, the latter must |
7927 | be a CONST_INT. */ |
7928 | if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos) |
7929 | pos_rtx = orig_pos_rtx; |
7930 | |
7931 | else if (pos_rtx == 0) |
7932 | pos_rtx = GEN_INT (pos); |
7933 | |
7934 | /* Make the required operation. See if we can use existing rtx. */ |
7935 | new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT, |
7936 | extraction_mode, inner, GEN_INT (len), pos_rtx); |
7937 | if (! in_dest) |
7938 | new_rtx = gen_lowpart (mode, new_rtx); |
7939 | |
7940 | return new_rtx; |
7941 | } |
7942 | |
7943 | /* See if X (of mode MODE) contains an ASHIFT of COUNT or more bits that |
7944 | can be commuted with any other operations in X. Return X without |
7945 | that shift if so. */ |
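
/* A hypothetical instance: with COUNT == 2,

     (plus:SI (ashift:SI X (const_int 2)) (const_int 20))

   commutes to (plus:SI X (const_int 5)), because the low two bits
   of 20 are clear and the inner ASHIFT supplies the shift.  */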
7946 | |
7947 | static rtx |
extract_left_shift (scalar_int_mode mode, rtx x, int count)
7949 | { |
7950 | enum rtx_code code = GET_CODE (x); |
7951 | rtx tem; |
7952 | |
7953 | switch (code) |
7954 | { |
7955 | case ASHIFT: |
7956 | /* This is the shift itself. If it is wide enough, we will return |
7957 | either the value being shifted if the shift count is equal to |
7958 | COUNT or a shift for the difference. */ |
7959 | if (CONST_INT_P (XEXP (x, 1)) |
7960 | && INTVAL (XEXP (x, 1)) >= count) |
7961 | return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0), |
7962 | INTVAL (XEXP (x, 1)) - count); |
7963 | break; |
7964 | |
7965 | case NEG: case NOT: |
7966 | if ((tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0) |
	return simplify_gen_unary (code, mode, tem, mode);
7968 | |
7969 | break; |
7970 | |
7971 | case PLUS: case IOR: case XOR: case AND: |
7972 | /* If we can safely shift this constant and we find the inner shift, |
7973 | make a new operation. */ |
7974 | if (CONST_INT_P (XEXP (x, 1)) |
7975 | && (UINTVAL (XEXP (x, 1)) |
	      & ((HOST_WIDE_INT_1U << count) - 1)) == 0
7977 | && (tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0) |
7978 | { |
7979 | HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count; |
	  return simplify_gen_binary (code, mode, tem,
				      gen_int_mode (val, mode));
7982 | } |
7983 | break; |
7984 | |
7985 | default: |
7986 | break; |
7987 | } |
7988 | |
7989 | return 0; |
7990 | } |
7991 | |
7992 | /* Subroutine of make_compound_operation. *X_PTR is the rtx at the current |
7993 | level of the expression and MODE is its mode. IN_CODE is as for |
7994 | make_compound_operation. *NEXT_CODE_PTR is the value of IN_CODE |
7995 | that should be used when recursing on operands of *X_PTR. |
7996 | |
7997 | There are two possible actions: |
7998 | |
7999 | - Return null. This tells the caller to recurse on *X_PTR with IN_CODE |
8000 | equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value. |
8001 | |
8002 | - Return a new rtx, which the caller returns directly. */ |
8003 | |
8004 | static rtx |
8005 | make_compound_operation_int (scalar_int_mode mode, rtx *x_ptr, |
8006 | enum rtx_code in_code, |
8007 | enum rtx_code *next_code_ptr) |
8008 | { |
8009 | rtx x = *x_ptr; |
8010 | enum rtx_code next_code = *next_code_ptr; |
8011 | enum rtx_code code = GET_CODE (x); |
8012 | int mode_width = GET_MODE_PRECISION (mode); |
8013 | rtx rhs, lhs; |
8014 | rtx new_rtx = 0; |
8015 | int i; |
8016 | rtx tem; |
8017 | scalar_int_mode inner_mode; |
8018 | bool equality_comparison = false; |
8019 | |
8020 | if (in_code == EQ) |
8021 | { |
8022 | equality_comparison = true; |
8023 | in_code = COMPARE; |
8024 | } |
8025 | |
  /* Process depending on the code of this operation.  If NEW_RTX is set,
     it will be returned.  */
8028 | |
8029 | switch (code) |
8030 | { |
8031 | case ASHIFT: |
8032 | /* Convert shifts by constants into multiplications if inside |
8033 | an address. */ |
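      /* E.g. (a hypothetical address computation): inside a MEM,
	 (ashift:SI X (const_int 2)) becomes (mult:SI X (const_int 4)),
	 the canonical form for address arithmetic.  */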
8034 | if (in_code == MEM && CONST_INT_P (XEXP (x, 1)) |
8035 | && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT |
8036 | && INTVAL (XEXP (x, 1)) >= 0) |
8037 | { |
8038 | HOST_WIDE_INT count = INTVAL (XEXP (x, 1)); |
8039 | HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count; |
8040 | |
8041 | new_rtx = make_compound_operation (XEXP (x, 0), next_code); |
8042 | if (GET_CODE (new_rtx) == NEG) |
8043 | { |
8044 | new_rtx = XEXP (new_rtx, 0); |
8045 | multval = -multval; |
8046 | } |
8047 | multval = trunc_int_for_mode (multval, mode); |
8048 | new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode)); |
8049 | } |
8050 | break; |
8051 | |
8052 | case PLUS: |
8053 | lhs = XEXP (x, 0); |
8054 | rhs = XEXP (x, 1); |
8055 | lhs = make_compound_operation (lhs, next_code); |
8056 | rhs = make_compound_operation (rhs, next_code); |
8057 | if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG) |
8058 | { |
	  tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
				     XEXP (lhs, 1));
	  new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
8062 | } |
8063 | else if (GET_CODE (lhs) == MULT |
8064 | && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0)) |
8065 | { |
	  tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
				     simplify_gen_unary (NEG, mode,
							 XEXP (lhs, 1),
							 mode));
	  new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
8071 | } |
8072 | else |
8073 | { |
8074 | SUBST (XEXP (x, 0), lhs); |
8075 | SUBST (XEXP (x, 1), rhs); |
8076 | } |
8077 | maybe_swap_commutative_operands (x); |
8078 | return x; |
8079 | |
8080 | case MINUS: |
8081 | lhs = XEXP (x, 0); |
8082 | rhs = XEXP (x, 1); |
8083 | lhs = make_compound_operation (lhs, next_code); |
8084 | rhs = make_compound_operation (rhs, next_code); |
8085 | if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG) |
8086 | { |
	  tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
				     XEXP (rhs, 1));
	  return simplify_gen_binary (PLUS, mode, tem, lhs);
8090 | } |
8091 | else if (GET_CODE (rhs) == MULT |
8092 | && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0)) |
8093 | { |
	  tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
				     simplify_gen_unary (NEG, mode,
							 XEXP (rhs, 1),
							 mode));
	  return simplify_gen_binary (PLUS, mode, tem, lhs);
8099 | } |
8100 | else |
8101 | { |
8102 | SUBST (XEXP (x, 0), lhs); |
8103 | SUBST (XEXP (x, 1), rhs); |
8104 | return x; |
8105 | } |
8106 | |
8107 | case AND: |
8108 | /* If the second operand is not a constant, we can't do anything |
8109 | with it. */ |
8110 | if (!CONST_INT_P (XEXP (x, 1))) |
8111 | break; |
8112 | |
8113 | /* If the constant is a power of two minus one and the first operand |
8114 | is a logical right shift, make an extraction. */ |
8115 | if (GET_CODE (XEXP (x, 0)) == LSHIFTRT |
8116 | && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0) |
8117 | { |
8118 | new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code); |
	  new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1),
				     i, true, false, in_code == COMPARE);
8121 | } |
8122 | |
8123 | /* Same as previous, but for (subreg (lshiftrt ...)) in first op. */ |
8124 | else if (GET_CODE (XEXP (x, 0)) == SUBREG |
8125 | && subreg_lowpart_p (XEXP (x, 0)) |
	       && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (XEXP (x, 0))),
					  &inner_mode)
8128 | && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT |
8129 | && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0) |
8130 | { |
8131 | rtx inner_x0 = SUBREG_REG (XEXP (x, 0)); |
8132 | new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code); |
	  new_rtx = make_extraction (inner_mode, new_rtx, 0,
				     XEXP (inner_x0, 1),
				     i, true, false, in_code == COMPARE);
8136 | |
8137 | /* If we narrowed the mode when dropping the subreg, then we lose. */ |
	  if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode))
8139 | new_rtx = NULL; |
8140 | |
8141 | /* If that didn't give anything, see if the AND simplifies on |
8142 | its own. */ |
8143 | if (!new_rtx && i >= 0) |
8144 | { |
8145 | new_rtx = make_compound_operation (XEXP (x, 0), next_code); |
	      new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i,
					 true, false, in_code == COMPARE);
8148 | } |
8149 | } |
8150 | /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)). */ |
8151 | else if ((GET_CODE (XEXP (x, 0)) == XOR |
8152 | || GET_CODE (XEXP (x, 0)) == IOR) |
8153 | && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT |
8154 | && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT |
8155 | && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0) |
8156 | { |
8157 | /* Apply the distributive law, and then try to make extractions. */ |
8158 | new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode, |
8159 | gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0), |
8160 | XEXP (x, 1)), |
8161 | gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1), |
8162 | XEXP (x, 1))); |
8163 | new_rtx = make_compound_operation (new_rtx, in_code); |
8164 | } |
8165 | |
      /* If we have (and (rotate X C) M) and C is larger than the number
8167 | of bits in M, this is an extraction. */ |
8168 | |
8169 | else if (GET_CODE (XEXP (x, 0)) == ROTATE |
8170 | && CONST_INT_P (XEXP (XEXP (x, 0), 1)) |
8171 | && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0 |
8172 | && i <= INTVAL (XEXP (XEXP (x, 0), 1))) |
8173 | { |
8174 | new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code); |
	  new_rtx = make_extraction (mode, new_rtx,
				     (GET_MODE_PRECISION (mode)
				      - INTVAL (XEXP (XEXP (x, 0), 1))),
				     NULL_RTX, i, true, false,
				     in_code == COMPARE);
8180 | } |
8181 | |
8182 | /* On machines without logical shifts, if the operand of the AND is |
8183 | a logical shift and our mask turns off all the propagated sign |
8184 | bits, we can replace the logical shift with an arithmetic shift. */ |
8185 | else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT |
8186 | && !have_insn_for (LSHIFTRT, mode) |
8187 | && have_insn_for (ASHIFTRT, mode) |
8188 | && CONST_INT_P (XEXP (XEXP (x, 0), 1)) |
8189 | && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0 |
8190 | && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT |
8191 | && mode_width <= HOST_BITS_PER_WIDE_INT) |
8192 | { |
8193 | unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode); |
8194 | |
8195 | mask >>= INTVAL (XEXP (XEXP (x, 0), 1)); |
8196 | if ((INTVAL (XEXP (x, 1)) & ~mask) == 0) |
8197 | SUBST (XEXP (x, 0), |
8198 | gen_rtx_ASHIFTRT (mode, |
8199 | make_compound_operation (XEXP (XEXP (x, |
8200 | 0), |
8201 | 0), |
8202 | next_code), |
8203 | XEXP (XEXP (x, 0), 1))); |
8204 | } |
8205 | |
8206 | /* If the constant is one less than a power of two, this might be |
8207 | representable by an extraction even if no shift is present. |
8208 | If it doesn't end up being a ZERO_EXTEND, we will ignore it unless |
8209 | we are in a COMPARE. */ |
8210 | else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0) |
	new_rtx = make_extraction (mode,
				   make_compound_operation (XEXP (x, 0),
							    next_code),
				   0, NULL_RTX, i,
				   true, false, in_code == COMPARE);
8216 | |
8217 | /* If we are in a comparison and this is an AND with a power of two, |
8218 | convert this into the appropriate bit extract. */ |
8219 | else if (in_code == COMPARE |
8220 | && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0 |
8221 | && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1)) |
	new_rtx = make_extraction (mode,
				   make_compound_operation (XEXP (x, 0),
							    next_code),
				   i, NULL_RTX, 1, true, false, true);
8226 | |
8227 | /* If the one operand is a paradoxical subreg of a register or memory and |
8228 | the constant (limited to the smaller mode) has only zero bits where |
8229 | the sub expression has known zero bits, this can be expressed as |
8230 | a zero_extend. */ |
8231 | else if (GET_CODE (XEXP (x, 0)) == SUBREG) |
8232 | { |
8233 | rtx sub; |
8234 | |
8235 | sub = XEXP (XEXP (x, 0), 0); |
8236 | machine_mode sub_mode = GET_MODE (sub); |
8237 | int sub_width; |
8238 | if ((REG_P (sub) || MEM_P (sub)) |
	      && GET_MODE_PRECISION (sub_mode).is_constant (&sub_width)
8240 | && sub_width < mode_width |
8241 | && (!WORD_REGISTER_OPERATIONS |
8242 | || sub_width >= BITS_PER_WORD |
8243 | /* On WORD_REGISTER_OPERATIONS targets the bits |
8244 | beyond sub_mode aren't considered undefined, |
8245 | so optimize only if it is a MEM load when MEM loads |
8246 | zero extend, because then the upper bits are all zero. */ |
8247 | || (MEM_P (sub) |
		      && load_extend_op (sub_mode) == ZERO_EXTEND)))
8249 | { |
8250 | unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode); |
8251 | unsigned HOST_WIDE_INT mask; |
8252 | |
8253 | /* Original AND constant with all the known zero bits set. */ |
8254 | mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode)); |
8255 | if ((mask & mode_mask) == mode_mask) |
8256 | { |
8257 | new_rtx = make_compound_operation (sub, next_code); |
		  new_rtx = make_extraction (mode, new_rtx, 0, 0, sub_width,
					     true, false, in_code == COMPARE);
8260 | } |
8261 | } |
8262 | } |
8263 | |
8264 | break; |
8265 | |
8266 | case LSHIFTRT: |
8267 | /* If the sign bit is known to be zero, replace this with an |
8268 | arithmetic shift. */ |
8269 | if (have_insn_for (ASHIFTRT, mode) |
8270 | && ! have_insn_for (LSHIFTRT, mode) |
8271 | && mode_width <= HOST_BITS_PER_WIDE_INT |
	&& (nonzero_bits (XEXP (x, 0), mode)
	    & (HOST_WIDE_INT_1U << (mode_width - 1))) == 0)
8273 | { |
8274 | new_rtx = gen_rtx_ASHIFTRT (mode, |
8275 | make_compound_operation (XEXP (x, 0), |
8276 | next_code), |
8277 | XEXP (x, 1)); |
8278 | break; |
8279 | } |
8280 | |
8281 | /* fall through */ |
8282 | |
8283 | case ASHIFTRT: |
8284 | lhs = XEXP (x, 0); |
8285 | rhs = XEXP (x, 1); |
8286 | |
8287 | /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1, |
8288 | this is a SIGN_EXTRACT. */ |
8289 | if (CONST_INT_P (rhs) |
8290 | && GET_CODE (lhs) == ASHIFT |
8291 | && CONST_INT_P (XEXP (lhs, 1)) |
8292 | && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1)) |
8293 | && INTVAL (XEXP (lhs, 1)) >= 0 |
8294 | && INTVAL (rhs) < mode_width) |
8295 | { |
8296 | new_rtx = make_compound_operation (XEXP (lhs, 0), next_code); |
	  new_rtx = make_extraction (mode, new_rtx,
				     INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
				     NULL_RTX, mode_width - INTVAL (rhs),
				     code == LSHIFTRT, false,
				     in_code == COMPARE);
8302 | break; |
8303 | } |
8304 | |
8305 | /* See if we have operations between an ASHIFTRT and an ASHIFT. |
8306 | If so, try to merge the shifts into a SIGN_EXTEND. We could |
8307 | also do this for some cases of SIGN_EXTRACT, but it doesn't |
8308 | seem worth the effort; the case checked for occurs on Alpha. */ |
8309 | |
8310 | if (!OBJECT_P (lhs) |
8311 | && ! (GET_CODE (lhs) == SUBREG |
8312 | && (OBJECT_P (SUBREG_REG (lhs)))) |
8313 | && CONST_INT_P (rhs) |
8314 | && INTVAL (rhs) >= 0 |
8315 | && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT |
8316 | && INTVAL (rhs) < mode_width |
	&& (new_rtx = extract_left_shift (mode, lhs, INTVAL (rhs))) != 0)
      new_rtx = make_extraction (mode, make_compound_operation (new_rtx,
								next_code),
				 0, NULL_RTX, mode_width - INTVAL (rhs),
				 code == LSHIFTRT, false, in_code == COMPARE);
8322 | |
8323 | break; |
8324 | |
8325 | case SUBREG: |
8326 | /* Call ourselves recursively on the inner expression. If we are |
8327 | narrowing the object and it has a different RTL code from |
8328 | what it originally did, do this SUBREG as a force_to_mode. */ |
8329 | { |
8330 | rtx inner = SUBREG_REG (x), simplified; |
8331 | enum rtx_code subreg_code = in_code; |
8332 | |
8333 | /* If the SUBREG is masking of a logical right shift, |
8334 | make an extraction. */ |
8335 | if (GET_CODE (inner) == LSHIFTRT |
	  && is_a <scalar_int_mode> (GET_MODE (inner), &inner_mode)
	  && GET_MODE_SIZE (mode) < GET_MODE_SIZE (inner_mode)
	  && CONST_INT_P (XEXP (inner, 1))
	  && UINTVAL (XEXP (inner, 1)) < GET_MODE_PRECISION (inner_mode)
8340 | && subreg_lowpart_p (x)) |
8341 | { |
8342 | new_rtx = make_compound_operation (XEXP (inner, 0), next_code); |
	  int width = GET_MODE_PRECISION (inner_mode)
8344 | - INTVAL (XEXP (inner, 1)); |
8345 | if (width > mode_width) |
8346 | width = mode_width; |
	  new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1),
				     width, true, false, in_code == COMPARE);
8349 | break; |
8350 | } |
8351 | |
8352 | /* If in_code is COMPARE, it isn't always safe to pass it through |
8353 | to the recursive make_compound_operation call. */ |
8354 | if (subreg_code == COMPARE |
8355 | && (!subreg_lowpart_p (x) |
8356 | || GET_CODE (inner) == SUBREG |
8357 | /* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0) |
8358 | is (const_int 0), rather than |
8359 | (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0). |
8360 | Similarly (subreg:QI (and:SI (reg:SI) (const_int 0x80)) 0) |
8361 | for non-equality comparisons against 0 is not equivalent |
8362 | to (subreg:QI (lshiftrt:SI (reg:SI) (const_int 7)) 0). */ |
8363 | || (GET_CODE (inner) == AND |
8364 | && CONST_INT_P (XEXP (inner, 1)) |
8365 | && partial_subreg_p (x) |
8366 | && exact_log2 (UINTVAL (XEXP (inner, 1))) |
8367 | >= GET_MODE_BITSIZE (mode) - 1))) |
8368 | subreg_code = SET; |
8369 | |
8370 | tem = make_compound_operation (inner, subreg_code); |
8371 | |
8372 | simplified |
	  = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
8374 | if (simplified) |
8375 | tem = simplified; |
8376 | |
8377 | if (GET_CODE (tem) != GET_CODE (inner) |
8378 | && partial_subreg_p (x) |
8379 | && subreg_lowpart_p (x)) |
8380 | { |
8381 | rtx newer |
8382 | = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, false); |
8383 | |
8384 | /* If we have something other than a SUBREG, we might have |
8385 | done an expansion, so rerun ourselves. */ |
8386 | if (GET_CODE (newer) != SUBREG) |
8387 | newer = make_compound_operation (newer, in_code); |
8388 | |
8389 | /* force_to_mode can expand compounds. If it just re-expanded |
8390 | the compound, use gen_lowpart to convert to the desired |
8391 | mode. */ |
8392 | if (rtx_equal_p (newer, x) |
8393 | /* Likewise if it re-expanded the compound only partially. |
8394 | This happens for SUBREG of ZERO_EXTRACT if they extract |
8395 | the same number of bits. */ |
8396 | || (GET_CODE (newer) == SUBREG |
8397 | && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT |
8398 | || GET_CODE (SUBREG_REG (newer)) == ASHIFTRT) |
8399 | && GET_CODE (inner) == AND |
8400 | && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0)))) |
8401 | return gen_lowpart (GET_MODE (x), tem); |
8402 | |
8403 | return newer; |
8404 | } |
8405 | |
8406 | if (simplified) |
8407 | return tem; |
8408 | } |
8409 | break; |
8410 | |
8411 | default: |
8412 | break; |
8413 | } |
8414 | |
8415 | if (new_rtx) |
8416 | *x_ptr = gen_lowpart (mode, new_rtx); |
8417 | *next_code_ptr = next_code; |
8418 | return NULL_RTX; |
8419 | } |
8420 | |
8421 | /* Look at the expression rooted at X. Look for expressions |
8422 | equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND. |
8423 | Form these expressions. |
8424 | |
8425 | Return the new rtx, usually just X. |
8426 | |
8427 | Also, for machines like the VAX that don't have logical shift insns, |
8428 | try to convert logical to arithmetic shift operations in cases where |
8429 | they are equivalent. This undoes the canonicalizations to logical |
8430 | shifts done elsewhere. |
8431 | |
8432 | We try, as much as possible, to re-use rtl expressions to save memory. |
8433 | |
8434 | IN_CODE says what kind of expression we are processing. Normally, it is |
8435 | SET. In a memory address it is MEM. When processing the arguments of |
8436 | a comparison or a COMPARE against zero, it is COMPARE, or EQ if more |
8437 | precisely it is an equality comparison against zero. */ |
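
/* One illustrative (hypothetical) transformation: with IN_CODE == SET,

     (and:SI (lshiftrt:SI X (const_int 3)) (const_int 15))

   is recognized as the compound operation

     (zero_extract:SI X (const_int 4) (const_int 3)).  */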
8438 | |
8439 | rtx |
8440 | make_compound_operation (rtx x, enum rtx_code in_code) |
8441 | { |
8442 | enum rtx_code code = GET_CODE (x); |
8443 | const char *fmt; |
8444 | int i, j; |
8445 | enum rtx_code next_code; |
8446 | rtx new_rtx, tem; |
8447 | |
8448 | /* Select the code to be used in recursive calls. Once we are inside an |
8449 | address, we stay there. If we have a comparison, set to COMPARE, |
8450 | but once inside, go back to our default of SET. */ |
8451 | |
8452 | next_code = (code == MEM ? MEM |
8453 | : ((code == COMPARE || COMPARISON_P (x)) |
8454 | && XEXP (x, 1) == const0_rtx) ? COMPARE |
8455 | : in_code == COMPARE || in_code == EQ ? SET : in_code); |
8456 | |
8457 | scalar_int_mode mode; |
  if (is_a <scalar_int_mode> (GET_MODE (x), &mode))
8459 | { |
      rtx new_rtx = make_compound_operation_int (mode, &x, in_code,
						 &next_code);
8462 | if (new_rtx) |
8463 | return new_rtx; |
8464 | code = GET_CODE (x); |
8465 | } |
8466 | |
8467 | /* Now recursively process each operand of this operation. We need to |
8468 | handle ZERO_EXTEND specially so that we don't lose track of the |
8469 | inner mode. */ |
8470 | if (code == ZERO_EXTEND) |
8471 | { |
      new_rtx = make_compound_operation (XEXP (x, 0), next_code);
      tem = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
				      new_rtx, GET_MODE (XEXP (x, 0)));
8475 | if (tem) |
8476 | return tem; |
8477 | SUBST (XEXP (x, 0), new_rtx); |
8478 | return x; |
8479 | } |
8480 | |
8481 | fmt = GET_RTX_FORMAT (code); |
8482 | for (i = 0; i < GET_RTX_LENGTH (code); i++) |
8483 | if (fmt[i] == 'e') |
8484 | { |
	new_rtx = make_compound_operation (XEXP (x, i), next_code);
8486 | SUBST (XEXP (x, i), new_rtx); |
8487 | } |
8488 | else if (fmt[i] == 'E') |
8489 | for (j = 0; j < XVECLEN (x, i); j++) |
8490 | { |
	new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
8492 | SUBST (XVECEXP (x, i, j), new_rtx); |
8493 | } |
8494 | |
8495 | maybe_swap_commutative_operands (x); |
8496 | return x; |
8497 | } |
8498 | |
8499 | /* Given M see if it is a value that would select a field of bits |
8500 | within an item, but not the entire word. Return -1 if not. |
8501 | Otherwise, return the starting position of the field, where 0 is the |
8502 | low-order bit. |
8503 | |
8504 | *PLEN is set to the length of the field. */ |
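
/* For example (hypothetical masks): M == 0xf0 gives position 4 with
   *PLEN == 4, while M == 0xf1 gives -1 because its set bits are not
   contiguous.  */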
8505 | |
8506 | static int |
8507 | get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen) |
8508 | { |
8509 | /* Get the bit number of the first 1 bit from the right, -1 if none. */ |
  int pos = m ? ctz_hwi (m) : -1;
8511 | int len = 0; |
8512 | |
8513 | if (pos >= 0) |
8514 | /* Now shift off the low-order zero bits and see if we have a |
8515 | power of two minus 1. */ |
    len = exact_log2 ((m >> pos) + 1);
8517 | |
8518 | if (len <= 0) |
8519 | pos = -1; |
8520 | |
8521 | *plen = len; |
8522 | return pos; |
8523 | } |
8524 | |
8525 | /* If X refers to a register that equals REG in value, replace these |
8526 | references with REG. */ |
8527 | static rtx |
8528 | canon_reg_for_combine (rtx x, rtx reg) |
8529 | { |
8530 | rtx op0, op1, op2; |
8531 | const char *fmt; |
8532 | int i; |
8533 | bool copied; |
8534 | |
8535 | enum rtx_code code = GET_CODE (x); |
8536 | switch (GET_RTX_CLASS (code)) |
8537 | { |
8538 | case RTX_UNARY: |
8539 | op0 = canon_reg_for_combine (XEXP (x, 0), reg); |
8540 | if (op0 != XEXP (x, 0)) |
      return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
8542 | GET_MODE (reg)); |
8543 | break; |
8544 | |
8545 | case RTX_BIN_ARITH: |
8546 | case RTX_COMM_ARITH: |
8547 | op0 = canon_reg_for_combine (XEXP (x, 0), reg); |
8548 | op1 = canon_reg_for_combine (XEXP (x, 1), reg); |
8549 | if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1)) |
8550 | return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1); |
8551 | break; |
8552 | |
8553 | case RTX_COMPARE: |
8554 | case RTX_COMM_COMPARE: |
8555 | op0 = canon_reg_for_combine (XEXP (x, 0), reg); |
8556 | op1 = canon_reg_for_combine (XEXP (x, 1), reg); |
8557 | if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1)) |
8558 | return simplify_gen_relational (GET_CODE (x), GET_MODE (x), |
8559 | GET_MODE (op0), op0, op1); |
8560 | break; |
8561 | |
8562 | case RTX_TERNARY: |
8563 | case RTX_BITFIELD_OPS: |
8564 | op0 = canon_reg_for_combine (XEXP (x, 0), reg); |
8565 | op1 = canon_reg_for_combine (XEXP (x, 1), reg); |
8566 | op2 = canon_reg_for_combine (XEXP (x, 2), reg); |
8567 | if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2)) |
8568 | return simplify_gen_ternary (GET_CODE (x), GET_MODE (x), |
8569 | GET_MODE (op0), op0, op1, op2); |
8570 | /* FALLTHRU */ |
8571 | |
8572 | case RTX_OBJ: |
8573 | if (REG_P (x)) |
8574 | { |
8575 | if (rtx_equal_p (get_last_value (reg), x) |
8576 | || rtx_equal_p (reg, get_last_value (x))) |
8577 | return reg; |
8578 | else |
8579 | break; |
8580 | } |
8581 | |
8582 | /* fall through */ |
8583 | |
8584 | default: |
8585 | fmt = GET_RTX_FORMAT (code); |
8586 | copied = false; |
8587 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
8588 | if (fmt[i] == 'e') |
8589 | { |
8590 | rtx op = canon_reg_for_combine (XEXP (x, i), reg); |
8591 | if (op != XEXP (x, i)) |
8592 | { |
8593 | if (!copied) |
8594 | { |
8595 | copied = true; |
8596 | x = copy_rtx (x); |
8597 | } |
8598 | XEXP (x, i) = op; |
8599 | } |
8600 | } |
8601 | else if (fmt[i] == 'E') |
8602 | { |
8603 | int j; |
8604 | for (j = 0; j < XVECLEN (x, i); j++) |
8605 | { |
8606 | rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg); |
8607 | if (op != XVECEXP (x, i, j)) |
8608 | { |
8609 | if (!copied) |
8610 | { |
8611 | copied = true; |
8612 | x = copy_rtx (x); |
8613 | } |
8614 | XVECEXP (x, i, j) = op; |
8615 | } |
8616 | } |
8617 | } |
8618 | |
8619 | break; |
8620 | } |
8621 | |
8622 | return x; |
8623 | } |
8624 | |
8625 | /* Return X converted to MODE. If the value is already truncated to |
8626 | MODE we can just return a subreg even though in the general case we |
8627 | would need an explicit truncation. */ |
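
/* E.g. (hypothetical modes): narrowing a DImode register already known
   to be truncated to SImode can simply use (subreg:SI R 0); otherwise
   an explicit (truncate:SI R) is generated first.  */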
8628 | |
8629 | static rtx |
8630 | gen_lowpart_or_truncate (machine_mode mode, rtx x) |
8631 | { |
8632 | if (!CONST_INT_P (x) |
      && partial_subreg_p (mode, GET_MODE (x))
8634 | && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x)) |
8635 | && !(REG_P (x) && reg_truncated_to_mode (mode, x))) |
8636 | { |
8637 | /* Bit-cast X into an integer mode. */ |
8638 | if (!SCALAR_INT_MODE_P (GET_MODE (x))) |
8639 | x = gen_lowpart (int_mode_for_mode (GET_MODE (x)).require (), x); |
      x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode).require (),
			      x, GET_MODE (x));
8642 | } |
8643 | |
8644 | return gen_lowpart (mode, x); |
8645 | } |
8646 | |
8647 | /* See if X can be simplified knowing that we will only refer to it in |
8648 | MODE and will only refer to those bits that are nonzero in MASK. |
8649 | If other bits are being computed or if masking operations are done |
8650 | that select a superset of the bits in MASK, they can sometimes be |
8651 | ignored. |
8652 | |
8653 | Return a possibly simplified expression, but always convert X to |
8654 | MODE. If X is a CONST_INT, AND the CONST_INT with MASK. |
8655 | |
8656 | If JUST_SELECT is true, don't optimize by noticing that bits in MASK |
8657 | are all off in X. This is used when X will be complemented, by either |
8658 | NOT, NEG, or XOR. */ |
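
/* Two hypothetical illustrations: simplifying
   (and:SI Y (const_int 0xff)) under MASK == 0x0f may narrow or drop
   the AND, since only bits that MASK selects matter; under
   MASK == 0xff00 the whole expression is known to be zero and folds
   to (const_int 0).  */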
8659 | |
8660 | static rtx |
8661 | force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask, |
8662 | bool just_select) |
8663 | { |
8664 | enum rtx_code code = GET_CODE (x); |
8665 | bool next_select = just_select || code == XOR || code == NOT || code == NEG; |
8666 | machine_mode op_mode; |
8667 | unsigned HOST_WIDE_INT nonzero; |
8668 | |
8669 | /* If this is a CALL or ASM_OPERANDS, don't do anything. Some of the |
8670 | code below will do the wrong thing since the mode of such an |
8671 | expression is VOIDmode. |
8672 | |
8673 | Also do nothing if X is a CLOBBER; this can happen if X was |
8674 | the return value from a call to gen_lowpart. */ |
8675 | if (code == CALL || code == ASM_OPERANDS || code == CLOBBER) |
8676 | return x; |
8677 | |
8678 | /* We want to perform the operation in its present mode unless we know |
8679 | that the operation is valid in MODE, in which case we do the operation |
8680 | in MODE. */ |
8681 | op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x)) |
8682 | && have_insn_for (code, mode)) |
8683 | ? mode : GET_MODE (x)); |
8684 | |
8685 | /* It is not valid to do a right-shift in a narrower mode |
8686 | than the one it came in with. */ |
8687 | if ((code == LSHIFTRT || code == ASHIFTRT) |
      && partial_subreg_p (mode, GET_MODE (x)))
8689 | op_mode = GET_MODE (x); |
8690 | |
8691 | /* Truncate MASK to fit OP_MODE. */ |
8692 | if (op_mode) |
8693 | mask &= GET_MODE_MASK (op_mode); |
8694 | |
8695 | /* Determine what bits of X are guaranteed to be (non)zero. */ |
8696 | nonzero = nonzero_bits (x, mode); |
8697 | |
8698 | /* If none of the bits in X are needed, return a zero. */ |
8699 | if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x)) |
8700 | x = const0_rtx; |
8701 | |
8702 | /* If X is a CONST_INT, return a new one. Do this here since the |
8703 | test below will fail. */ |
8704 | if (CONST_INT_P (x)) |
8705 | { |
8706 | if (SCALAR_INT_MODE_P (mode)) |
8707 | return gen_int_mode (INTVAL (x) & mask, mode); |
8708 | else |
8709 | { |
8710 | x = GEN_INT (INTVAL (x) & mask); |
8711 | return gen_lowpart_common (mode, x); |
8712 | } |
8713 | } |
8714 | |
8715 | /* If X is narrower than MODE and we want all the bits in X's mode, just |
8716 | get X in the proper mode. */ |
  if (paradoxical_subreg_p (mode, GET_MODE (x))
8718 | && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0) |
8719 | return gen_lowpart (mode, x); |
8720 | |
8721 | /* We can ignore the effect of a SUBREG if it narrows the mode or |
8722 | if the constant masks to zero all the bits the mode doesn't have. */ |
8723 | if (GET_CODE (x) == SUBREG |
8724 | && subreg_lowpart_p (x) |
8725 | && (partial_subreg_p (x) |
8726 | || (mask |
8727 | & GET_MODE_MASK (GET_MODE (x)) |
8728 | & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))) == 0)) |
    return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
8730 | |
8731 | scalar_int_mode int_mode, xmode; |
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (GET_MODE (x), &xmode))
8734 | /* OP_MODE is either MODE or XMODE, so it must be a scalar |
8735 | integer too. */ |
8736 | return force_int_to_mode (x, int_mode, xmode, |
			      as_a <scalar_int_mode> (op_mode),
8738 | mask, just_select); |
8739 | |
8740 | return gen_lowpart_or_truncate (mode, x); |
8741 | } |
8742 | |
8743 | /* Subroutine of force_to_mode that handles cases in which both X and |
8744 | the result are scalar integers. MODE is the mode of the result, |
8745 | XMODE is the mode of X, and OP_MODE says which of MODE or XMODE |
8746 | is preferred for simplified versions of X. The other arguments |
8747 | are as for force_to_mode. */ |
8748 | |
8749 | static rtx |
8750 | force_int_to_mode (rtx x, scalar_int_mode mode, scalar_int_mode xmode, |
8751 | scalar_int_mode op_mode, unsigned HOST_WIDE_INT mask, |
8752 | bool just_select) |
8753 | { |
8754 | enum rtx_code code = GET_CODE (x); |
8755 | bool next_select = just_select || code == XOR || code == NOT || code == NEG; |
8756 | unsigned HOST_WIDE_INT fuller_mask; |
8757 | rtx op0, op1, temp; |
8758 | poly_int64 const_op0; |
8759 | |
8760 | /* When we have an arithmetic operation, or a shift whose count we |
8761 | do not know, we need to assume that all bits up to the highest-order |
8762 | bit in MASK will be needed. This is how we form such a mask. */ |
8763 | if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1))) |
8764 | fuller_mask = HOST_WIDE_INT_M1U; |
8765 | else |
    fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1)) - 1);
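
  /* For instance, MASK == 0x14 (binary 10100) gives
     FULLER_MASK == 0x1f: a carry into bit 4 can originate in any of
     bits 0-3, so all of them may matter.  */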
8767 | |
8768 | switch (code) |
8769 | { |
8770 | case CLOBBER: |
8771 | /* If X is a (clobber (const_int)), return it since we know we are |
8772 | generating something that won't match. */ |
8773 | return x; |
8774 | |
8775 | case SIGN_EXTEND: |
8776 | case ZERO_EXTEND: |
8777 | case ZERO_EXTRACT: |
8778 | case SIGN_EXTRACT: |
8779 | x = expand_compound_operation (x); |
8780 | if (GET_CODE (x) != code) |
	return force_to_mode (x, mode, mask, next_select);
8782 | break; |
8783 | |
8784 | case TRUNCATE: |
8785 | /* Similarly for a truncate. */ |
      return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8787 | |
8788 | case AND: |
8789 | /* If this is an AND with a constant, convert it into an AND |
8790 | whose constant is the AND of that constant with MASK. If it |
8791 | remains an AND of MASK, delete it since it is redundant. */ |
8792 | |
8793 | if (CONST_INT_P (XEXP (x, 1))) |
8794 | { |
8795 | x = simplify_and_const_int (x, op_mode, XEXP (x, 0), |
8796 | mask & INTVAL (XEXP (x, 1))); |
8797 | xmode = op_mode; |
8798 | |
8799 | /* If X is still an AND, see if it is an AND with a mask that |
8800 | is just some low-order bits. If so, and it is MASK, we don't |
8801 | need it. */ |
8802 | |
8803 | if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1)) |
8804 | && (INTVAL (XEXP (x, 1)) & GET_MODE_MASK (xmode)) == mask) |
8805 | x = XEXP (x, 0); |
8806 | |
8807 | /* If it remains an AND, try making another AND with the bits |
8808 | in the mode mask that aren't in MASK turned on. If the |
8809 | constant in the AND is wide enough, this might make a |
8810 | cheaper constant. */ |
8811 | |
8812 | if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1)) |
8813 | && GET_MODE_MASK (xmode) != mask |
	      && HWI_COMPUTABLE_MODE_P (xmode))
8815 | { |
8816 | unsigned HOST_WIDE_INT cval |
8817 | = UINTVAL (XEXP (x, 1)) | (GET_MODE_MASK (xmode) & ~mask); |
8818 | rtx y; |
8819 | |
	      y = simplify_gen_binary (AND, xmode, XEXP (x, 0),
				       gen_int_mode (cval, xmode));
	      if (set_src_cost (y, xmode, optimize_this_for_speed_p)
		  < set_src_cost (x, xmode, optimize_this_for_speed_p))
8824 | x = y; |
8825 | } |
8826 | |
8827 | break; |
8828 | } |
8829 | |
8830 | goto binop; |
8831 | |
8832 | case PLUS: |
8833 | /* In (and (plus FOO C1) M), if M is a mask that just turns off |
8834 | low-order bits (as in an alignment operation) and FOO is already |
8835 | aligned to that boundary, mask C1 to that boundary as well. |
8836 | This may eliminate that PLUS and, later, the AND. */ |
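
      /* An illustrative instance: if FOO is known to be a multiple of 4,
	 (and (plus FOO (const_int 7)) (const_int -4)) can become
	 (and (plus FOO (const_int 4)) (const_int -4)), since bits 0 and 1
	 of the PLUS constant cannot survive the masking.  */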
8837 | |
8838 | { |
8839 | unsigned int width = GET_MODE_PRECISION (mode); |
8840 | unsigned HOST_WIDE_INT smask = mask; |
8841 | |
8842 | /* If MODE is narrower than HOST_WIDE_INT and mask is a negative |
8843 | number, sign extend it. */ |
8844 | |
8845 | if (width < HOST_BITS_PER_WIDE_INT |
8846 | && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0) |
8847 | smask |= HOST_WIDE_INT_M1U << width; |
8848 | |
8849 | if (CONST_INT_P (XEXP (x, 1)) |
	    && pow2p_hwi (- smask)
8851 | && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0 |
8852 | && (INTVAL (XEXP (x, 1)) & ~smask) != 0) |
	  return force_to_mode (plus_constant (xmode, XEXP (x, 0),
					       (INTVAL (XEXP (x, 1)) & smask)),
				mode, smask, next_select);
8856 | } |
8857 | |
8858 | /* fall through */ |
8859 | |
8860 | case MULT: |
8861 | /* Substituting into the operands of a widening MULT is not likely to |
8862 | create RTL matching a machine insn. */ |
8863 | if (code == MULT |
8864 | && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND |
8865 | || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND) |
8866 | && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND |
8867 | || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND) |
8868 | && REG_P (XEXP (XEXP (x, 0), 0)) |
8869 | && REG_P (XEXP (XEXP (x, 1), 0))) |
8870 | return gen_lowpart_or_truncate (mode, x); |
8871 | |
8872 | /* For PLUS, MINUS and MULT, we need any bits less significant than the |
8873 | most significant bit in MASK since carries from those bits will |
8874 | affect the bits we are interested in. */ |
8875 | mask = fuller_mask; |
8876 | goto binop; |
8877 | |
8878 | case MINUS: |
8879 | /* If X is (minus C Y) where C's least set bit is larger than any bit |
8880 | in the mask, then we may replace with (neg Y). */ |
      if (poly_int_rtx_p (XEXP (x, 0), &const_op0)
	  && known_alignment (poly_uint64 (const_op0)) > mask)
	{
	  x = simplify_gen_unary (NEG, xmode, XEXP (x, 1), xmode);
	  return force_to_mode (x, mode, mask, next_select);
8886 | } |
8887 | |
8888 | /* Similarly, if C contains every bit in the fuller_mask, then we may |
8889 | replace with (not Y). */ |
8890 | if (CONST_INT_P (XEXP (x, 0)) |
8891 | && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0)))) |
8892 | { |
	  x = simplify_gen_unary (NOT, xmode, XEXP (x, 1), xmode);
	  return force_to_mode (x, mode, mask, next_select);
8895 | } |
8896 | |
8897 | mask = fuller_mask; |
8898 | goto binop; |
8899 | |
8900 | case IOR: |
8901 | case XOR: |
8902 | /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and |
8903 | LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...) |
8904 | operation which may be a bitfield extraction. Ensure that the |
8905 | constant we form is not wider than the mode of X. */ |
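
      /* For example, (ior (lshiftrt FOO (const_int 3)) (const_int 2))
	 becomes (lshiftrt (ior FOO (const_int 16)) (const_int 3)); the
	 identity holds because a logical shift distributes over IOR.  */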
8906 | |
8907 | if (GET_CODE (XEXP (x, 0)) == LSHIFTRT |
8908 | && CONST_INT_P (XEXP (XEXP (x, 0), 1)) |
8909 | && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0 |
8910 | && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT |
8911 | && CONST_INT_P (XEXP (x, 1)) |
8912 | && ((INTVAL (XEXP (XEXP (x, 0), 1)) |
8913 | + floor_log2 (INTVAL (XEXP (x, 1)))) |
	       < GET_MODE_PRECISION (xmode))
8915 | && (UINTVAL (XEXP (x, 1)) |
8916 | & ~nonzero_bits (XEXP (x, 0), xmode)) == 0) |
8917 | { |
8918 | temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask) |
8919 | << INTVAL (XEXP (XEXP (x, 0), 1)), |
8920 | xmode); |
	  temp = simplify_gen_binary (GET_CODE (x), xmode,
				      XEXP (XEXP (x, 0), 0), temp);
	  x = simplify_gen_binary (LSHIFTRT, xmode, temp,
				   XEXP (XEXP (x, 0), 1));
	  return force_to_mode (x, mode, mask, next_select);
8926 | } |
8927 | |
8928 | binop: |
8929 | /* For most binary operations, just propagate into the operation and |
8930 | change the mode if we have an operation of that mode. */ |
8931 | |
      op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
      op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);
8934 | |
8935 | /* If we ended up truncating both operands, truncate the result of the |
8936 | operation instead. */ |
8937 | if (GET_CODE (op0) == TRUNCATE |
8938 | && GET_CODE (op1) == TRUNCATE) |
8939 | { |
8940 | op0 = XEXP (op0, 0); |
8941 | op1 = XEXP (op1, 0); |
8942 | } |
8943 | |
      op0 = gen_lowpart_or_truncate (op_mode, op0);
      op1 = gen_lowpart_or_truncate (op_mode, op1);
8946 | |
8947 | if (op_mode != xmode || op0 != XEXP (x, 0) || op1 != XEXP (x, 1)) |
8948 | { |
	  x = simplify_gen_binary (code, op_mode, op0, op1);
8950 | xmode = op_mode; |
8951 | } |
8952 | break; |
8953 | |
8954 | case ASHIFT: |
8955 | /* For left shifts, do the same, but just for the first operand. |
8956 | However, we cannot do anything with shifts where we cannot |
8957 | guarantee that the counts are smaller than the size of the mode |
8958 | because such a count will have a different meaning in a |
8959 | wider mode. */ |
8960 | |
8961 | if (! (CONST_INT_P (XEXP (x, 1)) |
8962 | && INTVAL (XEXP (x, 1)) >= 0 |
8963 | && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode)) |
8964 | && ! (GET_MODE (XEXP (x, 1)) != VOIDmode |
8965 | && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1))) |
8966 | < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode)))) |
8967 | break; |
8968 | |
8969 | /* If the shift count is a constant and we can do arithmetic in |
8970 | the mode of the shift, refine which bits we need. Otherwise, use the |
8971 | conservative form of the mask. */ |
8972 | if (CONST_INT_P (XEXP (x, 1)) |
8973 | && INTVAL (XEXP (x, 1)) >= 0 |
	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
	  && HWI_COMPUTABLE_MODE_P (op_mode))
8976 | mask >>= INTVAL (XEXP (x, 1)); |
8977 | else |
8978 | mask = fuller_mask; |
8979 | |
      op0 = gen_lowpart_or_truncate (op_mode,
				     force_to_mode (XEXP (x, 0), mode,
						    mask, next_select));
8983 | |
8984 | if (op_mode != xmode || op0 != XEXP (x, 0)) |
8985 | { |
	  x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
8987 | xmode = op_mode; |
8988 | } |
8989 | break; |
8990 | |
8991 | case LSHIFTRT: |
8992 | /* Here we can only do something if the shift count is a constant, |
8993 | this shift constant is valid for the host, and we can do arithmetic |
8994 | in OP_MODE. */ |
8995 | |
8996 | if (CONST_INT_P (XEXP (x, 1)) |
8997 | && INTVAL (XEXP (x, 1)) >= 0 |
8998 | && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT |
	  && HWI_COMPUTABLE_MODE_P (op_mode))
9000 | { |
9001 | rtx inner = XEXP (x, 0); |
9002 | unsigned HOST_WIDE_INT inner_mask; |
9003 | |
9004 | /* Select the mask of the bits we need for the shift operand. */ |
9005 | inner_mask = mask << INTVAL (XEXP (x, 1)); |
9006 | |
9007 | /* We can only change the mode of the shift if we can do arithmetic |
9008 | in the mode of the shift and INNER_MASK is no wider than the |
9009 | width of X's mode. */ |
9010 | if ((inner_mask & ~GET_MODE_MASK (xmode)) != 0) |
9011 | op_mode = xmode; |
9012 | |
	  inner = force_to_mode (inner, op_mode, inner_mask, next_select);
9014 | |
9015 | if (xmode != op_mode || inner != XEXP (x, 0)) |
9016 | { |
	      x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
9018 | xmode = op_mode; |
9019 | } |
9020 | } |
9021 | |
9022 | /* If we have (and (lshiftrt FOO C1) C2) where the combination of the |
9023 | shift and AND produces only copies of the sign bit (C2 is one less |
9024 | than a power of two), we can do this with just a shift. */ |
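
    /* Illustrative instance: in SImode, if the top 29 bits of FOO are
       known to be copies of the sign bit, then
       (and (lshiftrt FOO (const_int 27)) (const_int 7)) selects three of
       those copies and is equal to (lshiftrt FOO (const_int 29)).  */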
9025 | |
9026 | if (GET_CODE (x) == LSHIFTRT |
9027 | && CONST_INT_P (XEXP (x, 1)) |
9028 | /* The shift puts one of the sign bit copies in the least significant |
9029 | bit. */ |
9030 | && ((INTVAL (XEXP (x, 1)) |
9031 | + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))) |
	     >= GET_MODE_PRECISION (xmode))
	&& pow2p_hwi (mask + 1)
	/* Number of bits left after the shift must be more than the mask
	   needs.  */
	&& ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
	    <= GET_MODE_PRECISION (xmode))
	/* Must be more sign bit copies than the mask needs.  */
	&& ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
	    >= exact_log2 (mask + 1)))
      {
	int nbits = GET_MODE_PRECISION (xmode) - exact_log2 (mask + 1);
	x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0),
				 gen_int_shift_amount (xmode, nbits));
9045 | } |
9046 | goto shiftrt; |
9047 | |
9048 | case ASHIFTRT: |
9049 | /* If we are just looking for the sign bit, we don't need this shift at |
9050 | all, even if it has a variable count. */ |
9051 | if (val_signbit_p (xmode, mask)) |
	return force_to_mode (XEXP (x, 0), mode, mask, next_select);
9053 | |
9054 | /* If this is a shift by a constant, get a mask that contains those bits |
9055 | that are not copies of the sign bit. We then have two cases: If |
9056 | MASK only includes those bits, this can be a logical shift, which may |
9057 | allow simplifications. If MASK is a single-bit field not within |
9058 | those bits, we are requesting a copy of the sign bit and hence can |
9059 | shift the sign bit to the appropriate location. */ |
9060 | |
9061 | if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0 |
9062 | && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT) |
9063 | { |
9064 | unsigned HOST_WIDE_INT nonzero; |
9065 | int i; |
9066 | |
9067 | /* If the considered data is wider than HOST_WIDE_INT, we can't |
9068 | represent a mask for all its bits in a single scalar. |
9069 | But we only care about the lower bits, so calculate these. */ |
9070 | |
	  if (GET_MODE_PRECISION (xmode) > HOST_BITS_PER_WIDE_INT)
9072 | { |
9073 | nonzero = HOST_WIDE_INT_M1U; |
9074 | |
9075 | /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1)) |
9076 | is the number of bits a full-width mask would have set. |
9077 | We need only shift if these are fewer than nonzero can |
9078 | hold. If not, we must keep all bits set in nonzero. */ |
9079 | |
	      if (GET_MODE_PRECISION (xmode) - INTVAL (XEXP (x, 1))
9081 | < HOST_BITS_PER_WIDE_INT) |
9082 | nonzero >>= INTVAL (XEXP (x, 1)) |
9083 | + HOST_BITS_PER_WIDE_INT |
			   - GET_MODE_PRECISION (xmode);
9085 | } |
9086 | else |
9087 | { |
9088 | nonzero = GET_MODE_MASK (xmode); |
9089 | nonzero >>= INTVAL (XEXP (x, 1)); |
9090 | } |
9091 | |
9092 | if ((mask & ~nonzero) == 0) |
9093 | { |
9094 | x = simplify_shift_const (NULL_RTX, LSHIFTRT, xmode, |
9095 | XEXP (x, 0), INTVAL (XEXP (x, 1))); |
9096 | if (GET_CODE (x) != ASHIFTRT) |
		return force_to_mode (x, mode, mask, next_select);
9098 | } |
9099 | |
	  else if ((i = exact_log2 (mask)) >= 0)
9101 | { |
9102 | x = simplify_shift_const |
9103 | (NULL_RTX, LSHIFTRT, xmode, XEXP (x, 0), |
		 GET_MODE_PRECISION (xmode) - 1 - i);
9105 | |
9106 | if (GET_CODE (x) != ASHIFTRT) |
		return force_to_mode (x, mode, mask, next_select);
9108 | } |
9109 | } |
9110 | |
9111 | /* If MASK is 1, convert this to an LSHIFTRT. This can be done |
9112 | even if the shift count isn't a constant. */ |
9113 | if (mask == 1) |
	x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0), XEXP (x, 1));
9115 | |
9116 | shiftrt: |
9117 | |
9118 | /* If this is a zero- or sign-extension operation that just affects bits |
9119 | we don't care about, remove it. Be sure the call above returned |
9120 | something that is still a shift. */ |
9121 | |
9122 | if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT) |
9123 | && CONST_INT_P (XEXP (x, 1)) |
9124 | && INTVAL (XEXP (x, 1)) >= 0 |
9125 | && (INTVAL (XEXP (x, 1)) |
	      <= GET_MODE_PRECISION (xmode) - (floor_log2 (mask) + 1))
9127 | && GET_CODE (XEXP (x, 0)) == ASHIFT |
9128 | && XEXP (XEXP (x, 0), 1) == XEXP (x, 1)) |
	return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask, next_select);
9130 | |
9131 | break; |
9132 | |
9133 | case ROTATE: |
9134 | case ROTATERT: |
9135 | /* If the shift count is constant and we can do computations |
9136 | in the mode of X, compute where the bits we care about are. |
9137 | Otherwise, we can't do anything. Don't change the mode of |
9138 | the shift or propagate MODE into the shift, though. */ |
9139 | if (CONST_INT_P (XEXP (x, 1)) |
9140 | && INTVAL (XEXP (x, 1)) >= 0) |
9141 | { |
	  temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
					    xmode, gen_int_mode (mask, xmode),
					    XEXP (x, 1));
	  if (temp && CONST_INT_P (temp))
	    x = simplify_gen_binary (code, xmode,
				     force_to_mode (XEXP (x, 0), xmode,
						    INTVAL (temp), next_select),
				     XEXP (x, 1));
9150 | } |
9151 | break; |
9152 | |
9153 | case NEG: |
9154 | /* If we just want the low-order bit, the NEG isn't needed since it |
9155 | won't change the low-order bit. */ |
9156 | if (mask == 1) |
9157 | return force_to_mode (XEXP (x, 0), mode, mask, just_select); |
9158 | |
9159 | /* We need any bits less significant than the most significant bit in |
9160 | MASK since carries from those bits will affect the bits we are |
9161 | interested in. */ |
9162 | mask = fuller_mask; |
9163 | goto unop; |
9164 | |
9165 | case NOT: |
9166 | /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the |
9167 | same as the XOR case above. Ensure that the constant we form is not |
9168 | wider than the mode of X. */ |
9169 | |
9170 | if (GET_CODE (XEXP (x, 0)) == LSHIFTRT |
9171 | && CONST_INT_P (XEXP (XEXP (x, 0), 1)) |
9172 | && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0 |
	  && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
	      < GET_MODE_PRECISION (xmode))
9175 | && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT) |
9176 | { |
9177 | temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)), xmode); |
	  temp = simplify_gen_binary (XOR, xmode, XEXP (XEXP (x, 0), 0), temp);
	  x = simplify_gen_binary (LSHIFTRT, xmode,
				   temp, XEXP (XEXP (x, 0), 1));

	  return force_to_mode (x, mode, mask, next_select);
9183 | } |
9184 | |
9185 | /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must |
9186 | use the full mask inside the NOT. */ |
9187 | mask = fuller_mask; |
9188 | |
9189 | unop: |
      op0 = gen_lowpart_or_truncate (op_mode,
				     force_to_mode (XEXP (x, 0), mode, mask,
						    next_select));
9193 | if (op_mode != xmode || op0 != XEXP (x, 0)) |
9194 | { |
	  x = simplify_gen_unary (code, op_mode, op0, op_mode);
9196 | xmode = op_mode; |
9197 | } |
9198 | break; |
9199 | |
9200 | case NE: |
9201 | /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included |
9202 | in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero, |
9203 | which is equal to STORE_FLAG_VALUE. */ |
9204 | if ((mask & ~STORE_FLAG_VALUE) == 0 |
9205 | && XEXP (x, 1) == const0_rtx |
9206 | && GET_MODE (XEXP (x, 0)) == mode |
	&& pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
9208 | && (nonzero_bits (XEXP (x, 0), mode) |
9209 | == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE)) |
      return force_to_mode (XEXP (x, 0), mode, mask, next_select);
9211 | |
9212 | break; |
9213 | |
9214 | case IF_THEN_ELSE: |
9215 | /* We have no way of knowing if the IF_THEN_ELSE can itself be |
9216 | written in a narrower mode. We play it safe and do not do so. */ |
9217 | |
      op0 = gen_lowpart_or_truncate (xmode,
				     force_to_mode (XEXP (x, 1), mode,
						    mask, next_select));
      op1 = gen_lowpart_or_truncate (xmode,
				     force_to_mode (XEXP (x, 2), mode,
						    mask, next_select));
      if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
	x = simplify_gen_ternary (IF_THEN_ELSE, xmode,
				  GET_MODE (XEXP (x, 0)), XEXP (x, 0),
				  op0, op1);
9228 | break; |
9229 | |
9230 | default: |
9231 | break; |
9232 | } |
9233 | |
9234 | /* Ensure we return a value of the proper mode. */ |
9235 | return gen_lowpart_or_truncate (mode, x); |
9236 | } |
9237 | |
9238 | /* Return nonzero if X is an expression that has one of two values depending on |
9239 | whether some other value is zero or nonzero. In that case, we return the |
9240 | value that is being tested, *PTRUE is set to the value if the rtx being |
9241 | returned has a nonzero value, and *PFALSE is set to the other alternative. |
9242 | |
9243 | If we return zero, we set *PTRUE and *PFALSE to X. */ |
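
/* Two illustrative cases: given X == (ne A (const_int 0)) we return A
   with *PTRUE == const_true_rtx and *PFALSE == const0_rtx; given
   X == (if_then_else (eq A (const_int 0)) B C) we return A with
   *PTRUE == C and *PFALSE == B.  */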
9244 | |
9245 | static rtx |
9246 | if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse) |
9247 | { |
9248 | machine_mode mode = GET_MODE (x); |
9249 | enum rtx_code code = GET_CODE (x); |
9250 | rtx cond0, cond1, true0, true1, false0, false1; |
9251 | unsigned HOST_WIDE_INT nz; |
9252 | scalar_int_mode int_mode; |
9253 | |
9254 | /* If we are comparing a value against zero, we are done. */ |
9255 | if ((code == NE || code == EQ) |
9256 | && XEXP (x, 1) == const0_rtx) |
9257 | { |
9258 | *ptrue = (code == NE) ? const_true_rtx : const0_rtx; |
9259 | *pfalse = (code == NE) ? const0_rtx : const_true_rtx; |
9260 | return XEXP (x, 0); |
9261 | } |
9262 | |
9263 | /* If this is a unary operation whose operand has one of two values, apply |
9264 | our opcode to compute those values. */ |
9265 | else if (UNARY_P (x) |
	   && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
9267 | { |
      *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
      *pfalse = simplify_gen_unary (code, mode, false0,
				    GET_MODE (XEXP (x, 0)));
9271 | return cond0; |
9272 | } |
9273 | |
9274 | /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would |
9275 | make can't possibly match and would suppress other optimizations. */ |
9276 | else if (code == COMPARE) |
9277 | ; |
9278 | |
9279 | /* If this is a binary operation, see if either side has only one of two |
9280 | values. If either one does or if both do and they are conditional on |
9281 | the same value, compute the new true and false values. */ |
9282 | else if (BINARY_P (x)) |
9283 | { |
9284 | rtx op0 = XEXP (x, 0); |
9285 | rtx op1 = XEXP (x, 1); |
      cond0 = if_then_else_cond (op0, &true0, &false0);
      cond1 = if_then_else_cond (op1, &true1, &false1);
9288 | |
9289 | if ((cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1)) |
9290 | && (REG_P (op0) || REG_P (op1))) |
9291 | { |
9292 | /* Try to enable a simplification by undoing work done by |
9293 | if_then_else_cond if it converted a REG into something more |
9294 | complex. */ |
9295 | if (REG_P (op0)) |
9296 | { |
9297 | cond0 = 0; |
9298 | true0 = false0 = op0; |
9299 | } |
9300 | else |
9301 | { |
9302 | cond1 = 0; |
9303 | true1 = false1 = op1; |
9304 | } |
9305 | } |
9306 | |
9307 | if ((cond0 != 0 || cond1 != 0) |
9308 | && ! (cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1))) |
9309 | { |
9310 | /* If if_then_else_cond returned zero, then true/false are the |
9311 | same rtl. We must copy one of them to prevent invalid rtl |
9312 | sharing. */ |
9313 | if (cond0 == 0) |
9314 | true0 = copy_rtx (true0); |
9315 | else if (cond1 == 0) |
9316 | true1 = copy_rtx (true1); |
9317 | |
9318 | if (COMPARISON_P (x)) |
9319 | { |
	      *ptrue = simplify_gen_relational (code, mode, VOIDmode,
						true0, true1);
	      *pfalse = simplify_gen_relational (code, mode, VOIDmode,
						 false0, false1);
9324 | } |
9325 | else |
9326 | { |
	      *ptrue = simplify_gen_binary (code, mode, true0, true1);
	      *pfalse = simplify_gen_binary (code, mode, false0, false1);
9329 | } |
9330 | |
9331 | return cond0 ? cond0 : cond1; |
9332 | } |
9333 | |
9334 | /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the |
9335 | operands is zero when the other is nonzero, and vice-versa, |
9336 | and STORE_FLAG_VALUE is 1 or -1. */ |
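
      /* An illustrative instance, with STORE_FLAG_VALUE == 1:
	 (plus (mult (ne A B) C) (mult (eq A B) D)) is C when A != B and
	 D when A == B, so it decomposes into the condition (ne A B) with
	 *PTRUE == C and *PFALSE == D (each formed below as a MULT by
	 const_true_rtx).  */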
9337 | |
9338 | if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1) |
9339 | && (code == PLUS || code == IOR || code == XOR || code == MINUS |
9340 | || code == UMAX) |
9341 | && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT) |
9342 | { |
9343 | rtx op0 = XEXP (XEXP (x, 0), 1); |
9344 | rtx op1 = XEXP (XEXP (x, 1), 1); |
9345 | |
9346 | cond0 = XEXP (XEXP (x, 0), 0); |
9347 | cond1 = XEXP (XEXP (x, 1), 0); |
9348 | |
9349 | if (COMPARISON_P (cond0) |
9350 | && COMPARISON_P (cond1) |
9351 | && SCALAR_INT_MODE_P (mode) |
9352 | && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL) |
9353 | && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0)) |
9354 | && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1))) |
9355 | || ((swap_condition (GET_CODE (cond0)) |
9356 | == reversed_comparison_code (cond1, NULL)) |
9357 | && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1)) |
9358 | && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0)))) |
9359 | && ! side_effects_p (x)) |
9360 | { |
	      *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
	      *pfalse = simplify_gen_binary (MULT, mode,
					     (code == MINUS
					      ? simplify_gen_unary (NEG, mode,
								    op1, mode)
					      : op1),
					     const_true_rtx);
9368 | return cond0; |
9369 | } |
9370 | } |
9371 | |
9372 | /* Similarly for MULT, AND and UMIN, except that for these the result |
9373 | is always zero. */ |
9374 | if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1) |
9375 | && (code == MULT || code == AND || code == UMIN) |
9376 | && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT) |
9377 | { |
9378 | cond0 = XEXP (XEXP (x, 0), 0); |
9379 | cond1 = XEXP (XEXP (x, 1), 0); |
9380 | |
9381 | if (COMPARISON_P (cond0) |
9382 | && COMPARISON_P (cond1) |
9383 | && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL) |
9384 | && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0)) |
9385 | && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1))) |
9386 | || ((swap_condition (GET_CODE (cond0)) |
9387 | == reversed_comparison_code (cond1, NULL)) |
9388 | && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1)) |
9389 | && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0)))) |
9390 | && ! side_effects_p (x)) |
9391 | { |
9392 | *ptrue = *pfalse = const0_rtx; |
9393 | return cond0; |
9394 | } |
9395 | } |
9396 | } |
9397 | |
9398 | else if (code == IF_THEN_ELSE) |
9399 | { |
9400 | /* If we have IF_THEN_ELSE already, extract the condition and |
9401 | canonicalize it if it is NE or EQ. */ |
9402 | cond0 = XEXP (x, 0); |
9403 | *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2); |
9404 | if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx) |
9405 | return XEXP (cond0, 0); |
9406 | else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx) |
9407 | { |
9408 | *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1); |
9409 | return XEXP (cond0, 0); |
9410 | } |
9411 | else |
9412 | return cond0; |
9413 | } |
9414 | |
  /* If X is a SUBREG, we can narrow both the true and false values
     of the inner expression, if there is a condition.  */
9417 | else if (code == SUBREG |
9418 | && (cond0 = if_then_else_cond (SUBREG_REG (x), ptrue: &true0, |
9419 | pfalse: &false0)) != 0) |
9420 | { |
      true0 = simplify_gen_subreg (mode, true0,
				   GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
      false0 = simplify_gen_subreg (mode, false0,
				    GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9425 | if (true0 && false0) |
9426 | { |
9427 | *ptrue = true0; |
9428 | *pfalse = false0; |
9429 | return cond0; |
9430 | } |
9431 | } |
9432 | |
9433 | /* If X is a constant, this isn't special and will cause confusions |
9434 | if we treat it as such. Likewise if it is equivalent to a constant. */ |
9435 | else if (CONSTANT_P (x) |
9436 | || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0))) |
9437 | ; |
9438 | |
9439 | /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that |
9440 | will be least confusing to the rest of the compiler. */ |
9441 | else if (mode == BImode) |
9442 | { |
9443 | *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx; |
9444 | return x; |
9445 | } |
9446 | |
9447 | /* If X is known to be either 0 or -1, those are the true and |
9448 | false values when testing X. */ |
9449 | else if (x == constm1_rtx || x == const0_rtx |
	   || (is_a <scalar_int_mode> (mode, &int_mode)
	       && (num_sign_bit_copies (x, int_mode)
		   == GET_MODE_PRECISION (int_mode))))
9453 | { |
9454 | *ptrue = constm1_rtx, *pfalse = const0_rtx; |
9455 | return x; |
9456 | } |
9457 | |
9458 | /* Likewise for 0 or a single bit. */ |
9459 | else if (HWI_COMPUTABLE_MODE_P (mode) |
	   && pow2p_hwi (nz = nonzero_bits (x, mode)))
9461 | { |
9462 | *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx; |
9463 | return x; |
9464 | } |
9465 | |
9466 | /* Otherwise fail; show no condition with true and false values the same. */ |
9467 | *ptrue = *pfalse = x; |
9468 | return 0; |
9469 | } |
9470 | |
9471 | /* Return the value of expression X given the fact that condition COND |
9472 | is known to be true when applied to REG as its first operand and VAL |
9473 | as its second. X is known to not be shared and so can be modified in |
9474 | place. |
9475 | |
9476 | We only handle the simplest cases, and specifically those cases that |
9477 | arise with IF_THEN_ELSE expressions. */ |
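
/* For example (illustrative), known_cond ((smax R V), GT, R, V)
   returns R, since under the hypothesis R > V the maximum is known;
   likewise known_cond ((abs R), GE, R, (const_int 0)) returns R.  */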
9478 | |
9479 | static rtx |
9480 | known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val) |
9481 | { |
9482 | enum rtx_code code = GET_CODE (x); |
9483 | const char *fmt; |
9484 | int i, j; |
9485 | |
9486 | if (side_effects_p (x)) |
9487 | return x; |
9488 | |
9489 | /* If either operand of the condition is a floating point value, |
9490 | then we have to avoid collapsing an EQ comparison. */ |
9491 | if (cond == EQ |
9492 | && rtx_equal_p (x, reg) |
9493 | && ! FLOAT_MODE_P (GET_MODE (x)) |
9494 | && ! FLOAT_MODE_P (GET_MODE (val))) |
9495 | return val; |
9496 | |
9497 | if (cond == UNEQ && rtx_equal_p (x, reg)) |
9498 | return val; |
9499 | |
9500 | /* If X is (abs REG) and we know something about REG's relationship |
9501 | with zero, we may be able to simplify this. */ |
9502 | |
9503 | if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx) |
9504 | switch (cond) |
9505 | { |
9506 | case GE: case GT: case EQ: |
9507 | return XEXP (x, 0); |
9508 | case LT: case LE: |
	return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
9510 | XEXP (x, 0), |
9511 | GET_MODE (XEXP (x, 0))); |
9512 | default: |
9513 | break; |
9514 | } |
9515 | |
9516 | /* The only other cases we handle are MIN, MAX, and comparisons if the |
9517 | operands are the same as REG and VAL. */ |
9518 | |
9519 | else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x)) |
9520 | { |
9521 | if (rtx_equal_p (XEXP (x, 0), val)) |
9522 | { |
	  std::swap (val, reg);
9524 | cond = swap_condition (cond); |
9525 | } |
9526 | |
9527 | if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val)) |
9528 | { |
9529 | if (COMPARISON_P (x)) |
9530 | { |
9531 | if (comparison_dominates_p (cond, code)) |
9532 | return VECTOR_MODE_P (GET_MODE (x)) ? x : const_true_rtx; |
9533 | |
9534 | code = reversed_comparison_code (x, NULL); |
9535 | if (code != UNKNOWN |
9536 | && comparison_dominates_p (cond, code)) |
9537 | return CONST0_RTX (GET_MODE (x)); |
9538 | else |
9539 | return x; |
9540 | } |
9541 | else if (code == SMAX || code == SMIN |
9542 | || code == UMIN || code == UMAX) |
9543 | { |
9544 | int unsignedp = (code == UMIN || code == UMAX); |
9545 | |
9546 | /* Do not reverse the condition when it is NE or EQ. |
9547 | This is because we cannot conclude anything about |
9548 | the value of 'SMAX (x, y)' when x is not equal to y, |
9549 | but we can when x equals y. */ |
9550 | if ((code == SMAX || code == UMAX) |
9551 | && ! (cond == EQ || cond == NE)) |
9552 | cond = reverse_condition (cond); |
9553 | |
9554 | switch (cond) |
9555 | { |
9556 | case GE: case GT: |
9557 | return unsignedp ? x : XEXP (x, 1); |
9558 | case LE: case LT: |
9559 | return unsignedp ? x : XEXP (x, 0); |
9560 | case GEU: case GTU: |
9561 | return unsignedp ? XEXP (x, 1) : x; |
9562 | case LEU: case LTU: |
9563 | return unsignedp ? XEXP (x, 0) : x; |
9564 | default: |
9565 | break; |
9566 | } |
9567 | } |
9568 | } |
9569 | } |
9570 | else if (code == SUBREG) |
9571 | { |
9572 | machine_mode inner_mode = GET_MODE (SUBREG_REG (x)); |
9573 | rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val); |
9574 | |
9575 | if (SUBREG_REG (x) != r) |
9576 | { |
9577 | /* We must simplify subreg here, before we lose track of the |
9578 | original inner_mode. */ |
	  new_rtx = simplify_subreg (GET_MODE (x), r,
				     inner_mode, SUBREG_BYTE (x));
9581 | if (new_rtx) |
9582 | return new_rtx; |
9583 | else |
9584 | SUBST (SUBREG_REG (x), r); |
9585 | } |
9586 | |
9587 | return x; |
9588 | } |
9589 | /* We don't have to handle SIGN_EXTEND here, because even in the |
9590 | case of replacing something with a modeless CONST_INT, a |
9591 | CONST_INT is already (supposed to be) a valid sign extension for |
9592 | its narrower mode, which implies it's already properly |
9593 | sign-extended for the wider mode. Now, for ZERO_EXTEND, the |
9594 | story is different. */ |
9595 | else if (code == ZERO_EXTEND) |
9596 | { |
9597 | machine_mode inner_mode = GET_MODE (XEXP (x, 0)); |
9598 | rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val); |
9599 | |
9600 | if (XEXP (x, 0) != r) |
9601 | { |
9602 | /* We must simplify the zero_extend here, before we lose |
9603 | track of the original inner_mode. */ |
	  new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
					      r, inner_mode);
9606 | if (new_rtx) |
9607 | return new_rtx; |
9608 | else |
9609 | SUBST (XEXP (x, 0), r); |
9610 | } |
9611 | |
9612 | return x; |
9613 | } |
9614 | |
9615 | fmt = GET_RTX_FORMAT (code); |
9616 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
9617 | { |
9618 | if (fmt[i] == 'e') |
9619 | SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val)); |
9620 | else if (fmt[i] == 'E') |
9621 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
9622 | SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j), |
9623 | cond, reg, val)); |
9624 | } |
9625 | |
9626 | return x; |
9627 | } |
9628 | |
9629 | /* See if X and Y are equal for the purposes of seeing if we can rewrite an |
9630 | assignment as a field assignment. */ |
9631 | |
9632 | static bool |
9633 | rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x) |
9634 | { |
9635 | if (widen_x && GET_MODE (x) != GET_MODE (y)) |
9636 | { |
9637 | if (paradoxical_subreg_p (GET_MODE (x), GET_MODE (y))) |
9638 | return false; |
9639 | if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN) |
9640 | return false; |
9641 | x = adjust_address_nv (x, GET_MODE (y), |
9642 | byte_lowpart_offset (GET_MODE (y), |
9643 | GET_MODE (x))); |
9644 | } |
9645 | |
9646 | if (x == y || rtx_equal_p (x, y)) |
9647 | return true; |
9648 | |
9649 | if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y)) |
9650 | return false; |
9651 | |
9652 | /* Check for a paradoxical SUBREG of a MEM compared with the MEM. |
9653 | Note that all SUBREGs of MEM are paradoxical; otherwise they |
9654 | would have been rewritten. */ |
9655 | if (MEM_P (x) && GET_CODE (y) == SUBREG |
9656 | && MEM_P (SUBREG_REG (y)) |
9657 | && rtx_equal_p (SUBREG_REG (y), |
9658 | gen_lowpart (GET_MODE (SUBREG_REG (y)), x))) |
9659 | return true; |
9660 | |
9661 | if (MEM_P (y) && GET_CODE (x) == SUBREG |
9662 | && MEM_P (SUBREG_REG (x)) |
9663 | && rtx_equal_p (SUBREG_REG (x), |
9664 | gen_lowpart (GET_MODE (SUBREG_REG (x)), y))) |
9665 | return true; |
9666 | |
9667 | /* We used to see if get_last_value of X and Y were the same but that's |
9668 | not correct. In one direction, we'll cause the assignment to have |
     the wrong destination and in the other, we'll import a register into
     this insn that might already have been dead.  So fail if none of the
9671 | above cases are true. */ |
9672 | return false; |
9673 | } |
9674 | |
9675 | /* See if X, a SET operation, can be rewritten as a bit-field assignment. |
9676 | Return that assignment if so. |
9677 | |
9678 | We only handle the most common cases. */ |
9679 | |
9680 | static rtx |
9681 | make_field_assignment (rtx x) |
9682 | { |
9683 | rtx dest = SET_DEST (x); |
9684 | rtx src = SET_SRC (x); |
9685 | rtx assign; |
9686 | rtx rhs, lhs; |
9687 | HOST_WIDE_INT c1; |
9688 | HOST_WIDE_INT pos; |
9689 | unsigned HOST_WIDE_INT len; |
9690 | rtx other; |
9691 | |
9692 | /* All the rules in this function are specific to scalar integers. */ |
9693 | scalar_int_mode mode; |
  if (!is_a <scalar_int_mode> (GET_MODE (dest), &mode))
9695 | return x; |
9696 | |
9697 | /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is |
9698 | a clear of a one-bit field. We will have changed it to |
9699 | (and (rotate (const_int -2) POS) DEST), so check for that. Also check |
9700 | for a SUBREG. */ |
9701 | |
9702 | if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE |
9703 | && CONST_INT_P (XEXP (XEXP (src, 0), 0)) |
9704 | && INTVAL (XEXP (XEXP (src, 0), 0)) == -2 |
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9706 | { |
      assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
				1, true, true, false);
9709 | if (assign != 0) |
9710 | return gen_rtx_SET (assign, const0_rtx); |
9711 | return x; |
9712 | } |
9713 | |
9714 | if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG |
9715 | && subreg_lowpart_p (XEXP (src, 0)) |
9716 | && partial_subreg_p (XEXP (src, 0)) |
9717 | && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE |
9718 | && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) |
9719 | && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2 |
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9721 | { |
      assign = make_extraction (VOIDmode, dest, 0,
				XEXP (SUBREG_REG (XEXP (src, 0)), 1),
				1, true, true, false);
9725 | if (assign != 0) |
9726 | return gen_rtx_SET (assign, const0_rtx); |
9727 | return x; |
9728 | } |
9729 | |
9730 | /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a |
9731 | one-bit field. */ |
9732 | if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT |
9733 | && XEXP (XEXP (src, 0), 0) == const1_rtx |
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9735 | { |
      assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
				1, true, true, false);
9738 | if (assign != 0) |
9739 | return gen_rtx_SET (assign, const1_rtx); |
9740 | return x; |
9741 | } |
9742 | |
9743 | /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the |
9744 | SRC is an AND with all bits of that field set, then we can discard |
9745 | the AND. */ |
9746 | if (GET_CODE (dest) == ZERO_EXTRACT |
9747 | && CONST_INT_P (XEXP (dest, 1)) |
9748 | && GET_CODE (src) == AND |
9749 | && CONST_INT_P (XEXP (src, 1))) |
9750 | { |
9751 | HOST_WIDE_INT width = INTVAL (XEXP (dest, 1)); |
9752 | unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1)); |
9753 | unsigned HOST_WIDE_INT ze_mask; |
9754 | |
9755 | if (width >= HOST_BITS_PER_WIDE_INT) |
9756 | ze_mask = -1; |
9757 | else |
9758 | ze_mask = (HOST_WIDE_INT_1U << width) - 1; |
9759 | |
9760 | /* Complete overlap. We can remove the source AND. */ |
9761 | if ((and_mask & ze_mask) == ze_mask) |
9762 | return gen_rtx_SET (dest, XEXP (src, 0)); |
9763 | |
9764 | /* Partial overlap. We can reduce the source AND. */ |
9765 | if ((and_mask & ze_mask) != and_mask) |
9766 | { |
9767 | src = gen_rtx_AND (mode, XEXP (src, 0), |
9768 | gen_int_mode (and_mask & ze_mask, mode)); |
9769 | return gen_rtx_SET (dest, src); |
9770 | } |
9771 | } |
9772 | |
9773 | /* The other case we handle is assignments into a constant-position |
9774 | field. They look like (ior/xor (and DEST C1) OTHER). If C1 represents |
9775 | a mask that has all one bits except for a group of zero bits and |
9776 | OTHER is known to have zeros where C1 has ones, this is such an |
9777 | assignment. Compute the position and length from C1. Shift OTHER |
9778 | to the appropriate position, force it to the required mode, and |
9779 | make the extraction. Check for the AND in both operands. */ |
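
  /* An illustrative instance, in SImode:
     (set DEST (ior (and DEST (const_int -256)) OTHER)), with OTHER
     known to fit in the low 8 bits, becomes
     (set (zero_extract DEST (const_int 8) (const_int 0)) OTHER);
     C1 == -256 has a single group of eight zero bits at position 0.  */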
9780 | |
9781 | /* One or more SUBREGs might obscure the constant-position field |
9782 | assignment. The first one we are likely to encounter is an outer |
9783 | narrowing SUBREG, which we can just strip for the purposes of |
9784 | identifying the constant-field assignment. */ |
9785 | scalar_int_mode src_mode = mode; |
9786 | if (GET_CODE (src) == SUBREG |
9787 | && subreg_lowpart_p (src) |
      && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (src)), &src_mode))
9789 | src = SUBREG_REG (src); |
9790 | |
9791 | if (GET_CODE (src) != IOR && GET_CODE (src) != XOR) |
9792 | return x; |
9793 | |
9794 | rhs = expand_compound_operation (XEXP (src, 0)); |
9795 | lhs = expand_compound_operation (XEXP (src, 1)); |
9796 | |
9797 | if (GET_CODE (rhs) == AND |
9798 | && CONST_INT_P (XEXP (rhs, 1)) |
      && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
9800 | c1 = INTVAL (XEXP (rhs, 1)), other = lhs; |
9801 | /* The second SUBREG that might get in the way is a paradoxical |
9802 | SUBREG around the first operand of the AND. We want to |
9803 | pretend the operand is as wide as the destination here. We |
9804 | do this by adjusting the MEM to wider mode for the sole |
9805 | purpose of the call to rtx_equal_for_field_assignment_p. Also |
9806 | note this trick only works for MEMs. */ |
9807 | else if (GET_CODE (rhs) == AND |
9808 | && paradoxical_subreg_p (XEXP (rhs, 0)) |
9809 | && MEM_P (SUBREG_REG (XEXP (rhs, 0))) |
9810 | && CONST_INT_P (XEXP (rhs, 1)) |
	   && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
						dest, true))
9813 | c1 = INTVAL (XEXP (rhs, 1)), other = lhs; |
9814 | else if (GET_CODE (lhs) == AND |
9815 | && CONST_INT_P (XEXP (lhs, 1)) |
	   && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
9817 | c1 = INTVAL (XEXP (lhs, 1)), other = rhs; |
9818 | /* The second SUBREG that might get in the way is a paradoxical |
9819 | SUBREG around the first operand of the AND. We want to |
9820 | pretend the operand is as wide as the destination here. We |
9821 | do this by adjusting the MEM to wider mode for the sole |
9822 | purpose of the call to rtx_equal_for_field_assignment_p. Also |
9823 | note this trick only works for MEMs. */ |
9824 | else if (GET_CODE (lhs) == AND |
9825 | && paradoxical_subreg_p (XEXP (lhs, 0)) |
9826 | && MEM_P (SUBREG_REG (XEXP (lhs, 0))) |
9827 | && CONST_INT_P (XEXP (lhs, 1)) |
	   && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
						dest, true))
9830 | c1 = INTVAL (XEXP (lhs, 1)), other = rhs; |
9831 | else |
9832 | return x; |
9833 | |
  pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (mode), &len);
9835 | if (pos < 0 |
9836 | || pos + len > GET_MODE_PRECISION (mode) |
9837 | || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT |
9838 | || (c1 & nonzero_bits (other, mode)) != 0) |
9839 | return x; |
9840 | |
  assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len,
			    true, true, false);
9843 | if (assign == 0) |
9844 | return x; |
9845 | |
9846 | /* The mode to use for the source is the mode of the assignment, or of |
9847 | what is inside a possible STRICT_LOW_PART. */ |
9848 | machine_mode new_mode = (GET_CODE (assign) == STRICT_LOW_PART |
9849 | ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign)); |
9850 | |
9851 | /* Shift OTHER right POS places and make it the source, restricting it |
9852 | to the proper length and mode. */ |
9853 | |
  src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
						     src_mode, other, pos),
			       dest);
  src = force_to_mode (src, new_mode,
		       len >= HOST_BITS_PER_WIDE_INT
		       ? HOST_WIDE_INT_M1U
		       : (HOST_WIDE_INT_1U << len) - 1, false);
9861 | |
9862 | /* If SRC is masked by an AND that does not make a difference in |
9863 | the value being stored, strip it. */ |
9864 | if (GET_CODE (assign) == ZERO_EXTRACT |
9865 | && CONST_INT_P (XEXP (assign, 1)) |
9866 | && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT |
9867 | && GET_CODE (src) == AND |
9868 | && CONST_INT_P (XEXP (src, 1)) |
9869 | && UINTVAL (XEXP (src, 1)) |
9870 | == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1) |
9871 | src = XEXP (src, 0); |
9872 | |
9873 | return gen_rtx_SET (assign, src); |
9874 | } |
9875 | |
9876 | /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c) |
9877 | if so. */ |
9878 | |
9879 | static rtx |
9880 | apply_distributive_law (rtx x) |
9881 | { |
9882 | enum rtx_code code = GET_CODE (x); |
9883 | enum rtx_code inner_code; |
9884 | rtx lhs, rhs, other; |
9885 | rtx tem; |
9886 | |
9887 | /* Distributivity is not true for floating point as it can change the |
9888 | value. So we don't do it unless -funsafe-math-optimizations. */ |
9889 | if (FLOAT_MODE_P (GET_MODE (x)) |
9890 | && ! flag_unsafe_math_optimizations) |
9891 | return x; |
9892 | |
9893 | /* The outer operation can only be one of the following: */ |
9894 | if (code != IOR && code != AND && code != XOR |
9895 | && code != PLUS && code != MINUS) |
9896 | return x; |
9897 | |
9898 | lhs = XEXP (x, 0); |
9899 | rhs = XEXP (x, 1); |
9900 | |
9901 | /* If either operand is a primitive we can't do anything, so get out |
9902 | fast. */ |
9903 | if (OBJECT_P (lhs) || OBJECT_P (rhs)) |
9904 | return x; |
9905 | |
  lhs = expand_compound_operation (lhs);
  rhs = expand_compound_operation (rhs);
9908 | inner_code = GET_CODE (lhs); |
9909 | if (inner_code != GET_CODE (rhs)) |
9910 | return x; |
9911 | |
9912 | /* See if the inner and outer operations distribute. */ |
9913 | switch (inner_code) |
9914 | { |
9915 | case LSHIFTRT: |
9916 | case ASHIFTRT: |
9917 | case AND: |
9918 | case IOR: |
9919 | /* These all distribute except over PLUS. */ |
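      /* For instance, (ior (lshiftrt A C) (lshiftrt B C)) is
	 (lshiftrt (ior A B) C), but the analogous identity with PLUS
	 as the outer code fails because of carries.  */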
9920 | if (code == PLUS || code == MINUS) |
9921 | return x; |
9922 | break; |
9923 | |
9924 | case MULT: |
9925 | if (code != PLUS && code != MINUS) |
9926 | return x; |
9927 | break; |
9928 | |
9929 | case ASHIFT: |
9930 | /* This is also a multiply, so it distributes over everything. */ |
9931 | break; |
9932 | |
9933 | /* This used to handle SUBREG, but this turned out to be counter- |
9934 | productive, since (subreg (op ...)) usually is not handled by |
9935 | insn patterns, and this "optimization" therefore transformed |
9936 | recognizable patterns into unrecognizable ones. Therefore the |
9937 | SUBREG case was removed from here. |
9938 | |
9939 | It is possible that distributing SUBREG over arithmetic operations |
     leads to an intermediate result that can then be optimized further,
9941 | e.g. by moving the outer SUBREG to the other side of a SET as done |
9942 | in simplify_set. This seems to have been the original intent of |
9943 | handling SUBREGs here. |
9944 | |
9945 | However, with current GCC this does not appear to actually happen, |
9946 | at least on major platforms. If some case is found where removing |
9947 | the SUBREG case here prevents follow-on optimizations, distributing |
9948 | SUBREGs ought to be re-added at that place, e.g. in simplify_set. */ |
9949 | |
9950 | default: |
9951 | return x; |
9952 | } |
9953 | |
9954 | /* Set LHS and RHS to the inner operands (A and B in the example |
9955 | above) and set OTHER to the common operand (C in the example). |
9956 | There is only one way to do this unless the inner operation is |
9957 | commutative. */ |
9958 | if (COMMUTATIVE_ARITH_P (lhs) |
9959 | && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0))) |
9960 | other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1); |
9961 | else if (COMMUTATIVE_ARITH_P (lhs) |
9962 | && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1))) |
9963 | other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0); |
9964 | else if (COMMUTATIVE_ARITH_P (lhs) |
9965 | && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0))) |
9966 | other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1); |
9967 | else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1))) |
9968 | other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0); |
9969 | else |
9970 | return x; |
9971 | |
9972 | /* Form the new inner operation, seeing if it simplifies first. */ |
  tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
9974 | |
9975 | /* There is one exception to the general way of distributing: |
9976 | (a | c) ^ (b | c) -> (a ^ b) & ~c */ |
9977 | if (code == XOR && inner_code == IOR) |
9978 | { |
9979 | inner_code = AND; |
      other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
9981 | } |
9982 | |
  /* We may be able to continue distributing the result, so call
     ourselves recursively on the inner operation before forming the
     outer operation, which we return.  */
  return simplify_gen_binary (inner_code, GET_MODE (x),
			      apply_distributive_law (tem), other);
9988 | } |
9989 | |
9990 | /* See if X is of the form (* (+ A B) C), and if so convert to |
9991 | (+ (* A C) (* B C)) and try to simplify. |
9992 | |
9993 | Most of the time, this results in no change. However, if some of |
9994 | the operands are the same or inverses of each other, simplifications |
9995 | will result. |
9996 | |
9997 | For example, (and (ior A B) (not B)) can occur as the result of |
9998 | expanding a bit field assignment. When we apply the distributive |
   law to this, we get (ior (and A (not B)) (and B (not B))),
   which then simplifies to (and A (not B)).
10001 | |
10002 | Note that no checks happen on the validity of applying the inverse |
10003 | distributive law. This is pointless since we can do it in the |
10004 | few places where this routine is called. |
10005 | |
10006 | N is the index of the term that is decomposed (the arithmetic operation, |
10007 | i.e. (+ A B) in the first example above). !N is the index of the term that |
10008 | is distributed, i.e. of C in the first example above. */ |
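
/* For instance (illustrative), with X == (and (ior A B) C) and N == 0,
   DECOMPOSED is (ior A B), DISTRIBUTED is C, and we test whether
   (ior (and A C) (and B C)) simplifies to something cheaper than X.  */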
10009 | static rtx |
10010 | distribute_and_simplify_rtx (rtx x, int n) |
10011 | { |
10012 | machine_mode mode; |
10013 | enum rtx_code outer_code, inner_code; |
10014 | rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp; |
10015 | |
10016 | /* Distributivity is not true for floating point as it can change the |
10017 | value. So we don't do it unless -funsafe-math-optimizations. */ |
10018 | if (FLOAT_MODE_P (GET_MODE (x)) |
10019 | && ! flag_unsafe_math_optimizations) |
10020 | return NULL_RTX; |
10021 | |
10022 | decomposed = XEXP (x, n); |
10023 | if (!ARITHMETIC_P (decomposed)) |
10024 | return NULL_RTX; |
10025 | |
10026 | mode = GET_MODE (x); |
10027 | outer_code = GET_CODE (x); |
10028 | distributed = XEXP (x, !n); |
10029 | |
10030 | inner_code = GET_CODE (decomposed); |
10031 | inner_op0 = XEXP (decomposed, 0); |
10032 | inner_op1 = XEXP (decomposed, 1); |
10033 | |
10034 | /* Special case (and (xor B C) (not A)), which is equivalent to |
10035 | (xor (ior A B) (ior A C)) */ |
10036 | if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT) |
10037 | { |
10038 | distributed = XEXP (distributed, 0); |
10039 | outer_code = IOR; |
10040 | } |
10041 | |
10042 | if (n == 0) |
10043 | { |
10044 | /* Distribute the second term. */ |
      new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
      new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
10047 | } |
10048 | else |
10049 | { |
10050 | /* Distribute the first term. */ |
      new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
      new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
10053 | } |
10054 | |
  tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
						     new_op0, new_op1));
  if (GET_CODE (tmp) != outer_code
      && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
	  < set_src_cost (x, mode, optimize_this_for_speed_p)))
10060 | return tmp; |
10061 | |
10062 | return NULL_RTX; |
10063 | } |
10064 | |
10065 | /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done |
10066 | in MODE. Return an equivalent form, if different from (and VAROP |
10067 | (const_int CONSTOP)). Otherwise, return NULL_RTX. */ |
10068 | |
10069 | static rtx |
10070 | simplify_and_const_int_1 (scalar_int_mode mode, rtx varop, |
10071 | unsigned HOST_WIDE_INT constop) |
10072 | { |
10073 | unsigned HOST_WIDE_INT nonzero; |
10074 | unsigned HOST_WIDE_INT orig_constop; |
10075 | rtx orig_varop; |
10076 | int i; |
10077 | |
10078 | orig_varop = varop; |
10079 | orig_constop = constop; |
10080 | if (GET_CODE (varop) == CLOBBER) |
10081 | return NULL_RTX; |
10082 | |
10083 | /* Simplify VAROP knowing that we will be only looking at some of the |
10084 | bits in it. |
10085 | |
10086 | Note by passing in CONSTOP, we guarantee that the bits not set in |
10087 | CONSTOP are not significant and will never be examined. We must |
10088 | ensure that is the case by explicitly masking out those bits |
10089 | before returning. */ |
  varop = force_to_mode (varop, mode, constop, false);
10091 | |
10092 | /* If VAROP is a CLOBBER, we will fail so return it. */ |
10093 | if (GET_CODE (varop) == CLOBBER) |
10094 | return varop; |
10095 | |
10096 | /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP |
10097 | to VAROP and return the new constant. */ |
10098 | if (CONST_INT_P (varop)) |
10099 | return gen_int_mode (INTVAL (varop) & constop, mode); |
10100 | |
10101 | /* See what bits may be nonzero in VAROP. Unlike the general case of |
10102 | a call to nonzero_bits, here we don't care about bits outside |
10103 | MODE unless WORD_REGISTER_OPERATIONS is true. */ |
10104 | |
10105 | scalar_int_mode tmode = mode; |
10106 | if (WORD_REGISTER_OPERATIONS && GET_MODE_BITSIZE (mode) < BITS_PER_WORD) |
10107 | tmode = word_mode; |
10108 | nonzero = nonzero_bits (varop, tmode) & GET_MODE_MASK (tmode); |
10109 | |
10110 | /* Turn off all bits in the constant that are known to already be zero. |
10111 | Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS |
10112 | which is tested below. */ |
10113 | |
10114 | constop &= nonzero; |
10115 | |
10116 | /* If we don't have any bits left, return zero. */ |
10117 | if (constop == 0 && !side_effects_p (varop)) |
10118 | return const0_rtx; |
10119 | |
10120 | /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is |
10121 | a power of two, we can replace this with an ASHIFT. */ |
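  /* E.g. with X known to be 0 or 1, (neg X) is 0 or all ones, so
     (and (neg X) (const_int 8)) is (ashift X (const_int 3)).  */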
10122 | if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), tmode) == 1 |
      && (i = exact_log2 (constop)) >= 0)
10124 | return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i); |
10125 | |
10126 | /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR |
10127 | or XOR, then try to apply the distributive law. This may eliminate |
10128 | operations if either branch can be simplified because of the AND. |
10129 | It may also make some cases more complex, but those cases probably |
10130 | won't match a pattern either with or without this. */ |
10131 | |
10132 | if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR) |
10133 | { |
10134 | scalar_int_mode varop_mode = as_a <scalar_int_mode> (GET_MODE (varop)); |
10135 | return |
10136 | gen_lowpart |
10137 | (mode, |
10138 | apply_distributive_law |
	(simplify_gen_binary (GET_CODE (varop), varop_mode,
			      simplify_and_const_int (NULL_RTX, varop_mode,
						      XEXP (varop, 0),
						      constop),
			      simplify_and_const_int (NULL_RTX, varop_mode,
						      XEXP (varop, 1),
						      constop))));
10146 | } |
10147 | |
10148 | /* If VAROP is PLUS, and the constant is a mask of low bits, distribute |
10149 | the AND and see if one of the operands simplifies to zero. If so, we |
10150 | may eliminate it. */ |
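  /* E.g. (and (plus X (const_int 16)) (const_int 15)) reduces to
     (and X (const_int 15)), since adding a multiple of 16 cannot change
     the low four bits.  */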
10151 | |
10152 | if (GET_CODE (varop) == PLUS |
      && pow2p_hwi (constop + 1))
10154 | { |
10155 | rtx o0, o1; |
10156 | |
10157 | o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop); |
10158 | o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop); |
10159 | if (o0 == const0_rtx) |
10160 | return o1; |
10161 | if (o1 == const0_rtx) |
10162 | return o0; |
10163 | } |
10164 | |
10165 | /* Make a SUBREG if necessary. If we can't make it, fail. */ |
10166 | varop = gen_lowpart (mode, varop); |
10167 | if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER) |
10168 | return NULL_RTX; |
10169 | |
10170 | /* If we are only masking insignificant bits, return VAROP. */ |
10171 | if (constop == nonzero) |
10172 | return varop; |
10173 | |
10174 | if (varop == orig_varop && constop == orig_constop) |
10175 | return NULL_RTX; |
10176 | |
10177 | /* Otherwise, return an AND. */ |
  return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
10179 | } |
10180 | |
10181 | |
10182 | /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done |
10183 | in MODE. |
10184 | |
10185 | Return an equivalent form, if different from X. Otherwise, return X. If |
10186 | X is zero, we are to always construct the equivalent form. */ |
10187 | |
10188 | static rtx |
10189 | simplify_and_const_int (rtx x, scalar_int_mode mode, rtx varop, |
10190 | unsigned HOST_WIDE_INT constop) |
10191 | { |
10192 | rtx tem = simplify_and_const_int_1 (mode, varop, constop); |
10193 | if (tem) |
10194 | return tem; |
10195 | |
10196 | if (!x) |
    x = simplify_gen_binary (AND, GET_MODE (varop), varop,
			     gen_int_mode (constop, mode));
10199 | if (GET_MODE (x) != mode) |
10200 | x = gen_lowpart (mode, x); |
10201 | return x; |
10202 | } |
10203 | |
10204 | /* Given a REG X of mode XMODE, compute which bits in X can be nonzero. |
10205 | We don't care about bits outside of those defined in MODE. |
10206 | We DO care about all the bits in MODE, even if XMODE is smaller than MODE. |
10207 | |
   For most X this is simply GET_MODE_MASK (XMODE), but if X is
10209 | a shift, AND, or zero_extract, we can do better. */ |
10210 | |
10211 | static rtx |
10212 | reg_nonzero_bits_for_combine (const_rtx x, scalar_int_mode xmode, |
10213 | scalar_int_mode mode, |
10214 | unsigned HOST_WIDE_INT *nonzero) |
10215 | { |
10216 | rtx tem; |
10217 | reg_stat_type *rsp; |
10218 | |
10219 | /* If X is a register whose nonzero bits value is current, use it. |
10220 | Otherwise, if X is a register whose value we can find, use that |
10221 | value. Otherwise, use the previously-computed global nonzero bits |
10222 | for this register. */ |
10223 | |
10224 | rsp = ®_stat[REGNO (x)]; |
10225 | if (rsp->last_set_value != 0 |
10226 | && (rsp->last_set_mode == mode |
10227 | || (REGNO (x) >= FIRST_PSEUDO_REGISTER |
10228 | && GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT |
10229 | && GET_MODE_CLASS (mode) == MODE_INT)) |
10230 | && ((rsp->last_set_label >= label_tick_ebb_start |
10231 | && rsp->last_set_label < label_tick) |
10232 | || (rsp->last_set_label == label_tick |
10233 | && DF_INSN_LUID (rsp->last_set) < subst_low_luid) |
10234 | || (REGNO (x) >= FIRST_PSEUDO_REGISTER |
10235 | && REGNO (x) < reg_n_sets_max |
10236 | && REG_N_SETS (REGNO (x)) == 1 |
10237 | && !REGNO_REG_SET_P |
10238 | (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), |
10239 | REGNO (x))))) |
10240 | { |
10241 | /* Note that, even if the precision of last_set_mode is lower than that |
10242 | of mode, record_value_for_reg invoked nonzero_bits on the register |
10243 | with nonzero_bits_mode (because last_set_mode is necessarily integral |
10244 | and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode |
10245 | are all valid, hence in mode too since nonzero_bits_mode is defined |
10246 | to the largest HWI_COMPUTABLE_MODE_P mode. */ |
10247 | *nonzero &= rsp->last_set_nonzero_bits; |
10248 | return NULL; |
10249 | } |
10250 | |
10251 | tem = get_last_value (x); |
10252 | if (tem) |
10253 | { |
10254 | if (SHORT_IMMEDIATES_SIGN_EXTEND) |
	tem = sign_extend_short_imm (tem, xmode, GET_MODE_PRECISION (mode));
10256 | |
10257 | return tem; |
10258 | } |
10259 | |
10260 | if (nonzero_sign_valid && rsp->nonzero_bits) |
10261 | { |
10262 | unsigned HOST_WIDE_INT mask = rsp->nonzero_bits; |
10263 | |
      if (GET_MODE_PRECISION (xmode) < GET_MODE_PRECISION (mode))
10265 | /* We don't know anything about the upper bits. */ |
10266 | mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (xmode); |
10267 | |
10268 | *nonzero &= mask; |
10269 | } |
10270 | |
10271 | return NULL; |
10272 | } |
10273 | |
10274 | /* Given a reg X of mode XMODE, return the number of bits at the high-order |
10275 | end of X that are known to be equal to the sign bit. X will be used |
10276 | in mode MODE; the returned value will always be between 1 and the |
10277 | number of bits in MODE. */ |
10278 | |
10279 | static rtx |
10280 | reg_num_sign_bit_copies_for_combine (const_rtx x, scalar_int_mode xmode, |
10281 | scalar_int_mode mode, |
10282 | unsigned int *result) |
10283 | { |
10284 | rtx tem; |
10285 | reg_stat_type *rsp; |
10286 | |
10287 | rsp = ®_stat[REGNO (x)]; |
10288 | if (rsp->last_set_value != 0 |
10289 | && rsp->last_set_mode == mode |
10290 | && ((rsp->last_set_label >= label_tick_ebb_start |
10291 | && rsp->last_set_label < label_tick) |
10292 | || (rsp->last_set_label == label_tick |
10293 | && DF_INSN_LUID (rsp->last_set) < subst_low_luid) |
10294 | || (REGNO (x) >= FIRST_PSEUDO_REGISTER |
10295 | && REGNO (x) < reg_n_sets_max |
10296 | && REG_N_SETS (REGNO (x)) == 1 |
10297 | && !REGNO_REG_SET_P |
10298 | (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), |
10299 | REGNO (x))))) |
10300 | { |
10301 | *result = rsp->last_set_sign_bit_copies; |
10302 | return NULL; |
10303 | } |
10304 | |
10305 | tem = get_last_value (x); |
10306 | if (tem != 0) |
10307 | return tem; |
10308 | |
10309 | if (nonzero_sign_valid && rsp->sign_bit_copies != 0 |
      && GET_MODE_PRECISION (xmode) == GET_MODE_PRECISION (mode))
10311 | *result = rsp->sign_bit_copies; |
10312 | |
10313 | return NULL; |
10314 | } |
10315 | |
10316 | /* Return the number of "extended" bits there are in X, when interpreted |
10317 | as a quantity in MODE whose signedness is indicated by UNSIGNEDP. For |
10318 | unsigned quantities, this is the number of high-order zero bits. |
10319 | For signed quantities, this is the number of copies of the sign bit |
   minus 1.  In both cases, this function returns the number of "spare"
10321 | bits. For example, if two quantities for which this function returns |
10322 | at least 1 are added, the addition is known not to overflow. |
10323 | |
10324 | This function will always return 0 unless called during combine, which |
10325 | implies that it must be called from a define_split. */ |
10326 | |
10327 | unsigned int |
10328 | extended_count (const_rtx x, machine_mode mode, bool unsignedp) |
10329 | { |
10330 | if (nonzero_sign_valid == 0) |
10331 | return 0; |
10332 | |
10333 | scalar_int_mode int_mode; |
10334 | return (unsignedp |
	  ? (is_a <scalar_int_mode> (mode, &int_mode)
	     && HWI_COMPUTABLE_MODE_P (int_mode)
	     ? (unsigned int) (GET_MODE_PRECISION (int_mode) - 1
			       - floor_log2 (nonzero_bits (x, int_mode)))
10339 | : 0) |
10340 | : num_sign_bit_copies (x, mode) - 1); |
10341 | } |
10342 | |
10343 | /* This function is called from `simplify_shift_const' to merge two |
10344 | outer operations. Specifically, we have already found that we need |
10345 | to perform operation *POP0 with constant *PCONST0 at the outermost |
10346 | position. We would now like to also perform OP1 with constant CONST1 |
10347 | (with *POP0 being done last). |
10348 | |
10349 | Return true if we can do the operation and update *POP0 and *PCONST0 with |
10350 | the resulting operation. *PCOMP_P is set to true if we would need to |
10351 | complement the innermost operand, otherwise it is unchanged. |
10352 | |
10353 | MODE is the mode in which the operation will be done. No bits outside |
10354 | the width of this mode matter. It is assumed that the width of this mode |
10355 | is smaller than or equal to HOST_BITS_PER_WIDE_INT. |
10356 | |
   If *POP0 or OP1 is UNKNOWN, it means no operation is required.  Only NEG,
   PLUS, IOR, XOR, and AND are supported.  We may set *POP0 to SET if the
   proper result is simply *PCONST0.
10360 | |
10361 | If the resulting operation cannot be expressed as one operation, we |
10362 | return false and do not change *POP0, *PCONST0, and *PCOMP_P. */ |
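/* For example, merging a second AND with constant 0xf0 into a pending
   outer AND with constant 0xff yields a single AND with constant 0xf0,
   while merging two XORs with the same constant cancels to no operation
   at all.  */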
10363 | |
10364 | static bool |
10365 | merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, |
10366 | enum rtx_code op1, HOST_WIDE_INT const1, |
10367 | machine_mode mode, bool *pcomp_p) |
10368 | { |
10369 | enum rtx_code op0 = *pop0; |
10370 | HOST_WIDE_INT const0 = *pconst0; |
10371 | |
10372 | const0 &= GET_MODE_MASK (mode); |
10373 | const1 &= GET_MODE_MASK (mode); |
10374 | |
10375 | /* If OP0 is an AND, clear unimportant bits in CONST1. */ |
10376 | if (op0 == AND) |
10377 | const1 &= const0; |
10378 | |
10379 | /* If OP0 or OP1 is UNKNOWN, this is easy. Similarly if they are the same or |
10380 | if OP0 is SET. */ |
10381 | |
10382 | if (op1 == UNKNOWN || op0 == SET) |
10383 | return true; |
10384 | |
10385 | else if (op0 == UNKNOWN) |
10386 | op0 = op1, const0 = const1; |
10387 | |
10388 | else if (op0 == op1) |
10389 | { |
10390 | switch (op0) |
10391 | { |
10392 | case AND: |
10393 | const0 &= const1; |
10394 | break; |
10395 | case IOR: |
10396 | const0 |= const1; |
10397 | break; |
10398 | case XOR: |
10399 | const0 ^= const1; |
10400 | break; |
10401 | case PLUS: |
10402 | const0 += const1; |
10403 | break; |
10404 | case NEG: |
10405 | op0 = UNKNOWN; |
10406 | break; |
10407 | default: |
10408 | break; |
10409 | } |
10410 | } |
10411 | |
10412 | /* Otherwise, if either is a PLUS or NEG, we can't do anything. */ |
10413 | else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG) |
10414 | return false; |
10415 | |
10416 | /* If the two constants aren't the same, we can't do anything. The |
10417 | remaining six cases can all be done. */ |
10418 | else if (const0 != const1) |
10419 | return false; |
10420 | |
10421 | else |
10422 | switch (op0) |
10423 | { |
10424 | case IOR: |
10425 | if (op1 == AND) |
10426 | /* (a & b) | b == b */ |
10427 | op0 = SET; |
10428 | else /* op1 == XOR */ |
10429 | /* (a ^ b) | b == a | b */ |
10430 | {;} |
10431 | break; |
10432 | |
10433 | case XOR: |
10434 | if (op1 == AND) |
10435 | /* (a & b) ^ b == (~a) & b */ |
10436 | op0 = AND, *pcomp_p = true; |
10437 | else /* op1 == IOR */ |
10438 | /* (a | b) ^ b == a & ~b */ |
10439 | op0 = AND, const0 = ~const0; |
10440 | break; |
10441 | |
10442 | case AND: |
10443 | if (op1 == IOR) |
10444 | /* (a | b) & b == b */ |
10445 | op0 = SET; |
10446 | else /* op1 == XOR */ |
	  /* (a ^ b) & b == (~a) & b */
10448 | *pcomp_p = true; |
10449 | break; |
10450 | default: |
10451 | break; |
10452 | } |
10453 | |
10454 | /* Check for NO-OP cases. */ |
10455 | const0 &= GET_MODE_MASK (mode); |
10456 | if (const0 == 0 |
10457 | && (op0 == IOR || op0 == XOR || op0 == PLUS)) |
10458 | op0 = UNKNOWN; |
10459 | else if (const0 == 0 && op0 == AND) |
10460 | op0 = SET; |
10461 | else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode) |
10462 | && op0 == AND) |
10463 | op0 = UNKNOWN; |
10464 | |
10465 | *pop0 = op0; |
10466 | |
10467 | /* ??? Slightly redundant with the above mask, but not entirely. |
10468 | Moving this above means we'd have to sign-extend the mode mask |
10469 | for the final test. */ |
10470 | if (op0 != UNKNOWN && op0 != NEG) |
10471 | *pconst0 = trunc_int_for_mode (const0, mode); |
10472 | |
10473 | return true; |
10474 | } |
10475 | |
10476 | /* A helper to simplify_shift_const_1 to determine the mode we can perform |
10477 | the shift in. The original shift operation CODE is performed on OP in |
10478 | ORIG_MODE. Return the wider mode MODE if we can perform the operation |
10479 | in that mode. Return ORIG_MODE otherwise. We can also assume that the |
10480 | result of the shift is subject to operation OUTER_CODE with operand |
10481 | OUTER_CONST. */ |
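/* For instance, an LSHIFTRT performed on a QImode value can instead be
   performed in SImode when the SImode value has no nonzero bits above the
   low 8, since the bits shifted in from the left are then zero either
   way.  */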
10482 | |
10483 | static scalar_int_mode |
10484 | try_widen_shift_mode (enum rtx_code code, rtx op, int count, |
10485 | scalar_int_mode orig_mode, scalar_int_mode mode, |
10486 | enum rtx_code outer_code, HOST_WIDE_INT outer_const) |
10487 | { |
10488 | gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode)); |
10489 | |
  /* In general we can't perform the shift in a wider mode for right shifts
     and rotates.  */
10491 | switch (code) |
10492 | { |
10493 | case ASHIFTRT: |
10494 | /* We can still widen if the bits brought in from the left are identical |
10495 | to the sign bit of ORIG_MODE. */ |
10496 | if (num_sign_bit_copies (op, mode) |
10497 | > (unsigned) (GET_MODE_PRECISION (mode) |
		      - GET_MODE_PRECISION (orig_mode)))
10499 | return mode; |
10500 | return orig_mode; |
10501 | |
10502 | case LSHIFTRT: |
10503 | /* Similarly here but with zero bits. */ |
10504 | if (HWI_COMPUTABLE_MODE_P (mode) |
10505 | && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0) |
10506 | return mode; |
10507 | |
10508 | /* We can also widen if the bits brought in will be masked off. This |
10509 | operation is performed in ORIG_MODE. */ |
10510 | if (outer_code == AND) |
10511 | { |
10512 | int care_bits = low_bitmask_len (orig_mode, outer_const); |
10513 | |
10514 | if (care_bits >= 0 |
	      && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
10516 | return mode; |
10517 | } |
10518 | /* fall through */ |
10519 | |
10520 | case ROTATE: |
10521 | return orig_mode; |
10522 | |
10523 | case ROTATERT: |
10524 | gcc_unreachable (); |
10525 | |
10526 | default: |
10527 | return mode; |
10528 | } |
10529 | } |
10530 | |
10531 | /* Simplify a shift of VAROP by ORIG_COUNT bits. CODE says what kind |
10532 | of shift. The result of the shift is RESULT_MODE. Return NULL_RTX |
10533 | if we cannot simplify it. Otherwise, return a simplified value. |
10534 | |
10535 | The shift is normally computed in the widest mode we find in VAROP, as |
10536 | long as it isn't a different number of words than RESULT_MODE. Exceptions |
10537 | are ASHIFTRT and ROTATE, which are always done in their original mode. */ |
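/* For example, a request to shift (ashift:SI X (const_int 2)) left by
   three more places is simplified here to (ashift:SI X (const_int 5)).  */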
10538 | |
10539 | static rtx |
10540 | simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode, |
10541 | rtx varop, int orig_count) |
10542 | { |
10543 | enum rtx_code orig_code = code; |
10544 | rtx orig_varop = varop; |
10545 | int count, log2; |
10546 | machine_mode mode = result_mode; |
10547 | machine_mode shift_mode; |
10548 | scalar_int_mode tmode, inner_mode, int_mode, int_varop_mode, int_result_mode; |
10549 | /* We form (outer_op (code varop count) (outer_const)). */ |
10550 | enum rtx_code outer_op = UNKNOWN; |
10551 | HOST_WIDE_INT outer_const = 0; |
10552 | bool complement_p = false; |
10553 | rtx new_rtx, x; |
10554 | |
  /* Make sure to truncate the "natural" shift on the way in.  We don't
10556 | want to do this inside the loop as it makes it more difficult to |
10557 | combine shifts. */ |
10558 | if (SHIFT_COUNT_TRUNCATED) |
10559 | orig_count &= GET_MODE_UNIT_BITSIZE (mode) - 1; |
10560 | |
10561 | /* If we were given an invalid count, don't do anything except exactly |
10562 | what was requested. */ |
10563 | |
10564 | if (orig_count < 0 || orig_count >= (int) GET_MODE_UNIT_PRECISION (mode)) |
10565 | return NULL_RTX; |
10566 | |
10567 | count = orig_count; |
10568 | |
10569 | /* Unless one of the branches of the `if' in this loop does a `continue', |
10570 | we will `break' the loop after the `if'. */ |
10571 | |
10572 | while (count != 0) |
10573 | { |
10574 | /* If we have an operand of (clobber (const_int 0)), fail. */ |
10575 | if (GET_CODE (varop) == CLOBBER) |
10576 | return NULL_RTX; |
10577 | |
10578 | /* Convert ROTATERT to ROTATE. */ |
10579 | if (code == ROTATERT) |
10580 | { |
10581 | unsigned int bitsize = GET_MODE_UNIT_PRECISION (result_mode); |
10582 | code = ROTATE; |
10583 | count = bitsize - count; |
10584 | } |
10585 | |
10586 | shift_mode = result_mode; |
10587 | if (shift_mode != mode) |
10588 | { |
10589 | /* We only change the modes of scalar shifts. */ |
	  int_mode = as_a <scalar_int_mode> (mode);
	  int_result_mode = as_a <scalar_int_mode> (result_mode);
	  shift_mode = try_widen_shift_mode (code, varop, count,
					     int_result_mode, int_mode,
					     outer_op, outer_const);
10595 | } |
10596 | |
10597 | scalar_int_mode shift_unit_mode |
10598 | = as_a <scalar_int_mode> (GET_MODE_INNER (shift_mode)); |
10599 | |
10600 | /* Handle cases where the count is greater than the size of the mode |
	 minus 1.  For ASHIFTRT, use the size minus one as the count (this can
10602 | occur when simplifying (lshiftrt (ashiftrt ..))). For rotates, |
10603 | take the count modulo the size. For other shifts, the result is |
10604 | zero. |
10605 | |
10606 | Since these shifts are being produced by the compiler by combining |
10607 | multiple operations, each of which are defined, we know what the |
10608 | result is supposed to be. */ |
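      /* For instance, merging (lshiftrt (ashiftrt X 31) 2) in SImode first
	 yields an ASHIFTRT whose count of 33 is then clamped to 31 here.  */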
10609 | |
      if (count > (GET_MODE_PRECISION (shift_unit_mode) - 1))
	{
	  if (code == ASHIFTRT)
	    count = GET_MODE_PRECISION (shift_unit_mode) - 1;
	  else if (code == ROTATE || code == ROTATERT)
	    count %= GET_MODE_PRECISION (shift_unit_mode);
10616 | else |
10617 | { |
10618 | /* We can't simply return zero because there may be an |
10619 | outer op. */ |
10620 | varop = const0_rtx; |
10621 | count = 0; |
10622 | break; |
10623 | } |
10624 | } |
10625 | |
10626 | /* If we discovered we had to complement VAROP, leave. Making a NOT |
10627 | here would cause an infinite loop. */ |
10628 | if (complement_p) |
10629 | break; |
10630 | |
10631 | if (shift_mode == shift_unit_mode) |
10632 | { |
10633 | /* An arithmetic right shift of a quantity known to be -1 or 0 |
10634 | is a no-op. */ |
10635 | if (code == ASHIFTRT |
10636 | && (num_sign_bit_copies (varop, shift_unit_mode) |
		  == GET_MODE_PRECISION (shift_unit_mode)))
10638 | { |
10639 | count = 0; |
10640 | break; |
10641 | } |
10642 | |
10643 | /* If we are doing an arithmetic right shift and discarding all but |
10644 | the sign bit copies, this is equivalent to doing a shift by the |
10645 | bitsize minus one. Convert it into that shift because it will |
10646 | often allow other simplifications. */ |
10647 | |
10648 | if (code == ASHIFTRT |
10649 | && (count + num_sign_bit_copies (varop, shift_unit_mode) |
		  >= GET_MODE_PRECISION (shift_unit_mode)))
	    count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10652 | |
10653 | /* We simplify the tests below and elsewhere by converting |
10654 | ASHIFTRT to LSHIFTRT if we know the sign bit is clear. |
10655 | `make_compound_operation' will convert it to an ASHIFTRT for |
10656 | those machines (such as VAX) that don't have an LSHIFTRT. */ |
10657 | if (code == ASHIFTRT |
	      && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10659 | && val_signbit_known_clear_p (shift_unit_mode, |
10660 | nonzero_bits (varop, |
10661 | shift_unit_mode))) |
10662 | code = LSHIFTRT; |
10663 | |
10664 | if (((code == LSHIFTRT |
	       && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
	       && !(nonzero_bits (varop, shift_unit_mode) >> count))
	      || (code == ASHIFT
		  && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10669 | && !((nonzero_bits (varop, shift_unit_mode) << count) |
10670 | & GET_MODE_MASK (shift_unit_mode)))) |
10671 | && !side_effects_p (varop)) |
10672 | varop = const0_rtx; |
10673 | } |
10674 | |
10675 | switch (GET_CODE (varop)) |
10676 | { |
10677 | case SIGN_EXTEND: |
10678 | case ZERO_EXTEND: |
10679 | case SIGN_EXTRACT: |
10680 | case ZERO_EXTRACT: |
	  new_rtx = expand_compound_operation (varop);
10682 | if (new_rtx != varop) |
10683 | { |
10684 | varop = new_rtx; |
10685 | continue; |
10686 | } |
10687 | break; |
10688 | |
10689 | case MEM: |
10690 | /* The following rules apply only to scalars. */ |
10691 | if (shift_mode != shift_unit_mode) |
10692 | break; |
	  int_mode = as_a <scalar_int_mode> (mode);
10694 | |
10695 | /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH |
10696 | minus the width of a smaller mode, we can do this with a |
10697 | SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */ |
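	  /* E.g. (lshiftrt:SI (mem:SI A) (const_int 24)) can become
	     (zero_extend:SI (mem:QI A')), where A' addresses the byte that
	     holds the high-order 8 bits.  */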
10698 | if ((code == ASHIFTRT || code == LSHIFTRT) |
10699 | && ! mode_dependent_address_p (XEXP (varop, 0), |
10700 | MEM_ADDR_SPACE (varop)) |
10701 | && ! MEM_VOLATILE_P (varop) |
	      && (int_mode_for_size (GET_MODE_BITSIZE (int_mode) - count, 1)
		  .exists (&tmode)))
10704 | { |
10705 | new_rtx = adjust_address_nv (varop, tmode, |
10706 | BYTES_BIG_ENDIAN ? 0 |
10707 | : count / BITS_PER_UNIT); |
10708 | |
10709 | varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND |
10710 | : ZERO_EXTEND, int_mode, new_rtx); |
10711 | count = 0; |
10712 | continue; |
10713 | } |
10714 | break; |
10715 | |
10716 | case SUBREG: |
10717 | /* The following rules apply only to scalars. */ |
10718 | if (shift_mode != shift_unit_mode) |
10719 | break; |
	  int_mode = as_a <scalar_int_mode> (mode);
10721 | int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop)); |
10722 | |
10723 | /* If VAROP is a SUBREG, strip it as long as the inner operand has |
10724 | the same number of words as what we've seen so far. Then store |
10725 | the widest mode in MODE. */ |
10726 | if (subreg_lowpart_p (varop) |
	      && is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode)
	      && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_varop_mode)
10729 | && (CEIL (GET_MODE_SIZE (inner_mode), UNITS_PER_WORD) |
10730 | == CEIL (GET_MODE_SIZE (int_mode), UNITS_PER_WORD)) |
10731 | && GET_MODE_CLASS (int_varop_mode) == MODE_INT) |
10732 | { |
10733 | varop = SUBREG_REG (varop); |
	      if (GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_mode))
10735 | mode = inner_mode; |
10736 | continue; |
10737 | } |
10738 | break; |
10739 | |
10740 | case MULT: |
10741 | /* Some machines use MULT instead of ASHIFT because MULT |
10742 | is cheaper. But it is still better on those machines to |
10743 | merge two shifts into one. */ |
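	  /* E.g. (mult X (const_int 8)) is rewritten here as
	     (ashift X (const_int 3)) so it can merge with the outer
	     shift.  */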
10744 | if (CONST_INT_P (XEXP (varop, 1)) |
10745 | && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0) |
10746 | { |
10747 | rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2); |
	      varop = simplify_gen_binary (ASHIFT, GET_MODE (varop),
					   XEXP (varop, 0), log2_rtx);
10750 | continue; |
10751 | } |
10752 | break; |
10753 | |
10754 | case UDIV: |
10755 | /* Similar, for when divides are cheaper. */ |
10756 | if (CONST_INT_P (XEXP (varop, 1)) |
10757 | && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0) |
10758 | { |
10759 | rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2); |
	      varop = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
					   XEXP (varop, 0), log2_rtx);
10762 | continue; |
10763 | } |
10764 | break; |
10765 | |
10766 | case ASHIFTRT: |
10767 | /* If we are extracting just the sign bit of an arithmetic |
10768 | right shift, that shift is not needed. However, the sign |
10769 | bit of a wider mode may be different from what would be |
10770 | interpreted as the sign bit in a narrower mode, so, if |
10771 | the result is narrower, don't discard the shift. */ |
10772 | if (code == LSHIFTRT |
10773 | && count == (GET_MODE_UNIT_BITSIZE (result_mode) - 1) |
10774 | && (GET_MODE_UNIT_BITSIZE (result_mode) |
10775 | >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop)))) |
10776 | { |
10777 | varop = XEXP (varop, 0); |
10778 | continue; |
10779 | } |
10780 | |
10781 | /* fall through */ |
10782 | |
10783 | case LSHIFTRT: |
10784 | case ASHIFT: |
10785 | case ROTATE: |
10786 | /* The following rules apply only to scalars. */ |
10787 | if (shift_mode != shift_unit_mode) |
10788 | break; |
	  int_mode = as_a <scalar_int_mode> (mode);
	  int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
	  int_result_mode = as_a <scalar_int_mode> (result_mode);
10792 | |
10793 | /* Here we have two nested shifts. The result is usually the |
10794 | AND of a new shift with a mask. We compute the result below. */ |
10795 | if (CONST_INT_P (XEXP (varop, 1)) |
10796 | && INTVAL (XEXP (varop, 1)) >= 0 |
	      && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (int_varop_mode)
	      && HWI_COMPUTABLE_MODE_P (int_result_mode)
	      && HWI_COMPUTABLE_MODE_P (int_mode))
10800 | { |
10801 | enum rtx_code first_code = GET_CODE (varop); |
10802 | unsigned int first_count = INTVAL (XEXP (varop, 1)); |
10803 | unsigned HOST_WIDE_INT mask; |
10804 | rtx mask_rtx; |
10805 | |
10806 | /* We have one common special case. We can't do any merging if |
10807 | the inner code is an ASHIFTRT of a smaller mode. However, if |
10808 | we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2) |
10809 | with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2), |
10810 | we can convert it to |
10811 | (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1). |
10812 | This simplifies certain SIGN_EXTEND operations. */ |
10813 | if (code == ASHIFT && first_code == ASHIFTRT |
		  && count == (GET_MODE_PRECISION (int_result_mode)
			       - GET_MODE_PRECISION (int_varop_mode)))
10816 | { |
10817 | /* C3 has the low-order C1 bits zero. */ |
10818 | |
10819 | mask = GET_MODE_MASK (int_mode) |
10820 | & ~((HOST_WIDE_INT_1U << first_count) - 1); |
10821 | |
		  varop = simplify_and_const_int (NULL_RTX, int_result_mode,
						  XEXP (varop, 0), mask);
10824 | varop = simplify_shift_const (NULL_RTX, ASHIFT, |
10825 | int_result_mode, varop, count); |
10826 | count = first_count; |
10827 | code = ASHIFTRT; |
10828 | continue; |
10829 | } |
10830 | |
10831 | /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more |
10832 | than C1 high-order bits equal to the sign bit, we can convert |
10833 | this to either an ASHIFT or an ASHIFTRT depending on the |
10834 | two counts. |
10835 | |
10836 | We cannot do this if VAROP's mode is not SHIFT_UNIT_MODE. */ |
10837 | |
10838 | if (code == ASHIFTRT && first_code == ASHIFT |
10839 | && int_varop_mode == shift_unit_mode |
10840 | && (num_sign_bit_copies (XEXP (varop, 0), shift_unit_mode) |
10841 | > first_count)) |
10842 | { |
10843 | varop = XEXP (varop, 0); |
10844 | count -= first_count; |
10845 | if (count < 0) |
10846 | { |
10847 | count = -count; |
10848 | code = ASHIFT; |
10849 | } |
10850 | |
10851 | continue; |
10852 | } |
10853 | |
10854 | /* There are some cases we can't do. If CODE is ASHIFTRT, |
10855 | we can only do this if FIRST_CODE is also ASHIFTRT. |
10856 | |
10857 | We can't do the case when CODE is ROTATE and FIRST_CODE is |
10858 | ASHIFTRT. |
10859 | |
10860 | If the mode of this shift is not the mode of the outer shift, |
10861 | we can't do this if either shift is a right shift or ROTATE. |
10862 | |
10863 | Finally, we can't do any of these if the mode is too wide |
10864 | unless the codes are the same. |
10865 | |
10866 | Handle the case where the shift codes are the same |
10867 | first. */ |
10868 | |
10869 | if (code == first_code) |
10870 | { |
10871 | if (int_varop_mode != int_result_mode |
10872 | && (code == ASHIFTRT || code == LSHIFTRT |
10873 | || code == ROTATE)) |
10874 | break; |
10875 | |
10876 | count += first_count; |
10877 | varop = XEXP (varop, 0); |
10878 | continue; |
10879 | } |
10880 | |
10881 | if (code == ASHIFTRT |
10882 | || (code == ROTATE && first_code == ASHIFTRT) |
		  || GET_MODE_PRECISION (int_mode) > HOST_BITS_PER_WIDE_INT
10884 | || (int_varop_mode != int_result_mode |
10885 | && (first_code == ASHIFTRT || first_code == LSHIFTRT |
10886 | || first_code == ROTATE |
10887 | || code == ROTATE))) |
10888 | break; |
10889 | |
10890 | /* To compute the mask to apply after the shift, shift the |
10891 | nonzero bits of the inner shift the same way the |
10892 | outer shift will. */ |
10893 | |
10894 | mask_rtx = gen_int_mode (nonzero_bits (varop, int_varop_mode), |
10895 | int_result_mode); |
10896 | rtx count_rtx = gen_int_shift_amount (int_result_mode, count); |
10897 | mask_rtx |
10898 | = simplify_const_binary_operation (code, int_result_mode, |
10899 | mask_rtx, count_rtx); |
10900 | |
10901 | /* Give up if we can't compute an outer operation to use. */ |
10902 | if (mask_rtx == 0 |
10903 | || !CONST_INT_P (mask_rtx) |
		  || ! merge_outer_ops (&outer_op, &outer_const, AND,
					INTVAL (mask_rtx),
					int_result_mode, &complement_p))
10907 | break; |
10908 | |
10909 | /* If the shifts are in the same direction, we add the |
10910 | counts. Otherwise, we subtract them. */ |
10911 | if ((code == ASHIFTRT || code == LSHIFTRT) |
10912 | == (first_code == ASHIFTRT || first_code == LSHIFTRT)) |
10913 | count += first_count; |
10914 | else |
10915 | count -= first_count; |
10916 | |
10917 | /* If COUNT is positive, the new shift is usually CODE, |
10918 | except for the two exceptions below, in which case it is |
10919 | FIRST_CODE. If the count is negative, FIRST_CODE should |
	     always be used.  */
10921 | if (count > 0 |
10922 | && ((first_code == ROTATE && code == ASHIFT) |
10923 | || (first_code == ASHIFTRT && code == LSHIFTRT))) |
10924 | code = first_code; |
10925 | else if (count < 0) |
10926 | code = first_code, count = -count; |
10927 | |
10928 | varop = XEXP (varop, 0); |
10929 | continue; |
10930 | } |
10931 | |
10932 | /* If we have (A << B << C) for any shift, we can convert this to |
10933 | (A << C << B). This wins if A is a constant. Only try this if |
10934 | B is not a constant. */ |
10935 | |
10936 | else if (GET_CODE (varop) == code |
10937 | && CONST_INT_P (XEXP (varop, 0)) |
10938 | && !CONST_INT_P (XEXP (varop, 1))) |
10939 | { |
10940 | /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make |
10941 | sure the result will be masked. See PR70222. */ |
10942 | if (code == LSHIFTRT |
10943 | && int_mode != int_result_mode |
	      && !merge_outer_ops (&outer_op, &outer_const, AND,
				   GET_MODE_MASK (int_result_mode)
				   >> orig_count, int_result_mode,
				   &complement_p))
10948 | break; |
10949 | /* For ((int) (cstLL >> count)) >> cst2 just give up. Queuing |
10950 | up outer sign extension (often left and right shift) is |
10951 | hardly more efficient than the original. See PR70429. |
10952 | Similarly punt for rotates with different modes. |
10953 | See PR97386. */ |
10954 | if ((code == ASHIFTRT || code == ROTATE) |
10955 | && int_mode != int_result_mode) |
10956 | break; |
10957 | |
10958 | rtx count_rtx = gen_int_shift_amount (int_result_mode, count); |
10959 | rtx new_rtx = simplify_const_binary_operation (code, int_mode, |
10960 | XEXP (varop, 0), |
10961 | count_rtx); |
10962 | varop = gen_rtx_fmt_ee (code, int_mode, new_rtx, XEXP (varop, 1)); |
10963 | count = 0; |
10964 | continue; |
10965 | } |
10966 | break; |
10967 | |
10968 | case NOT: |
10969 | /* The following rules apply only to scalars. */ |
10970 | if (shift_mode != shift_unit_mode) |
10971 | break; |
10972 | |
10973 | /* Make this fit the case below. */ |
10974 | varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx); |
10975 | continue; |
10976 | |
10977 | case IOR: |
10978 | case AND: |
10979 | case XOR: |
10980 | /* The following rules apply only to scalars. */ |
10981 | if (shift_mode != shift_unit_mode) |
10982 | break; |
10983 | int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop)); |
	  int_result_mode = as_a <scalar_int_mode> (result_mode);
10985 | |
10986 | /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C) |
10987 | with C the size of VAROP - 1 and the shift is logical if |
10988 | STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1, |
10989 | we have an (le X 0) operation. If we have an arithmetic shift |
10990 | and STORE_FLAG_VALUE is 1 or we have a logical shift with |
10991 | STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */ |
10992 | |
10993 | if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS |
10994 | && XEXP (XEXP (varop, 0), 1) == constm1_rtx |
10995 | && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1) |
10996 | && (code == LSHIFTRT || code == ASHIFTRT) |
	      && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
10998 | && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1))) |
10999 | { |
11000 | count = 0; |
11001 | varop = gen_rtx_LE (int_varop_mode, XEXP (varop, 1), |
11002 | const0_rtx); |
11003 | |
11004 | if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT) |
11005 | varop = gen_rtx_NEG (int_varop_mode, varop); |
11006 | |
11007 | continue; |
11008 | } |
11009 | |
11010 | /* If we have (shift (logical)), move the logical to the outside |
11011 | to allow it to possibly combine with another logical and the |
11012 | shift to combine with another shift. This also canonicalizes to |
11013 | what a ZERO_EXTRACT looks like. Also, some machines have |
11014 | (and (shift)) insns. */ |
11015 | |
11016 | if (CONST_INT_P (XEXP (varop, 1)) |
11017 | /* We can't do this if we have (ashiftrt (xor)) and the |
11018 | constant has its sign bit set in shift_unit_mode with |
11019 | shift_unit_mode wider than result_mode. */ |
11020 | && !(code == ASHIFTRT && GET_CODE (varop) == XOR |
11021 | && int_result_mode != shift_unit_mode |
11022 | && trunc_int_for_mode (INTVAL (XEXP (varop, 1)), |
11023 | shift_unit_mode) < 0) |
11024 | && (new_rtx = simplify_const_binary_operation |
11025 | (code, int_result_mode, |
11026 | gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode), |
11027 | gen_int_shift_amount (int_result_mode, count))) != 0 |
11028 | && CONST_INT_P (new_rtx) |
	      && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
				  INTVAL (new_rtx), int_result_mode,
				  &complement_p))
11032 | { |
11033 | varop = XEXP (varop, 0); |
11034 | continue; |
11035 | } |
11036 | |
11037 | /* If we can't do that, try to simplify the shift in each arm of the |
11038 | logical expression, make a new logical expression, and apply |
11039 | the inverse distributive law. This also can't be done for |
11040 | (ashiftrt (xor)) where we've widened the shift and the constant |
11041 | changes the sign bit. */ |
11042 | if (CONST_INT_P (XEXP (varop, 1)) |
11043 | && !(code == ASHIFTRT && GET_CODE (varop) == XOR |
11044 | && int_result_mode != shift_unit_mode |
11045 | && trunc_int_for_mode (INTVAL (XEXP (varop, 1)), |
11046 | shift_unit_mode) < 0)) |
11047 | { |
11048 | rtx lhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode, |
11049 | XEXP (varop, 0), count); |
11050 | rtx rhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode, |
11051 | XEXP (varop, 1), count); |
11052 | |
	      varop = simplify_gen_binary (GET_CODE (varop), shift_unit_mode,
					   lhs, rhs);
	      varop = apply_distributive_law (varop);
11056 | |
11057 | count = 0; |
11058 | continue; |
11059 | } |
11060 | break; |
11061 | |
11062 | case EQ: |
11063 | /* The following rules apply only to scalars. */ |
11064 | if (shift_mode != shift_unit_mode) |
11065 | break; |
	  int_result_mode = as_a <scalar_int_mode> (result_mode);
11067 | |
11068 | /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE |
11069 | says that the sign bit can be tested, FOO has mode MODE, C is |
11070 | GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit |
11071 | that may be nonzero. */ |
11072 | if (code == LSHIFTRT |
11073 | && XEXP (varop, 1) == const0_rtx |
11074 | && GET_MODE (XEXP (varop, 0)) == int_result_mode |
	      && count == (GET_MODE_PRECISION (int_result_mode) - 1)
	      && HWI_COMPUTABLE_MODE_P (int_result_mode)
	      && STORE_FLAG_VALUE == -1
	      && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
	      && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
				  int_result_mode, &complement_p))
11081 | { |
11082 | varop = XEXP (varop, 0); |
11083 | count = 0; |
11084 | continue; |
11085 | } |
11086 | break; |
11087 | |
11088 | case NEG: |
11089 | /* The following rules apply only to scalars. */ |
11090 | if (shift_mode != shift_unit_mode) |
11091 | break; |
	  int_result_mode = as_a <scalar_int_mode> (result_mode);
11093 | |
11094 | /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less |
11095 | than the number of bits in the mode is equivalent to A. */ |
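	  /* With A in {0, 1}, (neg A) is 0 or all ones, whose high-order
	     bit equals A.  */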
11096 | if (code == LSHIFTRT |
	      && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11098 | && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1) |
11099 | { |
11100 | varop = XEXP (varop, 0); |
11101 | count = 0; |
11102 | continue; |
11103 | } |
11104 | |
11105 | /* NEG commutes with ASHIFT since it is multiplication. Move the |
11106 | NEG outside to allow shifts to combine. */ |
11107 | if (code == ASHIFT |
	      && merge_outer_ops (&outer_op, &outer_const, NEG, 0,
				  int_result_mode, &complement_p))
11110 | { |
11111 | varop = XEXP (varop, 0); |
11112 | continue; |
11113 | } |
11114 | break; |
11115 | |
11116 | case PLUS: |
11117 | /* The following rules apply only to scalars. */ |
11118 | if (shift_mode != shift_unit_mode) |
11119 | break; |
	  int_result_mode = as_a <scalar_int_mode> (result_mode);
11121 | |
11122 | /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C |
11123 | is one less than the number of bits in the mode is |
11124 | equivalent to (xor A 1). */ |
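	  /* With A equal to 0, (plus A -1) is all ones and its high-order
	     bit is 1; with A equal to 1 it is zero and the bit is 0, i.e.
	     the complement of A.  */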
11125 | if (code == LSHIFTRT |
	      && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11127 | && XEXP (varop, 1) == constm1_rtx |
11128 | && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1 |
	      && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
				  int_result_mode, &complement_p))
11131 | { |
11132 | count = 0; |
11133 | varop = XEXP (varop, 0); |
11134 | continue; |
11135 | } |
11136 | |
11137 | /* If we have (xshiftrt (plus FOO BAR) C), and the only bits |
11138 | that might be nonzero in BAR are those being shifted out and those |
11139 | bits are known zero in FOO, we can replace the PLUS with FOO. |
11140 | Similarly in the other operand order. This code occurs when |
11141 | we are computing the size of a variable-size array. */ |
11142 | |
11143 | if ((code == ASHIFTRT || code == LSHIFTRT) |
11144 | && count < HOST_BITS_PER_WIDE_INT |
11145 | && nonzero_bits (XEXP (varop, 1), int_result_mode) >> count == 0 |
11146 | && (nonzero_bits (XEXP (varop, 1), int_result_mode) |
11147 | & nonzero_bits (XEXP (varop, 0), int_result_mode)) == 0) |
11148 | { |
11149 | varop = XEXP (varop, 0); |
11150 | continue; |
11151 | } |
11152 | else if ((code == ASHIFTRT || code == LSHIFTRT) |
11153 | && count < HOST_BITS_PER_WIDE_INT |
		   && HWI_COMPUTABLE_MODE_P (int_result_mode)
11155 | && (nonzero_bits (XEXP (varop, 0), int_result_mode) |
11156 | >> count) == 0 |
11157 | && (nonzero_bits (XEXP (varop, 0), int_result_mode) |
11158 | & nonzero_bits (XEXP (varop, 1), int_result_mode)) == 0) |
11159 | { |
11160 | varop = XEXP (varop, 1); |
11161 | continue; |
11162 | } |
11163 | |
11164 | /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */ |
11165 | if (code == ASHIFT |
11166 | && CONST_INT_P (XEXP (varop, 1)) |
11167 | && (new_rtx = simplify_const_binary_operation |
11168 | (ASHIFT, int_result_mode, |
11169 | gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode), |
11170 | gen_int_shift_amount (int_result_mode, count))) != 0 |
11171 | && CONST_INT_P (new_rtx) |
	      && merge_outer_ops (&outer_op, &outer_const, PLUS,
				  INTVAL (new_rtx), int_result_mode,
				  &complement_p))
11175 | { |
11176 | varop = XEXP (varop, 0); |
11177 | continue; |
11178 | } |
11179 | |
	  /* Check for 'PLUS signbit', which is the canonical form of 'XOR
	     signbit', and attempt to change the PLUS to an XOR and move it
	     to the outer operation, as is done above in the AND/IOR/XOR
	     case for logical shifts.  See the logical handling above for
	     the reasoning.  */
11185 | if (code == LSHIFTRT |
11186 | && CONST_INT_P (XEXP (varop, 1)) |
11187 | && mode_signbit_p (int_result_mode, XEXP (varop, 1)) |
11188 | && (new_rtx = simplify_const_binary_operation |
11189 | (code, int_result_mode, |
11190 | gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode), |
11191 | gen_int_shift_amount (int_result_mode, count))) != 0 |
11192 | && CONST_INT_P (new_rtx) |
	      && merge_outer_ops (&outer_op, &outer_const, XOR,
				  INTVAL (new_rtx), int_result_mode,
				  &complement_p))
11196 | { |
11197 | varop = XEXP (varop, 0); |
11198 | continue; |
11199 | } |
11200 | |
11201 | break; |
11202 | |
11203 | case MINUS: |
11204 | /* The following rules apply only to scalars. */ |
11205 | if (shift_mode != shift_unit_mode) |
11206 | break; |
11207 | int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop)); |
11208 | |
	  /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
11210 | with C the size of VAROP - 1 and the shift is logical if |
11211 | STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1, |
11212 | we have a (gt X 0) operation. If the shift is arithmetic with |
11213 | STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1, |
11214 | we have a (neg (gt X 0)) operation. */ |
11215 | |
11216 | if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1) |
11217 | && GET_CODE (XEXP (varop, 0)) == ASHIFTRT |
	      && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
11219 | && (code == LSHIFTRT || code == ASHIFTRT) |
11220 | && CONST_INT_P (XEXP (XEXP (varop, 0), 1)) |
11221 | && INTVAL (XEXP (XEXP (varop, 0), 1)) == count |
11222 | && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1))) |
11223 | { |
11224 | count = 0; |
11225 | varop = gen_rtx_GT (int_varop_mode, XEXP (varop, 1), |
11226 | const0_rtx); |
11227 | |
11228 | if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT) |
11229 | varop = gen_rtx_NEG (int_varop_mode, varop); |
11230 | |
11231 | continue; |
11232 | } |
11233 | break; |
11234 | |
11235 | case TRUNCATE: |
11236 | /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt)) |
11237 | if the truncate does not affect the value. */ |
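	  /* E.g. (lshiftrt:QI (truncate:QI (lshiftrt:SI X (const_int 24)))
	     (const_int 2)) becomes
	     (truncate:QI (lshiftrt:SI X (const_int 26))).  */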
11238 | if (code == LSHIFTRT |
11239 | && GET_CODE (XEXP (varop, 0)) == LSHIFTRT |
11240 | && CONST_INT_P (XEXP (XEXP (varop, 0), 1)) |
11241 | && (INTVAL (XEXP (XEXP (varop, 0), 1)) |
11242 | >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0))) |
11243 | - GET_MODE_UNIT_PRECISION (GET_MODE (varop))))) |
11244 | { |
11245 | rtx varop_inner = XEXP (varop, 0); |
11246 | int new_count = count + INTVAL (XEXP (varop_inner, 1)); |
11247 | rtx new_count_rtx = gen_int_shift_amount (GET_MODE (varop_inner), |
11248 | new_count); |
11249 | varop_inner = gen_rtx_LSHIFTRT (GET_MODE (varop_inner), |
11250 | XEXP (varop_inner, 0), |
11251 | new_count_rtx); |
11252 | varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner); |
11253 | count = 0; |
11254 | continue; |
11255 | } |
11256 | break; |
11257 | |
11258 | default: |
11259 | break; |
11260 | } |
11261 | |
11262 | break; |
11263 | } |
11264 | |
11265 | shift_mode = result_mode; |
11266 | if (shift_mode != mode) |
11267 | { |
11268 | /* We only change the modes of scalar shifts. */ |
      int_mode = as_a <scalar_int_mode> (mode);
      int_result_mode = as_a <scalar_int_mode> (result_mode);
      shift_mode = try_widen_shift_mode (code, varop, count, int_result_mode,
					 int_mode, outer_op, outer_const);
11273 | } |
11274 | |
11275 | /* We have now finished analyzing the shift. The result should be |
11276 | a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If |
11277 | OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied |
11278 | to the result of the shift. OUTER_CONST is the relevant constant, |
11279 | but we must turn off all bits turned off in the shift. */ |
11280 | |
11281 | if (outer_op == UNKNOWN |
11282 | && orig_code == code && orig_count == count |
11283 | && varop == orig_varop |
11284 | && shift_mode == GET_MODE (varop)) |
11285 | return NULL_RTX; |
11286 | |
11287 | /* Make a SUBREG if necessary. If we can't make it, fail. */ |
11288 | varop = gen_lowpart (shift_mode, varop); |
11289 | if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER) |
11290 | return NULL_RTX; |
11291 | |
11292 | /* If we have an outer operation and we just made a shift, it is |
11293 | possible that we could have simplified the shift were it not |
11294 | for the outer operation. So try to do the simplification |
11295 | recursively. */ |
11296 | |
11297 | if (outer_op != UNKNOWN) |
    x = simplify_shift_const_1 (code, shift_mode, varop, count);
11299 | else |
11300 | x = NULL_RTX; |
11301 | |
11302 | if (x == NULL_RTX) |
    x = simplify_gen_binary (code, shift_mode, varop,
			     gen_int_shift_amount (shift_mode, count));
11305 | |
11306 | /* If we were doing an LSHIFTRT in a wider mode than it was originally, |
11307 | turn off all the bits that the shift would have turned off. */ |
11308 | if (orig_code == LSHIFTRT && result_mode != shift_mode) |
11309 | /* We only change the modes of scalar shifts. */ |
    x = simplify_and_const_int (NULL_RTX, as_a <scalar_int_mode> (shift_mode),
				x, GET_MODE_MASK (result_mode) >> orig_count);
11312 | |
11313 | /* Do the remainder of the processing in RESULT_MODE. */ |
  x = gen_lowpart_or_truncate (result_mode, x);
11315 | |
11316 | /* If COMPLEMENT_P is set, we have to complement X before doing the outer |
11317 | operation. */ |
11318 | if (complement_p) |
    x = simplify_gen_unary (NOT, result_mode, x, result_mode);
11320 | |
11321 | if (outer_op != UNKNOWN) |
11322 | { |
      int_result_mode = as_a <scalar_int_mode> (result_mode);
11324 | |
11325 | if (GET_RTX_CLASS (outer_op) != RTX_UNARY |
	  && GET_MODE_PRECISION (int_result_mode) < HOST_BITS_PER_WIDE_INT)
11327 | outer_const = trunc_int_for_mode (outer_const, int_result_mode); |
11328 | |
11329 | if (outer_op == AND) |
	x = simplify_and_const_int (NULL_RTX, int_result_mode, x, outer_const);
11331 | else if (outer_op == SET) |
11332 | { |
11333 | /* This means that we have determined that the result is |
11334 | equivalent to a constant. This should be rare. */ |
11335 | if (!side_effects_p (x)) |
11336 | x = GEN_INT (outer_const); |
11337 | } |
11338 | else if (GET_RTX_CLASS (outer_op) == RTX_UNARY) |
	x = simplify_gen_unary (outer_op, int_result_mode, x, int_result_mode);
11340 | else |
	x = simplify_gen_binary (outer_op, int_result_mode, x,
11342 | GEN_INT (outer_const)); |
11343 | } |
11344 | |
11345 | return x; |
11346 | } |
11347 | |
11348 | /* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift. |
11349 | The result of the shift is RESULT_MODE. If we cannot simplify it, |
11350 | return X or, if it is NULL, synthesize the expression with |
11351 | simplify_gen_binary. Otherwise, return a simplified value. |
11352 | |
11353 | The shift is normally computed in the widest mode we find in VAROP, as |
11354 | long as it isn't a different number of words than RESULT_MODE. Exceptions |
11355 | are ASHIFTRT and ROTATE, which are always done in their original mode. */ |
11356 | |
11357 | static rtx |
11358 | simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode, |
11359 | rtx varop, int count) |
11360 | { |
  rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
11362 | if (tem) |
11363 | return tem; |
11364 | |
11365 | if (!x) |
    x = simplify_gen_binary (code, GET_MODE (varop), varop,
			     gen_int_shift_amount (GET_MODE (varop), count));
11368 | if (GET_MODE (x) != result_mode) |
11369 | x = gen_lowpart (result_mode, x); |
11370 | return x; |
11371 | } |
11372 | |
11373 | |
11374 | /* A subroutine of recog_for_combine. See there for arguments and |
11375 | return value. */ |
11376 | |
11377 | static int |
11378 | recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes) |
11379 | { |
11380 | rtx pat = *pnewpat; |
11381 | rtx pat_without_clobbers; |
11382 | int insn_code_number; |
11383 | int num_clobbers_to_add = 0; |
11384 | int i; |
11385 | rtx notes = NULL_RTX; |
11386 | rtx old_notes, old_pat; |
11387 | int old_icode; |
11388 | |
11389 | /* If PAT is a PARALLEL, check to see if it contains the CLOBBER |
11390 | we use to indicate that something didn't match. If we find such a |
11391 | thing, force rejection. */ |
11392 | if (GET_CODE (pat) == PARALLEL) |
11393 | for (i = XVECLEN (pat, 0) - 1; i >= 0; i--) |
11394 | if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER |
11395 | && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx) |
11396 | return -1; |
11397 | |
11398 | old_pat = PATTERN (insn); |
11399 | old_notes = REG_NOTES (insn); |
11400 | PATTERN (insn) = pat; |
11401 | REG_NOTES (insn) = NULL_RTX; |
11402 | |
11403 | insn_code_number = recog (pat, insn, &num_clobbers_to_add); |
11404 | if (dump_file && (dump_flags & TDF_DETAILS)) |
11405 | { |
11406 | if (insn_code_number < 0) |
	fputs ("Failed to match this instruction:\n", dump_file);
      else
	fputs ("Successfully matched this instruction:\n", dump_file);
11410 | print_rtl_single (dump_file, pat); |
11411 | } |
11412 | |
  /* If the pattern wasn't recognized, there is the possibility that we
     previously had an insn that clobbered some register as a side effect,
     but the combined
11415 | insn doesn't need to do that. So try once more without the clobbers |
11416 | unless this represents an ASM insn. */ |
11417 | |
11418 | if (insn_code_number < 0 && ! check_asm_operands (pat) |
11419 | && GET_CODE (pat) == PARALLEL) |
11420 | { |
11421 | int pos; |
11422 | |
11423 | for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++) |
11424 | if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER) |
11425 | { |
11426 | if (i != pos) |
11427 | SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i)); |
11428 | pos++; |
11429 | } |
11430 | |
11431 | SUBST_INT (XVECLEN (pat, 0), pos); |
11432 | |
11433 | if (pos == 1) |
11434 | pat = XVECEXP (pat, 0, 0); |
11435 | |
11436 | PATTERN (insn) = pat; |
11437 | insn_code_number = recog (pat, insn, &num_clobbers_to_add); |
11438 | if (dump_file && (dump_flags & TDF_DETAILS)) |
11439 | { |
11440 | if (insn_code_number < 0) |
	    fputs ("Failed to match this instruction:\n", dump_file);
	  else
	    fputs ("Successfully matched this instruction:\n", dump_file);
11444 | print_rtl_single (dump_file, pat); |
11445 | } |
11446 | } |
11447 | |
11448 | pat_without_clobbers = pat; |
11449 | |
11450 | PATTERN (insn) = old_pat; |
11451 | REG_NOTES (insn) = old_notes; |
11452 | |
  /* Recognize all noop sets; these will be killed by a follow-up pass.  */
11454 | if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat)) |
11455 | insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0; |
11456 | |
  /* If we had any clobbers to add, make a new pattern that contains
11458 | them. Then check to make sure that all of them are dead. */ |
11459 | if (num_clobbers_to_add) |
11460 | { |
11461 | rtx newpat = gen_rtx_PARALLEL (VOIDmode, |
11462 | rtvec_alloc (GET_CODE (pat) == PARALLEL |
11463 | ? (XVECLEN (pat, 0) |
11464 | + num_clobbers_to_add) |
11465 | : num_clobbers_to_add + 1)); |
11466 | |
11467 | if (GET_CODE (pat) == PARALLEL) |
11468 | for (i = 0; i < XVECLEN (pat, 0); i++) |
11469 | XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i); |
11470 | else |
11471 | XVECEXP (newpat, 0, 0) = pat; |
11472 | |
11473 | add_clobbers (newpat, insn_code_number); |
11474 | |
11475 | for (i = XVECLEN (newpat, 0) - num_clobbers_to_add; |
11476 | i < XVECLEN (newpat, 0); i++) |
11477 | { |
11478 | if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)) |
11479 | && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn)) |
11480 | return -1; |
11481 | if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH) |
11482 | { |
11483 | gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))); |
11484 | notes = alloc_reg_note (REG_UNUSED, |
11485 | XEXP (XVECEXP (newpat, 0, i), 0), notes); |
11486 | } |
11487 | } |
11488 | pat = newpat; |
11489 | } |
11490 | |
11491 | if (insn_code_number >= 0 |
11492 | && insn_code_number != NOOP_MOVE_INSN_CODE) |
11493 | { |
11494 | old_pat = PATTERN (insn); |
11495 | old_notes = REG_NOTES (insn); |
11496 | old_icode = INSN_CODE (insn); |
11497 | PATTERN (insn) = pat; |
11498 | REG_NOTES (insn) = notes; |
11499 | INSN_CODE (insn) = insn_code_number; |
11500 | |
11501 | /* Allow targets to reject combined insn. */ |
11502 | if (!targetm.legitimate_combined_insn (insn)) |
11503 | { |
11504 | if (dump_file && (dump_flags & TDF_DETAILS)) |
	    fputs ("Instruction not appropriate for target.",
		   dump_file);
11507 | |
11508 | /* Callers expect recog_for_combine to strip |
11509 | clobbers from the pattern on failure. */ |
11510 | pat = pat_without_clobbers; |
11511 | notes = NULL_RTX; |
11512 | |
11513 | insn_code_number = -1; |
11514 | } |
11515 | |
11516 | PATTERN (insn) = old_pat; |
11517 | REG_NOTES (insn) = old_notes; |
11518 | INSN_CODE (insn) = old_icode; |
11519 | } |
11520 | |
11521 | *pnewpat = pat; |
11522 | *pnotes = notes; |
11523 | |
11524 | return insn_code_number; |
11525 | } |
11526 | |
11527 | /* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be |
11528 | expressed as an AND and maybe an LSHIFTRT, to that formulation. |
11529 | Return whether anything was so changed. */ |
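/* For example, on a little-endian target
   (zero_extract:SI X (const_int 8) (const_int 4)) becomes
   (and:SI (lshiftrt:SI X (const_int 4)) (const_int 255)).  */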
11530 | |
11531 | static bool |
11532 | change_zero_ext (rtx pat) |
11533 | { |
11534 | bool changed = false; |
11535 | rtx *src = &SET_SRC (pat); |
11536 | |
11537 | subrtx_ptr_iterator::array_type array; |
11538 | FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST) |
11539 | { |
11540 | rtx x = **iter; |
11541 | scalar_int_mode mode, inner_mode; |
      if (!is_a <scalar_int_mode> (GET_MODE (x), &mode))
11543 | continue; |
11544 | int size; |
11545 | |
11546 | if (GET_CODE (x) == ZERO_EXTRACT |
11547 | && CONST_INT_P (XEXP (x, 1)) |
11548 | && CONST_INT_P (XEXP (x, 2)) |
&& is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode)
&& GET_MODE_PRECISION (inner_mode) <= GET_MODE_PRECISION (mode))
11551 | { |
11552 | size = INTVAL (XEXP (x, 1)); |
11553 | |
11554 | int start = INTVAL (XEXP (x, 2)); |
11555 | if (BITS_BIG_ENDIAN) |
start = GET_MODE_PRECISION (inner_mode) - size - start;
11557 | |
11558 | if (start != 0) |
11559 | x = gen_rtx_LSHIFTRT (inner_mode, XEXP (x, 0), |
11560 | gen_int_shift_amount (inner_mode, start)); |
11561 | else |
11562 | x = XEXP (x, 0); |
11563 | |
11564 | if (mode != inner_mode) |
11565 | { |
11566 | if (REG_P (x) && HARD_REGISTER_P (x) |
&& !can_change_dest_mode (x, 0, mode))
11568 | continue; |
11569 | |
11570 | x = gen_lowpart_SUBREG (mode, x); |
11571 | } |
11572 | } |
11573 | else if (GET_CODE (x) == ZERO_EXTEND |
11574 | && GET_CODE (XEXP (x, 0)) == SUBREG |
11575 | && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (XEXP (x, 0)))) |
11576 | && !paradoxical_subreg_p (XEXP (x, 0)) |
11577 | && subreg_lowpart_p (XEXP (x, 0))) |
11578 | { |
11579 | inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0))); |
size = GET_MODE_PRECISION (inner_mode);
11581 | x = SUBREG_REG (XEXP (x, 0)); |
11582 | if (GET_MODE (x) != mode) |
11583 | { |
11584 | if (REG_P (x) && HARD_REGISTER_P (x) |
&& !can_change_dest_mode (x, 0, mode))
11586 | continue; |
11587 | |
11588 | x = gen_lowpart_SUBREG (mode, x); |
11589 | } |
11590 | } |
11591 | else if (GET_CODE (x) == ZERO_EXTEND |
11592 | && REG_P (XEXP (x, 0)) |
11593 | && HARD_REGISTER_P (XEXP (x, 0)) |
&& can_change_dest_mode (XEXP (x, 0), 0, mode))
11595 | { |
11596 | inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0))); |
size = GET_MODE_PRECISION (inner_mode);
11598 | x = gen_rtx_REG (mode, REGNO (XEXP (x, 0))); |
11599 | } |
11600 | else |
11601 | continue; |
11602 | |
11603 | if (!(GET_CODE (x) == LSHIFTRT |
11604 | && CONST_INT_P (XEXP (x, 1)) |
11605 | && size + INTVAL (XEXP (x, 1)) == GET_MODE_PRECISION (mode))) |
11606 | { |
wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode));
11608 | x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode)); |
11609 | } |
11610 | |
11611 | SUBST (**iter, x); |
11612 | changed = true; |
11613 | } |
11614 | |
11615 | if (changed) |
11616 | FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST) |
maybe_swap_commutative_operands (**iter);
11618 | |
11619 | rtx *dst = &SET_DEST (pat); |
11620 | scalar_int_mode mode; |
11621 | if (GET_CODE (*dst) == ZERO_EXTRACT |
11622 | && REG_P (XEXP (*dst, 0)) |
&& is_a <scalar_int_mode> (GET_MODE (XEXP (*dst, 0)), &mode)
11624 | && CONST_INT_P (XEXP (*dst, 1)) |
11625 | && CONST_INT_P (XEXP (*dst, 2))) |
11626 | { |
11627 | rtx reg = XEXP (*dst, 0); |
11628 | int width = INTVAL (XEXP (*dst, 1)); |
11629 | int offset = INTVAL (XEXP (*dst, 2)); |
11630 | int reg_width = GET_MODE_PRECISION (mode); |
11631 | if (BITS_BIG_ENDIAN) |
11632 | offset = reg_width - width - offset; |
11633 | |
11634 | rtx x, y, z, w; |
wide_int mask = wi::shifted_mask (offset, width, true, reg_width);
wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width);
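/* Rewrite the bit-field store as a read-modify-write of the whole
register: X keeps the bits outside the field (reg & ~field), Z is
the shifted source masked to the field, and W merges the two. */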
11637 | x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode)); |
11638 | if (offset) |
11639 | y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset)); |
11640 | else |
11641 | y = SET_SRC (pat); |
11642 | z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode)); |
11643 | w = gen_rtx_IOR (mode, x, z); |
11644 | SUBST (SET_DEST (pat), reg); |
11645 | SUBST (SET_SRC (pat), w); |
11646 | |
11647 | changed = true; |
11648 | } |
11649 | |
11650 | return changed; |
11651 | } |
11652 | |
11653 | /* Like recog, but we receive the address of a pointer to a new pattern. |
11654 | We try to match the rtx that the pointer points to. |
11655 | If that fails, we may try to modify or replace the pattern, |
11656 | storing the replacement into the same pointer object. |
11657 | |
11658 | Modifications include deletion or addition of CLOBBERs. If the |
11659 | instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT |
11660 | to the equivalent AND and perhaps LSHIFTRT patterns, and try with that |
11661 | (and undo if that fails). |
11662 | |
11663 | PNOTES is a pointer to a location where any REG_UNUSED notes added for |
11664 | the CLOBBERs are placed. |
11665 | |
11666 | The value is the final insn code from the pattern ultimately matched, |
11667 | or -1. */ |
11668 | |
11669 | static int |
11670 | recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes) |
11671 | { |
11672 | rtx pat = *pnewpat; |
11673 | int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes); |
11674 | if (insn_code_number >= 0 || check_asm_operands (pat)) |
11675 | return insn_code_number; |
11676 | |
11677 | void *marker = get_undo_marker (); |
11678 | bool changed = false; |
11679 | |
11680 | if (GET_CODE (pat) == SET) |
11681 | { |
11682 | /* For an unrecognized single set of a constant, try placing it in |
11683 | the constant pool, if this function already uses one. */ |
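/* E.g. a (set (reg:DF ...) (const_double ...)) that matches no insn
pattern may still be recognizable as a load from a constant-pool
MEM, which force_const_mem creates below. */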
11684 | rtx src = SET_SRC (pat); |
11685 | if (CONSTANT_P (src) |
11686 | && !CONST_INT_P (src) |
11687 | && crtl->uses_const_pool) |
11688 | { |
11689 | machine_mode mode = GET_MODE (src); |
11690 | if (mode == VOIDmode) |
11691 | mode = GET_MODE (SET_DEST (pat)); |
11692 | src = force_const_mem (mode, src); |
11693 | if (src) |
11694 | { |
11695 | SUBST (SET_SRC (pat), src); |
11696 | changed = true; |
11697 | } |
11698 | } |
11699 | else |
11700 | changed = change_zero_ext (pat); |
11701 | } |
11702 | else if (GET_CODE (pat) == PARALLEL) |
11703 | { |
11704 | int i; |
11705 | for (i = 0; i < XVECLEN (pat, 0); i++) |
11706 | { |
11707 | rtx set = XVECEXP (pat, 0, i); |
11708 | if (GET_CODE (set) == SET) |
changed |= change_zero_ext (set);
11710 | } |
11711 | } |
11712 | |
11713 | if (changed) |
11714 | { |
11715 | insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes); |
11716 | |
11717 | if (insn_code_number < 0) |
11718 | undo_to_marker (marker); |
11719 | } |
11720 | |
11721 | return insn_code_number; |
11722 | } |
11723 | |
11724 | /* Like gen_lowpart_general but for use by combine. In combine it |
11725 | is not possible to create any new pseudoregs. However, it is |
11726 | safe to create invalid memory addresses, because combine will |
11727 | try to recognize them and all they will do is make the combine |
11728 | attempt fail. |
11729 | |
11730 | If for some reason this cannot do its job, an rtx |
11731 | (clobber (const_int 0)) is returned. |
11732 | An insn containing that will not be recognized. */ |
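
/* E.g. gen_lowpart_for_combine (QImode, (reg:SI 100)) normally
yields (subreg:QI (reg:SI 100) 0) on a little-endian target. */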
11733 | |
11734 | static rtx |
11735 | gen_lowpart_for_combine (machine_mode omode, rtx x) |
11736 | { |
11737 | machine_mode imode = GET_MODE (x); |
11738 | rtx result; |
11739 | |
11740 | if (omode == imode) |
11741 | return x; |
11742 | |
11743 | /* We can only support MODE being wider than a word if X is a |
11744 | constant integer or has a mode the same size. */ |
11745 | if (maybe_gt (GET_MODE_SIZE (omode), UNITS_PER_WORD) |
11746 | && ! (CONST_SCALAR_INT_P (x) |
11747 | || known_eq (GET_MODE_SIZE (imode), GET_MODE_SIZE (omode)))) |
11748 | goto fail; |
11749 | |
11750 | /* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart |
11751 | won't know what to do. So we will strip off the SUBREG here and |
11752 | process normally. */ |
11753 | if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x))) |
11754 | { |
11755 | x = SUBREG_REG (x); |
11756 | |
/* For use in case we fall down into the address adjustments
further below, we need to update the known mode of x, imode,
since we just changed x. */
11760 | imode = GET_MODE (x); |
11761 | |
11762 | if (imode == omode) |
11763 | return x; |
11764 | } |
11765 | |
11766 | result = gen_lowpart_common (omode, x); |
11767 | |
11768 | if (result) |
11769 | return result; |
11770 | |
11771 | if (MEM_P (x)) |
11772 | { |
11773 | /* Refuse to work on a volatile memory ref or one with a mode-dependent |
11774 | address. */ |
11775 | if (MEM_VOLATILE_P (x) |
11776 | || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x))) |
11777 | goto fail; |
11778 | |
11779 | /* If we want to refer to something bigger than the original memref, |
11780 | generate a paradoxical subreg instead. That will force a reload |
11781 | of the original memref X. */ |
if (paradoxical_subreg_p (omode, imode))
11783 | return gen_rtx_SUBREG (omode, x, 0); |
11784 | |
11785 | poly_int64 offset = byte_lowpart_offset (omode, imode); |
11786 | return adjust_address_nv (x, omode, offset); |
11787 | } |
11788 | |
11789 | /* If X is a comparison operator, rewrite it in a new mode. This |
11790 | probably won't match, but may allow further simplifications. */ |
11791 | else if (COMPARISON_P (x) |
11792 | && SCALAR_INT_MODE_P (imode) |
11793 | && SCALAR_INT_MODE_P (omode)) |
11794 | return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1)); |
11795 | |
11796 | /* If we couldn't simplify X any other way, just enclose it in a |
11797 | SUBREG. Normally, this SUBREG won't match, but some patterns may |
11798 | include an explicit SUBREG or we may simplify it further in combine. */ |
11799 | else |
11800 | { |
11801 | rtx res; |
11802 | |
11803 | if (imode == VOIDmode) |
11804 | { |
11805 | imode = int_mode_for_mode (omode).require (); |
11806 | x = gen_lowpart_common (imode, x); |
11807 | if (x == NULL) |
11808 | goto fail; |
11809 | } |
res = lowpart_subreg (omode, x, imode);
11811 | if (res) |
11812 | return res; |
11813 | } |
11814 | |
11815 | fail: |
11816 | return gen_rtx_CLOBBER (omode, const0_rtx); |
11817 | } |
11818 | |
11819 | /* Try to simplify a comparison between OP0 and a constant OP1, |
11820 | where CODE is the comparison code that will be tested, into a |
11821 | (CODE OP0 const0_rtx) form. |
11822 | |
11823 | The result is a possibly different comparison code to use. |
11824 | *POP0 and *POP1 may be updated. */ |
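
/* For example, (ltu X (const_int 1)) is first rewritten as
(leu X (const_int 0)) and then as (eq X (const_int 0)). */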
11825 | |
11826 | static enum rtx_code |
11827 | simplify_compare_const (enum rtx_code code, machine_mode mode, |
11828 | rtx *pop0, rtx *pop1) |
11829 | { |
11830 | scalar_int_mode int_mode; |
11831 | rtx op0 = *pop0; |
11832 | HOST_WIDE_INT const_op = INTVAL (*pop1); |
11833 | |
11834 | /* Get the constant we are comparing against and turn off all bits |
11835 | not on in our mode. */ |
11836 | if (mode != VOIDmode) |
11837 | const_op = trunc_int_for_mode (const_op, mode); |
11838 | |
11839 | /* If we are comparing against a constant power of two and the value |
11840 | being compared can only have that single bit nonzero (e.g., it was |
11841 | `and'ed with that bit), we can replace this with a comparison |
11842 | with zero. */ |
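/* E.g. if X was computed by (and Y (const_int 4)), then
(eq X (const_int 4)) simplifies to (ne X (const_int 0)). */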
11843 | if (const_op |
11844 | && (code == EQ || code == NE || code == GE || code == GEU |
11845 | || code == LT || code == LTU) |
&& is_a <scalar_int_mode> (mode, &int_mode)
&& GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
&& pow2p_hwi (const_op & GET_MODE_MASK (int_mode))
11849 | && (nonzero_bits (op0, int_mode) |
11850 | == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (int_mode)))) |
11851 | { |
11852 | code = (code == EQ || code == GE || code == GEU ? NE : EQ); |
11853 | const_op = 0; |
11854 | } |
11855 | |
11856 | /* Similarly, if we are comparing a value known to be either -1 or |
11857 | 0 with -1, change it to the opposite comparison against zero. */ |
11858 | if (const_op == -1 |
11859 | && (code == EQ || code == NE || code == GT || code == LE |
11860 | || code == GEU || code == LTU) |
&& is_a <scalar_int_mode> (mode, &int_mode)
&& num_sign_bit_copies (op0, int_mode) == GET_MODE_PRECISION (int_mode))
11863 | { |
11864 | code = (code == EQ || code == LE || code == GEU ? NE : EQ); |
11865 | const_op = 0; |
11866 | } |
11867 | |
11868 | /* Do some canonicalizations based on the comparison code. We prefer |
11869 | comparisons against zero and then prefer equality comparisons. |
11870 | If we can reduce the size of a constant, we will do that too. */ |
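
/* For instance, (lt X (const_int 4)) becomes (le X (const_int 3))
and (geu X (const_int 4)) becomes (gtu X (const_int 3)). */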
11871 | switch (code) |
11872 | { |
11873 | case LT: |
11874 | /* < C is equivalent to <= (C - 1) */ |
11875 | if (const_op > 0) |
11876 | { |
11877 | const_op -= 1; |
11878 | code = LE; |
11879 | /* ... fall through to LE case below. */ |
11880 | gcc_fallthrough (); |
11881 | } |
11882 | else |
11883 | break; |
11884 | |
11885 | case LE: |
11886 | /* <= C is equivalent to < (C + 1); we do this for C < 0 */ |
11887 | if (const_op < 0) |
11888 | { |
11889 | const_op += 1; |
11890 | code = LT; |
11891 | } |
11892 | |
11893 | /* If we are doing a <= 0 comparison on a value known to have |
11894 | a zero sign bit, we can replace this with == 0. */ |
11895 | else if (const_op == 0 |
&& is_a <scalar_int_mode> (mode, &int_mode)
&& GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
&& (nonzero_bits (op0, int_mode)
& (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11900 | == 0) |
11901 | code = EQ; |
11902 | break; |
11903 | |
11904 | case GE: |
11905 | /* >= C is equivalent to > (C - 1). */ |
11906 | if (const_op > 0) |
11907 | { |
11908 | const_op -= 1; |
11909 | code = GT; |
11910 | /* ... fall through to GT below. */ |
11911 | gcc_fallthrough (); |
11912 | } |
11913 | else |
11914 | break; |
11915 | |
11916 | case GT: |
11917 | /* > C is equivalent to >= (C + 1); we do this for C < 0. */ |
11918 | if (const_op < 0) |
11919 | { |
11920 | const_op += 1; |
11921 | code = GE; |
11922 | } |
11923 | |
11924 | /* If we are doing a > 0 comparison on a value known to have |
11925 | a zero sign bit, we can replace this with != 0. */ |
11926 | else if (const_op == 0 |
&& is_a <scalar_int_mode> (mode, &int_mode)
&& GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
&& (nonzero_bits (op0, int_mode)
& (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11931 | == 0) |
11932 | code = NE; |
11933 | break; |
11934 | |
11935 | case LTU: |
11936 | /* < C is equivalent to <= (C - 1). */ |
11937 | if (const_op > 0) |
11938 | { |
11939 | const_op -= 1; |
11940 | code = LEU; |
11941 | /* ... fall through ... */ |
11942 | gcc_fallthrough (); |
11943 | } |
11944 | /* (unsigned) < 0x80000000 is equivalent to >= 0. */ |
else if (is_a <scalar_int_mode> (mode, &int_mode)
&& GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
&& (((unsigned HOST_WIDE_INT) const_op & GET_MODE_MASK (int_mode))
== HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11949 | { |
11950 | const_op = 0; |
11951 | code = GE; |
11952 | break; |
11953 | } |
11954 | else |
11955 | break; |
11956 | |
11957 | case LEU: |
11958 | /* unsigned <= 0 is equivalent to == 0 */ |
11959 | if (const_op == 0) |
11960 | code = EQ; |
11961 | /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */ |
else if (is_a <scalar_int_mode> (mode, &int_mode)
&& GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
&& ((unsigned HOST_WIDE_INT) const_op
== ((HOST_WIDE_INT_1U
<< (GET_MODE_PRECISION (int_mode) - 1)) - 1)))
11967 | { |
11968 | const_op = 0; |
11969 | code = GE; |
11970 | } |
11971 | break; |
11972 | |
11973 | case GEU: |
11974 | /* >= C is equivalent to > (C - 1). */ |
11975 | if (const_op > 1) |
11976 | { |
11977 | const_op -= 1; |
11978 | code = GTU; |
11979 | /* ... fall through ... */ |
11980 | gcc_fallthrough (); |
11981 | } |
11982 | |
11983 | /* (unsigned) >= 0x80000000 is equivalent to < 0. */ |
else if (is_a <scalar_int_mode> (mode, &int_mode)
&& GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
&& (((unsigned HOST_WIDE_INT) const_op & GET_MODE_MASK (int_mode))
== HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11988 | { |
11989 | const_op = 0; |
11990 | code = LT; |
11991 | break; |
11992 | } |
11993 | else |
11994 | break; |
11995 | |
11996 | case GTU: |
11997 | /* unsigned > 0 is equivalent to != 0 */ |
11998 | if (const_op == 0) |
11999 | code = NE; |
12000 | /* (unsigned) > 0x7fffffff is equivalent to < 0. */ |
else if (is_a <scalar_int_mode> (mode, &int_mode)
&& GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
&& ((unsigned HOST_WIDE_INT) const_op
== (HOST_WIDE_INT_1U
<< (GET_MODE_PRECISION (int_mode) - 1)) - 1))
12006 | { |
12007 | const_op = 0; |
12008 | code = LT; |
12009 | } |
12010 | break; |
12011 | |
12012 | default: |
12013 | break; |
12014 | } |
12015 | |
12016 | /* Narrow non-symmetric comparison of memory and constant as e.g. |
12017 | x0...x7 <= 0x3fffffffffffffff into x0 <= 0x3f where x0 is the most |
12018 | significant byte. Likewise, transform x0...x7 >= 0x4000000000000000 into |
12019 | x0 >= 0x40. */ |
12020 | if ((code == LEU || code == LTU || code == GEU || code == GTU) |
&& is_a <scalar_int_mode> (GET_MODE (op0), &int_mode)
&& HWI_COMPUTABLE_MODE_P (int_mode)
12023 | && MEM_P (op0) |
12024 | && !MEM_VOLATILE_P (op0) |
/* The optimization only makes sense for constants big enough
that there is a chance to chop something off at all. */
12027 | && ((unsigned HOST_WIDE_INT) const_op & GET_MODE_MASK (int_mode)) > 0xff |
12028 | /* Ensure that we do not overflow during normalization. */ |
12029 | && (code != GTU |
12030 | || ((unsigned HOST_WIDE_INT) const_op & GET_MODE_MASK (int_mode)) |
12031 | < HOST_WIDE_INT_M1U) |
12032 | && trunc_int_for_mode (const_op, int_mode) == const_op) |
12033 | { |
12034 | unsigned HOST_WIDE_INT n |
12035 | = (unsigned HOST_WIDE_INT) const_op & GET_MODE_MASK (int_mode); |
12036 | enum rtx_code adjusted_code; |
12037 | |
12038 | /* Normalize code to either LEU or GEU. */ |
12039 | if (code == LTU) |
12040 | { |
12041 | --n; |
12042 | adjusted_code = LEU; |
12043 | } |
12044 | else if (code == GTU) |
12045 | { |
12046 | ++n; |
12047 | adjusted_code = GEU; |
12048 | } |
12049 | else |
12050 | adjusted_code = code; |
12051 | |
12052 | scalar_int_mode narrow_mode_iter; |
12053 | FOR_EACH_MODE_UNTIL (narrow_mode_iter, int_mode) |
12054 | { |
unsigned nbits = GET_MODE_PRECISION (int_mode)
- GET_MODE_PRECISION (narrow_mode_iter);
12057 | unsigned HOST_WIDE_INT mask = (HOST_WIDE_INT_1U << nbits) - 1; |
12058 | unsigned HOST_WIDE_INT lower_bits = n & mask; |
12059 | if ((adjusted_code == LEU && lower_bits == mask) |
12060 | || (adjusted_code == GEU && lower_bits == 0)) |
12061 | { |
12062 | n >>= nbits; |
12063 | break; |
12064 | } |
12065 | } |
12066 | |
12067 | if (narrow_mode_iter < int_mode) |
12068 | { |
12069 | if (dump_file && (dump_flags & TDF_DETAILS)) |
12070 | { |
fprintf (dump_file, "narrow comparison from mode %s to %s: (MEM %s "
HOST_WIDE_INT_PRINT_HEX ") to (MEM %s "
HOST_WIDE_INT_PRINT_HEX ").\n", GET_MODE_NAME (int_mode),
GET_MODE_NAME (narrow_mode_iter), GET_RTX_NAME (code),
(unsigned HOST_WIDE_INT) const_op & GET_MODE_MASK (int_mode),
GET_RTX_NAME (adjusted_code), n);
12078 | } |
12079 | poly_int64 offset = (BYTES_BIG_ENDIAN |
12080 | ? 0 |
: (GET_MODE_SIZE (int_mode)
- GET_MODE_SIZE (narrow_mode_iter)));
12083 | *pop0 = adjust_address_nv (op0, narrow_mode_iter, offset); |
12084 | *pop1 = gen_int_mode (n, narrow_mode_iter); |
12085 | return adjusted_code; |
12086 | } |
12087 | } |
12088 | |
12089 | *pop1 = GEN_INT (const_op); |
12090 | return code; |
12091 | } |
12092 | |
12093 | /* Simplify a comparison between *POP0 and *POP1 where CODE is the |
12094 | comparison code that will be tested. |
12095 | |
12096 | The result is a possibly different comparison code to use. *POP0 and |
12097 | *POP1 may be updated. |
12098 | |
12099 | It is possible that we might detect that a comparison is either always |
12100 | true or always false. However, we do not perform general constant |
12101 | folding in combine, so this knowledge isn't useful. Such tautologies |
12102 | should have been detected earlier. Hence we ignore all such cases. */ |
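
/* A typical step: for (eq (neg X) (const_int 5)) the NEG case below
negates the constant, leaving (eq X (const_int -5)). */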
12103 | |
12104 | static enum rtx_code |
12105 | simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) |
12106 | { |
12107 | rtx op0 = *pop0; |
12108 | rtx op1 = *pop1; |
12109 | rtx tem, tem1; |
12110 | int i; |
12111 | scalar_int_mode mode, inner_mode, tmode; |
12112 | opt_scalar_int_mode tmode_iter; |
12113 | |
12114 | /* Try a few ways of applying the same transformation to both operands. */ |
12115 | while (1) |
12116 | { |
12117 | /* The test below this one won't handle SIGN_EXTENDs on these machines, |
12118 | so check specially. */ |
12119 | if (!WORD_REGISTER_OPERATIONS |
12120 | && code != GTU && code != GEU && code != LTU && code != LEU |
12121 | && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT |
12122 | && GET_CODE (XEXP (op0, 0)) == ASHIFT |
12123 | && GET_CODE (XEXP (op1, 0)) == ASHIFT |
12124 | && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG |
12125 | && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG |
&& is_a <scalar_int_mode> (GET_MODE (op0), &mode)
&& (is_a <scalar_int_mode>
(GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))), &inner_mode))
12129 | && inner_mode == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0))) |
12130 | && CONST_INT_P (XEXP (op0, 1)) |
12131 | && XEXP (op0, 1) == XEXP (op1, 1) |
12132 | && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1) |
12133 | && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1) |
12134 | && (INTVAL (XEXP (op0, 1)) |
12135 | == (GET_MODE_PRECISION (mode) |
- GET_MODE_PRECISION (inner_mode))))
12137 | { |
12138 | op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0)); |
12139 | op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0)); |
12140 | } |
12141 | |
12142 | /* If both operands are the same constant shift, see if we can ignore the |
12143 | shift. We can if the shift is a rotate or if the bits shifted out of |
12144 | this shift are known to be zero for both inputs and if the type of |
12145 | comparison is compatible with the shift. */ |
12146 | if (GET_CODE (op0) == GET_CODE (op1) |
12147 | && HWI_COMPUTABLE_MODE_P (GET_MODE (op0)) |
12148 | && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ)) |
12149 | || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT) |
12150 | && (code != GT && code != LT && code != GE && code != LE)) |
12151 | || (GET_CODE (op0) == ASHIFTRT |
12152 | && (code != GTU && code != LTU |
12153 | && code != GEU && code != LEU))) |
12154 | && CONST_INT_P (XEXP (op0, 1)) |
12155 | && INTVAL (XEXP (op0, 1)) >= 0 |
12156 | && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT |
12157 | && XEXP (op0, 1) == XEXP (op1, 1)) |
12158 | { |
12159 | machine_mode mode = GET_MODE (op0); |
12160 | unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode); |
12161 | int shift_count = INTVAL (XEXP (op0, 1)); |
12162 | |
12163 | if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT) |
12164 | mask &= (mask >> shift_count) << shift_count; |
12165 | else if (GET_CODE (op0) == ASHIFT) |
12166 | mask = (mask & (mask << shift_count)) >> shift_count; |
12167 | |
12168 | if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0 |
12169 | && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0) |
12170 | op0 = XEXP (op0, 0), op1 = XEXP (op1, 0); |
12171 | else |
12172 | break; |
12173 | } |
12174 | |
12175 | /* If both operands are AND's of a paradoxical SUBREG by constant, the |
12176 | SUBREGs are of the same mode, and, in both cases, the AND would |
12177 | be redundant if the comparison was done in the narrower mode, |
12178 | do the comparison in the narrower mode (e.g., we are AND'ing with 1 |
12179 | and the operand's possibly nonzero bits are 0xffffff01; in that case |
12180 | if we only care about QImode, we don't need the AND). This case |
12181 | occurs if the output mode of an scc insn is not SImode and |
12182 | STORE_FLAG_VALUE == 1 (e.g., the 386). |
12183 | |
12184 | Similarly, check for a case where the AND's are ZERO_EXTEND |
12185 | operations from some narrower mode even though a SUBREG is not |
12186 | present. */ |
12187 | |
12188 | else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND |
12189 | && CONST_INT_P (XEXP (op0, 1)) |
12190 | && CONST_INT_P (XEXP (op1, 1))) |
12191 | { |
12192 | rtx inner_op0 = XEXP (op0, 0); |
12193 | rtx inner_op1 = XEXP (op1, 0); |
12194 | HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1)); |
12195 | HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1)); |
12196 | bool changed = false; |
12197 | |
if (paradoxical_subreg_p (inner_op0)
12199 | && GET_CODE (inner_op1) == SUBREG |
12200 | && HWI_COMPUTABLE_MODE_P (GET_MODE (SUBREG_REG (inner_op0))) |
12201 | && (GET_MODE (SUBREG_REG (inner_op0)) |
12202 | == GET_MODE (SUBREG_REG (inner_op1))) |
12203 | && ((~c0) & nonzero_bits (SUBREG_REG (inner_op0), |
12204 | GET_MODE (SUBREG_REG (inner_op0)))) == 0 |
12205 | && ((~c1) & nonzero_bits (SUBREG_REG (inner_op1), |
12206 | GET_MODE (SUBREG_REG (inner_op1)))) == 0) |
12207 | { |
12208 | op0 = SUBREG_REG (inner_op0); |
12209 | op1 = SUBREG_REG (inner_op1); |
12210 | |
12211 | /* The resulting comparison is always unsigned since we masked |
12212 | off the original sign bit. */ |
12213 | code = unsigned_condition (code); |
12214 | |
12215 | changed = true; |
12216 | } |
12217 | |
12218 | else if (c0 == c1) |
12219 | FOR_EACH_MODE_UNTIL (tmode, |
12220 | as_a <scalar_int_mode> (GET_MODE (op0))) |
12221 | if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode)) |
12222 | { |
op0 = gen_lowpart_or_truncate (tmode, inner_op0);
op1 = gen_lowpart_or_truncate (tmode, inner_op1);
12225 | code = unsigned_condition (code); |
12226 | changed = true; |
12227 | break; |
12228 | } |
12229 | |
12230 | if (! changed) |
12231 | break; |
12232 | } |
12233 | |
12234 | /* If both operands are NOT, we can strip off the outer operation |
12235 | and adjust the comparison code for swapped operands; similarly for |
12236 | NEG, except that this must be an equality comparison. */ |
12237 | else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT) |
12238 | || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG |
12239 | && (code == EQ || code == NE))) |
12240 | op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code); |
12241 | |
12242 | else |
12243 | break; |
12244 | } |
12245 | |
12246 | /* If the first operand is a constant, swap the operands and adjust the |
12247 | comparison code appropriately, but don't do this if the second operand |
12248 | is already a constant integer. */ |
12249 | if (swap_commutative_operands_p (op0, op1)) |
12250 | { |
std::swap (op0, op1);
12252 | code = swap_condition (code); |
12253 | } |
12254 | |
12255 | /* We now enter a loop during which we will try to simplify the comparison. |
12256 | For the most part, we only are concerned with comparisons with zero, |
12257 | but some things may really be comparisons with zero but not start |
12258 | out looking that way. */ |
12259 | |
12260 | while (CONST_INT_P (op1)) |
12261 | { |
12262 | machine_mode raw_mode = GET_MODE (op0); |
12263 | scalar_int_mode int_mode; |
12264 | int equality_comparison_p; |
12265 | int sign_bit_comparison_p; |
12266 | int unsigned_comparison_p; |
12267 | HOST_WIDE_INT const_op; |
12268 | |
12269 | /* We only want to handle integral modes. This catches VOIDmode, |
12270 | CCmode, and the floating-point modes. An exception is that we |
12271 | can handle VOIDmode if OP0 is a COMPARE or a comparison |
12272 | operation. */ |
12273 | |
12274 | if (GET_MODE_CLASS (raw_mode) != MODE_INT |
12275 | && ! (raw_mode == VOIDmode |
12276 | && (GET_CODE (op0) == COMPARE || COMPARISON_P (op0)))) |
12277 | break; |
12278 | |
12279 | /* Try to simplify the compare to constant, possibly changing the |
12280 | comparison op, and/or changing op1 to zero. */ |
code = simplify_compare_const (code, raw_mode, &op0, &op1);
12282 | const_op = INTVAL (op1); |
12283 | |
12284 | /* Compute some predicates to simplify code below. */ |
12285 | |
12286 | equality_comparison_p = (code == EQ || code == NE); |
12287 | sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0); |
12288 | unsigned_comparison_p = (code == LTU || code == LEU || code == GTU |
12289 | || code == GEU); |
12290 | |
12291 | /* If this is a sign bit comparison and we can do arithmetic in |
12292 | MODE, say that we will only be needing the sign bit of OP0. */ |
12293 | if (sign_bit_comparison_p |
&& is_a <scalar_int_mode> (raw_mode, &int_mode)
&& HWI_COMPUTABLE_MODE_P (int_mode))
op0 = force_to_mode (op0, int_mode,
HOST_WIDE_INT_1U
<< (GET_MODE_PRECISION (int_mode) - 1), false);
12299 | |
12300 | if (COMPARISON_P (op0)) |
12301 | { |
12302 | /* We can't do anything if OP0 is a condition code value, rather |
12303 | than an actual data value. */ |
12304 | if (const_op != 0 |
12305 | || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC) |
12306 | break; |
12307 | |
12308 | /* Get the two operands being compared. */ |
12309 | if (GET_CODE (XEXP (op0, 0)) == COMPARE) |
12310 | tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1); |
12311 | else |
12312 | tem = XEXP (op0, 0), tem1 = XEXP (op0, 1); |
12313 | |
12314 | /* Check for the cases where we simply want the result of the |
12315 | earlier test or the opposite of that result. */ |
12316 | if (code == NE || code == EQ |
12317 | || (val_signbit_known_set_p (raw_mode, STORE_FLAG_VALUE) |
12318 | && (code == LT || code == GE))) |
12319 | { |
12320 | enum rtx_code new_code; |
12321 | if (code == LT || code == NE) |
12322 | new_code = GET_CODE (op0); |
12323 | else |
12324 | new_code = reversed_comparison_code (op0, NULL); |
12325 | |
12326 | if (new_code != UNKNOWN) |
12327 | { |
12328 | code = new_code; |
12329 | op0 = tem; |
12330 | op1 = tem1; |
12331 | continue; |
12332 | } |
12333 | } |
12334 | break; |
12335 | } |
12336 | |
12337 | if (raw_mode == VOIDmode) |
12338 | break; |
scalar_int_mode mode = as_a <scalar_int_mode> (raw_mode);
12340 | |
12341 | /* Now try cases based on the opcode of OP0. If none of the cases |
12342 | does a "continue", we exit this loop immediately after the |
12343 | switch. */ |
12344 | |
12345 | unsigned int mode_width = GET_MODE_PRECISION (mode); |
12346 | unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode); |
12347 | switch (GET_CODE (op0)) |
12348 | { |
12349 | case ZERO_EXTRACT: |
12350 | /* If we are extracting a single bit from a variable position in |
12351 | a constant that has only a single bit set and are comparing it |
12352 | with zero, we can convert this into an equality comparison |
12353 | between the position and the location of the single bit. */ |
12354 | /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might |
12355 | have already reduced the shift count modulo the word size. */ |
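/* E.g. assuming !BITS_BIG_ENDIAN,
(eq (zero_extract (const_int 4) (const_int 1) X) (const_int 0))
becomes (ne X (const_int 2)), since bit 2 is the only nonzero bit
in the constant. */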
12356 | if (!SHIFT_COUNT_TRUNCATED |
12357 | && CONST_INT_P (XEXP (op0, 0)) |
12358 | && XEXP (op0, 1) == const1_rtx |
12359 | && equality_comparison_p && const_op == 0 |
12360 | && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0) |
12361 | { |
12362 | if (BITS_BIG_ENDIAN) |
12363 | i = BITS_PER_WORD - 1 - i; |
12364 | |
12365 | op0 = XEXP (op0, 2); |
12366 | op1 = GEN_INT (i); |
12367 | const_op = i; |
12368 | |
12369 | /* Result is nonzero iff shift count is equal to I. */ |
12370 | code = reverse_condition (code); |
12371 | continue; |
12372 | } |
12373 | |
12374 | /* fall through */ |
12375 | |
12376 | case SIGN_EXTRACT: |
tem = expand_compound_operation (op0);
12378 | if (tem != op0) |
12379 | { |
12380 | op0 = tem; |
12381 | continue; |
12382 | } |
12383 | break; |
12384 | |
12385 | case NOT: |
12386 | /* If testing for equality, we can take the NOT of the constant. */ |
12387 | if (equality_comparison_p |
&& (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
12389 | { |
12390 | op0 = XEXP (op0, 0); |
12391 | op1 = tem; |
12392 | continue; |
12393 | } |
12394 | |
12395 | /* If just looking at the sign bit, reverse the sense of the |
12396 | comparison. */ |
12397 | if (sign_bit_comparison_p) |
12398 | { |
12399 | op0 = XEXP (op0, 0); |
12400 | code = (code == GE ? LT : GE); |
12401 | continue; |
12402 | } |
12403 | break; |
12404 | |
12405 | case NEG: |
12406 | /* If testing for equality, we can take the NEG of the constant. */ |
12407 | if (equality_comparison_p |
&& (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
12409 | { |
12410 | op0 = XEXP (op0, 0); |
12411 | op1 = tem; |
12412 | continue; |
12413 | } |
12414 | |
12415 | /* The remaining cases only apply to comparisons with zero. */ |
12416 | if (const_op != 0) |
12417 | break; |
12418 | |
12419 | /* When X is ABS or is known positive, |
12420 | (neg X) is < 0 if and only if X != 0. */ |
12421 | |
12422 | if (sign_bit_comparison_p |
12423 | && (GET_CODE (XEXP (op0, 0)) == ABS |
12424 | || (mode_width <= HOST_BITS_PER_WIDE_INT |
12425 | && (nonzero_bits (XEXP (op0, 0), mode) |
12426 | & (HOST_WIDE_INT_1U << (mode_width - 1))) |
12427 | == 0))) |
12428 | { |
12429 | op0 = XEXP (op0, 0); |
12430 | code = (code == LT ? NE : EQ); |
12431 | continue; |
12432 | } |
12433 | |
12434 | /* If we have NEG of something whose two high-order bits are the |
12435 | same, we know that "(-a) < 0" is equivalent to "a > 0". */ |
12436 | if (num_sign_bit_copies (op0, mode) >= 2) |
12437 | { |
12438 | op0 = XEXP (op0, 0); |
12439 | code = swap_condition (code); |
12440 | continue; |
12441 | } |
12442 | break; |
12443 | |
12444 | case ROTATE: |
12445 | /* If we are testing equality and our count is a constant, we |
12446 | can perform the inverse operation on our RHS. */ |
12447 | if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1)) |
&& (tem = simplify_binary_operation (ROTATERT, mode,
op1, XEXP (op0, 1))) != 0)
12450 | { |
12451 | op0 = XEXP (op0, 0); |
12452 | op1 = tem; |
12453 | continue; |
12454 | } |
12455 | |
12456 | /* If we are doing a < 0 or >= 0 comparison, it means we are testing |
a particular bit. Convert it to an AND with a constant that has
only that bit set. This will be converted into a ZERO_EXTRACT. */
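/* E.g. for SImode, (lt (rotate X (const_int 1)) (const_int 0))
becomes (ne (and X (const_int 0x40000000)) (const_int 0)), since
rotating left by one moves bit 30 into the sign bit. */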
12459 | if (const_op == 0 && sign_bit_comparison_p |
12460 | && CONST_INT_P (XEXP (op0, 1)) |
12461 | && mode_width <= HOST_BITS_PER_WIDE_INT |
12462 | && UINTVAL (XEXP (op0, 1)) < mode_width) |
12463 | { |
12464 | op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), |
(HOST_WIDE_INT_1U
12466 | << (mode_width - 1 |
12467 | - INTVAL (XEXP (op0, 1))))); |
12468 | code = (code == LT ? NE : EQ); |
12469 | continue; |
12470 | } |
12471 | |
12472 | /* Fall through. */ |
12473 | |
12474 | case ABS: |
12475 | /* ABS is ignorable inside an equality comparison with zero. */ |
12476 | if (const_op == 0 && equality_comparison_p) |
12477 | { |
12478 | op0 = XEXP (op0, 0); |
12479 | continue; |
12480 | } |
12481 | break; |
12482 | |
12483 | case SIGN_EXTEND: |
12484 | /* Can simplify (compare (zero/sign_extend FOO) CONST) to |
12485 | (compare FOO CONST) if CONST fits in FOO's mode and we |
12486 | are either testing inequality or have an unsigned |
12487 | comparison with ZERO_EXTEND or a signed comparison with |
12488 | SIGN_EXTEND. But don't do it if we don't have a compare |
12489 | insn of the given mode, since we'd have to revert it |
12490 | later on, and then we wouldn't know whether to sign- or |
12491 | zero-extend. */ |
if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12493 | && ! unsigned_comparison_p |
12494 | && HWI_COMPUTABLE_MODE_P (mode) |
12495 | && trunc_int_for_mode (const_op, mode) == const_op |
12496 | && have_insn_for (COMPARE, mode)) |
12497 | { |
12498 | op0 = XEXP (op0, 0); |
12499 | continue; |
12500 | } |
12501 | break; |
12502 | |
12503 | case SUBREG: |
12504 | /* Check for the case where we are comparing A - C1 with C2, that is |
12505 | |
12506 | (subreg:MODE (plus (A) (-C1))) op (C2) |
12507 | |
12508 | with C1 a constant, and try to lift the SUBREG, i.e. to do the |
12509 | comparison in the wider mode. One of the following two conditions |
12510 | must be true in order for this to be valid: |
12511 | |
12512 | 1. The mode extension results in the same bit pattern being added |
12513 | on both sides and the comparison is equality or unsigned. As |
12514 | C2 has been truncated to fit in MODE, the pattern can only be |
12515 | all 0s or all 1s. |
12516 | |
12517 | 2. The mode extension results in the sign bit being copied on |
12518 | each side. |
12519 | |
12520 | The difficulty here is that we have predicates for A but not for |
12521 | (A - C1) so we need to check that C1 is within proper bounds so |
as to perturb A as little as possible. */
12523 | |
12524 | if (mode_width <= HOST_BITS_PER_WIDE_INT |
12525 | && subreg_lowpart_p (op0) |
12526 | && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)), |
&inner_mode)
&& GET_MODE_PRECISION (inner_mode) > mode_width
12529 | && GET_CODE (SUBREG_REG (op0)) == PLUS |
12530 | && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))) |
12531 | { |
12532 | rtx a = XEXP (SUBREG_REG (op0), 0); |
12533 | HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1)); |
12534 | |
12535 | if ((c1 > 0 |
12536 | && (unsigned HOST_WIDE_INT) c1 |
12537 | < HOST_WIDE_INT_1U << (mode_width - 1) |
12538 | && (equality_comparison_p || unsigned_comparison_p) |
12539 | /* (A - C1) zero-extends if it is positive and sign-extends |
12540 | if it is negative, C2 both zero- and sign-extends. */ |
12541 | && (((nonzero_bits (a, inner_mode) |
12542 | & ~GET_MODE_MASK (mode)) == 0 |
12543 | && const_op >= 0) |
12544 | /* (A - C1) sign-extends if it is positive and 1-extends |
12545 | if it is negative, C2 both sign- and 1-extends. */ |
12546 | || (num_sign_bit_copies (a, inner_mode) |
> (unsigned int) (GET_MODE_PRECISION (inner_mode)
12548 | - mode_width) |
12549 | && const_op < 0))) |
12550 | || ((unsigned HOST_WIDE_INT) c1 |
12551 | < HOST_WIDE_INT_1U << (mode_width - 2) |
12552 | /* (A - C1) always sign-extends, like C2. */ |
12553 | && num_sign_bit_copies (a, inner_mode) |
> (unsigned int) (GET_MODE_PRECISION (inner_mode)
12555 | - (mode_width - 1)))) |
12556 | { |
12557 | op0 = SUBREG_REG (op0); |
12558 | continue; |
12559 | } |
12560 | } |
12561 | |
12562 | /* If the inner mode is narrower and we are extracting the low part, |
12563 | we can treat the SUBREG as if it were a ZERO_EXTEND ... */ |
if (paradoxical_subreg_p (op0))
12565 | { |
12566 | if (WORD_REGISTER_OPERATIONS |
12567 | && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)), |
&inner_mode)
&& GET_MODE_PRECISION (inner_mode) < BITS_PER_WORD
12570 | /* On WORD_REGISTER_OPERATIONS targets the bits |
12571 | beyond sub_mode aren't considered undefined, |
12572 | so optimize only if it is a MEM load when MEM loads |
12573 | zero extend, because then the upper bits are all zero. */ |
12574 | && !(MEM_P (SUBREG_REG (op0)) |
&& load_extend_op (inner_mode) == ZERO_EXTEND))
12576 | break; |
12577 | /* FALLTHROUGH to case ZERO_EXTEND */ |
12578 | } |
12579 | else if (subreg_lowpart_p (op0) |
12580 | && GET_MODE_CLASS (mode) == MODE_INT |
&& is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
&& (code == NE || code == EQ)
&& GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
&& !paradoxical_subreg_p (op0)
12585 | && (nonzero_bits (SUBREG_REG (op0), inner_mode) |
12586 | & ~GET_MODE_MASK (mode)) == 0) |
12587 | { |
12588 | /* Remove outer subregs that don't do anything. */ |
12589 | tem = gen_lowpart (inner_mode, op1); |
12590 | |
12591 | if ((nonzero_bits (tem, inner_mode) |
12592 | & ~GET_MODE_MASK (mode)) == 0) |
12593 | { |
12594 | op0 = SUBREG_REG (op0); |
12595 | op1 = tem; |
12596 | continue; |
12597 | } |
12598 | break; |
12599 | } |
12600 | else |
12601 | break; |
12602 | |
12603 | /* FALLTHROUGH */ |
12604 | |
12605 | case ZERO_EXTEND: |
if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12607 | && (unsigned_comparison_p || equality_comparison_p) |
12608 | && HWI_COMPUTABLE_MODE_P (mode) |
12609 | && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode) |
12610 | && const_op >= 0 |
12611 | && have_insn_for (COMPARE, mode)) |
12612 | { |
12613 | op0 = XEXP (op0, 0); |
12614 | continue; |
12615 | } |
12616 | break; |
12617 | |
12618 | case PLUS: |
12619 | /* (eq (plus X A) B) -> (eq X (minus B A)). We can only do |
12620 | this for equality comparisons due to pathological cases involving |
12621 | overflows. */ |
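/* E.g. (eq (plus X (const_int 3)) (const_int 7)) becomes
(eq X (const_int 4)). */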
12622 | if (equality_comparison_p |
&& (tem = simplify_binary_operation (MINUS, mode,
op1, XEXP (op0, 1))) != 0)
12625 | { |
12626 | op0 = XEXP (op0, 0); |
12627 | op1 = tem; |
12628 | continue; |
12629 | } |
12630 | |
12631 | /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0. */ |
12632 | if (const_op == 0 && XEXP (op0, 1) == constm1_rtx |
12633 | && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p) |
12634 | { |
12635 | op0 = XEXP (XEXP (op0, 0), 0); |
12636 | code = (code == LT ? EQ : NE); |
12637 | continue; |
12638 | } |
12639 | break; |
12640 | |
12641 | case MINUS: |
12642 | /* We used to optimize signed comparisons against zero, but that |
12643 | was incorrect. Unsigned comparisons against zero (GTU, LEU) |
12644 | arrive here as equality comparisons, or (GEU, LTU) are |
12645 | optimized away. No need to special-case them. */ |
12646 | |
12647 | /* (eq (minus A B) C) -> (eq A (plus B C)) or |
12648 | (eq B (minus A C)), whichever simplifies. We can only do |
12649 | this for equality comparisons due to pathological cases involving |
12650 | overflows. */ |
12651 | if (equality_comparison_p |
&& (tem = simplify_binary_operation (PLUS, mode,
12653 | XEXP (op0, 1), op1)) != 0) |
12654 | { |
12655 | op0 = XEXP (op0, 0); |
12656 | op1 = tem; |
12657 | continue; |
12658 | } |
12659 | |
12660 | if (equality_comparison_p |
&& (tem = simplify_binary_operation (MINUS, mode,
12662 | XEXP (op0, 0), op1)) != 0) |
12663 | { |
12664 | op0 = XEXP (op0, 1); |
12665 | op1 = tem; |
12666 | continue; |
12667 | } |
12668 | |
12669 | /* The sign bit of (minus (ashiftrt X C) X), where C is the number |
12670 | of bits in X minus 1, is one iff X > 0. */ |
12671 | if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT |
12672 | && CONST_INT_P (XEXP (XEXP (op0, 0), 1)) |
12673 | && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1 |
12674 | && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1))) |
12675 | { |
12676 | op0 = XEXP (op0, 1); |
12677 | code = (code == GE ? LE : GT); |
12678 | continue; |
12679 | } |
12680 | break; |
12681 | |
12682 | case XOR: |
12683 | /* (eq (xor A B) C) -> (eq A (xor B C)). This is a simplification |
12684 | if C is zero or B is a constant. */ |
12685 | if (equality_comparison_p |
&& (tem = simplify_binary_operation (XOR, mode,
12687 | XEXP (op0, 1), op1)) != 0) |
12688 | { |
12689 | op0 = XEXP (op0, 0); |
12690 | op1 = tem; |
12691 | continue; |
12692 | } |
12693 | break; |
12694 | |
12695 | |
12696 | case IOR: |
12697 | /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero |
12698 | iff X <= 0. */ |
12699 | if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS |
12700 | && XEXP (XEXP (op0, 0), 1) == constm1_rtx |
12701 | && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1))) |
12702 | { |
12703 | op0 = XEXP (op0, 1); |
12704 | code = (code == GE ? GT : LE); |
12705 | continue; |
12706 | } |
12707 | break; |
12708 | |
12709 | case AND: |
12710 | /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1). This |
12711 | will be converted to a ZERO_EXTRACT later. */ |
12712 | if (const_op == 0 && equality_comparison_p |
12713 | && GET_CODE (XEXP (op0, 0)) == ASHIFT |
12714 | && XEXP (XEXP (op0, 0), 0) == const1_rtx) |
12715 | { |
12716 | op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1), |
12717 | XEXP (XEXP (op0, 0), 1)); |
op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12719 | continue; |
12720 | } |
12721 | |
12722 | /* If we are comparing (and (lshiftrt X C1) C2) for equality with |
12723 | zero and X is a comparison and C1 and C2 describe only bits set |
12724 | in STORE_FLAG_VALUE, we can compare with X. */ |
12725 | if (const_op == 0 && equality_comparison_p |
12726 | && mode_width <= HOST_BITS_PER_WIDE_INT |
12727 | && CONST_INT_P (XEXP (op0, 1)) |
12728 | && GET_CODE (XEXP (op0, 0)) == LSHIFTRT |
12729 | && CONST_INT_P (XEXP (XEXP (op0, 0), 1)) |
12730 | && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0 |
12731 | && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT) |
12732 | { |
12733 | mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode)) |
12734 | << INTVAL (XEXP (XEXP (op0, 0), 1))); |
12735 | if ((~STORE_FLAG_VALUE & mask) == 0 |
12736 | && (COMPARISON_P (XEXP (XEXP (op0, 0), 0)) |
12737 | || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0 |
12738 | && COMPARISON_P (tem)))) |
12739 | { |
12740 | op0 = XEXP (XEXP (op0, 0), 0); |
12741 | continue; |
12742 | } |
12743 | } |
12744 | |
12745 | /* If we are doing an equality comparison of an AND of a bit equal |
12746 | to the sign bit, replace this with a LT or GE comparison of |
12747 | the underlying value. */ |
12748 | if (equality_comparison_p |
12749 | && const_op == 0 |
12750 | && CONST_INT_P (XEXP (op0, 1)) |
12751 | && mode_width <= HOST_BITS_PER_WIDE_INT |
12752 | && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode)) |
12753 | == HOST_WIDE_INT_1U << (mode_width - 1))) |
12754 | { |
12755 | op0 = XEXP (op0, 0); |
12756 | code = (code == EQ ? GE : LT); |
12757 | continue; |
12758 | } |
12759 | |
12760 | /* If this AND operation is really a ZERO_EXTEND from a narrower |
12761 | mode, the constant fits within that mode, and this is either an |
12762 | equality or unsigned comparison, try to do this comparison in |
12763 | the narrower mode. |
12764 | |
12765 | Note that in: |
12766 | |
12767 | (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0)) |
12768 | -> (ne:DI (reg:SI 4) (const_int 0)) |
12769 | |
the transformation is invalid unless TARGET_TRULY_NOOP_TRUNCATION
allows it or the register is known to hold a value of the
required mode. */
12773 | if ((equality_comparison_p || unsigned_comparison_p) |
12774 | && CONST_INT_P (XEXP (op0, 1)) |
&& (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
& GET_MODE_MASK (mode))
+ 1)) >= 0
&& const_op >> i == 0
&& int_mode_for_size (i, 1).exists (&tmode))
12780 | { |
op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
12782 | continue; |
12783 | } |
12784 | |
12785 | /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1 |
12786 | fits in both M1 and M2 and the SUBREG is either paradoxical |
12787 | or represents the low part, permute the SUBREG and the AND |
12788 | and try again. */ |
12789 | if (GET_CODE (XEXP (op0, 0)) == SUBREG |
12790 | && CONST_INT_P (XEXP (op0, 1))) |
12791 | { |
12792 | unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1)); |
12793 | /* Require an integral mode, to avoid creating something like |
12794 | (AND:SF ...). */ |
12795 | if ((is_a <scalar_int_mode> |
(GET_MODE (SUBREG_REG (XEXP (op0, 0))), &tmode))
12797 | /* It is unsafe to commute the AND into the SUBREG if the |
12798 | SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is |
12799 | not defined. As originally written the upper bits |
12800 | have a defined value due to the AND operation. |
12801 | However, if we commute the AND inside the SUBREG then |
12802 | they no longer have defined values and the meaning of |
12803 | the code has been changed. |
12804 | Also C1 should not change value in the smaller mode, |
12805 | see PR67028 (a positive C1 can become negative in the |
12806 | smaller mode, so that the AND does no longer mask the |
12807 | upper bits). */ |
12808 | && ((WORD_REGISTER_OPERATIONS |
&& mode_width > GET_MODE_PRECISION (tmode)
&& mode_width <= BITS_PER_WORD
&& trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
|| (mode_width <= GET_MODE_PRECISION (tmode)
&& subreg_lowpart_p (XEXP (op0, 0))))
&& mode_width <= HOST_BITS_PER_WIDE_INT
&& HWI_COMPUTABLE_MODE_P (tmode)
12816 | && (c1 & ~mask) == 0 |
12817 | && (c1 & ~GET_MODE_MASK (tmode)) == 0 |
12818 | && c1 != mask |
12819 | && c1 != GET_MODE_MASK (tmode)) |
12820 | { |
op0 = simplify_gen_binary (AND, tmode,
SUBREG_REG (XEXP (op0, 0)),
gen_int_mode (c1, tmode));
12824 | op0 = gen_lowpart (mode, op0); |
12825 | continue; |
12826 | } |
12827 | } |
12828 | |
12829 | /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0). */ |
12830 | if (const_op == 0 && equality_comparison_p |
12831 | && XEXP (op0, 1) == const1_rtx |
12832 | && GET_CODE (XEXP (op0, 0)) == NOT) |
12833 | { |
12834 | op0 = simplify_and_const_int (NULL_RTX, mode, |
XEXP (XEXP (op0, 0), 0), 1);
12836 | code = (code == NE ? EQ : NE); |
12837 | continue; |
12838 | } |
12839 | |
12840 | /* Convert (ne (and (lshiftrt (not X)) 1) 0) to |
12841 | (eq (and (lshiftrt X) 1) 0). |
12842 | Also handle the case where (not X) is expressed using xor. */ |
12843 | if (const_op == 0 && equality_comparison_p |
12844 | && XEXP (op0, 1) == const1_rtx |
12845 | && GET_CODE (XEXP (op0, 0)) == LSHIFTRT) |
12846 | { |
12847 | rtx shift_op = XEXP (XEXP (op0, 0), 0); |
12848 | rtx shift_count = XEXP (XEXP (op0, 0), 1); |
12849 | |
12850 | if (GET_CODE (shift_op) == NOT |
12851 | || (GET_CODE (shift_op) == XOR |
12852 | && CONST_INT_P (XEXP (shift_op, 1)) |
12853 | && CONST_INT_P (shift_count) |
12854 | && HWI_COMPUTABLE_MODE_P (mode) |
12855 | && (UINTVAL (XEXP (shift_op, 1)) |
12856 | == HOST_WIDE_INT_1U |
12857 | << INTVAL (shift_count)))) |
12858 | { |
12859 | op0 |
12860 | = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count); |
op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12862 | code = (code == NE ? EQ : NE); |
12863 | continue; |
12864 | } |
12865 | } |
12866 | break; |
12867 | |
12868 | case ASHIFT: |
12869 | /* If we have (compare (ashift FOO N) (const_int C)) and |
12870 | the high order N bits of FOO (N+1 if an inequality comparison) |
12871 | are known to be zero, we can do this by comparing FOO with C |
12872 | shifted right N bits so long as the low-order N bits of C are |
12873 | zero. */ |
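/* E.g. (eq (ashift X (const_int 2)) (const_int 12)) becomes
(eq X (const_int 3)) when the two high-order bits of X are known
to be zero. */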
12874 | if (CONST_INT_P (XEXP (op0, 1)) |
12875 | && INTVAL (XEXP (op0, 1)) >= 0 |
12876 | && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p) |
12877 | < HOST_BITS_PER_WIDE_INT) |
12878 | && (((unsigned HOST_WIDE_INT) const_op |
12879 | & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1))) |
12880 | - 1)) == 0) |
12881 | && mode_width <= HOST_BITS_PER_WIDE_INT |
12882 | && (nonzero_bits (XEXP (op0, 0), mode) |
12883 | & ~(mask >> (INTVAL (XEXP (op0, 1)) |
12884 | + ! equality_comparison_p))) == 0) |
12885 | { |
12886 | /* We must perform a logical shift, not an arithmetic one, |
12887 | as we want the top N bits of C to be zero. */ |
12888 | unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode); |
12889 | |
12890 | temp >>= INTVAL (XEXP (op0, 1)); |
12891 | op1 = gen_int_mode (temp, mode); |
12892 | op0 = XEXP (op0, 0); |
12893 | continue; |
12894 | } |
12895 | |
12896 | /* If we are doing a sign bit comparison, it means we are testing |
12897 | a particular bit. Convert it to the appropriate AND. */ |
12898 | if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1)) |
12899 | && mode_width <= HOST_BITS_PER_WIDE_INT) |
12900 | { |
12901 | op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), |
(HOST_WIDE_INT_1U
12903 | << (mode_width - 1 |
12904 | - INTVAL (XEXP (op0, 1))))); |
12905 | code = (code == LT ? NE : EQ); |
12906 | continue; |
12907 | } |
12908 | |
/* If this is an equality comparison with zero and we are shifting
12910 | the low bit to the sign bit, we can convert this to an AND of the |
12911 | low-order bit. */ |
12912 | if (const_op == 0 && equality_comparison_p |
12913 | && CONST_INT_P (XEXP (op0, 1)) |
12914 | && UINTVAL (XEXP (op0, 1)) == mode_width - 1) |
12915 | { |
op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
12917 | continue; |
12918 | } |
12919 | break; |
12920 | |
12921 | case ASHIFTRT: |
12922 | /* If this is an equality comparison with zero, we can do this |
12923 | as a logical shift, which might be much simpler. */ |
12924 | if (equality_comparison_p && const_op == 0 |
12925 | && CONST_INT_P (XEXP (op0, 1))) |
12926 | { |
op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
12928 | XEXP (op0, 0), |
12929 | INTVAL (XEXP (op0, 1))); |
12930 | continue; |
12931 | } |
12932 | |
12933 | /* If OP0 is a sign extension and CODE is not an unsigned comparison, |
12934 | do the comparison in a narrower mode. */ |
12935 | if (! unsigned_comparison_p |
12936 | && CONST_INT_P (XEXP (op0, 1)) |
12937 | && GET_CODE (XEXP (op0, 0)) == ASHIFT |
12938 | && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1) |
&& (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
.exists (&tmode))
12941 | && (((unsigned HOST_WIDE_INT) const_op |
12942 | + (GET_MODE_MASK (tmode) >> 1) + 1) |
12943 | <= GET_MODE_MASK (tmode))) |
12944 | { |
12945 | op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0)); |
12946 | continue; |
12947 | } |
12948 | |
12949 | /* Likewise if OP0 is a PLUS of a sign extension with a |
12950 | constant, which is usually represented with the PLUS |
12951 | between the shifts. */ |
12952 | if (! unsigned_comparison_p |
12953 | && CONST_INT_P (XEXP (op0, 1)) |
12954 | && GET_CODE (XEXP (op0, 0)) == PLUS |
12955 | && CONST_INT_P (XEXP (XEXP (op0, 0), 1)) |
12956 | && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT |
12957 | && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1) |
&& (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
.exists (&tmode))
12960 | && (((unsigned HOST_WIDE_INT) const_op |
12961 | + (GET_MODE_MASK (tmode) >> 1) + 1) |
12962 | <= GET_MODE_MASK (tmode))) |
12963 | { |
12964 | rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0); |
12965 | rtx add_const = XEXP (XEXP (op0, 0), 1); |
rtx new_const = simplify_gen_binary (ASHIFTRT, mode,
add_const, XEXP (op0, 1));
12968 | |
op0 = simplify_gen_binary (PLUS, tmode,
gen_lowpart (tmode, inner),
new_const);
12972 | continue; |
12973 | } |
12974 | |
12975 | /* FALLTHROUGH */ |
12976 | case LSHIFTRT: |
12977 | /* If we have (compare (xshiftrt FOO N) (const_int C)) and |
12978 | the low order N bits of FOO are known to be zero, we can do this |
12979 | by comparing FOO with C shifted left N bits so long as no |
12980 | overflow occurs. Even if the low order N bits of FOO aren't known |
12981 | to be zero, if the comparison is >= or < we can use the same |
12982 | optimization and for > or <= by setting all the low |
12983 | order N bits in the comparison constant. */ |
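/* E.g. (geu (lshiftrt X (const_int 2)) (const_int 5)) becomes
(geu X (const_int 20)), provided the shifted constant still fits
in the mode. */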
12984 | if (CONST_INT_P (XEXP (op0, 1)) |
12985 | && INTVAL (XEXP (op0, 1)) > 0 |
12986 | && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT |
12987 | && mode_width <= HOST_BITS_PER_WIDE_INT |
12988 | && (((unsigned HOST_WIDE_INT) const_op |
12989 | + (GET_CODE (op0) != LSHIFTRT |
12990 | ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1) |
12991 | + 1) |
12992 | : 0)) |
12993 | <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)))) |
12994 | { |
12995 | unsigned HOST_WIDE_INT low_bits |
12996 | = (nonzero_bits (XEXP (op0, 0), mode) |
12997 | & ((HOST_WIDE_INT_1U |
12998 | << INTVAL (XEXP (op0, 1))) - 1)); |
12999 | if (low_bits == 0 || !equality_comparison_p) |
13000 | { |
13001 | /* If the shift was logical, then we must make the condition |
13002 | unsigned. */ |
13003 | if (GET_CODE (op0) == LSHIFTRT) |
13004 | code = unsigned_condition (code); |
13005 | |
13006 | const_op = (unsigned HOST_WIDE_INT) const_op |
13007 | << INTVAL (XEXP (op0, 1)); |
13008 | if (low_bits != 0 |
13009 | && (code == GT || code == GTU |
13010 | || code == LE || code == LEU)) |
13011 | const_op |
13012 | |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1); |
13013 | op1 = GEN_INT (const_op); |
13014 | op0 = XEXP (op0, 0); |
13015 | continue; |
13016 | } |
13017 | } |
13018 | |
13019 | /* If we are using this shift to extract just the sign bit, we |
13020 | can replace this with an LT or GE comparison. */ |
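/* E.g. in SImode, (ne (lshiftrt X 31) (const_int 0)) is true exactly
when the sign bit of X is set, so it becomes (lt X (const_int 0));
likewise the EQ form becomes (ge X (const_int 0)).  */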
13021 | if (const_op == 0 |
13022 | && (equality_comparison_p || sign_bit_comparison_p) |
13023 | && CONST_INT_P (XEXP (op0, 1)) |
13024 | && UINTVAL (XEXP (op0, 1)) == mode_width - 1) |
13025 | { |
13026 | op0 = XEXP (op0, 0); |
13027 | code = (code == NE || code == GT ? LT : GE); |
13028 | continue; |
13029 | } |
13030 | break; |
13031 | |
13032 | default: |
13033 | break; |
13034 | } |
13035 | |
13036 | break; |
13037 | } |
13038 | |
13039 | /* Now make any compound operations involved in this comparison. Then, |
check for an outermost SUBREG on OP0 that is not doing anything or is
13041 | paradoxical. The latter transformation must only be performed when |
13042 | it is known that the "extra" bits will be the same in op0 and op1 or |
13043 | that they don't matter. There are three cases to consider: |
13044 | |
13045 | 1. SUBREG_REG (op0) is a register. In this case the bits are don't |
13046 | care bits and we can assume they have any convenient value. So |
13047 | making the transformation is safe. |
13048 | |
13049 | 2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is UNKNOWN. |
13050 | In this case the upper bits of op0 are undefined. We should not make |
13051 | the simplification in that case as we do not know the contents of |
13052 | those bits. |
13053 | |
13054 | 3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not UNKNOWN. |
13055 | In that case we know those bits are zeros or ones. We must also be |
13056 | sure that they are the same as the upper bits of op1. |
13057 | |
13058 | We can never remove a SUBREG for a non-equality comparison because |
13059 | the sign bit is in a different place in the underlying object. */ |
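/* A sketch of case 1: given (eq (subreg:DI (reg:SI R) 0) (const_int 0))
with a paradoxical SUBREG, the upper bits are don't-cares, so the
comparison can be done as (eq (reg:SI R) (const_int 0)) in SImode.  */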
13060 | |
13061 | rtx_code op0_mco_code = SET; |
13062 | if (op1 == const0_rtx) |
13063 | op0_mco_code = code == NE || code == EQ ? EQ : COMPARE; |
13064 | |
op0 = make_compound_operation (op0, op0_mco_code);
op1 = make_compound_operation (op1, SET);
13067 | |
13068 | if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0) |
&& is_int_mode (GET_MODE (op0), &mode)
&& is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
13071 | && (code == NE || code == EQ)) |
13072 | { |
if (paradoxical_subreg_p (op0))
13074 | { |
13075 | /* For paradoxical subregs, allow case 1 as above. Case 3 isn't |
13076 | implemented. */ |
13077 | if (REG_P (SUBREG_REG (op0))) |
13078 | { |
13079 | op0 = SUBREG_REG (op0); |
13080 | op1 = gen_lowpart (inner_mode, op1); |
13081 | } |
13082 | } |
else if (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
13084 | && (nonzero_bits (SUBREG_REG (op0), inner_mode) |
13085 | & ~GET_MODE_MASK (mode)) == 0) |
13086 | { |
13087 | tem = gen_lowpart (inner_mode, op1); |
13088 | |
13089 | if ((nonzero_bits (tem, inner_mode) & ~GET_MODE_MASK (mode)) == 0) |
13090 | op0 = SUBREG_REG (op0), op1 = tem; |
13091 | } |
13092 | } |
13093 | |
13094 | /* We now do the opposite procedure: Some machines don't have compare |
13095 | insns in all modes. If OP0's mode is an integer mode smaller than a |
13096 | word and we can't do a compare in that mode, see if there is a larger |
13097 | mode for which we can do the compare. There are a number of cases in |
13098 | which we can use the wider mode. */ |
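/* For example, on a hypothetical target with no HImode compare pattern,
(eq (reg:HI X) (const_int 5)) can become
(eq (zero_extend:SI (reg:HI X)) (const_int 5)): zero extension is
injective, so equality is unaffected by the widening.  */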
13099 | |
if (is_int_mode (GET_MODE (op0), &mode)
13101 | && GET_MODE_SIZE (mode) < UNITS_PER_WORD |
13102 | && ! have_insn_for (COMPARE, mode)) |
13103 | FOR_EACH_WIDER_MODE (tmode_iter, mode) |
13104 | { |
13105 | tmode = tmode_iter.require (); |
if (!HWI_COMPUTABLE_MODE_P (tmode))
13107 | break; |
13108 | if (have_insn_for (COMPARE, tmode)) |
13109 | { |
13110 | int zero_extended; |
13111 | |
13112 | /* If this is a test for negative, we can make an explicit |
13113 | test of the sign bit. Test this first so we can use |
13114 | a paradoxical subreg to extend OP0. */ |
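/* Sketch: (lt (reg:HI X) (const_int 0)) becomes
(ne (and:SI (subreg:SI (reg:HI X) 0) (const_int 32768)) (const_int 0)),
testing HImode's sign bit within the wider SImode.  */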
13115 | |
13116 | if (op1 == const0_rtx && (code == LT || code == GE) |
13117 | && HWI_COMPUTABLE_MODE_P (mode)) |
13118 | { |
13119 | unsigned HOST_WIDE_INT sign |
13120 | = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1); |
op0 = simplify_gen_binary (AND, tmode,
gen_lowpart (tmode, op0),
gen_int_mode (sign, tmode));
13124 | code = (code == LT) ? NE : EQ; |
13125 | break; |
13126 | } |
13127 | |
13128 | /* If the only nonzero bits in OP0 and OP1 are those in the |
13129 | narrower mode and this is an equality or unsigned comparison, |
13130 | we can use the wider mode. Similarly for sign-extended |
13131 | values, in which case it is true for all comparisons. */ |
13132 | zero_extended = ((code == EQ || code == NE |
13133 | || code == GEU || code == GTU |
13134 | || code == LEU || code == LTU) |
13135 | && (nonzero_bits (op0, tmode) |
13136 | & ~GET_MODE_MASK (mode)) == 0 |
13137 | && ((CONST_INT_P (op1) |
13138 | || (nonzero_bits (op1, tmode) |
13139 | & ~GET_MODE_MASK (mode)) == 0))); |
13140 | |
13141 | if (zero_extended |
13142 | || ((num_sign_bit_copies (op0, tmode) |
> (unsigned int) (GET_MODE_PRECISION (tmode)
13144 | - GET_MODE_PRECISION (mode))) |
13145 | && (num_sign_bit_copies (op1, tmode) |
> (unsigned int) (GET_MODE_PRECISION (tmode)
13147 | - GET_MODE_PRECISION (mode))))) |
13148 | { |
13149 | /* If OP0 is an AND and we don't have an AND in MODE either, |
13150 | make a new AND in the proper mode. */ |
13151 | if (GET_CODE (op0) == AND |
13152 | && !have_insn_for (AND, mode)) |
op0 = simplify_gen_binary (AND, tmode,
13154 | gen_lowpart (tmode, |
13155 | XEXP (op0, 0)), |
13156 | gen_lowpart (tmode, |
13157 | XEXP (op0, 1))); |
13158 | else |
13159 | { |
13160 | if (zero_extended) |
13161 | { |
op0 = simplify_gen_unary (ZERO_EXTEND, tmode,
op0, mode);
op1 = simplify_gen_unary (ZERO_EXTEND, tmode,
op1, mode);
13166 | } |
13167 | else |
13168 | { |
op0 = simplify_gen_unary (SIGN_EXTEND, tmode,
op0, mode);
op1 = simplify_gen_unary (SIGN_EXTEND, tmode,
op1, mode);
13173 | } |
13174 | break; |
13175 | } |
13176 | } |
13177 | } |
13178 | } |
13179 | |
13180 | /* We may have changed the comparison operands. Re-canonicalize. */ |
13181 | if (swap_commutative_operands_p (op0, op1)) |
13182 | { |
std::swap (op0, op1);
13184 | code = swap_condition (code); |
13185 | } |
13186 | |
13187 | /* If this machine only supports a subset of valid comparisons, see if we |
13188 | can convert an unsupported one into a supported one. */ |
target_canonicalize_comparison (&code, &op0, &op1, 0);
13190 | |
13191 | *pop0 = op0; |
13192 | *pop1 = op1; |
13193 | |
13194 | return code; |
13195 | } |
13196 | |
13197 | /* Utility function for record_value_for_reg. Count number of |
13198 | rtxs in X. */ |
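/* The special cases below count shared operands once per occurrence, as
if the expression were unshared; e.g. (plus X X) yields
1 + 2 * count_rtxs (X).  The caller compares the result against
param_max_last_value_rtl to keep a recorded value from growing too
large once it is substituted back in several places.  */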
13199 | static int |
13200 | count_rtxs (rtx x) |
13201 | { |
13202 | enum rtx_code code = GET_CODE (x); |
13203 | const char *fmt; |
13204 | int i, j, ret = 1; |
13205 | |
13206 | if (GET_RTX_CLASS (code) == RTX_BIN_ARITH |
13207 | || GET_RTX_CLASS (code) == RTX_COMM_ARITH) |
13208 | { |
13209 | rtx x0 = XEXP (x, 0); |
13210 | rtx x1 = XEXP (x, 1); |
13211 | |
13212 | if (x0 == x1) |
return 1 + 2 * count_rtxs (x0);
13214 | |
13215 | if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH |
13216 | || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH) |
13217 | && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1))) |
return 2 + 2 * count_rtxs (x0)
+ count_rtxs (x0 == XEXP (x1, 0)
? XEXP (x1, 1) : XEXP (x1, 0));
13221 | |
13222 | if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH |
13223 | || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH) |
13224 | && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1))) |
return 2 + 2 * count_rtxs (x1)
+ count_rtxs (x1 == XEXP (x0, 0)
? XEXP (x0, 1) : XEXP (x0, 0));
13228 | } |
13229 | |
13230 | fmt = GET_RTX_FORMAT (code); |
13231 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
13232 | if (fmt[i] == 'e') |
13233 | ret += count_rtxs (XEXP (x, i)); |
13234 | else if (fmt[i] == 'E') |
13235 | for (j = 0; j < XVECLEN (x, i); j++) |
13236 | ret += count_rtxs (XVECEXP (x, i, j)); |
13237 | |
13238 | return ret; |
13239 | } |
13240 | |
13241 | /* Utility function for following routine. Called when X is part of a value |
13242 | being stored into last_set_value. Sets last_set_table_tick |
13243 | for each register mentioned. Similar to mention_regs in cse.cc */ |
13244 | |
13245 | static void |
13246 | update_table_tick (rtx x) |
13247 | { |
13248 | enum rtx_code code = GET_CODE (x); |
13249 | const char *fmt = GET_RTX_FORMAT (code); |
13250 | int i, j; |
13251 | |
13252 | if (code == REG) |
13253 | { |
13254 | unsigned int regno = REGNO (x); |
13255 | unsigned int endregno = END_REGNO (x); |
13256 | unsigned int r; |
13257 | |
13258 | for (r = regno; r < endregno; r++) |
13259 | { |
13260 | reg_stat_type *rsp = ®_stat[r]; |
13261 | rsp->last_set_table_tick = label_tick; |
13262 | } |
13263 | |
13264 | return; |
13265 | } |
13266 | |
13267 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
13268 | if (fmt[i] == 'e') |
13269 | { |
13270 | /* Check for identical subexpressions. If x contains |
13271 | identical subexpression we only have to traverse one of |
13272 | them. */ |
13273 | if (i == 0 && ARITHMETIC_P (x)) |
13274 | { |
13275 | /* Note that at this point x1 has already been |
13276 | processed. */ |
13277 | rtx x0 = XEXP (x, 0); |
13278 | rtx x1 = XEXP (x, 1); |
13279 | |
13280 | /* If x0 and x1 are identical then there is no need to |
13281 | process x0. */ |
13282 | if (x0 == x1) |
13283 | break; |
13284 | |
13285 | /* If x0 is identical to a subexpression of x1 then while |
13286 | processing x1, x0 has already been processed. Thus we |
13287 | are done with x. */ |
13288 | if (ARITHMETIC_P (x1) |
13289 | && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1))) |
13290 | break; |
13291 | |
13292 | /* If x1 is identical to a subexpression of x0 then we |
13293 | still have to process the rest of x0. */ |
13294 | if (ARITHMETIC_P (x0) |
13295 | && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1))) |
13296 | { |
13297 | update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0)); |
13298 | break; |
13299 | } |
13300 | } |
13301 | |
13302 | update_table_tick (XEXP (x, i)); |
13303 | } |
13304 | else if (fmt[i] == 'E') |
13305 | for (j = 0; j < XVECLEN (x, i); j++) |
13306 | update_table_tick (XVECEXP (x, i, j)); |
13307 | } |
13308 | |
13309 | /* Record that REG is set to VALUE in insn INSN. If VALUE is zero, we |
13310 | are saying that the register is clobbered and we no longer know its |
13311 | value. If INSN is zero, don't update reg_stat[].last_set; this is |
13312 | only permitted with VALUE also zero and is used to invalidate the |
13313 | register. */ |
13314 | |
13315 | static void |
13316 | record_value_for_reg (rtx reg, rtx_insn *insn, rtx value) |
13317 | { |
13318 | unsigned int regno = REGNO (reg); |
unsigned int endregno = END_REGNO (reg);
13320 | unsigned int i; |
13321 | reg_stat_type *rsp; |
13322 | |
13323 | /* If VALUE contains REG and we have a previous value for REG, substitute |
13324 | the previous value. */ |
13325 | if (value && insn && reg_overlap_mentioned_p (reg, value)) |
13326 | { |
13327 | rtx tem; |
13328 | |
13329 | /* Set things up so get_last_value is allowed to see anything set up to |
13330 | our insn. */ |
13331 | subst_low_luid = DF_INSN_LUID (insn); |
13332 | tem = get_last_value (reg); |
13333 | |
13334 | /* If TEM is simply a binary operation with two CLOBBERs as operands, |
13335 | it isn't going to be useful and will take a lot of time to process, |
13336 | so just use the CLOBBER. */ |
13337 | |
13338 | if (tem) |
13339 | { |
13340 | if (ARITHMETIC_P (tem) |
13341 | && GET_CODE (XEXP (tem, 0)) == CLOBBER |
13342 | && GET_CODE (XEXP (tem, 1)) == CLOBBER) |
13343 | tem = XEXP (tem, 0); |
13344 | else if (count_occurrences (value, reg, 1) >= 2) |
13345 | { |
13346 | /* If there are two or more occurrences of REG in VALUE, |
13347 | prevent the value from growing too much. */ |
if (count_rtxs (tem) > param_max_last_value_rtl)
13349 | tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx); |
13350 | } |
13351 | |
13352 | value = replace_rtx (copy_rtx (value), reg, tem); |
13353 | } |
13354 | } |
13355 | |
13356 | /* For each register modified, show we don't know its value, that |
13357 | we don't know about its bitwise content, that its value has been |
13358 | updated, and that we don't know the location of the death of the |
13359 | register. */ |
13360 | for (i = regno; i < endregno; i++) |
13361 | { |
13362 | rsp = ®_stat[i]; |
13363 | |
13364 | if (insn) |
13365 | rsp->last_set = insn; |
13366 | |
13367 | rsp->last_set_value = 0; |
13368 | rsp->last_set_mode = VOIDmode; |
13369 | rsp->last_set_nonzero_bits = 0; |
13370 | rsp->last_set_sign_bit_copies = 0; |
13371 | rsp->last_death = 0; |
13372 | rsp->truncated_to_mode = VOIDmode; |
13373 | } |
13374 | |
13375 | /* Mark registers that are being referenced in this value. */ |
13376 | if (value) |
update_table_tick (value);
13378 | |
13379 | /* Now update the status of each register being set. |
13380 | If someone is using this register in this block, set this register |
13381 | to invalid since we will get confused between the two lives in this |
13382 | basic block. This makes using this register always invalid. In cse, we |
13383 | scan the table to invalidate all entries using this register, but this |
13384 | is too much work for us. */ |
13385 | |
13386 | for (i = regno; i < endregno; i++) |
13387 | { |
13388 | rsp = ®_stat[i]; |
13389 | rsp->last_set_label = label_tick; |
13390 | if (!insn |
13391 | || (value && rsp->last_set_table_tick >= label_tick_ebb_start)) |
13392 | rsp->last_set_invalid = true; |
13393 | else |
13394 | rsp->last_set_invalid = false; |
13395 | } |
13396 | |
13397 | /* The value being assigned might refer to X (like in "x++;"). In that |
13398 | case, we must replace it with (clobber (const_int 0)) to prevent |
13399 | infinite loops. */ |
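/* A minimal sketch: for "x++" the insn is
(set (reg N) (plus (reg N) (const_int 1))); if the self-reference is
still present here, the inner (reg N) is replaced by
(clobber (const_int 0)) so that get_last_value cannot chase the value
into itself.  */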
13400 | rsp = ®_stat[regno]; |
13401 | if (value && !get_last_value_validate (&value, insn, label_tick, false)) |
13402 | { |
13403 | value = copy_rtx (value); |
13404 | if (!get_last_value_validate (&value, insn, label_tick, true)) |
13405 | value = 0; |
13406 | } |
13407 | |
13408 | /* For the main register being modified, update the value, the mode, the |
13409 | nonzero bits, and the number of sign bit copies. */ |
13410 | |
13411 | rsp->last_set_value = value; |
13412 | |
13413 | if (value) |
13414 | { |
13415 | machine_mode mode = GET_MODE (reg); |
13416 | subst_low_luid = DF_INSN_LUID (insn); |
13417 | rsp->last_set_mode = mode; |
13418 | if (GET_MODE_CLASS (mode) == MODE_INT |
13419 | && HWI_COMPUTABLE_MODE_P (mode)) |
13420 | mode = nonzero_bits_mode; |
13421 | rsp->last_set_nonzero_bits = nonzero_bits (value, mode); |
13422 | rsp->last_set_sign_bit_copies |
13423 | = num_sign_bit_copies (value, GET_MODE (reg)); |
13424 | } |
13425 | } |
13426 | |
13427 | /* Called via note_stores from record_dead_and_set_regs to handle one |
13428 | SET or CLOBBER in an insn. DATA is the instruction in which the |
13429 | set is occurring. */ |
13430 | |
13431 | static void |
13432 | record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data) |
13433 | { |
13434 | rtx_insn *record_dead_insn = (rtx_insn *) data; |
13435 | |
13436 | if (GET_CODE (dest) == SUBREG) |
13437 | dest = SUBREG_REG (dest); |
13438 | |
13439 | if (!record_dead_insn) |
13440 | { |
13441 | if (REG_P (dest)) |
record_value_for_reg (dest, NULL, NULL_RTX);
13443 | return; |
13444 | } |
13445 | |
13446 | if (REG_P (dest)) |
13447 | { |
13448 | /* If we are setting the whole register, we know its value. */ |
13449 | if (GET_CODE (setter) == SET && dest == SET_DEST (setter)) |
record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
13451 | /* We can handle a SUBREG if it's the low part, but we must be |
13452 | careful with paradoxical SUBREGs on RISC architectures because |
13453 | we cannot strip e.g. an extension around a load and record the |
13454 | naked load since the RTL middle-end considers that the upper bits |
13455 | are defined according to LOAD_EXTEND_OP. */ |
13456 | else if (GET_CODE (setter) == SET |
13457 | && GET_CODE (SET_DEST (setter)) == SUBREG |
13458 | && SUBREG_REG (SET_DEST (setter)) == dest |
13459 | && known_le (GET_MODE_PRECISION (GET_MODE (dest)), |
13460 | BITS_PER_WORD) |
13461 | && subreg_lowpart_p (SET_DEST (setter))) |
13462 | { |
13463 | if (WORD_REGISTER_OPERATIONS |
13464 | && word_register_operation_p (SET_SRC (setter)) |
13465 | && paradoxical_subreg_p (SET_DEST (setter))) |
record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
13467 | else if (!partial_subreg_p (SET_DEST (setter))) |
record_value_for_reg (dest, record_dead_insn,
13469 | gen_lowpart (GET_MODE (dest), |
13470 | SET_SRC (setter))); |
13471 | else |
13472 | { |
record_value_for_reg (dest, record_dead_insn,
13474 | gen_lowpart (GET_MODE (dest), |
13475 | SET_SRC (setter))); |
13476 | |
13477 | unsigned HOST_WIDE_INT mask; |
13478 | reg_stat_type *rsp = ®_stat[REGNO (dest)]; |
13479 | mask = GET_MODE_MASK (GET_MODE (SET_DEST (setter))); |
13480 | rsp->last_set_nonzero_bits |= ~mask; |
13481 | rsp->last_set_sign_bit_copies = 1; |
13482 | } |
13483 | } |
13484 | /* Otherwise show that we don't know the value. */ |
13485 | else |
record_value_for_reg (dest, record_dead_insn, NULL_RTX);
13487 | } |
13488 | else if (MEM_P (dest) |
13489 | /* Ignore pushes, they clobber nothing. */ |
13490 | && ! push_operand (dest, GET_MODE (dest))) |
13491 | mem_last_set = DF_INSN_LUID (record_dead_insn); |
13492 | } |
13493 | |
13494 | /* Update the records of when each REG was most recently set or killed |
13495 | for the things done by INSN. This is the last thing done in processing |
13496 | INSN in the combiner loop. |
13497 | |
13498 | We update reg_stat[], in particular fields last_set, last_set_value, |
13499 | last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies, |
13500 | last_death, and also the similar information mem_last_set (which insn |
13501 | most recently modified memory) and last_call_luid (which insn was the |
13502 | most recent subroutine call). */ |
13503 | |
13504 | static void |
13505 | record_dead_and_set_regs (rtx_insn *insn) |
13506 | { |
13507 | rtx link; |
13508 | unsigned int i; |
13509 | |
13510 | for (link = REG_NOTES (insn); link; link = XEXP (link, 1)) |
13511 | { |
13512 | if (REG_NOTE_KIND (link) == REG_DEAD |
13513 | && REG_P (XEXP (link, 0))) |
13514 | { |
13515 | unsigned int regno = REGNO (XEXP (link, 0)); |
13516 | unsigned int endregno = END_REGNO (XEXP (link, 0)); |
13517 | |
13518 | for (i = regno; i < endregno; i++) |
13519 | { |
13520 | reg_stat_type *rsp; |
13521 | |
13522 | rsp = ®_stat[i]; |
13523 | rsp->last_death = insn; |
13524 | } |
13525 | } |
13526 | else if (REG_NOTE_KIND (link) == REG_INC) |
13527 | record_value_for_reg (XEXP (link, 0), insn, NULL_RTX); |
13528 | } |
13529 | |
13530 | if (CALL_P (insn)) |
13531 | { |
13532 | HARD_REG_SET callee_clobbers |
13533 | = insn_callee_abi (insn).full_and_partial_reg_clobbers (); |
13534 | hard_reg_set_iterator hrsi; |
13535 | EXECUTE_IF_SET_IN_HARD_REG_SET (callee_clobbers, 0, i, hrsi) |
13536 | { |
13537 | reg_stat_type *rsp; |
13538 | |
13539 | /* ??? We could try to preserve some information from the last |
13540 | set of register I if the call doesn't actually clobber |
13541 | (reg:last_set_mode I), which might be true for ABIs with |
13542 | partial clobbers. However, it would be difficult to |
update last_set_nonzero_bits and last_set_sign_bit_copies
13544 | to account for the part of I that actually was clobbered. |
13545 | It wouldn't help much anyway, since we rarely see this |
13546 | situation before RA. */ |
13547 | rsp = ®_stat[i]; |
13548 | rsp->last_set_invalid = true; |
13549 | rsp->last_set = insn; |
13550 | rsp->last_set_value = 0; |
13551 | rsp->last_set_mode = VOIDmode; |
13552 | rsp->last_set_nonzero_bits = 0; |
13553 | rsp->last_set_sign_bit_copies = 0; |
13554 | rsp->last_death = 0; |
13555 | rsp->truncated_to_mode = VOIDmode; |
13556 | } |
13557 | |
13558 | last_call_luid = mem_last_set = DF_INSN_LUID (insn); |
13559 | |
13560 | /* We can't combine into a call pattern. Remember, though, that |
13561 | the return value register is set at this LUID. We could |
13562 | still replace a register with the return value from the |
13563 | wrong subroutine call! */ |
13564 | note_stores (insn, record_dead_and_set_regs_1, NULL_RTX); |
13565 | } |
13566 | else |
13567 | note_stores (insn, record_dead_and_set_regs_1, insn); |
13568 | } |
13569 | |
13570 | /* If a SUBREG has the promoted bit set, it is in fact a property of the |
13571 | register present in the SUBREG, so for each such SUBREG go back and |
13572 | adjust nonzero and sign bit information of the registers that are |
13573 | known to have some zero/sign bits set. |
13574 | |
13575 | This is needed because when combine blows the SUBREGs away, the |
13576 | information on zero/sign bits is lost and further combines can be |
13577 | missed because of that. */ |
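/* E.g. if (subreg:QI (reg:SI R) 0) is marked SUBREG_PROMOTED_VAR_P and
promoted unsigned, then bits 8 and above of R are known zero, so R's
last_set_nonzero_bits can be narrowed to the QImode mask below.  */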
13578 | |
13579 | static void |
13580 | record_promoted_value (rtx_insn *insn, rtx subreg) |
13581 | { |
13582 | struct insn_link *links; |
13583 | rtx set; |
13584 | unsigned int regno = REGNO (SUBREG_REG (subreg)); |
13585 | machine_mode mode = GET_MODE (subreg); |
13586 | |
13587 | if (!HWI_COMPUTABLE_MODE_P (mode)) |
13588 | return; |
13589 | |
13590 | for (links = LOG_LINKS (insn); links;) |
13591 | { |
13592 | reg_stat_type *rsp; |
13593 | |
13594 | insn = links->insn; |
13595 | set = single_set (insn); |
13596 | |
13597 | if (! set || !REG_P (SET_DEST (set)) |
13598 | || REGNO (SET_DEST (set)) != regno |
13599 | || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg))) |
13600 | { |
13601 | links = links->next; |
13602 | continue; |
13603 | } |
13604 | |
13605 | rsp = ®_stat[regno]; |
13606 | if (rsp->last_set == insn) |
13607 | { |
13608 | if (SUBREG_PROMOTED_UNSIGNED_P (subreg)) |
13609 | rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode); |
13610 | } |
13611 | |
13612 | if (REG_P (SET_SRC (set))) |
13613 | { |
13614 | regno = REGNO (SET_SRC (set)); |
13615 | links = LOG_LINKS (insn); |
13616 | } |
13617 | else |
13618 | break; |
13619 | } |
13620 | } |
13621 | |
13622 | /* Check if X, a register, is known to contain a value already |
13623 | truncated to MODE. In this case we can use a subreg to refer to |
13624 | the truncated value even though in the generic case we would need |
13625 | an explicit truncation. */ |
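/* E.g. if R was recorded as truncated to SImode, a later
(truncate:SI (reg:DI R)) can be expressed as (subreg:SI (reg:DI R) 0),
even on targets where such a truncation is not normally a no-op.  */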
13626 | |
13627 | static bool |
13628 | reg_truncated_to_mode (machine_mode mode, const_rtx x) |
13629 | { |
13630 | reg_stat_type *rsp = ®_stat[REGNO (x)]; |
13631 | machine_mode truncated = rsp->truncated_to_mode; |
13632 | |
13633 | if (truncated == 0 |
13634 | || rsp->truncation_label < label_tick_ebb_start) |
13635 | return false; |
if (!partial_subreg_p (mode, truncated))
13637 | return true; |
13638 | if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated)) |
13639 | return true; |
13640 | return false; |
13641 | } |
13642 | |
13643 | /* If X is a hard reg or a subreg record the mode that the register is |
13644 | accessed in. For non-TARGET_TRULY_NOOP_TRUNCATION targets we might be |
13645 | able to turn a truncate into a subreg using this information. Return true |
13646 | if traversing X is complete. */ |
13647 | |
13648 | static bool |
13649 | record_truncated_value (rtx x) |
13650 | { |
13651 | machine_mode truncated_mode; |
13652 | reg_stat_type *rsp; |
13653 | |
13654 | if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x))) |
13655 | { |
13656 | machine_mode original_mode = GET_MODE (SUBREG_REG (x)); |
13657 | truncated_mode = GET_MODE (x); |
13658 | |
if (!partial_subreg_p (truncated_mode, original_mode))
13660 | return true; |
13661 | |
if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
13664 | return true; |
13665 | |
13666 | x = SUBREG_REG (x); |
13667 | } |
13668 | /* ??? For hard-regs we now record everything. We might be able to |
13669 | optimize this using last_set_mode. */ |
13670 | else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER) |
13671 | truncated_mode = GET_MODE (x); |
13672 | else |
13673 | return false; |
13674 | |
13675 | rsp = ®_stat[REGNO (x)]; |
13676 | if (rsp->truncated_to_mode == 0 |
13677 | || rsp->truncation_label < label_tick_ebb_start |
|| partial_subreg_p (truncated_mode, rsp->truncated_to_mode))
13679 | { |
13680 | rsp->truncated_to_mode = truncated_mode; |
13681 | rsp->truncation_label = label_tick; |
13682 | } |
13683 | |
13684 | return true; |
13685 | } |
13686 | |
13687 | /* Callback for note_uses. Find hardregs and subregs of pseudos and |
the modes they are used in.  This can help turn TRUNCATEs into
13689 | SUBREGs. */ |
13690 | |
13691 | static void |
13692 | record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED) |
13693 | { |
13694 | subrtx_var_iterator::array_type array; |
13695 | FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST) |
if (record_truncated_value (*iter))
13697 | iter.skip_subrtxes (); |
13698 | } |
13699 | |
13700 | /* Scan X for promoted SUBREGs. For each one found, |
13701 | note what it implies to the registers used in it. */ |
13702 | |
13703 | static void |
13704 | check_promoted_subreg (rtx_insn *insn, rtx x) |
13705 | { |
13706 | if (GET_CODE (x) == SUBREG |
13707 | && SUBREG_PROMOTED_VAR_P (x) |
13708 | && REG_P (SUBREG_REG (x))) |
record_promoted_value (insn, x);
13710 | else |
13711 | { |
13712 | const char *format = GET_RTX_FORMAT (GET_CODE (x)); |
13713 | int i, j; |
13714 | |
13715 | for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++) |
13716 | switch (format[i]) |
13717 | { |
13718 | case 'e': |
13719 | check_promoted_subreg (insn, XEXP (x, i)); |
13720 | break; |
13721 | case 'V': |
13722 | case 'E': |
13723 | if (XVEC (x, i) != 0) |
13724 | for (j = 0; j < XVECLEN (x, i); j++) |
13725 | check_promoted_subreg (insn, XVECEXP (x, i, j)); |
13726 | break; |
13727 | } |
13728 | } |
13729 | } |
13730 | |
13731 | /* Verify that all the registers and memory references mentioned in *LOC are |
13732 | still valid. *LOC was part of a value set in INSN when label_tick was |
13733 | equal to TICK. Return false if some are not. If REPLACE is true, replace |
13734 | the invalid references with (clobber (const_int 0)) and return true. This |
13735 | replacement is useful because we often can get useful information about |
13736 | the form of a value (e.g., if it was produced by a shift that always |
13737 | produces -1 or 0) even though we don't know exactly what registers it |
13738 | was produced from. */ |
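/* E.g. (ashiftrt:SI (reg 99) (const_int 31)) is 0 or -1 whatever
reg 99 held, so replacing a stale (reg 99) with
(clobber (const_int 0)) preserves that shape even though the exact
value is lost.  */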
13739 | |
13740 | static bool |
13741 | get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, bool replace) |
13742 | { |
13743 | rtx x = *loc; |
13744 | const char *fmt = GET_RTX_FORMAT (GET_CODE (x)); |
13745 | int len = GET_RTX_LENGTH (GET_CODE (x)); |
13746 | int i, j; |
13747 | |
13748 | if (REG_P (x)) |
13749 | { |
13750 | unsigned int regno = REGNO (x); |
13751 | unsigned int endregno = END_REGNO (x); |
13752 | unsigned int j; |
13753 | |
13754 | for (j = regno; j < endregno; j++) |
13755 | { |
13756 | reg_stat_type *rsp = ®_stat[j]; |
13757 | if (rsp->last_set_invalid |
13758 | /* If this is a pseudo-register that was only set once and not |
13759 | live at the beginning of the function, it is always valid. */ |
13760 | || (! (regno >= FIRST_PSEUDO_REGISTER |
13761 | && regno < reg_n_sets_max |
13762 | && REG_N_SETS (regno) == 1 |
13763 | && (!REGNO_REG_SET_P |
13764 | (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), |
13765 | regno))) |
13766 | && rsp->last_set_label > tick)) |
13767 | { |
13768 | if (replace) |
13769 | *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx); |
13770 | return replace; |
13771 | } |
13772 | } |
13773 | |
13774 | return true; |
13775 | } |
13776 | /* If this is a memory reference, make sure that there were no stores after |
13777 | it that might have clobbered the value. We don't have alias info, so we |
13778 | assume any store invalidates it. Moreover, we only have local UIDs, so |
13779 | we also assume that there were stores in the intervening basic blocks. */ |
13780 | else if (MEM_P (x) && !MEM_READONLY_P (x) |
13781 | && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set)) |
13782 | { |
13783 | if (replace) |
13784 | *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx); |
13785 | return replace; |
13786 | } |
13787 | |
13788 | for (i = 0; i < len; i++) |
13789 | { |
13790 | if (fmt[i] == 'e') |
13791 | { |
13792 | /* Check for identical subexpressions. If x contains |
13793 | identical subexpression we only have to traverse one of |
13794 | them. */ |
13795 | if (i == 1 && ARITHMETIC_P (x)) |
13796 | { |
13797 | /* Note that at this point x0 has already been checked |
13798 | and found valid. */ |
13799 | rtx x0 = XEXP (x, 0); |
13800 | rtx x1 = XEXP (x, 1); |
13801 | |
13802 | /* If x0 and x1 are identical then x is also valid. */ |
13803 | if (x0 == x1) |
13804 | return true; |
13805 | |
13806 | /* If x1 is identical to a subexpression of x0 then |
13807 | while checking x0, x1 has already been checked. Thus |
it is valid, and so is x.  */
13809 | if (ARITHMETIC_P (x0) |
13810 | && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1))) |
13811 | return true; |
13812 | |
13813 | /* If x0 is identical to a subexpression of x1 then x is |
13814 | valid iff the rest of x1 is valid. */ |
13815 | if (ARITHMETIC_P (x1) |
13816 | && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1))) |
13817 | return |
get_last_value_validate (&XEXP (x1,
13819 | x0 == XEXP (x1, 0) ? 1 : 0), |
13820 | insn, tick, replace); |
13821 | } |
13822 | |
if (!get_last_value_validate (&XEXP (x, i), insn, tick, replace))
13824 | return false; |
13825 | } |
13826 | else if (fmt[i] == 'E') |
13827 | for (j = 0; j < XVECLEN (x, i); j++) |
if (!get_last_value_validate (&XVECEXP (x, i, j),
13829 | insn, tick, replace)) |
13830 | return false; |
13831 | } |
13832 | |
13833 | /* If we haven't found a reason for it to be invalid, it is valid. */ |
13834 | return true; |
13835 | } |
13836 | |
13837 | /* Get the last value assigned to X, if known. Some registers |
13838 | in the value may be replaced with (clobber (const_int 0)) if their value |
is no longer known reliably.  */
13840 | |
13841 | static rtx |
13842 | get_last_value (const_rtx x) |
13843 | { |
13844 | unsigned int regno; |
13845 | rtx value; |
13846 | reg_stat_type *rsp; |
13847 | |
13848 | /* If this is a non-paradoxical SUBREG, get the value of its operand and |
13849 | then convert it to the desired mode. If this is a paradoxical SUBREG, |
13850 | we cannot predict what values the "extra" bits might have. */ |
13851 | if (GET_CODE (x) == SUBREG |
13852 | && subreg_lowpart_p (x) |
13853 | && !paradoxical_subreg_p (x) |
13854 | && (value = get_last_value (SUBREG_REG (x))) != 0) |
13855 | return gen_lowpart (GET_MODE (x), value); |
13856 | |
13857 | if (!REG_P (x)) |
13858 | return 0; |
13859 | |
13860 | regno = REGNO (x); |
13861 | rsp = ®_stat[regno]; |
13862 | value = rsp->last_set_value; |
13863 | |
13864 | /* If we don't have a value, or if it isn't for this basic block and |
it's either a hard register, set more than once, or live
13866 | at the beginning of the function, return 0. |
13867 | |
13868 | Because if it's not live at the beginning of the function then the reg |
13869 | is always set before being used (is never used without being set). |
13870 | And, if it's set only once, and it's always set before use, then all |
13871 | uses must have the same last value, even if it's not from this basic |
13872 | block. */ |
13873 | |
13874 | if (value == 0 |
13875 | || (rsp->last_set_label < label_tick_ebb_start |
13876 | && (regno < FIRST_PSEUDO_REGISTER |
13877 | || regno >= reg_n_sets_max |
13878 | || REG_N_SETS (regno) != 1 |
13879 | || REGNO_REG_SET_P |
13880 | (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno)))) |
13881 | return 0; |
13882 | |
13883 | /* If the value was set in a later insn than the ones we are processing, |
13884 | we can't use it even if the register was only set once. */ |
13885 | if (rsp->last_set_label == label_tick |
13886 | && DF_INSN_LUID (rsp->last_set) >= subst_low_luid) |
13887 | return 0; |
13888 | |
13889 | /* If fewer bits were set than what we are asked for now, we cannot use |
13890 | the value. */ |
if (maybe_lt (GET_MODE_PRECISION (rsp->last_set_mode),
GET_MODE_PRECISION (GET_MODE (x))))
13893 | return 0; |
13894 | |
13895 | /* If the value has all its registers valid, return it. */ |
if (get_last_value_validate (&value, rsp->last_set,
rsp->last_set_label, false))
13898 | return value; |
13899 | |
13900 | /* Otherwise, make a copy and replace any invalid register with |
13901 | (clobber (const_int 0)). If that fails for some reason, return 0. */ |
13902 | |
13903 | value = copy_rtx (value); |
if (get_last_value_validate (&value, rsp->last_set,
rsp->last_set_label, true))
13906 | return value; |
13907 | |
13908 | return 0; |
13909 | } |
13910 | |
13911 | /* Define three variables used for communication between the following |
13912 | routines. */ |
13913 | |
13914 | static unsigned int reg_dead_regno, reg_dead_endregno; |
13915 | static int reg_dead_flag; |
13916 | rtx reg_dead_reg; |
13917 | |
13918 | /* Function called via note_stores from reg_dead_at_p. |
13919 | |
13920 | If DEST is within [reg_dead_regno, reg_dead_endregno), set |
reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET.  */
13922 | |
13923 | static void |
13924 | reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED) |
13925 | { |
13926 | unsigned int regno, endregno; |
13927 | |
13928 | if (!REG_P (dest)) |
13929 | return; |
13930 | |
13931 | regno = REGNO (dest); |
endregno = END_REGNO (dest);
13933 | if (reg_dead_endregno > regno && reg_dead_regno < endregno) |
13934 | reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1; |
13935 | } |
13936 | |
13937 | /* Return true if REG is known to be dead at INSN. |
13938 | |
13939 | We scan backwards from INSN. If we hit a REG_DEAD note or a CLOBBER |
13940 | referencing REG, it is dead. If we hit a SET referencing REG, it is |
13941 | live. Otherwise, see if it is live or dead at the start of the basic |
13942 | block we are in. Hard regs marked as being live in NEWPAT_USED_REGS |
13943 | must be assumed to be always live. */ |
13944 | |
13945 | static bool |
13946 | reg_dead_at_p (rtx reg, rtx_insn *insn) |
13947 | { |
13948 | basic_block block; |
13949 | unsigned int i; |
13950 | |
13951 | /* Set variables for reg_dead_at_p_1. */ |
13952 | reg_dead_regno = REGNO (reg); |
reg_dead_endregno = END_REGNO (reg);
13954 | reg_dead_reg = reg; |
13955 | |
13956 | reg_dead_flag = 0; |
13957 | |
13958 | /* Check that reg isn't mentioned in NEWPAT_USED_REGS. For fixed registers |
13959 | we allow the machine description to decide whether use-and-clobber |
13960 | patterns are OK. */ |
13961 | if (reg_dead_regno < FIRST_PSEUDO_REGISTER) |
13962 | { |
13963 | for (i = reg_dead_regno; i < reg_dead_endregno; i++) |
if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
13965 | return false; |
13966 | } |
13967 | |
13968 | /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or |
13969 | beginning of basic block. */ |
13970 | block = BLOCK_FOR_INSN (insn); |
13971 | for (;;) |
13972 | { |
13973 | if (INSN_P (insn)) |
13974 | { |
13975 | if (find_regno_note (insn, REG_UNUSED, reg_dead_regno)) |
13976 | return true; |
13977 | |
13978 | note_stores (insn, reg_dead_at_p_1, NULL); |
13979 | if (reg_dead_flag) |
return reg_dead_flag == 1;
13981 | |
13982 | if (find_regno_note (insn, REG_DEAD, reg_dead_regno)) |
13983 | return true; |
13984 | } |
13985 | |
13986 | if (insn == BB_HEAD (block)) |
13987 | break; |
13988 | |
13989 | insn = PREV_INSN (insn); |
13990 | } |
13991 | |
13992 | /* Look at live-in sets for the basic block that we were in. */ |
13993 | for (i = reg_dead_regno; i < reg_dead_endregno; i++) |
13994 | if (REGNO_REG_SET_P (df_get_live_in (block), i)) |
13995 | return false; |
13996 | |
13997 | return true; |
13998 | } |
13999 | |
14000 | /* Note hard registers in X that are used. */ |
14001 | |
14002 | static void |
14003 | mark_used_regs_combine (rtx x) |
14004 | { |
14005 | RTX_CODE code = GET_CODE (x); |
14006 | unsigned int regno; |
14007 | int i; |
14008 | |
14009 | switch (code) |
14010 | { |
14011 | case LABEL_REF: |
14012 | case SYMBOL_REF: |
14013 | case CONST: |
14014 | CASE_CONST_ANY: |
14015 | case PC: |
14016 | case ADDR_VEC: |
14017 | case ADDR_DIFF_VEC: |
14018 | case ASM_INPUT: |
14019 | return; |
14020 | |
14021 | case CLOBBER: |
14022 | /* If we are clobbering a MEM, mark any hard registers inside the |
14023 | address as used. */ |
14024 | if (MEM_P (XEXP (x, 0))) |
14025 | mark_used_regs_combine (XEXP (XEXP (x, 0), 0)); |
14026 | return; |
14027 | |
14028 | case REG: |
14029 | regno = REGNO (x); |
14030 | /* A hard reg in a wide mode may really be multiple registers. |
14031 | If so, mark all of them just like the first. */ |
14032 | if (regno < FIRST_PSEUDO_REGISTER) |
14033 | { |
14034 | /* None of this applies to the stack, frame or arg pointers. */ |
14035 | if (regno == STACK_POINTER_REGNUM |
14036 | || (!HARD_FRAME_POINTER_IS_FRAME_POINTER |
14037 | && regno == HARD_FRAME_POINTER_REGNUM) |
14038 | || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM |
14039 | && regno == ARG_POINTER_REGNUM && fixed_regs[regno]) |
14040 | || regno == FRAME_POINTER_REGNUM) |
14041 | return; |
14042 | |
add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
14044 | } |
14045 | return; |
14046 | |
14047 | case SET: |
14048 | { |
14049 | /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in |
14050 | the address. */ |
14051 | rtx testreg = SET_DEST (x); |
14052 | |
14053 | while (GET_CODE (testreg) == SUBREG |
14054 | || GET_CODE (testreg) == ZERO_EXTRACT |
14055 | || GET_CODE (testreg) == STRICT_LOW_PART) |
14056 | testreg = XEXP (testreg, 0); |
14057 | |
14058 | if (MEM_P (testreg)) |
14059 | mark_used_regs_combine (XEXP (testreg, 0)); |
14060 | |
14061 | mark_used_regs_combine (SET_SRC (x)); |
14062 | } |
14063 | return; |
14064 | |
14065 | default: |
14066 | break; |
14067 | } |
14068 | |
14069 | /* Recursively scan the operands of this expression. */ |
14070 | |
14071 | { |
14072 | const char *fmt = GET_RTX_FORMAT (code); |
14073 | |
14074 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
14075 | { |
14076 | if (fmt[i] == 'e') |
14077 | mark_used_regs_combine (XEXP (x, i)); |
14078 | else if (fmt[i] == 'E') |
14079 | { |
14080 | int j; |
14081 | |
14082 | for (j = 0; j < XVECLEN (x, i); j++) |
14083 | mark_used_regs_combine (XVECEXP (x, i, j)); |
14084 | } |
14085 | } |
14086 | } |
14087 | } |
14088 | |
14089 | /* Remove register number REGNO from the dead registers list of INSN. |
14090 | |
14091 | Return the note used to record the death, if there was one. */ |
14092 | |
14093 | rtx |
14094 | remove_death (unsigned int regno, rtx_insn *insn) |
14095 | { |
14096 | rtx note = find_regno_note (insn, REG_DEAD, regno); |
14097 | |
14098 | if (note) |
14099 | remove_note (insn, note); |
14100 | |
14101 | return note; |
14102 | } |
14103 | |
14104 | /* For each register (hardware or pseudo) used within expression X, if its |
14105 | death is in an instruction with luid between FROM_LUID (inclusive) and |
14106 | TO_INSN (exclusive), put a REG_DEAD note for that register in the |
14107 | list headed by PNOTES. |
14108 | |
14109 | That said, don't move registers killed by maybe_kill_insn. |
14110 | |
14111 | This is done when X is being merged by combination into TO_INSN. These |
14112 | notes will then be distributed as needed. */ |
14113 | |
14114 | static void |
14115 | move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn, |
14116 | rtx *pnotes) |
14117 | { |
14118 | const char *fmt; |
14119 | int len, i; |
14120 | enum rtx_code code = GET_CODE (x); |
14121 | |
14122 | if (code == REG) |
14123 | { |
14124 | unsigned int regno = REGNO (x); |
14125 | rtx_insn *where_dead = reg_stat[regno].last_death; |
14126 | |
14127 | /* If we do not know where the register died, it may still die between |
14128 | FROM_LUID and TO_INSN. If so, find it. This is PR83304. */ |
14129 | if (!where_dead || DF_INSN_LUID (where_dead) >= DF_INSN_LUID (to_insn)) |
14130 | { |
14131 | rtx_insn *insn = prev_real_nondebug_insn (to_insn); |
14132 | while (insn |
&& BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (to_insn)
14134 | && DF_INSN_LUID (insn) >= from_luid) |
14135 | { |
14136 | if (dead_or_set_regno_p (insn, regno)) |
14137 | { |
14138 | if (find_regno_note (insn, REG_DEAD, regno)) |
14139 | where_dead = insn; |
14140 | break; |
14141 | } |
14142 | |
14143 | insn = prev_real_nondebug_insn (insn); |
14144 | } |
14145 | } |
14146 | |
14147 | /* Don't move the register if it gets killed in between from and to. */ |
14148 | if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn) |
14149 | && ! reg_referenced_p (x, maybe_kill_insn)) |
14150 | return; |
14151 | |
14152 | if (where_dead |
&& BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
14154 | && DF_INSN_LUID (where_dead) >= from_luid |
14155 | && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn)) |
14156 | { |
rtx note = remove_death (regno, where_dead);
14158 | |
14159 | /* It is possible for the call above to return 0. This can occur |
14160 | when last_death points to I2 or I1 that we combined with. |
14161 | In that case make a new note. |
14162 | |
14163 | We must also check for the case where X is a hard register |
14164 | and NOTE is a death note for a range of hard registers |
14165 | including X. In that case, we must put REG_DEAD notes for |
14166 | the remaining registers in place of NOTE. */ |
14167 | |
14168 | if (note != 0 && regno < FIRST_PSEUDO_REGISTER |
14169 | && partial_subreg_p (GET_MODE (x), GET_MODE (XEXP (note, 0)))) |
14170 | { |
14171 | unsigned int deadregno = REGNO (XEXP (note, 0)); |
14172 | unsigned int deadend = END_REGNO (XEXP (note, 0)); |
14173 | unsigned int ourend = END_REGNO (x); |
14174 | unsigned int i; |
14175 | |
14176 | for (i = deadregno; i < deadend; i++) |
14177 | if (i < regno || i >= ourend) |
14178 | add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]); |
14179 | } |
14180 | |
14181 | /* If we didn't find any note, or if we found a REG_DEAD note that |
14182 | covers only part of the given reg, and we have a multi-reg hard |
14183 | register, then to be safe we must check for REG_DEAD notes |
14184 | for each register other than the first. They could have |
14185 | their own REG_DEAD notes lying around. */ |
14186 | else if ((note == 0 |
14187 | || (note != 0 |
14188 | && partial_subreg_p (GET_MODE (XEXP (note, 0)), |
14189 | GET_MODE (x)))) |
14190 | && regno < FIRST_PSEUDO_REGISTER |
14191 | && REG_NREGS (x) > 1) |
14192 | { |
14193 | unsigned int ourend = END_REGNO (x); |
14194 | unsigned int i, offset; |
14195 | rtx oldnotes = 0; |
14196 | |
14197 | if (note) |
14198 | offset = hard_regno_nregs (regno, GET_MODE (XEXP (note, 0))); |
14199 | else |
14200 | offset = 1; |
14201 | |
14202 | for (i = regno + offset; i < ourend; i++) |
move_deaths (regno_reg_rtx[i],
maybe_kill_insn, from_luid, to_insn, &oldnotes);
14205 | } |
14206 | |
14207 | if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x)) |
14208 | { |
14209 | XEXP (note, 1) = *pnotes; |
14210 | *pnotes = note; |
14211 | } |
14212 | else |
14213 | *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes); |
14214 | } |
14215 | |
14216 | return; |
14217 | } |
14218 | |
14219 | else if (GET_CODE (x) == SET) |
14220 | { |
14221 | rtx dest = SET_DEST (x); |
14222 | |
14223 | move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes); |
14224 | |
14225 | /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG |
14226 | that accesses one word of a multi-word item, some |
piece of every register in the expression is used by
14228 | this insn, so remove any old death. */ |
14229 | /* ??? So why do we test for equality of the sizes? */ |
14230 | |
14231 | if (GET_CODE (dest) == ZERO_EXTRACT |
14232 | || GET_CODE (dest) == STRICT_LOW_PART |
14233 | || (GET_CODE (dest) == SUBREG |
14234 | && !read_modify_subreg_p (dest))) |
14235 | { |
move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
14237 | return; |
14238 | } |
14239 | |
14240 | /* If this is some other SUBREG, we know it replaces the entire |
14241 | value, so use that as the destination. */ |
14242 | if (GET_CODE (dest) == SUBREG) |
14243 | dest = SUBREG_REG (dest); |
14244 | |
14245 | /* If this is a MEM, adjust deaths of anything used in the address. |
14246 | For a REG (the only other possibility), the entire value is |
14247 | being replaced so the old value is not used in this insn. */ |
14248 | |
14249 | if (MEM_P (dest)) |
14250 | move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid, |
14251 | to_insn, pnotes); |
14252 | return; |
14253 | } |
14254 | |
14255 | else if (GET_CODE (x) == CLOBBER) |
14256 | return; |
14257 | |
14258 | len = GET_RTX_LENGTH (code); |
14259 | fmt = GET_RTX_FORMAT (code); |
14260 | |
14261 | for (i = 0; i < len; i++) |
14262 | { |
14263 | if (fmt[i] == 'E') |
14264 | { |
14265 | int j; |
14266 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
14267 | move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid, |
14268 | to_insn, pnotes); |
14269 | } |
14270 | else if (fmt[i] == 'e') |
14271 | move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes); |
14272 | } |
14273 | } |
14274 | |
14275 | /* Return true if X is the target of a bit-field assignment in BODY, the |
14276 | pattern of an insn. X must be a REG. */ |
14277 | |
14278 | static bool |
14279 | reg_bitfield_target_p (rtx x, rtx body) |
14280 | { |
14281 | int i; |
14282 | |
14283 | if (GET_CODE (body) == SET) |
14284 | { |
14285 | rtx dest = SET_DEST (body); |
14286 | rtx target; |
14287 | unsigned int regno, tregno, endregno, endtregno; |
14288 | |
14289 | if (GET_CODE (dest) == ZERO_EXTRACT) |
14290 | target = XEXP (dest, 0); |
14291 | else if (GET_CODE (dest) == STRICT_LOW_PART) |
14292 | target = SUBREG_REG (XEXP (dest, 0)); |
14293 | else |
14294 | return false; |
14295 | |
14296 | if (GET_CODE (target) == SUBREG) |
14297 | target = SUBREG_REG (target); |
14298 | |
14299 | if (!REG_P (target)) |
14300 | return false; |
14301 | |
14302 | tregno = REGNO (target), regno = REGNO (x); |
14303 | if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER) |
14304 | return target == x; |
14305 | |
endtregno = end_hard_regno (GET_MODE (target), tregno);
14307 | endregno = end_hard_regno (GET_MODE (x), regno); |
14308 | |
14309 | return endregno > tregno && regno < endtregno; |
14310 | } |
14311 | |
14312 | else if (GET_CODE (body) == PARALLEL) |
14313 | for (i = XVECLEN (body, 0) - 1; i >= 0; i--) |
14314 | if (reg_bitfield_target_p (x, XVECEXP (body, 0, i))) |
14315 | return true; |
14316 | |
14317 | return false; |
14318 | } |
14319 | |
14320 | /* Given a chain of REG_NOTES originally from FROM_INSN, try to place them |
14321 | as appropriate. I3 and I2 are the insns resulting from the combination |
14322 | insns including FROM (I2 may be zero). |
14323 | |
14324 | ELIM_I2 and ELIM_I1 are either zero or registers that we know will |
14325 | not need REG_DEAD notes because they are being substituted for. This |
14326 | saves searching in the most common cases. |
14327 | |
14328 | Each note in the list is either ignored or placed on some insns, depending |
14329 | on the type of note. */ |
14330 | |
14331 | static void |
14332 | distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2, |
14333 | rtx elim_i2, rtx elim_i1, rtx elim_i0) |
14334 | { |
14335 | rtx note, next_note; |
14336 | rtx tem_note; |
14337 | rtx_insn *tem_insn; |
14338 | |
14339 | for (note = notes; note; note = next_note) |
14340 | { |
14341 | rtx_insn *place = 0, *place2 = 0; |
14342 | |
14343 | next_note = XEXP (note, 1); |
14344 | switch (REG_NOTE_KIND (note)) |
14345 | { |
14346 | case REG_BR_PROB: |
14347 | case REG_BR_PRED: |
14348 | /* Doesn't matter much where we put this, as long as it's somewhere. |
14349 | It is preferable to keep these notes on branches, which is most |
14350 | likely to be i3. */ |
14351 | place = i3; |
14352 | break; |
14353 | |
14354 | case REG_NON_LOCAL_GOTO: |
14355 | if (JUMP_P (i3)) |
14356 | place = i3; |
14357 | else |
14358 | { |
14359 | gcc_assert (i2 && JUMP_P (i2)); |
14360 | place = i2; |
14361 | } |
14362 | break; |
14363 | |
14364 | case REG_EH_REGION: |
14365 | { |
14366 | /* The landing pad handling needs to be kept in sync with the |
14367 | prerequisite checking in try_combine. */ |
14368 | int lp_nr = INTVAL (XEXP (note, 0)); |
/* A REG_EH_REGION note transferring control can only ever come
14370 | from i3. */ |
14371 | if (lp_nr > 0) |
14372 | gcc_assert (from_insn == i3); |
14373 | /* We are making sure there is a single effective REG_EH_REGION |
14374 | note and it's valid to put it on i3. */ |
14375 | if (!insn_could_throw_p (from_insn) |
14376 | && !(lp_nr == INT_MIN && can_nonlocal_goto (from_insn))) |
14377 | /* Throw away stray notes on insns that can never throw or |
14378 | make a nonlocal goto. */ |
14379 | ; |
14380 | else |
14381 | { |
14382 | if (CALL_P (i3)) |
14383 | place = i3; |
14384 | else |
14385 | { |
14386 | gcc_assert (cfun->can_throw_non_call_exceptions); |
14387 | /* If i3 can still trap preserve the note, otherwise we've |
14388 | combined things such that we can now prove that the |
14389 | instructions can't trap. Drop the note in this case. */ |
14390 | if (may_trap_p (i3)) |
14391 | place = i3; |
14392 | } |
14393 | } |
14394 | break; |
14395 | } |
14396 | |
14397 | case REG_ARGS_SIZE: |
14398 | /* ??? How to distribute between i3-i1. Assume i3 contains the |
14399 | entire adjustment. Assert i3 contains at least some adjust. */ |
14400 | if (!noop_move_p (i3)) |
14401 | { |
14402 | poly_int64 old_size, args_size = get_args_size (note); |
14403 | /* fixup_args_size_notes looks at REG_NORETURN note, |
14404 | so ensure the note is placed there first. */ |
14405 | if (CALL_P (i3)) |
14406 | { |
14407 | rtx *np; |
14408 | for (np = &next_note; *np; np = &XEXP (*np, 1)) |
14409 | if (REG_NOTE_KIND (*np) == REG_NORETURN) |
14410 | { |
14411 | rtx n = *np; |
14412 | *np = XEXP (n, 1); |
14413 | XEXP (n, 1) = REG_NOTES (i3); |
14414 | REG_NOTES (i3) = n; |
14415 | break; |
14416 | } |
14417 | } |
old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
14419 | /* emit_call_1 adds for !ACCUMULATE_OUTGOING_ARGS |
14420 | REG_ARGS_SIZE note to all noreturn calls, allow that here. */ |
14421 | gcc_assert (maybe_ne (old_size, args_size) |
14422 | || (CALL_P (i3) |
14423 | && !ACCUMULATE_OUTGOING_ARGS |
14424 | && find_reg_note (i3, REG_NORETURN, NULL_RTX))); |
14425 | } |
14426 | break; |
14427 | |
14428 | case REG_NORETURN: |
14429 | case REG_SETJMP: |
14430 | case REG_TM: |
14431 | case REG_CALL_DECL: |
14432 | case REG_UNTYPED_CALL: |
14433 | case REG_CALL_NOCF_CHECK: |
14434 | /* These notes must remain with the call. It should not be |
14435 | possible for both I2 and I3 to be a call. */ |
14436 | if (CALL_P (i3)) |
14437 | place = i3; |
14438 | else |
14439 | { |
14440 | gcc_assert (i2 && CALL_P (i2)); |
14441 | place = i2; |
14442 | } |
14443 | break; |
14444 | |
14445 | case REG_UNUSED: |
14446 | /* Any clobbers for i3 may still exist, and so we must process |
14447 | REG_UNUSED notes from that insn. |
14448 | |
14449 | Any clobbers from i2 or i1 can only exist if they were added by |
14450 | recog_for_combine. In that case, recog_for_combine created the |
14451 | necessary REG_UNUSED notes. Trying to keep any original |
14452 | REG_UNUSED notes from these insns can cause incorrect output |
14453 | if it is for the same register as the original i3 dest. |
14454 | In that case, we will notice that the register is set in i3, |
14455 | and then add a REG_UNUSED note for the destination of i3, which |
14456 | is wrong. However, it is possible to have REG_UNUSED notes from |
14457 | i2 or i1 for register which were both used and clobbered, so |
14458 | we keep notes from i2 or i1 if they will turn into REG_DEAD |
14459 | notes. */ |
14460 | |
14461 | /* If this register is set or clobbered between FROM_INSN and I3, |
14462 | we should not create a note for it. */ |
14463 | if (reg_set_between_p (XEXP (note, 0), from_insn, i3)) |
14464 | break; |
14465 | |
14466 | /* If this register is set or clobbered in I3, put the note there |
14467 | unless there is one already. */ |
if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
14469 | { |
14470 | if (from_insn != i3) |
14471 | break; |
14472 | |
14473 | if (! (REG_P (XEXP (note, 0)) |
14474 | ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0))) |
14475 | : find_reg_note (i3, REG_UNUSED, XEXP (note, 0)))) |
14476 | place = i3; |
14477 | } |
14478 | /* Otherwise, if this register is used by I3, then this register |
14479 | now dies here, so we must put a REG_DEAD note here unless there |
14480 | is one already. */ |
else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
14482 | && ! (REG_P (XEXP (note, 0)) |
14483 | ? find_regno_note (i3, REG_DEAD, |
14484 | REGNO (XEXP (note, 0))) |
14485 | : find_reg_note (i3, REG_DEAD, XEXP (note, 0)))) |
14486 | { |
14487 | PUT_REG_NOTE_KIND (note, REG_DEAD); |
14488 | place = i3; |
14489 | } |
14490 | |
14491 | /* A SET or CLOBBER of the REG_UNUSED reg has been removed, |
14492 | but we can't tell which at this point. We must reset any |
14493 | expectations we had about the value that was previously |
14494 | stored in the reg. ??? Ideally, we'd adjust REG_N_SETS |
14495 | and, if appropriate, restore its previous value, but we |
14496 | don't have enough information for that at this point. */ |
14497 | else |
14498 | { |
14499 | record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX); |
14500 | |
14501 | /* Otherwise, if this register is now referenced in i2 |
14502 | then the register used to be modified in one of the |
14503 | original insns. If it was i3 (say, in an unused |
14504 | parallel), it's now completely gone, so the note can |
14505 | be discarded. But if it was modified in i2, i1 or i0 |
14506 | and we still reference it in i2, then we're |
14507 | referencing the previous value, and since the |
14508 | register was modified and REG_UNUSED, we know that |
14509 | the previous value is now dead. So, if we only |
14510 | reference the register in i2, we change the note to |
14511 | REG_DEAD, to reflect the previous value. However, if |
14512 | we're also setting or clobbering the register as |
14513 | scratch, we know (because the register was not |
14514 | referenced in i3) that it's unused, just as it was |
14515 | unused before, and we place the note in i2. */ |
14516 | if (from_insn != i3 && i2 && INSN_P (i2) |
&& reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14518 | { |
if (!reg_set_p (XEXP (note, 0), PATTERN (i2)))
14520 | PUT_REG_NOTE_KIND (note, REG_DEAD); |
14521 | if (! (REG_P (XEXP (note, 0)) |
14522 | ? find_regno_note (i2, REG_NOTE_KIND (note), |
14523 | REGNO (XEXP (note, 0))) |
14524 | : find_reg_note (i2, REG_NOTE_KIND (note), |
14525 | XEXP (note, 0)))) |
14526 | place = i2; |
14527 | } |
14528 | } |
14529 | |
14530 | break; |
14531 | |
14532 | case REG_EQUAL: |
14533 | case REG_EQUIV: |
14534 | case REG_NOALIAS: |
14535 | /* These notes say something about results of an insn. We can |
only support them if they used to be on I3, in which case they
14537 | remain on I3. Otherwise they are ignored. |
14538 | |
14539 | If the note refers to an expression that is not a constant, we |
14540 | must also ignore the note since we cannot tell whether the |
14541 | equivalence is still true. It might be possible to do |
14542 | slightly better than this (we only have a problem if I2DEST |
14543 | or I1DEST is present in the expression), but it doesn't |
14544 | seem worth the trouble. */ |
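/* As a hypothetical illustration: a note (REG_EQUAL (const_int 12))
   that was on I3 stays on I3, whereas (REG_EQUAL (plus:SI (reg:SI 100)
   (const_int 4))) is dropped, since (reg:SI 100) might be I2DEST and
   no longer hold the value it had when the note was made.  */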
14545 | |
14546 | if (from_insn == i3 |
14547 | && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0)))) |
14548 | place = i3; |
14549 | break; |
14550 | |
14551 | case REG_INC: |
14552 | /* These notes say something about how a register is used. They must |
14553 | be present on any use of the register in I2 or I3. */ |
if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
14555 | place = i3; |
14556 | |
if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
14558 | { |
14559 | if (place) |
14560 | place2 = i2; |
14561 | else |
14562 | place = i2; |
14563 | } |
14564 | break; |
14565 | |
14566 | case REG_LABEL_TARGET: |
14567 | case REG_LABEL_OPERAND: |
14568 | /* This can show up in several ways -- either directly in the |
14569 | pattern, or hidden off in the constant pool with (or without?) |
14570 | a REG_EQUAL note. */ |
14571 | /* ??? Ignore the without-reg_equal-note problem for now. */ |
if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
14573 | || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX)) |
14574 | && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF |
14575 | && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0))) |
14576 | place = i3; |
14577 | |
14578 | if (i2 |
&& (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
14580 | || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX)) |
14581 | && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF |
14582 | && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))) |
14583 | { |
14584 | if (place) |
14585 | place2 = i2; |
14586 | else |
14587 | place = i2; |
14588 | } |
14589 | |
/* For REG_LABEL_TARGET on a JUMP_P, we prefer to record the label
   in JUMP_LABEL, or to decrement LABEL_NUSES if the label is
   already there.  */
14593 | if (place && JUMP_P (place) |
14594 | && REG_NOTE_KIND (note) == REG_LABEL_TARGET |
14595 | && (JUMP_LABEL (place) == NULL |
14596 | || JUMP_LABEL (place) == XEXP (note, 0))) |
14597 | { |
14598 | rtx label = JUMP_LABEL (place); |
14599 | |
14600 | if (!label) |
14601 | JUMP_LABEL (place) = XEXP (note, 0); |
14602 | else if (LABEL_P (label)) |
14603 | LABEL_NUSES (label)--; |
14604 | } |
14605 | |
14606 | if (place2 && JUMP_P (place2) |
14607 | && REG_NOTE_KIND (note) == REG_LABEL_TARGET |
14608 | && (JUMP_LABEL (place2) == NULL |
14609 | || JUMP_LABEL (place2) == XEXP (note, 0))) |
14610 | { |
14611 | rtx label = JUMP_LABEL (place2); |
14612 | |
14613 | if (!label) |
14614 | JUMP_LABEL (place2) = XEXP (note, 0); |
14615 | else if (LABEL_P (label)) |
14616 | LABEL_NUSES (label)--; |
14617 | place2 = 0; |
14618 | } |
14619 | break; |
14620 | |
14621 | case REG_NONNEG: |
14622 | /* This note says something about the value of a register prior |
14623 | to the execution of an insn. It is too much trouble to see |
14624 | if the note is still correct in all situations. It is better |
14625 | to simply delete it. */ |
14626 | break; |
14627 | |
14628 | case REG_DEAD: |
14629 | /* If we replaced the right hand side of FROM_INSN with a |
14630 | REG_EQUAL note, the original use of the dying register |
14631 | will not have been combined into I3 and I2. In such cases, |
14632 | FROM_INSN is guaranteed to be the first of the combined |
14633 | instructions, so we simply need to search back before |
14634 | FROM_INSN for the previous use or set of this register, |
14635 | then alter the notes there appropriately. |
14636 | |
14637 | If the register is used as an input in I3, it dies there. |
14638 | Similarly for I2, if it is nonzero and adjacent to I3. |
14639 | |
14640 | If the register is not used as an input in either I3 or I2 |
14641 | and it is not one of the registers we were supposed to eliminate, |
14642 | there are two possibilities. We might have a non-adjacent I2 |
14643 | or we might have somehow eliminated an additional register |
14644 | from a computation. For example, we might have had A & B where |
14645 | we discover that B will always be zero. In this case we will |
14646 | eliminate the reference to A. |
14647 | |
14648 | In both cases, we must search to see if we can find a previous |
14649 | use of A and put the death note there. */ |
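/* A hypothetical RTL sketch of the second case: combining into
   (set (reg:SI 200) (and:SI (reg:SI 100) (reg:SI 101)))
   when (reg:SI 101) is known to be zero yields
   (set (reg:SI 200) (const_int 0)),
   so the reference to (reg:SI 100) disappears and its death note
   must be moved to an earlier use found by the search below.  */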
14650 | |
14651 | if (from_insn |
14652 | && from_insn == i2mod |
14653 | && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs)) |
14654 | tem_insn = from_insn; |
14655 | else |
14656 | { |
14657 | if (from_insn |
14658 | && CALL_P (from_insn) |
14659 | && find_reg_fusage (from_insn, USE, XEXP (note, 0))) |
14660 | place = from_insn; |
else if (i2 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
14662 | { |
14663 | /* If the new I2 sets the same register that is marked |
14664 | dead in the note, we do not in general know where to |
14665 | put the note. One important case we _can_ handle is |
14666 | when the note comes from I3. */ |
14667 | if (from_insn == i3) |
14668 | place = i3; |
14669 | else |
14670 | break; |
14671 | } |
else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
14673 | place = i3; |
14674 | else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3 |
&& reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14676 | place = i2; |
14677 | else if ((rtx_equal_p (XEXP (note, 0), elim_i2) |
14678 | && !(i2mod |
14679 | && reg_overlap_mentioned_p (XEXP (note, 0), |
14680 | i2mod_old_rhs))) |
14681 | || rtx_equal_p (XEXP (note, 0), elim_i1) |
14682 | || rtx_equal_p (XEXP (note, 0), elim_i0)) |
14683 | break; |
14684 | tem_insn = i3; |
14685 | } |
14686 | |
14687 | if (place == 0) |
14688 | { |
14689 | basic_block bb = this_basic_block; |
14690 | |
for (tem_insn = PREV_INSN (tem_insn); place == 0;
     tem_insn = PREV_INSN (tem_insn))
14692 | { |
14693 | if (!NONDEBUG_INSN_P (tem_insn)) |
14694 | { |
14695 | if (tem_insn == BB_HEAD (bb)) |
14696 | break; |
14697 | continue; |
14698 | } |
14699 | |
14700 | /* If the register is being set at TEM_INSN, see if that is all |
14701 | TEM_INSN is doing. If so, delete TEM_INSN. Otherwise, make this |
14702 | into a REG_UNUSED note instead. Don't delete sets to |
14703 | global register vars. */ |
14704 | if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER |
14705 | || !global_regs[REGNO (XEXP (note, 0))]) |
&& reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
14707 | { |
rtx set = single_set (tem_insn);
14709 | rtx inner_dest = 0; |
14710 | |
14711 | if (set != 0) |
14712 | for (inner_dest = SET_DEST (set); |
14713 | (GET_CODE (inner_dest) == STRICT_LOW_PART |
14714 | || GET_CODE (inner_dest) == SUBREG |
14715 | || GET_CODE (inner_dest) == ZERO_EXTRACT); |
14716 | inner_dest = XEXP (inner_dest, 0)) |
14717 | ; |
14718 | |
14719 | /* Verify that it was the set, and not a clobber that |
14720 | modified the register. |
14721 | |
14722 | If we cannot delete the setter due to side |
effects, mark the setter with a REG_UNUSED note instead
14724 | of deleting it. */ |
14725 | |
14726 | if (set != 0 && ! side_effects_p (SET_SRC (set)) |
14727 | && rtx_equal_p (XEXP (note, 0), inner_dest)) |
14728 | { |
14729 | /* Move the notes and links of TEM_INSN elsewhere. |
14730 | This might delete other dead insns recursively. |
14731 | First set the pattern to something that won't use |
14732 | any register. */ |
14733 | rtx old_notes = REG_NOTES (tem_insn); |
14734 | |
PATTERN (tem_insn) = pc_rtx;
14736 | REG_NOTES (tem_insn) = NULL; |
14737 | |
distribute_notes (old_notes, tem_insn, tem_insn, NULL,
14739 | NULL_RTX, NULL_RTX, NULL_RTX); |
14740 | distribute_links (LOG_LINKS (tem_insn)); |
14741 | |
14742 | unsigned int regno = REGNO (XEXP (note, 0)); |
reg_stat_type *rsp = &reg_stat[regno];
14744 | if (rsp->last_set == tem_insn) |
14745 | record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX); |
14746 | |
14747 | SET_INSN_DELETED (tem_insn); |
14748 | if (tem_insn == i2) |
14749 | i2 = NULL; |
14750 | } |
14751 | else |
14752 | { |
14753 | PUT_REG_NOTE_KIND (note, REG_UNUSED); |
14754 | |
14755 | /* If there isn't already a REG_UNUSED note, put one |
14756 | here. Do not place a REG_DEAD note, even if |
14757 | the register is also used here; that would not |
14758 | match the algorithm used in lifetime analysis |
14759 | and can cause the consistency check in the |
14760 | scheduler to fail. */ |
14761 | if (! find_regno_note (tem_insn, REG_UNUSED, |
14762 | REGNO (XEXP (note, 0)))) |
14763 | place = tem_insn; |
14764 | break; |
14765 | } |
14766 | } |
else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
14768 | || (CALL_P (tem_insn) |
14769 | && find_reg_fusage (tem_insn, USE, XEXP (note, 0)))) |
14770 | { |
14771 | place = tem_insn; |
14772 | |
14773 | /* If we are doing a 3->2 combination, and we have a |
14774 | register which formerly died in i3 and was not used |
14775 | by i2, which now no longer dies in i3 and is used in |
14776 | i2 but does not die in i2, and place is between i2 |
14777 | and i3, then we may need to move a link from place to |
14778 | i2. */ |
14779 | if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2) |
14780 | && from_insn |
14781 | && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2) |
&& reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14783 | { |
14784 | struct insn_link *links = LOG_LINKS (place); |
14785 | LOG_LINKS (place) = NULL; |
14786 | distribute_links (links); |
14787 | } |
14788 | break; |
14789 | } |
14790 | |
14791 | if (tem_insn == BB_HEAD (bb)) |
14792 | break; |
14793 | } |
14794 | |
14795 | } |
14796 | |
14797 | /* If the register is set or already dead at PLACE, we needn't do |
14798 | anything with this note if it is still a REG_DEAD note. |
We check here if it is set at all, not if it is totally replaced,
14800 | which is what `dead_or_set_p' checks, so also check for it being |
14801 | set partially. */ |
14802 | |
14803 | if (place && REG_NOTE_KIND (note) == REG_DEAD) |
14804 | { |
14805 | unsigned int regno = REGNO (XEXP (note, 0)); |
reg_stat_type *rsp = &reg_stat[regno];
14807 | |
14808 | if (dead_or_set_p (place, XEXP (note, 0)) |
|| reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
14810 | { |
14811 | /* Unless the register previously died in PLACE, clear |
14812 | last_death. [I no longer understand why this is |
14813 | being done.] */ |
14814 | if (rsp->last_death != place) |
14815 | rsp->last_death = 0; |
14816 | place = 0; |
14817 | } |
14818 | else |
14819 | rsp->last_death = place; |
14820 | |
14821 | /* If this is a death note for a hard reg that is occupying |
14822 | multiple registers, ensure that we are still using all |
14823 | parts of the object. If we find a piece of the object |
14824 | that is unused, we must arrange for an appropriate REG_DEAD |
14825 | note to be added for it. However, we can't just emit a USE |
14826 | and tag the note to it, since the register might actually |
be dead; so we recurse, and the recursive call then finds
14828 | the previous insn that used this register. */ |
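/* E.g. (hypothetical): a REG_DEAD note for a DImode value occupying
   hard registers 0 and 1, where PLACE references only register 0;
   the code below arranges for a separate REG_DEAD or REG_UNUSED
   note for register 1 at the appropriate insn.  */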
14829 | |
14830 | if (place && REG_NREGS (XEXP (note, 0)) > 1) |
14831 | { |
14832 | unsigned int endregno = END_REGNO (XEXP (note, 0)); |
14833 | bool all_used = true; |
14834 | unsigned int i; |
14835 | |
14836 | for (i = regno; i < endregno; i++) |
if ((! refers_to_regno_p (i, PATTERN (place))
14838 | && ! find_regno_fusage (place, USE, i)) |
14839 | || dead_or_set_regno_p (place, i)) |
14840 | { |
14841 | all_used = false; |
14842 | break; |
14843 | } |
14844 | |
14845 | if (! all_used) |
14846 | { |
14847 | /* Put only REG_DEAD notes for pieces that are |
14848 | not already dead or set. */ |
14849 | |
14850 | for (i = regno; i < endregno; |
i += hard_regno_nregs (i, reg_raw_mode[i]))
14852 | { |
14853 | rtx piece = regno_reg_rtx[i]; |
14854 | basic_block bb = this_basic_block; |
14855 | |
14856 | if (! dead_or_set_p (place, piece) |
&& ! reg_bitfield_target_p (piece, PATTERN (place)))
14859 | { |
14860 | rtx new_note = alloc_reg_note (REG_DEAD, piece, |
14861 | NULL_RTX); |
14862 | |
distribute_notes (new_note, place, place,
14864 | NULL, NULL_RTX, NULL_RTX, |
14865 | NULL_RTX); |
14866 | } |
else if (! refers_to_regno_p (i, PATTERN (place))
14868 | && ! find_regno_fusage (place, USE, i)) |
for (tem_insn = PREV_INSN (place); ;
     tem_insn = PREV_INSN (tem_insn))
14871 | { |
14872 | if (!NONDEBUG_INSN_P (tem_insn)) |
14873 | { |
14874 | if (tem_insn == BB_HEAD (bb)) |
14875 | break; |
14876 | continue; |
14877 | } |
14878 | if (dead_or_set_p (tem_insn, piece) |
|| reg_bitfield_target_p (piece, PATTERN (tem_insn)))
14881 | { |
14882 | add_reg_note (tem_insn, REG_UNUSED, piece); |
14883 | break; |
14884 | } |
14885 | } |
14886 | } |
14887 | |
14888 | place = 0; |
14889 | } |
14890 | } |
14891 | } |
14892 | break; |
14893 | |
14894 | default: |
14895 | /* Any other notes should not be present at this point in the |
14896 | compilation. */ |
14897 | gcc_unreachable (); |
14898 | } |
14899 | |
14900 | if (place) |
14901 | { |
14902 | XEXP (note, 1) = REG_NOTES (place); |
14903 | REG_NOTES (place) = note; |
14904 | |
14905 | /* Set added_notes_insn to the earliest insn we added a note to. */ |
14906 | if (added_notes_insn == 0 |
14907 | || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place)) |
14908 | added_notes_insn = place; |
14909 | } |
14910 | |
14911 | if (place2) |
14912 | { |
14913 | add_shallow_copy_of_reg_note (place2, note); |
14914 | |
14915 | /* Set added_notes_insn to the earliest insn we added a note to. */ |
14916 | if (added_notes_insn == 0 |
14917 | || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place2)) |
14918 | added_notes_insn = place2; |
14919 | } |
14920 | } |
14921 | } |
14922 | |
14923 | /* Similarly to above, distribute the LOG_LINKS that used to be present on |
14924 | I3, I2, and I1 to new locations. This is also called to add a link |
14925 | pointing at I3 when I3's destination is changed. */ |
14926 | |
14927 | static void |
14928 | distribute_links (struct insn_link *links) |
14929 | { |
14930 | struct insn_link *link, *next_link; |
14931 | |
14932 | for (link = links; link; link = next_link) |
14933 | { |
14934 | rtx_insn *place = 0; |
14935 | rtx_insn *insn; |
14936 | rtx set, reg; |
14937 | |
14938 | next_link = link->next; |
14939 | |
14940 | /* If the insn that this link points to is a NOTE, ignore it. */ |
14941 | if (NOTE_P (link->insn)) |
14942 | continue; |
14943 | |
14944 | set = 0; |
rtx pat = PATTERN (link->insn);
14946 | if (GET_CODE (pat) == SET) |
14947 | set = pat; |
14948 | else if (GET_CODE (pat) == PARALLEL) |
14949 | { |
14950 | int i; |
14951 | for (i = 0; i < XVECLEN (pat, 0); i++) |
14952 | { |
14953 | set = XVECEXP (pat, 0, i); |
14954 | if (GET_CODE (set) != SET) |
14955 | continue; |
14956 | |
14957 | reg = SET_DEST (set); |
14958 | while (GET_CODE (reg) == ZERO_EXTRACT |
14959 | || GET_CODE (reg) == STRICT_LOW_PART |
14960 | || GET_CODE (reg) == SUBREG) |
14961 | reg = XEXP (reg, 0); |
14962 | |
14963 | if (!REG_P (reg)) |
14964 | continue; |
14965 | |
14966 | if (REGNO (reg) == link->regno) |
14967 | break; |
14968 | } |
14969 | if (i == XVECLEN (pat, 0)) |
14970 | continue; |
14971 | } |
14972 | else |
14973 | continue; |
14974 | |
14975 | reg = SET_DEST (set); |
14976 | |
14977 | while (GET_CODE (reg) == ZERO_EXTRACT |
14978 | || GET_CODE (reg) == STRICT_LOW_PART |
14979 | || GET_CODE (reg) == SUBREG) |
14980 | reg = XEXP (reg, 0); |
14981 | |
14982 | if (reg == pc_rtx) |
14983 | continue; |
14984 | |
14985 | /* A LOG_LINK is defined as being placed on the first insn that uses |
14986 | a register and points to the insn that sets the register. Start |
14987 | searching at the next insn after the target of the link and stop |
14988 | when we reach a set of the register or the end of the basic block. |
14989 | |
14990 | Note that this correctly handles the link that used to point from |
14991 | I3 to I2. Also note that not much searching is typically done here |
14992 | since most links don't point very far away. */ |
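/* E.g. (hypothetical): a link that was on I3 pointing at an insn
   that sets (reg 100): if the combined I3 no longer uses (reg 100),
   the link moves to the first insn after the setter that references
   (reg 100) before it is set again.  */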
14993 | |
for (insn = NEXT_INSN (link->insn);
14995 | (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun) |
14996 | || BB_HEAD (this_basic_block->next_bb) != insn)); |
14997 | insn = NEXT_INSN (insn)) |
14998 | if (DEBUG_INSN_P (insn)) |
14999 | continue; |
15000 | else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn))) |
15001 | { |
15002 | if (reg_referenced_p (reg, PATTERN (insn))) |
15003 | place = insn; |
15004 | break; |
15005 | } |
15006 | else if (CALL_P (insn) |
15007 | && find_reg_fusage (insn, USE, reg)) |
15008 | { |
15009 | place = insn; |
15010 | break; |
15011 | } |
15012 | else if (INSN_P (insn) && reg_set_p (reg, insn)) |
15013 | break; |
15014 | |
15015 | /* If we found a place to put the link, place it there unless there |
15016 | is already a link to the same insn as LINK at that point. */ |
15017 | |
15018 | if (place) |
15019 | { |
15020 | struct insn_link *link2; |
15021 | |
15022 | FOR_EACH_LOG_LINK (link2, place) |
15023 | if (link2->insn == link->insn && link2->regno == link->regno) |
15024 | break; |
15025 | |
15026 | if (link2 == NULL) |
15027 | { |
15028 | link->next = LOG_LINKS (place); |
15029 | LOG_LINKS (place) = link; |
15030 | |
15031 | /* Set added_links_insn to the earliest insn we added a |
15032 | link to. */ |
15033 | if (added_links_insn == 0 |
15034 | || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place)) |
15035 | added_links_insn = place; |
15036 | } |
15037 | } |
15038 | } |
15039 | } |
15040 | |
15041 | /* Check for any register or memory mentioned in EQUIV that is not |
15042 | mentioned in EXPR. This is used to restrict EQUIV to "specializations" |
15043 | of EXPR where some registers may have been replaced by constants. */ |
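/* For instance (hypothetical RTL): with
     EXPR  = (plus:SI (reg:SI 100) (reg:SI 101))
     EQUIV = (plus:SI (reg:SI 100) (const_int 4))
   every REG and MEM in EQUIV also appears in EXPR, so this returns
   false; with the arguments swapped it would return true, because
   (reg:SI 101) is not mentioned in EQUIV.  */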
15044 | |
15045 | static bool |
15046 | unmentioned_reg_p (rtx equiv, rtx expr) |
15047 | { |
15048 | subrtx_iterator::array_type array; |
15049 | FOR_EACH_SUBRTX (iter, array, equiv, NONCONST) |
15050 | { |
15051 | const_rtx x = *iter; |
15052 | if ((REG_P (x) || MEM_P (x)) |
15053 | && !reg_mentioned_p (x, expr)) |
15054 | return true; |
15055 | } |
15056 | return false; |
15057 | } |
15058 | |
15059 | /* Make pseudo-to-pseudo copies after every hard-reg-to-pseudo-copy, because |
15060 | the reg-to-reg copy can usefully combine with later instructions, but we |
do not want to combine the hard reg into later instructions, because
that would restrict register allocation.  */
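/* A sketch of the transformation (register numbers hypothetical):

     (set (reg:SI 100) (reg:SI 0))

   becomes

     (set (reg:SI 200) (reg:SI 0))
     (set (reg:SI 100) (reg:SI 200))

   so that later combinations substitute pseudo 200 rather than the
   hard register.  */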
15063 | static void |
15064 | make_more_copies (void) |
15065 | { |
15066 | basic_block bb; |
15067 | |
15068 | FOR_EACH_BB_FN (bb, cfun) |
15069 | { |
15070 | rtx_insn *insn; |
15071 | |
15072 | FOR_BB_INSNS (bb, insn) |
15073 | { |
15074 | if (!NONDEBUG_INSN_P (insn)) |
15075 | continue; |
15076 | |
15077 | rtx set = single_set (insn); |
15078 | if (!set) |
15079 | continue; |
15080 | |
15081 | rtx dest = SET_DEST (set); |
15082 | if (!(REG_P (dest) && !HARD_REGISTER_P (dest))) |
15083 | continue; |
15084 | |
15085 | rtx src = SET_SRC (set); |
15086 | if (!(REG_P (src) && HARD_REGISTER_P (src))) |
15087 | continue; |
15088 | if (TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))) |
15089 | continue; |
15090 | |
15091 | rtx new_reg = gen_reg_rtx (GET_MODE (dest)); |
15092 | rtx_insn *new_insn = gen_move_insn (new_reg, src); |
15093 | SET_SRC (set) = new_reg; |
15094 | emit_insn_before (new_insn, insn); |
15095 | df_insn_rescan (insn); |
15096 | } |
15097 | } |
15098 | } |
15099 | |
15100 | /* Try combining insns through substitution. */ |
15101 | static void |
15102 | rest_of_handle_combine (void) |
15103 | { |
15104 | make_more_copies (); |
15105 | |
15106 | df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN); |
15107 | df_note_add_problem (); |
15108 | df_analyze (); |
15109 | |
15110 | regstat_init_n_sets_and_refs (); |
15111 | reg_n_sets_max = max_reg_num (); |
15112 | |
15113 | bool rebuild_jump_labels_after_combine |
= combine_instructions (get_insns (), max_reg_num ());
15115 | |
15116 | /* Combining insns may have turned an indirect jump into a |
15117 | direct jump. Rebuild the JUMP_LABEL fields of jumping |
15118 | instructions. */ |
15119 | if (rebuild_jump_labels_after_combine) |
15120 | { |
15121 | if (dom_info_available_p (CDI_DOMINATORS)) |
15122 | free_dominance_info (CDI_DOMINATORS); |
timevar_push (TV_JUMP);
15124 | rebuild_jump_labels (get_insns ()); |
15125 | cleanup_cfg (0); |
timevar_pop (TV_JUMP);
15127 | } |
15128 | |
15129 | regstat_free_n_sets_and_refs (); |
15130 | } |
15131 | |
15132 | namespace { |
15133 | |
15134 | const pass_data pass_data_combine = |
15135 | { |
RTL_PASS, /* type */
"combine", /* name */
OPTGROUP_NONE, /* optinfo_flags */
TV_COMBINE, /* tv_id */
PROP_cfglayout, /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
15144 | TODO_df_finish, /* todo_flags_finish */ |
15145 | }; |
15146 | |
15147 | class pass_combine : public rtl_opt_pass |
15148 | { |
15149 | public: |
15150 | pass_combine (gcc::context *ctxt) |
15151 | : rtl_opt_pass (pass_data_combine, ctxt) |
15152 | {} |
15153 | |
15154 | /* opt_pass methods: */ |
15155 | bool gate (function *) final override { return (optimize > 0); } |
15156 | unsigned int execute (function *) final override |
15157 | { |
15158 | rest_of_handle_combine (); |
15159 | return 0; |
15160 | } |
15161 | |
15162 | }; // class pass_combine |
15163 | |
15164 | } // anon namespace |
15165 | |
15166 | rtl_opt_pass * |
15167 | make_pass_combine (gcc::context *ctxt) |
15168 | { |
15169 | return new pass_combine (ctxt); |
15170 | } |
15171 | |