1 | /* Code for RTL transformations to satisfy insn constraints. |
2 | Copyright (C) 2010-2024 Free Software Foundation, Inc. |
3 | Contributed by Vladimir Makarov <vmakarov@redhat.com>. |
4 | |
5 | This file is part of GCC. |
6 | |
7 | GCC is free software; you can redistribute it and/or modify it under |
8 | the terms of the GNU General Public License as published by the Free |
9 | Software Foundation; either version 3, or (at your option) any later |
10 | version. |
11 | |
12 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
13 | WARRANTY; without even the implied warranty of MERCHANTABILITY or |
14 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
15 | for more details. |
16 | |
17 | You should have received a copy of the GNU General Public License |
18 | along with GCC; see the file COPYING3. If not see |
19 | <http://www.gnu.org/licenses/>. */ |
20 | |
21 | |
22 | /* This file contains code for 3 passes: constraint pass, |
23 | inheritance/split pass, and pass for undoing failed inheritance and |
24 | split. |
25 | |
26 | The major goal of constraint pass is to transform RTL to satisfy |
27 | insn and address constraints by: |
28 | o choosing insn alternatives; |
29 | o generating *reload insns* (or reloads in brief) and *reload |
30 | pseudos* which will get necessary hard registers later; |
31 | o substituting pseudos with equivalent values and removing the |
32 | instructions that initialized those pseudos. |
33 | |
34 | The constraint pass contains the biggest and most complicated code in LRA. |
35 | There are a lot of important details like: |
36 | o reuse of input reload pseudos to simplify reload pseudo |
37 | allocations; |
38 | o some heuristics to choose insn alternative to improve the |
39 | inheritance; |
40 | o early clobbers etc. |
41 | |
42 | The pass mimics the former reload pass in choosing alternatives |
43 | because the reload pass is oriented to the current machine |
44 | description model. This might change if the machine description |
45 | model is changed. |
46 | |
47 | There is special code to prevent all of LRA, and this pass in |
48 | particular, from cycling in case of bugs. |
49 | |
50 | On the first iteration of the pass we process every instruction and |
51 | choose an alternative for each one. On subsequent iterations we try |
52 | to avoid reprocessing instructions if we can be sure that the old |
53 | choice is still valid. |
54 | |
55 | The inheritance/split pass transforms the code to achieve |
56 | inheritance and live range splitting. It is done on a backward |
57 | traversal of EBBs. |
58 | |
59 | The inheritance optimization goal is to reuse values in hard |
60 | registers. There is an analogous optimization in the old reload |
61 | pass. Inheritance is achieved by the following transformation: |
62 | |
63 | reload_p1 <- p reload_p1 <- p |
64 | ... new_p <- reload_p1 |
65 | ... => ... |
66 | reload_p2 <- p reload_p2 <- new_p |
67 | |
68 | where p is spilled and not changed between the insns. Reload_p1 is |
69 | also called *original pseudo* and new_p is called *inheritance |
70 | pseudo*. |
71 | |
72 | The subsequent assignment pass will try to assign the same (or |
73 | another if it is not possible) hard register to new_p as to |
74 | reload_p1 or reload_p2. |
75 | |
76 | If the assignment pass fails to assign a hard register to new_p, |
77 | this file will undo the inheritance and restore the original code. |
78 | This is because implementing the above sequence with a spilled |
79 | new_p would make the code much worse. The inheritance is done in |
80 | EBB scope. The above is just a simplified example to get an idea |
81 | of the inheritance as the inheritance is also done for non-reload |
82 | insns. |
83 | |
84 | Splitting (transformation) is also done in EBB scope on the same |
85 | pass as the inheritance: |
86 | |
87 | r <- ... or ... <- r r <- ... or ... <- r |
88 | ... s <- r (new insn -- save) |
89 | ... => |
90 | ... r <- s (new insn -- restore) |
91 | ... <- r ... <- r |
92 | |
93 | The *split pseudo* s is assigned to the hard register of the |
94 | original pseudo or hard register r. |
95 | |
96 | Splitting is done: |
97 | o In EBBs with high register pressure for global pseudos (living |
98 | in at least 2 BBs) and assigned to hard registers when there |
99 | is more than one reload needing the hard registers; |
100 | o for pseudos needing save/restore code around calls. |
101 | |
102 | If the split pseudo still has the same hard register as the |
103 | original pseudo after the subsequent assignment pass or the |
104 | original pseudo was split, the opposite transformation is done on |
105 | the same pass for undoing inheritance. */ |
106 | |
107 | #undef REG_OK_STRICT |
108 | |
109 | #include "config.h" |
110 | #include "system.h" |
111 | #include "coretypes.h" |
112 | #include "backend.h" |
113 | #include "hooks.h" |
114 | #include "target.h" |
115 | #include "rtl.h" |
116 | #include "tree.h" |
117 | #include "predict.h" |
118 | #include "df.h" |
119 | #include "memmodel.h" |
120 | #include "tm_p.h" |
121 | #include "expmed.h" |
122 | #include "optabs.h" |
123 | #include "regs.h" |
124 | #include "ira.h" |
125 | #include "recog.h" |
126 | #include "output.h" |
127 | #include "addresses.h" |
128 | #include "expr.h" |
129 | #include "cfgrtl.h" |
130 | #include "rtl-error.h" |
131 | #include "lra.h" |
132 | #include "lra-int.h" |
133 | #include "print-rtl.h" |
134 | #include "function-abi.h" |
135 | #include "rtl-iter.h" |
136 | |
137 | /* Value of LRA_CURR_RELOAD_NUM at the beginning of BB of the current |
138 | insn. Remember that LRA_CURR_RELOAD_NUM is the number of emitted |
139 | reload insns. */ |
140 | static int bb_reload_num; |
141 | |
142 | /* The current insn being processed and its corresponding single set |
143 | (NULL otherwise), its data (basic block, the insn data, the insn |
144 | static data, and the mode of each operand). */ |
145 | static rtx_insn *curr_insn; |
146 | static rtx curr_insn_set; |
147 | static basic_block curr_bb; |
148 | static lra_insn_recog_data_t curr_id; |
149 | static struct lra_static_insn_data *curr_static_id; |
150 | static machine_mode curr_operand_mode[MAX_RECOG_OPERANDS]; |
151 | /* Mode of the register substituted by its equivalence with VOIDmode |
152 | (e.g. constant) and whose subreg is given operand of the current |
153 | insn. VOIDmode in all other cases. */ |
154 | static machine_mode original_subreg_reg_mode[MAX_RECOG_OPERANDS]; |
155 | |
156 | |
157 | |
158 | /* Start numbers for new registers and insns at the current constraints |
159 | pass start. */ |
160 | static int new_regno_start; |
161 | static int new_insn_uid_start; |
162 | |
163 | /* If LOC is nonnull, strip any outer subreg from it. */ |
164 | static inline rtx * |
165 | strip_subreg (rtx *loc) |
166 | { |
167 | return loc && GET_CODE (*loc) == SUBREG ? &SUBREG_REG (*loc) : loc; |
168 | } |
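
/* For illustration (the register numbers are made up): given a location
   holding (subreg:SI (reg:DI 100) 0), strip_subreg returns the address of
   the inner (reg:DI 100); for a plain (reg:SI 101) or a null LOC it
   returns LOC unchanged.  */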
169 | |
170 | /* Return the hard regno of REGNO or, if it was not assigned to a hard |
171 | register, a hard register from its allocno class. */ |
172 | static int |
173 | get_try_hard_regno (int regno) |
174 | { |
175 | int hard_regno; |
176 | enum reg_class rclass; |
177 | |
178 | if ((hard_regno = regno) >= FIRST_PSEUDO_REGISTER) |
179 | hard_regno = lra_get_regno_hard_regno (regno); |
180 | if (hard_regno >= 0) |
181 | return hard_regno; |
182 | rclass = lra_get_allocno_class (regno); |
183 | if (rclass == NO_REGS) |
184 | return -1; |
185 | return ira_class_hard_regs[rclass][0]; |
186 | } |
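
/* Purely illustrative example: if REGNO is a reload pseudo that has not
   been assigned a hard register yet and its allocno class is GENERAL_REGS,
   the returned value is ira_class_hard_regs[GENERAL_REGS][0], i.e. the
   first allocatable general register in the allocation order.  */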
187 | |
188 | /* Return the hard regno of X after removing its subreg. If X is not a |
189 | register or a subreg of a register, return -1. If X is a pseudo, use its |
190 | assignment. If X is a hard regno, return the final hard regno which will be |
191 | after elimination. */ |
192 | static int |
193 | get_hard_regno (rtx x) |
194 | { |
195 | rtx reg; |
196 | int hard_regno; |
197 | |
198 | reg = x; |
199 | if (SUBREG_P (x)) |
200 | reg = SUBREG_REG (x); |
201 | if (! REG_P (reg)) |
202 | return -1; |
203 | if (! HARD_REGISTER_NUM_P (hard_regno = REGNO (reg))) |
204 | hard_regno = lra_get_regno_hard_regno (hard_regno); |
205 | if (hard_regno < 0) |
206 | return -1; |
207 | if (HARD_REGISTER_NUM_P (REGNO (reg))) |
208 | hard_regno = lra_get_elimination_hard_regno (hard_regno); |
209 | if (SUBREG_P (x)) |
210 | hard_regno += subreg_regno_offset (hard_regno, GET_MODE (reg), |
211 | SUBREG_BYTE (x), GET_MODE (x)); |
212 | return hard_regno; |
213 | } |
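
/* A hedged example (assuming a typical 32-bit little-endian target where
   (reg:DI 1) occupies hard registers 1 and 2): for (subreg:SI (reg:DI 1) 4)
   the function returns 1 + subreg_regno_offset (1, DImode, 4, SImode),
   i.e. hard register 2; the exact offset is target-dependent.  */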
214 | |
215 | /* If REGNO is a hard register or has been allocated a hard register, |
216 | return the class of that register. If REGNO is a reload pseudo |
217 | created by the current constraints pass, return its allocno class. |
218 | Return NO_REGS otherwise. */ |
219 | static enum reg_class |
220 | get_reg_class (int regno) |
221 | { |
222 | int hard_regno; |
223 | |
224 | if (! HARD_REGISTER_NUM_P (hard_regno = regno)) |
225 | hard_regno = lra_get_regno_hard_regno (regno); |
226 | if (hard_regno >= 0) |
227 | { |
228 | hard_regno = lra_get_elimination_hard_regno (hard_regno); |
229 | return REGNO_REG_CLASS (hard_regno); |
230 | } |
231 | if (regno >= new_regno_start) |
232 | return lra_get_allocno_class (regno); |
233 | return NO_REGS; |
234 | } |
235 | |
236 | /* Return true if REG_CLASS has enough allocatable hard regs to keep value of |
237 | REG_MODE. */ |
238 | static bool |
239 | enough_allocatable_hard_regs_p (enum reg_class reg_class, |
240 | enum machine_mode reg_mode) |
241 | { |
242 | int i, j, hard_regno, class_size, nregs; |
243 | |
244 | if (hard_reg_set_subset_p (reg_class_contents[reg_class], lra_no_alloc_regs)) |
245 | return false; |
246 | class_size = ira_class_hard_regs_num[reg_class]; |
247 | for (i = 0; i < class_size; i++) |
248 | { |
249 | hard_regno = ira_class_hard_regs[reg_class][i]; |
250 | nregs = hard_regno_nregs (hard_regno, reg_mode); |
251 | if (nregs == 1) |
252 | return true; |
253 | for (j = 0; j < nregs; j++) |
254 | if (TEST_HARD_REG_BIT (lra_no_alloc_regs, hard_regno + j) |
255 | || ! TEST_HARD_REG_BIT (reg_class_contents[reg_class], |
256 | hard_regno + j)) |
257 | break; |
258 | if (j >= nregs) |
259 | return true; |
260 | } |
261 | return false; |
262 | } |
263 | |
264 | /* True if C is a non-empty register class that has too few registers |
265 | to be safely used as a reload target class. */ |
266 | #define SMALL_REGISTER_CLASS_P(C) \ |
267 | (ira_class_hard_regs_num [(C)] == 1 \ |
268 | || (ira_class_hard_regs_num [(C)] >= 1 \ |
269 | && targetm.class_likely_spilled_p (C))) |
270 | |
271 | /* Return true if REG satisfies (or will satisfy) reg class constraint |
272 | CL. Use elimination first if REG is a hard register. If REG is a |
273 | reload pseudo created by this constraints pass, assume that it will |
274 | be allocated a hard register from its allocno class, but allow that |
275 | class to be narrowed to CL if it is currently a superset of CL and |
276 | if either: |
277 | |
278 | - ALLOW_ALL_RELOAD_CLASS_CHANGES_P is true or |
279 | - the instruction we're processing is not a reload move. |
280 | |
281 | If NEW_CLASS is nonnull, set *NEW_CLASS to the new allocno class of |
282 | REGNO (reg), or NO_REGS if no change in its class was needed. */ |
283 | static bool |
284 | in_class_p (rtx reg, enum reg_class cl, enum reg_class *new_class, |
285 | bool allow_all_reload_class_changes_p = false) |
286 | { |
287 | enum reg_class rclass, common_class; |
288 | machine_mode reg_mode; |
289 | rtx src; |
290 | int regno = REGNO (reg); |
291 | |
292 | if (new_class != NULL) |
293 | *new_class = NO_REGS; |
294 | if (regno < FIRST_PSEUDO_REGISTER) |
295 | { |
296 | rtx final_reg = reg; |
297 | rtx *final_loc = &final_reg; |
298 | |
299 | lra_eliminate_reg_if_possible (final_loc); |
300 | return TEST_HARD_REG_BIT (reg_class_contents[cl], REGNO (*final_loc)); |
301 | } |
302 | reg_mode = GET_MODE (reg); |
303 | rclass = get_reg_class (regno); |
304 | src = curr_insn_set != NULL ? SET_SRC (curr_insn_set) : NULL; |
305 | if (regno < new_regno_start |
306 | /* Do not allow the constraints for reload instructions to |
307 | influence the classes of new pseudos. These reloads are |
308 | typically moves that have many alternatives, and restricting |
309 | reload pseudos for one alternative may lead to situations |
310 | where other reload pseudos are no longer allocatable. */ |
311 | || (!allow_all_reload_class_changes_p |
312 | && INSN_UID (curr_insn) >= new_insn_uid_start |
313 | && src != NULL |
314 | && ((REG_P (src) || MEM_P (src)) |
315 | || (GET_CODE (src) == SUBREG |
316 | && (REG_P (SUBREG_REG (src)) || MEM_P (SUBREG_REG (src))))))) |
317 | /* When we don't know what class will be used finally for reload |
318 | pseudos, we use ALL_REGS. */ |
319 | return ((regno >= new_regno_start && rclass == ALL_REGS) |
320 | || (rclass != NO_REGS && ira_class_subset_p[rclass][cl] |
321 | && ! hard_reg_set_subset_p (reg_class_contents[cl], |
322 | lra_no_alloc_regs))); |
323 | else |
324 | { |
325 | common_class = ira_reg_class_subset[rclass][cl]; |
326 | if (new_class != NULL) |
327 | *new_class = common_class; |
328 | return (enough_allocatable_hard_regs_p (common_class, reg_mode) |
329 | /* Do not permit reload insn operand matching (new_class == NULL |
330 | case) if the new class is too small. */ |
331 | && (new_class != NULL || common_class == rclass |
332 | || !SMALL_REGISTER_CLASS_P (common_class))); |
333 | } |
334 | } |
335 | |
336 | /* Return true if REGNO satisfies a memory constraint. */ |
337 | static bool |
338 | in_mem_p (int regno) |
339 | { |
340 | return get_reg_class (regno) == NO_REGS; |
341 | } |
342 | |
343 | /* Return true if ADDR is a valid memory address for mode MODE in address |
344 | space AS, and check that each pseudo has the proper kind of hard |
345 | reg. */ |
346 | static bool |
347 | valid_address_p (machine_mode mode ATTRIBUTE_UNUSED, |
348 | rtx addr, addr_space_t as) |
349 | { |
350 | #ifdef GO_IF_LEGITIMATE_ADDRESS |
351 | lra_assert (ADDR_SPACE_GENERIC_P (as)); |
352 | GO_IF_LEGITIMATE_ADDRESS (mode, addr, win); |
353 | return false; |
354 | |
355 | win: |
356 | return true; |
357 | #else |
358 | return targetm.addr_space.legitimate_address_p (mode, addr, 0, as, |
359 | ERROR_MARK); |
360 | #endif |
361 | } |
362 | |
363 | namespace { |
364 | /* Temporarily eliminates registers in an address (for the lifetime of |
365 | the object). */ |
366 | class address_eliminator { |
367 | public: |
368 | address_eliminator (struct address_info *ad); |
369 | ~address_eliminator (); |
370 | |
371 | private: |
372 | struct address_info *m_ad; |
373 | rtx *m_base_loc; |
374 | rtx m_base_reg; |
375 | rtx *m_index_loc; |
376 | rtx m_index_reg; |
377 | }; |
378 | } |
379 | |
380 | address_eliminator::address_eliminator (struct address_info *ad) |
381 | : m_ad (ad), |
382 | m_base_loc (strip_subreg (ad->base_term)), |
383 | m_base_reg (NULL_RTX), |
384 | m_index_loc (strip_subreg (ad->index_term)), |
385 | m_index_reg (NULL_RTX) |
386 | { |
387 | if (m_base_loc != NULL) |
388 | { |
389 | m_base_reg = *m_base_loc; |
390 | /* If we have a non-legitimate address which is decomposed not in |
391 | the way we expected, don't do the elimination here. In such a case |
392 | the address will be reloaded and the elimination will finally be |
393 | done in the reload insn. */ |
394 | if (REG_P (m_base_reg)) |
395 | lra_eliminate_reg_if_possible (m_base_loc); |
396 | if (m_ad->base_term2 != NULL) |
397 | *m_ad->base_term2 = *m_ad->base_term; |
398 | } |
399 | if (m_index_loc != NULL) |
400 | { |
401 | m_index_reg = *m_index_loc; |
402 | if (REG_P (m_index_reg)) |
403 | lra_eliminate_reg_if_possible (m_index_loc); |
404 | } |
405 | } |
406 | |
407 | address_eliminator::~address_eliminator () |
408 | { |
409 | if (m_base_loc && *m_base_loc != m_base_reg) |
410 | { |
411 | *m_base_loc = m_base_reg; |
412 | if (m_ad->base_term2 != NULL) |
413 | *m_ad->base_term2 = *m_ad->base_term; |
414 | } |
415 | if (m_index_loc && *m_index_loc != m_index_reg) |
416 | *m_index_loc = m_index_reg; |
417 | } |
418 | |
419 | /* Return true if the eliminated form of AD is a legitimate target address. |
420 | If OP is a MEM, AD is the address within OP, otherwise OP should be |
421 | ignored. CONSTRAINT is one constraint that the operand may need |
422 | to meet. */ |
423 | static bool |
424 | valid_address_p (rtx op, struct address_info *ad, |
425 | enum constraint_num constraint) |
426 | { |
427 | address_eliminator eliminator (ad); |
428 | |
429 | /* Allow a memory OP if it matches CONSTRAINT, even if CONSTRAINT is more |
430 | forgiving than "m". |
431 | Need to extract memory from op for special memory constraint, |
432 | i.e. bcst_mem_operand in i386 backend. */ |
433 | if (MEM_P (extract_mem_from_operand (op)) |
434 | && insn_extra_relaxed_memory_constraint (constraint) |
435 | && constraint_satisfied_p (op, constraint)) |
436 | return true; |
437 | |
438 | return valid_address_p (ad->mode, *ad->outer, ad->as); |
439 | } |
440 | |
441 | /* For special_memory_operand, it could be false for MEM_P (op), |
442 | i.e. bcst_mem_operand in i386 backend. |
443 | Extract and return real memory operand or op. */ |
444 | rtx |
445 | extract_mem_from_operand (rtx op) |
446 | { |
447 | for (rtx x = op;; x = XEXP (x, 0)) |
448 | { |
449 | if (MEM_P (x)) |
450 | return x; |
451 | if (GET_RTX_LENGTH (GET_CODE (x)) != 1 |
452 | || GET_RTX_FORMAT (GET_CODE (x))[0] != 'e') |
453 | break; |
454 | } |
455 | return op; |
456 | } |
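
/* For instance, for an i386 broadcast operand such as
   (vec_duplicate:V8SF (mem:SF (reg:DI 100))) the loop digs through the
   single-operand wrapper and returns the inner MEM; for an operand without
   such a chain, OP itself is returned.  */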
457 | |
458 | /* Return true if the eliminated form of memory reference OP satisfies |
459 | extra (special) memory constraint CONSTRAINT. */ |
460 | static bool |
461 | satisfies_memory_constraint_p (rtx op, enum constraint_num constraint) |
462 | { |
463 | struct address_info ad; |
464 | rtx mem = extract_mem_from_operand (op); |
465 | if (!MEM_P (mem)) |
466 | return false; |
467 | |
468 | decompose_mem_address (&ad, mem); |
469 | address_eliminator eliminator (&ad); |
470 | return constraint_satisfied_p (op, constraint); |
471 | } |
472 | |
473 | /* Return true if the eliminated form of address AD satisfies extra |
474 | address constraint CONSTRAINT. */ |
475 | static bool |
476 | satisfies_address_constraint_p (struct address_info *ad, |
477 | enum constraint_num constraint) |
478 | { |
479 | address_eliminator eliminator (ad); |
480 | return constraint_satisfied_p (*ad->outer, constraint); |
481 | } |
482 | |
483 | /* Return true if the eliminated form of address OP satisfies extra |
484 | address constraint CONSTRAINT. */ |
485 | static bool |
486 | satisfies_address_constraint_p (rtx op, enum constraint_num constraint) |
487 | { |
488 | struct address_info ad; |
489 | |
490 | decompose_lea_address (&ad, &op); |
491 | return satisfies_address_constraint_p (&ad, constraint); |
492 | } |
493 | |
494 | /* Initiate equivalences for LRA. As we keep original equivalences |
495 | before any elimination, we need to make copies otherwise any change |
496 | in insns might change the equivalences. */ |
497 | void |
498 | lra_init_equiv (void) |
499 | { |
500 | ira_expand_reg_equiv (); |
501 | for (int i = FIRST_PSEUDO_REGISTER; i < max_reg_num (); i++) |
502 | { |
503 | rtx res; |
504 | |
505 | if ((res = ira_reg_equiv[i].memory) != NULL_RTX) |
506 | ira_reg_equiv[i].memory = copy_rtx (res); |
507 | if ((res = ira_reg_equiv[i].invariant) != NULL_RTX) |
508 | ira_reg_equiv[i].invariant = copy_rtx (res); |
509 | } |
510 | } |
511 | |
512 | static rtx loc_equivalence_callback (rtx, const_rtx, void *); |
513 | |
514 | /* Update equivalence for REGNO. We need to do this as the equivalence |
515 | might contain other pseudos which are changed by their |
516 | equivalences. */ |
517 | static void |
518 | update_equiv (int regno) |
519 | { |
520 | rtx x; |
521 | |
522 | if ((x = ira_reg_equiv[regno].memory) != NULL_RTX) |
523 | ira_reg_equiv[regno].memory |
524 | = simplify_replace_fn_rtx (x, NULL_RTX, loc_equivalence_callback, |
525 | NULL_RTX); |
526 | if ((x = ira_reg_equiv[regno].invariant) != NULL_RTX) |
527 | ira_reg_equiv[regno].invariant |
528 | = simplify_replace_fn_rtx (x, NULL_RTX, loc_equivalence_callback, |
529 | NULL_RTX); |
530 | } |
531 | |
532 | /* If we have decided to substitute X with another value, return that |
533 | value, otherwise return X. */ |
534 | static rtx |
535 | get_equiv (rtx x) |
536 | { |
537 | int regno; |
538 | rtx res; |
539 | |
540 | if (! REG_P (x) || (regno = REGNO (x)) < FIRST_PSEUDO_REGISTER |
541 | || ! ira_reg_equiv[regno].defined_p |
542 | || ! ira_reg_equiv[regno].profitable_p |
543 | || lra_get_regno_hard_regno (regno) >= 0) |
544 | return x; |
545 | if ((res = ira_reg_equiv[regno].memory) != NULL_RTX) |
546 | { |
547 | if (targetm.cannot_substitute_mem_equiv_p (res)) |
548 | return x; |
549 | return res; |
550 | } |
551 | if ((res = ira_reg_equiv[regno].constant) != NULL_RTX) |
552 | return res; |
553 | if ((res = ira_reg_equiv[regno].invariant) != NULL_RTX) |
554 | return res; |
555 | gcc_unreachable (); |
556 | } |
557 | |
558 | /* If we have decided to substitute X with the equivalent value, |
559 | return that value after elimination for INSN, otherwise return |
560 | X. */ |
561 | static rtx |
562 | get_equiv_with_elimination (rtx x, rtx_insn *insn) |
563 | { |
564 | rtx res = get_equiv (x); |
565 | |
566 | if (x == res || CONSTANT_P (res)) |
567 | return res; |
568 | return lra_eliminate_regs_1 (insn, res, GET_MODE (res), |
569 | false, false, 0, true); |
570 | } |
571 | |
572 | /* Set up curr_operand_mode. */ |
573 | static void |
574 | init_curr_operand_mode (void) |
575 | { |
576 | int nop = curr_static_id->n_operands; |
577 | for (int i = 0; i < nop; i++) |
578 | { |
579 | machine_mode mode = GET_MODE (*curr_id->operand_loc[i]); |
580 | if (mode == VOIDmode) |
581 | { |
582 | /* The .md mode for address operands is the mode of the |
583 | addressed value rather than the mode of the address itself. */ |
584 | if (curr_id->icode >= 0 && curr_static_id->operand[i].is_address) |
585 | mode = Pmode; |
586 | else |
587 | mode = curr_static_id->operand[i].mode; |
588 | } |
589 | curr_operand_mode[i] = mode; |
590 | } |
591 | } |
592 | |
593 | |
594 | |
595 | /* The page contains code to reuse input reloads. */ |
596 | |
597 | /* Structure describes input reload of the current insns. */ |
598 | struct input_reload |
599 | { |
600 | /* True for input reload of matched operands. */ |
601 | bool match_p; |
602 | /* Reloaded value. */ |
603 | rtx input; |
604 | /* Reload pseudo used. */ |
605 | rtx reg; |
606 | }; |
607 | |
608 | /* The number of elements in the following array. */ |
609 | static int curr_insn_input_reloads_num; |
610 | /* Array containing info about input reloads. It is used to find the |
611 | same input reload and reuse the reload pseudo in this case. */ |
612 | static struct input_reload curr_insn_input_reloads[LRA_MAX_INSN_RELOADS]; |
613 | |
614 | /* Initiate data concerning reuse of input reloads for the current |
615 | insn. */ |
616 | static void |
617 | init_curr_insn_input_reloads (void) |
618 | { |
619 | curr_insn_input_reloads_num = 0; |
620 | } |
621 | |
622 | /* The canonical form of an rtx inside a MEM is not necessarily the same as the |
623 | canonical form of the rtx outside the MEM. Fix this up in the case that |
624 | we're reloading an address (and therefore pulling it outside a MEM). */ |
625 | static rtx |
626 | canonicalize_reload_addr (rtx addr) |
627 | { |
628 | subrtx_var_iterator::array_type array; |
629 | FOR_EACH_SUBRTX_VAR (iter, array, addr, NONCONST) |
630 | { |
631 | rtx x = *iter; |
632 | if (GET_CODE (x) == MULT && CONST_INT_P (XEXP (x, 1))) |
633 | { |
634 | const HOST_WIDE_INT ci = INTVAL (XEXP (x, 1)); |
635 | const int pwr2 = exact_log2 (ci); |
636 | if (pwr2 > 0) |
637 | { |
638 | /* Rewrite this to use a shift instead, which is canonical when |
639 | outside of a MEM. */ |
640 | PUT_CODE (x, ASHIFT); |
641 | XEXP (x, 1) = GEN_INT (pwr2); |
642 | } |
643 | } |
644 | } |
645 | |
646 | return addr; |
647 | } |
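
/* As an example, an address like
   (plus:SI (mult:SI (reg:SI 100) (const_int 4)) (reg:SI 101)) is rewritten
   in place to (plus:SI (ashift:SI (reg:SI 100) (const_int 2)) (reg:SI 101)),
   since a shift is the canonical form of this multiplication outside a MEM.  */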
648 | |
649 | /* Create a new pseudo using MODE, RCLASS, EXCLUDE_START_HARD_REGS, ORIGINAL or |
650 | reuse an existing reload pseudo. Don't reuse an existing reload pseudo if |
651 | IN_SUBREG_P is true and the reused pseudo should be wrapped up in a SUBREG. |
652 | The result pseudo is returned through RESULT_REG. Return TRUE if we created |
653 | a new pseudo, FALSE if we reused an existing reload pseudo. Use TITLE to |
654 | describe new registers for debug purposes. */ |
655 | static bool |
656 | get_reload_reg (enum op_type type, machine_mode mode, rtx original, |
657 | enum reg_class rclass, HARD_REG_SET *exclude_start_hard_regs, |
658 | bool in_subreg_p, const char *title, rtx *result_reg) |
659 | { |
660 | int i, regno; |
661 | enum reg_class new_class; |
662 | bool unique_p = false; |
663 | |
664 | if (type == OP_OUT) |
665 | { |
666 | /* Output reload registers tend to start out with a conservative |
667 | choice of register class. Usually this is ALL_REGS, although |
668 | a target might narrow it (for performance reasons) through |
669 | targetm.preferred_reload_class. It's therefore quite common |
670 | for a reload instruction to require a more restrictive class |
671 | than the class that was originally assigned to the reload register. |
672 | |
673 | In these situations, it's more efficient to refine the choice |
674 | of register class rather than create a second reload register. |
675 | This also helps to avoid cycling for registers that are only |
676 | used by reload instructions. */ |
677 | if (REG_P (original) |
678 | && (int) REGNO (original) >= new_regno_start |
679 | && INSN_UID (curr_insn) >= new_insn_uid_start |
680 | && in_class_p (original, rclass, &new_class, true)) |
681 | { |
682 | unsigned int regno = REGNO (original); |
683 | if (lra_dump_file != NULL) |
684 | { |
685 | fprintf (lra_dump_file, " Reuse r%d for output ", regno); |
686 | dump_value_slim (lra_dump_file, original, 1); |
687 | } |
688 | if (new_class != lra_get_allocno_class (regno)) |
689 | lra_change_class (regno, new_class, ", change to", false); |
690 | if (lra_dump_file != NULL) |
691 | fprintf (lra_dump_file, "\n"); |
692 | *result_reg = original; |
693 | return false; |
694 | } |
695 | *result_reg |
696 | = lra_create_new_reg_with_unique_value (mode, original, rclass, |
697 | exclude_start_hard_regs, title); |
698 | return true; |
699 | } |
700 | /* Prevent reusing the value of an expression with side effects, |
701 | e.g. volatile memory. */ |
702 | if (! side_effects_p (original)) |
703 | for (i = 0; i < curr_insn_input_reloads_num; i++) |
704 | { |
705 | if (! curr_insn_input_reloads[i].match_p |
706 | && rtx_equal_p (curr_insn_input_reloads[i].input, original) |
707 | && in_class_p (curr_insn_input_reloads[i].reg, rclass, &new_class)) |
708 | { |
709 | rtx reg = curr_insn_input_reloads[i].reg; |
710 | regno = REGNO (reg); |
711 | /* If input is equal to original and both are VOIDmode, |
712 | GET_MODE (reg) might be still different from mode. |
713 | Ensure we don't return *result_reg with wrong mode. */ |
714 | if (GET_MODE (reg) != mode) |
715 | { |
716 | if (in_subreg_p) |
717 | continue; |
718 | if (maybe_lt (GET_MODE_SIZE (GET_MODE (reg)), |
719 | GET_MODE_SIZE (mode))) |
720 | continue; |
721 | reg = lowpart_subreg (mode, reg, GET_MODE (reg)); |
722 | if (reg == NULL_RTX || GET_CODE (reg) != SUBREG) |
723 | continue; |
724 | } |
725 | *result_reg = reg; |
726 | if (lra_dump_file != NULL) |
727 | { |
728 | fprintf (lra_dump_file, " Reuse r%d for reload ", regno); |
729 | dump_value_slim (lra_dump_file, original, 1); |
730 | } |
731 | if (new_class != lra_get_allocno_class (regno)) |
732 | lra_change_class (regno, new_class, ", change to", false); |
733 | if (lra_dump_file != NULL) |
734 | fprintf (lra_dump_file, "\n"); |
735 | return false; |
736 | } |
737 | /* If we have an input reload with a different mode, make sure it |
738 | will get a different hard reg. */ |
739 | else if (REG_P (original) |
740 | && REG_P (curr_insn_input_reloads[i].input) |
741 | && REGNO (original) == REGNO (curr_insn_input_reloads[i].input) |
742 | && (GET_MODE (original) |
743 | != GET_MODE (curr_insn_input_reloads[i].input))) |
744 | unique_p = true; |
745 | } |
746 | *result_reg = (unique_p |
747 | ? lra_create_new_reg_with_unique_value |
748 | : lra_create_new_reg) (mode, original, rclass, |
749 | exclude_start_hard_regs, title); |
750 | lra_assert (curr_insn_input_reloads_num < LRA_MAX_INSN_RELOADS); |
751 | curr_insn_input_reloads[curr_insn_input_reloads_num].input = original; |
752 | curr_insn_input_reloads[curr_insn_input_reloads_num].match_p = false; |
753 | curr_insn_input_reloads[curr_insn_input_reloads_num++].reg = *result_reg; |
754 | return true; |
755 | } |
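
/* A minimal usage sketch (a hypothetical call site; the operand, class and
   title below are made up for illustration):

     rtx new_reg;
     bool new_p = get_reload_reg (OP_IN, GET_MODE (op), op, GENERAL_REGS,
                                  NULL, false, "operand", &new_reg);

   NEW_P is true if a brand-new reload pseudo was created for OP and false
   if an existing reload pseudo was reused.  Callers in this file typically
   go on to emit the actual reload move using the returned register.  */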
756 | |
757 | |
758 | /* The page contains major code to choose the current insn alternative |
759 | and generate reloads for it. */ |
760 | |
761 | /* Return the offset from REGNO of the least significant register |
762 | in (reg:MODE REGNO). |
763 | |
764 | This function is used to tell whether two registers satisfy |
765 | a matching constraint. (reg:MODE1 REGNO1) matches (reg:MODE2 REGNO2) if: |
766 | |
767 | REGNO1 + lra_constraint_offset (REGNO1, MODE1) |
768 | == REGNO2 + lra_constraint_offset (REGNO2, MODE2) */ |
769 | int |
770 | lra_constraint_offset (int regno, machine_mode mode) |
771 | { |
772 | lra_assert (regno < FIRST_PSEUDO_REGISTER); |
773 | |
774 | scalar_int_mode int_mode; |
775 | if (WORDS_BIG_ENDIAN |
776 | && is_a <scalar_int_mode> (mode, &int_mode) |
777 | && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD) |
778 | return hard_regno_nregs (regno, mode) - 1; |
779 | return 0; |
780 | } |
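
/* Illustrative example: on a WORDS_BIG_ENDIAN target with 32-bit words
   where DImode occupies two hard registers, lra_constraint_offset (2, DImode)
   is 1 because the least significant word lives in the second register,
   while for any single-word mode the offset is 0.  Hence (reg:DI 2) can
   match (reg:SI 3) under a matching constraint.  */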
781 | |
782 | /* Like rtx_equal_p except that it allows a REG and a SUBREG to match |
783 | if they are the same hard reg, and has special hacks for |
784 | auto-increment and auto-decrement. This is specifically intended for |
785 | process_alt_operands to use in determining whether two operands |
786 | match. X is the operand whose number is the lower of the two. |
787 | |
788 | It is supposed that X is the output operand and Y is the input |
789 | operand. Y_HARD_REGNO is the final hard regno of register Y or |
790 | register in subreg Y as we know it now. Otherwise, it is a |
791 | negative value. */ |
792 | static bool |
793 | operands_match_p (rtx x, rtx y, int y_hard_regno) |
794 | { |
795 | int i; |
796 | RTX_CODE code = GET_CODE (x); |
797 | const char *fmt; |
798 | |
799 | if (x == y) |
800 | return true; |
801 | if ((code == REG || (code == SUBREG && REG_P (SUBREG_REG (x)))) |
802 | && (REG_P (y) || (GET_CODE (y) == SUBREG && REG_P (SUBREG_REG (y))))) |
803 | { |
804 | int j; |
805 | |
806 | i = get_hard_regno (x); |
807 | if (i < 0) |
808 | goto slow; |
809 | |
810 | if ((j = y_hard_regno) < 0) |
811 | goto slow; |
812 | |
813 | i += lra_constraint_offset (i, GET_MODE (x)); |
814 | j += lra_constraint_offset (j, GET_MODE (y)); |
815 | |
816 | return i == j; |
817 | } |
818 | |
819 | /* If two operands must match, because they are really a single |
820 | operand of an assembler insn, then two post-increments are invalid |
821 | because the assembler insn would increment only once. On the |
822 | other hand, a post-increment matches ordinary indexing if the |
823 | post-increment is the output operand. */ |
824 | if (code == POST_DEC || code == POST_INC || code == POST_MODIFY) |
825 | return operands_match_p (XEXP (x, 0), y, y_hard_regno); |
826 | |
827 | /* Two pre-increments are invalid because the assembler insn would |
828 | increment only once. On the other hand, a pre-increment matches |
829 | ordinary indexing if the pre-increment is the input operand. */ |
830 | if (GET_CODE (y) == PRE_DEC || GET_CODE (y) == PRE_INC |
831 | || GET_CODE (y) == PRE_MODIFY) |
832 | return operands_match_p (x, XEXP (y, 0), -1); |
833 | |
834 | slow: |
835 | |
836 | if (code == REG && REG_P (y)) |
837 | return REGNO (x) == REGNO (y); |
838 | |
839 | if (code == REG && GET_CODE (y) == SUBREG && REG_P (SUBREG_REG (y)) |
840 | && x == SUBREG_REG (y)) |
841 | return true; |
842 | if (GET_CODE (y) == REG && code == SUBREG && REG_P (SUBREG_REG (x)) |
843 | && SUBREG_REG (x) == y) |
844 | return true; |
845 | |
846 | /* Now we have disposed of all the cases in which different rtx |
847 | codes can match. */ |
848 | if (code != GET_CODE (y)) |
849 | return false; |
850 | |
851 | /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent. */ |
852 | if (GET_MODE (x) != GET_MODE (y)) |
853 | return false; |
854 | |
855 | switch (code) |
856 | { |
857 | CASE_CONST_UNIQUE: |
858 | return false; |
859 | |
860 | case CONST_VECTOR: |
861 | if (!same_vector_encodings_p (x, y)) |
862 | return false; |
863 | break; |
864 | |
865 | case LABEL_REF: |
866 | return label_ref_label (x) == label_ref_label (y); |
867 | case SYMBOL_REF: |
868 | return XSTR (x, 0) == XSTR (y, 0); |
869 | |
870 | default: |
871 | break; |
872 | } |
873 | |
874 | /* Compare the elements. If any pair of corresponding elements fail |
875 | to match, return false for the whole thing. */ |
876 | |
877 | fmt = GET_RTX_FORMAT (code); |
878 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
879 | { |
880 | int val, j; |
881 | switch (fmt[i]) |
882 | { |
883 | case 'w': |
884 | if (XWINT (x, i) != XWINT (y, i)) |
885 | return false; |
886 | break; |
887 | |
888 | case 'i': |
889 | if (XINT (x, i) != XINT (y, i)) |
890 | return false; |
891 | break; |
892 | |
893 | case 'p': |
894 | if (maybe_ne (SUBREG_BYTE (x), SUBREG_BYTE (y))) |
895 | return false; |
896 | break; |
897 | |
898 | case 'e': |
899 | val = operands_match_p (XEXP (x, i), XEXP (y, i), -1); |
900 | if (val == 0) |
901 | return false; |
902 | break; |
903 | |
904 | case '0': |
905 | break; |
906 | |
907 | case 'E': |
908 | if (XVECLEN (x, i) != XVECLEN (y, i)) |
909 | return false; |
910 | for (j = XVECLEN (x, i) - 1; j >= 0; --j) |
911 | { |
912 | val = operands_match_p (XVECEXP (x, i, j), XVECEXP (y, i, j), -1); |
913 | if (val == 0) |
914 | return false; |
915 | } |
916 | break; |
917 | |
918 | /* It is believed that rtx's at this level will never |
919 | contain anything but integers and other rtx's, except for |
920 | within LABEL_REFs and SYMBOL_REFs. */ |
921 | default: |
922 | gcc_unreachable (); |
923 | } |
924 | } |
925 | return true; |
926 | } |
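
/* For example, as the comments above describe, (post_inc:SI (reg:SI 3))
   used as the output operand matches the plain input operand (reg:SI 3),
   whereas two post-increments of the same register never match.  */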
927 | |
928 | /* True if X is a constant that can be forced into the constant pool. |
929 | MODE is the mode of the operand, or VOIDmode if not known. */ |
930 | #define CONST_POOL_OK_P(MODE, X) \ |
931 | ((MODE) != VOIDmode \ |
932 | && CONSTANT_P (X) \ |
933 | && GET_CODE (X) != HIGH \ |
934 | && GET_MODE_SIZE (MODE).is_constant () \ |
935 | && !targetm.cannot_force_const_mem (MODE, X)) |
936 | |
937 | /* If REG is a reload pseudo, try to make its class satisfying CL. */ |
938 | static void |
939 | narrow_reload_pseudo_class (rtx reg, enum reg_class cl) |
940 | { |
941 | enum reg_class rclass; |
942 | |
943 | /* Do not make a more accurate class from reloads that were generated. |
944 | They are mostly moves with a lot of constraints. Making a more |
945 | accurate class may result in a very narrow class and the |
946 | impossibility of finding registers for several reloads of one insn. */ |
947 | if (INSN_UID (curr_insn) >= new_insn_uid_start) |
948 | return; |
949 | if (GET_CODE (reg) == SUBREG) |
950 | reg = SUBREG_REG (reg); |
951 | if (! REG_P (reg) || (int) REGNO (reg) < new_regno_start) |
952 | return; |
953 | if (in_class_p (reg, cl, &rclass) && rclass != cl) |
954 | lra_change_class (REGNO (reg), rclass, " Change to", true); |
955 | } |
956 | |
957 | /* Searches X for any reference to a reg with the same value as REGNO, |
958 | returning the rtx of the reference found if any. Otherwise, |
959 | returns NULL_RTX. */ |
960 | static rtx |
961 | regno_val_use_in (unsigned int regno, rtx x) |
962 | { |
963 | const char *fmt; |
964 | int i, j; |
965 | rtx tem; |
966 | |
967 | if (REG_P (x) && lra_reg_info[REGNO (x)].val == lra_reg_info[regno].val) |
968 | return x; |
969 | |
970 | fmt = GET_RTX_FORMAT (GET_CODE (x)); |
971 | for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--) |
972 | { |
973 | if (fmt[i] == 'e') |
974 | { |
975 | if ((tem = regno_val_use_in (regno, XEXP (x, i)))) |
976 | return tem; |
977 | } |
978 | else if (fmt[i] == 'E') |
979 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
980 | if ((tem = regno_val_use_in (regno , XVECEXP (x, i, j)))) |
981 | return tem; |
982 | } |
983 | |
984 | return NULL_RTX; |
985 | } |
986 | |
987 | /* Return true if all current insn non-output operands except INS (it |
988 | has a negative end marker) do not use pseudos with the same value |
989 | as REGNO. */ |
990 | static bool |
991 | check_conflict_input_operands (int regno, signed char *ins) |
992 | { |
993 | int in; |
994 | int n_operands = curr_static_id->n_operands; |
995 | |
996 | for (int nop = 0; nop < n_operands; nop++) |
997 | if (! curr_static_id->operand[nop].is_operator |
998 | && curr_static_id->operand[nop].type != OP_OUT) |
999 | { |
1000 | for (int i = 0; (in = ins[i]) >= 0; i++) |
1001 | if (in == nop) |
1002 | break; |
1003 | if (in < 0 |
1004 | && regno_val_use_in (regno, *curr_id->operand_loc[nop]) != NULL_RTX) |
1005 | return false; |
1006 | } |
1007 | return true; |
1008 | } |
1009 | |
1010 | /* Generate reloads for matching OUT and INS (array of input operand numbers |
1011 | with end marker -1) with reg class GOAL_CLASS and EXCLUDE_START_HARD_REGS, |
1012 | considering output operands OUTS (similar array to INS) needing to be in |
1013 | different registers. Add input and output reloads correspondingly to the |
1014 | lists *BEFORE and *AFTER. OUT might be negative. In this case we generate |
1015 | input reloads for matched input operands INS. EARLY_CLOBBER_P is a flag |
1016 | that the output operand is early clobbered for chosen alternative. */ |
1017 | static void |
1018 | match_reload (signed char out, signed char *ins, signed char *outs, |
1019 | enum reg_class goal_class, HARD_REG_SET *exclude_start_hard_regs, |
1020 | rtx_insn **before, rtx_insn **after, bool early_clobber_p) |
1021 | { |
1022 | bool out_conflict; |
1023 | int i, in; |
1024 | rtx new_in_reg, new_out_reg, reg; |
1025 | machine_mode inmode, outmode; |
1026 | rtx in_rtx = *curr_id->operand_loc[ins[0]]; |
1027 | rtx out_rtx = out < 0 ? in_rtx : *curr_id->operand_loc[out]; |
1028 | |
1029 | inmode = curr_operand_mode[ins[0]]; |
1030 | outmode = out < 0 ? inmode : curr_operand_mode[out]; |
1031 | push_to_sequence (*before); |
1032 | if (inmode != outmode) |
1033 | { |
1034 | /* process_alt_operands has already checked that the mode sizes |
1035 | are ordered. */ |
1036 | if (partial_subreg_p (outmode, inmode)) |
1037 | { |
1038 | bool asm_p = asm_noperands (PATTERN (curr_insn)) >= 0; |
1039 | int hr; |
1040 | HARD_REG_SET temp_hard_reg_set; |
1041 | |
1042 | if (asm_p && (hr = get_hard_regno (out_rtx)) >= 0 |
1043 | && hard_regno_nregs (hr, inmode) > 1) |
1044 | { |
1045 | /* See gcc.c-torture/execute/20030222-1.c. |
1046 | Consider the code for 32-bit (e.g. BE) target: |
1047 | int i, v; long x; x = v; asm ("" : "=r" (i) : "0" (x)); |
1048 | We generate the following RTL with reload insns: |
1049 | 1. subreg:si(x:di, 0) = 0; |
1050 | 2. subreg:si(x:di, 4) = v:si; |
1051 | 3. t:di = x:di, dead x; |
1052 | 4. asm ("" : "=r" (subreg:si(t:di,4)) : "0" (t:di)) |
1053 | 5. i:si = subreg:si(t:di,4); |
1054 | If we assign the hard reg of x to t, dead code elimination |
1055 | will remove insn #2 and we will use an uninitialized hard reg. |
1056 | So exclude the hard reg of x for t. We could ignore this |
1057 | problem for a non-empty asm using all of the value of x, but it is |
1058 | hard to check that the asm is expanded into insns really using x |
1059 | and setting r. */ |
1060 | CLEAR_HARD_REG_SET (temp_hard_reg_set); |
1061 | if (exclude_start_hard_regs != NULL) |
1062 | temp_hard_reg_set = *exclude_start_hard_regs; |
1063 | SET_HARD_REG_BIT (temp_hard_reg_set, hr); |
1064 | exclude_start_hard_regs = &temp_hard_reg_set; |
1065 | } |
1066 | reg = new_in_reg |
1067 | = lra_create_new_reg_with_unique_value (inmode, in_rtx, goal_class, |
1068 | exclude_start_hard_regs, |
1069 | "" ); |
1070 | new_out_reg = gen_lowpart_SUBREG (outmode, reg); |
1071 | LRA_SUBREG_P (new_out_reg) = 1; |
1072 | /* If the input reg is dying here, we can use the same hard |
1073 | register for REG and IN_RTX. We do it only for original |
1074 | pseudos as reload pseudos can die although original |
1075 | pseudos still live where reload pseudos die. */ |
1076 | if (REG_P (in_rtx) && (int) REGNO (in_rtx) < lra_new_regno_start |
1077 | && find_regno_note (curr_insn, REG_DEAD, REGNO (in_rtx)) |
1078 | && (!early_clobber_p |
1079 | || check_conflict_input_operands(REGNO (in_rtx), ins))) |
1080 | lra_assign_reg_val (REGNO (in_rtx), REGNO (reg)); |
1081 | } |
1082 | else |
1083 | { |
1084 | reg = new_out_reg |
1085 | = lra_create_new_reg_with_unique_value (outmode, out_rtx, |
1086 | goal_class, |
1087 | exclude_start_hard_regs, |
1088 | "" ); |
1089 | new_in_reg = gen_lowpart_SUBREG (inmode, reg); |
1090 | /* NEW_IN_REG is non-paradoxical subreg. We don't want |
1091 | NEW_OUT_REG living above. We add clobber clause for |
1092 | this. This is just a temporary clobber. We can remove |
1093 | it at the end of LRA work. */ |
1094 | rtx_insn *clobber = emit_clobber (new_out_reg); |
1095 | LRA_TEMP_CLOBBER_P (PATTERN (clobber)) = 1; |
1096 | LRA_SUBREG_P (new_in_reg) = 1; |
1097 | if (GET_CODE (in_rtx) == SUBREG) |
1098 | { |
1099 | rtx subreg_reg = SUBREG_REG (in_rtx); |
1100 | |
1101 | /* If SUBREG_REG is dying here and sub-registers IN_RTX |
1102 | and NEW_IN_REG are similar, we can use the same hard |
1103 | register for REG and SUBREG_REG. */ |
1104 | if (REG_P (subreg_reg) |
1105 | && (int) REGNO (subreg_reg) < lra_new_regno_start |
1106 | && GET_MODE (subreg_reg) == outmode |
1107 | && known_eq (SUBREG_BYTE (in_rtx), SUBREG_BYTE (new_in_reg)) |
1108 | && find_regno_note (curr_insn, REG_DEAD, REGNO (subreg_reg)) |
1109 | && (! early_clobber_p |
1110 | || check_conflict_input_operands (REGNO (subreg_reg), |
1111 | ins))) |
1112 | lra_assign_reg_val (REGNO (subreg_reg), REGNO (reg)); |
1113 | } |
1114 | } |
1115 | } |
1116 | else |
1117 | { |
1118 | /* Pseudos have values -- see comments for lra_reg_info. |
1119 | Different pseudos with the same value do not conflict even if |
1120 | they live in the same place. When we create a pseudo we |
1121 | assign value of original pseudo (if any) from which we |
1122 | created the new pseudo. If we create the pseudo from the |
1123 | input pseudo, the new pseudo will have no conflict with the |
1124 | input pseudo which is wrong when the input pseudo lives after |
1125 | the insn and as the new pseudo value is changed by the insn |
1126 | output. Therefore we create the new pseudo from the output |
1127 | except the case when we have single matched dying input |
1128 | pseudo. |
1129 | |
1130 | We cannot reuse the current output register because we might |
1131 | have a situation like "a <- a op b", where the constraints |
1132 | force the second input operand ("b") to match the output |
1133 | operand ("a"). "b" must then be copied into a new register |
1134 | so that it doesn't clobber the current value of "a". |
1135 | |
1136 | We cannot use the same value if the output pseudo is |
1137 | early clobbered or the input pseudo is mentioned in the |
1138 | output, e.g. as an address part in memory, because |
1139 | output reload will actually extend the pseudo liveness. |
1140 | We don't care about eliminable hard regs here as we are |
1141 | interested only in pseudos. */ |
1142 | |
1143 | /* Matching input's register value is the same as one of the other |
1144 | output operand. Output operands in a parallel insn must be in |
1145 | different registers. */ |
1146 | out_conflict = false; |
1147 | if (REG_P (in_rtx)) |
1148 | { |
1149 | for (i = 0; outs[i] >= 0; i++) |
1150 | { |
1151 | rtx other_out_rtx = *curr_id->operand_loc[outs[i]]; |
1152 | if (outs[i] != out && REG_P (other_out_rtx) |
1153 | && (regno_val_use_in (REGNO (in_rtx), other_out_rtx) |
1154 | != NULL_RTX)) |
1155 | { |
1156 | out_conflict = true; |
1157 | break; |
1158 | } |
1159 | } |
1160 | } |
1161 | |
1162 | new_in_reg = new_out_reg |
1163 | = (! early_clobber_p && ins[1] < 0 && REG_P (in_rtx) |
1164 | && (int) REGNO (in_rtx) < lra_new_regno_start |
1165 | && find_regno_note (curr_insn, REG_DEAD, REGNO (in_rtx)) |
1166 | && (! early_clobber_p |
1167 | || check_conflict_input_operands (REGNO (in_rtx), ins)) |
1168 | && (out < 0 |
1169 | || regno_val_use_in (REGNO (in_rtx), out_rtx) == NULL_RTX) |
1170 | && !out_conflict |
1171 | ? lra_create_new_reg (inmode, in_rtx, goal_class, |
1172 | exclude_start_hard_regs, "" ) |
1173 | : lra_create_new_reg_with_unique_value (outmode, out_rtx, goal_class, |
1174 | exclude_start_hard_regs, |
1175 | "" )); |
1176 | } |
1177 | /* An input operand can result from transformations performed before |
1178 | processing the insn constraints. One example of such a transformation |
1179 | is subreg reloading (see function simplify_operand_subreg). The new |
1180 | pseudos created by the transformations might have an inaccurate |
1181 | class (ALL_REGS) and we should make their classes more |
1182 | accurate. */ |
1183 | narrow_reload_pseudo_class (in_rtx, goal_class); |
1184 | lra_emit_move (copy_rtx (new_in_reg), in_rtx); |
1185 | *before = get_insns (); |
1186 | end_sequence (); |
1187 | /* Add the new pseudo to consider values of subsequent input reload |
1188 | pseudos. */ |
1189 | lra_assert (curr_insn_input_reloads_num < LRA_MAX_INSN_RELOADS); |
1190 | curr_insn_input_reloads[curr_insn_input_reloads_num].input = in_rtx; |
1191 | curr_insn_input_reloads[curr_insn_input_reloads_num].match_p = true; |
1192 | curr_insn_input_reloads[curr_insn_input_reloads_num++].reg = new_in_reg; |
1193 | for (i = 0; (in = ins[i]) >= 0; i++) |
1194 | if (GET_MODE (*curr_id->operand_loc[in]) == VOIDmode |
1195 | || GET_MODE (new_in_reg) == GET_MODE (*curr_id->operand_loc[in])) |
1196 | *curr_id->operand_loc[in] = new_in_reg; |
1197 | else |
1198 | { |
1199 | lra_assert |
1200 | (GET_MODE (new_out_reg) == GET_MODE (*curr_id->operand_loc[in])); |
1201 | *curr_id->operand_loc[in] = new_out_reg; |
1202 | } |
1203 | lra_update_dups (curr_id, ins); |
1204 | if (out < 0) |
1205 | return; |
1206 | /* See a comment for the input operand above. */ |
1207 | narrow_reload_pseudo_class (out_rtx, goal_class); |
1208 | if (find_reg_note (curr_insn, REG_UNUSED, out_rtx) == NULL_RTX) |
1209 | { |
1210 | reg = SUBREG_P (out_rtx) ? SUBREG_REG (out_rtx) : out_rtx; |
1211 | start_sequence (); |
1212 | /* If we had strict_low_part, use it also in reload to keep other |
1213 | parts unchanged but do it only for regs as strict_low_part |
1214 | has no sense for memory and probably there is no insn pattern |
1215 | to match the reload insn in memory case. */ |
1216 | if (out >= 0 && curr_static_id->operand[out].strict_low && REG_P (reg)) |
1217 | out_rtx = gen_rtx_STRICT_LOW_PART (VOIDmode, out_rtx); |
1218 | lra_emit_move (out_rtx, copy_rtx (new_out_reg)); |
1219 | emit_insn (*after); |
1220 | *after = get_insns (); |
1221 | end_sequence (); |
1222 | } |
1223 | *curr_id->operand_loc[out] = new_out_reg; |
1224 | lra_update_dup (curr_id, out); |
1225 | } |
1226 | |
1227 | /* Return register class which is union of all reg classes in insn |
1228 | constraint alternative string starting with P. */ |
1229 | static enum reg_class |
1230 | reg_class_from_constraints (const char *p) |
1231 | { |
1232 | int c, len; |
1233 | enum reg_class op_class = NO_REGS; |
1234 | |
1235 | do |
1236 | switch ((c = *p, len = CONSTRAINT_LEN (c, p)), c) |
1237 | { |
1238 | case '#': |
1239 | case ',': |
1240 | return op_class; |
1241 | |
1242 | case 'g': |
1243 | op_class = reg_class_subunion[op_class][GENERAL_REGS]; |
1244 | break; |
1245 | |
1246 | default: |
1247 | enum constraint_num cn = lookup_constraint (p); |
1248 | enum reg_class cl = reg_class_for_constraint (cn); |
1249 | if (cl == NO_REGS) |
1250 | { |
1251 | if (insn_extra_address_constraint (cn)) |
1252 | op_class |
1253 | = (reg_class_subunion |
1254 | [op_class][base_reg_class (VOIDmode, ADDR_SPACE_GENERIC, |
1255 | ADDRESS, SCRATCH)]); |
1256 | break; |
1257 | } |
1258 | |
1259 | op_class = reg_class_subunion[op_class][cl]; |
1260 | break; |
1261 | } |
1262 | while ((p += len), c); |
1263 | return op_class; |
1264 | } |
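
/* For illustration: for the constraint string "r" (or "g") the result is
   GENERAL_REGS; a pure memory letter such as "m" maps to NO_REGS and so
   contributes nothing, hence "rm" also yields GENERAL_REGS; an address
   constraint such as "p" adds the base register class used for
   addresses.  */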
1265 | |
1266 | /* If OP is a register, return the class of the register as per |
1267 | get_reg_class, otherwise return NO_REGS. */ |
1268 | static inline enum reg_class |
1269 | get_op_class (rtx op) |
1270 | { |
1271 | return REG_P (op) ? get_reg_class (REGNO (op)) : NO_REGS; |
1272 | } |
1273 | |
1274 | /* Return generated insn mem_pseudo:=val if TO_P or val:=mem_pseudo |
1275 | otherwise. If modes of MEM_PSEUDO and VAL are different, use |
1276 | SUBREG for VAL to make them equal. */ |
1277 | static rtx_insn * |
1278 | emit_spill_move (bool to_p, rtx mem_pseudo, rtx val) |
1279 | { |
1280 | if (GET_MODE (mem_pseudo) != GET_MODE (val)) |
1281 | { |
1282 | /* Usually size of mem_pseudo is greater than val size but in |
1283 | rare cases it can be less as it can be defined by target |
1284 | dependent macro HARD_REGNO_CALLER_SAVE_MODE. */ |
1285 | if (! MEM_P (val)) |
1286 | { |
1287 | val = gen_lowpart_SUBREG (GET_MODE (mem_pseudo), |
1288 | GET_CODE (val) == SUBREG |
1289 | ? SUBREG_REG (val) : val); |
1290 | LRA_SUBREG_P (val) = 1; |
1291 | } |
1292 | else |
1293 | { |
1294 | mem_pseudo = gen_lowpart_SUBREG (GET_MODE (val), mem_pseudo); |
1295 | LRA_SUBREG_P (mem_pseudo) = 1; |
1296 | } |
1297 | } |
1298 | return to_p ? gen_move_insn (mem_pseudo, val) |
1299 | : gen_move_insn (val, mem_pseudo); |
1300 | } |
1301 | |
1302 | /* Process a special case insn (register move), return true if we |
1303 | don't need to process it anymore. INSN should be a single set |
1304 | insn. Set up that RTL was changed through CHANGE_P and that hook |
1305 | TARGET_SECONDARY_MEMORY_NEEDED says to use secondary memory through |
1306 | SEC_MEM_P. */ |
1307 | static bool |
1308 | check_and_process_move (bool *change_p, bool *sec_mem_p ATTRIBUTE_UNUSED) |
1309 | { |
1310 | int sregno, dregno; |
1311 | rtx dest, src, dreg, sreg, new_reg, scratch_reg; |
1312 | rtx_insn *before; |
1313 | enum reg_class dclass, sclass, secondary_class; |
1314 | secondary_reload_info sri; |
1315 | |
1316 | lra_assert (curr_insn_set != NULL_RTX); |
1317 | dreg = dest = SET_DEST (curr_insn_set); |
1318 | sreg = src = SET_SRC (curr_insn_set); |
1319 | if (GET_CODE (dest) == SUBREG) |
1320 | dreg = SUBREG_REG (dest); |
1321 | if (GET_CODE (src) == SUBREG) |
1322 | sreg = SUBREG_REG (src); |
1323 | if (! (REG_P (dreg) || MEM_P (dreg)) || ! (REG_P (sreg) || MEM_P (sreg))) |
1324 | return false; |
1325 | sclass = dclass = NO_REGS; |
1326 | if (REG_P (dreg)) |
1327 | dclass = get_reg_class (REGNO (dreg)); |
1328 | gcc_assert (dclass < LIM_REG_CLASSES && dclass >= NO_REGS); |
1329 | if (dclass == ALL_REGS) |
1330 | /* ALL_REGS is used for new pseudos created by transformations |
1331 | like reload of SUBREG_REG (see function |
1332 | simplify_operand_subreg). We don't know their class yet. We |
1333 | should figure out the class from processing the insn |
1334 | constraints not in this fast path function. Even if ALL_REGS |
1335 | were a right class for the pseudo, secondary_... hooks usually |
1336 | are not defined for ALL_REGS. */ |
1337 | return false; |
1338 | if (REG_P (sreg)) |
1339 | sclass = get_reg_class (REGNO (sreg)); |
1340 | gcc_assert (sclass < LIM_REG_CLASSES && sclass >= NO_REGS); |
1341 | if (sclass == ALL_REGS) |
1342 | /* See comments above. */ |
1343 | return false; |
1344 | if (sclass == NO_REGS && dclass == NO_REGS) |
1345 | return false; |
1346 | if (targetm.secondary_memory_needed (GET_MODE (src), sclass, dclass) |
1347 | && ((sclass != NO_REGS && dclass != NO_REGS) |
1348 | || (GET_MODE (src) |
1349 | != targetm.secondary_memory_needed_mode (GET_MODE (src))))) |
1350 | { |
1351 | *sec_mem_p = true; |
1352 | return false; |
1353 | } |
1354 | if (! REG_P (dreg) || ! REG_P (sreg)) |
1355 | return false; |
1356 | sri.prev_sri = NULL; |
1357 | sri.icode = CODE_FOR_nothing; |
1358 | sri.extra_cost = 0; |
1359 | secondary_class = NO_REGS; |
1360 | /* Set up hard register for a reload pseudo for hook |
1361 | secondary_reload because some targets just ignore unassigned |
1362 | pseudos in the hook. */ |
1363 | if (dclass != NO_REGS && lra_get_regno_hard_regno (REGNO (dreg)) < 0) |
1364 | { |
1365 | dregno = REGNO (dreg); |
1366 | reg_renumber[dregno] = ira_class_hard_regs[dclass][0]; |
1367 | } |
1368 | else |
1369 | dregno = -1; |
1370 | if (sclass != NO_REGS && lra_get_regno_hard_regno (REGNO (sreg)) < 0) |
1371 | { |
1372 | sregno = REGNO (sreg); |
1373 | reg_renumber[sregno] = ira_class_hard_regs[sclass][0]; |
1374 | } |
1375 | else |
1376 | sregno = -1; |
1377 | if (sclass != NO_REGS) |
1378 | secondary_class |
1379 | = (enum reg_class) targetm.secondary_reload (false, dest, |
1380 | (reg_class_t) sclass, |
1381 | GET_MODE (src), &sri); |
1382 | if (sclass == NO_REGS |
1383 | || ((secondary_class != NO_REGS || sri.icode != CODE_FOR_nothing) |
1384 | && dclass != NO_REGS)) |
1385 | { |
1386 | enum reg_class old_sclass = secondary_class; |
1387 | secondary_reload_info old_sri = sri; |
1388 | |
1389 | sri.prev_sri = NULL; |
1390 | sri.icode = CODE_FOR_nothing; |
1391 | sri.extra_cost = 0; |
1392 | secondary_class |
1393 | = (enum reg_class) targetm.secondary_reload (true, src, |
1394 | (reg_class_t) dclass, |
1395 | GET_MODE (src), &sri); |
1396 | /* Check the target hook consistency. */ |
1397 | lra_assert |
1398 | ((secondary_class == NO_REGS && sri.icode == CODE_FOR_nothing) |
1399 | || (old_sclass == NO_REGS && old_sri.icode == CODE_FOR_nothing) |
1400 | || (secondary_class == old_sclass && sri.icode == old_sri.icode)); |
1401 | } |
1402 | if (sregno >= 0) |
1403 | reg_renumber [sregno] = -1; |
1404 | if (dregno >= 0) |
1405 | reg_renumber [dregno] = -1; |
1406 | if (secondary_class == NO_REGS && sri.icode == CODE_FOR_nothing) |
1407 | return false; |
1408 | *change_p = true; |
1409 | new_reg = NULL_RTX; |
1410 | if (secondary_class != NO_REGS) |
1411 | new_reg = lra_create_new_reg_with_unique_value (GET_MODE (src), NULL_RTX, |
1412 | secondary_class, NULL, |
1413 | "secondary" ); |
1414 | start_sequence (); |
1415 | if (sri.icode == CODE_FOR_nothing) |
1416 | lra_emit_move (new_reg, src); |
1417 | else |
1418 | { |
1419 | enum reg_class scratch_class; |
1420 | |
1421 | scratch_class = (reg_class_from_constraints |
1422 | (insn_data[sri.icode].operand[2].constraint)); |
1423 | scratch_reg = (lra_create_new_reg_with_unique_value |
1424 | (insn_data[sri.icode].operand[2].mode, NULL_RTX, |
1425 | scratch_class, NULL, "scratch" )); |
1426 | emit_insn (GEN_FCN (sri.icode) (new_reg != NULL_RTX ? new_reg : dest, |
1427 | src, scratch_reg)); |
1428 | } |
1429 | before = get_insns (); |
1430 | end_sequence (); |
1431 | lra_process_new_insns (curr_insn, before, NULL, "Inserting the move" ); |
1432 | if (new_reg != NULL_RTX) |
1433 | SET_SRC (curr_insn_set) = new_reg; |
1434 | else |
1435 | { |
1436 | if (lra_dump_file != NULL) |
1437 | { |
1438 | fprintf (lra_dump_file, "Deleting move %u\n", INSN_UID (curr_insn)); |
1439 | dump_insn_slim (lra_dump_file, curr_insn); |
1440 | } |
1441 | lra_set_insn_deleted (curr_insn); |
1442 | return true; |
1443 | } |
1444 | return false; |
1445 | } |
1446 | |
1447 | /* The following data describe the result of process_alt_operands. |
1448 | The data are used in curr_insn_transform to generate reloads. */ |
1449 | |
1450 | /* The chosen reg classes which should be used for the corresponding |
1451 | operands. */ |
1452 | static enum reg_class goal_alt[MAX_RECOG_OPERANDS]; |
1453 | /* Hard registers which cannot be a start hard register for the corresponding |
1454 | operands. */ |
1455 | static HARD_REG_SET goal_alt_exclude_start_hard_regs[MAX_RECOG_OPERANDS]; |
1456 | /* True if the operand should be the same as another operand and that |
1457 | other operand does not need a reload. */ |
1458 | static bool goal_alt_match_win[MAX_RECOG_OPERANDS]; |
1459 | /* True if the operand does not need a reload. */ |
1460 | static bool goal_alt_win[MAX_RECOG_OPERANDS]; |
1461 | /* True if the operand can be offsetable memory. */ |
1462 | static bool goal_alt_offmemok[MAX_RECOG_OPERANDS]; |
/* The number of the operand to which the given operand can be matched,
   or -1 if there is no match.  */
1464 | static int goal_alt_matches[MAX_RECOG_OPERANDS]; |
1465 | /* The number of elements in the following array. */ |
1466 | static int goal_alt_dont_inherit_ops_num; |
1467 | /* Numbers of operands whose reload pseudos should not be inherited. */ |
1468 | static int goal_alt_dont_inherit_ops[MAX_RECOG_OPERANDS]; |
1469 | /* True if we should try only this alternative for the next constraint sub-pass |
1470 | to speed up the sub-pass. */ |
1471 | static bool goal_reuse_alt_p; |
1472 | /* True if the insn commutative operands should be swapped. */ |
1473 | static bool goal_alt_swapped; |
1474 | /* The chosen insn alternative. */ |
1475 | static int goal_alt_number; |
1476 | /* True if output reload of the stack pointer should be generated. */ |
1477 | static bool goal_alt_out_sp_reload_p; |
1478 | |
1479 | /* True if the corresponding operand is the result of an equivalence |
1480 | substitution. */ |
1481 | static bool equiv_substition_p[MAX_RECOG_OPERANDS]; |
1482 | |
1483 | /* The following five variables are used to choose the best insn |
1484 | alternative. They reflect final characteristics of the best |
1485 | alternative. */ |
1486 | |
1487 | /* Number of necessary reloads and overall cost reflecting the |
1488 | previous value and other unpleasantness of the best alternative. */ |
1489 | static int best_losers, best_overall; |
/* Overall number of hard registers used for reloads.  For example, on
1491 | some targets we need 2 general registers to reload DFmode and only |
1492 | one floating point register. */ |
1493 | static int best_reload_nregs; |
/* Overall number reflecting the distances of previous reloads of the
   same value.  The distances are counted from the current BB start.  It is
1496 | used to improve inheritance chances. */ |
1497 | static int best_reload_sum; |
1498 | |
/* True if the current insn should have no input or output reloads,
   respectively.  */
1501 | static bool no_input_reloads_p, no_output_reloads_p; |
1502 | |
1503 | /* True if we swapped the commutative operands in the current |
1504 | insn. */ |
1505 | static int curr_swapped; |
1506 | |
/* If CHECK_ONLY_P is false, arrange for address element *LOC to be a
1508 | register of class CL. Add any input reloads to list BEFORE. AFTER |
1509 | is nonnull if *LOC is an automodified value; handle that case by |
1510 | adding the required output reloads to list AFTER. Return true if |
1511 | the RTL was changed. |
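
   For example (illustrative): if *LOC is an automodified value such as
   (pre_inc:SI (reg)), the register is both used and set, so the copy into
   the reload register is added to BEFORE and the copy back to AFTER.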
1512 | |
   If CHECK_ONLY_P is true, check that the *LOC is a correct address
1514 | register. Return false if the address register is correct. */ |
1515 | static bool |
1516 | process_addr_reg (rtx *loc, bool check_only_p, rtx_insn **before, rtx_insn **after, |
1517 | enum reg_class cl) |
1518 | { |
1519 | int regno; |
1520 | enum reg_class rclass, new_class; |
1521 | rtx reg; |
1522 | rtx new_reg; |
1523 | machine_mode mode; |
1524 | bool subreg_p, before_p = false; |
1525 | |
1526 | subreg_p = GET_CODE (*loc) == SUBREG; |
1527 | if (subreg_p) |
1528 | { |
1529 | reg = SUBREG_REG (*loc); |
1530 | mode = GET_MODE (reg); |
1531 | |
      /* For a mode with size bigger than ptr_mode, there is unlikely to be a
	 "mov" between two registers with different classes, but there
	 normally will be a "mov" which transfers an element of a vector
	 register into a general register, and this will normally be a subreg
	 which should be reloaded as a whole.  This is particularly likely to
	 be triggered when -fno-split-wide-types is specified.  */
1538 | if (!REG_P (reg) |
	  || in_class_p (reg, cl, &new_class)
1540 | || known_le (GET_MODE_SIZE (mode), GET_MODE_SIZE (ptr_mode))) |
1541 | loc = &SUBREG_REG (*loc); |
1542 | } |
1543 | |
1544 | reg = *loc; |
1545 | mode = GET_MODE (reg); |
1546 | if (! REG_P (reg)) |
1547 | { |
1548 | if (check_only_p) |
1549 | return true; |
1550 | /* Always reload memory in an address even if the target supports |
1551 | such addresses. */ |
1552 | new_reg = lra_create_new_reg_with_unique_value (mode, reg, cl, NULL, |
1553 | "address" ); |
1554 | before_p = true; |
1555 | } |
1556 | else |
1557 | { |
1558 | regno = REGNO (reg); |
1559 | rclass = get_reg_class (regno); |
1560 | if (! check_only_p |
	  && (*loc = get_equiv_with_elimination (reg, curr_insn)) != reg)
1562 | { |
1563 | if (lra_dump_file != NULL) |
1564 | { |
	      fprintf (lra_dump_file,
		       "Changing pseudo %d in address of insn %u on equiv ",
		       REGNO (reg), INSN_UID (curr_insn));
	      dump_value_slim (lra_dump_file, *loc, 1);
	      fprintf (lra_dump_file, "\n");
1570 | } |
1571 | *loc = copy_rtx (*loc); |
1572 | } |
      if (*loc != reg || ! in_class_p (reg, cl, &new_class))
1574 | { |
1575 | if (check_only_p) |
1576 | return true; |
1577 | reg = *loc; |
	  if (get_reload_reg (after == NULL ? OP_IN : OP_INOUT,
			      mode, reg, cl, NULL,
			      subreg_p, "address", &new_reg))
1581 | before_p = true; |
1582 | } |
1583 | else if (new_class != NO_REGS && rclass != new_class) |
1584 | { |
1585 | if (check_only_p) |
1586 | return true; |
	  lra_change_class (regno, new_class, "	   Change to", true);
1588 | return false; |
1589 | } |
1590 | else |
1591 | return false; |
1592 | } |
1593 | if (before_p) |
1594 | { |
1595 | push_to_sequence (*before); |
1596 | lra_emit_move (new_reg, reg); |
1597 | *before = get_insns (); |
1598 | end_sequence (); |
1599 | } |
1600 | *loc = new_reg; |
1601 | if (after != NULL) |
1602 | { |
1603 | start_sequence (); |
1604 | lra_emit_move (before_p ? copy_rtx (reg) : reg, new_reg); |
1605 | emit_insn (*after); |
1606 | *after = get_insns (); |
1607 | end_sequence (); |
1608 | } |
1609 | return true; |
1610 | } |
1611 | |
1612 | /* Insert move insn in simplify_operand_subreg. BEFORE returns |
1613 | the insn to be inserted before curr insn. AFTER returns the |
   insn to be inserted after curr insn.  ORIGREG and NEWREG
1615 | are the original reg and new reg for reload. */ |
1616 | static void |
1617 | insert_move_for_subreg (rtx_insn **before, rtx_insn **after, rtx origreg, |
1618 | rtx newreg) |
1619 | { |
1620 | if (before) |
1621 | { |
1622 | push_to_sequence (*before); |
1623 | lra_emit_move (newreg, origreg); |
1624 | *before = get_insns (); |
1625 | end_sequence (); |
1626 | } |
1627 | if (after) |
1628 | { |
1629 | start_sequence (); |
1630 | lra_emit_move (origreg, newreg); |
1631 | emit_insn (*after); |
1632 | *after = get_insns (); |
1633 | end_sequence (); |
1634 | } |
1635 | } |
1636 | |
1637 | static bool valid_address_p (machine_mode mode, rtx addr, addr_space_t as); |
1638 | static bool process_address (int, bool, rtx_insn **, rtx_insn **); |
1639 | |
1640 | /* Make reloads for subreg in operand NOP with internal subreg mode |
1641 | REG_MODE, add new reloads for further processing. Return true if |
1642 | any change was done. */ |
1643 | static bool |
1644 | simplify_operand_subreg (int nop, machine_mode reg_mode) |
1645 | { |
1646 | int hard_regno, inner_hard_regno; |
1647 | rtx_insn *before, *after; |
1648 | machine_mode mode, innermode; |
1649 | rtx reg, new_reg; |
1650 | rtx operand = *curr_id->operand_loc[nop]; |
1651 | enum reg_class regclass; |
1652 | enum op_type type; |
1653 | |
1654 | before = after = NULL; |
1655 | |
1656 | if (GET_CODE (operand) != SUBREG) |
1657 | return false; |
1658 | |
1659 | mode = GET_MODE (operand); |
1660 | reg = SUBREG_REG (operand); |
1661 | innermode = GET_MODE (reg); |
1662 | type = curr_static_id->operand[nop].type; |
1663 | if (MEM_P (reg)) |
1664 | { |
1665 | const bool addr_was_valid |
	= valid_address_p (innermode, XEXP (reg, 0), MEM_ADDR_SPACE (reg));
1667 | alter_subreg (curr_id->operand_loc[nop], false); |
1668 | rtx subst = *curr_id->operand_loc[nop]; |
1669 | lra_assert (MEM_P (subst)); |
1670 | const bool addr_is_valid = valid_address_p (GET_MODE (subst), |
1671 | XEXP (subst, 0), |
1672 | MEM_ADDR_SPACE (subst)); |
1673 | if (!addr_was_valid |
1674 | || addr_is_valid |
	  || ((get_constraint_type (lookup_constraint
				    (curr_static_id->operand[nop].constraint))
1677 | != CT_SPECIAL_MEMORY) |
1678 | /* We still can reload address and if the address is |
1679 | valid, we can remove subreg without reloading its |
1680 | inner memory. */ |
1681 | && valid_address_p (GET_MODE (subst), |
				  regno_reg_rtx
				  [ira_class_hard_regs
				   [base_reg_class (GET_MODE (subst),
						    MEM_ADDR_SPACE (subst),
						    ADDRESS, SCRATCH)][0]],
1687 | MEM_ADDR_SPACE (subst)))) |
1688 | { |
1689 | /* If we change the address for a paradoxical subreg of memory, the |
1690 | new address might violate the necessary alignment or the access |
1691 | might be slow; take this into consideration. We need not worry |
1692 | about accesses beyond allocated memory for paradoxical memory |
1693 | subregs as we don't substitute such equiv memory (see processing |
1694 | equivalences in function lra_constraints) and because for spilled |
1695 | pseudos we allocate stack memory enough for the biggest |
1696 | corresponding paradoxical subreg. |
1697 | |
1698 | However, do not blindly simplify a (subreg (mem ...)) for |
1699 | WORD_REGISTER_OPERATIONS targets as this may lead to loading junk |
1700 | data into a register when the inner is narrower than outer or |
1701 | missing important data from memory when the inner is wider than |
1702 | outer. This rule only applies to modes that are no wider than |
1703 | a word. |
1704 | |
1705 | If valid memory becomes invalid after subreg elimination |
1706 | and address might be different we still have to reload |
1707 | memory. |
1708 | */ |
1709 | if ((! addr_was_valid |
1710 | || addr_is_valid |
1711 | || known_eq (GET_MODE_SIZE (mode), GET_MODE_SIZE (innermode))) |
	      && !(maybe_ne (GET_MODE_PRECISION (mode),
			     GET_MODE_PRECISION (innermode))
1714 | && known_le (GET_MODE_SIZE (mode), UNITS_PER_WORD) |
1715 | && known_le (GET_MODE_SIZE (innermode), UNITS_PER_WORD) |
1716 | && WORD_REGISTER_OPERATIONS) |
1717 | && (!(MEM_ALIGN (subst) < GET_MODE_ALIGNMENT (mode) |
1718 | && targetm.slow_unaligned_access (mode, MEM_ALIGN (subst))) |
1719 | || (MEM_ALIGN (reg) < GET_MODE_ALIGNMENT (innermode) |
1720 | && targetm.slow_unaligned_access (innermode, |
1721 | MEM_ALIGN (reg))))) |
1722 | return true; |
1723 | |
1724 | *curr_id->operand_loc[nop] = operand; |
1725 | |
1726 | /* But if the address was not valid, we cannot reload the MEM without |
1727 | reloading the address first. */ |
1728 | if (!addr_was_valid) |
1729 | process_address (nop, false, &before, &after); |
1730 | |
1731 | /* INNERMODE is fast, MODE slow. Reload the mem in INNERMODE. */ |
1732 | enum reg_class rclass |
1733 | = (enum reg_class) targetm.preferred_reload_class (reg, ALL_REGS); |
	  if (get_reload_reg (curr_static_id->operand[nop].type, innermode,
			      reg, rclass, NULL,
			      true, "slow/invalid mem", &new_reg))
1737 | { |
1738 | bool insert_before, insert_after; |
1739 | bitmap_set_bit (&lra_subreg_reload_pseudos, REGNO (new_reg)); |
1740 | |
1741 | insert_before = (type != OP_OUT |
			       || partial_subreg_p (mode, innermode));
1743 | insert_after = type != OP_IN; |
	      insert_move_for_subreg (insert_before ? &before : NULL,
				      insert_after ? &after : NULL,
				      reg, new_reg);
1747 | } |
1748 | SUBREG_REG (operand) = new_reg; |
1749 | |
1750 | /* Convert to MODE. */ |
1751 | reg = operand; |
1752 | rclass |
1753 | = (enum reg_class) targetm.preferred_reload_class (reg, ALL_REGS); |
	  if (get_reload_reg (curr_static_id->operand[nop].type, mode, reg,
			      rclass, NULL,
			      true, "slow/invalid mem", &new_reg))
1757 | { |
1758 | bool insert_before, insert_after; |
1759 | bitmap_set_bit (&lra_subreg_reload_pseudos, REGNO (new_reg)); |
1760 | |
1761 | insert_before = type != OP_OUT; |
1762 | insert_after = type != OP_IN; |
	      insert_move_for_subreg (insert_before ? &before : NULL,
				      insert_after ? &after : NULL,
				      reg, new_reg);
1766 | } |
1767 | *curr_id->operand_loc[nop] = new_reg; |
1768 | lra_process_new_insns (curr_insn, before, after, |
1769 | "Inserting slow/invalid mem reload" ); |
1770 | return true; |
1771 | } |
1772 | |
      /* If the address was valid and became invalid, prefer to reload
	 the memory.  A typical case is when the index scale should
	 correspond to the memory mode.  */
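      /* E.g. on some targets the scale of an index register must match the
	 access size, so an address that was valid for the inner mode can
	 become invalid for the outer mode (illustrative example).  */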
1776 | *curr_id->operand_loc[nop] = operand; |
1777 | /* Do not return false here as the MEM_P (reg) will be processed |
1778 | later in this function. */ |
1779 | } |
1780 | else if (REG_P (reg) && REGNO (reg) < FIRST_PSEUDO_REGISTER) |
1781 | { |
1782 | alter_subreg (curr_id->operand_loc[nop], false); |
1783 | return true; |
1784 | } |
1785 | else if (CONSTANT_P (reg)) |
1786 | { |
      /* Try to simplify a subreg of a constant.  It is usually the result
	 of an equivalence substitution.  */
1789 | if (innermode == VOIDmode |
1790 | && (innermode = original_subreg_reg_mode[nop]) == VOIDmode) |
1791 | innermode = curr_static_id->operand[nop].mode; |
      if ((new_reg = simplify_subreg (mode, reg, innermode,
1793 | SUBREG_BYTE (operand))) != NULL_RTX) |
1794 | { |
1795 | *curr_id->operand_loc[nop] = new_reg; |
1796 | return true; |
1797 | } |
1798 | } |
  /* Put the constant into memory when we have mixed modes.  It generates
     better code in most cases as it does not need a secondary reload
     memory.  It also prevents LRA from looping when LRA keeps using
     secondary reload memory again and again.  */
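  /* Illustrative example: equivalence substitution can produce an operand
     like (subreg:SF (const_int ...) 0); moving an integer constant directly
     into a floating point register would itself typically need a secondary
     reload, so it is better to take the constant from the constant pool.  */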
1803 | if (CONSTANT_P (reg) && CONST_POOL_OK_P (reg_mode, reg) |
1804 | && SCALAR_INT_MODE_P (reg_mode) != SCALAR_INT_MODE_P (mode)) |
1805 | { |
1806 | SUBREG_REG (operand) = force_const_mem (reg_mode, reg); |
1807 | alter_subreg (curr_id->operand_loc[nop], false); |
1808 | return true; |
1809 | } |
1810 | auto fp_subreg_can_be_simplified_after_reload_p = [] (machine_mode innermode, |
1811 | poly_uint64 offset, |
1812 | machine_mode mode) { |
1813 | reload_completed = 1; |
1814 | bool res = simplify_subreg_regno (FRAME_POINTER_REGNUM, |
1815 | innermode, |
1816 | offset, mode) >= 0; |
1817 | reload_completed = 0; |
1818 | return res; |
1819 | }; |
1820 | /* Force a reload of the SUBREG_REG if this is a constant or PLUS or |
1821 | if there may be a problem accessing OPERAND in the outer |
1822 | mode. */ |
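  /* An illustrative case: equivalence substitution can leave something like
     (subreg:QI (plus:SI (reg) (const_int ...)) 0); the PLUS cannot be
     accessed directly in the outer mode, so its value is first loaded into
     a new pseudo which then becomes the SUBREG_REG.  */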
1823 | if ((REG_P (reg) |
1824 | && REGNO (reg) >= FIRST_PSEUDO_REGISTER |
1825 | && (hard_regno = lra_get_regno_hard_regno (REGNO (reg))) >= 0 |
       /* Don't reload paradoxical subregs because we could loop forever,
	  repeatedly ending up with a final regno out of the hard reg range.  */
       && (hard_regno_nregs (hard_regno, innermode)
	   >= hard_regno_nregs (hard_regno, mode))
1830 | && simplify_subreg_regno (hard_regno, innermode, |
1831 | SUBREG_BYTE (operand), mode) < 0 |
       /* Exclude reloading of the frame pointer in a subreg if the frame
	  pointer cannot be simplified here only because the reload is not
	  finished yet.  */
1834 | && (hard_regno != FRAME_POINTER_REGNUM |
1835 | || !fp_subreg_can_be_simplified_after_reload_p (innermode, |
1836 | SUBREG_BYTE (operand), |
1837 | mode)) |
       /* Don't reload a subreg for a matching reload.  It is actually a
	  valid subreg in LRA.  */
1840 | && ! LRA_SUBREG_P (operand)) |
1841 | || CONSTANT_P (reg) || GET_CODE (reg) == PLUS || MEM_P (reg)) |
1842 | { |
1843 | enum reg_class rclass; |
1844 | |
1845 | if (REG_P (reg)) |
1846 | /* There is a big probability that we will get the same class |
1847 | for the new pseudo and we will get the same insn which |
1848 | means infinite looping. So spill the new pseudo. */ |
1849 | rclass = NO_REGS; |
1850 | else |
1851 | /* The class will be defined later in curr_insn_transform. */ |
1852 | rclass |
1853 | = (enum reg_class) targetm.preferred_reload_class (reg, ALL_REGS); |
1854 | |
      if (get_reload_reg (curr_static_id->operand[nop].type, reg_mode, reg,
			  rclass, NULL,
			  true, "subreg reg", &new_reg))
1858 | { |
1859 | bool insert_before, insert_after; |
1860 | bitmap_set_bit (&lra_subreg_reload_pseudos, REGNO (new_reg)); |
1861 | |
1862 | insert_before = (type != OP_OUT |
1863 | || read_modify_subreg_p (operand)); |
1864 | insert_after = (type != OP_IN); |
	  insert_move_for_subreg (insert_before ? &before : NULL,
				  insert_after ? &after : NULL,
				  reg, new_reg);
1868 | } |
1869 | SUBREG_REG (operand) = new_reg; |
1870 | lra_process_new_insns (curr_insn, before, after, |
1871 | "Inserting subreg reload" ); |
1872 | return true; |
1873 | } |
1874 | /* Force a reload for a paradoxical subreg. For paradoxical subreg, |
1875 | IRA allocates hardreg to the inner pseudo reg according to its mode |
1876 | instead of the outermode, so the size of the hardreg may not be enough |
     to contain the outermode operand; in that case we may need to insert a
     reload for the reg.  For the following two types of paradoxical subreg,
1879 | we need to insert reload: |
1880 | 1. If the op_type is OP_IN, and the hardreg could not be paired with |
1881 | other hardreg to contain the outermode operand |
1882 | (checked by in_hard_reg_set_p), we need to insert the reload. |
1883 | 2. If the op_type is OP_OUT or OP_INOUT. |
1884 | |
1885 | Here is a paradoxical subreg example showing how the reload is generated: |
1886 | |
1887 | (insn 5 4 7 2 (set (reg:TI 106 [ __comp ]) |
1888 | (subreg:TI (reg:DI 107 [ __comp ]) 0)) {*movti_internal_rex64} |
1889 | |
     In IRA, reg107 is allocated to a DImode hardreg.  We use x86-64 as an
     example here: if reg107 is assigned to hardreg R15, then because R15 is
     the last hardreg, the compiler cannot find another hardreg to pair with
     R15 to contain TImode data.  So we insert a TImode reload reg180 for it.
1894 | After reload is inserted: |
1895 | |
1896 | (insn 283 0 0 (set (subreg:DI (reg:TI 180 [orig:107 __comp ] [107]) 0) |
1897 | (reg:DI 107 [ __comp ])) -1 |
1898 | (insn 5 4 7 2 (set (reg:TI 106 [ __comp ]) |
1899 | (subreg:TI (reg:TI 180 [orig:107 __comp ] [107]) 0)) {*movti_internal_rex64} |
1900 | |
1901 | Two reload hard registers will be allocated to reg180 to save TImode data |
1902 | in LRA_assign. |
1903 | |
1904 | For LRA pseudos this should normally be handled by the biggest_mode |
1905 | mechanism. However, it's possible for new uses of an LRA pseudo |
1906 | to be introduced after we've allocated it, such as when undoing |
1907 | inheritance, and the allocated register might not then be appropriate |
1908 | for the new uses. */ |
1909 | else if (REG_P (reg) |
1910 | && REGNO (reg) >= FIRST_PSEUDO_REGISTER |
	   && paradoxical_subreg_p (operand)
1912 | && (inner_hard_regno = lra_get_regno_hard_regno (REGNO (reg))) >= 0 |
1913 | && ((hard_regno |
1914 | = simplify_subreg_regno (inner_hard_regno, innermode, |
1915 | SUBREG_BYTE (operand), mode)) < 0 |
	       || ((hard_regno_nregs (inner_hard_regno, innermode)
		    < hard_regno_nregs (hard_regno, mode))
1918 | && (regclass = lra_get_allocno_class (REGNO (reg))) |
1919 | && (type != OP_IN |
		       || !in_hard_reg_set_p (reg_class_contents[regclass],
					      mode, hard_regno)
		       || overlaps_hard_reg_set_p (lra_no_alloc_regs,
						   mode, hard_regno)))))
1924 | { |
1925 | /* The class will be defined later in curr_insn_transform. */ |
1926 | enum reg_class rclass |
1927 | = (enum reg_class) targetm.preferred_reload_class (reg, ALL_REGS); |
1928 | |
      if (get_reload_reg (curr_static_id->operand[nop].type, mode, reg,
			  rclass, NULL,
			  true, "paradoxical subreg", &new_reg))
1932 | { |
1933 | rtx subreg; |
1934 | bool insert_before, insert_after; |
1935 | |
	  PUT_MODE (new_reg, mode);
1937 | subreg = gen_lowpart_SUBREG (innermode, new_reg); |
1938 | bitmap_set_bit (&lra_subreg_reload_pseudos, REGNO (new_reg)); |
1939 | |
1940 | insert_before = (type != OP_OUT); |
1941 | insert_after = (type != OP_IN); |
	  insert_move_for_subreg (insert_before ? &before : NULL,
				  insert_after ? &after : NULL,
				  reg, subreg);
1945 | } |
1946 | SUBREG_REG (operand) = new_reg; |
1947 | lra_process_new_insns (curr_insn, before, after, |
1948 | "Inserting paradoxical subreg reload" ); |
1949 | return true; |
1950 | } |
1951 | return false; |
1952 | } |
1953 | |
/* Return TRUE if X refers to a hard register from SET.  */
1955 | static bool |
1956 | uses_hard_regs_p (rtx x, HARD_REG_SET set) |
1957 | { |
1958 | int i, j, x_hard_regno; |
1959 | machine_mode mode; |
1960 | const char *fmt; |
1961 | enum rtx_code code; |
1962 | |
1963 | if (x == NULL_RTX) |
1964 | return false; |
1965 | code = GET_CODE (x); |
1966 | mode = GET_MODE (x); |
1967 | |
1968 | if (code == SUBREG) |
1969 | { |
1970 | /* For all SUBREGs we want to check whether the full multi-register |
1971 | overlaps the set. For normal SUBREGs this means 'get_hard_regno' of |
1972 | the inner register, for paradoxical SUBREGs this means the |
1973 | 'get_hard_regno' of the full SUBREG and for complete SUBREGs either is |
1974 | fine. Use the wider mode for all cases. */ |
1975 | rtx subreg = SUBREG_REG (x); |
1976 | mode = wider_subreg_mode (x); |
1977 | if (mode == GET_MODE (subreg)) |
1978 | { |
1979 | x = subreg; |
1980 | code = GET_CODE (x); |
1981 | } |
1982 | } |
1983 | |
1984 | if (REG_P (x) || SUBREG_P (x)) |
1985 | { |
1986 | x_hard_regno = get_hard_regno (x); |
1987 | return (x_hard_regno >= 0 |
	      && overlaps_hard_reg_set_p (set, mode, x_hard_regno));
1989 | } |
1990 | fmt = GET_RTX_FORMAT (code); |
1991 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
1992 | { |
1993 | if (fmt[i] == 'e') |
1994 | { |
1995 | if (uses_hard_regs_p (XEXP (x, i), set)) |
1996 | return true; |
1997 | } |
1998 | else if (fmt[i] == 'E') |
1999 | { |
2000 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
2001 | if (uses_hard_regs_p (XVECEXP (x, i, j), set)) |
2002 | return true; |
2003 | } |
2004 | } |
2005 | return false; |
2006 | } |
2007 | |
2008 | /* Return true if OP is a spilled pseudo. */ |
2009 | static inline bool |
2010 | spilled_pseudo_p (rtx op) |
2011 | { |
2012 | return (REG_P (op) |
2013 | && REGNO (op) >= FIRST_PSEUDO_REGISTER && in_mem_p (REGNO (op))); |
2014 | } |
2015 | |
2016 | /* Return true if X is a general constant. */ |
2017 | static inline bool |
2018 | general_constant_p (rtx x) |
2019 | { |
2020 | return CONSTANT_P (x) && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (x)); |
2021 | } |
2022 | |
2023 | static bool |
2024 | reg_in_class_p (rtx reg, enum reg_class cl) |
2025 | { |
2026 | if (cl == NO_REGS) |
2027 | return get_reg_class (REGNO (reg)) == NO_REGS; |
2028 | return in_class_p (reg, cl, NULL); |
2029 | } |
2030 | |
2031 | /* Return true if SET of RCLASS contains no hard regs which can be |
2032 | used in MODE. */ |
2033 | static bool |
2034 | prohibited_class_reg_set_mode_p (enum reg_class rclass, |
2035 | HARD_REG_SET &set, |
2036 | machine_mode mode) |
2037 | { |
2038 | HARD_REG_SET temp; |
2039 | |
2040 | lra_assert (hard_reg_set_subset_p (reg_class_contents[rclass], set)); |
2041 | temp = set & ~lra_no_alloc_regs; |
2042 | return (hard_reg_set_subset_p |
	  (temp, ira_prohibited_class_mode_regs[rclass][mode]));
2044 | } |
2045 | |
2046 | |
2047 | /* Used to check validity info about small class input operands. It |
2048 | should be incremented at start of processing an insn |
2049 | alternative. */ |
2050 | static unsigned int curr_small_class_check = 0; |
2051 | |
2052 | /* Update number of used inputs of class OP_CLASS for operand NOP |
2053 | of alternative NALT. Return true if we have more such class operands |
2054 | than the number of available regs. */ |
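/* For example (illustrative): if all but one hard reg of a small class have
   been fixed with -ffixed-REG, an alternative that needs two distinct input
   operands of that class typically cannot be satisfied.  */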
2055 | static bool |
2056 | update_and_check_small_class_inputs (int nop, int nalt, |
2057 | enum reg_class op_class) |
2058 | { |
2059 | static unsigned int small_class_check[LIM_REG_CLASSES]; |
2060 | static int small_class_input_nums[LIM_REG_CLASSES]; |
2061 | |
2062 | if (SMALL_REGISTER_CLASS_P (op_class) |
      /* We are interested in classes that became small because of fixing
	 some hard regs, e.g. by a user through GCC options.  */
2065 | && hard_reg_set_intersect_p (reg_class_contents[op_class], |
2066 | ira_no_alloc_regs) |
2067 | && (curr_static_id->operand[nop].type != OP_OUT |
2068 | || TEST_BIT (curr_static_id->operand[nop].early_clobber_alts, nalt))) |
2069 | { |
2070 | if (small_class_check[op_class] == curr_small_class_check) |
2071 | small_class_input_nums[op_class]++; |
2072 | else |
2073 | { |
2074 | small_class_check[op_class] = curr_small_class_check; |
2075 | small_class_input_nums[op_class] = 1; |
2076 | } |
2077 | if (small_class_input_nums[op_class] > ira_class_hard_regs_num[op_class]) |
2078 | return true; |
2079 | } |
2080 | return false; |
2081 | } |
2082 | |
2083 | /* Print operand constraints for alternative ALT_NUMBER of the current |
2084 | insn. */ |
2085 | static void |
2086 | print_curr_insn_alt (int alt_number) |
2087 | { |
2088 | for (int i = 0; i < curr_static_id->n_operands; i++) |
2089 | { |
2090 | const char *p = (curr_static_id->operand_alternative |
2091 | [alt_number * curr_static_id->n_operands + i].constraint); |
2092 | if (*p == '\0') |
2093 | continue; |
      fprintf (lra_dump_file, "  (%d) ", i);
      for (; *p != '\0' && *p != ',' && *p != '#'; p++)
	fputc (*p, lra_dump_file);
2097 | } |
2098 | } |
2099 | |
2100 | /* Major function to choose the current insn alternative and what |
2101 | operands should be reloaded and how. If ONLY_ALTERNATIVE is not |
2102 | negative we should consider only this alternative. Return false if |
2103 | we cannot choose the alternative or find how to reload the |
2104 | operands. */ |
2105 | static bool |
2106 | process_alt_operands (int only_alternative) |
2107 | { |
2108 | bool ok_p = false; |
2109 | int nop, overall, nalt; |
2110 | int n_alternatives = curr_static_id->n_alternatives; |
2111 | int n_operands = curr_static_id->n_operands; |
2112 | /* LOSERS counts the operands that don't fit this alternative and |
2113 | would require loading. */ |
2114 | int losers; |
2115 | int addr_losers; |
2116 | /* REJECT is a count of how undesirable this alternative says it is |
2117 | if any reloading is required. If the alternative matches exactly |
2118 | then REJECT is ignored, but otherwise it gets this much counted |
2119 | against it in addition to the reloading needed. */ |
2120 | int reject; |
2121 | /* This is defined by '!' or '?' alternative constraint and added to |
2122 | reject. But in some cases it can be ignored. */ |
2123 | int static_reject; |
2124 | int op_reject; |
2125 | /* The number of elements in the following array. */ |
2126 | int early_clobbered_regs_num; |
2127 | /* Numbers of operands which are early clobber registers. */ |
2128 | int early_clobbered_nops[MAX_RECOG_OPERANDS]; |
2129 | enum reg_class curr_alt[MAX_RECOG_OPERANDS]; |
2130 | HARD_REG_SET curr_alt_set[MAX_RECOG_OPERANDS]; |
2131 | HARD_REG_SET curr_alt_exclude_start_hard_regs[MAX_RECOG_OPERANDS]; |
2132 | bool curr_alt_match_win[MAX_RECOG_OPERANDS]; |
2133 | bool curr_alt_win[MAX_RECOG_OPERANDS]; |
2134 | bool curr_alt_offmemok[MAX_RECOG_OPERANDS]; |
2135 | int curr_alt_matches[MAX_RECOG_OPERANDS]; |
2136 | /* The number of elements in the following array. */ |
2137 | int curr_alt_dont_inherit_ops_num; |
2138 | /* Numbers of operands whose reload pseudos should not be inherited. */ |
2139 | int curr_alt_dont_inherit_ops[MAX_RECOG_OPERANDS]; |
2140 | bool curr_reuse_alt_p; |
2141 | /* True if output stack pointer reload should be generated for the current |
2142 | alternative. */ |
2143 | bool curr_alt_out_sp_reload_p; |
2144 | bool curr_alt_class_change_p; |
2145 | rtx op; |
2146 | /* The register when the operand is a subreg of register, otherwise the |
2147 | operand itself. */ |
2148 | rtx no_subreg_reg_operand[MAX_RECOG_OPERANDS]; |
2149 | /* The register if the operand is a register or subreg of register, |
2150 | otherwise NULL. */ |
2151 | rtx operand_reg[MAX_RECOG_OPERANDS]; |
2152 | int hard_regno[MAX_RECOG_OPERANDS]; |
2153 | machine_mode biggest_mode[MAX_RECOG_OPERANDS]; |
2154 | int reload_nregs, reload_sum; |
2155 | bool costly_p; |
2156 | enum reg_class cl; |
2157 | const HARD_REG_SET *cl_filter; |
2158 | |
2159 | /* Calculate some data common for all alternatives to speed up the |
2160 | function. */ |
2161 | for (nop = 0; nop < n_operands; nop++) |
2162 | { |
2163 | rtx reg; |
2164 | |
2165 | op = no_subreg_reg_operand[nop] = *curr_id->operand_loc[nop]; |
2166 | /* The real hard regno of the operand after the allocation. */ |
      hard_regno[nop] = get_hard_regno (op);
2168 | |
2169 | operand_reg[nop] = reg = op; |
2170 | biggest_mode[nop] = GET_MODE (op); |
2171 | if (GET_CODE (op) == SUBREG) |
2172 | { |
	  biggest_mode[nop] = wider_subreg_mode (op);
2174 | operand_reg[nop] = reg = SUBREG_REG (op); |
2175 | } |
2176 | if (! REG_P (reg)) |
2177 | operand_reg[nop] = NULL_RTX; |
2178 | else if (REGNO (reg) >= FIRST_PSEUDO_REGISTER |
2179 | || ((int) REGNO (reg) |
2180 | == lra_get_elimination_hard_regno (REGNO (reg)))) |
2181 | no_subreg_reg_operand[nop] = reg; |
2182 | else |
2183 | operand_reg[nop] = no_subreg_reg_operand[nop] |
2184 | /* Just use natural mode for elimination result. It should |
2185 | be enough for extra constraints hooks. */ |
2186 | = regno_reg_rtx[hard_regno[nop]]; |
2187 | } |
2188 | |
2189 | /* The constraints are made of several alternatives. Each operand's |
2190 | constraint looks like foo,bar,... with commas separating the |
2191 | alternatives. The first alternatives for all operands go |
2192 | together, the second alternatives go together, etc. |
2193 | |
2194 | First loop over alternatives. */ |
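  /* E.g. with operand constraints "r,m" and "m,r" (an illustrative pair),
     alternative 0 pairs (r, m) and alternative 1 pairs (m, r).  */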
2195 | alternative_mask preferred = curr_id->preferred_alternatives; |
2196 | if (only_alternative >= 0) |
2197 | preferred &= ALTERNATIVE_BIT (only_alternative); |
2198 | |
2199 | for (nalt = 0; nalt < n_alternatives; nalt++) |
2200 | { |
2201 | /* Loop over operands for one constraint alternative. */ |
2202 | if (!TEST_BIT (preferred, nalt)) |
2203 | continue; |
2204 | |
2205 | if (lra_dump_file != NULL) |
2206 | { |
	  fprintf (lra_dump_file, "         Considering alt=%d of insn %d:  ",
		   nalt, INSN_UID (curr_insn));
	  print_curr_insn_alt (nalt);
	  fprintf (lra_dump_file, "\n");
2211 | } |
2212 | |
2213 | bool matching_early_clobber[MAX_RECOG_OPERANDS]; |
2214 | curr_small_class_check++; |
2215 | overall = losers = addr_losers = 0; |
2216 | static_reject = reject = reload_nregs = reload_sum = 0; |
2217 | for (nop = 0; nop < n_operands; nop++) |
2218 | { |
2219 | int inc = (curr_static_id |
2220 | ->operand_alternative[nalt * n_operands + nop].reject); |
2221 | if (lra_dump_file != NULL && inc != 0) |
	    fprintf (lra_dump_file,
		     "            Statically defined alt reject+=%d\n", inc);
2224 | static_reject += inc; |
2225 | matching_early_clobber[nop] = 0; |
2226 | } |
2227 | reject += static_reject; |
2228 | early_clobbered_regs_num = 0; |
2229 | curr_alt_out_sp_reload_p = false; |
2230 | curr_reuse_alt_p = true; |
2231 | curr_alt_class_change_p = false; |
2232 | |
2233 | for (nop = 0; nop < n_operands; nop++) |
2234 | { |
2235 | const char *p; |
2236 | char *end; |
2237 | int len, c, m, i, opalt_num, this_alternative_matches; |
2238 | bool win, did_match, offmemok, early_clobber_p; |
2239 | /* false => this operand can be reloaded somehow for this |
2240 | alternative. */ |
2241 | bool badop; |
2242 | /* true => this operand can be reloaded if the alternative |
2243 | allows regs. */ |
2244 | bool winreg; |
2245 | /* True if a constant forced into memory would be OK for |
2246 | this operand. */ |
2247 | bool constmemok; |
2248 | enum reg_class this_alternative, this_costly_alternative; |
2249 | HARD_REG_SET this_alternative_set, this_costly_alternative_set; |
2250 | HARD_REG_SET this_alternative_exclude_start_hard_regs; |
2251 | bool this_alternative_match_win, this_alternative_win; |
2252 | bool this_alternative_offmemok; |
2253 | bool scratch_p; |
2254 | machine_mode mode; |
2255 | enum constraint_num cn; |
2256 | bool class_change_p = false; |
2257 | |
2258 | opalt_num = nalt * n_operands + nop; |
2259 | if (curr_static_id->operand_alternative[opalt_num].anything_ok) |
2260 | { |
2261 | /* Fast track for no constraints at all. */ |
2262 | curr_alt[nop] = NO_REGS; |
	      CLEAR_HARD_REG_SET (curr_alt_set[nop]);
2264 | curr_alt_win[nop] = true; |
2265 | curr_alt_match_win[nop] = false; |
2266 | curr_alt_offmemok[nop] = false; |
2267 | curr_alt_matches[nop] = -1; |
2268 | continue; |
2269 | } |
2270 | |
2271 | op = no_subreg_reg_operand[nop]; |
2272 | mode = curr_operand_mode[nop]; |
2273 | |
2274 | win = did_match = winreg = offmemok = constmemok = false; |
2275 | badop = true; |
2276 | |
2277 | early_clobber_p = false; |
2278 | p = curr_static_id->operand_alternative[opalt_num].constraint; |
2279 | |
2280 | this_costly_alternative = this_alternative = NO_REGS; |
2281 | /* We update set of possible hard regs besides its class |
2282 | because reg class might be inaccurate. For example, |
2283 | union of LO_REGS (l), HI_REGS(h), and STACK_REG(k) in ARM |
2284 | is translated in HI_REGS because classes are merged by |
2285 | pairs and there is no accurate intermediate class. */ |
	  CLEAR_HARD_REG_SET (this_alternative_set);
	  CLEAR_HARD_REG_SET (this_costly_alternative_set);
	  CLEAR_HARD_REG_SET (this_alternative_exclude_start_hard_regs);
2289 | this_alternative_win = false; |
2290 | this_alternative_match_win = false; |
2291 | this_alternative_offmemok = false; |
2292 | this_alternative_matches = -1; |
2293 | |
2294 | /* An empty constraint should be excluded by the fast |
2295 | track. */ |
2296 | lra_assert (*p != 0 && *p != ','); |
2297 | |
2298 | op_reject = 0; |
2299 | /* Scan this alternative's specs for this operand; set WIN |
2300 | if the operand fits any letter in this alternative. |
2301 | Otherwise, clear BADOP if this operand could fit some |
2302 | letter after reloads, or set WINREG if this operand could |
2303 | fit after reloads provided the constraint allows some |
2304 | registers. */ |
2305 | costly_p = false; |
2306 | do |
2307 | { |
2308 | switch ((c = *p, len = CONSTRAINT_LEN (c, p)), c) |
2309 | { |
2310 | case '\0': |
2311 | len = 0; |
2312 | break; |
2313 | case ',': |
2314 | c = '\0'; |
2315 | break; |
2316 | |
2317 | case '&': |
2318 | early_clobber_p = true; |
2319 | break; |
2320 | |
2321 | case '$': |
2322 | op_reject += LRA_MAX_REJECT; |
2323 | break; |
2324 | case '^': |
2325 | op_reject += LRA_LOSER_COST_FACTOR; |
2326 | break; |
2327 | |
2328 | case '#': |
2329 | /* Ignore rest of this alternative. */ |
2330 | c = '\0'; |
2331 | break; |
2332 | |
2333 | case '0': case '1': case '2': case '3': case '4': |
2334 | case '5': case '6': case '7': case '8': case '9': |
2335 | { |
2336 | int m_hregno; |
2337 | bool match_p; |
2338 | |
		m = strtoul (p, &end, 10);
2340 | p = end; |
2341 | len = 0; |
2342 | lra_assert (nop > m); |
2343 | |
2344 | /* Reject matches if we don't know which operand is |
2345 | bigger. This situation would arguably be a bug in |
2346 | an .md pattern, but could also occur in a user asm. */ |
		if (!ordered_p (GET_MODE_SIZE (biggest_mode[m]),
				GET_MODE_SIZE (biggest_mode[nop])))
2349 | break; |
2350 | |
2351 | /* Don't match wrong asm insn operands for proper |
2352 | diagnostic later. */ |
2353 | if (INSN_CODE (curr_insn) < 0 |
2354 | && (curr_operand_mode[m] == BLKmode |
2355 | || curr_operand_mode[nop] == BLKmode) |
2356 | && curr_operand_mode[m] != curr_operand_mode[nop]) |
2357 | break; |
2358 | |
		m_hregno = get_hard_regno (*curr_id->operand_loc[m]);
2360 | /* We are supposed to match a previous operand. |
2361 | If we do, we win if that one did. If we do |
2362 | not, count both of the operands as losers. |
2363 | (This is too conservative, since most of the |
2364 | time only a single reload insn will be needed |
2365 | to make the two operands win. As a result, |
2366 | this alternative may be rejected when it is |
2367 | actually desirable.) */ |
2368 | match_p = false; |
		if (operands_match_p (*curr_id->operand_loc[nop],
				      *curr_id->operand_loc[m], m_hregno))
2371 | { |
2372 | /* We should reject matching of an early |
2373 | clobber operand if the matching operand is |
2374 | not dying in the insn. */ |
2375 | if (!TEST_BIT (curr_static_id->operand[m] |
2376 | .early_clobber_alts, nalt) |
2377 | || operand_reg[nop] == NULL_RTX |
2378 | || (find_regno_note (curr_insn, REG_DEAD, |
2379 | REGNO (op)) |
2380 | || REGNO (op) == REGNO (operand_reg[m]))) |
2381 | match_p = true; |
2382 | } |
2383 | if (match_p) |
2384 | { |
2385 | /* If we are matching a non-offsettable |
2386 | address where an offsettable address was |
2387 | expected, then we must reject this |
2388 | combination, because we can't reload |
2389 | it. */ |
2390 | if (curr_alt_offmemok[m] |
2391 | && MEM_P (*curr_id->operand_loc[m]) |
2392 | && curr_alt[m] == NO_REGS && ! curr_alt_win[m]) |
2393 | continue; |
2394 | } |
2395 | else |
2396 | { |
2397 | /* If the operands do not match and one |
		       operand is INOUT, we cannot match them.
2399 | Try other possibilities, e.g. other |
2400 | alternatives or commutative operand |
2401 | exchange. */ |
2402 | if (curr_static_id->operand[nop].type == OP_INOUT |
2403 | || curr_static_id->operand[m].type == OP_INOUT) |
2404 | break; |
2405 | /* Operands don't match. If the operands are |
2406 | different user defined explicit hard |
2407 | registers, then we cannot make them match |
2408 | when one is early clobber operand. */ |
2409 | if ((REG_P (*curr_id->operand_loc[nop]) |
2410 | || SUBREG_P (*curr_id->operand_loc[nop])) |
2411 | && (REG_P (*curr_id->operand_loc[m]) |
2412 | || SUBREG_P (*curr_id->operand_loc[m]))) |
2413 | { |
2414 | rtx nop_reg = *curr_id->operand_loc[nop]; |
2415 | if (SUBREG_P (nop_reg)) |
2416 | nop_reg = SUBREG_REG (nop_reg); |
2417 | rtx m_reg = *curr_id->operand_loc[m]; |
2418 | if (SUBREG_P (m_reg)) |
2419 | m_reg = SUBREG_REG (m_reg); |
2420 | |
2421 | if (REG_P (nop_reg) |
2422 | && HARD_REGISTER_P (nop_reg) |
2423 | && REG_USERVAR_P (nop_reg) |
2424 | && REG_P (m_reg) |
2425 | && HARD_REGISTER_P (m_reg) |
2426 | && REG_USERVAR_P (m_reg)) |
2427 | { |
2428 | int i; |
2429 | |
2430 | for (i = 0; i < early_clobbered_regs_num; i++) |
2431 | if (m == early_clobbered_nops[i]) |
2432 | break; |
2433 | if (i < early_clobbered_regs_num |
2434 | || early_clobber_p) |
2435 | break; |
2436 | } |
2437 | } |
2438 | /* Both operands must allow a reload register, |
2439 | otherwise we cannot make them match. */ |
2440 | if (curr_alt[m] == NO_REGS) |
2441 | break; |
2442 | /* Retroactively mark the operand we had to |
2443 | match as a loser, if it wasn't already and |
2444 | it wasn't matched to a register constraint |
		       (e.g. it might be matched by memory).  */
2446 | if (curr_alt_win[m] |
2447 | && (operand_reg[m] == NULL_RTX |
2448 | || hard_regno[m] < 0)) |
2449 | { |
2450 | losers++; |
2451 | reload_nregs |
2452 | += (ira_reg_class_max_nregs[curr_alt[m]] |
2453 | [GET_MODE (*curr_id->operand_loc[m])]); |
2454 | } |
2455 | |
2456 | /* Prefer matching earlyclobber alternative as |
2457 | it results in less hard regs required for |
2458 | the insn than a non-matching earlyclobber |
2459 | alternative. */ |
2460 | if (TEST_BIT (curr_static_id->operand[m] |
2461 | .early_clobber_alts, nalt)) |
2462 | { |
2463 | if (lra_dump_file != NULL) |
2464 | fprintf |
			    (lra_dump_file,
			     "            %d Matching earlyclobber alt:"
			     " reject--\n",
2468 | nop); |
2469 | if (!matching_early_clobber[m]) |
2470 | { |
2471 | reject--; |
2472 | matching_early_clobber[m] = 1; |
2473 | } |
2474 | } |
2475 | /* Otherwise we prefer no matching |
2476 | alternatives because it gives more freedom |
2477 | in RA. */ |
2478 | else if (operand_reg[nop] == NULL_RTX |
2479 | || (find_regno_note (curr_insn, REG_DEAD, |
2480 | REGNO (operand_reg[nop])) |
2481 | == NULL_RTX)) |
2482 | { |
2483 | if (lra_dump_file != NULL) |
2484 | fprintf |
			  (lra_dump_file,
			   "            %d Matching alt: reject+=2\n",
2487 | nop); |
2488 | reject += 2; |
2489 | } |
2490 | } |
2491 | /* If we have to reload this operand and some |
2492 | previous operand also had to match the same |
2493 | thing as this operand, we don't know how to do |
2494 | that. */ |
2495 | if (!match_p || !curr_alt_win[m]) |
2496 | { |
2497 | for (i = 0; i < nop; i++) |
2498 | if (curr_alt_matches[i] == m) |
2499 | break; |
2500 | if (i < nop) |
2501 | break; |
2502 | } |
2503 | else |
2504 | did_match = true; |
2505 | |
2506 | this_alternative_matches = m; |
2507 | /* This can be fixed with reloads if the operand |
2508 | we are supposed to match can be fixed with |
2509 | reloads. */ |
2510 | badop = false; |
2511 | this_alternative = curr_alt[m]; |
2512 | this_alternative_set = curr_alt_set[m]; |
2513 | this_alternative_exclude_start_hard_regs |
2514 | = curr_alt_exclude_start_hard_regs[m]; |
2515 | winreg = this_alternative != NO_REGS; |
2516 | break; |
2517 | } |
2518 | |
2519 | case 'g': |
2520 | if (MEM_P (op) |
		  || general_constant_p (op)
2522 | || spilled_pseudo_p (op)) |
2523 | win = true; |
2524 | cl = GENERAL_REGS; |
2525 | cl_filter = nullptr; |
2526 | goto reg; |
2527 | |
2528 | default: |
2529 | cn = lookup_constraint (p); |
	      switch (get_constraint_type (cn))
2531 | { |
2532 | case CT_REGISTER: |
		  cl = reg_class_for_constraint (cn);
2534 | if (cl != NO_REGS) |
2535 | { |
2536 | cl_filter = get_register_filter (cn); |
2537 | goto reg; |
2538 | } |
2539 | break; |
2540 | |
2541 | case CT_CONST_INT: |
2542 | if (CONST_INT_P (op) |
2543 | && insn_const_int_ok_for_constraint (INTVAL (op), cn)) |
2544 | win = true; |
2545 | break; |
2546 | |
2547 | case CT_MEMORY: |
2548 | case CT_RELAXED_MEMORY: |
2549 | if (MEM_P (op) |
		      && satisfies_memory_constraint_p (op, cn))
2551 | win = true; |
2552 | else if (spilled_pseudo_p (op)) |
2553 | win = true; |
2554 | |
2555 | /* If we didn't already win, we can reload constants |
2556 | via force_const_mem or put the pseudo value into |
2557 | memory, or make other memory by reloading the |
2558 | address like for 'o'. */ |
2559 | if (CONST_POOL_OK_P (mode, op) |
2560 | || MEM_P (op) || REG_P (op) |
2561 | /* We can restore the equiv insn by a |
2562 | reload. */ |
2563 | || equiv_substition_p[nop]) |
2564 | badop = false; |
2565 | constmemok = true; |
2566 | offmemok = true; |
2567 | break; |
2568 | |
2569 | case CT_ADDRESS: |
2570 | /* An asm operand with an address constraint |
2571 | that doesn't satisfy address_operand has |
2572 | is_address cleared, so that we don't try to |
2573 | make a non-address fit. */ |
2574 | if (!curr_static_id->operand[nop].is_address) |
2575 | break; |
2576 | /* If we didn't already win, we can reload the address |
2577 | into a base register. */ |
		  if (satisfies_address_constraint_p (op, cn))
2579 | win = true; |
2580 | cl = base_reg_class (VOIDmode, ADDR_SPACE_GENERIC, |
				       ADDRESS, SCRATCH);
2582 | cl_filter = nullptr; |
2583 | badop = false; |
2584 | goto reg; |
2585 | |
2586 | case CT_FIXED_FORM: |
		  if (constraint_satisfied_p (op, cn))
2588 | win = true; |
2589 | break; |
2590 | |
2591 | case CT_SPECIAL_MEMORY: |
		  if (satisfies_memory_constraint_p (op, cn))
2593 | win = true; |
2594 | else if (spilled_pseudo_p (op)) |
2595 | { |
2596 | curr_reuse_alt_p = false; |
2597 | win = true; |
2598 | } |
2599 | break; |
2600 | } |
2601 | break; |
2602 | |
2603 | reg: |
2604 | if (mode == BLKmode) |
2605 | break; |
2606 | this_alternative = reg_class_subunion[this_alternative][cl]; |
	      if (hard_reg_set_subset_p (this_alternative_set,
2608 | reg_class_contents[cl])) |
2609 | this_alternative_exclude_start_hard_regs |
2610 | = ira_exclude_class_mode_regs[cl][mode]; |
	      else if (!hard_reg_set_subset_p (reg_class_contents[cl],
					       this_alternative_set))
2613 | this_alternative_exclude_start_hard_regs |
2614 | |= ira_exclude_class_mode_regs[cl][mode]; |
2615 | this_alternative_set |= reg_class_contents[cl]; |
2616 | if (cl_filter) |
2617 | this_alternative_exclude_start_hard_regs |= ~*cl_filter; |
2618 | if (costly_p) |
2619 | { |
2620 | this_costly_alternative |
2621 | = reg_class_subunion[this_costly_alternative][cl]; |
2622 | this_costly_alternative_set |= reg_class_contents[cl]; |
2623 | } |
2624 | winreg = true; |
2625 | if (REG_P (op)) |
2626 | { |
2627 | tree decl; |
2628 | if (hard_regno[nop] >= 0 |
		      && in_hard_reg_set_p (this_alternative_set,
					    mode, hard_regno[nop])
		      && (!cl_filter
			  || TEST_HARD_REG_BIT (*cl_filter,
						hard_regno[nop]))
2634 | && ((REG_ATTRS (op) && (decl = REG_EXPR (op)) != NULL |
2635 | && VAR_P (decl) && DECL_HARD_REGISTER (decl)) |
			  || !(TEST_HARD_REG_BIT
			       (this_alternative_exclude_start_hard_regs,
				hard_regno[nop]))))
2639 | win = true; |
2640 | else if (hard_regno[nop] < 0) |
2641 | { |
		      if (in_class_p (op, this_alternative, NULL))
			win = true;
		      else if (in_class_p (op, this_alternative, NULL, true))
2645 | { |
2646 | class_change_p = true; |
2647 | win = true; |
2648 | } |
2649 | } |
2650 | } |
2651 | break; |
2652 | } |
2653 | if (c != ' ' && c != '\t') |
2654 | costly_p = c == '*'; |
2655 | } |
2656 | while ((p += len), c); |
2657 | |
2658 | scratch_p = (operand_reg[nop] != NULL_RTX |
2659 | && ira_former_scratch_p (REGNO (operand_reg[nop]))); |
2660 | /* Record which operands fit this alternative. */ |
2661 | if (win) |
2662 | { |
2663 | this_alternative_win = true; |
2664 | if (class_change_p) |
2665 | { |
2666 | curr_alt_class_change_p = true; |
2667 | if (lra_dump_file != NULL) |
		    fprintf (lra_dump_file,
			     "            %d Narrowing class: reject+=3\n",
2670 | nop); |
2671 | reject += 3; |
2672 | } |
2673 | if (operand_reg[nop] != NULL_RTX) |
2674 | { |
2675 | if (hard_regno[nop] >= 0) |
2676 | { |
		      if (in_hard_reg_set_p (this_costly_alternative_set,
					     mode, hard_regno[nop]))
2679 | { |
2680 | if (lra_dump_file != NULL) |
			    fprintf (lra_dump_file,
				     "            %d Costly set: reject++\n",
2683 | nop); |
2684 | reject++; |
2685 | } |
2686 | } |
2687 | else |
2688 | { |
		      /* Prefer a won reg to a spilled pseudo under otherwise
			 equal conditions for possible inheritance.  */
2691 | if (! scratch_p) |
2692 | { |
2693 | if (lra_dump_file != NULL) |
2694 | fprintf |
			      (lra_dump_file,
			       "            %d Non pseudo reload: reject++\n",
2697 | nop); |
2698 | reject++; |
2699 | } |
		      if (in_class_p (operand_reg[nop],
				      this_costly_alternative, NULL, true))
2702 | { |
2703 | if (lra_dump_file != NULL) |
2704 | fprintf |
			      (lra_dump_file,
			       "            %d Non pseudo costly reload:"
			       " reject++\n",
2708 | nop); |
2709 | reject++; |
2710 | } |
2711 | } |
2712 | /* We simulate the behavior of old reload here. |
2713 | Although scratches need hard registers and it |
2714 | might result in spilling other pseudos, no reload |
2715 | insns are generated for the scratches. So it |
2716 | might cost something but probably less than old |
2717 | reload pass believes. */ |
2718 | if (scratch_p) |
2719 | { |
2720 | if (lra_dump_file != NULL) |
		      fprintf (lra_dump_file,
			       "            %d Scratch win: reject+=2\n",
2723 | nop); |
2724 | reject += 2; |
2725 | } |
2726 | } |
2727 | } |
2728 | else if (did_match) |
2729 | this_alternative_match_win = true; |
2730 | else |
2731 | { |
2732 | int const_to_mem = 0; |
2733 | bool no_regs_p; |
2734 | |
2735 | reject += op_reject; |
2736 | /* Mark output reload of the stack pointer. */ |
2737 | if (op == stack_pointer_rtx |
2738 | && curr_static_id->operand[nop].type != OP_IN) |
2739 | curr_alt_out_sp_reload_p = true; |
2740 | |
2741 | /* If this alternative asks for a specific reg class, see if there |
2742 | is at least one allocatable register in that class. */ |
2743 | no_regs_p |
2744 | = (this_alternative == NO_REGS |
2745 | || (hard_reg_set_subset_p |
2746 | (reg_class_contents[this_alternative], |
		     lra_no_alloc_regs)));
2748 | |
2749 | /* For asms, verify that the class for this alternative is possible |
2750 | for the mode that is specified. */ |
2751 | if (!no_regs_p && INSN_CODE (curr_insn) < 0) |
2752 | { |
2753 | int i; |
2754 | for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) |
2755 | if (targetm.hard_regno_mode_ok (i, mode) |
2756 | && in_hard_reg_set_p (reg_class_contents[this_alternative], |
					   mode, i))
2758 | break; |
2759 | if (i == FIRST_PSEUDO_REGISTER) |
2760 | winreg = false; |
2761 | } |
2762 | |
2763 | /* If this operand accepts a register, and if the |
2764 | register class has at least one allocatable register, |
2765 | then this operand can be reloaded. */ |
2766 | if (winreg && !no_regs_p) |
2767 | badop = false; |
2768 | |
2769 | if (badop) |
2770 | { |
2771 | if (lra_dump_file != NULL) |
		  fprintf (lra_dump_file,
			   "            Bad operand -- refuse\n");
2774 | goto fail; |
2775 | } |
2776 | |
2777 | if (this_alternative != NO_REGS) |
2778 | { |
2779 | HARD_REG_SET available_regs |
2780 | = (reg_class_contents[this_alternative] |
2781 | & ~((ira_prohibited_class_mode_regs |
2782 | [this_alternative][mode]) |
2783 | | lra_no_alloc_regs)); |
		  if (hard_reg_set_empty_p (available_regs))
2785 | { |
2786 | /* There are no hard regs holding a value of given |
2787 | mode. */ |
2788 | if (offmemok) |
2789 | { |
2790 | this_alternative = NO_REGS; |
2791 | if (lra_dump_file != NULL) |
			    fprintf (lra_dump_file,
				     "            %d Using memory because of"
				     " a bad mode: reject+=2\n",
2795 | nop); |
2796 | reject += 2; |
2797 | } |
2798 | else |
2799 | { |
2800 | if (lra_dump_file != NULL) |
			    fprintf (lra_dump_file,
				     "            Wrong mode -- refuse\n");
2803 | goto fail; |
2804 | } |
2805 | } |
2806 | } |
2807 | |
	      /* If an unassigned pseudo has a class which is a subset of the
		 required reg class, this is a less costly alternative, as the
		 pseudo can still get a hard reg of the necessary class.  */
2812 | if (! no_regs_p && REG_P (op) && hard_regno[nop] < 0 |
2813 | && (cl = get_reg_class (REGNO (op))) != NO_REGS |
2814 | && ira_class_subset_p[this_alternative][cl]) |
2815 | { |
2816 | if (lra_dump_file != NULL) |
2817 | fprintf |
		    (lra_dump_file,
		     "            %d Super set class reg: reject-=3\n", nop);
2820 | reject -= 3; |
2821 | } |
2822 | |
2823 | this_alternative_offmemok = offmemok; |
2824 | if (this_costly_alternative != NO_REGS) |
2825 | { |
2826 | if (lra_dump_file != NULL) |
		  fprintf (lra_dump_file,
			   "            %d Costly loser: reject++\n", nop);
2829 | reject++; |
2830 | } |
	      /* If the operand is dying, has a matching constraint, and
		 satisfies the constraints of the matched operand which failed
		 to satisfy its own constraints, the reload for this operand
		 will most probably be gone.  */
2835 | if (this_alternative_matches >= 0 |
2836 | && !curr_alt_win[this_alternative_matches] |
2837 | && REG_P (op) |
2838 | && find_regno_note (curr_insn, REG_DEAD, REGNO (op)) |
2839 | && (hard_regno[nop] >= 0 |
		      ? in_hard_reg_set_p (this_alternative_set,
					   mode, hard_regno[nop])
		      : in_class_p (op, this_alternative, NULL)))
2843 | { |
2844 | if (lra_dump_file != NULL) |
2845 | fprintf |
		    (lra_dump_file,
		     "            %d Dying matched operand reload: reject++\n",
2848 | nop); |
2849 | reject++; |
2850 | } |
2851 | else |
2852 | { |
		  /* Strict_low_part requires reloading the register, not the
		     sub-register.  In this case we should check that the
		     final reload hard reg can hold the value mode.  */
2857 | if (curr_static_id->operand[nop].strict_low |
2858 | && REG_P (op) |
2859 | && hard_regno[nop] < 0 |
2860 | && GET_CODE (*curr_id->operand_loc[nop]) == SUBREG |
2861 | && ira_class_hard_regs_num[this_alternative] > 0 |
2862 | && (!targetm.hard_regno_mode_ok |
2863 | (ira_class_hard_regs[this_alternative][0], |
2864 | GET_MODE (*curr_id->operand_loc[nop])))) |
2865 | { |
2866 | if (lra_dump_file != NULL) |
2867 | fprintf |
			  (lra_dump_file,
			   "            Strict low subreg reload -- refuse\n");
2870 | goto fail; |
2871 | } |
2872 | losers++; |
2873 | } |
2874 | if (operand_reg[nop] != NULL_RTX |
2875 | /* Output operands and matched input operands are |
2876 | not inherited. The following conditions do not |
2877 | exactly describe the previous statement but they |
2878 | are pretty close. */ |
2879 | && curr_static_id->operand[nop].type != OP_OUT |
2880 | && (this_alternative_matches < 0 |
2881 | || curr_static_id->operand[nop].type != OP_IN)) |
2882 | { |
2883 | int last_reload = (lra_reg_info[ORIGINAL_REGNO |
2884 | (operand_reg[nop])] |
2885 | .last_reload); |
2886 | |
	      /* The value of reload_sum makes sense only if we process insns
		 in their order.  It happens only on the first constraints
		 sub-pass when we do most of the reload work.  */
2891 | if (lra_constraint_iter == 1 && last_reload > bb_reload_num) |
2892 | reload_sum += last_reload - bb_reload_num; |
2893 | } |
2894 | /* If this is a constant that is reloaded into the |
2895 | desired class by copying it to memory first, count |
2896 | that as another reload. This is consistent with |
2897 | other code and is required to avoid choosing another |
2898 | alternative when the constant is moved into memory. |
2899 | Note that the test here is precisely the same as in |
2900 | the code below that calls force_const_mem. */ |
2901 | if (CONST_POOL_OK_P (mode, op) |
2902 | && ((targetm.preferred_reload_class |
2903 | (op, this_alternative) == NO_REGS) |
2904 | || no_input_reloads_p)) |
2905 | { |
2906 | const_to_mem = 1; |
2907 | if (! no_regs_p) |
2908 | losers++; |
2909 | } |
2910 | |
2911 | /* Alternative loses if it requires a type of reload not |
2912 | permitted for this insn. We can always reload |
2913 | objects with a REG_UNUSED note. */ |
2914 | if ((curr_static_id->operand[nop].type != OP_IN |
2915 | && no_output_reloads_p |
2916 | && ! find_reg_note (curr_insn, REG_UNUSED, op)) |
2917 | || (curr_static_id->operand[nop].type != OP_OUT |
2918 | && no_input_reloads_p && ! const_to_mem) |
2919 | || (this_alternative_matches >= 0 |
2920 | && (no_input_reloads_p |
2921 | || (no_output_reloads_p |
2922 | && (curr_static_id->operand |
2923 | [this_alternative_matches].type != OP_IN) |
2924 | && ! find_reg_note (curr_insn, REG_UNUSED, |
2925 | no_subreg_reg_operand |
2926 | [this_alternative_matches]))))) |
2927 | { |
2928 | if (lra_dump_file != NULL) |
2929 | fprintf |
		    (lra_dump_file,
		     "            No input/output reload -- refuse\n");
2932 | goto fail; |
2933 | } |
2934 | |
	      /* Alternative loses if its required class pseudo cannot hold a
		 value of the required mode.  Such insns can be described by
		 insn definitions with mode iterators.  */
2938 | if (GET_MODE (*curr_id->operand_loc[nop]) != VOIDmode |
		  && ! hard_reg_set_empty_p (this_alternative_set)
		  /* It is common practice for constraints to use a class
		     which does not actually have enough regs to hold a value
		     of the given mode (e.g. x86 AREG for a mode requiring
		     more than one general reg).  Therefore we have 2
		     conditions to check that the reload pseudo cannot hold
		     the mode value.  */
2946 | && (!targetm.hard_regno_mode_ok |
2947 | (ira_class_hard_regs[this_alternative][0], |
2948 | GET_MODE (*curr_id->operand_loc[nop]))) |
	      /* The above condition is not enough as the first
		 reg in ira_class_hard_regs may not be aligned for
		 multi-word mode values.  */
2952 | && (prohibited_class_reg_set_mode_p |
2953 | (rclass: this_alternative, set&: this_alternative_set, |
2954 | GET_MODE (*curr_id->operand_loc[nop])))) |
2955 | { |
2956 | if (lra_dump_file != NULL) |
2957 | fprintf (stream: lra_dump_file, |
2958 | format: " reload pseudo for op %d " |
2959 | "cannot hold the mode value -- refuse\n" , |
2960 | nop); |
2961 | goto fail; |
2962 | } |
2963 | |
2964 | /* Check strong discouragement of reload of non-constant |
2965 | into class THIS_ALTERNATIVE. */ |
2966 | if (! CONSTANT_P (op) && ! no_regs_p |
2967 | && (targetm.preferred_reload_class |
2968 | (op, this_alternative) == NO_REGS |
2969 | || (curr_static_id->operand[nop].type == OP_OUT |
2970 | && (targetm.preferred_output_reload_class |
2971 | (op, this_alternative) == NO_REGS)))) |
2972 | { |
2973 | if (offmemok && REG_P (op)) |
2974 | { |
2975 | if (lra_dump_file != NULL) |
2976 | fprintf |
2977 | (stream: lra_dump_file, |
2978 | format: " %d Spill pseudo into memory: reject+=3\n" , |
2979 | nop); |
2980 | reject += 3; |
2981 | } |
2982 | else |
2983 | { |
2984 | if (lra_dump_file != NULL) |
2985 | fprintf |
2986 | (stream: lra_dump_file, |
2987 | format: " %d Non-prefered reload: reject+=%d\n" , |
2988 | nop, LRA_MAX_REJECT); |
2989 | reject += LRA_MAX_REJECT; |
2990 | } |
2991 | } |
2992 | |
2993 | if (! (MEM_P (op) && offmemok) |
2994 | && ! (const_to_mem && constmemok)) |
2995 | { |
	      /* We prefer to reload pseudos over reloading other
		 things, since such reloads can often be eliminated
		 later.  So bump REJECT in other cases.  Don't do
		 this when we are forcing a constant into memory and
		 it will then win, since we don't want a different
		 alternative to match in that case.  */
3003 | if (! (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)) |
3004 | { |
3005 | if (lra_dump_file != NULL) |
3006 | fprintf |
3007 | (stream: lra_dump_file, |
3008 | format: " %d Non-pseudo reload: reject+=2\n" , |
3009 | nop); |
3010 | reject += 2; |
3011 | } |
3012 | |
3013 | if (! no_regs_p) |
3014 | reload_nregs |
3015 | += ira_reg_class_max_nregs[this_alternative][mode]; |
3016 | |
3017 | if (SMALL_REGISTER_CLASS_P (this_alternative)) |
3018 | { |
3019 | if (lra_dump_file != NULL) |
3020 | fprintf |
3021 | (stream: lra_dump_file, |
3022 | format: " %d Small class reload: reject+=%d\n" , |
3023 | nop, LRA_LOSER_COST_FACTOR / 2); |
3024 | reject += LRA_LOSER_COST_FACTOR / 2; |
3025 | } |
3026 | } |
3027 | |
	  /* We are trying to spill a pseudo into memory.  It is
	     usually more costly than moving to a hard register
	     although it might take the same number of
	     reloads.

	     A non-pseudo spill may happen too.  Suppose a target allows both
	     register and memory in the operand constraint alternatives;
	     then it is typical that an eliminable register has a substitution
	     of "base + offset" which can either be reloaded by a simple
	     "new_reg <= base + offset" which will match the register
	     constraint, or by a similar reg addition followed by a further
	     spill to and reload from memory which will match the memory
	     constraint, but this memory spill will usually be much more
	     costly.

	     The code below increases the reject for both pseudo and
	     non-pseudo spills.  */
3045 | if (no_regs_p |
3046 | && !(MEM_P (op) && offmemok) |
3047 | && !(REG_P (op) && hard_regno[nop] < 0)) |
3048 | { |
3049 | if (lra_dump_file != NULL) |
3050 | fprintf |
3051 | (stream: lra_dump_file, |
3052 | format: " %d Spill %spseudo into memory: reject+=3\n" , |
3053 | nop, REG_P (op) ? "" : "Non-" ); |
3054 | reject += 3; |
3055 | if (VECTOR_MODE_P (mode)) |
3056 | { |
3057 | /* Spilling vectors into memory is usually more |
3058 | costly as they contain big values. */ |
3059 | if (lra_dump_file != NULL) |
3060 | fprintf |
3061 | (stream: lra_dump_file, |
3062 | format: " %d Spill vector pseudo: reject+=2\n" , |
3063 | nop); |
3064 | reject += 2; |
3065 | } |
3066 | } |
3067 | |
	  /* When an alternative requires a memory operand, the insn must
	     write *and* read the value to/from memory, which is costly
	     in comparison with an insn alternative which does not use
	     memory (e.g. a register or immediate operand).  We exclude
	     memory operands from this penalty because their memory
	     constraints can be satisfied by reloading the address.  */
3075 | if (no_regs_p && offmemok && !MEM_P (op)) |
3076 | { |
3077 | if (lra_dump_file != NULL) |
3078 | fprintf |
3079 | (stream: lra_dump_file, |
3080 | format: " Using memory insn operand %d: reject+=3\n" , |
3081 | nop); |
3082 | reject += 3; |
3083 | } |
3084 | |
3085 | /* If reload requires moving value through secondary |
3086 | memory, it will need one more insn at least. */ |
3087 | if (this_alternative != NO_REGS |
3088 | && REG_P (op) && (cl = get_reg_class (REGNO (op))) != NO_REGS |
3089 | && ((curr_static_id->operand[nop].type != OP_OUT |
3090 | && targetm.secondary_memory_needed (GET_MODE (op), cl, |
3091 | this_alternative)) |
3092 | || (curr_static_id->operand[nop].type != OP_IN |
3093 | && (targetm.secondary_memory_needed |
3094 | (GET_MODE (op), this_alternative, cl))))) |
3095 | losers++; |
3096 | |
3097 | if (MEM_P (op) && offmemok) |
3098 | addr_losers++; |
3099 | else |
3100 | { |
3101 | /* Input reloads can be inherited more often than |
3102 | output reloads can be removed, so penalize output |
3103 | reloads. */ |
3104 | if (!REG_P (op) || curr_static_id->operand[nop].type != OP_IN) |
3105 | { |
3106 | if (lra_dump_file != NULL) |
3107 | fprintf |
3108 | (stream: lra_dump_file, |
3109 | format: " %d Non input pseudo reload: reject++\n" , |
3110 | nop); |
3111 | reject++; |
3112 | } |
3113 | |
3114 | if (curr_static_id->operand[nop].type == OP_INOUT) |
3115 | { |
3116 | if (lra_dump_file != NULL) |
3117 | fprintf |
3118 | (stream: lra_dump_file, |
3119 | format: " %d Input/Output reload: reject+=%d\n" , |
3120 | nop, LRA_LOSER_COST_FACTOR); |
3121 | reject += LRA_LOSER_COST_FACTOR; |
3122 | } |
3123 | } |
3124 | } |
3125 | |
3126 | if (early_clobber_p && ! scratch_p) |
3127 | { |
3128 | if (lra_dump_file != NULL) |
3129 | fprintf (stream: lra_dump_file, |
3130 | format: " %d Early clobber: reject++\n" , nop); |
3131 | reject++; |
3132 | } |
	/* ??? We check early clobbers after processing all operands
	   (see loop below) and update the costs further there.
	   Should we also update the cost (perhaps approximately) here
	   because of early clobber register reloads, or is that too
	   rare or unimportant to be worth doing?  */
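	/* A worked example of the cost formula below (illustrative only,
	   assuming LRA_LOSER_COST_FACTOR is 6, its value in lra-int.h, and
	   static_reject is 0): an alternative with losers == 2 and
	   reject == 3 gets overall = 2 * 6 + 3 = 15, i.e. each operand that
	   needs a reload weighs as much as six units of reject.  */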
3138 | overall = (losers * LRA_LOSER_COST_FACTOR + reject |
3139 | - (addr_losers == losers ? static_reject : 0)); |
3140 | if ((best_losers == 0 || losers != 0) && best_overall < overall) |
3141 | { |
3142 | if (lra_dump_file != NULL) |
3143 | fprintf (stream: lra_dump_file, |
3144 | format: " overall=%d,losers=%d -- refuse\n" , |
3145 | overall, losers); |
3146 | goto fail; |
3147 | } |
3148 | |
3149 | if (update_and_check_small_class_inputs (nop, nalt, |
3150 | op_class: this_alternative)) |
3151 | { |
3152 | if (lra_dump_file != NULL) |
3153 | fprintf (stream: lra_dump_file, |
3154 | format: " not enough small class regs -- refuse\n" ); |
3155 | goto fail; |
3156 | } |
3157 | curr_alt[nop] = this_alternative; |
3158 | curr_alt_set[nop] = this_alternative_set; |
3159 | curr_alt_exclude_start_hard_regs[nop] |
3160 | = this_alternative_exclude_start_hard_regs; |
3161 | curr_alt_win[nop] = this_alternative_win; |
3162 | curr_alt_match_win[nop] = this_alternative_match_win; |
3163 | curr_alt_offmemok[nop] = this_alternative_offmemok; |
3164 | curr_alt_matches[nop] = this_alternative_matches; |
3165 | |
3166 | if (this_alternative_matches >= 0 |
3167 | && !did_match && !this_alternative_win) |
3168 | curr_alt_win[this_alternative_matches] = false; |
3169 | |
3170 | if (early_clobber_p && operand_reg[nop] != NULL_RTX) |
3171 | early_clobbered_nops[early_clobbered_regs_num++] = nop; |
3172 | } |
3173 | |
3174 | if (curr_insn_set != NULL_RTX && n_operands == 2 |
3175 | /* Prevent processing non-move insns. */ |
3176 | && (GET_CODE (SET_SRC (curr_insn_set)) == SUBREG |
3177 | || SET_SRC (curr_insn_set) == no_subreg_reg_operand[1]) |
3178 | && ((! curr_alt_win[0] && ! curr_alt_win[1] |
3179 | && REG_P (no_subreg_reg_operand[0]) |
3180 | && REG_P (no_subreg_reg_operand[1]) |
3181 | && (reg_in_class_p (reg: no_subreg_reg_operand[0], cl: curr_alt[1]) |
3182 | || reg_in_class_p (reg: no_subreg_reg_operand[1], cl: curr_alt[0]))) |
3183 | || (! curr_alt_win[0] && curr_alt_win[1] |
3184 | && REG_P (no_subreg_reg_operand[1]) |
3185 | /* Check that we reload memory not the memory |
3186 | address. */ |
3187 | && ! (curr_alt_offmemok[0] |
3188 | && MEM_P (no_subreg_reg_operand[0])) |
3189 | && reg_in_class_p (reg: no_subreg_reg_operand[1], cl: curr_alt[0])) |
3190 | || (curr_alt_win[0] && ! curr_alt_win[1] |
3191 | && REG_P (no_subreg_reg_operand[0]) |
3192 | /* Check that we reload memory not the memory |
3193 | address. */ |
3194 | && ! (curr_alt_offmemok[1] |
3195 | && MEM_P (no_subreg_reg_operand[1])) |
3196 | && reg_in_class_p (reg: no_subreg_reg_operand[0], cl: curr_alt[1]) |
3197 | && (! CONST_POOL_OK_P (curr_operand_mode[1], |
3198 | no_subreg_reg_operand[1]) |
3199 | || (targetm.preferred_reload_class |
3200 | (no_subreg_reg_operand[1], |
3201 | (enum reg_class) curr_alt[1]) != NO_REGS)) |
	      /* If it is the result of a recent elimination in a move
		 insn, we can still transform it into an add by
		 using this alternative.  */
3205 | && GET_CODE (no_subreg_reg_operand[1]) != PLUS |
3206 | /* Likewise if the source has been replaced with an |
3207 | equivalent value. This only happens once -- the reload |
3208 | will use the equivalent value instead of the register it |
3209 | replaces -- so there should be no danger of cycling. */ |
3210 | && !equiv_substition_p[1]))) |
3211 | { |
      /* We have a move insn and a new reload insn will be similar
	 to the current insn.  We should avoid such a situation as
	 it results in LRA cycling.  */
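      /* An illustrative (hypothetical) case: for a move "p1 <- p2" where
	 neither pseudo satisfies its constraint in the chosen alternative
	 but each one fits the class required for the other operand, the
	 generated reload would be another register move of the same shape,
	 which LRA would then have to process all over again.  */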
3215 | if (lra_dump_file != NULL) |
3216 | fprintf (stream: lra_dump_file, |
3217 | format: " Cycle danger: overall += LRA_MAX_REJECT\n" ); |
3218 | overall += LRA_MAX_REJECT; |
3219 | } |
3220 | ok_p = true; |
3221 | curr_alt_dont_inherit_ops_num = 0; |
3222 | for (nop = 0; nop < early_clobbered_regs_num; nop++) |
3223 | { |
3224 | int i, j, clobbered_hard_regno, first_conflict_j, last_conflict_j; |
3225 | HARD_REG_SET temp_set; |
3226 | |
3227 | i = early_clobbered_nops[nop]; |
3228 | if ((! curr_alt_win[i] && ! curr_alt_match_win[i]) |
3229 | || hard_regno[i] < 0) |
3230 | continue; |
3231 | lra_assert (operand_reg[i] != NULL_RTX); |
3232 | clobbered_hard_regno = hard_regno[i]; |
3233 | CLEAR_HARD_REG_SET (set&: temp_set); |
3234 | add_to_hard_reg_set (regs: &temp_set, GET_MODE (*curr_id->operand_loc[i]), |
3235 | regno: clobbered_hard_regno); |
3236 | first_conflict_j = last_conflict_j = -1; |
3237 | for (j = 0; j < n_operands; j++) |
3238 | if (j == i |
	    /* We don't want to process the insides of match_operator and
	       match_parallel because otherwise we would process
	       their operands once again, generating wrong
	       code.  */
3243 | || curr_static_id->operand[j].is_operator) |
3244 | continue; |
3245 | else if ((curr_alt_matches[j] == i && curr_alt_match_win[j]) |
3246 | || (curr_alt_matches[i] == j && curr_alt_match_win[i])) |
3247 | continue; |
3248 | /* If we don't reload j-th operand, check conflicts. */ |
3249 | else if ((curr_alt_win[j] || curr_alt_match_win[j]) |
3250 | && uses_hard_regs_p (x: *curr_id->operand_loc[j], set: temp_set)) |
3251 | { |
3252 | if (first_conflict_j < 0) |
3253 | first_conflict_j = j; |
3254 | last_conflict_j = j; |
	      /* The earlyclobber operand and the conflicting operand
		 cannot both be user-defined hard registers.  */
3257 | if (HARD_REGISTER_P (operand_reg[i]) |
3258 | && REG_USERVAR_P (operand_reg[i]) |
3259 | && operand_reg[j] != NULL_RTX |
3260 | && HARD_REGISTER_P (operand_reg[j]) |
3261 | && REG_USERVAR_P (operand_reg[j])) |
3262 | { |
3263 | /* For asm, let curr_insn_transform diagnose it. */ |
3264 | if (INSN_CODE (curr_insn) < 0) |
3265 | return false; |
3266 | fatal_insn ("unable to generate reloads for " |
3267 | "impossible constraints:" , curr_insn); |
3268 | } |
3269 | } |
3270 | if (last_conflict_j < 0) |
3271 | continue; |
3272 | |
3273 | /* If an earlyclobber operand conflicts with another non-matching |
3274 | operand (ie, they have been assigned the same hard register), |
3275 | then it is better to reload the other operand, as there may |
3276 | exist yet another operand with a matching constraint associated |
3277 | with the earlyclobber operand. However, if one of the operands |
3278 | is an explicit use of a hard register, then we must reload the |
3279 | other non-hard register operand. */ |
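      /* For example (illustrative only): suppose an earlyclobber output
	 and an unrelated input were both assigned hard register R.
	 Reloading the unrelated input resolves the conflict, whereas
	 reloading the earlyclobber output would not help if yet another
	 operand is constrained to match it and hence to use R as well.  */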
3280 | if (HARD_REGISTER_P (operand_reg[i]) |
3281 | || (first_conflict_j == last_conflict_j |
3282 | && operand_reg[last_conflict_j] != NULL_RTX |
3283 | && !curr_alt_match_win[last_conflict_j] |
3284 | && !HARD_REGISTER_P (operand_reg[last_conflict_j]))) |
3285 | { |
3286 | curr_alt_win[last_conflict_j] = false; |
3287 | curr_alt_dont_inherit_ops[curr_alt_dont_inherit_ops_num++] |
3288 | = last_conflict_j; |
3289 | losers++; |
3290 | if (lra_dump_file != NULL) |
3291 | fprintf |
3292 | (stream: lra_dump_file, |
3293 | format: " %d Conflict early clobber reload: reject--\n" , |
3294 | i); |
3295 | } |
3296 | else |
3297 | { |
3298 | /* We need to reload early clobbered register and the |
3299 | matched registers. */ |
3300 | for (j = 0; j < n_operands; j++) |
3301 | if (curr_alt_matches[j] == i) |
3302 | { |
3303 | curr_alt_match_win[j] = false; |
3304 | losers++; |
3305 | overall += LRA_LOSER_COST_FACTOR; |
3306 | } |
3307 | if (! curr_alt_match_win[i]) |
3308 | curr_alt_dont_inherit_ops[curr_alt_dont_inherit_ops_num++] = i; |
3309 | else |
3310 | { |
3311 | /* Remember pseudos used for match reloads are never |
3312 | inherited. */ |
3313 | lra_assert (curr_alt_matches[i] >= 0); |
3314 | curr_alt_win[curr_alt_matches[i]] = false; |
3315 | } |
3316 | curr_alt_win[i] = curr_alt_match_win[i] = false; |
3317 | losers++; |
3318 | if (lra_dump_file != NULL) |
3319 | fprintf |
3320 | (stream: lra_dump_file, |
3321 | format: " %d Matched conflict early clobber reloads: " |
3322 | "reject--\n" , |
3323 | i); |
3324 | } |
3325 | /* Early clobber was already reflected in REJECT. */ |
3326 | if (!matching_early_clobber[i]) |
3327 | { |
3328 | lra_assert (reject > 0); |
3329 | reject--; |
3330 | matching_early_clobber[i] = 1; |
3331 | } |
3332 | overall += LRA_LOSER_COST_FACTOR - 1; |
3333 | } |
3334 | if (lra_dump_file != NULL) |
3335 | fprintf (stream: lra_dump_file, format: " overall=%d,losers=%d,rld_nregs=%d\n" , |
3336 | overall, losers, reload_nregs); |
3337 | |
3338 | /* If this alternative can be made to work by reloading, and it |
3339 | needs less reloading than the others checked so far, record |
3340 | it as the chosen goal for reloading. */ |
3341 | if ((best_losers != 0 && losers == 0) |
3342 | || (((best_losers == 0 && losers == 0) |
3343 | || (best_losers != 0 && losers != 0)) |
3344 | && (best_overall > overall |
3345 | || (best_overall == overall |
3346 | /* If the cost of the reloads is the same, |
3347 | prefer alternative which requires minimal |
3348 | number of reload regs. */ |
3349 | && (reload_nregs < best_reload_nregs |
3350 | || (reload_nregs == best_reload_nregs |
3351 | && (best_reload_sum < reload_sum |
3352 | || (best_reload_sum == reload_sum |
3353 | && nalt < goal_alt_number)))))))) |
3354 | { |
3355 | for (nop = 0; nop < n_operands; nop++) |
3356 | { |
3357 | goal_alt_win[nop] = curr_alt_win[nop]; |
3358 | goal_alt_match_win[nop] = curr_alt_match_win[nop]; |
3359 | goal_alt_matches[nop] = curr_alt_matches[nop]; |
3360 | goal_alt[nop] = curr_alt[nop]; |
3361 | goal_alt_exclude_start_hard_regs[nop] |
3362 | = curr_alt_exclude_start_hard_regs[nop]; |
3363 | goal_alt_offmemok[nop] = curr_alt_offmemok[nop]; |
3364 | } |
3365 | goal_alt_dont_inherit_ops_num = curr_alt_dont_inherit_ops_num; |
3366 | goal_reuse_alt_p = curr_reuse_alt_p; |
3367 | for (nop = 0; nop < curr_alt_dont_inherit_ops_num; nop++) |
3368 | goal_alt_dont_inherit_ops[nop] = curr_alt_dont_inherit_ops[nop]; |
3369 | goal_alt_swapped = curr_swapped; |
3370 | goal_alt_out_sp_reload_p = curr_alt_out_sp_reload_p; |
3371 | best_overall = overall; |
3372 | best_losers = losers; |
3373 | best_reload_nregs = reload_nregs; |
3374 | best_reload_sum = reload_sum; |
3375 | goal_alt_number = nalt; |
3376 | } |
3377 | if (losers == 0 && !curr_alt_class_change_p) |
3378 | /* Everything is satisfied. Do not process alternatives |
3379 | anymore. */ |
3380 | break; |
3381 | fail: |
3382 | ; |
3383 | } |
3384 | return ok_p; |
3385 | } |
3386 | |
3387 | /* Make reload base reg from address AD. */ |
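/* A sketch of the transformation done here (illustrative only): if AD
   describes "base + disp" and the base term itself is not a valid base
   register expression (e.g. it became "fp + offset" after elimination),
   we emit

       new_base <- base

   and return "new_base + disp", provided the resulting address is valid
   and the move is recognized; otherwise NULL_RTX is returned and any
   emitted move is deleted.  */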
3388 | static rtx |
3389 | base_to_reg (struct address_info *ad) |
3390 | { |
3391 | enum reg_class cl; |
3392 | int code = -1; |
3393 | rtx new_inner = NULL_RTX; |
3394 | rtx new_reg = NULL_RTX; |
3395 | rtx_insn *insn; |
  rtx_insn *last_insn = get_last_insn ();
3397 | |
3398 | lra_assert (ad->disp == ad->disp_term); |
3399 | cl = base_reg_class (mode: ad->mode, as: ad->as, outer_code: ad->base_outer_code, |
3400 | index_code: get_index_code (ad)); |
3401 | new_reg = lra_create_new_reg (GET_MODE (*ad->base), NULL_RTX, cl, NULL, |
3402 | "base" ); |
3403 | new_inner = simplify_gen_binary (code: PLUS, GET_MODE (new_reg), op0: new_reg, |
3404 | op1: ad->disp_term == NULL |
3405 | ? const0_rtx |
3406 | : *ad->disp_term); |
3407 | if (!valid_address_p (mode: ad->mode, addr: new_inner, as: ad->as)) |
3408 | return NULL_RTX; |
3409 | insn = emit_insn (gen_rtx_SET (new_reg, *ad->base)); |
3410 | code = recog_memoized (insn); |
3411 | if (code < 0) |
3412 | { |
3413 | delete_insns_since (last_insn); |
3414 | return NULL_RTX; |
3415 | } |
3416 | |
3417 | return new_inner; |
3418 | } |
3419 | |
3420 | /* Make reload base reg + DISP from address AD. Return the new pseudo. */ |
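/* For example (illustrative only): for an address "fp + 4096" whose
   displacement is out of range for the target, this emits

       new_reg <- fp + 4096

   via lra_emit_add (which may itself need several insns), and the caller
   then rewrites the address to use NEW_REG.  */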
3421 | static rtx |
3422 | base_plus_disp_to_reg (struct address_info *ad, rtx disp) |
3423 | { |
3424 | enum reg_class cl; |
3425 | rtx new_reg; |
3426 | |
3427 | lra_assert (ad->base == ad->base_term); |
3428 | cl = base_reg_class (mode: ad->mode, as: ad->as, outer_code: ad->base_outer_code, |
3429 | index_code: get_index_code (ad)); |
3430 | new_reg = lra_create_new_reg (GET_MODE (*ad->base_term), NULL_RTX, cl, NULL, |
3431 | "base + disp" ); |
3432 | lra_emit_add (new_reg, *ad->base_term, disp); |
3433 | return new_reg; |
3434 | } |
3435 | |
3436 | /* Make reload of index part of address AD. Return the new |
3437 | pseudo. */ |
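/* For example (illustrative only): for an index term "r * 4" this
   generates "new_reg <- r * 4" (possibly as a shift, at expand_mult's
   discretion) so that the address can then use NEW_REG with scale 1.  */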
3438 | static rtx |
3439 | index_part_to_reg (struct address_info *ad, enum reg_class index_class) |
3440 | { |
3441 | rtx new_reg; |
3442 | |
3443 | new_reg = lra_create_new_reg (GET_MODE (*ad->index), NULL_RTX, |
3444 | index_class, NULL, "index term" ); |
3445 | expand_mult (GET_MODE (*ad->index), *ad->index_term, |
3446 | GEN_INT (get_index_scale (ad)), new_reg, 1); |
3447 | return new_reg; |
3448 | } |
3449 | |
3450 | /* Return true if we can add a displacement to address AD, even if that |
3451 | makes the address invalid. The fix-up code requires any new address |
3452 | to be the sum of the BASE_TERM, INDEX and DISP_TERM fields. */ |
3453 | static bool |
3454 | can_add_disp_p (struct address_info *ad) |
3455 | { |
3456 | return (!ad->autoinc_p |
3457 | && ad->segment == NULL |
3458 | && ad->base == ad->base_term |
3459 | && ad->disp == ad->disp_term); |
3460 | } |
3461 | |
3462 | /* Make equiv substitution in address AD. Return true if a substitution |
3463 | was made. */ |
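/* A sketch of the substitution (illustrative only): if the base pseudo
   r100 in the address "(plus r100 (const_int 8))" has the equivalence
   "(plus (reg sp) (const_int 16))", the base term becomes sp and the
   offset 16 is folded into the existing displacement, giving
   "(plus sp (const_int 24))" -- provided the address shape allows adding
   a displacement (see can_add_disp_p).  */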
3464 | static bool |
3465 | equiv_address_substitution (struct address_info *ad) |
3466 | { |
3467 | rtx base_reg, new_base_reg, index_reg, new_index_reg, *base_term, *index_term; |
3468 | poly_int64 disp; |
3469 | HOST_WIDE_INT scale; |
3470 | bool change_p; |
3471 | |
3472 | base_term = strip_subreg (loc: ad->base_term); |
3473 | if (base_term == NULL) |
3474 | base_reg = new_base_reg = NULL_RTX; |
3475 | else |
3476 | { |
3477 | base_reg = *base_term; |
3478 | new_base_reg = get_equiv_with_elimination (x: base_reg, insn: curr_insn); |
3479 | } |
3480 | index_term = strip_subreg (loc: ad->index_term); |
3481 | if (index_term == NULL) |
3482 | index_reg = new_index_reg = NULL_RTX; |
3483 | else |
3484 | { |
3485 | index_reg = *index_term; |
3486 | new_index_reg = get_equiv_with_elimination (x: index_reg, insn: curr_insn); |
3487 | } |
3488 | if (base_reg == new_base_reg && index_reg == new_index_reg) |
3489 | return false; |
3490 | disp = 0; |
3491 | change_p = false; |
3492 | if (lra_dump_file != NULL) |
3493 | { |
3494 | fprintf (stream: lra_dump_file, format: "Changing address in insn %d " , |
3495 | INSN_UID (insn: curr_insn)); |
3496 | dump_value_slim (lra_dump_file, *ad->outer, 1); |
3497 | } |
3498 | if (base_reg != new_base_reg) |
3499 | { |
3500 | poly_int64 offset; |
3501 | if (REG_P (new_base_reg)) |
3502 | { |
3503 | *base_term = new_base_reg; |
3504 | change_p = true; |
3505 | } |
3506 | else if (GET_CODE (new_base_reg) == PLUS |
3507 | && REG_P (XEXP (new_base_reg, 0)) |
3508 | && poly_int_rtx_p (XEXP (new_base_reg, 1), res: &offset) |
3509 | && can_add_disp_p (ad)) |
3510 | { |
3511 | disp += offset; |
3512 | *base_term = XEXP (new_base_reg, 0); |
3513 | change_p = true; |
3514 | } |
3515 | if (ad->base_term2 != NULL) |
3516 | *ad->base_term2 = *ad->base_term; |
3517 | } |
3518 | if (index_reg != new_index_reg) |
3519 | { |
3520 | poly_int64 offset; |
3521 | if (REG_P (new_index_reg)) |
3522 | { |
3523 | *index_term = new_index_reg; |
3524 | change_p = true; |
3525 | } |
3526 | else if (GET_CODE (new_index_reg) == PLUS |
3527 | && REG_P (XEXP (new_index_reg, 0)) |
3528 | && poly_int_rtx_p (XEXP (new_index_reg, 1), res: &offset) |
3529 | && can_add_disp_p (ad) |
3530 | && (scale = get_index_scale (ad))) |
3531 | { |
3532 | disp += offset * scale; |
3533 | *index_term = XEXP (new_index_reg, 0); |
3534 | change_p = true; |
3535 | } |
3536 | } |
3537 | if (maybe_ne (a: disp, b: 0)) |
3538 | { |
3539 | if (ad->disp != NULL) |
3540 | *ad->disp = plus_constant (GET_MODE (*ad->inner), *ad->disp, disp); |
3541 | else |
3542 | { |
3543 | *ad->inner = plus_constant (GET_MODE (*ad->inner), *ad->inner, disp); |
3544 | update_address (ad); |
3545 | } |
3546 | change_p = true; |
3547 | } |
3548 | if (lra_dump_file != NULL) |
3549 | { |
3550 | if (! change_p) |
3551 | fprintf (stream: lra_dump_file, format: " -- no change\n" ); |
3552 | else |
3553 | { |
3554 | fprintf (stream: lra_dump_file, format: " on equiv " ); |
3555 | dump_value_slim (lra_dump_file, *ad->outer, 1); |
3556 | fprintf (stream: lra_dump_file, format: "\n" ); |
3557 | } |
3558 | } |
3559 | return change_p; |
3560 | } |
3561 | |
/* Skip all modifiers and whitespace in constraint STR and return the
   result.  */
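/* For example (illustrative only): given "=&r" or " %?r" this returns a
   pointer to the "r"; a string consisting only of modifiers yields a
   pointer to its terminating NUL.  */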
3564 | static const char * |
3565 | skip_constraint_modifiers (const char *str) |
3566 | { |
3567 | for (;;str++) |
3568 | switch (*str) |
3569 | { |
3570 | case '+': case '&' : case '=': case '*': case ' ': case '\t': |
3571 | case '$': case '^' : case '%': case '?': case '!': |
3572 | break; |
3573 | default: return str; |
3574 | } |
3575 | } |
3576 | |
3577 | /* Takes a string of 0 or more comma-separated constraints. When more |
3578 | than one constraint is present, evaluate whether they all correspond |
3579 | to a single, repeated constraint (e.g. "r,r") or whether we have |
   more than one distinct constraint (e.g. "r,m").  */
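/* For example (illustrative only): "r" and "r,r" are unique, "r,m" is
   not, and an empty alternative is treated as the "X" constraint, so
   "r," is not unique either.  */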
3581 | static bool |
3582 | constraint_unique (const char *cstr) |
3583 | { |
3584 | enum constraint_num ca, cb; |
3585 | ca = CONSTRAINT__UNKNOWN; |
3586 | for (;;) |
3587 | { |
3588 | cstr = skip_constraint_modifiers (str: cstr); |
3589 | if (*cstr == '\0' || *cstr == ',') |
3590 | cb = CONSTRAINT_X; |
3591 | else |
3592 | { |
3593 | cb = lookup_constraint (p: cstr); |
3594 | if (cb == CONSTRAINT__UNKNOWN) |
3595 | return false; |
3596 | cstr += CONSTRAINT_LEN (cstr[0], cstr); |
3597 | } |
3598 | /* Handle the first iteration of the loop. */ |
3599 | if (ca == CONSTRAINT__UNKNOWN) |
3600 | ca = cb; |
3601 | /* Handle the general case of comparing ca with subsequent |
3602 | constraints. */ |
3603 | else if (ca != cb) |
3604 | return false; |
3605 | if (*cstr == '\0') |
3606 | return true; |
3607 | if (*cstr == ',') |
3608 | cstr += 1; |
3609 | } |
3610 | } |
3611 | |
3612 | /* Major function to make reloads for an address in operand NOP or |
   check its correctness (if CHECK_ONLY_P is true).  The supported
3614 | cases are: |
3615 | |
3616 | 1) an address that existed before LRA started, at which point it |
3617 | must have been valid. These addresses are subject to elimination |
3618 | and may have become invalid due to the elimination offset being out |
3619 | of range. |
3620 | |
3621 | 2) an address created by forcing a constant to memory |
3622 | (force_const_to_mem). The initial form of these addresses might |
3623 | not be valid, and it is this function's job to make them valid. |
3624 | |
3625 | 3) a frame address formed from a register and a (possibly zero) |
3626 | constant offset. As above, these addresses might not be valid and |
3627 | this function must make them so. |
3628 | |
3629 | Add reloads to the lists *BEFORE and *AFTER. We might need to add |
3630 | reloads to *AFTER because of inc/dec, {pre, post} modify in the |
3631 | address. Return true for any RTL change. |
3632 | |
   The function is a helper which does not produce all the
   transformations (when CHECK_ONLY_P is false) that can be
   necessary.  It does just the basic steps.  To do all necessary
   transformations use the function process_address.  */
3637 | static bool |
3638 | process_address_1 (int nop, bool check_only_p, |
3639 | rtx_insn **before, rtx_insn **after) |
3640 | { |
3641 | struct address_info ad; |
3642 | rtx new_reg; |
3643 | HOST_WIDE_INT scale; |
3644 | rtx op = *curr_id->operand_loc[nop]; |
3645 | rtx mem = extract_mem_from_operand (op); |
3646 | const char *constraint; |
3647 | enum constraint_num cn; |
3648 | bool change_p = false; |
3649 | |
3650 | if (MEM_P (mem) |
3651 | && GET_MODE (mem) == BLKmode |
3652 | && GET_CODE (XEXP (mem, 0)) == SCRATCH) |
3653 | return false; |
3654 | |
3655 | constraint |
3656 | = skip_constraint_modifiers (str: curr_static_id->operand[nop].constraint); |
3657 | if (IN_RANGE (constraint[0], '0', '9')) |
3658 | { |
3659 | char *end; |
3660 | unsigned long dup = strtoul (nptr: constraint, endptr: &end, base: 10); |
3661 | constraint |
3662 | = skip_constraint_modifiers (str: curr_static_id->operand[dup].constraint); |
3663 | } |
3664 | cn = lookup_constraint (p: *constraint == '\0' ? "X" : constraint); |
  /* If we have several alternatives and/or several constraints in an
     alternative and we cannot say at this stage which constraint will be used,
     use the unknown constraint.  The exception is an address constraint.  If
     the operand has one address constraint, probably all the other constraints
     are address ones too.  */
3670 | if (constraint[0] != '\0' && get_constraint_type (c: cn) != CT_ADDRESS |
3671 | && !constraint_unique (cstr: constraint)) |
3672 | cn = CONSTRAINT__UNKNOWN; |
3673 | if (insn_extra_address_constraint (c: cn) |
3674 | /* When we find an asm operand with an address constraint that |
3675 | doesn't satisfy address_operand to begin with, we clear |
3676 | is_address, so that we don't try to make a non-address fit. |
3677 | If the asm statement got this far, it's because other |
3678 | constraints are available, and we'll use them, disregarding |
3679 | the unsatisfiable address ones. */ |
3680 | && curr_static_id->operand[nop].is_address) |
3681 | decompose_lea_address (&ad, curr_id->operand_loc[nop]); |
  /* Do not attempt to decompose arbitrary addresses generated by combine
     for asm operands with loose constraints, e.g. 'X'.
     We need to extract the memory from OP for a special memory constraint,
     i.e. bcst_mem_operand in the i386 backend.  */
3686 | else if (MEM_P (mem) |
3687 | && !(INSN_CODE (curr_insn) < 0 |
3688 | && get_constraint_type (c: cn) == CT_FIXED_FORM |
3689 | && constraint_satisfied_p (x: op, c: cn))) |
3690 | decompose_mem_address (&ad, mem); |
3691 | else if (GET_CODE (op) == SUBREG |
3692 | && MEM_P (SUBREG_REG (op))) |
3693 | decompose_mem_address (&ad, SUBREG_REG (op)); |
3694 | else |
3695 | return false; |
  /* If INDEX_REG_CLASS is already assigned to base_term and not to
     index_term, swap them so as to avoid assigning INDEX_REG_CLASS to
     both when INDEX_REG_CLASS contains only a single register.  */
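  /* For example (hypothetical target): if the only register in
     INDEX_REG_CLASS is R, the base term is a pseudo limited to
     INDEX_REG_CLASS and the index term is not, swapping them leaves
     just one of the two address registers competing for R.  */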
3699 | enum reg_class index_cl = index_reg_class (insn: curr_insn); |
3700 | if (ad.base_term != NULL |
3701 | && ad.index_term != NULL |
3702 | && ira_class_hard_regs_num[index_cl] == 1 |
3703 | && REG_P (*ad.base_term) |
3704 | && REG_P (*ad.index_term) |
3705 | && in_class_p (reg: *ad.base_term, cl: index_cl, NULL) |
3706 | && ! in_class_p (reg: *ad.index_term, cl: index_cl, NULL)) |
3707 | { |
3708 | std::swap (a&: ad.base, b&: ad.index); |
3709 | std::swap (a&: ad.base_term, b&: ad.index_term); |
3710 | } |
3711 | if (! check_only_p) |
3712 | change_p = equiv_address_substitution (ad: &ad); |
3713 | if (ad.base_term != NULL |
3714 | && (process_addr_reg |
3715 | (loc: ad.base_term, check_only_p, before, |
3716 | after: (ad.autoinc_p |
3717 | && !(REG_P (*ad.base_term) |
3718 | && find_regno_note (curr_insn, REG_DEAD, |
3719 | REGNO (*ad.base_term)) != NULL_RTX) |
3720 | ? after : NULL), |
3721 | cl: base_reg_class (mode: ad.mode, as: ad.as, outer_code: ad.base_outer_code, |
3722 | index_code: get_index_code (&ad), insn: curr_insn)))) |
3723 | { |
3724 | change_p = true; |
3725 | if (ad.base_term2 != NULL) |
3726 | *ad.base_term2 = *ad.base_term; |
3727 | } |
3728 | if (ad.index_term != NULL |
3729 | && process_addr_reg (loc: ad.index_term, check_only_p, |
3730 | before, NULL, cl: index_cl)) |
3731 | change_p = true; |
3732 | |
3733 | /* Target hooks sometimes don't treat extra-constraint addresses as |
3734 | legitimate address_operands, so handle them specially. */ |
3735 | if (insn_extra_address_constraint (c: cn) |
3736 | && satisfies_address_constraint_p (ad: &ad, constraint: cn)) |
3737 | return change_p; |
3738 | |
3739 | if (check_only_p) |
3740 | return change_p; |
3741 | |
  /* There are four cases where the shape of *AD.INNER may now be invalid:
3743 | |
3744 | 1) the original address was valid, but either elimination or |
3745 | equiv_address_substitution was applied and that made |
3746 | the address invalid. |
3747 | |
3748 | 2) the address is an invalid symbolic address created by |
3749 | force_const_to_mem. |
3750 | |
3751 | 3) the address is a frame address with an invalid offset. |
3752 | |
3753 | 4) the address is a frame address with an invalid base. |
3754 | |
3755 | All these cases involve a non-autoinc address, so there is no |
3756 | point revalidating other types. */ |
3757 | if (ad.autoinc_p || valid_address_p (op, ad: &ad, constraint: cn)) |
3758 | return change_p; |
3759 | |
3760 | /* Any index existed before LRA started, so we can assume that the |
3761 | presence and shape of the index is valid. */ |
3762 | push_to_sequence (*before); |
3763 | lra_assert (ad.disp == ad.disp_term); |
3764 | if (ad.base == NULL) |
3765 | { |
3766 | if (ad.index == NULL) |
3767 | { |
3768 | rtx_insn *insn; |
3769 | rtx_insn *last = get_last_insn (); |
3770 | int code = -1; |
3771 | enum reg_class cl = base_reg_class (mode: ad.mode, as: ad.as, |
3772 | outer_code: SCRATCH, index_code: SCRATCH, |
3773 | insn: curr_insn); |
3774 | rtx addr = *ad.inner; |
3775 | |
3776 | new_reg = lra_create_new_reg (Pmode, NULL_RTX, cl, NULL, "addr" ); |
3777 | if (HAVE_lo_sum) |
3778 | { |
3779 | /* addr => lo_sum (new_base, addr), case (2) above. */ |
3780 | insn = emit_insn (gen_rtx_SET |
3781 | (new_reg, |
3782 | gen_rtx_HIGH (Pmode, copy_rtx (addr)))); |
3783 | code = recog_memoized (insn); |
3784 | if (code >= 0) |
3785 | { |
3786 | *ad.inner = gen_rtx_LO_SUM (Pmode, new_reg, addr); |
3787 | if (!valid_address_p (op, ad: &ad, constraint: cn)) |
3788 | { |
3789 | /* Try to put lo_sum into register. */ |
3790 | insn = emit_insn (gen_rtx_SET |
3791 | (new_reg, |
3792 | gen_rtx_LO_SUM (Pmode, new_reg, addr))); |
3793 | code = recog_memoized (insn); |
3794 | if (code >= 0) |
3795 | { |
3796 | *ad.inner = new_reg; |
3797 | if (!valid_address_p (op, ad: &ad, constraint: cn)) |
3798 | { |
3799 | *ad.inner = addr; |
3800 | code = -1; |
3801 | } |
3802 | } |
3803 | |
3804 | } |
3805 | } |
3806 | if (code < 0) |
3807 | delete_insns_since (last); |
3808 | } |
3809 | |
3810 | if (code < 0) |
3811 | { |
3812 | /* addr => new_base, case (2) above. */ |
3813 | lra_emit_move (new_reg, addr); |
3814 | |
3815 | for (insn = last == NULL_RTX ? get_insns () : NEXT_INSN (insn: last); |
3816 | insn != NULL_RTX; |
3817 | insn = NEXT_INSN (insn)) |
3818 | if (recog_memoized (insn) < 0) |
3819 | break; |
3820 | if (insn != NULL_RTX) |
3821 | { |
	      /* Do nothing if we cannot generate the right insns.
		 This is analogous to the old reload pass behavior.  */
3824 | delete_insns_since (last); |
3825 | end_sequence (); |
3826 | return false; |
3827 | } |
3828 | *ad.inner = new_reg; |
3829 | } |
3830 | } |
3831 | else |
3832 | { |
3833 | /* index * scale + disp => new base + index * scale, |
3834 | case (1) above. */ |
3835 | enum reg_class cl = base_reg_class (mode: ad.mode, as: ad.as, outer_code: PLUS, |
3836 | GET_CODE (*ad.index), |
3837 | insn: curr_insn); |
3838 | |
3839 | lra_assert (index_cl != NO_REGS); |
3840 | new_reg = lra_create_new_reg (Pmode, NULL_RTX, cl, NULL, "disp" ); |
3841 | lra_emit_move (new_reg, *ad.disp); |
3842 | *ad.inner = simplify_gen_binary (code: PLUS, GET_MODE (new_reg), |
3843 | op0: new_reg, op1: *ad.index); |
3844 | } |
3845 | } |
3846 | else if (ad.index == NULL) |
3847 | { |
3848 | int regno; |
3849 | enum reg_class cl; |
3850 | rtx set; |
3851 | rtx_insn *insns, *last_insn; |
      /* Try to reload the base into a register only if the base is invalid
	 for the address but the offset is valid, case (4) above.  */
3854 | start_sequence (); |
3855 | new_reg = base_to_reg (ad: &ad); |
3856 | |
3857 | /* base + disp => new base, cases (1) and (3) above. */ |
3858 | /* Another option would be to reload the displacement into an |
3859 | index register. However, postreload has code to optimize |
3860 | address reloads that have the same base and different |
3861 | displacements, so reloading into an index register would |
3862 | not necessarily be a win. */ |
3863 | if (new_reg == NULL_RTX) |
3864 | { |
3865 | /* See if the target can split the displacement into a |
3866 | legitimate new displacement from a local anchor. */ |
3867 | gcc_assert (ad.disp == ad.disp_term); |
3868 | poly_int64 orig_offset; |
3869 | rtx offset1, offset2; |
3870 | if (poly_int_rtx_p (x: *ad.disp, res: &orig_offset) |
3871 | && targetm.legitimize_address_displacement (&offset1, &offset2, |
3872 | orig_offset, |
3873 | ad.mode)) |
3874 | { |
3875 | new_reg = base_plus_disp_to_reg (ad: &ad, disp: offset1); |
3876 | new_reg = gen_rtx_PLUS (GET_MODE (new_reg), new_reg, offset2); |
3877 | } |
3878 | else |
3879 | new_reg = base_plus_disp_to_reg (ad: &ad, disp: *ad.disp); |
3880 | } |
3881 | insns = get_insns (); |
3882 | last_insn = get_last_insn (); |
3883 | /* If we generated at least two insns, try last insn source as |
3884 | an address. If we succeed, we generate one less insn. */ |
3885 | if (REG_P (new_reg) |
3886 | && last_insn != insns |
3887 | && (set = single_set (insn: last_insn)) != NULL_RTX |
3888 | && GET_CODE (SET_SRC (set)) == PLUS |
3889 | && REG_P (XEXP (SET_SRC (set), 0)) |
3890 | && CONSTANT_P (XEXP (SET_SRC (set), 1))) |
3891 | { |
3892 | *ad.inner = SET_SRC (set); |
3893 | if (valid_address_p (op, ad: &ad, constraint: cn)) |
3894 | { |
3895 | *ad.base_term = XEXP (SET_SRC (set), 0); |
3896 | *ad.disp_term = XEXP (SET_SRC (set), 1); |
3897 | cl = base_reg_class (mode: ad.mode, as: ad.as, outer_code: ad.base_outer_code, |
3898 | index_code: get_index_code (&ad), insn: curr_insn); |
3899 | regno = REGNO (*ad.base_term); |
3900 | if (regno >= FIRST_PSEUDO_REGISTER |
3901 | && cl != lra_get_allocno_class (regno)) |
3902 | lra_change_class (regno, new_class: cl, title: " Change to" , nl_p: true); |
3903 | new_reg = SET_SRC (set); |
3904 | delete_insns_since (PREV_INSN (insn: last_insn)); |
3905 | } |
3906 | } |
3907 | end_sequence (); |
3908 | emit_insn (insns); |
3909 | *ad.inner = new_reg; |
3910 | } |
3911 | else if (ad.disp_term != NULL) |
3912 | { |
3913 | /* base + scale * index + disp => new base + scale * index, |
3914 | case (1) above. */ |
3915 | gcc_assert (ad.disp == ad.disp_term); |
3916 | new_reg = base_plus_disp_to_reg (ad: &ad, disp: *ad.disp); |
3917 | *ad.inner = simplify_gen_binary (code: PLUS, GET_MODE (new_reg), |
3918 | op0: new_reg, op1: *ad.index); |
3919 | } |
3920 | else if ((scale = get_index_scale (&ad)) == 1) |
3921 | { |
3922 | /* The last transformation to one reg will be made in |
3923 | curr_insn_transform function. */ |
3924 | end_sequence (); |
3925 | return false; |
3926 | } |
3927 | else if (scale != 0) |
3928 | { |
3929 | /* base + scale * index => base + new_reg, |
3930 | case (1) above. |
	 The index part of the address may become invalid.  For example, we
	 may have replaced a pseudo with its equivalent memory, and a subreg
	 of the pseudo with memory of a different mode for which the scale
	 is prohibited.  */
3935 | new_reg = index_part_to_reg (ad: &ad, index_class: index_cl); |
3936 | *ad.inner = simplify_gen_binary (code: PLUS, GET_MODE (new_reg), |
3937 | op0: *ad.base_term, op1: new_reg); |
3938 | } |
3939 | else |
3940 | { |
3941 | enum reg_class cl = base_reg_class (mode: ad.mode, as: ad.as, |
3942 | outer_code: SCRATCH, index_code: SCRATCH, |
3943 | insn: curr_insn); |
3944 | rtx addr = *ad.inner; |
3945 | |
3946 | new_reg = lra_create_new_reg (Pmode, NULL_RTX, cl, NULL, "addr" ); |
3947 | /* addr => new_base. */ |
3948 | lra_emit_move (new_reg, addr); |
3949 | *ad.inner = new_reg; |
3950 | } |
3951 | *before = get_insns (); |
3952 | end_sequence (); |
3953 | return true; |
3954 | } |
3955 | |
/* If CHECK_ONLY_P is false, do address reloads for as long as they are
   necessary.  Use process_address_1 as a helper function.  Return true
   for any RTL changes.

   If CHECK_ONLY_P is true, just check address correctness.  Return
   false if the address is correct.  */
3962 | static bool |
3963 | process_address (int nop, bool check_only_p, |
3964 | rtx_insn **before, rtx_insn **after) |
3965 | { |
3966 | bool res = false; |
3967 | |
3968 | while (process_address_1 (nop, check_only_p, before, after)) |
3969 | { |
3970 | if (check_only_p) |
3971 | return true; |
3972 | res = true; |
3973 | } |
3974 | return res; |
3975 | } |
3976 | |
3977 | /* Override the generic address_reload_context in order to |
3978 | control the creation of reload pseudos. */ |
3979 | class lra_autoinc_reload_context : public address_reload_context |
3980 | { |
3981 | machine_mode mode; |
3982 | enum reg_class rclass; |
3983 | |
3984 | public: |
3985 | lra_autoinc_reload_context (machine_mode mode, enum reg_class new_rclass) |
3986 | : mode (mode), rclass (new_rclass) {} |
3987 | |
3988 | rtx get_reload_reg () const override final |
3989 | { |
3990 | return lra_create_new_reg (mode, NULL_RTX, rclass, NULL, "INC/DEC result" ); |
3991 | } |
3992 | }; |
3993 | |
3994 | /* Emit insns to reload VALUE into a new register. VALUE is an |
3995 | auto-increment or auto-decrement RTX whose operand is a register or |
3996 | memory location; so reloading involves incrementing that location. |
3997 | |
3998 | INC_AMOUNT is the number to increment or decrement by (always |
3999 | positive and ignored for POST_MODIFY/PRE_MODIFY). |
4000 | |
4001 | Return a pseudo containing the result. */ |
4002 | static rtx |
4003 | emit_inc (enum reg_class new_rclass, rtx value, poly_int64 inc_amount) |
4004 | { |
4005 | lra_autoinc_reload_context context (GET_MODE (value), new_rclass); |
4006 | return context.emit_autoinc (value, amount: inc_amount); |
4007 | } |
4008 | |
4009 | /* Return true if the current move insn does not need processing as we |
4010 | already know that it satisfies its constraints. */ |
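/* For example (illustrative only): a move between two pseudos whose
   classes are both GENERAL_REGS usually has a register_move_cost of 2,
   so such an insn is known to satisfy its constraints and needs no
   further processing here.  */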
4011 | static bool |
4012 | simple_move_p (void) |
4013 | { |
4014 | rtx dest, src; |
4015 | enum reg_class dclass, sclass; |
4016 | |
4017 | lra_assert (curr_insn_set != NULL_RTX); |
4018 | dest = SET_DEST (curr_insn_set); |
4019 | src = SET_SRC (curr_insn_set); |
4020 | |
4021 | /* If the instruction has multiple sets we need to process it even if it |
4022 | is single_set. This can happen if one or more of the SETs are dead. |
4023 | See PR73650. */ |
4024 | if (multiple_sets (curr_insn)) |
4025 | return false; |
4026 | |
4027 | return ((dclass = get_op_class (op: dest)) != NO_REGS |
4028 | && (sclass = get_op_class (op: src)) != NO_REGS |
4029 | /* The backend guarantees that register moves of cost 2 |
4030 | never need reloads. */ |
4031 | && targetm.register_move_cost (GET_MODE (src), sclass, dclass) == 2); |
4032 | } |
4033 | |
4034 | /* Swap operands NOP and NOP + 1. */ |
4035 | static inline void |
4036 | swap_operands (int nop) |
4037 | { |
4038 | std::swap (a&: curr_operand_mode[nop], b&: curr_operand_mode[nop + 1]); |
4039 | std::swap (a&: original_subreg_reg_mode[nop], b&: original_subreg_reg_mode[nop + 1]); |
4040 | std::swap (a&: *curr_id->operand_loc[nop], b&: *curr_id->operand_loc[nop + 1]); |
4041 | std::swap (a&: equiv_substition_p[nop], b&: equiv_substition_p[nop + 1]); |
4042 | /* Swap the duplicates too. */ |
4043 | lra_update_dup (id: curr_id, nop); |
4044 | lra_update_dup (id: curr_id, nop: nop + 1); |
4045 | } |
4046 | |
4047 | /* Main entry point of the constraint code: search the body of the |
   current insn to choose the best alternative.  It mimics the insn
   alternative cost calculation model of the former reload pass, because
   machine descriptions were written to use that model.  The model may
   be changed in the future.  Make the commutative operand exchange
   if it is chosen.
4053 | |
   If CHECK_ONLY_P is false, make RTL changes to satisfy the
   constraints.  Return true if any change happened during the
   call.
4057 | |
4058 | If CHECK_ONLY_P is true then don't do any transformation. Just |
4059 | check that the insn satisfies all constraints. If the insn does |
4060 | not satisfy any constraint, return true. */ |
4061 | static bool |
4062 | curr_insn_transform (bool check_only_p) |
4063 | { |
4064 | int i, j, k; |
4065 | int n_operands; |
4066 | int n_alternatives; |
4067 | int n_outputs; |
4068 | int commutative; |
4069 | signed char goal_alt_matched[MAX_RECOG_OPERANDS][MAX_RECOG_OPERANDS]; |
4070 | signed char match_inputs[MAX_RECOG_OPERANDS + 1]; |
4071 | signed char outputs[MAX_RECOG_OPERANDS + 1]; |
4072 | rtx_insn *before, *after; |
4073 | bool alt_p = false; |
4074 | /* Flag that the insn has been changed through a transformation. */ |
4075 | bool change_p; |
4076 | bool sec_mem_p; |
4077 | bool use_sec_mem_p; |
4078 | int max_regno_before; |
4079 | int reused_alternative_num; |
4080 | |
4081 | curr_insn_set = single_set (insn: curr_insn); |
4082 | if (curr_insn_set != NULL_RTX && simple_move_p ()) |
4083 | { |
      /* We assume that the corresponding insn alternative has no
	 early clobbers.  If that is not the case, don't define a move
	 cost equal to 2 for the corresponding register classes.  */
4087 | lra_set_used_insn_alternative (curr_insn, LRA_NON_CLOBBERED_ALT); |
4088 | return false; |
4089 | } |
4090 | |
4091 | no_input_reloads_p = no_output_reloads_p = false; |
4092 | goal_alt_number = -1; |
4093 | change_p = sec_mem_p = false; |
4094 | |
4095 | /* CALL_INSNs are not allowed to have any output reloads. */ |
4096 | if (CALL_P (curr_insn)) |
4097 | no_output_reloads_p = true; |
4098 | |
4099 | n_operands = curr_static_id->n_operands; |
4100 | n_alternatives = curr_static_id->n_alternatives; |
4101 | |
4102 | /* Just return "no reloads" if insn has no operands with |
4103 | constraints. */ |
4104 | if (n_operands == 0 || n_alternatives == 0) |
4105 | return false; |
4106 | |
4107 | max_regno_before = max_reg_num (); |
4108 | |
4109 | for (i = 0; i < n_operands; i++) |
4110 | { |
4111 | goal_alt_matched[i][0] = -1; |
4112 | goal_alt_matches[i] = -1; |
4113 | } |
4114 | |
4115 | commutative = curr_static_id->commutative; |
4116 | |
4117 | /* Now see what we need for pseudos that didn't get hard regs or got |
4118 | the wrong kind of hard reg. For this, we must consider all the |
4119 | operands together against the register constraints. */ |
4120 | |
4121 | best_losers = best_overall = INT_MAX; |
4122 | best_reload_sum = 0; |
4123 | |
4124 | curr_swapped = false; |
4125 | goal_alt_swapped = false; |
4126 | |
4127 | if (! check_only_p) |
    /* Make equivalence substitution and memory subreg elimination
       before address processing because address legitimacy can
       depend on the memory mode.  */
4131 | for (i = 0; i < n_operands; i++) |
4132 | { |
4133 | rtx op, subst, old; |
4134 | bool op_change_p = false; |
4135 | |
4136 | if (curr_static_id->operand[i].is_operator) |
4137 | continue; |
4138 | |
4139 | old = op = *curr_id->operand_loc[i]; |
4140 | if (GET_CODE (old) == SUBREG) |
4141 | old = SUBREG_REG (old); |
4142 | subst = get_equiv_with_elimination (x: old, insn: curr_insn); |
4143 | original_subreg_reg_mode[i] = VOIDmode; |
4144 | equiv_substition_p[i] = false; |
4145 | if (subst != old) |
4146 | { |
4147 | equiv_substition_p[i] = true; |
4148 | subst = copy_rtx (subst); |
4149 | lra_assert (REG_P (old)); |
4150 | if (GET_CODE (op) != SUBREG) |
4151 | *curr_id->operand_loc[i] = subst; |
4152 | else |
4153 | { |
4154 | SUBREG_REG (op) = subst; |
4155 | if (GET_MODE (subst) == VOIDmode) |
4156 | original_subreg_reg_mode[i] = GET_MODE (old); |
4157 | } |
4158 | if (lra_dump_file != NULL) |
4159 | { |
4160 | fprintf (stream: lra_dump_file, |
4161 | format: "Changing pseudo %d in operand %i of insn %u on equiv " , |
4162 | REGNO (old), i, INSN_UID (insn: curr_insn)); |
4163 | dump_value_slim (lra_dump_file, subst, 1); |
4164 | fprintf (stream: lra_dump_file, format: "\n" ); |
4165 | } |
4166 | op_change_p = change_p = true; |
4167 | } |
4168 | if (simplify_operand_subreg (nop: i, GET_MODE (old)) || op_change_p) |
4169 | { |
4170 | change_p = true; |
4171 | lra_update_dup (id: curr_id, nop: i); |
4172 | } |
4173 | } |
4174 | |
4175 | /* Reload address registers and displacements. We do it before |
4176 | finding an alternative because of memory constraints. */ |
4177 | before = after = NULL; |
4178 | for (i = 0; i < n_operands; i++) |
4179 | if (! curr_static_id->operand[i].is_operator |
4180 | && process_address (nop: i, check_only_p, before: &before, after: &after)) |
4181 | { |
4182 | if (check_only_p) |
4183 | return true; |
4184 | change_p = true; |
4185 | lra_update_dup (id: curr_id, nop: i); |
4186 | } |
4187 | |
4188 | if (change_p) |
4189 | /* If we've changed the instruction then any alternative that |
4190 | we chose previously may no longer be valid. */ |
4191 | lra_set_used_insn_alternative (curr_insn, LRA_UNKNOWN_ALT); |
4192 | |
4193 | if (! check_only_p && curr_insn_set != NULL_RTX |
4194 | && check_and_process_move (change_p: &change_p, sec_mem_p: &sec_mem_p)) |
4195 | return change_p; |
4196 | |
4197 | try_swapped: |
4198 | |
4199 | reused_alternative_num = check_only_p ? LRA_UNKNOWN_ALT : curr_id->used_insn_alternative; |
4200 | if (lra_dump_file != NULL && reused_alternative_num >= 0) |
4201 | fprintf (stream: lra_dump_file, format: "Reusing alternative %d for insn #%u\n" , |
4202 | reused_alternative_num, INSN_UID (insn: curr_insn)); |
4203 | |
4204 | if (process_alt_operands (only_alternative: reused_alternative_num)) |
4205 | alt_p = true; |
4206 | |
4207 | if (check_only_p) |
4208 | return ! alt_p || best_losers != 0; |
4209 | |
4210 | /* If insn is commutative (it's safe to exchange a certain pair of |
4211 | operands) then we need to try each alternative twice, the second |
4212 | time matching those two operands as if we had exchanged them. To |
4213 | do this, really exchange them in operands. |
4214 | |
4215 | If we have just tried the alternatives the second time, return |
4216 | operands to normal and drop through. */ |
4217 | |
4218 | if (reused_alternative_num < 0 && commutative >= 0) |
4219 | { |
4220 | curr_swapped = !curr_swapped; |
4221 | if (curr_swapped) |
4222 | { |
4223 | swap_operands (nop: commutative); |
4224 | goto try_swapped; |
4225 | } |
4226 | else |
4227 | swap_operands (nop: commutative); |
4228 | } |
4229 | |
4230 | if (! alt_p && ! sec_mem_p) |
4231 | { |
4232 | /* No alternative works with reloads?? */ |
4233 | if (INSN_CODE (curr_insn) >= 0) |
4234 | fatal_insn ("unable to generate reloads for:" , curr_insn); |
4235 | error_for_asm (curr_insn, |
4236 | "inconsistent operand constraints in an %<asm%>" ); |
4237 | lra_asm_error_p = true; |
4238 | if (! JUMP_P (curr_insn)) |
4239 | { |
4240 | /* Avoid further trouble with this insn. Don't generate use |
4241 | pattern here as we could use the insn SP offset. */ |
4242 | lra_set_insn_deleted (curr_insn); |
4243 | } |
4244 | else |
4245 | { |
4246 | lra_invalidate_insn_data (curr_insn); |
4247 | ira_nullify_asm_goto (insn: curr_insn); |
4248 | lra_update_insn_regno_info (curr_insn); |
4249 | } |
4250 | return true; |
4251 | } |
4252 | |
4253 | /* If the best alternative is with operands 1 and 2 swapped, swap |
4254 | them. Update the operand numbers of any reloads already |
4255 | pushed. */ |
4256 | |
4257 | if (goal_alt_swapped) |
4258 | { |
4259 | if (lra_dump_file != NULL) |
4260 | fprintf (stream: lra_dump_file, format: " Commutative operand exchange in insn %u\n" , |
4261 | INSN_UID (insn: curr_insn)); |
4262 | |
4263 | /* Swap the duplicates too. */ |
4264 | swap_operands (nop: commutative); |
4265 | change_p = true; |
4266 | } |
4267 | |
  /* Some targets define TARGET_SECONDARY_MEMORY_NEEDED (e.g. x86) too
     conservatively.  So we use secondary memory only if there is no
     alternative without reloads.  */
4271 | use_sec_mem_p = false; |
4272 | if (! alt_p) |
4273 | use_sec_mem_p = true; |
4274 | else if (sec_mem_p) |
4275 | { |
4276 | for (i = 0; i < n_operands; i++) |
4277 | if (! goal_alt_win[i] && ! goal_alt_match_win[i]) |
4278 | break; |
4279 | use_sec_mem_p = i < n_operands; |
4280 | } |
4281 | |
4282 | if (use_sec_mem_p) |
4283 | { |
4284 | int in = -1, out = -1; |
4285 | rtx new_reg, src, dest, rld; |
4286 | machine_mode sec_mode, rld_mode; |
4287 | |
4288 | lra_assert (curr_insn_set != NULL_RTX && sec_mem_p); |
4289 | dest = SET_DEST (curr_insn_set); |
4290 | src = SET_SRC (curr_insn_set); |
4291 | for (i = 0; i < n_operands; i++) |
4292 | if (*curr_id->operand_loc[i] == dest) |
4293 | out = i; |
4294 | else if (*curr_id->operand_loc[i] == src) |
4295 | in = i; |
4296 | for (i = 0; i < curr_static_id->n_dups; i++) |
4297 | if (out < 0 && *curr_id->dup_loc[i] == dest) |
4298 | out = curr_static_id->dup_num[i]; |
4299 | else if (in < 0 && *curr_id->dup_loc[i] == src) |
4300 | in = curr_static_id->dup_num[i]; |
4301 | lra_assert (out >= 0 && in >= 0 |
4302 | && curr_static_id->operand[out].type == OP_OUT |
4303 | && curr_static_id->operand[in].type == OP_IN); |
4304 | rld = partial_subreg_p (GET_MODE (src), GET_MODE (dest)) ? src : dest; |
4305 | rld_mode = GET_MODE (rld); |
4306 | sec_mode = targetm.secondary_memory_needed_mode (rld_mode); |
4307 | new_reg = lra_create_new_reg (sec_mode, NULL_RTX, NO_REGS, NULL, |
4308 | "secondary" ); |
4309 | /* If the mode is changed, it should be wider. */ |
4310 | lra_assert (!partial_subreg_p (sec_mode, rld_mode)); |
4311 | if (sec_mode != rld_mode) |
4312 | { |
4313 | /* If the target says specifically to use another mode for |
4314 | secondary memory moves we cannot reuse the original |
4315 | insn. */ |
4316 | after = emit_spill_move (to_p: false, mem_pseudo: new_reg, val: dest); |
4317 | lra_process_new_insns (curr_insn, NULL, after, |
4318 | "Inserting the sec. move" ); |
	  /* We may have a non-null BEFORE here (e.g. after address
	     processing).  */
4321 | push_to_sequence (before); |
4322 | before = emit_spill_move (to_p: true, mem_pseudo: new_reg, val: src); |
4323 | emit_insn (before); |
4324 | before = get_insns (); |
4325 | end_sequence (); |
4326 | lra_process_new_insns (curr_insn, before, NULL, "Changing on" ); |
4327 | lra_set_insn_deleted (curr_insn); |
4328 | } |
4329 | else if (dest == rld) |
4330 | { |
4331 | *curr_id->operand_loc[out] = new_reg; |
4332 | lra_update_dup (id: curr_id, nop: out); |
4333 | after = emit_spill_move (to_p: false, mem_pseudo: new_reg, val: dest); |
4334 | lra_process_new_insns (curr_insn, NULL, after, |
4335 | "Inserting the sec. move" ); |
4336 | } |
4337 | else |
4338 | { |
4339 | *curr_id->operand_loc[in] = new_reg; |
4340 | lra_update_dup (id: curr_id, nop: in); |
4341 | /* See comments above. */ |
4342 | push_to_sequence (before); |
4343 | before = emit_spill_move (to_p: true, mem_pseudo: new_reg, val: src); |
4344 | emit_insn (before); |
4345 | before = get_insns (); |
4346 | end_sequence (); |
4347 | lra_process_new_insns (curr_insn, before, NULL, |
4348 | "Inserting the sec. move" ); |
4349 | } |
4350 | lra_update_insn_regno_info (curr_insn); |
4351 | return true; |
4352 | } |
4353 | |
4354 | lra_assert (goal_alt_number >= 0); |
4355 | lra_set_used_insn_alternative (curr_insn, goal_reuse_alt_p |
4356 | ? goal_alt_number : LRA_UNKNOWN_ALT); |
4357 | |
4358 | if (lra_dump_file != NULL) |
4359 | { |
4360 | const char *p; |
4361 | |
4362 | fprintf (stream: lra_dump_file, format: " Choosing alt %d in insn %u:" , |
4363 | goal_alt_number, INSN_UID (insn: curr_insn)); |
4364 | print_curr_insn_alt (alt_number: goal_alt_number); |
4365 | if (INSN_CODE (curr_insn) >= 0 |
4366 | && (p = get_insn_name (INSN_CODE (curr_insn))) != NULL) |
4367 | fprintf (stream: lra_dump_file, format: " {%s}" , p); |
4368 | if (maybe_ne (a: curr_id->sp_offset, b: 0)) |
4369 | { |
4370 | fprintf (stream: lra_dump_file, format: " (sp_off=" ); |
4371 | print_dec (value: curr_id->sp_offset, file: lra_dump_file); |
4372 | fprintf (stream: lra_dump_file, format: ")" ); |
4373 | } |
4374 | fprintf (stream: lra_dump_file, format: "\n" ); |
4375 | } |
4376 | |
4377 | /* Right now, for any pair of operands I and J that are required to |
4378 | match, with J < I, goal_alt_matches[I] is J. Add I to |
4379 | goal_alt_matched[J]. */ |
4380 | |
4381 | for (i = 0; i < n_operands; i++) |
4382 | if ((j = goal_alt_matches[i]) >= 0) |
4383 | { |
4384 | for (k = 0; goal_alt_matched[j][k] >= 0; k++) |
4385 | ; |
4386 | /* We allow matching one output operand and several input |
4387 | operands. */ |
4388 | lra_assert (k == 0 |
4389 | || (curr_static_id->operand[j].type == OP_OUT |
4390 | && curr_static_id->operand[i].type == OP_IN |
4391 | && (curr_static_id->operand |
4392 | [goal_alt_matched[j][0]].type == OP_IN))); |
4393 | goal_alt_matched[j][k] = i; |
4394 | goal_alt_matched[j][k + 1] = -1; |
4395 | } |
4396 | |
4397 | for (i = 0; i < n_operands; i++) |
4398 | goal_alt_win[i] |= goal_alt_match_win[i]; |
4399 | |
4400 | /* Any constants that aren't allowed and can't be reloaded into |
4401 | registers are here changed into memory references. */ |
4402 | for (i = 0; i < n_operands; i++) |
4403 | if (goal_alt_win[i]) |
4404 | { |
4405 | int regno; |
4406 | enum reg_class new_class; |
4407 | rtx reg = *curr_id->operand_loc[i]; |
4408 | |
4409 | if (GET_CODE (reg) == SUBREG) |
4410 | reg = SUBREG_REG (reg); |
4411 | |
4412 | if (REG_P (reg) && (regno = REGNO (reg)) >= FIRST_PSEUDO_REGISTER) |
4413 | { |
4414 | bool ok_p = in_class_p (reg, cl: goal_alt[i], new_class: &new_class, allow_all_reload_class_changes_p: true); |
4415 | |
4416 | if (new_class != NO_REGS && get_reg_class (regno) != new_class) |
4417 | { |
4418 | lra_assert (ok_p); |
4419 | lra_change_class (regno, new_class, title: " Change to" , nl_p: true); |
4420 | } |
4421 | } |
4422 | } |
4423 | else |
4424 | { |
4425 | const char *constraint; |
4426 | char c; |
4427 | rtx op = *curr_id->operand_loc[i]; |
4428 | rtx subreg = NULL_RTX; |
4429 | machine_mode mode = curr_operand_mode[i]; |
4430 | |
4431 | if (GET_CODE (op) == SUBREG) |
4432 | { |
4433 | subreg = op; |
4434 | op = SUBREG_REG (op); |
4435 | mode = GET_MODE (op); |
4436 | } |
4437 | |
4438 | if (CONST_POOL_OK_P (mode, op) |
4439 | && ((targetm.preferred_reload_class |
4440 | (op, (enum reg_class) goal_alt[i]) == NO_REGS) |
4441 | || no_input_reloads_p)) |
4442 | { |
4443 | rtx tem = force_const_mem (mode, op); |
4444 | |
4445 | change_p = true; |
4446 | if (subreg != NULL_RTX) |
4447 | tem = gen_rtx_SUBREG (mode, tem, SUBREG_BYTE (subreg)); |
4448 | |
4449 | *curr_id->operand_loc[i] = tem; |
lra_update_dup (curr_id, i);
process_address (i, false, &before, &after);
4452 | |
4453 | /* If the alternative accepts constant pool refs directly |
4454 | there will be no reload needed at all. */ |
4455 | if (subreg != NULL_RTX) |
4456 | continue; |
4457 | /* Skip alternatives before the one requested. */ |
4458 | constraint = (curr_static_id->operand_alternative |
4459 | [goal_alt_number * n_operands + i].constraint); |
4460 | for (; |
4461 | (c = *constraint) && c != ',' && c != '#'; |
4462 | constraint += CONSTRAINT_LEN (c, constraint)) |
4463 | { |
enum constraint_num cn = lookup_constraint (constraint);
if ((insn_extra_memory_constraint (cn)
|| insn_extra_special_memory_constraint (cn)
|| insn_extra_relaxed_memory_constraint (cn))
&& satisfies_memory_constraint_p (tem, cn))
4469 | break; |
4470 | } |
4471 | if (c == '\0' || c == ',' || c == '#') |
4472 | continue; |
4473 | |
4474 | goal_alt_win[i] = true; |
4475 | } |
4476 | } |
4477 | |
4478 | n_outputs = 0; |
4479 | for (i = 0; i < n_operands; i++) |
4480 | if (curr_static_id->operand[i].type == OP_OUT) |
4481 | outputs[n_outputs++] = i; |
4482 | outputs[n_outputs] = -1; |
4483 | for (i = 0; i < n_operands; i++) |
4484 | { |
4485 | int regno; |
4486 | bool optional_p = false; |
4487 | rtx old, new_reg; |
4488 | rtx op = *curr_id->operand_loc[i]; |
4489 | |
4490 | if (goal_alt_win[i]) |
4491 | { |
4492 | if (goal_alt[i] == NO_REGS |
4493 | && REG_P (op) |
4494 | && (regno = REGNO (op)) >= FIRST_PSEUDO_REGISTER |
4495 | /* We assigned a hard register to the pseudo in the past but now |
4496 | decided to spill it for the insn. If the pseudo is used only |
4497 | in this insn, it is better to spill it here as we free hard |
4498 | registers for other pseudos referenced in the insn. The most |
4499 | common case of this is a scratch register which will be |
transformed back to scratch at the end of LRA. */
4501 | && bitmap_single_bit_set_p (&lra_reg_info[regno].insn_bitmap)) |
4502 | { |
4503 | if (lra_get_allocno_class (regno) != NO_REGS) |
lra_change_class (regno, NO_REGS, " Change to", true);
4505 | reg_renumber[regno] = -1; |
4506 | } |
4507 | /* We can do an optional reload. If the pseudo got a hard |
4508 | reg, we might improve the code through inheritance. If |
4509 | it does not get a hard register we coalesce memory/memory |
4510 | moves later. Ignore move insns to avoid cycling. */ |
4511 | if (! lra_simple_p |
4512 | && lra_undo_inheritance_iter < LRA_MAX_INHERITANCE_PASSES |
4513 | && goal_alt[i] != NO_REGS && REG_P (op) |
4514 | && (regno = REGNO (op)) >= FIRST_PSEUDO_REGISTER |
4515 | && regno < new_regno_start |
4516 | && ! ira_former_scratch_p (regno) |
4517 | && reg_renumber[regno] < 0 |
4518 | /* Check that the optional reload pseudo will be able to |
4519 | hold given mode value. */ |
4520 | && ! (prohibited_class_reg_set_mode_p |
4521 | (rclass: goal_alt[i], reg_class_contents[goal_alt[i]], |
4522 | PSEUDO_REGNO_MODE (regno))) |
4523 | && (curr_insn_set == NULL_RTX |
4524 | || !((REG_P (SET_SRC (curr_insn_set)) |
4525 | || MEM_P (SET_SRC (curr_insn_set)) |
4526 | || GET_CODE (SET_SRC (curr_insn_set)) == SUBREG) |
4527 | && (REG_P (SET_DEST (curr_insn_set)) |
4528 | || MEM_P (SET_DEST (curr_insn_set)) |
4529 | || GET_CODE (SET_DEST (curr_insn_set)) == SUBREG)))) |
4530 | optional_p = true; |
4531 | else if (goal_alt_matched[i][0] != -1 |
4532 | && curr_static_id->operand[i].type == OP_OUT |
4533 | && (curr_static_id->operand_alternative |
4534 | [goal_alt_number * n_operands + i].earlyclobber) |
4535 | && REG_P (op)) |
4536 | { |
4537 | for (j = 0; goal_alt_matched[i][j] != -1; j++) |
4538 | { |
4539 | rtx op2 = *curr_id->operand_loc[goal_alt_matched[i][j]]; |
4540 | |
4541 | if (REG_P (op2) && REGNO (op) != REGNO (op2)) |
4542 | break; |
4543 | } |
4544 | if (goal_alt_matched[i][j] != -1) |
4545 | { |
4546 | /* Generate reloads for different output and matched |
4547 | input registers. This is the easiest way to avoid |
4548 | creation of non-existing register conflicts in |
4549 | lra-lives.cc. */ |
match_reload (i, goal_alt_matched[i], outputs, goal_alt[i],
&goal_alt_exclude_start_hard_regs[i], &before,
&after, true);
4553 | } |
4554 | continue; |
4555 | } |
4556 | else |
4557 | { |
4558 | enum reg_class rclass, common_class; |
4559 | |
4560 | if (REG_P (op) && goal_alt[i] != NO_REGS |
4561 | && (regno = REGNO (op)) >= new_regno_start |
4562 | && (rclass = get_reg_class (regno)) == ALL_REGS |
4563 | && ((common_class = ira_reg_class_subset[rclass][goal_alt[i]]) |
4564 | != NO_REGS) |
4565 | && common_class != ALL_REGS |
4566 | && enough_allocatable_hard_regs_p (reg_class: common_class, |
4567 | GET_MODE (op))) |
4568 | /* Refine reload pseudo class from chosen alternative |
4569 | constraint. */ |
lra_change_class (regno, common_class, " Change to", true);
4571 | continue; |
4572 | } |
4573 | } |
4574 | |
4575 | /* Operands that match previous ones have already been handled. */ |
4576 | if (goal_alt_matches[i] >= 0) |
4577 | continue; |
4578 | |
/* We should not have an operand with a non-offsettable address
appearing where an offsettable address will do.  It may also be
the case that the address should be special, in other words
not a general one (e.g. it needs no index reg).  */
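/* A minimal sketch of what happens below: for an operand like
(mem:SI (plus:SI (reg r1) (reg r2))) chosen against an "o"-like
alternative, the whole address is moved into a new base register so
that the operand becomes (mem:SI (reg new)), which is offsettable;
auto-increment addresses are handled by emit_inc instead. */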
4583 | if (goal_alt_matched[i][0] == -1 && goal_alt_offmemok[i] && MEM_P (op)) |
4584 | { |
4585 | enum reg_class rclass; |
4586 | rtx *loc = &XEXP (op, 0); |
4587 | enum rtx_code code = GET_CODE (*loc); |
4588 | |
4589 | push_to_sequence (before); |
4590 | rclass = base_reg_class (GET_MODE (op), MEM_ADDR_SPACE (op), |
MEM, SCRATCH, curr_insn);
if (GET_RTX_CLASS (code) == RTX_AUTOINC)
new_reg = emit_inc (rclass, *loc,
/* This value does not matter for MODIFY. */
GET_MODE_SIZE (GET_MODE (op)));
else if (get_reload_reg (OP_IN, Pmode, *loc, rclass,
NULL, false,
"offsettable address", &new_reg))
4599 | { |
4600 | rtx addr = *loc; |
4601 | enum rtx_code code = GET_CODE (addr); |
4602 | bool align_p = false; |
4603 | |
4604 | if (code == AND && CONST_INT_P (XEXP (addr, 1))) |
4605 | { |
4606 | /* (and ... (const_int -X)) is used to align to X bytes. */ |
4607 | align_p = true; |
4608 | addr = XEXP (*loc, 0); |
4609 | } |
4610 | else |
4611 | addr = canonicalize_reload_addr (addr); |
4612 | |
4613 | lra_emit_move (new_reg, addr); |
4614 | if (align_p) |
4615 | emit_move_insn (new_reg, gen_rtx_AND (GET_MODE (new_reg), new_reg, XEXP (*loc, 1))); |
4616 | } |
4617 | before = get_insns (); |
4618 | end_sequence (); |
4619 | *loc = new_reg; |
lra_update_dup (curr_id, i);
4621 | } |
4622 | else if (goal_alt_matched[i][0] == -1) |
4623 | { |
4624 | machine_mode mode; |
4625 | rtx reg, *loc; |
4626 | int hard_regno; |
4627 | enum op_type type = curr_static_id->operand[i].type; |
4628 | |
4629 | loc = curr_id->operand_loc[i]; |
4630 | mode = curr_operand_mode[i]; |
4631 | if (GET_CODE (*loc) == SUBREG) |
4632 | { |
4633 | reg = SUBREG_REG (*loc); |
4634 | poly_int64 byte = SUBREG_BYTE (*loc); |
4635 | if (REG_P (reg) |
4636 | /* Strict_low_part requires reloading the register and not |
4637 | just the subreg. Likewise for a strict subreg no wider |
4638 | than a word for WORD_REGISTER_OPERATIONS targets. */ |
4639 | && (curr_static_id->operand[i].strict_low |
4640 | || (!paradoxical_subreg_p (outermode: mode, GET_MODE (reg)) |
4641 | && (hard_regno |
4642 | = get_try_hard_regno (REGNO (reg))) >= 0 |
4643 | && (simplify_subreg_regno |
4644 | (hard_regno, |
4645 | GET_MODE (reg), byte, mode) < 0) |
4646 | && (goal_alt[i] == NO_REGS |
4647 | || (simplify_subreg_regno |
4648 | (ira_class_hard_regs[goal_alt[i]][0], |
4649 | GET_MODE (reg), byte, mode) >= 0))) |
4650 | || (partial_subreg_p (outermode: mode, GET_MODE (reg)) |
4651 | && known_le (GET_MODE_SIZE (GET_MODE (reg)), |
4652 | UNITS_PER_WORD) |
4653 | && WORD_REGISTER_OPERATIONS)) |
4654 | /* Avoid the situation when there are no available hard regs |
4655 | for the pseudo mode but there are ones for the subreg |
4656 | mode: */ |
4657 | && !(goal_alt[i] != NO_REGS |
4658 | && REGNO (reg) >= FIRST_PSEUDO_REGISTER |
4659 | && (prohibited_class_reg_set_mode_p |
4660 | (rclass: goal_alt[i], reg_class_contents[goal_alt[i]], |
4661 | GET_MODE (reg))) |
4662 | && !(prohibited_class_reg_set_mode_p |
4663 | (rclass: goal_alt[i], reg_class_contents[goal_alt[i]], |
4664 | mode)))) |
4665 | { |
4666 | /* An OP_INOUT is required when reloading a subreg of a |
4667 | mode wider than a word to ensure that data beyond the |
4668 | word being reloaded is preserved. Also automatically |
4669 | ensure that strict_low_part reloads are made into |
4670 | OP_INOUT which should already be true from the backend |
4671 | constraints. */ |
4672 | if (type == OP_OUT |
4673 | && (curr_static_id->operand[i].strict_low |
4674 | || read_modify_subreg_p (*loc))) |
4675 | type = OP_INOUT; |
4676 | loc = &SUBREG_REG (*loc); |
4677 | mode = GET_MODE (*loc); |
4678 | } |
4679 | } |
4680 | old = *loc; |
if (get_reload_reg (type, mode, old, goal_alt[i],
&goal_alt_exclude_start_hard_regs[i],
loc != curr_id->operand_loc[i], "", &new_reg)
4684 | && type != OP_OUT) |
4685 | { |
4686 | push_to_sequence (before); |
4687 | lra_emit_move (new_reg, old); |
4688 | before = get_insns (); |
4689 | end_sequence (); |
4690 | } |
4691 | *loc = new_reg; |
4692 | if (type != OP_IN |
4693 | && find_reg_note (curr_insn, REG_UNUSED, old) == NULL_RTX) |
4694 | { |
4695 | start_sequence (); |
4696 | lra_emit_move (type == OP_INOUT ? copy_rtx (old) : old, new_reg); |
4697 | emit_insn (after); |
4698 | after = get_insns (); |
4699 | end_sequence (); |
4700 | *loc = new_reg; |
4701 | } |
4702 | for (j = 0; j < goal_alt_dont_inherit_ops_num; j++) |
4703 | if (goal_alt_dont_inherit_ops[j] == i) |
4704 | { |
4705 | lra_set_regno_unique_value (REGNO (new_reg)); |
4706 | break; |
4707 | } |
lra_update_dup (curr_id, i);
4709 | } |
4710 | else if (curr_static_id->operand[i].type == OP_IN |
4711 | && (curr_static_id->operand[goal_alt_matched[i][0]].type |
4712 | == OP_OUT |
4713 | || (curr_static_id->operand[goal_alt_matched[i][0]].type |
4714 | == OP_INOUT |
4715 | && (operands_match_p |
(*curr_id->operand_loc[i],
*curr_id->operand_loc[goal_alt_matched[i][0]],
-1)))))
4719 | { |
4720 | /* generate reloads for input and matched outputs. */ |
4721 | match_inputs[0] = i; |
4722 | match_inputs[1] = -1; |
match_reload (goal_alt_matched[i][0], match_inputs, outputs,
goal_alt[i], &goal_alt_exclude_start_hard_regs[i],
&before, &after,
curr_static_id->operand_alternative
[goal_alt_number * n_operands + goal_alt_matched[i][0]]
.earlyclobber);
4729 | } |
4730 | else if ((curr_static_id->operand[i].type == OP_OUT |
4731 | || (curr_static_id->operand[i].type == OP_INOUT |
4732 | && (operands_match_p |
(*curr_id->operand_loc[i],
*curr_id->operand_loc[goal_alt_matched[i][0]],
-1))))
4736 | && (curr_static_id->operand[goal_alt_matched[i][0]].type |
4737 | == OP_IN)) |
4738 | /* Generate reloads for output and matched inputs. */ |
match_reload (i, goal_alt_matched[i], outputs, goal_alt[i],
&goal_alt_exclude_start_hard_regs[i], &before, &after,
curr_static_id->operand_alternative
[goal_alt_number * n_operands + i].earlyclobber);
4743 | else if (curr_static_id->operand[i].type == OP_IN |
4744 | && (curr_static_id->operand[goal_alt_matched[i][0]].type |
4745 | == OP_IN)) |
4746 | { |
4747 | /* Generate reloads for matched inputs. */ |
4748 | match_inputs[0] = i; |
4749 | for (j = 0; (k = goal_alt_matched[i][j]) >= 0; j++) |
4750 | match_inputs[j + 1] = k; |
4751 | match_inputs[j + 1] = -1; |
match_reload (-1, match_inputs, outputs, goal_alt[i],
&goal_alt_exclude_start_hard_regs[i],
&before, &after, false);
4755 | } |
4756 | else |
4757 | /* We must generate code in any case when function |
4758 | process_alt_operands decides that it is possible. */ |
4759 | gcc_unreachable (); |
4760 | |
4761 | if (optional_p) |
4762 | { |
4763 | rtx reg = op; |
4764 | |
4765 | lra_assert (REG_P (reg)); |
4766 | regno = REGNO (reg); |
4767 | op = *curr_id->operand_loc[i]; /* Substitution. */ |
4768 | if (GET_CODE (op) == SUBREG) |
4769 | op = SUBREG_REG (op); |
4770 | gcc_assert (REG_P (op) && (int) REGNO (op) >= new_regno_start); |
4771 | bitmap_set_bit (&lra_optional_reload_pseudos, REGNO (op)); |
4772 | lra_reg_info[REGNO (op)].restore_rtx = reg; |
4773 | if (lra_dump_file != NULL) |
fprintf (lra_dump_file,
" Making reload reg %d for reg %d optional\n",
4776 | REGNO (op), regno); |
4777 | } |
4778 | } |
4779 | if (before != NULL_RTX || after != NULL_RTX |
4780 | || max_regno_before != max_reg_num ()) |
4781 | change_p = true; |
4782 | if (change_p) |
4783 | { |
lra_update_operator_dups (curr_id);
/* Something changed -- process the insn. */
lra_update_insn_regno_info (curr_insn);
if (asm_noperands (PATTERN (curr_insn)) >= 0
&& ++curr_id->asm_reloads_num >= FIRST_PSEUDO_REGISTER)
/* Most probably there are not enough registers to satisfy the asm insn: */
lra_asm_insn_error (curr_insn);
4791 | } |
4792 | if (goal_alt_out_sp_reload_p) |
4793 | { |
4794 | /* We have an output stack pointer reload -- update sp offset: */ |
4795 | rtx set; |
4796 | bool done_p = false; |
4797 | poly_int64 sp_offset = curr_id->sp_offset; |
4798 | for (rtx_insn *insn = after; insn != NULL_RTX; insn = NEXT_INSN (insn)) |
4799 | if ((set = single_set (insn)) != NULL_RTX |
4800 | && SET_DEST (set) == stack_pointer_rtx) |
4801 | { |
4802 | lra_assert (!done_p); |
4803 | done_p = true; |
4804 | curr_id->sp_offset = 0; |
4805 | lra_insn_recog_data_t id = lra_get_insn_recog_data (insn); |
4806 | id->sp_offset = sp_offset; |
4807 | if (lra_dump_file != NULL) |
fprintf (lra_dump_file,
" Moving sp offset from insn %u to %u\n",
INSN_UID (curr_insn), INSN_UID (insn));
4811 | } |
4812 | lra_assert (done_p); |
4813 | } |
4814 | lra_process_new_insns (curr_insn, before, after, "Inserting insn reload" ); |
4815 | return change_p; |
4816 | } |
4817 | |
4818 | /* Return true if INSN satisfies all constraints. In other words, no |
4819 | reload insns are needed. */ |
4820 | bool |
4821 | lra_constrain_insn (rtx_insn *insn) |
4822 | { |
4823 | int saved_new_regno_start = new_regno_start; |
4824 | int saved_new_insn_uid_start = new_insn_uid_start; |
4825 | bool change_p; |
4826 | |
4827 | curr_insn = insn; |
4828 | curr_id = lra_get_insn_recog_data (insn: curr_insn); |
4829 | curr_static_id = curr_id->insn_static_data; |
4830 | new_insn_uid_start = get_max_uid (); |
4831 | new_regno_start = max_reg_num (); |
4832 | change_p = curr_insn_transform (check_only_p: true); |
4833 | new_regno_start = saved_new_regno_start; |
4834 | new_insn_uid_start = saved_new_insn_uid_start; |
4835 | return ! change_p; |
4836 | } |
4837 | |
4838 | /* Return true if X is in LIST. */ |
4839 | static bool |
4840 | in_list_p (rtx x, rtx list) |
4841 | { |
4842 | for (; list != NULL_RTX; list = XEXP (list, 1)) |
4843 | if (XEXP (list, 0) == x) |
4844 | return true; |
4845 | return false; |
4846 | } |
4847 | |
4848 | /* Return true if X contains an allocatable hard register (if |
4849 | HARD_REG_P) or a (spilled if SPILLED_P) pseudo. */ |
4850 | static bool |
4851 | contains_reg_p (rtx x, bool hard_reg_p, bool spilled_p) |
4852 | { |
4853 | int i, j; |
4854 | const char *fmt; |
4855 | enum rtx_code code; |
4856 | |
4857 | code = GET_CODE (x); |
4858 | if (REG_P (x)) |
4859 | { |
4860 | int regno = REGNO (x); |
4861 | HARD_REG_SET alloc_regs; |
4862 | |
4863 | if (hard_reg_p) |
4864 | { |
4865 | if (regno >= FIRST_PSEUDO_REGISTER) |
4866 | regno = lra_get_regno_hard_regno (regno); |
4867 | if (regno < 0) |
4868 | return false; |
4869 | alloc_regs = ~lra_no_alloc_regs; |
4870 | return overlaps_hard_reg_set_p (regs: alloc_regs, GET_MODE (x), regno); |
4871 | } |
4872 | else |
4873 | { |
4874 | if (regno < FIRST_PSEUDO_REGISTER) |
4875 | return false; |
4876 | if (! spilled_p) |
4877 | return true; |
4878 | return lra_get_regno_hard_regno (regno) < 0; |
4879 | } |
4880 | } |
4881 | fmt = GET_RTX_FORMAT (code); |
4882 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
4883 | { |
4884 | if (fmt[i] == 'e') |
4885 | { |
4886 | if (contains_reg_p (XEXP (x, i), hard_reg_p, spilled_p)) |
4887 | return true; |
4888 | } |
4889 | else if (fmt[i] == 'E') |
4890 | { |
4891 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
4892 | if (contains_reg_p (XVECEXP (x, i, j), hard_reg_p, spilled_p)) |
4893 | return true; |
4894 | } |
4895 | } |
4896 | return false; |
4897 | } |
4898 | |
/* Process all regs in location *LOC and replace them with their
equivalent substitutions.  Return true if any change was done. */
4901 | static bool |
4902 | loc_equivalence_change_p (rtx *loc) |
4903 | { |
4904 | rtx subst, reg, x = *loc; |
4905 | bool result = false; |
4906 | enum rtx_code code = GET_CODE (x); |
4907 | const char *fmt; |
4908 | int i, j; |
4909 | |
4910 | if (code == SUBREG) |
4911 | { |
4912 | reg = SUBREG_REG (x); |
if ((subst = get_equiv_with_elimination (reg, curr_insn)) != reg
&& GET_MODE (subst) == VOIDmode)
{
/* We cannot reload a debug location.  Simplify the subreg here
while we know the inner mode. */
*loc = simplify_gen_subreg (GET_MODE (x), subst,
GET_MODE (reg), SUBREG_BYTE (x));
4920 | return true; |
4921 | } |
4922 | } |
4923 | if (code == REG && (subst = get_equiv_with_elimination (x, insn: curr_insn)) != x) |
4924 | { |
4925 | *loc = subst; |
4926 | return true; |
4927 | } |
4928 | |
4929 | /* Scan all the operand sub-expressions. */ |
4930 | fmt = GET_RTX_FORMAT (code); |
4931 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
4932 | { |
4933 | if (fmt[i] == 'e') |
4934 | result = loc_equivalence_change_p (loc: &XEXP (x, i)) || result; |
4935 | else if (fmt[i] == 'E') |
4936 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
4937 | result |
4938 | = loc_equivalence_change_p (loc: &XVECEXP (x, i, j)) || result; |
4939 | } |
4940 | return result; |
4941 | } |
4942 | |
/* Similar to loc_equivalence_change_p, but for use as a
simplify_replace_fn_rtx callback.  DATA is the insn for which the
elimination is done.  If it is null, we don't do the elimination. */
4946 | static rtx |
4947 | loc_equivalence_callback (rtx loc, const_rtx, void *data) |
4948 | { |
4949 | if (!REG_P (loc)) |
4950 | return NULL_RTX; |
4951 | |
rtx subst = (data == NULL
? get_equiv (loc) : get_equiv_with_elimination (loc, (rtx_insn *) data));
4954 | if (subst != loc) |
4955 | return subst; |
4956 | |
4957 | return NULL_RTX; |
4958 | } |
4959 | |
/* Maximum number of generated reload insns per insn.  It is used to
prevent this pass from cycling in the event of a bug. */
4962 | #define MAX_RELOAD_INSNS_NUMBER LRA_MAX_INSN_RELOADS |
4963 | |
4964 | /* The current iteration number of this LRA pass. */ |
4965 | int lra_constraint_iter; |
4966 | |
/* True if we should check assignment correctness for all pseudos
during the assignment sub-pass and spill some of them to fix
conflicts.  It can be necessary when we substitute an equivalence
which needs checking of register allocation correctness because the
equivalent value contains allocatable hard registers, or when we
restore a multi-register pseudo, or when we change the insn code and
an operand that was an IN operand becomes an INOUT one. */
4974 | bool check_and_force_assignment_correctness_p; |
4975 | |
4976 | /* Return true if REGNO is referenced in more than one block. */ |
4977 | static bool |
4978 | multi_block_pseudo_p (int regno) |
4979 | { |
4980 | basic_block bb = NULL; |
4981 | unsigned int uid; |
4982 | bitmap_iterator bi; |
4983 | |
4984 | if (regno < FIRST_PSEUDO_REGISTER) |
4985 | return false; |
4986 | |
4987 | EXECUTE_IF_SET_IN_BITMAP (&lra_reg_info[regno].insn_bitmap, 0, uid, bi) |
4988 | if (bb == NULL) |
bb = BLOCK_FOR_INSN (lra_insn_recog_data[uid]->insn);
else if (BLOCK_FOR_INSN (lra_insn_recog_data[uid]->insn) != bb)
4991 | return true; |
4992 | return false; |
4993 | } |
4994 | |
4995 | /* Return true if LIST contains a deleted insn. */ |
4996 | static bool |
4997 | contains_deleted_insn_p (rtx_insn_list *list) |
4998 | { |
4999 | for (; list != NULL_RTX; list = list->next ()) |
5000 | if (NOTE_P (list->insn ()) |
5001 | && NOTE_KIND (list->insn ()) == NOTE_INSN_DELETED) |
5002 | return true; |
5003 | return false; |
5004 | } |
5005 | |
5006 | /* Return true if X contains a pseudo dying in INSN. */ |
5007 | static bool |
5008 | dead_pseudo_p (rtx x, rtx_insn *insn) |
5009 | { |
5010 | int i, j; |
5011 | const char *fmt; |
5012 | enum rtx_code code; |
5013 | |
5014 | if (REG_P (x)) |
5015 | return (insn != NULL_RTX |
5016 | && find_regno_note (insn, REG_DEAD, REGNO (x)) != NULL_RTX); |
5017 | code = GET_CODE (x); |
5018 | fmt = GET_RTX_FORMAT (code); |
5019 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
5020 | { |
5021 | if (fmt[i] == 'e') |
5022 | { |
5023 | if (dead_pseudo_p (XEXP (x, i), insn)) |
5024 | return true; |
5025 | } |
5026 | else if (fmt[i] == 'E') |
5027 | { |
5028 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
5029 | if (dead_pseudo_p (XVECEXP (x, i, j), insn)) |
5030 | return true; |
5031 | } |
5032 | } |
5033 | return false; |
5034 | } |
5035 | |
/* Return true if INSN contains a dying pseudo in its right hand
side. */
5038 | static bool |
5039 | insn_rhs_dead_pseudo_p (rtx_insn *insn) |
5040 | { |
5041 | rtx set = single_set (insn); |
5042 | |
5043 | gcc_assert (set != NULL); |
5044 | return dead_pseudo_p (SET_SRC (set), insn); |
5045 | } |
5046 | |
/* Return true if any init insn of REGNO contains a dying pseudo in
its right hand side. */
5049 | static bool |
5050 | init_insn_rhs_dead_pseudo_p (int regno) |
5051 | { |
5052 | rtx_insn_list *insns = ira_reg_equiv[regno].init_insns; |
5053 | |
5054 | if (insns == NULL) |
5055 | return false; |
5056 | for (; insns != NULL_RTX; insns = insns->next ()) |
5057 | if (insn_rhs_dead_pseudo_p (insn: insns->insn ())) |
5058 | return true; |
5059 | return false; |
5060 | } |
5061 | |
/* Return TRUE if REGNO has a reverse equivalence.  The equivalence is
reverse only if we have one init insn with the given REGNO as a
source. */
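/* E.g. the single init insn being (set (mem:SI equiv_mem) (reg:SI REGNO))
makes the equivalence reverse: the pseudo is stored to its equivalent
memory rather than loaded from it. */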
5065 | static bool |
5066 | reverse_equiv_p (int regno) |
5067 | { |
5068 | rtx_insn_list *insns = ira_reg_equiv[regno].init_insns; |
5069 | rtx set; |
5070 | |
5071 | if (insns == NULL) |
5072 | return false; |
5073 | if (! INSN_P (insns->insn ()) |
5074 | || insns->next () != NULL) |
5075 | return false; |
5076 | if ((set = single_set (insn: insns->insn ())) == NULL_RTX) |
5077 | return false; |
5078 | return REG_P (SET_SRC (set)) && (int) REGNO (SET_SRC (set)) == regno; |
5079 | } |
5080 | |
5081 | /* Return TRUE if REGNO was reloaded in an equivalence init insn. We |
5082 | call this function only for non-reverse equivalence. */ |
5083 | static bool |
5084 | contains_reloaded_insn_p (int regno) |
5085 | { |
5086 | rtx set; |
5087 | rtx_insn_list *list = ira_reg_equiv[regno].init_insns; |
5088 | |
5089 | for (; list != NULL; list = list->next ()) |
5090 | if ((set = single_set (insn: list->insn ())) == NULL_RTX |
5091 | || ! REG_P (SET_DEST (set)) |
5092 | || (int) REGNO (SET_DEST (set)) != regno) |
5093 | return true; |
5094 | return false; |
5095 | } |
5096 | |
/* Try to combine the secondary memory reload insn FROM into insn TO.
FROM should be a load insn (usually a secondary memory reload insn).  Return
TRUE in case of success. */
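/* An illustrative sketch of the combination (the register classes in
brackets are assumptions for the example):

     FROM:  (set (reg:SI p1 [GENERAL_REGS]) (reg:SI p2 [NO_REGS]))
     TO:    ... uses p1 as an input ...

   If TO also accepts the memory pseudo p2 directly for that input, p1 is
   replaced by p2 in TO and the separate load FROM becomes unnecessary. */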
5100 | static bool |
5101 | combine_reload_insn (rtx_insn *from, rtx_insn *to) |
5102 | { |
5103 | bool ok_p; |
5104 | rtx_insn *saved_insn; |
5105 | rtx set, from_reg, to_reg, op; |
5106 | enum reg_class to_class, from_class; |
5107 | int n, nop; |
5108 | signed char changed_nops[MAX_RECOG_OPERANDS + 1]; |
5109 | |
/* Check the conditions for a secondary memory reload and the original insn: */
if ((targetm.secondary_memory_needed
== hook_bool_mode_reg_class_t_reg_class_t_false)
|| NEXT_INSN (from) != to
|| !NONDEBUG_INSN_P (to)
|| CALL_P (to))
return false;

lra_insn_recog_data_t id = lra_get_insn_recog_data (to);
struct lra_static_insn_data *static_id = id->insn_static_data;

if (id->used_insn_alternative == LRA_UNKNOWN_ALT
|| (set = single_set (from)) == NULL_RTX)
5123 | return false; |
5124 | from_reg = SET_DEST (set); |
5125 | to_reg = SET_SRC (set); |
5126 | /* Ignore optional reloads: */ |
5127 | if (! REG_P (from_reg) || ! REG_P (to_reg) |
5128 | || bitmap_bit_p (&lra_optional_reload_pseudos, REGNO (from_reg))) |
5129 | return false; |
5130 | to_class = lra_get_allocno_class (REGNO (to_reg)); |
5131 | from_class = lra_get_allocno_class (REGNO (from_reg)); |
5132 | /* Check that reload insn is a load: */ |
5133 | if (to_class != NO_REGS || from_class == NO_REGS) |
5134 | return false; |
5135 | for (n = nop = 0; nop < static_id->n_operands; nop++) |
5136 | { |
5137 | if (static_id->operand[nop].type != OP_IN) |
5138 | continue; |
5139 | op = *id->operand_loc[nop]; |
5140 | if (!REG_P (op) || REGNO (op) != REGNO (from_reg)) |
5141 | continue; |
5142 | *id->operand_loc[nop] = to_reg; |
5143 | changed_nops[n++] = nop; |
5144 | } |
5145 | changed_nops[n] = -1; |
5146 | lra_update_dups (id, changed_nops); |
5147 | lra_update_insn_regno_info (to); |
5148 | ok_p = recog_memoized (insn: to) >= 0; |
5149 | if (ok_p) |
5150 | { |
/* Check that the combined insn does not need any reloads: */
saved_insn = curr_insn;
curr_insn = to;
curr_id = lra_get_insn_recog_data (curr_insn);
curr_static_id = curr_id->insn_static_data;
for (bool swapped_p = false;;)
{
ok_p = !curr_insn_transform (true);
if (ok_p || curr_static_id->commutative < 0)
break;
swap_operands (curr_static_id->commutative);
5162 | if (lra_dump_file != NULL) |
5163 | { |
fprintf (lra_dump_file,
" Swapping %scombined insn operands:\n",
swapped_p ? "back " : "");
5167 | dump_insn_slim (lra_dump_file, to); |
5168 | } |
5169 | if (swapped_p) |
5170 | break; |
5171 | swapped_p = true; |
5172 | } |
5173 | curr_insn = saved_insn; |
5174 | curr_id = lra_get_insn_recog_data (insn: curr_insn); |
5175 | curr_static_id = curr_id->insn_static_data; |
5176 | } |
5177 | if (ok_p) |
5178 | { |
5179 | id->used_insn_alternative = -1; |
5180 | lra_push_insn_and_update_insn_regno_info (to); |
5181 | if (lra_dump_file != NULL) |
5182 | { |
5183 | fprintf (stream: lra_dump_file, format: " Use combined insn:\n" ); |
5184 | dump_insn_slim (lra_dump_file, to); |
5185 | } |
5186 | return true; |
5187 | } |
5188 | if (lra_dump_file != NULL) |
5189 | { |
5190 | fprintf (stream: lra_dump_file, format: " Failed combined insn:\n" ); |
5191 | dump_insn_slim (lra_dump_file, to); |
5192 | } |
5193 | for (int i = 0; i < n; i++) |
5194 | { |
5195 | nop = changed_nops[i]; |
5196 | *id->operand_loc[nop] = from_reg; |
5197 | } |
5198 | lra_update_dups (id, changed_nops); |
5199 | lra_update_insn_regno_info (to); |
5200 | if (lra_dump_file != NULL) |
5201 | { |
5202 | fprintf (stream: lra_dump_file, format: " Restoring insn after failed combining:\n" ); |
5203 | dump_insn_slim (lra_dump_file, to); |
5204 | } |
5205 | return false; |
5206 | } |
5207 | |
5208 | /* Entry function of LRA constraint pass. Return true if the |
5209 | constraint pass did change the code. */ |
5210 | bool |
5211 | lra_constraints (bool first_p) |
5212 | { |
5213 | bool changed_p; |
5214 | int i, hard_regno, new_insns_num; |
5215 | unsigned int min_len, new_min_len, uid; |
5216 | rtx set, x, reg, nosubreg_dest; |
5217 | rtx_insn *original_insn; |
5218 | basic_block last_bb; |
5219 | bitmap_iterator bi; |
5220 | |
5221 | lra_constraint_iter++; |
5222 | if (lra_dump_file != NULL) |
5223 | fprintf (stream: lra_dump_file, format: "\n********** Local #%d: **********\n\n" , |
5224 | lra_constraint_iter); |
5225 | changed_p = false; |
5226 | if (pic_offset_table_rtx |
5227 | && REGNO (pic_offset_table_rtx) >= FIRST_PSEUDO_REGISTER) |
5228 | check_and_force_assignment_correctness_p = true; |
5229 | else if (first_p) |
/* On the first iteration we should check IRA assignment
correctness.  In rare cases, the assignments can be wrong as
early clobber operands are ignored in IRA or usages of
paradoxical sub-registers are not taken into account by
IRA. */
5235 | check_and_force_assignment_correctness_p = true; |
5236 | new_insn_uid_start = get_max_uid (); |
5237 | new_regno_start = first_p ? lra_constraint_new_regno_start : max_reg_num (); |
/* Mark used hard regs for target stack size calculations. */
5239 | for (i = FIRST_PSEUDO_REGISTER; i < new_regno_start; i++) |
5240 | if (lra_reg_info[i].nrefs != 0 |
5241 | && (hard_regno = lra_get_regno_hard_regno (regno: i)) >= 0) |
5242 | { |
5243 | int j, nregs; |
5244 | |
5245 | nregs = hard_regno_nregs (regno: hard_regno, mode: lra_reg_info[i].biggest_mode); |
5246 | for (j = 0; j < nregs; j++) |
5247 | df_set_regs_ever_live (hard_regno + j, true); |
5248 | } |
5249 | /* Do elimination before the equivalence processing as we can spill |
5250 | some pseudos during elimination. */ |
5251 | lra_eliminate (false, first_p); |
5252 | auto_bitmap equiv_insn_bitmap (®_obstack); |
5253 | for (i = FIRST_PSEUDO_REGISTER; i < new_regno_start; i++) |
5254 | if (lra_reg_info[i].nrefs != 0) |
5255 | { |
5256 | ira_reg_equiv[i].profitable_p = true; |
5257 | reg = regno_reg_rtx[i]; |
5258 | if (lra_get_regno_hard_regno (regno: i) < 0 && (x = get_equiv (x: reg)) != reg) |
5259 | { |
5260 | bool pseudo_p = contains_reg_p (x, hard_reg_p: false, spilled_p: false); |
5261 | |
5262 | /* After RTL transformation, we cannot guarantee that |
5263 | pseudo in the substitution was not reloaded which might |
5264 | make equivalence invalid. For example, in reverse |
5265 | equiv of p0 |
5266 | |
5267 | p0 <- ... |
5268 | ... |
5269 | equiv_mem <- p0 |
5270 | |
5271 | the memory address register was reloaded before the 2nd |
5272 | insn. */ |
5273 | if ((! first_p && pseudo_p) |
/* We don't use DF for the sake of compilation speed.  So it
is problematic to update live info when we use an
equivalence containing pseudos in more than one
BB. */
5278 | || (pseudo_p && multi_block_pseudo_p (regno: i)) |
5279 | /* If an init insn was deleted for some reason, cancel |
5280 | the equiv. We could update the equiv insns after |
5281 | transformations including an equiv insn deletion |
5282 | but it is not worthy as such cases are extremely |
5283 | rare. */ |
5284 | || contains_deleted_insn_p (list: ira_reg_equiv[i].init_insns) |
5285 | /* If it is not a reverse equivalence, we check that a |
5286 | pseudo in rhs of the init insn is not dying in the |
5287 | insn. Otherwise, the live info at the beginning of |
5288 | the corresponding BB might be wrong after we |
5289 | removed the insn. When the equiv can be a |
5290 | constant, the right hand side of the init insn can |
5291 | be a pseudo. */ |
5292 | || (! reverse_equiv_p (regno: i) |
5293 | && (init_insn_rhs_dead_pseudo_p (regno: i) |
5294 | /* If we reloaded the pseudo in an equivalence |
5295 | init insn, we cannot remove the equiv init |
5296 | insns and the init insns might write into |
5297 | const memory in this case. */ |
5298 | || contains_reloaded_insn_p (regno: i))) |
5299 | /* Prevent access beyond equivalent memory for |
5300 | paradoxical subregs. */ |
5301 | || (MEM_P (x) |
5302 | && maybe_gt (GET_MODE_SIZE (lra_reg_info[i].biggest_mode), |
5303 | GET_MODE_SIZE (GET_MODE (x)))) |
5304 | || (pic_offset_table_rtx |
5305 | && ((CONST_POOL_OK_P (PSEUDO_REGNO_MODE (i), x) |
5306 | && (targetm.preferred_reload_class |
5307 | (x, lra_get_allocno_class (regno: i)) == NO_REGS)) |
5308 | || contains_symbol_ref_p (x)))) |
5309 | ira_reg_equiv[i].defined_p |
5310 | = ira_reg_equiv[i].caller_save_p = false; |
5311 | if (contains_reg_p (x, hard_reg_p: false, spilled_p: true)) |
5312 | ira_reg_equiv[i].profitable_p = false; |
5313 | if (get_equiv (x: reg) != reg) |
5314 | bitmap_ior_into (equiv_insn_bitmap, &lra_reg_info[i].insn_bitmap); |
5315 | } |
5316 | } |
5317 | for (i = FIRST_PSEUDO_REGISTER; i < new_regno_start; i++) |
5318 | update_equiv (regno: i); |
5319 | /* We should add all insns containing pseudos which should be |
5320 | substituted by their equivalences. */ |
5321 | EXECUTE_IF_SET_IN_BITMAP (equiv_insn_bitmap, 0, uid, bi) |
5322 | lra_push_insn_by_uid (uid); |
5323 | min_len = lra_insn_stack_length (); |
5324 | new_insns_num = 0; |
5325 | last_bb = NULL; |
5326 | changed_p = false; |
5327 | original_insn = NULL; |
5328 | while ((new_min_len = lra_insn_stack_length ()) != 0) |
5329 | { |
5330 | curr_insn = lra_pop_insn (); |
5331 | --new_min_len; |
5332 | curr_bb = BLOCK_FOR_INSN (insn: curr_insn); |
5333 | if (curr_bb != last_bb) |
5334 | { |
5335 | last_bb = curr_bb; |
5336 | bb_reload_num = lra_curr_reload_num; |
5337 | } |
5338 | if (min_len > new_min_len) |
5339 | { |
5340 | min_len = new_min_len; |
5341 | new_insns_num = 0; |
5342 | original_insn = curr_insn; |
5343 | } |
5344 | else if (combine_reload_insn (from: curr_insn, to: original_insn)) |
5345 | { |
5346 | continue; |
5347 | } |
5348 | if (new_insns_num > MAX_RELOAD_INSNS_NUMBER) |
5349 | internal_error |
5350 | ("maximum number of generated reload insns per insn achieved (%d)" , |
5351 | MAX_RELOAD_INSNS_NUMBER); |
5352 | new_insns_num++; |
5353 | if (DEBUG_INSN_P (curr_insn)) |
5354 | { |
5355 | /* We need to check equivalence in debug insn and change |
5356 | pseudo to the equivalent value if necessary. */ |
5357 | curr_id = lra_get_insn_recog_data (insn: curr_insn); |
5358 | if (bitmap_bit_p (equiv_insn_bitmap, INSN_UID (insn: curr_insn))) |
5359 | { |
5360 | rtx old = *curr_id->operand_loc[0]; |
5361 | *curr_id->operand_loc[0] |
5362 | = simplify_replace_fn_rtx (old, NULL_RTX, |
5363 | fn: loc_equivalence_callback, curr_insn); |
5364 | if (old != *curr_id->operand_loc[0]) |
5365 | { |
/* If we substitute the pseudo with a shared equivalence, we can
fail to update LRA reg info and this can result in many
unexpected consequences.  So keep the rtl unshared: */
5369 | *curr_id->operand_loc[0] |
5370 | = copy_rtx (*curr_id->operand_loc[0]); |
5371 | lra_update_insn_regno_info (curr_insn); |
5372 | changed_p = true; |
5373 | } |
5374 | } |
5375 | } |
5376 | else if (INSN_P (curr_insn)) |
5377 | { |
5378 | if ((set = single_set (insn: curr_insn)) != NULL_RTX) |
5379 | { |
5380 | nosubreg_dest = SET_DEST (set); |
5381 | /* The equivalence pseudo could be set up as SUBREG in a |
5382 | case when it is a call restore insn in a mode |
5383 | different from the pseudo mode. */ |
5384 | if (GET_CODE (nosubreg_dest) == SUBREG) |
5385 | nosubreg_dest = SUBREG_REG (nosubreg_dest); |
5386 | if ((REG_P (nosubreg_dest) |
5387 | && (x = get_equiv (x: nosubreg_dest)) != nosubreg_dest |
5388 | /* Remove insns which set up a pseudo whose value |
5389 | cannot be changed. Such insns might be not in |
5390 | init_insns because we don't update equiv data |
5391 | during insn transformations. |
5392 | |
As an example, let us suppose that a pseudo got a
hard register and on the 1st pass was not
changed to an equivalent constant.  We generate an
additional insn setting up the pseudo because of
secondary memory movement.  Then the pseudo is
spilled and we use the equiv constant.  In this
case we should remove the additional insn and
this insn is not in the init_insns list. */
5401 | && (! MEM_P (x) || MEM_READONLY_P (x) |
5402 | /* Check that this is actually an insn setting |
5403 | up the equivalence. */ |
5404 | || in_list_p (x: curr_insn, |
5405 | list: ira_reg_equiv |
5406 | [REGNO (nosubreg_dest)].init_insns))) |
5407 | || (((x = get_equiv (SET_SRC (set))) != SET_SRC (set)) |
5408 | && in_list_p (x: curr_insn, |
5409 | list: ira_reg_equiv |
5410 | [REGNO (SET_SRC (set))].init_insns) |
5411 | /* This is a reverse equivalence to memory (see ira.cc) |
5412 | in store insn. We can reload all the destination and |
5413 | have an output reload which is a store to memory. If |
5414 | we just remove the insn, we will have the output |
5415 | reload storing an undefined value to the memory. |
5416 | Check that we did not reload the memory to prevent a |
5417 | wrong code generation. We could implement using the |
5418 | equivalence still in such case but doing this is not |
5419 | worth the efforts as such case is very rare. */ |
5420 | && MEM_P (nosubreg_dest))) |
5421 | { |
5422 | /* This is equiv init insn of pseudo which did not get a |
5423 | hard register -- remove the insn. */ |
5424 | if (lra_dump_file != NULL) |
5425 | { |
5426 | fprintf (stream: lra_dump_file, |
5427 | format: " Removing equiv init insn %i (freq=%d)\n" , |
5428 | INSN_UID (insn: curr_insn), |
5429 | REG_FREQ_FROM_BB (BLOCK_FOR_INSN (curr_insn))); |
5430 | dump_insn_slim (lra_dump_file, curr_insn); |
5431 | } |
5432 | if (contains_reg_p (x, hard_reg_p: true, spilled_p: false)) |
5433 | check_and_force_assignment_correctness_p = true; |
5434 | lra_set_insn_deleted (curr_insn); |
5435 | continue; |
5436 | } |
5437 | } |
5438 | curr_id = lra_get_insn_recog_data (insn: curr_insn); |
5439 | curr_static_id = curr_id->insn_static_data; |
5440 | init_curr_insn_input_reloads (); |
5441 | init_curr_operand_mode (); |
5442 | if (curr_insn_transform (check_only_p: false)) |
5443 | changed_p = true; |
5444 | /* Check non-transformed insns too for equiv change as USE |
5445 | or CLOBBER don't need reloads but can contain pseudos |
5446 | being changed on their equivalences. */ |
5447 | else if (bitmap_bit_p (equiv_insn_bitmap, INSN_UID (insn: curr_insn)) |
5448 | && loc_equivalence_change_p (loc: &PATTERN (insn: curr_insn))) |
5449 | { |
5450 | lra_update_insn_regno_info (curr_insn); |
5451 | changed_p = true; |
5452 | } |
5453 | } |
5454 | } |
5455 | |
5456 | /* If we used a new hard regno, changed_p should be true because the |
5457 | hard reg is assigned to a new pseudo. */ |
5458 | if (flag_checking && !changed_p) |
5459 | { |
5460 | for (i = FIRST_PSEUDO_REGISTER; i < new_regno_start; i++) |
5461 | if (lra_reg_info[i].nrefs != 0 |
5462 | && (hard_regno = lra_get_regno_hard_regno (regno: i)) >= 0) |
5463 | { |
5464 | int j, nregs = hard_regno_nregs (regno: hard_regno, |
5465 | PSEUDO_REGNO_MODE (i)); |
5466 | |
5467 | for (j = 0; j < nregs; j++) |
5468 | lra_assert (df_regs_ever_live_p (hard_regno + j)); |
5469 | } |
5470 | } |
5471 | if (changed_p) |
5472 | lra_dump_insns_if_possible (title: "changed func after local" ); |
5473 | return changed_p; |
5474 | } |
5475 | |
5476 | static void initiate_invariants (void); |
5477 | static void finish_invariants (void); |
5478 | |
5479 | /* Initiate the LRA constraint pass. It is done once per |
5480 | function. */ |
5481 | void |
5482 | lra_constraints_init (void) |
5483 | { |
5484 | initiate_invariants (); |
5485 | } |
5486 | |
5487 | /* Finalize the LRA constraint pass. It is done once per |
5488 | function. */ |
5489 | void |
5490 | lra_constraints_finish (void) |
5491 | { |
5492 | finish_invariants (); |
5493 | } |
5494 | |
5495 | |
5496 | |
/* The structure describes invariants for inheritance. */
5498 | struct lra_invariant |
5499 | { |
5500 | /* The order number of the invariant. */ |
5501 | int num; |
5502 | /* The invariant RTX. */ |
5503 | rtx invariant_rtx; |
5504 | /* The origin insn of the invariant. */ |
5505 | rtx_insn *insn; |
5506 | }; |
5507 | |
5508 | typedef lra_invariant invariant_t; |
5509 | typedef invariant_t *invariant_ptr_t; |
5510 | typedef const invariant_t *const_invariant_ptr_t; |
5511 | |
5512 | /* Pointer to the inheritance invariants. */ |
5513 | static vec<invariant_ptr_t> invariants; |
5514 | |
5515 | /* Allocation pool for the invariants. */ |
5516 | static object_allocator<lra_invariant> *invariants_pool; |
5517 | |
5518 | /* Hash table for the invariants. */ |
5519 | static htab_t invariant_table; |
5520 | |
5521 | /* Hash function for INVARIANT. */ |
5522 | static hashval_t |
5523 | invariant_hash (const void *invariant) |
5524 | { |
5525 | rtx inv = ((const_invariant_ptr_t) invariant)->invariant_rtx; |
5526 | return lra_rtx_hash (x: inv); |
5527 | } |
5528 | |
5529 | /* Equal function for invariants INVARIANT1 and INVARIANT2. */ |
5530 | static int |
5531 | invariant_eq_p (const void *invariant1, const void *invariant2) |
5532 | { |
5533 | rtx inv1 = ((const_invariant_ptr_t) invariant1)->invariant_rtx; |
5534 | rtx inv2 = ((const_invariant_ptr_t) invariant2)->invariant_rtx; |
5535 | |
5536 | return rtx_equal_p (inv1, inv2); |
5537 | } |
5538 | |
5539 | /* Insert INVARIANT_RTX into the table if it is not there yet. Return |
5540 | invariant which is in the table. */ |
5541 | static invariant_ptr_t |
5542 | insert_invariant (rtx invariant_rtx) |
5543 | { |
5544 | void **entry_ptr; |
5545 | invariant_t invariant; |
5546 | invariant_ptr_t invariant_ptr; |
5547 | |
5548 | invariant.invariant_rtx = invariant_rtx; |
5549 | entry_ptr = htab_find_slot (invariant_table, &invariant, INSERT); |
5550 | if (*entry_ptr == NULL) |
5551 | { |
5552 | invariant_ptr = invariants_pool->allocate (); |
5553 | invariant_ptr->invariant_rtx = invariant_rtx; |
5554 | invariant_ptr->insn = NULL; |
5555 | invariants.safe_push (obj: invariant_ptr); |
5556 | *entry_ptr = (void *) invariant_ptr; |
5557 | } |
5558 | return (invariant_ptr_t) *entry_ptr; |
5559 | } |
5560 | |
5561 | /* Initiate the invariant table. */ |
5562 | static void |
5563 | initiate_invariants (void) |
5564 | { |
5565 | invariants.create (nelems: 100); |
5566 | invariants_pool |
5567 | = new object_allocator<lra_invariant> ("Inheritance invariants" ); |
5568 | invariant_table = htab_create (100, invariant_hash, invariant_eq_p, NULL); |
5569 | } |
5570 | |
5571 | /* Finish the invariant table. */ |
5572 | static void |
5573 | finish_invariants (void) |
5574 | { |
5575 | htab_delete (invariant_table); |
5576 | delete invariants_pool; |
5577 | invariants.release (); |
5578 | } |
5579 | |
5580 | /* Make the invariant table empty. */ |
5581 | static void |
5582 | clear_invariants (void) |
5583 | { |
5584 | htab_empty (invariant_table); |
5585 | invariants_pool->release (); |
5586 | invariants.truncate (size: 0); |
5587 | } |
5588 | |
5589 | |
5590 | |
5591 | /* This page contains code to do inheritance/split |
5592 | transformations. */ |
5593 | |
5594 | /* Number of reloads passed so far in current EBB. */ |
5595 | static int reloads_num; |
5596 | |
5597 | /* Number of calls passed so far in current EBB. */ |
5598 | static int calls_num; |
5599 | |
/* Index ID is the CALLS_NUM associated with the last call we saw
with ABI identifier ID. */
5602 | static int last_call_for_abi[NUM_ABI_IDS]; |
5603 | |
5604 | /* Which registers have been fully or partially clobbered by a call |
5605 | since they were last used. */ |
5606 | static HARD_REG_SET full_and_partial_call_clobbers; |
5607 | |
5608 | /* Current reload pseudo check for validity of elements in |
5609 | USAGE_INSNS. */ |
5610 | static int curr_usage_insns_check; |
5611 | |
5612 | /* Info about last usage of registers in EBB to do inheritance/split |
5613 | transformation. Inheritance transformation is done from a spilled |
5614 | pseudo and split transformations from a hard register or a pseudo |
5615 | assigned to a hard register. */ |
5616 | struct usage_insns |
5617 | { |
/* If the value is equal to CURR_USAGE_INSNS_CHECK, then the member
value INSNS is valid.  INSNS is a chain of optional debug insns
finished by a non-debug insn using the corresponding reg.  The
value is also used to mark the registers which are set up in the
current insn.  The negated insn uid is used for this. */
5623 | int check; |
5624 | /* Value of global reloads_num at the last insn in INSNS. */ |
5625 | int reloads_num; |
/* Value of global calls_num at the last insn in INSNS. */
5627 | int calls_num; |
5628 | /* It can be true only for splitting. And it means that the restore |
5629 | insn should be put after insn given by the following member. */ |
5630 | bool after_p; |
5631 | /* Next insns in the current EBB which use the original reg and the |
5632 | original reg value is not changed between the current insn and |
the next insns.  In other words, e.g. for inheritance, if we need
5634 | to use the original reg value again in the next insns we can try |
5635 | to use the value in a hard register from a reload insn of the |
5636 | current insn. */ |
5637 | rtx insns; |
5638 | }; |
5639 | |
5640 | /* Map: regno -> corresponding pseudo usage insns. */ |
5641 | static struct usage_insns *usage_insns; |
5642 | |
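/* Start a new usage chain for REGNO: record INSN as its next usage insn
together with RELOADS_NUM, the current calls_num, and AFTER_P.  If
REGNO is a pseudo assigned to a hard register, also remove that hard
register from FULL_AND_PARTIAL_CALL_CLOBBERS, as the register is used
again after any preceding calls. */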
5643 | static void |
5644 | setup_next_usage_insn (int regno, rtx insn, int reloads_num, bool after_p) |
5645 | { |
5646 | usage_insns[regno].check = curr_usage_insns_check; |
5647 | usage_insns[regno].insns = insn; |
5648 | usage_insns[regno].reloads_num = reloads_num; |
5649 | usage_insns[regno].calls_num = calls_num; |
5650 | usage_insns[regno].after_p = after_p; |
5651 | if (regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] >= 0) |
5652 | remove_from_hard_reg_set (regs: &full_and_partial_call_clobbers, |
5653 | PSEUDO_REGNO_MODE (regno), |
5654 | regno: reg_renumber[regno]); |
5655 | } |
5656 | |
/* The function is used to form the list of REGNO usages, which consists
of optional debug insns finished by a non-debug insn using REGNO.
RELOADS_NUM is the current number of reload insns processed so far. */
5660 | static void |
5661 | add_next_usage_insn (int regno, rtx_insn *insn, int reloads_num) |
5662 | { |
5663 | rtx next_usage_insns; |
5664 | |
5665 | if (usage_insns[regno].check == curr_usage_insns_check |
5666 | && (next_usage_insns = usage_insns[regno].insns) != NULL_RTX |
5667 | && DEBUG_INSN_P (insn)) |
5668 | { |
5669 | /* Check that we did not add the debug insn yet. */ |
5670 | if (next_usage_insns != insn |
5671 | && (GET_CODE (next_usage_insns) != INSN_LIST |
5672 | || XEXP (next_usage_insns, 0) != insn)) |
5673 | usage_insns[regno].insns = gen_rtx_INSN_LIST (VOIDmode, insn, |
5674 | next_usage_insns); |
5675 | } |
5676 | else if (NONDEBUG_INSN_P (insn)) |
5677 | setup_next_usage_insn (regno, insn, reloads_num, after_p: false); |
5678 | else |
5679 | usage_insns[regno].check = 0; |
5680 | } |
5681 | |
5682 | /* Return first non-debug insn in list USAGE_INSNS. */ |
5683 | static rtx_insn * |
5684 | skip_usage_debug_insns (rtx usage_insns) |
5685 | { |
5686 | rtx insn; |
5687 | |
5688 | /* Skip debug insns. */ |
5689 | for (insn = usage_insns; |
5690 | insn != NULL_RTX && GET_CODE (insn) == INSN_LIST; |
5691 | insn = XEXP (insn, 1)) |
5692 | ; |
5693 | return safe_as_a <rtx_insn *> (p: insn); |
5694 | } |
5695 | |
/* Return true if we need secondary memory moves for the insn in
USAGE_INSNS after inserting an inherited pseudo of class INHER_CL
into the insn. */
5699 | static bool |
5700 | check_secondary_memory_needed_p (enum reg_class inher_cl ATTRIBUTE_UNUSED, |
5701 | rtx usage_insns ATTRIBUTE_UNUSED) |
5702 | { |
5703 | rtx_insn *insn; |
5704 | rtx set, dest; |
5705 | enum reg_class cl; |
5706 | |
5707 | if (inher_cl == ALL_REGS |
5708 | || (insn = skip_usage_debug_insns (usage_insns)) == NULL_RTX) |
5709 | return false; |
5710 | lra_assert (INSN_P (insn)); |
5711 | if ((set = single_set (insn)) == NULL_RTX || ! REG_P (SET_DEST (set))) |
5712 | return false; |
5713 | dest = SET_DEST (set); |
5714 | if (! REG_P (dest)) |
5715 | return false; |
5716 | lra_assert (inher_cl != NO_REGS); |
5717 | cl = get_reg_class (REGNO (dest)); |
5718 | return (cl != NO_REGS && cl != ALL_REGS |
5719 | && targetm.secondary_memory_needed (GET_MODE (dest), inher_cl, cl)); |
5720 | } |
5721 | |
5722 | /* Registers involved in inheritance/split in the current EBB |
5723 | (inheritance/split pseudos and original registers). */ |
5724 | static bitmap_head check_only_regs; |
5725 | |
/* Reload pseudos cannot be involved in invariant inheritance in the
current EBB. */
5728 | static bitmap_head invalid_invariant_regs; |
5729 | |
5730 | /* Do inheritance transformations for insn INSN, which defines (if |
5731 | DEF_P) or uses ORIGINAL_REGNO. NEXT_USAGE_INSNS specifies which |
5732 | instruction in the EBB next uses ORIGINAL_REGNO; it has the same |
5733 | form as the "insns" field of usage_insns. Return true if we |
5734 | succeed in such transformation. |
5735 | |
5736 | The transformations look like: |
5737 | |
5738 | p <- ... i <- ... |
5739 | ... p <- i (new insn) |
5740 | ... => |
5741 | <- ... p ... <- ... i ... |
5742 | or |
5743 | ... i <- p (new insn) |
5744 | <- ... p ... <- ... i ... |
5745 | ... => |
5746 | <- ... p ... <- ... i ... |
5747 | where p is a spilled original pseudo and i is a new inheritance pseudo. |
5748 | |
5749 | |
The inheritance pseudo has the smaller of the two classes CL and
the class of ORIGINAL_REGNO. */
5752 | static bool |
5753 | inherit_reload_reg (bool def_p, int original_regno, |
5754 | enum reg_class cl, rtx_insn *insn, rtx next_usage_insns) |
5755 | { |
5756 | if (optimize_function_for_size_p (cfun)) |
5757 | return false; |
5758 | |
5759 | enum reg_class rclass = lra_get_allocno_class (regno: original_regno); |
5760 | rtx original_reg = regno_reg_rtx[original_regno]; |
5761 | rtx new_reg, usage_insn; |
5762 | rtx_insn *new_insns; |
5763 | |
5764 | lra_assert (! usage_insns[original_regno].after_p); |
5765 | if (lra_dump_file != NULL) |
5766 | fprintf (stream: lra_dump_file, |
5767 | format: " <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n" ); |
5768 | if (! ira_reg_classes_intersect_p[cl][rclass]) |
5769 | { |
5770 | if (lra_dump_file != NULL) |
5771 | { |
5772 | fprintf (stream: lra_dump_file, |
5773 | format: " Rejecting inheritance for %d " |
5774 | "because of disjoint classes %s and %s\n" , |
5775 | original_regno, reg_class_names[cl], |
5776 | reg_class_names[rclass]); |
5777 | fprintf (stream: lra_dump_file, |
5778 | format: " >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n" ); |
5779 | } |
5780 | return false; |
5781 | } |
5782 | if ((ira_class_subset_p[cl][rclass] && cl != rclass) |
5783 | /* We don't use a subset of two classes because it can be |
5784 | NO_REGS. This transformation is still profitable in most |
5785 | cases even if the classes are not intersected as register |
5786 | move is probably cheaper than a memory load. */ |
5787 | || ira_class_hard_regs_num[cl] < ira_class_hard_regs_num[rclass]) |
5788 | { |
5789 | if (lra_dump_file != NULL) |
5790 | fprintf (stream: lra_dump_file, format: " Use smallest class of %s and %s\n" , |
5791 | reg_class_names[cl], reg_class_names[rclass]); |
5792 | |
5793 | rclass = cl; |
5794 | } |
5795 | if (check_secondary_memory_needed_p (inher_cl: rclass, usage_insns: next_usage_insns)) |
5796 | { |
5797 | /* Reject inheritance resulting in secondary memory moves. |
5798 | Otherwise, there is a danger in LRA cycling. Also such |
5799 | transformation will be unprofitable. */ |
5800 | if (lra_dump_file != NULL) |
5801 | { |
5802 | rtx_insn *insn = skip_usage_debug_insns (usage_insns: next_usage_insns); |
5803 | rtx set = single_set (insn); |
5804 | |
5805 | lra_assert (set != NULL_RTX); |
5806 | |
5807 | rtx dest = SET_DEST (set); |
5808 | |
5809 | lra_assert (REG_P (dest)); |
5810 | fprintf (stream: lra_dump_file, |
5811 | format: " Rejecting inheritance for insn %d(%s)<-%d(%s) " |
5812 | "as secondary mem is needed\n" , |
5813 | REGNO (dest), reg_class_names[get_reg_class (REGNO (dest))], |
5814 | original_regno, reg_class_names[rclass]); |
5815 | fprintf (stream: lra_dump_file, |
5816 | format: " >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n" ); |
5817 | } |
5818 | return false; |
5819 | } |
5820 | new_reg = lra_create_new_reg (GET_MODE (original_reg), original_reg, |
5821 | rclass, NULL, "inheritance" ); |
5822 | start_sequence (); |
5823 | if (def_p) |
5824 | lra_emit_move (original_reg, new_reg); |
5825 | else |
5826 | lra_emit_move (new_reg, original_reg); |
5827 | new_insns = get_insns (); |
5828 | end_sequence (); |
5829 | if (NEXT_INSN (insn: new_insns) != NULL_RTX) |
5830 | { |
5831 | if (lra_dump_file != NULL) |
5832 | { |
5833 | fprintf (stream: lra_dump_file, |
5834 | format: " Rejecting inheritance %d->%d " |
5835 | "as it results in 2 or more insns:\n" , |
5836 | original_regno, REGNO (new_reg)); |
5837 | dump_rtl_slim (lra_dump_file, new_insns, NULL, -1, 0); |
5838 | fprintf (stream: lra_dump_file, |
5839 | format: " >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n" ); |
5840 | } |
5841 | return false; |
5842 | } |
5843 | lra_substitute_pseudo_within_insn (insn, original_regno, new_reg, false); |
5844 | lra_update_insn_regno_info (insn); |
5845 | if (! def_p) |
5846 | /* We now have a new usage insn for original regno. */ |
5847 | setup_next_usage_insn (regno: original_regno, insn: new_insns, reloads_num, after_p: false); |
5848 | if (lra_dump_file != NULL) |
5849 | fprintf (stream: lra_dump_file, format: " Original reg change %d->%d (bb%d):\n" , |
5850 | original_regno, REGNO (new_reg), BLOCK_FOR_INSN (insn)->index); |
5851 | lra_reg_info[REGNO (new_reg)].restore_rtx = regno_reg_rtx[original_regno]; |
5852 | bitmap_set_bit (&check_only_regs, REGNO (new_reg)); |
5853 | bitmap_set_bit (&check_only_regs, original_regno); |
5854 | bitmap_set_bit (&lra_inheritance_pseudos, REGNO (new_reg)); |
5855 | if (def_p) |
5856 | lra_process_new_insns (insn, NULL, new_insns, |
5857 | "Add original<-inheritance" ); |
5858 | else |
5859 | lra_process_new_insns (insn, new_insns, NULL, |
5860 | "Add inheritance<-original" ); |
5861 | while (next_usage_insns != NULL_RTX) |
5862 | { |
5863 | if (GET_CODE (next_usage_insns) != INSN_LIST) |
5864 | { |
5865 | usage_insn = next_usage_insns; |
5866 | lra_assert (NONDEBUG_INSN_P (usage_insn)); |
5867 | next_usage_insns = NULL; |
5868 | } |
5869 | else |
5870 | { |
5871 | usage_insn = XEXP (next_usage_insns, 0); |
5872 | lra_assert (DEBUG_INSN_P (usage_insn)); |
5873 | next_usage_insns = XEXP (next_usage_insns, 1); |
5874 | } |
5875 | lra_substitute_pseudo (&usage_insn, original_regno, new_reg, false, |
5876 | DEBUG_INSN_P (usage_insn)); |
5877 | lra_update_insn_regno_info (as_a <rtx_insn *> (p: usage_insn)); |
5878 | if (lra_dump_file != NULL) |
5879 | { |
5880 | basic_block bb = BLOCK_FOR_INSN (insn: usage_insn); |
5881 | fprintf (stream: lra_dump_file, |
5882 | format: " Inheritance reuse change %d->%d (bb%d):\n" , |
5883 | original_regno, REGNO (new_reg), |
5884 | bb ? bb->index : -1); |
5885 | dump_insn_slim (lra_dump_file, as_a <rtx_insn *> (p: usage_insn)); |
5886 | } |
5887 | } |
5888 | if (lra_dump_file != NULL) |
5889 | fprintf (stream: lra_dump_file, |
5890 | format: " >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n" ); |
5891 | return true; |
5892 | } |
5893 | |
5894 | /* Return true if we need a caller save/restore for pseudo REGNO which |
5895 | was assigned to a hard register. */ |
5896 | static inline bool |
5897 | need_for_call_save_p (int regno) |
5898 | { |
5899 | lra_assert (regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] >= 0); |
5900 | if (usage_insns[regno].calls_num < calls_num) |
5901 | { |
5902 | unsigned int abis = 0; |
5903 | for (unsigned int i = 0; i < NUM_ABI_IDS; ++i) |
5904 | if (last_call_for_abi[i] > usage_insns[regno].calls_num) |
5905 | abis |= 1 << i; |
5906 | gcc_assert (abis); |
5907 | if (call_clobbered_in_region_p (abis, mask: full_and_partial_call_clobbers, |
5908 | PSEUDO_REGNO_MODE (regno), |
5909 | regno: reg_renumber[regno])) |
5910 | return true; |
5911 | } |
5912 | return false; |
5913 | } |
5914 | |
5915 | /* Global registers occurring in the current EBB. */ |
5916 | static bitmap_head ebb_global_regs; |
5917 | |
5918 | /* Return true if we need a split for hard register REGNO or pseudo |
5919 | REGNO which was assigned to a hard register. |
POTENTIAL_RELOAD_HARD_REGS contains hard registers which might be
used for reloads since the EBB end. It is an approximation of the
hard registers used in the split range. The exact value would
require expensive calculations. If we were too aggressive with
splitting because of the approximation, the split pseudo would
just get the same hard register assignment and be removed in the
undo pass. We still need the approximation because overly
aggressive splitting would make the cost calculation in the
assignment pass too inaccurate due to the many generated moves,
which would probably be removed in the undo pass. */
5930 | static inline bool |
5931 | need_for_split_p (HARD_REG_SET potential_reload_hard_regs, int regno) |
5932 | { |
5933 | int hard_regno = regno < FIRST_PSEUDO_REGISTER ? regno : reg_renumber[regno]; |
5934 | |
5935 | lra_assert (hard_regno >= 0); |
5936 | return ((TEST_HARD_REG_BIT (set: potential_reload_hard_regs, bit: hard_regno) |
/* Don't split eliminable hard registers, otherwise we can
split hard registers like the hard frame pointer, which
are live at BB start/end according to the DF infrastructure,
when there is a pseudo assigned to the register and
living in the same BB. */
5942 | && (regno >= FIRST_PSEUDO_REGISTER |
5943 | || ! TEST_HARD_REG_BIT (set: eliminable_regset, bit: hard_regno)) |
5944 | && ! TEST_HARD_REG_BIT (set: lra_no_alloc_regs, bit: hard_regno) |
/* Don't split call clobbered hard regs living through
calls, otherwise we might have a check problem in the
assign sub-pass, as in most cases (the exception is when
check_and_force_assignment_correctness_p is true) the
assign pass assumes that all pseudos living through
calls are assigned to call saved hard regs. */
5951 | && (regno >= FIRST_PSEUDO_REGISTER |
5952 | || !TEST_HARD_REG_BIT (set: full_and_partial_call_clobbers, bit: regno)) |
/* We need at least 2 reloads to make pseudo splitting
profitable. We should provide hard regno splitting in
any case to solve the 1st insn scheduling problem, when
moving a hard register definition up might make it
impossible to find a hard register for a reload pseudo of
a small register class. */
5959 | && (usage_insns[regno].reloads_num |
5960 | + (regno < FIRST_PSEUDO_REGISTER ? 0 : 3) < reloads_num) |
5961 | && (regno < FIRST_PSEUDO_REGISTER |
/* For short-lived pseudos, spilling + inheritance can
be considered a substitute for splitting.
Therefore we do not split local pseudos. This also
decreases the aggressiveness of splitting. The
minimal number of references is chosen taking into
account that splitting makes no sense for 2 references,
as we can just spill the pseudo. */
5969 | || (regno >= FIRST_PSEUDO_REGISTER |
5970 | && lra_reg_info[regno].nrefs > 3 |
5971 | && bitmap_bit_p (&ebb_global_regs, regno)))) |
5972 | || (regno >= FIRST_PSEUDO_REGISTER && need_for_call_save_p (regno))); |
5973 | } |
5974 | |
/* Return the class for the split pseudo created from an original pseudo
with ALLOCNO_CLASS and MODE which got hard register HARD_REGNO. We
choose a subclass of ALLOCNO_CLASS which contains HARD_REGNO and
results in no secondary memory moves. */
5979 | static enum reg_class |
5980 | choose_split_class (enum reg_class allocno_class, |
5981 | int hard_regno ATTRIBUTE_UNUSED, |
5982 | machine_mode mode ATTRIBUTE_UNUSED) |
5983 | { |
5984 | int i; |
5985 | enum reg_class cl, best_cl = NO_REGS; |
5986 | enum reg_class hard_reg_class ATTRIBUTE_UNUSED |
5987 | = REGNO_REG_CLASS (hard_regno); |
5988 | |
5989 | if (! targetm.secondary_memory_needed (mode, allocno_class, allocno_class) |
5990 | && TEST_HARD_REG_BIT (reg_class_contents[allocno_class], bit: hard_regno)) |
5991 | return allocno_class; |
5992 | for (i = 0; |
5993 | (cl = reg_class_subclasses[allocno_class][i]) != LIM_REG_CLASSES; |
5994 | i++) |
5995 | if (! targetm.secondary_memory_needed (mode, cl, hard_reg_class) |
5996 | && ! targetm.secondary_memory_needed (mode, hard_reg_class, cl) |
5997 | && TEST_HARD_REG_BIT (reg_class_contents[cl], bit: hard_regno) |
5998 | && (best_cl == NO_REGS |
5999 | || ira_class_hard_regs_num[best_cl] < ira_class_hard_regs_num[cl])) |
6000 | best_cl = cl; |
6001 | return best_cl; |
6002 | } |
6003 | |
/* Copy any equivalence information from ORIGINAL_REGNO to NEW_REGNO. It only
makes sense to call this function if the value of NEW_REGNO is always
equal to that of ORIGINAL_REGNO. Also set the defined_p flag when the
original's caller_save_p flag is set and CALL_SAVE_P is true. */
6008 | |
6009 | static void |
6010 | lra_copy_reg_equiv (unsigned int new_regno, unsigned int original_regno, |
6011 | bool call_save_p) |
6012 | { |
6013 | if (!ira_reg_equiv[original_regno].defined_p |
6014 | && !(call_save_p && ira_reg_equiv[original_regno].caller_save_p)) |
6015 | return; |
6016 | |
6017 | ira_expand_reg_equiv (); |
6018 | ira_reg_equiv[new_regno].defined_p = true; |
6019 | if (ira_reg_equiv[original_regno].memory) |
6020 | ira_reg_equiv[new_regno].memory |
6021 | = copy_rtx (ira_reg_equiv[original_regno].memory); |
6022 | if (ira_reg_equiv[original_regno].constant) |
6023 | ira_reg_equiv[new_regno].constant |
6024 | = copy_rtx (ira_reg_equiv[original_regno].constant); |
6025 | if (ira_reg_equiv[original_regno].invariant) |
6026 | ira_reg_equiv[new_regno].invariant |
6027 | = copy_rtx (ira_reg_equiv[original_regno].invariant); |
6028 | } |
6029 | |
/* Do split transformations for insn INSN, which defines or uses
ORIGINAL_REGNO. NEXT_USAGE_INSNS specifies which instruction in
the EBB next uses ORIGINAL_REGNO; it has the same form as the
"insns" field of usage_insns. If TO is not NULL, we don't use
usage_insns but put the restore insns after insn TO. This is the
case when split_reg is called from lra_split_hard_reg_for, outside
the inheritance pass.
6037 | |
6038 | The transformations look like: |
6039 | |
6040 | p <- ... p <- ... |
6041 | ... s <- p (new insn -- save) |
6042 | ... => |
6043 | ... p <- s (new insn -- restore) |
6044 | <- ... p ... <- ... p ... |
6045 | or |
6046 | <- ... p ... <- ... p ... |
6047 | ... s <- p (new insn -- save) |
6048 | ... => |
6049 | ... p <- s (new insn -- restore) |
6050 | <- ... p ... <- ... p ... |
6051 | |
where p is an original pseudo that got a hard register (or is itself a
hard register) and s is a new split pseudo. The save is put before INSN
if BEFORE_P is true. Return true if we succeed in such a
transformation. */
6056 | static bool |
6057 | split_reg (bool before_p, int original_regno, rtx_insn *insn, |
6058 | rtx next_usage_insns, rtx_insn *to) |
6059 | { |
6060 | enum reg_class rclass; |
6061 | rtx original_reg; |
6062 | int hard_regno, nregs; |
6063 | rtx new_reg, usage_insn; |
6064 | rtx_insn *restore, *save; |
6065 | bool after_p; |
6066 | bool call_save_p; |
6067 | machine_mode mode; |
6068 | |
6069 | if (original_regno < FIRST_PSEUDO_REGISTER) |
6070 | { |
6071 | rclass = ira_allocno_class_translate[REGNO_REG_CLASS (original_regno)]; |
6072 | hard_regno = original_regno; |
6073 | call_save_p = false; |
6074 | nregs = 1; |
6075 | mode = lra_reg_info[hard_regno].biggest_mode; |
6076 | machine_mode reg_rtx_mode = GET_MODE (regno_reg_rtx[hard_regno]); |
/* A reg can have a biggest_mode of VOIDmode if it was only ever seen
as part of a multi-word register. In that case, just use the reg_rtx
mode. Do the same if the biggest mode was larger than a register
or we cannot compare the modes. Otherwise, limit the size to that of
the biggest access in the function or at least to the natural mode. */
6082 | if (mode == VOIDmode |
6083 | || !ordered_p (a: GET_MODE_PRECISION (mode), |
6084 | b: GET_MODE_PRECISION (mode: reg_rtx_mode)) |
6085 | || paradoxical_subreg_p (outermode: mode, innermode: reg_rtx_mode) |
6086 | || maybe_gt (GET_MODE_PRECISION (reg_rtx_mode), GET_MODE_PRECISION (mode))) |
6087 | { |
6088 | original_reg = regno_reg_rtx[hard_regno]; |
6089 | mode = reg_rtx_mode; |
6090 | } |
6091 | else |
6092 | original_reg = gen_rtx_REG (mode, hard_regno); |
6093 | } |
6094 | else |
6095 | { |
6096 | mode = PSEUDO_REGNO_MODE (original_regno); |
6097 | hard_regno = reg_renumber[original_regno]; |
6098 | nregs = hard_regno_nregs (regno: hard_regno, mode); |
6099 | rclass = lra_get_allocno_class (regno: original_regno); |
6100 | original_reg = regno_reg_rtx[original_regno]; |
6101 | call_save_p = need_for_call_save_p (regno: original_regno); |
6102 | } |
6103 | lra_assert (hard_regno >= 0); |
6104 | if (lra_dump_file != NULL) |
6105 | fprintf (stream: lra_dump_file, |
6106 | format: " ((((((((((((((((((((((((((((((((((((((((((((((((\n" ); |
6107 | |
6108 | if (call_save_p) |
6109 | { |
6110 | mode = HARD_REGNO_CALLER_SAVE_MODE (hard_regno, |
6111 | hard_regno_nregs (hard_regno, mode), |
6112 | mode); |
6113 | new_reg = lra_create_new_reg (mode, NULL_RTX, NO_REGS, NULL, "save" ); |
6114 | } |
6115 | else |
6116 | { |
6117 | rclass = choose_split_class (allocno_class: rclass, hard_regno, mode); |
6118 | if (rclass == NO_REGS) |
6119 | { |
6120 | if (lra_dump_file != NULL) |
6121 | { |
6122 | fprintf (stream: lra_dump_file, |
6123 | format: " Rejecting split of %d(%s): " |
6124 | "no good reg class for %d(%s)\n" , |
6125 | original_regno, |
6126 | reg_class_names[lra_get_allocno_class (regno: original_regno)], |
6127 | hard_regno, |
6128 | reg_class_names[REGNO_REG_CLASS (hard_regno)]); |
6129 | fprintf |
6130 | (stream: lra_dump_file, |
6131 | format: " ))))))))))))))))))))))))))))))))))))))))))))))))\n" ); |
6132 | } |
6133 | return false; |
6134 | } |
6135 | /* Split_if_necessary can split hard registers used as part of a |
6136 | multi-register mode but splits each register individually. The |
6137 | mode used for each independent register may not be supported |
6138 | so reject the split. Splitting the wider mode should theoretically |
6139 | be possible but is not implemented. */ |
6140 | if (!targetm.hard_regno_mode_ok (hard_regno, mode)) |
6141 | { |
6142 | if (lra_dump_file != NULL) |
6143 | { |
6144 | fprintf (stream: lra_dump_file, |
6145 | format: " Rejecting split of %d(%s): unsuitable mode %s\n" , |
6146 | original_regno, |
6147 | reg_class_names[lra_get_allocno_class (regno: original_regno)], |
6148 | GET_MODE_NAME (mode)); |
6149 | fprintf |
6150 | (stream: lra_dump_file, |
6151 | format: " ))))))))))))))))))))))))))))))))))))))))))))))))\n" ); |
6152 | } |
6153 | return false; |
6154 | } |
6155 | new_reg = lra_create_new_reg (mode, original_reg, rclass, NULL, "split" ); |
6156 | reg_renumber[REGNO (new_reg)] = hard_regno; |
6157 | } |
6158 | int new_regno = REGNO (new_reg); |
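/* Create the save and restore move sequences; unless this is a
caller save, reject the split if either sequence needs more than
one insn. */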
6159 | save = emit_spill_move (to_p: true, mem_pseudo: new_reg, val: original_reg); |
6160 | if (NEXT_INSN (insn: save) != NULL_RTX && !call_save_p) |
6161 | { |
6162 | if (lra_dump_file != NULL) |
6163 | { |
6164 | fprintf |
6165 | (stream: lra_dump_file, |
6166 | format: " Rejecting split %d->%d resulting in > 2 save insns:\n" , |
6167 | original_regno, new_regno); |
6168 | dump_rtl_slim (lra_dump_file, save, NULL, -1, 0); |
6169 | fprintf (stream: lra_dump_file, |
6170 | format: " ))))))))))))))))))))))))))))))))))))))))))))))))\n" ); |
6171 | } |
6172 | return false; |
6173 | } |
6174 | restore = emit_spill_move (to_p: false, mem_pseudo: new_reg, val: original_reg); |
6175 | if (NEXT_INSN (insn: restore) != NULL_RTX && !call_save_p) |
6176 | { |
6177 | if (lra_dump_file != NULL) |
6178 | { |
6179 | fprintf (stream: lra_dump_file, |
6180 | format: " Rejecting split %d->%d " |
6181 | "resulting in > 2 restore insns:\n" , |
6182 | original_regno, new_regno); |
6183 | dump_rtl_slim (lra_dump_file, restore, NULL, -1, 0); |
6184 | fprintf (stream: lra_dump_file, |
6185 | format: " ))))))))))))))))))))))))))))))))))))))))))))))))\n" ); |
6186 | } |
6187 | return false; |
6188 | } |
6189 | /* Transfer equivalence information to the spill register, so that |
6190 | if we fail to allocate the spill register, we have the option of |
6191 | rematerializing the original value instead of spilling to the stack. */ |
6192 | if (!HARD_REGISTER_NUM_P (original_regno) |
6193 | && mode == PSEUDO_REGNO_MODE (original_regno)) |
6194 | lra_copy_reg_equiv (new_regno, original_regno, call_save_p); |
6195 | lra_reg_info[new_regno].restore_rtx = regno_reg_rtx[original_regno]; |
6196 | bitmap_set_bit (&lra_split_regs, new_regno); |
6197 | if (to != NULL) |
6198 | { |
6199 | lra_assert (next_usage_insns == NULL); |
6200 | usage_insn = to; |
6201 | after_p = true; |
6202 | } |
6203 | else |
6204 | { |
6205 | /* We need check_only_regs only inside the inheritance pass. */ |
6206 | bitmap_set_bit (&check_only_regs, new_regno); |
6207 | bitmap_set_bit (&check_only_regs, original_regno); |
6208 | after_p = usage_insns[original_regno].after_p; |
6209 | for (;;) |
6210 | { |
6211 | if (GET_CODE (next_usage_insns) != INSN_LIST) |
6212 | { |
6213 | usage_insn = next_usage_insns; |
6214 | break; |
6215 | } |
6216 | usage_insn = XEXP (next_usage_insns, 0); |
6217 | lra_assert (DEBUG_INSN_P (usage_insn)); |
6218 | next_usage_insns = XEXP (next_usage_insns, 1); |
6219 | lra_substitute_pseudo (&usage_insn, original_regno, new_reg, false, |
6220 | true); |
6221 | lra_update_insn_regno_info (as_a <rtx_insn *> (p: usage_insn)); |
6222 | if (lra_dump_file != NULL) |
6223 | { |
6224 | fprintf (stream: lra_dump_file, format: " Split reuse change %d->%d:\n" , |
6225 | original_regno, new_regno); |
6226 | dump_insn_slim (lra_dump_file, as_a <rtx_insn *> (p: usage_insn)); |
6227 | } |
6228 | } |
6229 | } |
6230 | lra_assert (NOTE_P (usage_insn) || NONDEBUG_INSN_P (usage_insn)); |
6231 | lra_assert (usage_insn != insn || (after_p && before_p)); |
6232 | lra_process_new_insns (as_a <rtx_insn *> (p: usage_insn), |
6233 | after_p ? NULL : restore, |
6234 | after_p ? restore : NULL, |
6235 | call_save_p |
6236 | ? "Add reg<-save" : "Add reg<-split" ); |
6237 | lra_process_new_insns (insn, before_p ? save : NULL, |
6238 | before_p ? NULL : save, |
6239 | call_save_p |
6240 | ? "Add save<-reg" : "Add split<-reg" ); |
6241 | if (nregs > 1 || original_regno < FIRST_PSEUDO_REGISTER) |
/* If we are trying to split a multi-register pseudo, we should check
conflicts on the next assignment sub-pass. IRA can allocate at the
sub-register level, while LRA currently does this at the pseudo level,
and this discrepancy may create allocation conflicts after
splitting.

If we are trying to split a hard register, we should also check conflicts
as such splitting can create an artificial conflict of the hard register
with another pseudo because of the simplified conflict calculation in
LRA. */
6252 | check_and_force_assignment_correctness_p = true; |
6253 | if (lra_dump_file != NULL) |
6254 | fprintf (stream: lra_dump_file, |
6255 | format: " ))))))))))))))))))))))))))))))))))))))))))))))))\n" ); |
6256 | return true; |
6257 | } |
6258 | |
/* Split a hard reg for reload pseudo REGNO having RCLASS and living
in the range [FROM, TO]. Return true if we did a split. Otherwise,
return false. */
6262 | bool |
6263 | spill_hard_reg_in_range (int regno, enum reg_class rclass, rtx_insn *from, rtx_insn *to) |
6264 | { |
6265 | int i, hard_regno; |
6266 | int rclass_size; |
6267 | rtx_insn *insn; |
6268 | unsigned int uid; |
6269 | bitmap_iterator bi; |
6270 | HARD_REG_SET ignore; |
6271 | |
6272 | lra_assert (from != NULL && to != NULL); |
6273 | ignore = lra_no_alloc_regs; |
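/* Collect hard registers explicitly mentioned by the insns referencing
REGNO; such registers must not be chosen for the split. */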
6274 | EXECUTE_IF_SET_IN_BITMAP (&lra_reg_info[regno].insn_bitmap, 0, uid, bi) |
6275 | { |
6276 | lra_insn_recog_data_t id = lra_insn_recog_data[uid]; |
6277 | struct lra_static_insn_data *static_id = id->insn_static_data; |
6278 | struct lra_insn_reg *reg; |
6279 | |
6280 | for (reg = id->regs; reg != NULL; reg = reg->next) |
6281 | if (reg->regno < FIRST_PSEUDO_REGISTER) |
6282 | SET_HARD_REG_BIT (set&: ignore, bit: reg->regno); |
6283 | for (reg = static_id->hard_regs; reg != NULL; reg = reg->next) |
6284 | SET_HARD_REG_BIT (set&: ignore, bit: reg->regno); |
6285 | } |
6286 | rclass_size = ira_class_hard_regs_num[rclass]; |
6287 | for (i = 0; i < rclass_size; i++) |
6288 | { |
6289 | hard_regno = ira_class_hard_regs[rclass][i]; |
6290 | if (! TEST_HARD_REG_BIT (set: lra_reg_info[regno].conflict_hard_regs, bit: hard_regno) |
6291 | || TEST_HARD_REG_BIT (set: ignore, bit: hard_regno)) |
6292 | continue; |
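/* Check that the candidate hard register is not referenced anywhere
in the range [FROM, TO]. */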
6293 | for (insn = from; insn != NEXT_INSN (insn: to); insn = NEXT_INSN (insn)) |
6294 | { |
6295 | struct lra_static_insn_data *static_id; |
6296 | struct lra_insn_reg *reg; |
6297 | |
6298 | if (!INSN_P (insn)) |
6299 | continue; |
6300 | if (bitmap_bit_p (&lra_reg_info[hard_regno].insn_bitmap, |
6301 | INSN_UID (insn))) |
6302 | break; |
6303 | static_id = lra_get_insn_recog_data (insn)->insn_static_data; |
6304 | for (reg = static_id->hard_regs; reg != NULL; reg = reg->next) |
6305 | if (reg->regno == hard_regno) |
6306 | break; |
6307 | if (reg != NULL) |
6308 | break; |
6309 | } |
6310 | if (insn != NEXT_INSN (insn: to)) |
6311 | continue; |
6312 | if (split_reg (before_p: true, original_regno: hard_regno, insn: from, NULL, to)) |
6313 | return true; |
6314 | } |
6315 | return false; |
6316 | } |
6317 | |
/* Recognize that we need a split transformation for insn INSN, which
defines or uses REGNO with its biggest insn MODE (we use MODE only if
REGNO is a hard register). POTENTIAL_RELOAD_HARD_REGS contains
hard registers which might be used for reloads since the EBB end.
Put the save before INSN if BEFORE_P is true. MAX_UID is the maximal
insn uid before INSN processing started. Return true if we succeed
in such a transformation. */
6325 | static bool |
6326 | split_if_necessary (int regno, machine_mode mode, |
6327 | HARD_REG_SET potential_reload_hard_regs, |
6328 | bool before_p, rtx_insn *insn, int max_uid) |
6329 | { |
6330 | bool res = false; |
6331 | int i, nregs = 1; |
6332 | rtx next_usage_insns; |
6333 | |
6334 | if (regno < FIRST_PSEUDO_REGISTER) |
6335 | nregs = hard_regno_nregs (regno, mode); |
6336 | for (i = 0; i < nregs; i++) |
6337 | if (usage_insns[regno + i].check == curr_usage_insns_check |
6338 | && (next_usage_insns = usage_insns[regno + i].insns) != NULL_RTX |
6339 | /* To avoid processing the register twice or more. */ |
6340 | && ((GET_CODE (next_usage_insns) != INSN_LIST |
6341 | && INSN_UID (insn: next_usage_insns) < max_uid) |
6342 | || (GET_CODE (next_usage_insns) == INSN_LIST |
6343 | && (INSN_UID (XEXP (next_usage_insns, 0)) < max_uid))) |
6344 | && need_for_split_p (potential_reload_hard_regs, regno: regno + i) |
6345 | && split_reg (before_p, original_regno: regno + i, insn, next_usage_insns, NULL)) |
6346 | res = true; |
6347 | return res; |
6348 | } |
6349 | |
/* Return TRUE if rtx X is considered an invariant for
inheritance. */
6352 | static bool |
6353 | invariant_p (const_rtx x) |
6354 | { |
6355 | machine_mode mode; |
6356 | const char *fmt; |
6357 | enum rtx_code code; |
6358 | int i, j; |
6359 | |
6360 | if (side_effects_p (x)) |
6361 | return false; |
6362 | |
6363 | code = GET_CODE (x); |
6364 | mode = GET_MODE (x); |
6365 | if (code == SUBREG) |
6366 | { |
6367 | x = SUBREG_REG (x); |
6368 | code = GET_CODE (x); |
6369 | mode = wider_subreg_mode (outermode: mode, GET_MODE (x)); |
6370 | } |
6371 | |
6372 | if (MEM_P (x)) |
6373 | return false; |
6374 | |
6375 | if (REG_P (x)) |
6376 | { |
6377 | int i, nregs, regno = REGNO (x); |
6378 | |
6379 | if (regno >= FIRST_PSEUDO_REGISTER || regno == STACK_POINTER_REGNUM |
6380 | || TEST_HARD_REG_BIT (set: eliminable_regset, bit: regno) |
6381 | || GET_MODE_CLASS (GET_MODE (x)) == MODE_CC) |
6382 | return false; |
6383 | nregs = hard_regno_nregs (regno, mode); |
6384 | for (i = 0; i < nregs; i++) |
6385 | if (! fixed_regs[regno + i] |
6386 | /* A hard register may be clobbered in the current insn |
6387 | but we can ignore this case because if the hard |
6388 | register is used it should be set somewhere after the |
6389 | clobber. */ |
6390 | || bitmap_bit_p (&invalid_invariant_regs, regno + i)) |
6391 | return false; |
6392 | } |
6393 | fmt = GET_RTX_FORMAT (code); |
6394 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
6395 | { |
6396 | if (fmt[i] == 'e') |
6397 | { |
6398 | if (! invariant_p (XEXP (x, i))) |
6399 | return false; |
6400 | } |
6401 | else if (fmt[i] == 'E') |
6402 | { |
6403 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
6404 | if (! invariant_p (XVECEXP (x, i, j))) |
6405 | return false; |
6406 | } |
6407 | } |
6408 | return true; |
6409 | } |
6410 | |
/* We have 'dest_reg <- invariant'. Let us try to make an invariant
inheritance transformation (using dest_reg instead of the invariant
in a subsequent insn). */
6414 | static bool |
6415 | process_invariant_for_inheritance (rtx dst_reg, rtx invariant_rtx) |
6416 | { |
6417 | invariant_ptr_t invariant_ptr; |
6418 | rtx_insn *insn, *new_insns; |
6419 | rtx insn_set, insn_reg, new_reg; |
6420 | int insn_regno; |
6421 | bool succ_p = false; |
6422 | int dst_regno = REGNO (dst_reg); |
6423 | machine_mode dst_mode = GET_MODE (dst_reg); |
6424 | enum reg_class cl = lra_get_allocno_class (regno: dst_regno), insn_reg_cl; |
6425 | |
6426 | invariant_ptr = insert_invariant (invariant_rtx); |
6427 | if ((insn = invariant_ptr->insn) != NULL_RTX) |
6428 | { |
6429 | /* We have a subsequent insn using the invariant. */ |
6430 | insn_set = single_set (insn); |
6431 | lra_assert (insn_set != NULL); |
6432 | insn_reg = SET_DEST (insn_set); |
6433 | lra_assert (REG_P (insn_reg)); |
6434 | insn_regno = REGNO (insn_reg); |
6435 | insn_reg_cl = lra_get_allocno_class (regno: insn_regno); |
6436 | |
6437 | if (dst_mode == GET_MODE (insn_reg) |
/* We should consider only cases resulting in cheap register
move insns. */
6440 | && targetm.register_move_cost (dst_mode, cl, insn_reg_cl) == 2 |
6441 | && targetm.register_move_cost (dst_mode, cl, cl) == 2) |
6442 | { |
6443 | if (lra_dump_file != NULL) |
6444 | fprintf (stream: lra_dump_file, |
6445 | format: " [[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n" ); |
6446 | new_reg = lra_create_new_reg (dst_mode, dst_reg, cl, NULL, |
6447 | "invariant inheritance" ); |
6448 | bitmap_set_bit (&lra_inheritance_pseudos, REGNO (new_reg)); |
6449 | bitmap_set_bit (&check_only_regs, REGNO (new_reg)); |
6450 | lra_reg_info[REGNO (new_reg)].restore_rtx = PATTERN (insn); |
6451 | start_sequence (); |
6452 | lra_emit_move (new_reg, dst_reg); |
6453 | new_insns = get_insns (); |
6454 | end_sequence (); |
6455 | lra_process_new_insns (curr_insn, NULL, new_insns, |
6456 | "Add invariant inheritance<-original" ); |
6457 | start_sequence (); |
6458 | lra_emit_move (SET_DEST (insn_set), new_reg); |
6459 | new_insns = get_insns (); |
6460 | end_sequence (); |
6461 | lra_process_new_insns (insn, NULL, new_insns, |
6462 | "Changing reload<-inheritance" ); |
6463 | lra_set_insn_deleted (insn); |
6464 | succ_p = true; |
6465 | if (lra_dump_file != NULL) |
6466 | { |
6467 | fprintf (stream: lra_dump_file, |
6468 | format: " Invariant inheritance reuse change %d (bb%d):\n" , |
6469 | REGNO (new_reg), BLOCK_FOR_INSN (insn)->index); |
6470 | dump_insn_slim (lra_dump_file, insn); |
6471 | fprintf (stream: lra_dump_file, |
6472 | format: " ]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]\n" ); |
6473 | } |
6474 | } |
6475 | } |
6476 | invariant_ptr->insn = curr_insn; |
6477 | return succ_p; |
6478 | } |
6479 | |
6480 | /* Check only registers living at the current program point in the |
6481 | current EBB. */ |
6482 | static bitmap_head live_regs; |
6483 | |
6484 | /* Update live info in EBB given by its HEAD and TAIL insns after |
6485 | inheritance/split transformation. The function removes dead moves |
6486 | too. */ |
6487 | static void |
6488 | update_ebb_live_info (rtx_insn *head, rtx_insn *tail) |
6489 | { |
6490 | unsigned int j; |
6491 | int i, regno; |
6492 | bool live_p; |
6493 | rtx_insn *prev_insn; |
6494 | rtx set; |
6495 | bool remove_p; |
6496 | basic_block last_bb, prev_bb, curr_bb; |
6497 | bitmap_iterator bi; |
6498 | struct lra_insn_reg *reg; |
6499 | edge e; |
6500 | edge_iterator ei; |
6501 | |
6502 | last_bb = BLOCK_FOR_INSN (insn: tail); |
6503 | prev_bb = NULL; |
6504 | for (curr_insn = tail; |
6505 | curr_insn != PREV_INSN (insn: head); |
6506 | curr_insn = prev_insn) |
6507 | { |
6508 | prev_insn = PREV_INSN (insn: curr_insn); |
/* We need to process empty blocks too. They contain a
NOTE_INSN_BASIC_BLOCK referring to the basic block. */
6511 | if (NOTE_P (curr_insn) && NOTE_KIND (curr_insn) != NOTE_INSN_BASIC_BLOCK) |
6512 | continue; |
6513 | curr_bb = BLOCK_FOR_INSN (insn: curr_insn); |
6514 | if (curr_bb != prev_bb) |
6515 | { |
6516 | if (prev_bb != NULL) |
6517 | { |
6518 | /* Update df_get_live_in (prev_bb): */ |
6519 | EXECUTE_IF_SET_IN_BITMAP (&check_only_regs, 0, j, bi) |
6520 | if (bitmap_bit_p (&live_regs, j)) |
6521 | bitmap_set_bit (df_get_live_in (bb: prev_bb), j); |
6522 | else |
6523 | bitmap_clear_bit (df_get_live_in (bb: prev_bb), j); |
6524 | } |
6525 | if (curr_bb != last_bb) |
6526 | { |
6527 | /* Update df_get_live_out (curr_bb): */ |
6528 | EXECUTE_IF_SET_IN_BITMAP (&check_only_regs, 0, j, bi) |
6529 | { |
6530 | live_p = bitmap_bit_p (&live_regs, j); |
6531 | if (! live_p) |
6532 | FOR_EACH_EDGE (e, ei, curr_bb->succs) |
6533 | if (bitmap_bit_p (df_get_live_in (bb: e->dest), j)) |
6534 | { |
6535 | live_p = true; |
6536 | break; |
6537 | } |
6538 | if (live_p) |
6539 | bitmap_set_bit (df_get_live_out (bb: curr_bb), j); |
6540 | else |
6541 | bitmap_clear_bit (df_get_live_out (bb: curr_bb), j); |
6542 | } |
6543 | } |
6544 | prev_bb = curr_bb; |
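/* Initialize the live set for this block from the check-only
registers that are live at its end. */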
6545 | bitmap_and (&live_regs, &check_only_regs, df_get_live_out (bb: curr_bb)); |
6546 | } |
6547 | if (! NONDEBUG_INSN_P (curr_insn)) |
6548 | continue; |
6549 | curr_id = lra_get_insn_recog_data (insn: curr_insn); |
6550 | curr_static_id = curr_id->insn_static_data; |
6551 | remove_p = false; |
6552 | if ((set = single_set (insn: curr_insn)) != NULL_RTX |
6553 | && REG_P (SET_DEST (set)) |
6554 | && (regno = REGNO (SET_DEST (set))) >= FIRST_PSEUDO_REGISTER |
6555 | && SET_DEST (set) != pic_offset_table_rtx |
6556 | && bitmap_bit_p (&check_only_regs, regno) |
6557 | && ! bitmap_bit_p (&live_regs, regno)) |
6558 | remove_p = true; |
6559 | /* See which defined values die here. */ |
6560 | for (reg = curr_id->regs; reg != NULL; reg = reg->next) |
6561 | if (reg->type == OP_OUT && ! reg->subreg_p) |
6562 | bitmap_clear_bit (&live_regs, reg->regno); |
6563 | for (reg = curr_static_id->hard_regs; reg != NULL; reg = reg->next) |
6564 | if (reg->type == OP_OUT && ! reg->subreg_p) |
6565 | bitmap_clear_bit (&live_regs, reg->regno); |
6566 | if (curr_id->arg_hard_regs != NULL) |
6567 | /* Make clobbered argument hard registers die. */ |
6568 | for (i = 0; (regno = curr_id->arg_hard_regs[i]) >= 0; i++) |
6569 | if (regno >= FIRST_PSEUDO_REGISTER) |
6570 | bitmap_clear_bit (&live_regs, regno - FIRST_PSEUDO_REGISTER); |
6571 | /* Mark each used value as live. */ |
6572 | for (reg = curr_id->regs; reg != NULL; reg = reg->next) |
6573 | if (reg->type != OP_OUT |
6574 | && bitmap_bit_p (&check_only_regs, reg->regno)) |
6575 | bitmap_set_bit (&live_regs, reg->regno); |
6576 | for (reg = curr_static_id->hard_regs; reg != NULL; reg = reg->next) |
6577 | if (reg->type != OP_OUT |
6578 | && bitmap_bit_p (&check_only_regs, reg->regno)) |
6579 | bitmap_set_bit (&live_regs, reg->regno); |
6580 | if (curr_id->arg_hard_regs != NULL) |
6581 | /* Make used argument hard registers live. */ |
6582 | for (i = 0; (regno = curr_id->arg_hard_regs[i]) >= 0; i++) |
6583 | if (regno < FIRST_PSEUDO_REGISTER |
6584 | && bitmap_bit_p (&check_only_regs, regno)) |
6585 | bitmap_set_bit (&live_regs, regno); |
/* It is quite important to remove dead move insns because it
means removing dead stores. We don't need to process them for
constraints. */
6589 | if (remove_p) |
6590 | { |
6591 | if (lra_dump_file != NULL) |
6592 | { |
6593 | fprintf (stream: lra_dump_file, format: " Removing dead insn:\n " ); |
6594 | dump_insn_slim (lra_dump_file, curr_insn); |
6595 | } |
6596 | lra_set_insn_deleted (curr_insn); |
6597 | } |
6598 | } |
6599 | } |
6600 | |
6601 | /* The structure describes info to do an inheritance for the current |
6602 | insn. We need to collect such info first before doing the |
6603 | transformations because the transformations change the insn |
6604 | internal representation. */ |
6605 | struct to_inherit |
6606 | { |
6607 | /* Original regno. */ |
6608 | int regno; |
6609 | /* Subsequent insns which can inherit original reg value. */ |
6610 | rtx insns; |
6611 | }; |
6612 | |
6613 | /* Array containing all info for doing inheritance from the current |
6614 | insn. */ |
6615 | static struct to_inherit to_inherit[LRA_MAX_INSN_RELOADS]; |
6616 | |
/* Number of elements in the previous array. */
6618 | static int to_inherit_num; |
6619 | |
6620 | /* Add inheritance info REGNO and INSNS. Their meaning is described in |
6621 | structure to_inherit. */ |
6622 | static void |
6623 | add_to_inherit (int regno, rtx insns) |
6624 | { |
6625 | int i; |
6626 | |
6627 | for (i = 0; i < to_inherit_num; i++) |
6628 | if (to_inherit[i].regno == regno) |
6629 | return; |
6630 | lra_assert (to_inherit_num < LRA_MAX_INSN_RELOADS); |
6631 | to_inherit[to_inherit_num].regno = regno; |
6632 | to_inherit[to_inherit_num++].insns = insns; |
6633 | } |
6634 | |
6635 | /* Return the last non-debug insn in basic block BB, or the block begin |
6636 | note if none. */ |
6637 | static rtx_insn * |
6638 | get_last_insertion_point (basic_block bb) |
6639 | { |
6640 | rtx_insn *insn; |
6641 | |
6642 | FOR_BB_INSNS_REVERSE (bb, insn) |
6643 | if (NONDEBUG_INSN_P (insn) || NOTE_INSN_BASIC_BLOCK_P (insn)) |
6644 | return insn; |
6645 | gcc_unreachable (); |
6646 | } |
6647 | |
/* Set up RES to the registers living on the successor edges of FROM other
than the edge (FROM, TO), plus the registers set by a jump insn in BB
FROM. */
6650 | static void |
6651 | get_live_on_other_edges (basic_block from, basic_block to, bitmap res) |
6652 | { |
6653 | rtx_insn *last; |
6654 | struct lra_insn_reg *reg; |
6655 | edge e; |
6656 | edge_iterator ei; |
6657 | |
6658 | lra_assert (to != NULL); |
6659 | bitmap_clear (res); |
6660 | FOR_EACH_EDGE (e, ei, from->succs) |
6661 | if (e->dest != to) |
6662 | bitmap_ior_into (res, df_get_live_in (bb: e->dest)); |
6663 | last = get_last_insertion_point (bb: from); |
6664 | if (! JUMP_P (last)) |
6665 | return; |
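/* Also add the registers set by the jump insn ending FROM. */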
6666 | curr_id = lra_get_insn_recog_data (insn: last); |
6667 | for (reg = curr_id->regs; reg != NULL; reg = reg->next) |
6668 | if (reg->type != OP_IN) |
6669 | bitmap_set_bit (res, reg->regno); |
6670 | } |
6671 | |
/* Used as a temporary result of some bitmap calculations. */
6673 | static bitmap_head temp_bitmap; |
6674 | |
/* We split for reloads of small classes of hard regs. The following
defines how many hard regs a class should have to qualify as
small. The code is mostly oriented to the x86/x86-64 architecture,
where some insns need to use only a specific register or pair of
registers and these registers can appear in the RTL explicitly, e.g.
for parameter passing. */
6681 | static const int max_small_class_regs_num = 2; |
6682 | |
6683 | /* Do inheritance/split transformations in EBB starting with HEAD and |
6684 | finishing on TAIL. We process EBB insns in the reverse order. |
6685 | Return true if we did any inheritance/split transformation in the |
6686 | EBB. |
6687 | |
We should avoid excessive splitting which results in worse code
because of inaccurate cost calculations for spilling new split
pseudos in that case. To achieve this we do splitting only if the
register pressure is high in the given basic block and there are
reload pseudos requiring hard registers. We could do more register
pressure calculations at any given program point to avoid unnecessary
splitting even more, but it is too expensive and the current approach
works well enough. */
6696 | static bool |
6697 | inherit_in_ebb (rtx_insn *head, rtx_insn *tail) |
6698 | { |
6699 | int i, src_regno, dst_regno, nregs; |
6700 | bool change_p, succ_p, update_reloads_num_p; |
6701 | rtx_insn *prev_insn, *last_insn; |
6702 | rtx next_usage_insns, curr_set; |
6703 | enum reg_class cl; |
6704 | struct lra_insn_reg *reg; |
6705 | basic_block last_processed_bb, curr_bb = NULL; |
6706 | HARD_REG_SET potential_reload_hard_regs, live_hard_regs; |
6707 | bitmap to_process; |
6708 | unsigned int j; |
6709 | bitmap_iterator bi; |
6710 | bool head_p, after_p; |
6711 | |
6712 | change_p = false; |
6713 | curr_usage_insns_check++; |
6714 | clear_invariants (); |
6715 | reloads_num = calls_num = 0; |
6716 | for (unsigned int i = 0; i < NUM_ABI_IDS; ++i) |
6717 | last_call_for_abi[i] = 0; |
6718 | CLEAR_HARD_REG_SET (set&: full_and_partial_call_clobbers); |
6719 | bitmap_clear (&check_only_regs); |
6720 | bitmap_clear (&invalid_invariant_regs); |
6721 | last_processed_bb = NULL; |
6722 | CLEAR_HARD_REG_SET (set&: potential_reload_hard_regs); |
6723 | live_hard_regs = eliminable_regset | lra_no_alloc_regs; |
6724 | /* We don't process new insns generated in the loop. */ |
6725 | for (curr_insn = tail; curr_insn != PREV_INSN (insn: head); curr_insn = prev_insn) |
6726 | { |
6727 | prev_insn = PREV_INSN (insn: curr_insn); |
6728 | if (BLOCK_FOR_INSN (insn: curr_insn) != NULL) |
6729 | curr_bb = BLOCK_FOR_INSN (insn: curr_insn); |
6730 | if (last_processed_bb != curr_bb) |
6731 | { |
6732 | /* We are at the end of BB. Add qualified living |
6733 | pseudos for potential splitting. */ |
6734 | to_process = df_get_live_out (bb: curr_bb); |
6735 | if (last_processed_bb != NULL) |
6736 | { |
6737 | /* We are somewhere in the middle of EBB. */ |
6738 | get_live_on_other_edges (from: curr_bb, to: last_processed_bb, |
6739 | res: &temp_bitmap); |
6740 | to_process = &temp_bitmap; |
6741 | } |
6742 | last_processed_bb = curr_bb; |
6743 | last_insn = get_last_insertion_point (bb: curr_bb); |
6744 | after_p = (! JUMP_P (last_insn) |
6745 | && (! CALL_P (last_insn) |
6746 | || (find_reg_note (last_insn, |
6747 | REG_NORETURN, NULL_RTX) == NULL_RTX |
6748 | && ! SIBLING_CALL_P (last_insn)))); |
6749 | CLEAR_HARD_REG_SET (set&: potential_reload_hard_regs); |
6750 | EXECUTE_IF_SET_IN_BITMAP (to_process, 0, j, bi) |
6751 | { |
6752 | if ((int) j >= lra_constraint_new_regno_start) |
6753 | break; |
6754 | if (j < FIRST_PSEUDO_REGISTER || reg_renumber[j] >= 0) |
6755 | { |
6756 | if (j < FIRST_PSEUDO_REGISTER) |
6757 | SET_HARD_REG_BIT (set&: live_hard_regs, bit: j); |
6758 | else |
6759 | add_to_hard_reg_set (regs: &live_hard_regs, |
6760 | PSEUDO_REGNO_MODE (j), |
6761 | regno: reg_renumber[j]); |
6762 | setup_next_usage_insn (regno: j, insn: last_insn, reloads_num, after_p); |
6763 | } |
6764 | } |
6765 | } |
6766 | src_regno = dst_regno = -1; |
6767 | curr_set = single_set (insn: curr_insn); |
6768 | if (curr_set != NULL_RTX && REG_P (SET_DEST (curr_set))) |
6769 | dst_regno = REGNO (SET_DEST (curr_set)); |
6770 | if (curr_set != NULL_RTX && REG_P (SET_SRC (curr_set))) |
6771 | src_regno = REGNO (SET_SRC (curr_set)); |
6772 | update_reloads_num_p = true; |
6773 | if (src_regno < lra_constraint_new_regno_start |
6774 | && src_regno >= FIRST_PSEUDO_REGISTER |
6775 | && reg_renumber[src_regno] < 0 |
6776 | && dst_regno >= lra_constraint_new_regno_start |
6777 | && (cl = lra_get_allocno_class (regno: dst_regno)) != NO_REGS) |
6778 | { |
6779 | /* 'reload_pseudo <- original_pseudo'. */ |
6780 | if (ira_class_hard_regs_num[cl] <= max_small_class_regs_num) |
6781 | reloads_num++; |
6782 | update_reloads_num_p = false; |
6783 | succ_p = false; |
6784 | if (usage_insns[src_regno].check == curr_usage_insns_check |
6785 | && (next_usage_insns = usage_insns[src_regno].insns) != NULL_RTX) |
6786 | succ_p = inherit_reload_reg (def_p: false, original_regno: src_regno, cl, |
6787 | insn: curr_insn, next_usage_insns); |
6788 | if (succ_p) |
6789 | change_p = true; |
6790 | else |
6791 | setup_next_usage_insn (regno: src_regno, insn: curr_insn, reloads_num, after_p: false); |
6792 | if (hard_reg_set_subset_p (reg_class_contents[cl], y: live_hard_regs)) |
6793 | potential_reload_hard_regs |= reg_class_contents[cl]; |
6794 | } |
6795 | else if (src_regno < 0 |
6796 | && dst_regno >= lra_constraint_new_regno_start |
6797 | && invariant_p (SET_SRC (curr_set)) |
6798 | && (cl = lra_get_allocno_class (regno: dst_regno)) != NO_REGS |
6799 | && ! bitmap_bit_p (&invalid_invariant_regs, dst_regno) |
6800 | && ! bitmap_bit_p (&invalid_invariant_regs, |
6801 | ORIGINAL_REGNO(regno_reg_rtx[dst_regno]))) |
6802 | { |
6803 | /* 'reload_pseudo <- invariant'. */ |
6804 | if (ira_class_hard_regs_num[cl] <= max_small_class_regs_num) |
6805 | reloads_num++; |
6806 | update_reloads_num_p = false; |
6807 | if (process_invariant_for_inheritance (SET_DEST (curr_set), SET_SRC (curr_set))) |
6808 | change_p = true; |
6809 | if (hard_reg_set_subset_p (reg_class_contents[cl], y: live_hard_regs)) |
6810 | potential_reload_hard_regs |= reg_class_contents[cl]; |
6811 | } |
6812 | else if (src_regno >= lra_constraint_new_regno_start |
6813 | && dst_regno < lra_constraint_new_regno_start |
6814 | && dst_regno >= FIRST_PSEUDO_REGISTER |
6815 | && reg_renumber[dst_regno] < 0 |
6816 | && (cl = lra_get_allocno_class (regno: src_regno)) != NO_REGS |
6817 | && usage_insns[dst_regno].check == curr_usage_insns_check |
6818 | && (next_usage_insns |
6819 | = usage_insns[dst_regno].insns) != NULL_RTX) |
6820 | { |
6821 | if (ira_class_hard_regs_num[cl] <= max_small_class_regs_num) |
6822 | reloads_num++; |
6823 | update_reloads_num_p = false; |
6824 | /* 'original_pseudo <- reload_pseudo'. */ |
6825 | if (! JUMP_P (curr_insn) |
6826 | && inherit_reload_reg (def_p: true, original_regno: dst_regno, cl, |
6827 | insn: curr_insn, next_usage_insns)) |
6828 | change_p = true; |
6829 | /* Invalidate. */ |
6830 | usage_insns[dst_regno].check = 0; |
6831 | if (hard_reg_set_subset_p (reg_class_contents[cl], y: live_hard_regs)) |
6832 | potential_reload_hard_regs |= reg_class_contents[cl]; |
6833 | } |
6834 | else if (INSN_P (curr_insn)) |
6835 | { |
6836 | int iter; |
6837 | int max_uid = get_max_uid (); |
6838 | |
6839 | curr_id = lra_get_insn_recog_data (insn: curr_insn); |
6840 | curr_static_id = curr_id->insn_static_data; |
6841 | to_inherit_num = 0; |
6842 | /* Process insn definitions. */ |
6843 | for (iter = 0; iter < 2; iter++) |
6844 | for (reg = iter == 0 ? curr_id->regs : curr_static_id->hard_regs; |
6845 | reg != NULL; |
6846 | reg = reg->next) |
6847 | if (reg->type != OP_IN |
6848 | && (dst_regno = reg->regno) < lra_constraint_new_regno_start) |
6849 | { |
6850 | if (dst_regno >= FIRST_PSEUDO_REGISTER && reg->type == OP_OUT |
6851 | && reg_renumber[dst_regno] < 0 && ! reg->subreg_p |
6852 | && usage_insns[dst_regno].check == curr_usage_insns_check |
6853 | && (next_usage_insns |
6854 | = usage_insns[dst_regno].insns) != NULL_RTX) |
6855 | { |
6856 | struct lra_insn_reg *r; |
6857 | |
6858 | for (r = curr_id->regs; r != NULL; r = r->next) |
6859 | if (r->type != OP_OUT && r->regno == dst_regno) |
6860 | break; |
6861 | /* Don't do inheritance if the pseudo is also |
6862 | used in the insn. */ |
6863 | if (r == NULL) |
6864 | /* We cannot do inheritance right now |
6865 | because the current insn reg info (chain |
6866 | regs) can change after that. */ |
6867 | add_to_inherit (regno: dst_regno, insns: next_usage_insns); |
6868 | } |
6869 | /* We cannot process one reg twice here because of |
6870 | usage_insns invalidation. */ |
6871 | if ((dst_regno < FIRST_PSEUDO_REGISTER |
6872 | || reg_renumber[dst_regno] >= 0) |
6873 | && ! reg->subreg_p && reg->type != OP_IN) |
6874 | { |
6875 | HARD_REG_SET s; |
6876 | |
6877 | if (split_if_necessary (regno: dst_regno, mode: reg->biggest_mode, |
6878 | potential_reload_hard_regs, |
6879 | before_p: false, insn: curr_insn, max_uid)) |
6880 | change_p = true; |
6881 | CLEAR_HARD_REG_SET (set&: s); |
6882 | if (dst_regno < FIRST_PSEUDO_REGISTER) |
6883 | add_to_hard_reg_set (regs: &s, mode: reg->biggest_mode, regno: dst_regno); |
6884 | else |
6885 | add_to_hard_reg_set (regs: &s, PSEUDO_REGNO_MODE (dst_regno), |
6886 | regno: reg_renumber[dst_regno]); |
6887 | live_hard_regs &= ~s; |
6888 | potential_reload_hard_regs &= ~s; |
6889 | } |
/* We should invalidate potential inheritance or
splitting from the current insn usages to the next
usage insns (see code below), as the output pseudo
prevents this. */
6894 | if ((dst_regno >= FIRST_PSEUDO_REGISTER |
6895 | && reg_renumber[dst_regno] < 0) |
6896 | || (reg->type == OP_OUT && ! reg->subreg_p |
6897 | && (dst_regno < FIRST_PSEUDO_REGISTER |
6898 | || reg_renumber[dst_regno] >= 0))) |
6899 | { |
6900 | /* Invalidate and mark definitions. */ |
6901 | if (dst_regno >= FIRST_PSEUDO_REGISTER) |
6902 | usage_insns[dst_regno].check = -(int) INSN_UID (insn: curr_insn); |
6903 | else |
6904 | { |
6905 | nregs = hard_regno_nregs (regno: dst_regno, |
6906 | mode: reg->biggest_mode); |
6907 | for (i = 0; i < nregs; i++) |
6908 | usage_insns[dst_regno + i].check |
6909 | = -(int) INSN_UID (insn: curr_insn); |
6910 | } |
6911 | } |
6912 | } |
6913 | /* Process clobbered call regs. */ |
6914 | if (curr_id->arg_hard_regs != NULL) |
6915 | for (i = 0; (dst_regno = curr_id->arg_hard_regs[i]) >= 0; i++) |
6916 | if (dst_regno >= FIRST_PSEUDO_REGISTER) |
6917 | usage_insns[dst_regno - FIRST_PSEUDO_REGISTER].check |
6918 | = -(int) INSN_UID (insn: curr_insn); |
6919 | if (! JUMP_P (curr_insn)) |
6920 | for (i = 0; i < to_inherit_num; i++) |
6921 | if (inherit_reload_reg (def_p: true, original_regno: to_inherit[i].regno, |
6922 | cl: ALL_REGS, insn: curr_insn, |
6923 | next_usage_insns: to_inherit[i].insns)) |
6924 | change_p = true; |
6925 | if (CALL_P (curr_insn)) |
6926 | { |
6927 | rtx cheap, pat, dest; |
6928 | rtx_insn *restore; |
6929 | int regno, hard_regno; |
6930 | |
6931 | calls_num++; |
6932 | function_abi callee_abi = insn_callee_abi (curr_insn); |
6933 | last_call_for_abi[callee_abi.id ()] = calls_num; |
6934 | full_and_partial_call_clobbers |
6935 | |= callee_abi.full_and_partial_reg_clobbers (); |
6936 | if ((cheap = find_reg_note (curr_insn, |
6937 | REG_RETURNED, NULL_RTX)) != NULL_RTX |
6938 | && ((cheap = XEXP (cheap, 0)), true) |
6939 | && (regno = REGNO (cheap)) >= FIRST_PSEUDO_REGISTER |
6940 | && (hard_regno = reg_renumber[regno]) >= 0 |
6941 | && usage_insns[regno].check == curr_usage_insns_check |
/* If there are pending saves/restores, the
optimization is not worth it. */
6944 | && usage_insns[regno].calls_num == calls_num - 1 |
6945 | && callee_abi.clobbers_reg_p (GET_MODE (cheap), regno: hard_regno)) |
6946 | { |
/* Restore the pseudo from the call result, as the
REG_RETURNED note says that the pseudo value is
in the call result and the pseudo is an argument
of the call. */
6951 | pat = PATTERN (insn: curr_insn); |
6952 | if (GET_CODE (pat) == PARALLEL) |
6953 | pat = XVECEXP (pat, 0, 0); |
6954 | dest = SET_DEST (pat); |
/* For multiple return values dest is a PARALLEL.
Currently we handle only the single return value case. */
6957 | if (REG_P (dest)) |
6958 | { |
6959 | start_sequence (); |
6960 | emit_move_insn (cheap, copy_rtx (dest)); |
6961 | restore = get_insns (); |
6962 | end_sequence (); |
6963 | lra_process_new_insns (curr_insn, NULL, restore, |
6964 | "Inserting call parameter restore" ); |
/* We don't need a save/restore of the pseudo for
this call. */
6967 | usage_insns[regno].calls_num = calls_num; |
6968 | remove_from_hard_reg_set |
6969 | (regs: &full_and_partial_call_clobbers, |
6970 | GET_MODE (cheap), regno: hard_regno); |
6971 | bitmap_set_bit (&check_only_regs, regno); |
6972 | } |
6973 | } |
6974 | } |
6975 | to_inherit_num = 0; |
6976 | /* Process insn usages. */ |
6977 | for (iter = 0; iter < 2; iter++) |
6978 | for (reg = iter == 0 ? curr_id->regs : curr_static_id->hard_regs; |
6979 | reg != NULL; |
6980 | reg = reg->next) |
6981 | if ((reg->type != OP_OUT |
6982 | || (reg->type == OP_OUT && reg->subreg_p)) |
6983 | && (src_regno = reg->regno) < lra_constraint_new_regno_start) |
6984 | { |
6985 | if (src_regno >= FIRST_PSEUDO_REGISTER |
6986 | && reg_renumber[src_regno] < 0 && reg->type == OP_IN) |
6987 | { |
6988 | if (usage_insns[src_regno].check == curr_usage_insns_check |
6989 | && (next_usage_insns |
6990 | = usage_insns[src_regno].insns) != NULL_RTX |
6991 | && NONDEBUG_INSN_P (curr_insn)) |
6992 | add_to_inherit (regno: src_regno, insns: next_usage_insns); |
6993 | else if (usage_insns[src_regno].check |
6994 | != -(int) INSN_UID (insn: curr_insn)) |
6995 | /* Add usages but only if the reg is not set up |
6996 | in the same insn. */ |
6997 | add_next_usage_insn (regno: src_regno, insn: curr_insn, reloads_num); |
6998 | } |
6999 | else if (src_regno < FIRST_PSEUDO_REGISTER |
7000 | || reg_renumber[src_regno] >= 0) |
7001 | { |
7002 | bool before_p; |
7003 | rtx_insn *use_insn = curr_insn; |
7004 | |
7005 | before_p = (JUMP_P (curr_insn) |
7006 | || (CALL_P (curr_insn) && reg->type == OP_IN)); |
7007 | if (NONDEBUG_INSN_P (curr_insn) |
7008 | && (! JUMP_P (curr_insn) || reg->type == OP_IN) |
7009 | && split_if_necessary (regno: src_regno, mode: reg->biggest_mode, |
7010 | potential_reload_hard_regs, |
7011 | before_p, insn: curr_insn, max_uid)) |
7012 | { |
7013 | if (reg->subreg_p) |
7014 | check_and_force_assignment_correctness_p = true; |
7015 | change_p = true; |
7016 | /* Invalidate. */ |
7017 | usage_insns[src_regno].check = 0; |
7018 | if (before_p) |
7019 | use_insn = PREV_INSN (insn: curr_insn); |
7020 | } |
7021 | if (NONDEBUG_INSN_P (curr_insn)) |
7022 | { |
7023 | if (src_regno < FIRST_PSEUDO_REGISTER) |
7024 | add_to_hard_reg_set (regs: &live_hard_regs, |
7025 | mode: reg->biggest_mode, regno: src_regno); |
7026 | else |
7027 | add_to_hard_reg_set (regs: &live_hard_regs, |
7028 | PSEUDO_REGNO_MODE (src_regno), |
7029 | regno: reg_renumber[src_regno]); |
7030 | } |
7031 | if (src_regno >= FIRST_PSEUDO_REGISTER) |
7032 | add_next_usage_insn (regno: src_regno, insn: use_insn, reloads_num); |
7033 | else |
7034 | { |
7035 | for (i = 0; i < hard_regno_nregs (regno: src_regno, mode: reg->biggest_mode); i++) |
7036 | add_next_usage_insn (regno: src_regno + i, insn: use_insn, reloads_num); |
7037 | } |
7038 | } |
7039 | } |
7040 | /* Process used call regs. */ |
7041 | if (curr_id->arg_hard_regs != NULL) |
7042 | for (i = 0; (src_regno = curr_id->arg_hard_regs[i]) >= 0; i++) |
7043 | if (src_regno < FIRST_PSEUDO_REGISTER) |
7044 | { |
7045 | SET_HARD_REG_BIT (set&: live_hard_regs, bit: src_regno); |
7046 | add_next_usage_insn (regno: src_regno, insn: curr_insn, reloads_num); |
7047 | } |
7048 | for (i = 0; i < to_inherit_num; i++) |
7049 | { |
7050 | src_regno = to_inherit[i].regno; |
7051 | if (inherit_reload_reg (def_p: false, original_regno: src_regno, cl: ALL_REGS, |
7052 | insn: curr_insn, next_usage_insns: to_inherit[i].insns)) |
7053 | change_p = true; |
7054 | else |
7055 | setup_next_usage_insn (regno: src_regno, insn: curr_insn, reloads_num, after_p: false); |
7056 | } |
7057 | } |
7058 | if (update_reloads_num_p |
7059 | && NONDEBUG_INSN_P (curr_insn) && curr_set != NULL_RTX) |
7060 | { |
7061 | int regno = -1; |
7062 | if ((REG_P (SET_DEST (curr_set)) |
7063 | && (regno = REGNO (SET_DEST (curr_set))) >= lra_constraint_new_regno_start |
7064 | && reg_renumber[regno] < 0 |
7065 | && (cl = lra_get_allocno_class (regno)) != NO_REGS) |
7066 | || (REG_P (SET_SRC (curr_set)) |
7067 | && (regno = REGNO (SET_SRC (curr_set))) >= lra_constraint_new_regno_start |
7068 | && reg_renumber[regno] < 0 |
7069 | && (cl = lra_get_allocno_class (regno)) != NO_REGS)) |
7070 | { |
7071 | if (ira_class_hard_regs_num[cl] <= max_small_class_regs_num) |
7072 | reloads_num++; |
7073 | if (hard_reg_set_subset_p (reg_class_contents[cl], y: live_hard_regs)) |
7074 | potential_reload_hard_regs |= reg_class_contents[cl]; |
7075 | } |
7076 | } |
7077 | if (NONDEBUG_INSN_P (curr_insn)) |
7078 | { |
7079 | int regno; |
7080 | |
7081 | /* Invalidate invariants with changed regs. */ |
7082 | curr_id = lra_get_insn_recog_data (insn: curr_insn); |
7083 | for (reg = curr_id->regs; reg != NULL; reg = reg->next) |
7084 | if (reg->type != OP_IN) |
7085 | { |
7086 | bitmap_set_bit (&invalid_invariant_regs, reg->regno); |
7087 | bitmap_set_bit (&invalid_invariant_regs, |
7088 | ORIGINAL_REGNO (regno_reg_rtx[reg->regno])); |
7089 | } |
7090 | curr_static_id = curr_id->insn_static_data; |
7091 | for (reg = curr_static_id->hard_regs; reg != NULL; reg = reg->next) |
7092 | if (reg->type != OP_IN) |
7093 | bitmap_set_bit (&invalid_invariant_regs, reg->regno); |
7094 | if (curr_id->arg_hard_regs != NULL) |
7095 | for (i = 0; (regno = curr_id->arg_hard_regs[i]) >= 0; i++) |
7096 | if (regno >= FIRST_PSEUDO_REGISTER) |
7097 | bitmap_set_bit (&invalid_invariant_regs, |
7098 | regno - FIRST_PSEUDO_REGISTER); |
7099 | } |
7100 | /* We reached the start of the current basic block. */ |
7101 | if (prev_insn == NULL_RTX || prev_insn == PREV_INSN (insn: head) |
7102 | || BLOCK_FOR_INSN (insn: prev_insn) != curr_bb) |
7103 | { |
/* We reached the beginning of the current block -- do the
rest of the splitting in the current BB. */
7106 | to_process = df_get_live_in (bb: curr_bb); |
7107 | if (BLOCK_FOR_INSN (insn: head) != curr_bb) |
7108 | { |
7109 | /* We are somewhere in the middle of EBB. */ |
7110 | get_live_on_other_edges (EDGE_PRED (curr_bb, 0)->src, |
7111 | to: curr_bb, res: &temp_bitmap); |
7112 | to_process = &temp_bitmap; |
7113 | } |
7114 | head_p = true; |
7115 | EXECUTE_IF_SET_IN_BITMAP (to_process, 0, j, bi) |
7116 | { |
7117 | if ((int) j >= lra_constraint_new_regno_start) |
7118 | break; |
7119 | if (((int) j < FIRST_PSEUDO_REGISTER || reg_renumber[j] >= 0) |
7120 | && usage_insns[j].check == curr_usage_insns_check |
7121 | && (next_usage_insns = usage_insns[j].insns) != NULL_RTX) |
7122 | { |
7123 | if (need_for_split_p (potential_reload_hard_regs, regno: j)) |
7124 | { |
7125 | if (lra_dump_file != NULL && head_p) |
7126 | { |
7127 | fprintf (stream: lra_dump_file, |
7128 | format: " ----------------------------------\n" ); |
7129 | head_p = false; |
7130 | } |
7131 | if (split_reg (before_p: false, original_regno: j, insn: bb_note (curr_bb), |
7132 | next_usage_insns, NULL)) |
7133 | change_p = true; |
7134 | } |
7135 | usage_insns[j].check = 0; |
7136 | } |
7137 | } |
7138 | } |
7139 | } |
7140 | return change_p; |
7141 | } |
7142 | |
/* This value affects EBB formation. If the probability of an edge from
the EBB to a BB is not greater than the following value, we don't add
the BB to the EBB. */
7146 | #define EBB_PROBABILITY_CUTOFF \ |
7147 | ((REG_BR_PROB_BASE * param_lra_inheritance_ebb_probability_cutoff) / 100) |
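/* For example, a parameter value of 50 rejects fall-through edges whose
probability is below 50% of REG_BR_PROB_BASE (see the use of this
cutoff in lra_inheritance below). */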
7148 | |
7149 | /* Current number of inheritance/split iteration. */ |
7150 | int lra_inheritance_iter; |
7151 | |
7152 | /* Entry function for inheritance/split pass. */ |
7153 | void |
7154 | lra_inheritance (void) |
7155 | { |
7156 | int i; |
7157 | basic_block bb, start_bb; |
7158 | edge e; |
7159 | |
7160 | lra_inheritance_iter++; |
7161 | if (lra_inheritance_iter > LRA_MAX_INHERITANCE_PASSES) |
7162 | return; |
7163 | timevar_push (tv: TV_LRA_INHERITANCE); |
7164 | if (lra_dump_file != NULL) |
7165 | fprintf (stream: lra_dump_file, format: "\n********** Inheritance #%d: **********\n\n" , |
7166 | lra_inheritance_iter); |
7167 | curr_usage_insns_check = 0; |
7168 | usage_insns = XNEWVEC (struct usage_insns, lra_constraint_new_regno_start); |
7169 | for (i = 0; i < lra_constraint_new_regno_start; i++) |
7170 | usage_insns[i].check = 0; |
7171 | bitmap_initialize (head: &check_only_regs, obstack: ®_obstack); |
7172 | bitmap_initialize (head: &invalid_invariant_regs, obstack: ®_obstack); |
7173 | bitmap_initialize (head: &live_regs, obstack: ®_obstack); |
7174 | bitmap_initialize (head: &temp_bitmap, obstack: ®_obstack); |
7175 | bitmap_initialize (head: &ebb_global_regs, obstack: ®_obstack); |
7176 | FOR_EACH_BB_FN (bb, cfun) |
7177 | { |
7178 | start_bb = bb; |
7179 | if (lra_dump_file != NULL) |
7180 | fprintf (stream: lra_dump_file, format: "EBB" ); |
/* Form an EBB starting with BB. */
7182 | bitmap_clear (&ebb_global_regs); |
7183 | bitmap_ior_into (&ebb_global_regs, df_get_live_in (bb)); |
7184 | for (;;) |
7185 | { |
7186 | if (lra_dump_file != NULL) |
7187 | fprintf (stream: lra_dump_file, format: " %d" , bb->index); |
7188 | if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun) |
7189 | || LABEL_P (BB_HEAD (bb->next_bb))) |
7190 | break; |
7191 | e = find_fallthru_edge (edges: bb->succs); |
7192 | if (! e) |
7193 | break; |
7194 | if (e->probability.initialized_p () |
7195 | && e->probability.to_reg_br_prob_base () < EBB_PROBABILITY_CUTOFF) |
7196 | break; |
7197 | bb = bb->next_bb; |
7198 | } |
7199 | bitmap_ior_into (&ebb_global_regs, df_get_live_out (bb)); |
7200 | if (lra_dump_file != NULL) |
7201 | fprintf (stream: lra_dump_file, format: "\n" ); |
7202 | if (inherit_in_ebb (BB_HEAD (start_bb), BB_END (bb))) |
7203 | /* Remember that the EBB head and tail can change in |
7204 | inherit_in_ebb. */ |
7205 | update_ebb_live_info (BB_HEAD (start_bb), BB_END (bb)); |
7206 | } |
7207 | bitmap_release (head: &ebb_global_regs); |
7208 | bitmap_release (head: &temp_bitmap); |
7209 | bitmap_release (head: &live_regs); |
7210 | bitmap_release (head: &invalid_invariant_regs); |
7211 | bitmap_release (head: &check_only_regs); |
7212 | free (ptr: usage_insns); |
7213 | lra_dump_insns_if_possible (title: "func after inheritance" ); |
7214 | timevar_pop (tv: TV_LRA_INHERITANCE); |
7215 | } |
7216 | |
7217 | |
7218 | |
7219 | /* This page contains code to undo failed inheritance/split |
7220 | transformations. */ |
7221 | |
7222 | /* Current number of iteration undoing inheritance/split. */ |
7223 | int lra_undo_inheritance_iter; |
7224 | |
/* Fix BB live info LIVE after removing pseudos created by the
inheritance/split pass, which are given by REMOVED_PSEUDOS. */
7227 | static void |
7228 | fix_bb_live_info (bitmap live, bitmap removed_pseudos) |
7229 | { |
7230 | unsigned int regno; |
7231 | bitmap_iterator bi; |
7232 | |
7233 | EXECUTE_IF_SET_IN_BITMAP (removed_pseudos, 0, regno, bi) |
7234 | if (bitmap_clear_bit (live, regno) |
7235 | && REG_P (lra_reg_info[regno].restore_rtx)) |
7236 | bitmap_set_bit (live, REGNO (lra_reg_info[regno].restore_rtx)); |
7237 | } |
7238 | |
/* Return the regno of REG (or of the reg inside a subreg of REG).
Otherwise, return a negative number. */
7241 | static int |
7242 | get_regno (rtx reg) |
7243 | { |
7244 | if (GET_CODE (reg) == SUBREG) |
7245 | reg = SUBREG_REG (reg); |
7246 | if (REG_P (reg)) |
7247 | return REGNO (reg); |
7248 | return -1; |
7249 | } |

/* Delete a move INSN with destination reg DREGNO and a previous clobber
   insn with the same regno.  The inheritance/split code can generate moves
   with a preceding clobber, and when we delete such a move we should delete
   the clobber insn too to keep the life info correct.  */
static void
delete_move_and_clobber (rtx_insn *insn, int dregno)
{
  rtx_insn *prev_insn = PREV_INSN (insn);

  lra_set_insn_deleted (insn);
  lra_assert (dregno >= 0);
  if (prev_insn != NULL && NONDEBUG_INSN_P (prev_insn)
      && GET_CODE (PATTERN (prev_insn)) == CLOBBER
      && dregno == get_regno (XEXP (PATTERN (prev_insn), 0)))
    lra_set_insn_deleted (prev_insn);
}
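
/* A hypothetical example of the pattern handled by delete_move_and_clobber:
   the inheritance/split code can emit

       (clobber (reg:DI 260))
       (set (reg:DI 260) (reg:DI 97))

   If only the move were deleted when undoing the transformation, the stale
   clobber of r260 would leave incorrect life information behind, so the
   clobber is deleted together with the move.  */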

/* Remove the inheritance/split pseudos which are in REMOVE_PSEUDOS and
   return true if we did any change.  The undo transformations for
   inheritance look like
     i <- i2
     p <- i      =>   p <- i2
   or removing
     p <- i, i <- p, and i <- i3
   where p is the original pseudo from which inheritance pseudo i was
   created, i and i3 are removed inheritance pseudos, and i2 is another
   inheritance pseudo that is not removed.  All split pseudos and other
   occurrences of removed inheritance pseudos are changed to the
   corresponding original pseudos.

   The function also schedules insns changed and created during the
   inheritance/split pass for processing by the subsequent constraint
   pass.  */
static bool
remove_inheritance_pseudos (bitmap remove_pseudos)
{
  basic_block bb;
  int regno, sregno, prev_sregno, dregno;
  rtx restore_rtx;
  rtx set, prev_set;
  rtx_insn *prev_insn;
  bool change_p, done_p;

  change_p = ! bitmap_empty_p (remove_pseudos);
  /* We cannot finish the function right away if CHANGE_P is true
     because we need to mark insns affected by the previous
     inheritance/split pass for processing by the subsequent
     constraint pass.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      fix_bb_live_info (df_get_live_in (bb), remove_pseudos);
      fix_bb_live_info (df_get_live_out (bb), remove_pseudos);
      FOR_BB_INSNS_REVERSE (bb, curr_insn)
        {
          if (! INSN_P (curr_insn))
            continue;
          done_p = false;
          sregno = dregno = -1;
          if (change_p && NONDEBUG_INSN_P (curr_insn)
              && (set = single_set (curr_insn)) != NULL_RTX)
            {
              dregno = get_regno (SET_DEST (set));
              sregno = get_regno (SET_SRC (set));
            }

          if (sregno >= 0 && dregno >= 0)
            {
              if (bitmap_bit_p (remove_pseudos, dregno)
                  && ! REG_P (lra_reg_info[dregno].restore_rtx))
                {
                  /* invariant inheritance pseudo <- original pseudo */
                  if (lra_dump_file != NULL)
                    {
                      fprintf (lra_dump_file, " Removing invariant inheritance:\n");
                      dump_insn_slim (lra_dump_file, curr_insn);
                      fprintf (lra_dump_file, "\n");
                    }
                  delete_move_and_clobber (curr_insn, dregno);
                  done_p = true;
                }
              else if (bitmap_bit_p (remove_pseudos, sregno)
                       && ! REG_P (lra_reg_info[sregno].restore_rtx))
                {
                  /* reload pseudo <- invariant inheritance pseudo */
                  start_sequence ();
                  /* We cannot just change the source.  It might be
                     an insn different from the move.  */
                  emit_insn (lra_reg_info[sregno].restore_rtx);
                  rtx_insn *new_insns = get_insns ();
                  end_sequence ();
                  lra_assert (single_set (new_insns) != NULL
                              && SET_DEST (set) == SET_DEST (single_set (new_insns)));
                  lra_process_new_insns (curr_insn, NULL, new_insns,
                                         "Changing reload<-invariant inheritance");
                  delete_move_and_clobber (curr_insn, dregno);
                  done_p = true;
                }
              else if ((bitmap_bit_p (remove_pseudos, sregno)
                        && (get_regno (lra_reg_info[sregno].restore_rtx) == dregno
                            || (bitmap_bit_p (remove_pseudos, dregno)
                                && get_regno (lra_reg_info[sregno].restore_rtx) >= 0
                                && (get_regno (lra_reg_info[sregno].restore_rtx)
                                    == get_regno (lra_reg_info[dregno].restore_rtx)))))
                       || (bitmap_bit_p (remove_pseudos, dregno)
                           && get_regno (lra_reg_info[dregno].restore_rtx) == sregno))
                /* One of the following cases:
                     original <- removed inheritance pseudo
                     removed inherit pseudo <- another removed inherit pseudo
                     removed inherit pseudo <- original pseudo
                   Or
                     removed_split_pseudo <- original_reg
                     original_reg <- removed_split_pseudo */
                {
                  if (lra_dump_file != NULL)
                    {
                      fprintf (lra_dump_file, " Removing %s:\n",
                               bitmap_bit_p (&lra_split_regs, sregno)
                               || bitmap_bit_p (&lra_split_regs, dregno)
                               ? "split" : "inheritance");
                      dump_insn_slim (lra_dump_file, curr_insn);
                    }
                  delete_move_and_clobber (curr_insn, dregno);
                  done_p = true;
                }
              else if (bitmap_bit_p (remove_pseudos, sregno)
                       && bitmap_bit_p (&lra_inheritance_pseudos, sregno))
                {
                  /* Search for the following pattern:
                       inherit_or_split_pseudo1 <- inherit_or_split_pseudo2
                       original_pseudo <- inherit_or_split_pseudo1
                     where the 2nd insn is the current insn and
                     inherit_or_split_pseudo2 is not removed.  If it is found,
                     change the current insn into:
                       original_pseudo <- inherit_or_split_pseudo2.  */
                  for (prev_insn = PREV_INSN (curr_insn);
                       prev_insn != NULL_RTX && ! NONDEBUG_INSN_P (prev_insn);
                       prev_insn = PREV_INSN (prev_insn))
                    ;
                  if (prev_insn != NULL_RTX && BLOCK_FOR_INSN (prev_insn) == bb
                      && (prev_set = single_set (prev_insn)) != NULL_RTX
                      /* There should be no subregs in the insn we are
                         searching for because only the original reg might
                         be in a subreg when we changed the mode of a
                         load/store for splitting.  */
                      && REG_P (SET_DEST (prev_set))
                      && REG_P (SET_SRC (prev_set))
                      && (int) REGNO (SET_DEST (prev_set)) == sregno
                      && ((prev_sregno = REGNO (SET_SRC (prev_set)))
                          >= FIRST_PSEUDO_REGISTER)
                      && (lra_reg_info[prev_sregno].restore_rtx == NULL_RTX
                          ||
                          /* As we consider a chain of inheritance or
                             splitting as described in the comment above, we
                             should check that sregno and prev_sregno were
                             inheritance/split pseudos created from the
                             same original regno.  */
                          (get_regno (lra_reg_info[sregno].restore_rtx) >= 0
                           && (get_regno (lra_reg_info[sregno].restore_rtx)
                               == get_regno (lra_reg_info[prev_sregno].restore_rtx))))
                      && ! bitmap_bit_p (remove_pseudos, prev_sregno))
                    {
                      int restore_regno = get_regno (lra_reg_info[sregno].restore_rtx);
                      if (restore_regno < 0)
                        restore_regno = prev_sregno;
                      lra_assert (GET_MODE (SET_SRC (prev_set))
                                  == GET_MODE (regno_reg_rtx[restore_regno]));
                      /* Although we have a single set, the insn can
                         contain more than one occurrence of the sregno
                         register as a source.  Change all occurrences.  */
                      lra_substitute_pseudo_within_insn (curr_insn, sregno,
                                                         regno_reg_rtx[restore_regno],
                                                         false);
                      /* As we are finishing with processing the insn
                         here, check the destination too as it might be an
                         inheritance pseudo for another pseudo.  */
                      if (bitmap_bit_p (remove_pseudos, dregno)
                          && bitmap_bit_p (&lra_inheritance_pseudos, dregno)
                          && (restore_rtx
                              = lra_reg_info[dregno].restore_rtx) != NULL_RTX)
                        {
                          if (GET_CODE (SET_DEST (set)) == SUBREG)
                            SUBREG_REG (SET_DEST (set)) = restore_rtx;
                          else
                            SET_DEST (set) = restore_rtx;
                        }
                      lra_push_insn_and_update_insn_regno_info (curr_insn);
                      lra_set_used_insn_alternative_by_uid
                        (INSN_UID (curr_insn), LRA_UNKNOWN_ALT);
                      done_p = true;
                      if (lra_dump_file != NULL)
                        {
                          fprintf (lra_dump_file, " Change reload insn:\n");
                          dump_insn_slim (lra_dump_file, curr_insn);
                        }
                    }
                }
            }
          if (! done_p)
            {
              struct lra_insn_reg *reg;
              bool restored_regs_p = false;
              bool kept_regs_p = false;

              curr_id = lra_get_insn_recog_data (curr_insn);
              for (reg = curr_id->regs; reg != NULL; reg = reg->next)
                {
                  regno = reg->regno;
                  restore_rtx = lra_reg_info[regno].restore_rtx;
                  if (restore_rtx != NULL_RTX)
                    {
                      if (change_p && bitmap_bit_p (remove_pseudos, regno))
                        {
                          lra_substitute_pseudo_within_insn
                            (curr_insn, regno, restore_rtx, false);
                          restored_regs_p = true;
                        }
                      else
                        kept_regs_p = true;
                    }
                }
              if (NONDEBUG_INSN_P (curr_insn) && kept_regs_p)
                {
                  /* The instruction has changed since the previous
                     constraints pass.  */
                  lra_push_insn_and_update_insn_regno_info (curr_insn);
                  lra_set_used_insn_alternative_by_uid
                    (INSN_UID (curr_insn), LRA_UNKNOWN_ALT);
                }
              else if (restored_regs_p)
                /* The instruction has been restored to the form that
                   it had during the previous constraints pass.  */
                lra_update_insn_regno_info (curr_insn);
              if (restored_regs_p && lra_dump_file != NULL)
                {
                  fprintf (lra_dump_file, " Insn after restoring regs:\n");
                  dump_insn_slim (lra_dump_file, curr_insn);
                }
            }
        }
    }
  return change_p;
}

/* If an optional reload pseudo failed to get a hard register or was not
   inherited, it is better to remove the optional reload.  We do this
   transformation after undoing inheritance to make it easier to figure out
   whether an optional reload needs to be removed.  Return true if we made
   any change.  */
static bool
undo_optional_reloads (void)
{
  bool change_p, keep_p;
  unsigned int regno, uid;
  bitmap_iterator bi, bi2;
  rtx_insn *insn;
  rtx set, src, dest;
  auto_bitmap removed_optional_reload_pseudos (&reg_obstack);

  bitmap_copy (removed_optional_reload_pseudos, &lra_optional_reload_pseudos);
  EXECUTE_IF_SET_IN_BITMAP (&lra_optional_reload_pseudos, 0, regno, bi)
    {
      keep_p = false;
      /* Keep optional reloads from previous subpasses.  */
      if (lra_reg_info[regno].restore_rtx == NULL_RTX
          /* If the original pseudo changed its allocation, just
             removing the optional pseudo is dangerous as the original
             pseudo will have a longer live range.  */
          || reg_renumber[REGNO (lra_reg_info[regno].restore_rtx)] >= 0)
        keep_p = true;
      else if (reg_renumber[regno] >= 0)
        EXECUTE_IF_SET_IN_BITMAP (&lra_reg_info[regno].insn_bitmap, 0, uid, bi2)
          {
            insn = lra_insn_recog_data[uid]->insn;
            if ((set = single_set (insn)) == NULL_RTX)
              continue;
            src = SET_SRC (set);
            dest = SET_DEST (set);
            if ((! REG_P (src) && ! SUBREG_P (src))
                || (! REG_P (dest) && ! SUBREG_P (dest)))
              continue;
            if (get_regno (dest) == (int) regno
                /* Ignore the insn for the optional reload itself.  */
                && (get_regno (lra_reg_info[regno].restore_rtx)
                    != get_regno (src))
                /* Check only inheritance on the last inheritance pass.  */
                && get_regno (src) >= new_regno_start
                /* Check that the optional reload was inherited.  */
                && bitmap_bit_p (&lra_inheritance_pseudos, get_regno (src)))
              {
                keep_p = true;
                break;
              }
          }
      if (keep_p)
        {
          bitmap_clear_bit (removed_optional_reload_pseudos, regno);
          if (lra_dump_file != NULL)
            fprintf (lra_dump_file, "Keep optional reload reg %d\n", regno);
        }
    }
  change_p = ! bitmap_empty_p (removed_optional_reload_pseudos);
  auto_bitmap insn_bitmap (&reg_obstack);
  EXECUTE_IF_SET_IN_BITMAP (removed_optional_reload_pseudos, 0, regno, bi)
    {
      if (lra_dump_file != NULL)
        fprintf (lra_dump_file, "Remove optional reload reg %d\n", regno);
      bitmap_copy (insn_bitmap, &lra_reg_info[regno].insn_bitmap);
      EXECUTE_IF_SET_IN_BITMAP (insn_bitmap, 0, uid, bi2)
        {
          /* We may have already removed a clobber.  */
          if (!lra_insn_recog_data[uid])
            continue;
          insn = lra_insn_recog_data[uid]->insn;
          if ((set = single_set (insn)) != NULL_RTX)
            {
              src = SET_SRC (set);
              dest = SET_DEST (set);
              if ((REG_P (src) || SUBREG_P (src))
                  && (REG_P (dest) || SUBREG_P (dest))
                  && ((get_regno (src) == (int) regno
                       && (get_regno (lra_reg_info[regno].restore_rtx)
                           == get_regno (dest)))
                      || (get_regno (dest) == (int) regno
                          && (get_regno (lra_reg_info[regno].restore_rtx)
                              == get_regno (src)))))
                {
                  if (lra_dump_file != NULL)
                    {
                      fprintf (lra_dump_file, " Deleting move %u\n",
                               INSN_UID (insn));
                      dump_insn_slim (lra_dump_file, insn);
                    }
                  delete_move_and_clobber (insn, get_regno (dest));
                  continue;
                }
              /* We should not worry about generating memory-memory moves
                 here, because if the corresponding inheritance did not work
                 (the inheritance pseudo did not get a hard reg), we remove
                 both the inheritance pseudo and the optional reload.  */
            }
          if (GET_CODE (PATTERN (insn)) == CLOBBER
              && REG_P (SET_DEST (insn))
              && get_regno (SET_DEST (insn)) == (int) regno)
            /* Refuse to remap clobbers to preexisting pseudos.  */
            gcc_unreachable ();
          lra_substitute_pseudo_within_insn
            (insn, regno, lra_reg_info[regno].restore_rtx, false);
          lra_update_insn_regno_info (insn);
          if (lra_dump_file != NULL)
            {
              fprintf (lra_dump_file,
                       " Restoring original insn:\n");
              dump_insn_slim (lra_dump_file, insn);
            }
        }
    }
  /* Clear restore_regnos.  */
  EXECUTE_IF_SET_IN_BITMAP (&lra_optional_reload_pseudos, 0, regno, bi)
    lra_reg_info[regno].restore_rtx = NULL_RTX;
  return change_p;
}
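
/* For example (hypothetical regnos), when optional reload pseudo r300,
   created for original pseudo r150, ends up in
   removed_optional_reload_pseudos above, the moves

       r300 <- r150   ...   r150 <- r300

   are deleted (together with any preceding clobber of r300) and every
   remaining occurrence of r300 is replaced with r150, restoring the insns to
   the form they had before the optional reload was generated.  */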

/* Entry function for undoing the inheritance/split transformations.  Return
   true if we did any RTL change in this pass.  */
bool
lra_undo_inheritance (void)
{
  unsigned int regno;
  int hard_regno;
  int n_all_inherit, n_inherit, n_all_split, n_split;
  rtx restore_rtx;
  bitmap_iterator bi;
  bool change_p;

  lra_undo_inheritance_iter++;
  if (lra_undo_inheritance_iter > LRA_MAX_INHERITANCE_PASSES)
    return false;
  if (lra_dump_file != NULL)
    fprintf (lra_dump_file,
             "\n********** Undoing inheritance #%d: **********\n\n",
             lra_undo_inheritance_iter);
  auto_bitmap remove_pseudos (&reg_obstack);
  n_inherit = n_all_inherit = 0;
  EXECUTE_IF_SET_IN_BITMAP (&lra_inheritance_pseudos, 0, regno, bi)
    if (lra_reg_info[regno].restore_rtx != NULL_RTX)
      {
        n_all_inherit++;
        if (reg_renumber[regno] < 0
            /* If the original pseudo changed its allocation, just
               removing inheritance is dangerous as for changing
               the allocation we used shorter live ranges.  */
            && (! REG_P (lra_reg_info[regno].restore_rtx)
                || reg_renumber[REGNO (lra_reg_info[regno].restore_rtx)] < 0))
          bitmap_set_bit (remove_pseudos, regno);
        else
          n_inherit++;
      }
  if (lra_dump_file != NULL && n_all_inherit != 0)
    fprintf (lra_dump_file, "Inherit %d out of %d (%.2f%%)\n",
             n_inherit, n_all_inherit,
             (double) n_inherit / n_all_inherit * 100);
  n_split = n_all_split = 0;
  EXECUTE_IF_SET_IN_BITMAP (&lra_split_regs, 0, regno, bi)
    if ((restore_rtx = lra_reg_info[regno].restore_rtx) != NULL_RTX)
      {
        int restore_regno = REGNO (restore_rtx);

        n_all_split++;
        hard_regno = (restore_regno >= FIRST_PSEUDO_REGISTER
                      ? reg_renumber[restore_regno] : restore_regno);
        if (hard_regno < 0 || reg_renumber[regno] == hard_regno)
          bitmap_set_bit (remove_pseudos, regno);
        else
          {
            n_split++;
            if (lra_dump_file != NULL)
              fprintf (lra_dump_file, " Keep split r%d (orig=r%d)\n",
                       regno, restore_regno);
          }
      }
  if (lra_dump_file != NULL && n_all_split != 0)
    fprintf (lra_dump_file, "Split %d out of %d (%.2f%%)\n",
             n_split, n_all_split,
             (double) n_split / n_all_split * 100);
  change_p = remove_inheritance_pseudos (remove_pseudos);
  /* Clear restore_regnos.  */
  EXECUTE_IF_SET_IN_BITMAP (&lra_inheritance_pseudos, 0, regno, bi)
    lra_reg_info[regno].restore_rtx = NULL_RTX;
  EXECUTE_IF_SET_IN_BITMAP (&lra_split_regs, 0, regno, bi)
    lra_reg_info[regno].restore_rtx = NULL_RTX;
  change_p = undo_optional_reloads () || change_p;
  if (change_p)
    lra_dump_insns_if_possible ("changed func after undoing inheritance");
  return change_p;
}

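/* A hypothetical illustration of the split bookkeeping above: if split
   pseudo r400 was created from a register that lives in hard register 3 and
   r400 itself was also assigned hard register 3, the split bought nothing,
   so r400 goes into REMOVE_PSEUDOS and its insns are undone by
   remove_inheritance_pseudos; if r400 got a different hard register, the
   split is kept and counted in the "Split ... out of ..." statistics.  */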