1 | /* Compute different info about registers. |
2 | Copyright (C) 1987-2024 Free Software Foundation, Inc. |
3 | |
4 | This file is part of GCC. |
5 | |
6 | GCC is free software; you can redistribute it and/or modify it under |
7 | the terms of the GNU General Public License as published by the Free |
8 | Software Foundation; either version 3, or (at your option) any later |
9 | version. |
10 | |
11 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or |
13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
14 | for more details. |
15 | |
16 | You should have received a copy of the GNU General Public License |
17 | along with GCC; see the file COPYING3. If not see |
18 | <http://www.gnu.org/licenses/>. */ |
19 | |
20 | |
21 | /* This file contains regscan pass of the compiler and passes for |
22 | dealing with info about modes of pseudo-registers inside |
23 | subregisters. It also defines some tables of information about the |
24 | hardware registers, function init_reg_sets to initialize the |
25 | tables, and other auxiliary functions to deal with info about |
26 | registers and their classes. */ |
27 | |
28 | #include "config.h" |
29 | #include "system.h" |
30 | #include "coretypes.h" |
31 | #include "backend.h" |
32 | #include "target.h" |
33 | #include "rtl.h" |
34 | #include "tree.h" |
35 | #include "df.h" |
36 | #include "memmodel.h" |
37 | #include "tm_p.h" |
38 | #include "insn-config.h" |
39 | #include "regs.h" |
40 | #include "ira.h" |
41 | #include "recog.h" |
42 | #include "diagnostic-core.h" |
43 | #include "reload.h" |
44 | #include "output.h" |
45 | #include "tree-pass.h" |
46 | #include "function-abi.h" |
47 | |
/* Maximum register number used in this function, plus one.
   NOTE(review): maintained by register-scanning passes outside this
   chunk — confirm before relying on exact update points.  */

int max_regno;
51 | |
/* Used to cache the results of simplifiable_subregs.  SHAPE is the input
   parameter and SIMPLIFIABLE_REGS is the result.  */
class simplifiable_subreg
{
public:
  /* Construct a cache entry for the given subreg shape; the
     constructor is defined elsewhere in this file.  */
  simplifiable_subreg (const subreg_shape &);

  /* The queried subreg shape (the cache key).  */
  subreg_shape shape;
  /* The cached result: hard registers for which a subreg of SHAPE
     can be simplified.  */
  HARD_REG_SET simplifiable_regs;
};
62 | |
/* Per-target register tables.  The default instances are used when the
   compiler is built without switchable-target support.  */
struct target_hard_regs default_target_hard_regs;
struct target_regs default_target_regs;
#if SWITCHABLE_TARGET
struct target_hard_regs *this_target_hard_regs = &default_target_hard_regs;
struct target_regs *this_target_regs = &default_target_regs;
#endif

/* Shorthand accessors for fields of the current target's hard-reg data.  */
#define call_used_regs \
  (this_target_hard_regs->x_call_used_regs)
#define regs_invalidated_by_call \
  (this_target_hard_regs->x_regs_invalidated_by_call)

/* Data for initializing fixed_regs.  */
static const char initial_fixed_regs[] = FIXED_REGISTERS;

/* Data for initializing call_used_regs.  A target may define either
   CALL_REALLY_USED_REGISTERS or CALL_USED_REGISTERS, but not both.  */
#ifdef CALL_REALLY_USED_REGISTERS
#ifdef CALL_USED_REGISTERS
#error CALL_USED_REGISTERS and CALL_REALLY_USED_REGISTERS are both defined
#endif
static const char initial_call_used_regs[] = CALL_REALLY_USED_REGISTERS;
#else
static const char initial_call_used_regs[] = CALL_USED_REGISTERS;
#endif

/* Indexed by hard register number, contains 1 for registers
   that are being used for global register decls.
   These must be exempt from ordinary flow analysis
   and are also considered fixed.  */
char global_regs[FIRST_PSEUDO_REGISTER];

/* The set of global registers (same info as global_regs, as a set).  */
HARD_REG_SET global_reg_set;

/* Declaration for the global register.  */
tree global_regs_decl[FIRST_PSEUDO_REGISTER];

/* Used to initialize reg_alloc_order.  */
#ifdef REG_ALLOC_ORDER
static int initial_reg_alloc_order[FIRST_PSEUDO_REGISTER] = REG_ALLOC_ORDER;
#endif

/* The same information, but as an array of unsigned ints.  We copy from
   these unsigned ints to the table above.  We do this so the tm.h files
   do not have to be aware of the wordsize for machines with <= 64 regs.
   Note that we hard-code 32 here, not HOST_BITS_PER_INT.  */
#define N_REG_INTS  \
  ((FIRST_PSEUDO_REGISTER + (32 - 1)) / 32)

static const unsigned int_reg_class_contents[N_REG_CLASSES][N_REG_INTS]
  = REG_CLASS_CONTENTS;

/* Array containing all of the register names.  */
static const char *const initial_reg_names[] = REGISTER_NAMES;

/* Array containing all of the register class names.  */
const char * reg_class_names[] = REG_CLASS_NAMES;

/* No more global register variables may be declared; true once
   reginfo has been initialized.  */
static int no_global_reg_vars = 0;
124 | |
125 | static void |
126 | clear_global_regs_cache (void) |
127 | { |
128 | for (size_t i = 0 ; i < FIRST_PSEUDO_REGISTER ; i++) |
129 | { |
130 | global_regs[i] = 0; |
131 | global_regs_decl[i] = NULL; |
132 | } |
133 | } |
134 | |
/* Reset all compilation-lifetime state held in this file, so a fresh
   compiler context starts with no global register variables recorded
   and no prohibition on declaring new ones.  */
void
reginfo_cc_finalize (void)
{
  clear_global_regs_cache ();
  no_global_reg_vars = 0;
  CLEAR_HARD_REG_SET (set&: global_reg_set);
}
142 | |
143 | /* In insn-preds.cc. */ |
144 | extern void init_reg_class_start_regs (); |
145 | |
146 | /* Given a register bitmap, turn on the bits in a HARD_REG_SET that |
147 | correspond to the hard registers, if any, set in that map. This |
148 | could be done far more efficiently by having all sorts of special-cases |
149 | with moving single words, but probably isn't worth the trouble. */ |
150 | void |
151 | reg_set_to_hard_reg_set (HARD_REG_SET *to, const_bitmap from) |
152 | { |
153 | unsigned i; |
154 | bitmap_iterator bi; |
155 | |
156 | EXECUTE_IF_SET_IN_BITMAP (from, 0, i, bi) |
157 | { |
158 | if (i >= FIRST_PSEUDO_REGISTER) |
159 | return; |
160 | SET_HARD_REG_BIT (set&: *to, bit: i); |
161 | } |
162 | } |
163 | |
/* Function called only once per target_globals to initialize the
   target_hard_regs structure.  Once this is done, various switches
   may override.  */
void
init_reg_sets (void)
{
  int i, j;

  /* First copy the register information from the initial int form into
     the regsets.  */

  for (i = 0; i < N_REG_CLASSES; i++)
    {
      CLEAR_HARD_REG_SET (reg_class_contents[i]);

      /* Note that we hard-code 32 here, not HOST_BITS_PER_INT.  */
      for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
	if (int_reg_class_contents[i][j / 32]
	    & ((unsigned) 1 << (j % 32)))
	  SET_HARD_REG_BIT (reg_class_contents[i], bit: j);
    }

  /* Sanity check: make sure the target macros FIXED_REGISTERS and
     CALL_USED_REGISTERS had the right number of initializers.  */
  gcc_assert (sizeof fixed_regs == sizeof initial_fixed_regs);
  gcc_assert (sizeof call_used_regs == sizeof initial_call_used_regs);
#ifdef REG_ALLOC_ORDER
  gcc_assert (sizeof reg_alloc_order == sizeof initial_reg_alloc_order);
#endif
  gcc_assert (sizeof reg_names == sizeof initial_reg_names);

  memcpy (fixed_regs, src: initial_fixed_regs, n: sizeof fixed_regs);
  memcpy (call_used_regs, src: initial_call_used_regs, n: sizeof call_used_regs);
#ifdef REG_ALLOC_ORDER
  memcpy (reg_alloc_order, src: initial_reg_alloc_order, n: sizeof reg_alloc_order);
#endif
  memcpy (reg_names, src: initial_reg_names, n: sizeof reg_names);

  /* Start out assuming every register is accessible and usable as an
     operand; init_reg_sets_1 and the target prune these later.  */
  SET_HARD_REG_SET (accessible_reg_set);
  SET_HARD_REG_SET (operand_reg_set);

  init_reg_class_start_regs ();
}
207 | |
/* We need to save copies of some of the register information which
   can be munged by command-line switches so we can restore it during
   subsequent back-end reinitialization.  Written by
   save_register_info, read by restore_register_info.  */
static char saved_fixed_regs[FIRST_PSEUDO_REGISTER];
static char saved_call_used_regs[FIRST_PSEUDO_REGISTER];
static const char *saved_reg_names[FIRST_PSEUDO_REGISTER];
static HARD_REG_SET saved_accessible_reg_set;
static HARD_REG_SET saved_operand_reg_set;
216 | |
217 | /* Save the register information. */ |
218 | void |
219 | save_register_info (void) |
220 | { |
221 | /* Sanity check: make sure the target macros FIXED_REGISTERS and |
222 | CALL_USED_REGISTERS had the right number of initializers. */ |
223 | gcc_assert (sizeof fixed_regs == sizeof saved_fixed_regs); |
224 | gcc_assert (sizeof call_used_regs == sizeof saved_call_used_regs); |
225 | memcpy (dest: saved_fixed_regs, fixed_regs, n: sizeof fixed_regs); |
226 | memcpy (dest: saved_call_used_regs, call_used_regs, n: sizeof call_used_regs); |
227 | |
228 | /* And similarly for reg_names. */ |
229 | gcc_assert (sizeof reg_names == sizeof saved_reg_names); |
230 | memcpy (dest: saved_reg_names, reg_names, n: sizeof reg_names); |
231 | saved_accessible_reg_set = accessible_reg_set; |
232 | saved_operand_reg_set = operand_reg_set; |
233 | } |
234 | |
235 | /* Restore the register information. */ |
236 | static void |
237 | restore_register_info (void) |
238 | { |
239 | memcpy (fixed_regs, src: saved_fixed_regs, n: sizeof fixed_regs); |
240 | memcpy (call_used_regs, src: saved_call_used_regs, n: sizeof call_used_regs); |
241 | |
242 | memcpy (reg_names, src: saved_reg_names, n: sizeof reg_names); |
243 | accessible_reg_set = saved_accessible_reg_set; |
244 | operand_reg_set = saved_operand_reg_set; |
245 | } |
246 | |
/* After switches have been processed, which perhaps alter
   `fixed_regs' and `call_used_regs', convert them to HARD_REG_SETs.
   Also derives the class size/relationship tables and the per-mode
   register-availability tables.  */
static void
init_reg_sets_1 (void)
{
  unsigned int i, j;
  unsigned int /* machine_mode */ m;

  /* Start from the pristine tables; switches were already applied to
     fixed_regs/call_used_regs before the snapshot was consulted.  */
  restore_register_info ();

#ifdef REG_ALLOC_ORDER
  /* Build the inverse permutation of REG_ALLOC_ORDER.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    inv_reg_alloc_order[reg_alloc_order[i]] = i;
#endif

  /* Let the target tweak things if necessary.  */

  targetm.conditional_register_usage ();

  /* Compute number of hard regs in each class.  */

  memset (reg_class_size, c: 0, n: sizeof reg_class_size);
  for (i = 0; i < N_REG_CLASSES; i++)
    {
      bool any_nonfixed = false;
      for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
	if (TEST_HARD_REG_BIT (reg_class_contents[i], bit: j))
	  {
	    reg_class_size[i]++;
	    if (!fixed_regs[j])
	      any_nonfixed = true;
	  }
      /* Record classes consisting solely of fixed registers.  */
      class_only_fixed_regs[i] = !any_nonfixed;
    }

  /* Initialize the table of subunions.
     reg_class_subunion[I][J] gets the largest-numbered reg-class
     that is contained in the union of classes I and J.  */

  memset (reg_class_subunion, c: 0, n: sizeof reg_class_subunion);
  for (i = 0; i < N_REG_CLASSES; i++)
    {
      for (j = 0; j < N_REG_CLASSES; j++)
	{
	  HARD_REG_SET c;
	  int k;

	  c = reg_class_contents[i] | reg_class_contents[j];
	  for (k = 0; k < N_REG_CLASSES; k++)
	    if (hard_reg_set_subset_p (reg_class_contents[k], y: c)
		&& !hard_reg_set_subset_p (reg_class_contents[k],
					   reg_class_contents
					   [(int) reg_class_subunion[i][j]]))
	      reg_class_subunion[i][j] = (enum reg_class) k;
	}
    }

  /* Initialize the table of superunions.
     reg_class_superunion[I][J] gets the smallest-numbered reg-class
     containing the union of classes I and J.  */

  memset (reg_class_superunion, c: 0, n: sizeof reg_class_superunion);
  for (i = 0; i < N_REG_CLASSES; i++)
    {
      for (j = 0; j < N_REG_CLASSES; j++)
	{
	  HARD_REG_SET c;
	  int k;

	  c = reg_class_contents[i] | reg_class_contents[j];
	  /* The first class that contains the whole union is the
	     smallest-numbered superunion; ALL_REGS always qualifies,
	     so the loop is guaranteed to terminate with a valid K.  */
	  for (k = 0; k < N_REG_CLASSES; k++)
	    if (hard_reg_set_subset_p (x: c, reg_class_contents[k]))
	      break;

	  reg_class_superunion[i][j] = (enum reg_class) k;
	}
    }

  /* Initialize the tables of subclasses and superclasses of each reg class.
     First clear the whole table, then add the elements as they are found.  */

  for (i = 0; i < N_REG_CLASSES; i++)
    {
      for (j = 0; j < N_REG_CLASSES; j++)
	reg_class_subclasses[i][j] = LIM_REG_CLASSES;
    }

  for (i = 0; i < N_REG_CLASSES; i++)
    {
      if (i == (int) NO_REGS)
	continue;

      for (j = i + 1; j < N_REG_CLASSES; j++)
	if (hard_reg_set_subset_p (reg_class_contents[i],
				   reg_class_contents[j]))
	  {
	    /* Reg class I is a subclass of J.
	       Add J to the table of superclasses of I.  */
	    enum reg_class *p;

	    /* Add I to the table of superclasses of J.  */
	    p = &reg_class_subclasses[j][0];
	    /* Append at the first LIM_REG_CLASSES sentinel slot.  */
	    while (*p != LIM_REG_CLASSES) p++;
	    *p = (enum reg_class) i;
	  }
    }

  /* Initialize "constant" tables.  */

  CLEAR_HARD_REG_SET (fixed_reg_set);
  CLEAR_HARD_REG_SET (regs_invalidated_by_call);

  /* A register that isn't accessible can never be an operand.  */
  operand_reg_set &= accessible_reg_set;
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      /* As a special exception, registers whose class is NO_REGS are
	 not accepted by `register_operand'.  The reason for this change
	 is to allow the representation of special architecture artifacts
	 (such as a condition code register) without extending the rtl
	 definitions.  Since registers of class NO_REGS cannot be used
	 as registers in any case where register classes are examined,
	 it is better to apply this exception in a target-independent way.  */
      if (REGNO_REG_CLASS (i) == NO_REGS)
	CLEAR_HARD_REG_BIT (operand_reg_set, bit: i);

      /* If a register is too limited to be treated as a register operand,
	 then it should never be allocated to a pseudo.  */
      if (!TEST_HARD_REG_BIT (operand_reg_set, bit: i))
	fixed_regs[i] = 1;

      if (fixed_regs[i])
	SET_HARD_REG_BIT (fixed_reg_set, bit: i);

      /* There are a couple of fixed registers that we know are safe to
	 exclude from being clobbered by calls:

	 The frame pointer is always preserved across calls.  The arg
	 pointer is if it is fixed.  The stack pointer usually is,
	 unless TARGET_RETURN_POPS_ARGS, in which case an explicit
	 CLOBBER will be present.  If we are generating PIC code, the
	 PIC offset table register is preserved across calls, though the
	 target can override that.  */

      if (i == STACK_POINTER_REGNUM)
	;
      else if (global_regs[i])
	SET_HARD_REG_BIT (regs_invalidated_by_call, bit: i);
      else if (i == FRAME_POINTER_REGNUM)
	;
      else if (!HARD_FRAME_POINTER_IS_FRAME_POINTER
	       && i == HARD_FRAME_POINTER_REGNUM)
	;
      else if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	       && i == ARG_POINTER_REGNUM && fixed_regs[i])
	;
      else if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
	       && i == (unsigned) PIC_OFFSET_TABLE_REGNUM && fixed_regs[i])
	;
      else if (call_used_regs[i])
	SET_HARD_REG_BIT (regs_invalidated_by_call, bit: i);
    }

  SET_HARD_REG_SET (savable_regs);
  fixed_nonglobal_reg_set = fixed_reg_set;

  /* Preserve global registers if called more than once.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      if (global_regs[i])
	{
	  fixed_regs[i] = call_used_regs[i] = 1;
	  SET_HARD_REG_BIT (fixed_reg_set, bit: i);
	  SET_HARD_REG_BIT (set&: global_reg_set, bit: i);
	}
    }

  /* Compute, for each mode, which registers (and hence which classes)
     can hold a value of that mode.  */
  memset (have_regs_of_mode, c: 0, n: sizeof (have_regs_of_mode));
  memset (contains_reg_of_mode, c: 0, n: sizeof (contains_reg_of_mode));
  for (m = 0; m < (unsigned int) MAX_MACHINE_MODE; m++)
    {
      HARD_REG_SET ok_regs, ok_regs2;
      CLEAR_HARD_REG_SET (set&: ok_regs);
      CLEAR_HARD_REG_SET (set&: ok_regs2);
      /* ok_regs: non-fixed-nonglobal registers valid for mode M;
	 ok_regs2: the subset that is also non-fixed (allocatable).  */
      for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
	if (!TEST_HARD_REG_BIT (fixed_nonglobal_reg_set, bit: j)
	    && targetm.hard_regno_mode_ok (j, (machine_mode) m))
	  {
	    SET_HARD_REG_BIT (set&: ok_regs, bit: j);
	    if (!fixed_regs[j])
	      SET_HARD_REG_BIT (set&: ok_regs2, bit: j);
	  }

      for (i = 0; i < N_REG_CLASSES; i++)
	if ((targetm.class_max_nregs ((reg_class_t) i, (machine_mode) m)
	     <= reg_class_size[i])
	    && hard_reg_set_intersect_p (x: ok_regs, reg_class_contents[i]))
	  {
	    contains_reg_of_mode[i][m] = 1;
	    if (hard_reg_set_intersect_p (x: ok_regs2, reg_class_contents[i]))
	      {
		have_regs_of_mode[m] = 1;
		contains_allocatable_reg_of_mode[i][m] = 1;
	      }
	  }
    }

  /* ABI 0 is the default function ABI; seed it with the computed
     call-clobber set.  */
  default_function_abi.initialize (0, regs_invalidated_by_call);
}
455 | |
/* Compute the table of register modes.
   These values are used to record death information for individual registers
   (as opposed to a multi-register mode).
   This function might be invoked more than once, if the target has support
   for changing register usage conventions on a per-function basis.
*/
void
init_reg_modes_target (void)
{
  int i, j;

  /* Cache hard_regno_nregs for every (register, mode) pair, tracking
     the maximum over all pairs as we go.  */
  this_target_regs->x_hard_regno_max_nregs = 1;
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    for (j = 0; j < MAX_MACHINE_MODE; j++)
      {
	unsigned char nregs = targetm.hard_regno_nregs (i, (machine_mode) j);
	this_target_regs->x_hard_regno_nregs[i][j] = nregs;
	if (nregs > this_target_regs->x_hard_regno_max_nregs)
	  this_target_regs->x_hard_regno_max_nregs = nregs;
      }

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      /* Pick the widest single-register mode valid for register I.  */
      reg_raw_mode[i] = choose_hard_reg_mode (i, 1, NULL);

      /* If we couldn't find a valid mode, just use the previous mode
	 if it is suitable, otherwise fall back on word_mode.  */
      if (reg_raw_mode[i] == VOIDmode)
	{
	  if (i > 0 && hard_regno_nregs (regno: i, reg_raw_mode[i - 1]) == 1)
	    reg_raw_mode[i] = reg_raw_mode[i - 1];
	  else
	    reg_raw_mode[i] = word_mode;
	}
    }
}
492 | |
/* Finish initializing the register sets and initialize the register modes.
   This function might be invoked more than once, if the target has support
   for changing register usage conventions on a per-function basis.
*/
void
init_regs (void)
{
  /* This finishes what was started by init_reg_sets, but couldn't be done
     until after register usage was specified.  */
  init_reg_sets_1 ();
}
504 | |
/* The same as previous function plus reinitializing IRA and the
   recognizer, which cache register information.  */
void
reinit_regs (void)
{
  init_regs ();
  /* caller_save needs to be re-initialized.  */
  caller_save_initialized_p = false;
  /* Only reinitialize IRA/recog once target-specific RTL data exists;
     before that point, the normal initialization will happen later.  */
  if (this_target_rtl->target_specific_initialized)
    {
      ira_init ();
      recog_init ();
    }
}
518 | |
519 | /* Initialize some fake stack-frame MEM references for use in |
520 | memory_move_secondary_cost. */ |
521 | void |
522 | init_fake_stack_mems (void) |
523 | { |
524 | int i; |
525 | |
526 | for (i = 0; i < MAX_MACHINE_MODE; i++) |
527 | top_of_stack[i] = gen_rtx_MEM ((machine_mode) i, stack_pointer_rtx); |
528 | } |
529 | |
530 | |
531 | /* Compute cost of moving data from a register of class FROM to one of |
532 | TO, using MODE. */ |
533 | |
534 | int |
535 | register_move_cost (machine_mode mode, reg_class_t from, reg_class_t to) |
536 | { |
537 | return targetm.register_move_cost (mode, from, to); |
538 | } |
539 | |
/* Compute cost of moving registers to/from memory.  IN is true for a
   load (memory to register), false for a store.  Thin wrapper around
   the target hook.  */

int
memory_move_cost (machine_mode mode, reg_class_t rclass, bool in)
{
  return targetm.memory_move_cost (mode, rclass, in);
}
547 | |
/* Compute extra cost of moving registers to/from memory due to reloads.
   Only needed if secondary reloads are required for memory moves.
   Recurses in case the secondary reload register itself needs a
   secondary reload.  */
int
memory_move_secondary_cost (machine_mode mode, reg_class_t rclass,
			    bool in)
{
  reg_class_t altclass;
  int partial_cost = 0;
  /* We need a memory reference to feed to SECONDARY... macros.  */
  /* mem may be unused even if the SECONDARY_ macros are defined.  */
  rtx mem ATTRIBUTE_UNUSED = top_of_stack[(int) mode];

  altclass = secondary_reload_class (in ? 1 : 0, rclass, mode, mem);

  /* No secondary reload class means no extra cost.  */
  if (altclass == NO_REGS)
    return 0;

  if (in)
    partial_cost = register_move_cost (mode, from: altclass, to: rclass);
  else
    partial_cost = register_move_cost (mode, from: rclass, to: altclass);

  if (rclass == altclass)
    /* This isn't simply a copy-to-temporary situation.  Can't guess
       what it is, so TARGET_MEMORY_MOVE_COST really ought not to be
       calling here in that case.

       I'm tempted to put in an assert here, but returning this will
       probably only give poor estimates, which is what we would've
       had before this code anyways.  */
    return partial_cost;

  /* Check if the secondary reload register will also need a
     secondary reload.  */
  return memory_move_secondary_cost (mode, rclass: altclass, in) + partial_cost;
}
584 | |
585 | /* Return a machine mode that is legitimate for hard reg REGNO and large |
586 | enough to save nregs. If we can't find one, return VOIDmode. |
587 | If ABI is nonnull, only consider modes that are preserved across |
588 | calls that use ABI. */ |
589 | machine_mode |
590 | choose_hard_reg_mode (unsigned int regno ATTRIBUTE_UNUSED, |
591 | unsigned int nregs, const predefined_function_abi *abi) |
592 | { |
593 | unsigned int /* machine_mode */ m; |
594 | machine_mode found_mode = VOIDmode, mode; |
595 | |
596 | /* We first look for the largest integer mode that can be validly |
597 | held in REGNO. If none, we look for the largest floating-point mode. |
598 | If we still didn't find a valid mode, try CCmode. |
599 | |
600 | The tests use maybe_gt rather than known_gt because we want (for example) |
601 | N V4SFs to win over plain V4SF even though N might be 1. */ |
602 | FOR_EACH_MODE_IN_CLASS (mode, MODE_INT) |
603 | if (hard_regno_nregs (regno, mode) == nregs |
604 | && targetm.hard_regno_mode_ok (regno, mode) |
605 | && (!abi || !abi->clobbers_reg_p (mode, regno)) |
606 | && maybe_gt (GET_MODE_SIZE (mode), GET_MODE_SIZE (found_mode))) |
607 | found_mode = mode; |
608 | |
609 | FOR_EACH_MODE_IN_CLASS (mode, MODE_FLOAT) |
610 | if (hard_regno_nregs (regno, mode) == nregs |
611 | && targetm.hard_regno_mode_ok (regno, mode) |
612 | && (!abi || !abi->clobbers_reg_p (mode, regno)) |
613 | && maybe_gt (GET_MODE_SIZE (mode), GET_MODE_SIZE (found_mode))) |
614 | found_mode = mode; |
615 | |
616 | FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_FLOAT) |
617 | if (hard_regno_nregs (regno, mode) == nregs |
618 | && targetm.hard_regno_mode_ok (regno, mode) |
619 | && (!abi || !abi->clobbers_reg_p (mode, regno)) |
620 | && maybe_gt (GET_MODE_SIZE (mode), GET_MODE_SIZE (found_mode))) |
621 | found_mode = mode; |
622 | |
623 | FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_INT) |
624 | if (hard_regno_nregs (regno, mode) == nregs |
625 | && targetm.hard_regno_mode_ok (regno, mode) |
626 | && (!abi || !abi->clobbers_reg_p (mode, regno)) |
627 | && maybe_gt (GET_MODE_SIZE (mode), GET_MODE_SIZE (found_mode))) |
628 | found_mode = mode; |
629 | |
630 | if (found_mode != VOIDmode) |
631 | return found_mode; |
632 | |
633 | /* Iterate over all of the CCmodes. */ |
634 | for (m = (unsigned int) CCmode; m < (unsigned int) NUM_MACHINE_MODES; ++m) |
635 | { |
636 | mode = (machine_mode) m; |
637 | if (hard_regno_nregs (regno, mode) == nregs |
638 | && targetm.hard_regno_mode_ok (regno, mode) |
639 | && (!abi || !abi->clobbers_reg_p (mode, regno))) |
640 | return mode; |
641 | } |
642 | |
643 | /* We can't find a mode valid for this register. */ |
644 | return VOIDmode; |
645 | } |
646 | |
/* Specify the usage characteristics of the register named NAME.
   It should be a fixed register if FIXED and a
   call-used register if CALL_USED.  Emits a diagnostic and changes
   nothing for the stack/frame pointer, which must stay fixed and
   call-used; warns if NAME is not a recognized register.  */
void
fix_register (const char *name, int fixed, int call_used)
{
  int i;
  int reg, nregs;

  /* Decode the name and update the primary form of
     the register info.  */

  if ((reg = decode_reg_name_and_count (name, &nregs)) >= 0)
    {
      gcc_assert (nregs >= 1);
      for (i = reg; i < reg + nregs; i++)
	{
	  /* The stack pointer and the (hard) frame pointer may never
	     be made non-fixed or call-saved.  */
	  if ((i == STACK_POINTER_REGNUM
#ifdef HARD_FRAME_POINTER_REGNUM
	       || i == HARD_FRAME_POINTER_REGNUM
#else
	       || i == FRAME_POINTER_REGNUM
#endif
	       )
	      && (fixed == 0 || call_used == 0))
	    {
	      /* Word the diagnostic after what the user asked for.  */
	      switch (fixed)
		{
		case 0:
		  switch (call_used)
		    {
		    case 0:
		      error ("cannot use %qs as a call-saved register" , name);
		      break;

		    case 1:
		      error ("cannot use %qs as a call-used register" , name);
		      break;

		    default:
		      gcc_unreachable ();
		    }
		  break;

		case 1:
		  switch (call_used)
		    {
		    case 1:
		      error ("cannot use %qs as a fixed register" , name);
		      break;

		    case 0:
		    default:
		      gcc_unreachable ();
		    }
		  break;

		default:
		  gcc_unreachable ();
		}
	    }
	  else
	    {
	      fixed_regs[i] = fixed;
#ifdef CALL_REALLY_USED_REGISTERS
	      /* With CALL_REALLY_USED_REGISTERS, call_used_regs only
		 tracks non-fixed registers.  */
	      if (fixed == 0)
		call_used_regs[i] = call_used;
#else
	      call_used_regs[i] = call_used;
#endif
	    }
	}
    }
  else
    {
      warning (0, "unknown register name: %s" , name);
    }
}
725 | |
/* Mark register number I as global, on behalf of the user declaration
   DECL.  Diagnoses stack registers, post-reginfo declarations, and
   conflicts with an earlier global register variable.  */
void
globalize_reg (tree decl, int i)
{
  location_t loc = DECL_SOURCE_LOCATION (decl);

#ifdef STACK_REGS
  /* Stacked FP registers cannot be pinned to a user variable.  */
  if (IN_RANGE (i, FIRST_STACK_REG, LAST_STACK_REG))
    {
      error ("stack register used for global register variable" );
      return;
    }
#endif

  if (fixed_regs[i] == 0 && no_global_reg_vars)
    error_at (loc, "global register variable follows a function definition" );

  /* Two global register variables in the same register: warn and keep
     the first declaration.  */
  if (global_regs[i])
    {
      auto_diagnostic_group d;
      warning_at (loc, 0,
		  "register of %qD used for multiple global register variables" ,
		  decl);
      inform (DECL_SOURCE_LOCATION (global_regs_decl[i]),
	      "conflicts with %qD" , global_regs_decl[i]);
      return;
    }

  if (call_used_regs[i] && ! fixed_regs[i])
    warning_at (loc, 0, "call-clobbered register used for global register variable" );

  global_regs[i] = 1;
  global_regs_decl[i] = decl;
  SET_HARD_REG_BIT (set&: global_reg_set, bit: i);

  /* If we're globalizing the frame pointer, we need to set the
     appropriate regs_invalidated_by_call bit, even if it's already
     set in fixed_regs.  */
  if (i != STACK_POINTER_REGNUM)
    {
      SET_HARD_REG_BIT (regs_invalidated_by_call, bit: i);
      for (unsigned int j = 0; j < NUM_ABI_IDS; ++j)
	function_abis[j].add_full_reg_clobber (i);
    }

  /* If already fixed, nothing else to do.  */
  if (fixed_regs[i])
    return;

  fixed_regs[i] = call_used_regs[i] = 1;

  SET_HARD_REG_BIT (fixed_reg_set, bit: i);

  /* Recompute the derived tables now that the register is fixed.  */
  reinit_regs ();
}
781 | |
782 | |
/* Structure used to record preferences of given pseudo.  Each field
   stores an enum reg_class value narrowed to a char to keep the
   table compact.  */
struct reg_pref
{
  /* (enum reg_class) prefclass is the preferred class.  May be
     NO_REGS if no class is better than memory.  */
  char prefclass;

  /* altclass is a register class that we should use for allocating
     pseudo if no register in the preferred class is available.
     If no register in this class is available, memory is preferred.

     It might appear to be more general to have a bitmask of classes here,
     but since it is recommended that there be a class corresponding to the
     union of most major pair of classes, that generality is not required.  */
  char altclass;

  /* allocnoclass is a register class that IRA uses for allocating
     the pseudo.  */
  char allocnoclass;
};
803 | |
/* Record preferences of each pseudo.  This is available after RA is
   run.  */
static struct reg_pref *reg_pref;

/* Current size of reg_info.  */
static int reg_info_size;
/* Max_reg_num since the last resize_reg_info call.  */
static int max_regno_since_last_resize;
812 | |
813 | /* Return the reg_class in which pseudo reg number REGNO is best allocated. |
814 | This function is sometimes called before the info has been computed. |
815 | When that happens, just return GENERAL_REGS, which is innocuous. */ |
816 | enum reg_class |
817 | reg_preferred_class (int regno) |
818 | { |
819 | if (reg_pref == 0) |
820 | return GENERAL_REGS; |
821 | |
822 | gcc_assert (regno < reg_info_size); |
823 | return (enum reg_class) reg_pref[regno].prefclass; |
824 | } |
825 | |
826 | enum reg_class |
827 | reg_alternate_class (int regno) |
828 | { |
829 | if (reg_pref == 0) |
830 | return ALL_REGS; |
831 | |
832 | gcc_assert (regno < reg_info_size); |
833 | return (enum reg_class) reg_pref[regno].altclass; |
834 | } |
835 | |
836 | /* Return the reg_class which is used by IRA for its allocation. */ |
837 | enum reg_class |
838 | reg_allocno_class (int regno) |
839 | { |
840 | if (reg_pref == 0) |
841 | return NO_REGS; |
842 | |
843 | gcc_assert (regno < reg_info_size); |
844 | return (enum reg_class) reg_pref[regno].allocnoclass; |
845 | } |
846 | |
847 | |
848 | |
849 | /* Allocate space for reg info and initilize it. */ |
850 | static void |
851 | allocate_reg_info (void) |
852 | { |
853 | int i; |
854 | |
855 | max_regno_since_last_resize = max_reg_num (); |
856 | reg_info_size = max_regno_since_last_resize * 3 / 2 + 1; |
857 | gcc_assert (! reg_pref && ! reg_renumber); |
858 | reg_renumber = XNEWVEC (short, reg_info_size); |
859 | reg_pref = XCNEWVEC (struct reg_pref, reg_info_size); |
860 | memset (s: reg_renumber, c: -1, n: reg_info_size * sizeof (short)); |
861 | for (i = 0; i < reg_info_size; i++) |
862 | { |
863 | reg_pref[i].prefclass = GENERAL_REGS; |
864 | reg_pref[i].altclass = ALL_REGS; |
865 | reg_pref[i].allocnoclass = GENERAL_REGS; |
866 | } |
867 | } |
868 | |
869 | |
/* Resize reg info.  The new elements will be initialized.  Return TRUE
   if new pseudos were added since the last call.  */
bool
resize_reg_info (void)
{
  int old, i;
  bool change_p;

  if (reg_pref == NULL)
    {
      allocate_reg_info ();
      return true;
    }
  change_p = max_regno_since_last_resize != max_reg_num ();
  max_regno_since_last_resize = max_reg_num ();
  /* Still enough headroom: nothing to reallocate.  */
  if (reg_info_size >= max_reg_num ())
    return change_p;
  old = reg_info_size;
  /* Grow with 50% headroom, as in allocate_reg_info.  */
  reg_info_size = max_reg_num () * 3 / 2 + 1;
  gcc_assert (reg_pref && reg_renumber);
  reg_renumber = XRESIZEVEC (short, reg_renumber, reg_info_size);
  reg_pref = XRESIZEVEC (struct reg_pref, reg_pref, reg_info_size);
  /* The reg_pref fill is immediately overwritten by the loop below;
     it only matters for any padding bytes in struct reg_pref.  */
  memset (s: reg_pref + old, c: -1,
	  n: (reg_info_size - old) * sizeof (struct reg_pref));
  memset (s: reg_renumber + old, c: -1, n: (reg_info_size - old) * sizeof (short));
  for (i = old; i < reg_info_size; i++)
    {
      reg_pref[i].prefclass = GENERAL_REGS;
      reg_pref[i].altclass = ALL_REGS;
      reg_pref[i].allocnoclass = GENERAL_REGS;
    }
  return true;
}
903 | |
904 | |
905 | /* Free up the space allocated by allocate_reg_info. */ |
906 | void |
907 | free_reg_info (void) |
908 | { |
909 | if (reg_pref) |
910 | { |
911 | free (ptr: reg_pref); |
912 | reg_pref = NULL; |
913 | } |
914 | |
915 | if (reg_renumber) |
916 | { |
917 | free (ptr: reg_renumber); |
918 | reg_renumber = NULL; |
919 | } |
920 | } |
921 | |
922 | /* Initialize some global data for this pass. */ |
923 | static unsigned int |
924 | reginfo_init (void) |
925 | { |
926 | if (df) |
927 | df_compute_regs_ever_live (true); |
928 | |
929 | /* This prevents dump_reg_info from losing if called |
930 | before reginfo is run. */ |
931 | reg_pref = NULL; |
932 | reg_info_size = max_regno_since_last_resize = 0; |
933 | /* No more global register variables may be declared. */ |
934 | no_global_reg_vars = 1; |
935 | return 1; |
936 | } |
937 | |
938 | namespace { |
939 | |
940 | const pass_data pass_data_reginfo_init = |
941 | { |
942 | .type: RTL_PASS, /* type */ |
943 | .name: "reginfo" , /* name */ |
944 | .optinfo_flags: OPTGROUP_NONE, /* optinfo_flags */ |
945 | .tv_id: TV_NONE, /* tv_id */ |
946 | .properties_required: 0, /* properties_required */ |
947 | .properties_provided: 0, /* properties_provided */ |
948 | .properties_destroyed: 0, /* properties_destroyed */ |
949 | .todo_flags_start: 0, /* todo_flags_start */ |
950 | .todo_flags_finish: 0, /* todo_flags_finish */ |
951 | }; |
952 | |
953 | class pass_reginfo_init : public rtl_opt_pass |
954 | { |
955 | public: |
956 | pass_reginfo_init (gcc::context *ctxt) |
957 | : rtl_opt_pass (pass_data_reginfo_init, ctxt) |
958 | {} |
959 | |
960 | /* opt_pass methods: */ |
961 | unsigned int execute (function *) final override { return reginfo_init (); } |
962 | |
963 | }; // class pass_reginfo_init |
964 | |
965 | } // anon namespace |
966 | |
967 | rtl_opt_pass * |
968 | make_pass_reginfo_init (gcc::context *ctxt) |
969 | { |
970 | return new pass_reginfo_init (ctxt); |
971 | } |
972 | |
973 | |
974 | |
975 | /* Set up preferred, alternate, and allocno classes for REGNO as |
976 | PREFCLASS, ALTCLASS, and ALLOCNOCLASS. */ |
977 | void |
978 | setup_reg_classes (int regno, |
979 | enum reg_class prefclass, enum reg_class altclass, |
980 | enum reg_class allocnoclass) |
981 | { |
982 | if (reg_pref == NULL) |
983 | return; |
984 | gcc_assert (reg_info_size >= max_reg_num ()); |
985 | reg_pref[regno].prefclass = prefclass; |
986 | reg_pref[regno].altclass = altclass; |
987 | reg_pref[regno].allocnoclass = allocnoclass; |
988 | } |
989 | |
990 | |
991 | /* This is the `regscan' pass of the compiler, run just before cse and |
992 | again just before loop. It finds the first and last use of each |
993 | pseudo-register. */ |
994 | |
995 | static void reg_scan_mark_refs (rtx, rtx_insn *); |
996 | |
997 | void |
998 | reg_scan (rtx_insn *f, unsigned int nregs ATTRIBUTE_UNUSED) |
999 | { |
1000 | rtx_insn *insn; |
1001 | |
1002 | timevar_push (tv: TV_REG_SCAN); |
1003 | |
1004 | for (insn = f; insn; insn = NEXT_INSN (insn)) |
1005 | if (INSN_P (insn)) |
1006 | { |
1007 | reg_scan_mark_refs (PATTERN (insn), insn); |
1008 | if (REG_NOTES (insn)) |
1009 | reg_scan_mark_refs (REG_NOTES (insn), insn); |
1010 | } |
1011 | |
1012 | timevar_pop (tv: TV_REG_SCAN); |
1013 | } |
1014 | |
1015 | |
/* X is the expression to scan.  INSN is the insn it appears in.
   Recursively walk X, setting REG_POINTER on pseudos that are assigned
   pointer-like values and propagating REG_ATTRS from SET sources to
   register destinations.  */
static void
reg_scan_mark_refs (rtx x, rtx_insn *insn)
{
  enum rtx_code code;
  rtx dest;
  rtx note;

  if (!x)
    return;
  code = GET_CODE (x);
  switch (code)
    {
    /* Leaf rtxes: nothing to record and nothing to recurse into.  */
    case CONST:
    CASE_CONST_ANY:
    case PC:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case REG:
      return;

    case EXPR_LIST:
      if (XEXP (x, 0))
	reg_scan_mark_refs (XEXP (x, 0), insn);
      if (XEXP (x, 1))
	reg_scan_mark_refs (XEXP (x, 1), insn);
      break;

    /* For insn and int lists only the chain (operand 1) can contain
       expressions of interest.  */
    case INSN_LIST:
    case INT_LIST:
      if (XEXP (x, 1))
	reg_scan_mark_refs (XEXP (x, 1), insn);
      break;

    case CLOBBER:
      /* A clobbered MEM still uses registers in its address.  */
      if (MEM_P (XEXP (x, 0)))
	reg_scan_mark_refs (XEXP (XEXP (x, 0), 0), insn);
      break;

    case SET:
      /* Count a set of the destination if it is a register.  */
      for (dest = SET_DEST (x);
	   GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART
	   || GET_CODE (dest) == ZERO_EXTRACT;
	   dest = XEXP (dest, 0))
	;

      /* If this is setting a pseudo from another pseudo or the sum of a
	 pseudo and a constant integer and the other pseudo is known to be
	 a pointer, set the destination to be a pointer as well.

	 Likewise if it is setting the destination from an address or from a
	 value equivalent to an address or to the sum of an address and
	 something else.

	 But don't do any of this if the pseudo corresponds to a user
	 variable since it should have already been set as a pointer based
	 on the type.  */

      if (REG_P (SET_DEST (x))
	  && REGNO (SET_DEST (x)) >= FIRST_PSEUDO_REGISTER
	  /* If the destination pseudo is set more than once, then other
	     sets might not be to a pointer value (consider access to a
	     union in two threads of control in the presence of global
	     optimizations).  So only set REG_POINTER on the destination
	     pseudo if this is the only set of that pseudo.  */
	  && DF_REG_DEF_COUNT (REGNO (SET_DEST (x))) == 1
	  && ! REG_USERVAR_P (SET_DEST (x))
	  && ! REG_POINTER (SET_DEST (x))
	  && ((REG_P (SET_SRC (x))
	       && REG_POINTER (SET_SRC (x)))
	      || ((GET_CODE (SET_SRC (x)) == PLUS
		   || GET_CODE (SET_SRC (x)) == LO_SUM)
		  && CONST_INT_P (XEXP (SET_SRC (x), 1))
		  && REG_P (XEXP (SET_SRC (x), 0))
		  && REG_POINTER (XEXP (SET_SRC (x), 0)))
	      || GET_CODE (SET_SRC (x)) == CONST
	      || GET_CODE (SET_SRC (x)) == SYMBOL_REF
	      || GET_CODE (SET_SRC (x)) == LABEL_REF
	      || (GET_CODE (SET_SRC (x)) == HIGH
		  && (GET_CODE (XEXP (SET_SRC (x), 0)) == CONST
		      || GET_CODE (XEXP (SET_SRC (x), 0)) == SYMBOL_REF
		      || GET_CODE (XEXP (SET_SRC (x), 0)) == LABEL_REF))
	      || ((GET_CODE (SET_SRC (x)) == PLUS
		   || GET_CODE (SET_SRC (x)) == LO_SUM)
		  && (GET_CODE (XEXP (SET_SRC (x), 1)) == CONST
		      || GET_CODE (XEXP (SET_SRC (x), 1)) == SYMBOL_REF
		      || GET_CODE (XEXP (SET_SRC (x), 1)) == LABEL_REF))
	      || ((note = find_reg_note (insn, REG_EQUAL, 0)) != 0
		  && (GET_CODE (XEXP (note, 0)) == CONST
		      || GET_CODE (XEXP (note, 0)) == SYMBOL_REF
		      || GET_CODE (XEXP (note, 0)) == LABEL_REF))))
	REG_POINTER (SET_DEST (x)) = 1;

      /* If this is setting a register from a register or from a simple
	 conversion of a register, propagate REG_EXPR.  */
      if (REG_P (dest) && !REG_ATTRS (dest))
	set_reg_attrs_from_value (dest, SET_SRC (x));

      /* fall through */

    default:
      {
	/* Generic walk: recurse into every rtx ('e') and rtx-vector
	   ('E') operand of X.  */
	const char *fmt = GET_RTX_FORMAT (code);
	int i;
	for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	  {
	    if (fmt[i] == 'e')
	      reg_scan_mark_refs (XEXP (x, i), insn);
	    else if (fmt[i] == 'E' && XVEC (x, i) != 0)
	      {
		int j;
		for (j = XVECLEN (x, i) - 1; j >= 0; j--)
		  reg_scan_mark_refs (XVECEXP (x, i, j), insn);
	      }
	  }
      }
    }
}
1140 | |
1141 | |
1142 | /* Return true if C1 is a subset of C2, i.e., if every register in C1 |
1143 | is also in C2. */ |
1144 | bool |
1145 | reg_class_subset_p (reg_class_t c1, reg_class_t c2) |
1146 | { |
1147 | return (c1 == c2 |
1148 | || c2 == ALL_REGS |
1149 | || hard_reg_set_subset_p (reg_class_contents[(int) c1], |
1150 | reg_class_contents[(int) c2])); |
1151 | } |
1152 | |
1153 | /* Return true if there is a register that is in both C1 and C2. */ |
1154 | bool |
1155 | reg_classes_intersect_p (reg_class_t c1, reg_class_t c2) |
1156 | { |
1157 | return (c1 == c2 |
1158 | || c1 == ALL_REGS |
1159 | || c2 == ALL_REGS |
1160 | || hard_reg_set_intersect_p (reg_class_contents[(int) c1], |
1161 | reg_class_contents[(int) c2])); |
1162 | } |
1163 | |
1164 | |
1165 | inline hashval_t |
1166 | simplifiable_subregs_hasher::hash (const simplifiable_subreg *value) |
1167 | { |
1168 | inchash::hash h; |
1169 | h.add_hwi (v: value->shape.unique_id ()); |
1170 | return h.end (); |
1171 | } |
1172 | |
1173 | inline bool |
1174 | simplifiable_subregs_hasher::equal (const simplifiable_subreg *value, |
1175 | const subreg_shape *compare) |
1176 | { |
1177 | return value->shape == *compare; |
1178 | } |
1179 | |
1180 | inline simplifiable_subreg::simplifiable_subreg (const subreg_shape &shape_in) |
1181 | : shape (shape_in) |
1182 | { |
1183 | CLEAR_HARD_REG_SET (set&: simplifiable_regs); |
1184 | } |
1185 | |
1186 | /* Return the set of hard registers that are able to form the subreg |
1187 | described by SHAPE. */ |
1188 | |
1189 | const HARD_REG_SET & |
1190 | simplifiable_subregs (const subreg_shape &shape) |
1191 | { |
1192 | if (!this_target_hard_regs->x_simplifiable_subregs) |
1193 | this_target_hard_regs->x_simplifiable_subregs |
1194 | = new hash_table <simplifiable_subregs_hasher> (30); |
1195 | inchash::hash h; |
1196 | h.add_hwi (v: shape.unique_id ()); |
1197 | simplifiable_subreg **slot |
1198 | = (this_target_hard_regs->x_simplifiable_subregs |
1199 | ->find_slot_with_hash (comparable: &shape, hash: h.end (), insert: INSERT)); |
1200 | |
1201 | if (!*slot) |
1202 | { |
1203 | simplifiable_subreg *info = new simplifiable_subreg (shape); |
1204 | for (unsigned int i = 0; i < FIRST_PSEUDO_REGISTER; ++i) |
1205 | if (targetm.hard_regno_mode_ok (i, shape.inner_mode) |
1206 | && simplify_subreg_regno (i, shape.inner_mode, shape.offset, |
1207 | shape.outer_mode) >= 0) |
1208 | SET_HARD_REG_BIT (set&: info->simplifiable_regs, bit: i); |
1209 | *slot = info; |
1210 | } |
1211 | return (*slot)->simplifiable_regs; |
1212 | } |
1213 | |
1214 | /* Passes for keeping and updating info about modes of registers |
1215 | inside subregisters. */ |
1216 | |
1217 | static HARD_REG_SET **valid_mode_changes; |
1218 | static obstack valid_mode_changes_obstack; |
1219 | |
1220 | /* Restrict the choice of register for SUBREG_REG (SUBREG) based |
1221 | on information about SUBREG. |
1222 | |
1223 | If PARTIAL_DEF, SUBREG is a partial definition of a multipart inner |
1224 | register and we want to ensure that the other parts of the inner |
1225 | register are correctly preserved. If !PARTIAL_DEF we need to |
1226 | ensure that SUBREG itself can be formed. */ |
1227 | |
1228 | static void |
1229 | record_subregs_of_mode (rtx subreg, bool partial_def) |
1230 | { |
1231 | unsigned int regno; |
1232 | |
1233 | if (!REG_P (SUBREG_REG (subreg))) |
1234 | return; |
1235 | |
1236 | regno = REGNO (SUBREG_REG (subreg)); |
1237 | if (regno < FIRST_PSEUDO_REGISTER) |
1238 | return; |
1239 | |
1240 | subreg_shape shape (shape_of_subreg (x: subreg)); |
1241 | if (partial_def) |
1242 | { |
1243 | /* The number of independently-accessible SHAPE.outer_mode values |
1244 | in SHAPE.inner_mode is GET_MODE_SIZE (SHAPE.inner_mode) / SIZE. |
1245 | We need to check that the assignment will preserve all the other |
1246 | SIZE-byte chunks in the inner register besides the one that |
1247 | includes SUBREG. |
1248 | |
1249 | In practice it is enough to check whether an equivalent |
1250 | SHAPE.inner_mode value in an adjacent SIZE-byte chunk can be formed. |
1251 | If the underlying registers are small enough, both subregs will |
1252 | be valid. If the underlying registers are too large, one of the |
1253 | subregs will be invalid. |
1254 | |
1255 | This relies on the fact that we've already been passed |
1256 | SUBREG with PARTIAL_DEF set to false. |
1257 | |
1258 | The size of the outer mode must ordered wrt the size of the |
1259 | inner mode's registers, since otherwise we wouldn't know at |
1260 | compile time how many registers the outer mode occupies. */ |
1261 | poly_uint64 size = ordered_max (REGMODE_NATURAL_SIZE (shape.inner_mode), |
1262 | b: GET_MODE_SIZE (mode: shape.outer_mode)); |
1263 | gcc_checking_assert (known_lt (size, GET_MODE_SIZE (shape.inner_mode))); |
1264 | if (known_ge (shape.offset, size)) |
1265 | shape.offset -= size; |
1266 | else |
1267 | shape.offset += size; |
1268 | } |
1269 | |
1270 | if (valid_mode_changes[regno]) |
1271 | *valid_mode_changes[regno] &= simplifiable_subregs (shape); |
1272 | else |
1273 | { |
1274 | valid_mode_changes[regno] |
1275 | = XOBNEW (&valid_mode_changes_obstack, HARD_REG_SET); |
1276 | *valid_mode_changes[regno] = simplifiable_subregs (shape); |
1277 | } |
1278 | } |
1279 | |
1280 | /* Call record_subregs_of_mode for all the subregs in X. */ |
1281 | static void |
1282 | find_subregs_of_mode (rtx x) |
1283 | { |
1284 | enum rtx_code code = GET_CODE (x); |
1285 | const char * const fmt = GET_RTX_FORMAT (code); |
1286 | int i; |
1287 | |
1288 | if (code == SUBREG) |
1289 | record_subregs_of_mode (subreg: x, partial_def: false); |
1290 | |
1291 | /* Time for some deep diving. */ |
1292 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
1293 | { |
1294 | if (fmt[i] == 'e') |
1295 | find_subregs_of_mode (XEXP (x, i)); |
1296 | else if (fmt[i] == 'E') |
1297 | { |
1298 | int j; |
1299 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
1300 | find_subregs_of_mode (XVECEXP (x, i, j)); |
1301 | } |
1302 | } |
1303 | } |
1304 | |
1305 | void |
1306 | init_subregs_of_mode (void) |
1307 | { |
1308 | basic_block bb; |
1309 | rtx_insn *insn; |
1310 | |
1311 | gcc_obstack_init (&valid_mode_changes_obstack); |
1312 | valid_mode_changes = XCNEWVEC (HARD_REG_SET *, max_reg_num ()); |
1313 | |
1314 | FOR_EACH_BB_FN (bb, cfun) |
1315 | FOR_BB_INSNS (bb, insn) |
1316 | if (NONDEBUG_INSN_P (insn)) |
1317 | { |
1318 | find_subregs_of_mode (x: PATTERN (insn)); |
1319 | df_ref def; |
1320 | FOR_EACH_INSN_DEF (def, insn) |
1321 | if (DF_REF_FLAGS_IS_SET (def, DF_REF_PARTIAL) |
1322 | && read_modify_subreg_p (DF_REF_REG (def))) |
1323 | record_subregs_of_mode (DF_REF_REG (def), partial_def: true); |
1324 | } |
1325 | } |
1326 | |
1327 | const HARD_REG_SET * |
1328 | valid_mode_changes_for_regno (unsigned int regno) |
1329 | { |
1330 | return valid_mode_changes[regno]; |
1331 | } |
1332 | |
1333 | void |
1334 | finish_subregs_of_mode (void) |
1335 | { |
1336 | XDELETEVEC (valid_mode_changes); |
1337 | obstack_free (&valid_mode_changes_obstack, NULL); |
1338 | } |
1339 | |
1340 | /* Free all data attached to the structure. This isn't a destructor because |
1341 | we don't want to run on exit. */ |
1342 | |
1343 | void |
1344 | target_hard_regs::finalize () |
1345 | { |
1346 | delete x_simplifiable_subregs; |
1347 | } |
1348 | |