/* Emit RTL for the GCC expander.
   Copyright (C) 1987-2024 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


/* Middle-to-low level generation of rtx code and insns.

   This file contains support functions for creating rtl expressions
   and manipulating them in the doubly-linked chain of insns.

   The patterns of the insns are created by machine-dependent
   routines in insn-emit.cc, which is generated automatically from
   the machine description.  These routines make the individual rtx's
   of the pattern with `gen_rtx_fmt_ee' and others in genrtl.[ch],
   which are automatically generated from rtl.def; what is machine
   dependent is the kind of rtx's they make and what arguments they
   use.  */
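
/* For illustration: a generated pattern-construction routine in
   insn-emit.cc ultimately boils down to calls such as

       gen_rtx_fmt_ee (PLUS, SImode, operand0, operand1)

   which builds the expression (plus:SI operand0 operand1).  The names
   operand0/operand1 are placeholders for this sketch, not identifiers
   from this file.  */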

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "memmodel.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "insn-config.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "alias.h"
#include "fold-const.h"
#include "varasm.h"
#include "cfgrtl.h"
#include "tree-eh.h"
#include "explow.h"
#include "expr.h"
#include "builtins.h"
#include "rtl-iter.h"
#include "stor-layout.h"
#include "opts.h"
#include "optabs.h"
#include "predict.h"
#include "rtx-vector-builder.h"
#include "gimple.h"
#include "gimple-ssa.h"
#include "gimplify.h"

struct target_rtl default_target_rtl;
#if SWITCHABLE_TARGET
struct target_rtl *this_target_rtl = &default_target_rtl;
#endif

#define initial_regno_reg_rtx (this_target_rtl->x_initial_regno_reg_rtx)

/* Commonly used modes.  */

scalar_int_mode byte_mode;	/* Mode whose width is BITS_PER_UNIT.  */
scalar_int_mode word_mode;	/* Mode whose width is BITS_PER_WORD.  */
scalar_int_mode ptr_mode;	/* Mode whose width is POINTER_SIZE.  */

/* Data structures maintained for the currently processed function in
   RTL form.  */

struct rtl_data x_rtl;

/* Indexed by pseudo register number, gives the rtx for that pseudo.
   Allocated in parallel with regno_pointer_align.
   FIXME: We could put it into the emit_status struct, but gengtype is not
   able to deal with a length attribute nested in top-level structures.  */

rtx * regno_reg_rtx;

/* This is *not* reset after each function.  It gives each CODE_LABEL
   in the entire compilation a unique label number.  */

static GTY(()) int label_num = 1;

/* We record floating-point CONST_DOUBLEs in each floating-point mode for
   the values of 0, 1, and 2.  For the integer entries and VOIDmode, we
   record a copy of const[012]_rtx and constm1_rtx.  CONSTM1_RTX
   is set only for MODE_INT and MODE_VECTOR_INT modes.  */

rtx const_tiny_rtx[4][(int) MAX_MACHINE_MODE];

rtx const_true_rtx;

REAL_VALUE_TYPE dconst0;
REAL_VALUE_TYPE dconst1;
REAL_VALUE_TYPE dconst2;
REAL_VALUE_TYPE dconstm0;
REAL_VALUE_TYPE dconstm1;
REAL_VALUE_TYPE dconsthalf;
REAL_VALUE_TYPE dconstinf;
REAL_VALUE_TYPE dconstninf;

/* Record fixed-point constant 0 and 1.  */
FIXED_VALUE_TYPE fconst0[MAX_FCONST0];
FIXED_VALUE_TYPE fconst1[MAX_FCONST1];

/* We make one copy of (const_int C) where C is in
   [- MAX_SAVED_CONST_INT, MAX_SAVED_CONST_INT]
   to save space during the compilation and simplify comparisons of
   integers.  */

rtx const_int_rtx[MAX_SAVED_CONST_INT * 2 + 1];
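
/* For illustration: because of this table, small integer constants are
   shared rather than re-allocated, so e.g.

       GEN_INT (0) == const0_rtx

   holds as a pointer equality, and passes may compare small CONST_INTs
   with == instead of rtx_equal_p.  (Illustrative note, assuming the
   usual definitions of GEN_INT and const0_rtx.)  */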

/* Standard pieces of rtx, to be substituted directly into things.  */
rtx pc_rtx;
rtx ret_rtx;
rtx simple_return_rtx;

/* Marker used for denoting an INSN, which should never be accessed (i.e.,
   this pointer should normally never be dereferenced), but is required to be
   distinct from NULL_RTX.  Currently used by the peephole2 pass.  */
rtx_insn *invalid_insn_rtx;

/* A hash table storing CONST_INTs whose absolute value is greater
   than MAX_SAVED_CONST_INT.  */

struct const_int_hasher : ggc_cache_ptr_hash<rtx_def>
{
  typedef HOST_WIDE_INT compare_type;

  static hashval_t hash (rtx i);
  static bool equal (rtx i, HOST_WIDE_INT h);
};

static GTY ((cache)) hash_table<const_int_hasher> *const_int_htab;

struct const_wide_int_hasher : ggc_cache_ptr_hash<rtx_def>
{
  static hashval_t hash (rtx x);
  static bool equal (rtx x, rtx y);
};

static GTY ((cache)) hash_table<const_wide_int_hasher> *const_wide_int_htab;

struct const_poly_int_hasher : ggc_cache_ptr_hash<rtx_def>
{
  typedef std::pair<machine_mode, poly_wide_int_ref> compare_type;

  static hashval_t hash (rtx x);
  static bool equal (rtx x, const compare_type &y);
};

static GTY ((cache)) hash_table<const_poly_int_hasher> *const_poly_int_htab;

/* A hash table storing register attribute structures.  */
struct reg_attr_hasher : ggc_cache_ptr_hash<reg_attrs>
{
  static hashval_t hash (reg_attrs *x);
  static bool equal (reg_attrs *a, reg_attrs *b);
};

static GTY ((cache)) hash_table<reg_attr_hasher> *reg_attrs_htab;

/* A hash table storing all CONST_DOUBLEs.  */
struct const_double_hasher : ggc_cache_ptr_hash<rtx_def>
{
  static hashval_t hash (rtx x);
  static bool equal (rtx x, rtx y);
};

static GTY ((cache)) hash_table<const_double_hasher> *const_double_htab;

/* A hash table storing all CONST_FIXEDs.  */
struct const_fixed_hasher : ggc_cache_ptr_hash<rtx_def>
{
  static hashval_t hash (rtx x);
  static bool equal (rtx x, rtx y);
};

static GTY ((cache)) hash_table<const_fixed_hasher> *const_fixed_htab;

#define cur_insn_uid (crtl->emit.x_cur_insn_uid)
#define cur_debug_insn_uid (crtl->emit.x_cur_debug_insn_uid)
#define first_label_num (crtl->emit.x_first_label_num)

static void set_used_decls (tree);
static void mark_label_nuses (rtx);
#if TARGET_SUPPORTS_WIDE_INT
static rtx lookup_const_wide_int (rtx);
#endif
static rtx lookup_const_double (rtx);
static rtx lookup_const_fixed (rtx);
static rtx gen_const_vector (machine_mode, int);
static void copy_rtx_if_shared_1 (rtx *orig);

/* Probability of the conditional branch currently being processed by
   try_split.  */
profile_probability split_branch_probability;

/* Returns a hash code for X (which is really a CONST_INT).  */

hashval_t
const_int_hasher::hash (rtx x)
{
  return (hashval_t) INTVAL (x);
}

/* Returns true if the value represented by X (which is really a
   CONST_INT) is the same as that given by Y (which is really a
   HOST_WIDE_INT).  */

bool
const_int_hasher::equal (rtx x, HOST_WIDE_INT y)
{
  return (INTVAL (x) == y);
}

#if TARGET_SUPPORTS_WIDE_INT
/* Returns a hash code for X (which is really a CONST_WIDE_INT).  */

hashval_t
const_wide_int_hasher::hash (rtx x)
{
  int i;
  unsigned HOST_WIDE_INT hash = 0;
  const_rtx xr = x;

  for (i = 0; i < CONST_WIDE_INT_NUNITS (xr); i++)
    hash += CONST_WIDE_INT_ELT (xr, i);

  return (hashval_t) hash;
}

/* Returns true if the value represented by X (which is really a
   CONST_WIDE_INT) is the same as that given by Y (which is really a
   CONST_WIDE_INT).  */

bool
const_wide_int_hasher::equal (rtx x, rtx y)
{
  int i;
  const_rtx xr = x;
  const_rtx yr = y;
  if (CONST_WIDE_INT_NUNITS (xr) != CONST_WIDE_INT_NUNITS (yr))
    return false;

  for (i = 0; i < CONST_WIDE_INT_NUNITS (xr); i++)
    if (CONST_WIDE_INT_ELT (xr, i) != CONST_WIDE_INT_ELT (yr, i))
      return false;

  return true;
}
#endif

/* Returns a hash code for CONST_POLY_INT X.  */

hashval_t
const_poly_int_hasher::hash (rtx x)
{
  inchash::hash h;
  h.add_int (GET_MODE (x));
  for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
    h.add_wide_int (CONST_POLY_INT_COEFFS (x)[i]);
  return h.end ();
}

/* Returns true if CONST_POLY_INT X is an rtx representation of Y.  */

bool
const_poly_int_hasher::equal (rtx x, const compare_type &y)
{
  if (GET_MODE (x) != y.first)
    return false;
  for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
    if (CONST_POLY_INT_COEFFS (x)[i] != y.second.coeffs[i])
      return false;
  return true;
}

/* Returns a hash code for X (which is really a CONST_DOUBLE).  */
hashval_t
const_double_hasher::hash (rtx x)
{
  const_rtx const value = x;
  hashval_t h;

  if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (value) == VOIDmode)
    h = CONST_DOUBLE_LOW (value) ^ CONST_DOUBLE_HIGH (value);
  else
    {
      h = real_hash (CONST_DOUBLE_REAL_VALUE (value));
      /* MODE is used in the comparison, so it should be in the hash.  */
      h ^= GET_MODE (value);
    }
  return h;
}

/* Returns true if the value represented by X (really a CONST_DOUBLE)
   is the same as that represented by Y (really a CONST_DOUBLE).  */
bool
const_double_hasher::equal (rtx x, rtx y)
{
  const_rtx const a = x, b = y;

  if (GET_MODE (a) != GET_MODE (b))
    return false;
  if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (a) == VOIDmode)
    return (CONST_DOUBLE_LOW (a) == CONST_DOUBLE_LOW (b)
	    && CONST_DOUBLE_HIGH (a) == CONST_DOUBLE_HIGH (b));
  else
    return real_identical (CONST_DOUBLE_REAL_VALUE (a),
			   CONST_DOUBLE_REAL_VALUE (b));
}

/* Returns a hash code for X (which is really a CONST_FIXED).  */

hashval_t
const_fixed_hasher::hash (rtx x)
{
  const_rtx const value = x;
  hashval_t h;

  h = fixed_hash (CONST_FIXED_VALUE (value));
  /* MODE is used in the comparison, so it should be in the hash.  */
  h ^= GET_MODE (value);
  return h;
}

/* Returns true if the value represented by X is the same as that
   represented by Y.  */

bool
const_fixed_hasher::equal (rtx x, rtx y)
{
  const_rtx const a = x, b = y;

  if (GET_MODE (a) != GET_MODE (b))
    return false;
  return fixed_identical (CONST_FIXED_VALUE (a), CONST_FIXED_VALUE (b));
}

/* Return true if the given memory attributes are equal.  */

bool
mem_attrs_eq_p (const class mem_attrs *p, const class mem_attrs *q)
{
  if (p == q)
    return true;
  if (!p || !q)
    return false;
  return (p->alias == q->alias
	  && p->offset_known_p == q->offset_known_p
	  && (!p->offset_known_p || known_eq (p->offset, q->offset))
	  && p->size_known_p == q->size_known_p
	  && (!p->size_known_p || known_eq (p->size, q->size))
	  && p->align == q->align
	  && p->addrspace == q->addrspace
	  && (p->expr == q->expr
	      || (p->expr != NULL_TREE && q->expr != NULL_TREE
		  && operand_equal_p (p->expr, q->expr, 0))));
}

/* Set MEM's memory attributes so that they are the same as ATTRS.  */

static void
set_mem_attrs (rtx mem, mem_attrs *attrs)
{
  /* If everything is the default, we can just clear the attributes.  */
  if (mem_attrs_eq_p (attrs, mode_mem_attrs[(int) GET_MODE (mem)]))
    {
      MEM_ATTRS (mem) = 0;
      return;
    }

  if (!MEM_ATTRS (mem)
      || !mem_attrs_eq_p (attrs, MEM_ATTRS (mem)))
    {
      MEM_ATTRS (mem) = ggc_alloc<mem_attrs> ();
      memcpy (MEM_ATTRS (mem), attrs, sizeof (mem_attrs));
    }
}

/* Returns a hash code for X (which is really a reg_attrs *).  */

hashval_t
reg_attr_hasher::hash (reg_attrs *x)
{
  const reg_attrs *const p = x;

  inchash::hash h;
  h.add_ptr (p->decl);
  h.add_poly_hwi (p->offset);
  return h.end ();
}

/* Returns true if the value represented by X is the same as that given by
   Y.  */

bool
reg_attr_hasher::equal (reg_attrs *x, reg_attrs *y)
{
  const reg_attrs *const p = x;
  const reg_attrs *const q = y;

  return (p->decl == q->decl && known_eq (p->offset, q->offset));
}

/* Allocate a new reg_attrs structure and insert it into the hash table if
   one identical to it is not already in the table.  We are doing this for
   MEM of mode MODE.  */

static reg_attrs *
get_reg_attrs (tree decl, poly_int64 offset)
{
  reg_attrs attrs;

  /* If everything is the default, we can just return zero.  */
  if (decl == 0 && known_eq (offset, 0))
    return 0;

  attrs.decl = decl;
  attrs.offset = offset;

  reg_attrs **slot = reg_attrs_htab->find_slot (&attrs, INSERT);
  if (*slot == 0)
    {
      *slot = ggc_alloc<reg_attrs> ();
      memcpy (*slot, &attrs, sizeof (reg_attrs));
    }

  return *slot;
}
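
/* For illustration: a pseudo that holds bytes 4..7 of an 8-byte user
   variable would carry attributes of the form

       REG_ATTRS (reg) = get_reg_attrs (decl_for_var, 4);

   where decl_for_var stands for the variable's tree decl; the name is a
   placeholder for this sketch, not an identifier from this file.  */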


#if !HAVE_blockage
/* Generate an empty ASM_INPUT, which is used to block attempts to schedule,
   and to block register equivalences from being seen across, this insn.  */

rtx
gen_blockage (void)
{
  rtx x = gen_rtx_ASM_INPUT (VOIDmode, "");
  MEM_VOLATILE_P (x) = true;
  return x;
}
#endif
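
/* For illustration: on a target without a "blockage" insn pattern, the
   barrier returned by gen_blockage () is just a volatile empty asm,

       (asm_input ("")) with MEM_VOLATILE_P set,

   playing much the same role as a user-level __asm__ volatile ("")
   scheduling barrier.  (Illustrative comparison, not part of the
   original comment.)  */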


/* Set the mode and register number of X to MODE and REGNO.  */

void
set_mode_and_regno (rtx x, machine_mode mode, unsigned int regno)
{
  unsigned int nregs = (HARD_REGISTER_NUM_P (regno)
			? hard_regno_nregs (regno, mode)
			: 1);
  PUT_MODE_RAW (x, mode);
  set_regno_raw (x, regno, nregs);
}

/* Initialize a fresh REG rtx with mode MODE and register REGNO.  */

rtx
init_raw_REG (rtx x, machine_mode mode, unsigned int regno)
{
  set_mode_and_regno (x, mode, regno);
  REG_ATTRS (x) = NULL;
  ORIGINAL_REGNO (x) = regno;
  return x;
}

/* Generate a new REG rtx.  Make sure ORIGINAL_REGNO is set properly, and
   don't attempt to share with the various global pieces of rtl (such as
   frame_pointer_rtx).  */

rtx
gen_raw_REG (machine_mode mode, unsigned int regno)
{
  rtx x = rtx_alloc (REG MEM_STAT_INFO);
  init_raw_REG (x, mode, regno);
  return x;
}

/* There are some RTL codes that require special attention; the generation
   functions do the raw handling.  If you add to this list, modify
   special_rtx in gengenrtl.cc as well.  */

rtx_expr_list *
gen_rtx_EXPR_LIST (machine_mode mode, rtx expr, rtx expr_list)
{
  return as_a <rtx_expr_list *> (gen_rtx_fmt_ee (EXPR_LIST, mode, expr,
						 expr_list));
}

rtx_insn_list *
gen_rtx_INSN_LIST (machine_mode mode, rtx insn, rtx insn_list)
{
  return as_a <rtx_insn_list *> (gen_rtx_fmt_ue (INSN_LIST, mode, insn,
						 insn_list));
}

rtx_insn *
gen_rtx_INSN (machine_mode mode, rtx_insn *prev_insn, rtx_insn *next_insn,
	      basic_block bb, rtx pattern, int location, int code,
	      rtx reg_notes)
{
  return as_a <rtx_insn *> (gen_rtx_fmt_uuBeiie (INSN, mode,
						 prev_insn, next_insn,
						 bb, pattern, location, code,
						 reg_notes));
}

rtx
gen_rtx_CONST_INT (machine_mode mode ATTRIBUTE_UNUSED, HOST_WIDE_INT arg)
{
  if (arg >= - MAX_SAVED_CONST_INT && arg <= MAX_SAVED_CONST_INT)
    return const_int_rtx[arg + MAX_SAVED_CONST_INT];

#if STORE_FLAG_VALUE != 1 && STORE_FLAG_VALUE != -1
  if (const_true_rtx && arg == STORE_FLAG_VALUE)
    return const_true_rtx;
#endif

  /* Look up the CONST_INT in the hash table.  */
  rtx *slot = const_int_htab->find_slot_with_hash (arg, (hashval_t) arg,
						   INSERT);
  if (*slot == 0)
    *slot = gen_rtx_raw_CONST_INT (VOIDmode, arg);

  return *slot;
}

rtx
gen_int_mode (poly_int64 c, machine_mode mode)
{
  c = trunc_int_for_mode (c, mode);
  if (c.is_constant ())
    return GEN_INT (c.coeffs[0]);
  unsigned int prec = GET_MODE_PRECISION (as_a <scalar_mode> (mode));
  return immed_wide_int_const (poly_wide_int::from (c, prec, SIGNED), mode);
}
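
/* For illustration: gen_int_mode truncates to the mode first, so

       gen_int_mode (0x1ff, QImode)

   yields (const_int -1), the same shared rtx as GEN_INT (-1), because
   0x1ff truncated to 8 bits and sign-extended back is -1.  (Worked
   example, not from the original sources.)  */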

/* CONST_DOUBLEs might be created from pairs of integers, or from
   REAL_VALUE_TYPEs.  Also, their length is known only at run time,
   so we cannot use gen_rtx_raw_CONST_DOUBLE.  */

/* Determine whether REAL, a CONST_DOUBLE, already exists in the
   hash table.  If so, return its counterpart; otherwise add it
   to the hash table and return it.  */
static rtx
lookup_const_double (rtx real)
{
  rtx *slot = const_double_htab->find_slot (real, INSERT);
  if (*slot == 0)
    *slot = real;

  return *slot;
}

/* Return a CONST_DOUBLE rtx for a floating-point value specified by
   VALUE in mode MODE.  */
rtx
const_double_from_real_value (REAL_VALUE_TYPE value, machine_mode mode)
{
  rtx real = rtx_alloc (CONST_DOUBLE);
  PUT_MODE (real, mode);

  real->u.rv = value;

  return lookup_const_double (real);
}

/* Determine whether FIXED, a CONST_FIXED, already exists in the
   hash table.  If so, return its counterpart; otherwise add it
   to the hash table and return it.  */

static rtx
lookup_const_fixed (rtx fixed)
{
  rtx *slot = const_fixed_htab->find_slot (fixed, INSERT);
  if (*slot == 0)
    *slot = fixed;

  return *slot;
}

/* Return a CONST_FIXED rtx for a fixed-point value specified by
   VALUE in mode MODE.  */

rtx
const_fixed_from_fixed_value (FIXED_VALUE_TYPE value, machine_mode mode)
{
  rtx fixed = rtx_alloc (CONST_FIXED);
  PUT_MODE (fixed, mode);

  fixed->u.fv = value;

  return lookup_const_fixed (fixed);
}

#if TARGET_SUPPORTS_WIDE_INT == 0
/* Constructs double_int from rtx CST.  */

double_int
rtx_to_double_int (const_rtx cst)
{
  double_int r;

  if (CONST_INT_P (cst))
    r = double_int::from_shwi (INTVAL (cst));
  else if (CONST_DOUBLE_AS_INT_P (cst))
    {
      r.low = CONST_DOUBLE_LOW (cst);
      r.high = CONST_DOUBLE_HIGH (cst);
    }
  else
    gcc_unreachable ();

  return r;
}
#endif

#if TARGET_SUPPORTS_WIDE_INT
/* Determine whether CONST_WIDE_INT WINT already exists in the hash table.
   If so, return its counterpart; otherwise add it to the hash table and
   return it.  */

static rtx
lookup_const_wide_int (rtx wint)
{
  rtx *slot = const_wide_int_htab->find_slot (wint, INSERT);
  if (*slot == 0)
    *slot = wint;

  return *slot;
}
#endif

/* Return an rtx constant for V, given that the constant has mode MODE.
   The returned rtx will be a CONST_INT if V fits, otherwise it will be
   a CONST_DOUBLE (if !TARGET_SUPPORTS_WIDE_INT) or a CONST_WIDE_INT
   (if TARGET_SUPPORTS_WIDE_INT).  */

static rtx
immed_wide_int_const_1 (const wide_int_ref &v, machine_mode mode)
{
  unsigned int len = v.get_len ();
  /* Not scalar_int_mode because we also allow pointer bound modes.  */
  unsigned int prec = GET_MODE_PRECISION (as_a <scalar_mode> (mode));

  /* Allow truncation but not extension since we do not know if the
     number is signed or unsigned.  */
  gcc_assert (prec <= v.get_precision ());

  if (len < 2 || prec <= HOST_BITS_PER_WIDE_INT)
    return gen_int_mode (v.elt (0), mode);

#if TARGET_SUPPORTS_WIDE_INT
  {
    unsigned int i;
    rtx value;
    unsigned int blocks_needed
      = (prec + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT;

    if (len > blocks_needed)
      len = blocks_needed;

    value = const_wide_int_alloc (len);

    /* It is so tempting to just put the mode in here.  Must control
       myself ... */
    PUT_MODE (value, VOIDmode);
    CWI_PUT_NUM_ELEM (value, len);

    for (i = 0; i < len; i++)
      CONST_WIDE_INT_ELT (value, i) = v.elt (i);

    return lookup_const_wide_int (value);
  }
#else
  return immed_double_const (v.elt (0), v.elt (1), mode);
#endif
}

#if TARGET_SUPPORTS_WIDE_INT == 0
/* Return a CONST_DOUBLE or CONST_INT for a value specified as a pair
   of ints: I0 is the low-order word and I1 is the high-order word.
   For values that are larger than HOST_BITS_PER_DOUBLE_INT, the
   implied upper bits are copies of the high bit of i1.  The value
   itself is neither signed nor unsigned.  Do not use this routine for
   non-integer modes; convert to REAL_VALUE_TYPE and use
   const_double_from_real_value.  */

rtx
immed_double_const (HOST_WIDE_INT i0, HOST_WIDE_INT i1, machine_mode mode)
{
  rtx value;
  unsigned int i;

  /* There are the following cases (note that there are no modes with
     HOST_BITS_PER_WIDE_INT < GET_MODE_BITSIZE (mode)
     < HOST_BITS_PER_DOUBLE_INT):

     1) If GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT, then we use
	gen_int_mode.
     2) If the value of the integer fits into HOST_WIDE_INT anyway
	(i.e., i1 consists only of copies of the sign bit, and the signs
	of i0 and i1 are the same), then we return a CONST_INT for i0.
     3) Otherwise, we create a CONST_DOUBLE for i0 and i1.  */
  scalar_mode smode;
  if (is_a <scalar_mode> (mode, &smode)
      && GET_MODE_BITSIZE (smode) <= HOST_BITS_PER_WIDE_INT)
    return gen_int_mode (i0, mode);

  /* If this integer fits in one word, return a CONST_INT.  */
  if ((i1 == 0 && i0 >= 0) || (i1 == ~0 && i0 < 0))
    return GEN_INT (i0);

  /* We use VOIDmode for integers.  */
  value = rtx_alloc (CONST_DOUBLE);
  PUT_MODE (value, VOIDmode);

  CONST_DOUBLE_LOW (value) = i0;
  CONST_DOUBLE_HIGH (value) = i1;

  for (i = 2; i < (sizeof CONST_DOUBLE_FORMAT - 1); i++)
    XWINT (value, i) = 0;

  return lookup_const_double (value);
}
#endif

/* Return an rtx representation of C in mode MODE.  */

rtx
immed_wide_int_const (const poly_wide_int_ref &c, machine_mode mode)
{
  if (c.is_constant ())
    return immed_wide_int_const_1 (c.coeffs[0], mode);

  /* Not scalar_int_mode because we also allow pointer bound modes.  */
  unsigned int prec = GET_MODE_PRECISION (as_a <scalar_mode> (mode));

  /* Allow truncation but not extension since we do not know if the
     number is signed or unsigned.  */
  gcc_assert (prec <= c.coeffs[0].get_precision ());
  poly_wide_int newc = poly_wide_int::from (c, prec, SIGNED);

  /* See whether we already have an rtx for this constant.  */
  inchash::hash h;
  h.add_int (mode);
  for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
    h.add_wide_int (newc.coeffs[i]);
  const_poly_int_hasher::compare_type typed_value (mode, newc);
  rtx *slot = const_poly_int_htab->find_slot_with_hash (typed_value,
							h.end (), INSERT);
  rtx x = *slot;
  if (x)
    return x;

  /* Create a new rtx.  There's a choice to be made here between installing
     the actual mode of the rtx or leaving it as VOIDmode (for consistency
     with CONST_INT).  In practice the handling of the codes is different
     enough that we get no benefit from using VOIDmode, and various places
     assume that VOIDmode implies CONST_INT.  Using the real mode seems like
     the right long-term direction anyway.  */
  typedef trailing_wide_ints<NUM_POLY_INT_COEFFS> twi;
  size_t extra_size = twi::extra_size (prec);
  x = rtx_alloc_v (CONST_POLY_INT,
		   sizeof (struct const_poly_int_def) + extra_size);
  PUT_MODE (x, mode);
  CONST_POLY_INT_COEFFS (x).set_precision (prec);
  for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
    CONST_POLY_INT_COEFFS (x)[i] = newc.coeffs[i];

  *slot = x;
  return x;
}

rtx
gen_rtx_REG (machine_mode mode, unsigned int regno)
{
  /* In case the MD file explicitly references the frame pointer, have
     all such references point to the same frame pointer.  This is
     used during frame pointer elimination to distinguish the explicit
     references to these registers from pseudos that happened to be
     assigned to them.

     If we have eliminated the frame pointer or arg pointer, we will
     be using it as a normal register, for example as a spill
     register.  In such cases, we might be accessing it in a mode that
     is not Pmode and therefore cannot use the pre-allocated rtx.

     Also don't do this when we are making new REGs in reload, since
     we don't want to get confused with the real pointers.  */

  if (mode == Pmode && !reload_in_progress && !lra_in_progress)
    {
      if (regno == FRAME_POINTER_REGNUM
	  && (!reload_completed || frame_pointer_needed))
	return frame_pointer_rtx;

      if (!HARD_FRAME_POINTER_IS_FRAME_POINTER
	  && regno == HARD_FRAME_POINTER_REGNUM
	  && (!reload_completed || frame_pointer_needed))
	return hard_frame_pointer_rtx;
#if !HARD_FRAME_POINTER_IS_ARG_POINTER
      if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	  && regno == ARG_POINTER_REGNUM)
	return arg_pointer_rtx;
#endif
#ifdef RETURN_ADDRESS_POINTER_REGNUM
      if (regno == RETURN_ADDRESS_POINTER_REGNUM)
	return return_address_pointer_rtx;
#endif
      if (regno == (unsigned) PIC_OFFSET_TABLE_REGNUM
	  && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
	  && fixed_regs[PIC_OFFSET_TABLE_REGNUM])
	return pic_offset_table_rtx;
      if (regno == STACK_POINTER_REGNUM)
	return stack_pointer_rtx;
    }

#if 0
  /* If the per-function register table has been set up, try to re-use
     an existing entry in that table to avoid useless generation of RTL.

     This code is disabled for now until we can fix the various backends
     which depend on having non-shared hard registers in some cases.  Long
     term we want to re-enable this code as it can significantly cut down
     on the amount of useless RTL that gets generated.

     We'll also need to fix some code that runs after reload that wants to
     set ORIGINAL_REGNO.  */

  if (cfun
      && cfun->emit
      && regno_reg_rtx
      && regno < FIRST_PSEUDO_REGISTER
      && reg_raw_mode[regno] == mode)
    return regno_reg_rtx[regno];
#endif

  return gen_raw_REG (mode, regno);
}
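
/* For illustration: the sharing above means that, outside of reload/LRA,

       gen_rtx_REG (Pmode, STACK_POINTER_REGNUM) == stack_pointer_rtx

   holds as a pointer equality, whereas gen_raw_REG always allocates a
   fresh rtx.  (Illustrative note based on the code above.)  */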

rtx
gen_rtx_MEM (machine_mode mode, rtx addr)
{
  rtx rt = gen_rtx_raw_MEM (mode, addr);

  /* This field is not cleared by the mere allocation of the rtx, so
     we clear it here.  */
  MEM_ATTRS (rt) = 0;

  return rt;
}

/* Generate a memory referring to non-trapping constant memory.  */

rtx
gen_const_mem (machine_mode mode, rtx addr)
{
  rtx mem = gen_rtx_MEM (mode, addr);
  MEM_READONLY_P (mem) = 1;
  MEM_NOTRAP_P (mem) = 1;
  return mem;
}

/* Generate a MEM referring to fixed portions of the frame, e.g., register
   save areas.  */

rtx
gen_frame_mem (machine_mode mode, rtx addr)
{
  rtx mem = gen_rtx_MEM (mode, addr);
  MEM_NOTRAP_P (mem) = 1;
  set_mem_alias_set (mem, get_frame_alias_set ());
  return mem;
}

/* Generate a MEM referring to a temporary use of the stack, not part
   of the fixed stack frame.  For example, something which is pushed
   by a target splitter.  */
rtx
gen_tmp_stack_mem (machine_mode mode, rtx addr)
{
  rtx mem = gen_rtx_MEM (mode, addr);
  MEM_NOTRAP_P (mem) = 1;
  if (!cfun->calls_alloca)
    set_mem_alias_set (mem, get_frame_alias_set ());
  return mem;
}

/* We want to create (subreg:OMODE (obj:IMODE) OFFSET).  Return true if
   this construct would be valid, and false otherwise.  */

bool
validate_subreg (machine_mode omode, machine_mode imode,
		 const_rtx reg, poly_uint64 offset)
{
  poly_uint64 isize = GET_MODE_SIZE (imode);
  poly_uint64 osize = GET_MODE_SIZE (omode);

  /* The sizes must be ordered, so that we know whether the subreg
     is partial, paradoxical or complete.  */
  if (!ordered_p (isize, osize))
    return false;

  /* All subregs must be aligned.  */
  if (!multiple_p (offset, osize))
    return false;

  /* The subreg offset cannot be outside the inner object.  */
  if (maybe_ge (offset, isize))
    return false;

  poly_uint64 regsize = REGMODE_NATURAL_SIZE (imode);

  /* ??? This should not be here.  Temporarily continue to allow word_mode
     subregs of anything.  The most common offender is (subreg:SI (reg:DF)).
     Generally, backends are doing something sketchy but it'll take time to
     fix them all.  */
  if (omode == word_mode)
    ;
  /* ??? Similarly, e.g. with (subreg:DF (reg:TI)).  Though store_bit_field
     is the culprit here, and not the backends.  */
  else if (known_ge (osize, regsize) && known_ge (isize, osize))
    ;
  /* Allow component subregs of complex and vector.  Though given the below
     extraction rules, it's not always clear what that means.  */
  else if ((COMPLEX_MODE_P (imode) || VECTOR_MODE_P (imode))
	   && GET_MODE_INNER (imode) == omode)
    ;
  /* ??? x86 sse code makes heavy use of *paradoxical* vector subregs,
     i.e. (subreg:V4SF (reg:SF) 0) or (subreg:V4SF (reg:V2SF) 0).  This
     surely isn't the cleanest way to represent this.  It's questionable
     if this ought to be represented at all -- why can't this all be hidden
     in post-reload splitters that make arbitrary mode changes to the
     registers themselves.  */
  else if (VECTOR_MODE_P (omode)
	   && GET_MODE_UNIT_SIZE (omode) == GET_MODE_UNIT_SIZE (imode))
    ;
  /* Subregs involving floating point modes are not allowed to
     change size unless it's an insert into a complex mode.
     Therefore (subreg:DI (reg:DF) 0) and (subreg:CS (reg:SF) 0) are fine, but
     (subreg:SI (reg:DF) 0) isn't.  */
  else if ((FLOAT_MODE_P (imode) || FLOAT_MODE_P (omode))
	   && !COMPLEX_MODE_P (omode))
    {
      if (! (known_eq (isize, osize)
	     /* LRA can use subreg to store a floating point value in
		an integer mode.  Although the floating point and the
		integer modes need the same number of hard registers,
		the size of the floating point mode can be less than that
		of the integer mode.  LRA also uses subregs when a register
		must be used in different modes within one insn.  */
	     || lra_in_progress))
	return false;
    }

  /* Paradoxical subregs must have offset zero.  */
  if (maybe_gt (osize, isize))
    return known_eq (offset, 0U);

  /* This is a normal subreg.  Verify that the offset is representable.  */

  /* For hard registers, we already have most of these rules collected in
     subreg_offset_representable_p.  */
  if (reg && REG_P (reg) && HARD_REGISTER_P (reg))
    {
      unsigned int regno = REGNO (reg);

      if ((COMPLEX_MODE_P (imode) || VECTOR_MODE_P (imode))
	  && GET_MODE_INNER (imode) == omode)
	;
      else if (!REG_CAN_CHANGE_MODE_P (regno, imode, omode))
	return false;

      return subreg_offset_representable_p (regno, imode, offset, omode);
    }
  /* Do not allow SUBREG with stricter alignment than the inner MEM.  */
  else if (reg && MEM_P (reg) && STRICT_ALIGNMENT
	   && MEM_ALIGN (reg) < GET_MODE_ALIGNMENT (omode))
    return false;

  /* The outer size must be ordered wrt the register size, otherwise
     we wouldn't know at compile time how many registers the outer
     mode occupies.  */
  if (!ordered_p (osize, regsize))
    return false;

  /* For pseudo registers, we want most of the same checks.  Namely:

     Assume that the pseudo register will be allocated to hard registers
     that can hold REGSIZE bytes each.  If OSIZE is not a multiple of REGSIZE,
     the remainder must correspond to the lowpart of the containing hard
     register.  If BYTES_BIG_ENDIAN, the lowpart is at the highest offset,
     otherwise it is at the lowest offset.

     Given that we've already checked the mode and offset alignment,
     we only have to check subblock subregs here.  */
  if (maybe_lt (osize, regsize)
      && ! (lra_in_progress && (FLOAT_MODE_P (imode) || FLOAT_MODE_P (omode))))
    {
      /* It is invalid for the target to pick a register size for a mode
	 that isn't ordered wrt to the size of that mode.  */
      poly_uint64 block_size = ordered_min (isize, regsize);
      unsigned int start_reg;
      poly_uint64 offset_within_reg;
      if (!can_div_trunc_p (offset, block_size, &start_reg, &offset_within_reg)
	  || (BYTES_BIG_ENDIAN
	      ? maybe_ne (offset_within_reg, block_size - osize)
	      : maybe_ne (offset_within_reg, 0U)))
	return false;
    }
  return true;
}
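
/* For illustration of the rules above: on a typical 32-bit-word target,

       (subreg:SI (reg:DI d) 0) and (subreg:SI (reg:DI d) 4)

   are valid (aligned and in range), while (subreg:SI (reg:DI d) 2)
   fails the multiple_p alignment check, and (subreg:SI (reg:DF f) 0)
   is tolerated only via the word_mode escape hatch.  (Worked examples,
   not part of the original comments.)  */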

rtx
gen_rtx_SUBREG (machine_mode mode, rtx reg, poly_uint64 offset)
{
  gcc_assert (validate_subreg (mode, GET_MODE (reg), reg, offset));
  return gen_rtx_raw_SUBREG (mode, reg, offset);
}

/* Generate a SUBREG representing the least-significant part of REG if MODE
   is smaller than the mode of REG, otherwise a paradoxical SUBREG.  */

rtx
gen_lowpart_SUBREG (machine_mode mode, rtx reg)
{
  machine_mode inmode;

  inmode = GET_MODE (reg);
  if (inmode == VOIDmode)
    inmode = mode;
  return gen_rtx_SUBREG (mode, reg,
			 subreg_lowpart_offset (mode, inmode));
}

rtx
gen_rtx_VAR_LOCATION (machine_mode mode, tree decl, rtx loc,
		      enum var_init_status status)
{
  rtx x = gen_rtx_fmt_te (VAR_LOCATION, mode, decl, loc);
  PAT_VAR_LOCATION_STATUS (x) = status;
  return x;
}


/* Create an rtvec and store within it the RTXen passed in the arguments.  */

rtvec
gen_rtvec (int n, ...)
{
  int i;
  rtvec rt_val;
  va_list p;

  va_start (p, n);

  /* Don't allocate an empty rtvec...  */
  if (n == 0)
    {
      va_end (p);
      return NULL_RTVEC;
    }

  rt_val = rtvec_alloc (n);

  for (i = 0; i < n; i++)
    rt_val->elem[i] = va_arg (p, rtx);

  va_end (p);
  return rt_val;
}

rtvec
gen_rtvec_v (int n, rtx *argp)
{
  int i;
  rtvec rt_val;

  /* Don't allocate an empty rtvec...  */
  if (n == 0)
    return NULL_RTVEC;

  rt_val = rtvec_alloc (n);

  for (i = 0; i < n; i++)
    rt_val->elem[i] = *argp++;

  return rt_val;
}

rtvec
gen_rtvec_v (int n, rtx_insn **argp)
{
  int i;
  rtvec rt_val;

  /* Don't allocate an empty rtvec...  */
  if (n == 0)
    return NULL_RTVEC;

  rt_val = rtvec_alloc (n);

  for (i = 0; i < n; i++)
    rt_val->elem[i] = *argp++;

  return rt_val;
}


/* Return the number of bytes between the start of an OUTER_MODE
   in-memory value and the start of an INNER_MODE in-memory value,
   given that the former is a lowpart of the latter.  It may be a
   paradoxical lowpart, in which case the offset will be negative
   on big-endian targets.  */

poly_int64
byte_lowpart_offset (machine_mode outer_mode,
		     machine_mode inner_mode)
{
  if (paradoxical_subreg_p (outer_mode, inner_mode))
    return -subreg_lowpart_offset (inner_mode, outer_mode);
  else
    return subreg_lowpart_offset (outer_mode, inner_mode);
}
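
/* For illustration: for outer SImode and inner DImode,
   byte_lowpart_offset returns 0 on a little-endian target and 4 on a
   big-endian one, since the least-significant word of a DImode value
   sits at the higher byte offset when bytes are big-endian.  (Worked
   example, not from the original comments.)  */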

/* Return the offset of (subreg:OUTER_MODE (mem:INNER_MODE X) OFFSET)
   from address X.  For paradoxical big-endian subregs this is a
   negative value, otherwise it's the same as OFFSET.  */

poly_int64
subreg_memory_offset (machine_mode outer_mode, machine_mode inner_mode,
		      poly_uint64 offset)
{
  if (paradoxical_subreg_p (outer_mode, inner_mode))
    {
      gcc_assert (known_eq (offset, 0U));
      return -subreg_lowpart_offset (inner_mode, outer_mode);
    }
  return offset;
}

/* As above, but return the offset that existing subreg X would have
   if SUBREG_REG (X) were stored in memory.  The only significant thing
   about the current SUBREG_REG is its mode.  */

poly_int64
subreg_memory_offset (const_rtx x)
{
  return subreg_memory_offset (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
			       SUBREG_BYTE (x));
}

/* Generate a REG rtx for a new pseudo register of mode MODE.
   This pseudo is assigned the next sequential register number.  */

rtx
gen_reg_rtx (machine_mode mode)
{
  rtx val;
  unsigned int align = GET_MODE_ALIGNMENT (mode);

  gcc_assert (can_create_pseudo_p ());

  /* If a virtual register with bigger mode alignment is generated,
     increase the stack alignment estimation because it might be spilled
     to the stack later.  */
  if (SUPPORTS_STACK_ALIGNMENT
      && crtl->stack_alignment_estimated < align
      && !crtl->stack_realign_processed)
    {
      unsigned int min_align = MINIMUM_ALIGNMENT (NULL, mode, align);
      if (crtl->stack_alignment_estimated < min_align)
	crtl->stack_alignment_estimated = min_align;
    }

  if (generating_concat_p
      && (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
	  || GET_MODE_CLASS (mode) == MODE_COMPLEX_INT))
    {
      /* For complex modes, don't make a single pseudo.
	 Instead, make a CONCAT of two pseudos.
	 This allows noncontiguous allocation of the real and imaginary parts,
	 which makes much better code.  Besides, allocating DCmode
	 pseudos overstrains reload on some machines like the 386.  */
      rtx realpart, imagpart;
      machine_mode partmode = GET_MODE_INNER (mode);

      realpart = gen_reg_rtx (partmode);
      imagpart = gen_reg_rtx (partmode);
      return gen_rtx_CONCAT (mode, realpart, imagpart);
    }

  /* Do not call gen_reg_rtx with uninitialized crtl.  */
  gcc_assert (crtl->emit.regno_pointer_align_length);

  crtl->emit.ensure_regno_capacity ();
  gcc_assert (reg_rtx_no < crtl->emit.regno_pointer_align_length);

  val = gen_raw_REG (mode, reg_rtx_no);
  regno_reg_rtx[reg_rtx_no++] = val;
  return val;
}
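
/* For illustration: while generating_concat_p, a request such as

       gen_reg_rtx (DCmode)

   returns (concat:DC (reg:DF p) (reg:DF p')) built from two fresh
   DFmode pseudos rather than one DCmode pseudo, per the complex-mode
   branch above.  (Illustrative example; p and p' stand for the new
   pseudo registers.)  */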
1218 | |
1219 | /* Make sure m_regno_pointer_align, and regno_reg_rtx are large |
1220 | enough to have elements in the range 0 <= idx <= reg_rtx_no. */ |
1221 | |
1222 | void |
1223 | emit_status::ensure_regno_capacity () |
1224 | { |
1225 | int old_size = regno_pointer_align_length; |
1226 | |
1227 | if (reg_rtx_no < old_size) |
1228 | return; |
1229 | |
1230 | int new_size = old_size * 2; |
1231 | while (reg_rtx_no >= new_size) |
1232 | new_size *= 2; |
1233 | |
1234 | char *tmp = XRESIZEVEC (char, regno_pointer_align, new_size); |
1235 | memset (s: tmp + old_size, c: 0, n: new_size - old_size); |
1236 | regno_pointer_align = (unsigned char *) tmp; |
1237 | |
1238 | rtx *new1 = GGC_RESIZEVEC (rtx, regno_reg_rtx, new_size); |
1239 | memset (s: new1 + old_size, c: 0, n: (new_size - old_size) * sizeof (rtx)); |
1240 | regno_reg_rtx = new1; |
1241 | |
1242 | crtl->emit.regno_pointer_align_length = new_size; |
1243 | } |
1244 | |
1245 | /* Return TRUE if REG is a PARM_DECL, FALSE otherwise. */ |
1246 | |
1247 | bool |
1248 | reg_is_parm_p (rtx reg) |
1249 | { |
1250 | tree decl; |
1251 | |
1252 | gcc_assert (REG_P (reg)); |
1253 | decl = REG_EXPR (reg); |
1254 | return (decl && TREE_CODE (decl) == PARM_DECL); |
1255 | } |
1256 | |
1257 | /* Update NEW with the same attributes as REG, but with OFFSET added |
1258 | to the REG_OFFSET. */ |
1259 | |
1260 | static void |
1261 | update_reg_offset (rtx new_rtx, rtx reg, poly_int64 offset) |
1262 | { |
1263 | REG_ATTRS (new_rtx) = get_reg_attrs (REG_EXPR (reg), |
1264 | REG_OFFSET (reg) + offset); |
1265 | } |
1266 | |
1267 | /* Generate a register with same attributes as REG, but with OFFSET |
1268 | added to the REG_OFFSET. */ |
1269 | |
1270 | rtx |
1271 | gen_rtx_REG_offset (rtx reg, machine_mode mode, unsigned int regno, |
1272 | poly_int64 offset) |
1273 | { |
1274 | /* Use gen_raw_REG rather than gen_rtx_REG, because otherwise we'd |
1275 | overwrite REG_ATTRS (and in the callers often ORIGINAL_REGNO too) |
1276 | of the shared REG rtxes like stack_pointer_rtx etc. This should |
1277 | happen only for SUBREGs from DEBUG_INSNs, RA should ensure |
1278 | multi-word registers don't overlap the special registers like |
1279 | stack pointer. */ |
1280 | rtx new_rtx = gen_raw_REG (mode, regno); |
1281 | |
1282 | update_reg_offset (new_rtx, reg, offset); |
1283 | return new_rtx; |
1284 | } |
1285 | |
1286 | /* Generate a new pseudo-register with the same attributes as REG, but |
1287 | with OFFSET added to the REG_OFFSET. */ |
1288 | |
1289 | rtx |
1290 | gen_reg_rtx_offset (rtx reg, machine_mode mode, int offset) |
1291 | { |
1292 | rtx new_rtx = gen_reg_rtx (mode); |
1293 | |
1294 | update_reg_offset (new_rtx, reg, offset); |
1295 | return new_rtx; |
1296 | } |
1297 | |
1298 | /* Adjust REG in-place so that it has mode MODE. It is assumed that the |
1299 | new register is a (possibly paradoxical) lowpart of the old one. */ |
1300 | |
1301 | void |
1302 | adjust_reg_mode (rtx reg, machine_mode mode) |
1303 | { |
1304 | update_reg_offset (new_rtx: reg, reg, offset: byte_lowpart_offset (outer_mode: mode, GET_MODE (reg))); |
1305 | PUT_MODE (x: reg, mode); |
1306 | } |
1307 | |
1308 | /* Copy REG's attributes from X, if X has any attributes. If REG and X |
1309 | have different modes, REG is a (possibly paradoxical) lowpart of X. */ |
1310 | |
1311 | void |
1312 | set_reg_attrs_from_value (rtx reg, rtx x) |
1313 | { |
1314 | poly_int64 offset; |
1315 | bool can_be_reg_pointer = true; |
1316 | |
1317 | /* Don't call mark_reg_pointer for incompatible pointer sign |
1318 | extension. */ |
1319 | while (GET_CODE (x) == SIGN_EXTEND |
1320 | || GET_CODE (x) == ZERO_EXTEND |
1321 | || GET_CODE (x) == TRUNCATE |
1322 | || (GET_CODE (x) == SUBREG && subreg_lowpart_p (x))) |
1323 | { |
1324 | #if defined(POINTERS_EXTEND_UNSIGNED) |
1325 | if (((GET_CODE (x) == SIGN_EXTEND && POINTERS_EXTEND_UNSIGNED) |
1326 | || (GET_CODE (x) == ZERO_EXTEND && ! POINTERS_EXTEND_UNSIGNED) |
1327 | || (paradoxical_subreg_p (x) |
1328 | && ! (SUBREG_PROMOTED_VAR_P (x) |
1329 | && SUBREG_CHECK_PROMOTED_SIGN (x, |
1330 | POINTERS_EXTEND_UNSIGNED)))) |
1331 | && !targetm.have_ptr_extend ()) |
1332 | can_be_reg_pointer = false; |
1333 | #endif |
1334 | x = XEXP (x, 0); |
1335 | } |
1336 | |
1337 | /* Hard registers can be reused for multiple purposes within the same |
1338 | function, so setting REG_ATTRS, REG_POINTER and REG_POINTER_ALIGN |
1339 | on them is wrong. */ |
1340 | if (HARD_REGISTER_P (reg)) |
1341 | return; |
1342 | |
1343 | offset = byte_lowpart_offset (GET_MODE (reg), GET_MODE (x)); |
1344 | if (MEM_P (x)) |
1345 | { |
1346 | if (MEM_OFFSET_KNOWN_P (x)) |
1347 | REG_ATTRS (reg) = get_reg_attrs (MEM_EXPR (x), |
1348 | MEM_OFFSET (x) + offset); |
1349 | if (can_be_reg_pointer && MEM_POINTER (x)) |
1350 | mark_reg_pointer (reg, 0); |
1351 | } |
1352 | else if (REG_P (x)) |
1353 | { |
1354 | if (REG_ATTRS (x)) |
1355 | update_reg_offset (new_rtx: reg, reg: x, offset); |
1356 | if (can_be_reg_pointer && REG_POINTER (x)) |
1357 | mark_reg_pointer (reg, REGNO_POINTER_ALIGN (REGNO (x))); |
1358 | } |
1359 | } |
1360 | |
1361 | /* Generate a REG rtx for a new pseudo register, copying the mode |
1362 | and attributes from X. */ |
1363 | |
1364 | rtx |
1365 | gen_reg_rtx_and_attrs (rtx x) |
1366 | { |
1367 | rtx reg = gen_reg_rtx (GET_MODE (x)); |
1368 | set_reg_attrs_from_value (reg, x); |
1369 | return reg; |
1370 | } |
1371 | |
1372 | /* Set the register attributes for registers contained in PARM_RTX. |
1373 | Use needed values from memory attributes of MEM. */ |
1374 | |
1375 | void |
1376 | set_reg_attrs_for_parm (rtx parm_rtx, rtx mem) |
1377 | { |
1378 | if (REG_P (parm_rtx)) |
1379 | set_reg_attrs_from_value (reg: parm_rtx, x: mem); |
1380 | else if (GET_CODE (parm_rtx) == PARALLEL) |
1381 | { |
1382 | /* Check for a NULL entry in the first slot, used to indicate that the |
1383 | parameter goes both on the stack and in registers. */ |
1384 | int i = XEXP (XVECEXP (parm_rtx, 0, 0), 0) ? 0 : 1; |
1385 | for (; i < XVECLEN (parm_rtx, 0); i++) |
1386 | { |
1387 | rtx x = XVECEXP (parm_rtx, 0, i); |
1388 | if (REG_P (XEXP (x, 0))) |
1389 | REG_ATTRS (XEXP (x, 0)) |
1390 | = get_reg_attrs (MEM_EXPR (mem), |
1391 | INTVAL (XEXP (x, 1))); |
1392 | } |
1393 | } |
1394 | } |
1395 | |
1396 | /* Set the REG_ATTRS for registers in value X, given that X represents |
1397 | decl T. */ |
1398 | |
1399 | void |
1400 | set_reg_attrs_for_decl_rtl (tree t, rtx x) |
1401 | { |
1402 | if (!t) |
1403 | return; |
1404 | tree tdecl = t; |
1405 | if (GET_CODE (x) == SUBREG) |
1406 | { |
1407 | gcc_assert (subreg_lowpart_p (x)); |
1408 | x = SUBREG_REG (x); |
1409 | } |
1410 | if (REG_P (x)) |
1411 | REG_ATTRS (x) |
1412 | = get_reg_attrs (decl: t, offset: byte_lowpart_offset (GET_MODE (x), |
1413 | DECL_P (tdecl) |
1414 | ? DECL_MODE (tdecl) |
1415 | : TYPE_MODE (TREE_TYPE (tdecl)))); |
1416 | if (GET_CODE (x) == CONCAT) |
1417 | { |
1418 | if (REG_P (XEXP (x, 0))) |
1419 | REG_ATTRS (XEXP (x, 0)) = get_reg_attrs (decl: t, offset: 0); |
1420 | if (REG_P (XEXP (x, 1))) |
1421 | REG_ATTRS (XEXP (x, 1)) |
1422 | = get_reg_attrs (decl: t, GET_MODE_UNIT_SIZE (GET_MODE (XEXP (x, 0)))); |
1423 | } |
1424 | if (GET_CODE (x) == PARALLEL) |
1425 | { |
1426 | int i, start; |
1427 | |
1428 | /* Check for a NULL entry, used to indicate that the parameter goes |
1429 | both on the stack and in registers. */ |
1430 | if (XEXP (XVECEXP (x, 0, 0), 0)) |
1431 | start = 0; |
1432 | else |
1433 | start = 1; |
1434 | |
1435 | for (i = start; i < XVECLEN (x, 0); i++) |
1436 | { |
1437 | rtx y = XVECEXP (x, 0, i); |
1438 | if (REG_P (XEXP (y, 0))) |
1439 | REG_ATTRS (XEXP (y, 0)) = get_reg_attrs (decl: t, INTVAL (XEXP (y, 1))); |
1440 | } |
1441 | } |
1442 | } |
1443 | |
1444 | /* Assign the RTX X to declaration T. */ |
1445 | |
1446 | void |
1447 | set_decl_rtl (tree t, rtx x) |
1448 | { |
1449 | DECL_WRTL_CHECK (t)->decl_with_rtl.rtl = x; |
1450 | if (x) |
1451 | set_reg_attrs_for_decl_rtl (t, x); |
1452 | } |
1453 | |
1454 | /* Assign the RTX X to parameter declaration T. BY_REFERENCE_P is true |
1455 | if the ABI requires the parameter to be passed by reference. */ |
1456 | |
1457 | void |
1458 | set_decl_incoming_rtl (tree t, rtx x, bool by_reference_p) |
1459 | { |
1460 | DECL_INCOMING_RTL (t) = x; |
1461 | if (x && !by_reference_p) |
1462 | set_reg_attrs_for_decl_rtl (t, x); |
1463 | } |
1464 | |
1465 | /* Identify REG (which may be a CONCAT) as a user register. */ |
1466 | |
1467 | void |
1468 | mark_user_reg (rtx reg) |
1469 | { |
1470 | if (GET_CODE (reg) == CONCAT) |
1471 | { |
1472 | REG_USERVAR_P (XEXP (reg, 0)) = 1; |
1473 | REG_USERVAR_P (XEXP (reg, 1)) = 1; |
1474 | } |
1475 | else |
1476 | { |
1477 | gcc_assert (REG_P (reg)); |
1478 | REG_USERVAR_P (reg) = 1; |
1479 | } |
1480 | } |
1481 | |
1482 | /* Identify REG as a probable pointer register and show its alignment |
1483 | as ALIGN, if nonzero. */ |
1484 | |
1485 | void |
1486 | mark_reg_pointer (rtx reg, int align) |
1487 | { |
1488 | if (! REG_POINTER (reg)) |
1489 | { |
1490 | REG_POINTER (reg) = 1; |
1491 | |
1492 | if (align) |
1493 | REGNO_POINTER_ALIGN (REGNO (reg)) = align; |
1494 | } |
1495 | else if (align && align < REGNO_POINTER_ALIGN (REGNO (reg))) |
1496 | /* We can no-longer be sure just how aligned this pointer is. */ |
1497 | REGNO_POINTER_ALIGN (REGNO (reg)) = align; |
1498 | } |
1499 | |
1500 | /* Return 1 plus largest pseudo reg number used in the current function. */ |
1501 | |
1502 | int |
1503 | max_reg_num (void) |
1504 | { |
1505 | return reg_rtx_no; |
1506 | } |
1507 | |
1508 | /* Return 1 + the largest label number used so far in the current function. */ |
1509 | |
1510 | int |
1511 | max_label_num (void) |
1512 | { |
1513 | return label_num; |
1514 | } |
1515 | |
1516 | /* Return first label number used in this function (if any were used). */ |
1517 | |
1518 | int |
1519 | get_first_label_num (void) |
1520 | { |
1521 | return first_label_num; |
1522 | } |
1523 | |
1524 | /* If the rtx for label was created during the expansion of a nested |
1525 | function, then first_label_num won't include this label number. |
1526 | Fix this now so that array indices work later. */ |
1527 | |
1528 | void |
1529 | maybe_set_first_label_num (rtx_code_label *x) |
1530 | { |
1531 | if (CODE_LABEL_NUMBER (x) < first_label_num) |
1532 | first_label_num = CODE_LABEL_NUMBER (x); |
1533 | } |
1534 | |
1535 | /* For use by the RTL function loader, when mingling with normal |
1536 | functions. |
1537 | Ensure that label_num is greater than the label num of X, to avoid |
1538 | duplicate labels in the generated assembler. */ |
1539 | |
1540 | void |
1541 | maybe_set_max_label_num (rtx_code_label *x) |
1542 | { |
1543 | if (CODE_LABEL_NUMBER (x) >= label_num) |
1544 | label_num = CODE_LABEL_NUMBER (x) + 1; |
1545 | } |
1546 | |
1547 | |
1548 | /* Return a value representing some low-order bits of X, where the number |
1549 | of low-order bits is given by MODE. Note that no conversion is done |
1550 | between floating-point and fixed-point values, rather, the bit |
1551 | representation is returned. |
1552 | |
1553 | This function handles the cases in common between gen_lowpart, below, |
1554 | and two variants in cse.cc and combine.cc. These are the cases that can |
1555 | be safely handled at all points in the compilation. |
1556 | |
1557 | If this is not a case we can handle, return 0. */ |
1558 | |
1559 | rtx |
1560 | gen_lowpart_common (machine_mode mode, rtx x) |
1561 | { |
1562 | poly_uint64 msize = GET_MODE_SIZE (mode); |
1563 | machine_mode innermode; |
1564 | |
1565 | /* Unfortunately, this routine doesn't take a parameter for the mode of X, |
1566 | so we have to make one up. Yuk. */ |
1567 | innermode = GET_MODE (x); |
1568 | if (CONST_INT_P (x) |
1569 | && known_le (msize * BITS_PER_UNIT, |
1570 | (unsigned HOST_WIDE_INT) HOST_BITS_PER_WIDE_INT)) |
1571 | innermode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, limit: 0).require (); |
1572 | else if (innermode == VOIDmode) |
1573 | innermode = int_mode_for_size (HOST_BITS_PER_DOUBLE_INT, limit: 0).require (); |
1574 | |
1575 | gcc_assert (innermode != VOIDmode && innermode != BLKmode); |
1576 | |
1577 | if (innermode == mode) |
1578 | return x; |
1579 | |
1580 | /* The size of the outer and inner modes must be ordered. */ |
1581 | poly_uint64 xsize = GET_MODE_SIZE (mode: innermode); |
1582 | if (!ordered_p (a: msize, b: xsize)) |
1583 | return 0; |
1584 | |
1585 | if (SCALAR_FLOAT_MODE_P (mode)) |
1586 | { |
1587 | /* Don't allow paradoxical FLOAT_MODE subregs. */ |
1588 | if (maybe_gt (msize, xsize)) |
1589 | return 0; |
1590 | } |
1591 | else |
1592 | { |
1593 | /* MODE must occupy no more of the underlying registers than X. */ |
1594 | poly_uint64 regsize = REGMODE_NATURAL_SIZE (innermode); |
1595 | unsigned int mregs, xregs; |
1596 | if (!can_div_away_from_zero_p (a: msize, b: regsize, quotient: &mregs) |
1597 | || !can_div_away_from_zero_p (a: xsize, b: regsize, quotient: &xregs) |
1598 | || mregs > xregs) |
1599 | return 0; |
1600 | } |
1601 | |
1602 | scalar_int_mode int_mode, int_innermode, from_mode; |
1603 | if ((GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND) |
      && is_a <scalar_int_mode> (mode, &int_mode)
      && is_a <scalar_int_mode> (innermode, &int_innermode)
      && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &from_mode))
1607 | { |
1608 | /* If we are getting the low-order part of something that has been |
1609 | sign- or zero-extended, we can either just use the object being |
1610 | extended or make a narrower extension. If we want an even smaller |
1611 | piece than the size of the object being extended, call ourselves |
1612 | recursively. |
1613 | |
1614 | This case is used mostly by combine and cse. */ |
1615 | |
1616 | if (from_mode == int_mode) |
1617 | return XEXP (x, 0); |
      else if (GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (from_mode))
	return gen_lowpart_common (int_mode, XEXP (x, 0));
      else if (GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (int_innermode))
1621 | return gen_rtx_fmt_e (GET_CODE (x), int_mode, XEXP (x, 0)); |
1622 | } |
1623 | else if (GET_CODE (x) == SUBREG || REG_P (x) |
1624 | || GET_CODE (x) == CONCAT || GET_CODE (x) == CONST_VECTOR |
1625 | || CONST_DOUBLE_AS_FLOAT_P (x) || CONST_SCALAR_INT_P (x) |
1626 | || CONST_POLY_INT_P (x)) |
    return lowpart_subreg (mode, x, innermode);
1628 | |
1629 | /* Otherwise, we can't do this. */ |
1630 | return 0; |
1631 | } |
1632 | |
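/* Return the high-order part of X in mode MODE; the counterpart of
   gen_lowpart_common above.  MODE must be no wider than a word, or else
   match the unit size of X's mode (see the assertion below).  */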
1633 | rtx |
1634 | gen_highpart (machine_mode mode, rtx x) |
1635 | { |
1636 | poly_uint64 msize = GET_MODE_SIZE (mode); |
1637 | rtx result; |
1638 | |
1639 | /* This case loses if X is a subreg. To catch bugs early, |
1640 | complain if an invalid MODE is used even in other cases. */ |
1641 | gcc_assert (known_le (msize, (unsigned int) UNITS_PER_WORD) |
1642 | || known_eq (msize, GET_MODE_UNIT_SIZE (GET_MODE (x)))); |
1643 | |
1644 | /* gen_lowpart_common handles a lot of special cases due to needing to handle |
1645 | paradoxical subregs; it only calls simplify_gen_subreg when certain that |
1646 | it will produce something meaningful. The only case we need to handle |
1647 | specially here is MEM. */ |
1648 | if (MEM_P (x)) |
1649 | { |
      poly_int64 offset = subreg_highpart_offset (mode, GET_MODE (x));
1651 | return adjust_address (x, mode, offset); |
1652 | } |
1653 | |
  result = simplify_gen_subreg (mode, x, GET_MODE (x),
				subreg_highpart_offset (mode, GET_MODE (x)));
1656 | /* Since we handle MEM directly above, we should never get a MEM back |
1657 | from simplify_gen_subreg. */ |
1658 | gcc_assert (result && !MEM_P (result)); |
1659 | |
1660 | return result; |
1661 | } |
1662 | |
1663 | /* Like gen_highpart, but accept mode of EXP operand in case EXP can |
1664 | be VOIDmode constant. */ |
1665 | rtx |
1666 | gen_highpart_mode (machine_mode outermode, machine_mode innermode, rtx exp) |
1667 | { |
1668 | if (GET_MODE (exp) != VOIDmode) |
1669 | { |
1670 | gcc_assert (GET_MODE (exp) == innermode); |
      return gen_highpart (outermode, exp);
1672 | } |
  return simplify_gen_subreg (outermode, exp, innermode,
			      subreg_highpart_offset (outermode, innermode));
1675 | } |
1676 | |
1677 | /* Return the SUBREG_BYTE for a lowpart subreg whose outer mode has |
1678 | OUTER_BYTES bytes and whose inner mode has INNER_BYTES bytes. */ |
1679 | |
1680 | poly_uint64 |
1681 | subreg_size_lowpart_offset (poly_uint64 outer_bytes, poly_uint64 inner_bytes) |
1682 | { |
1683 | gcc_checking_assert (ordered_p (outer_bytes, inner_bytes)); |
1684 | if (maybe_gt (outer_bytes, inner_bytes)) |
1685 | /* Paradoxical subregs always have a SUBREG_BYTE of 0. */ |
1686 | return 0; |
1687 | |
1688 | if (BYTES_BIG_ENDIAN && WORDS_BIG_ENDIAN) |
1689 | return inner_bytes - outer_bytes; |
1690 | else if (!BYTES_BIG_ENDIAN && !WORDS_BIG_ENDIAN) |
1691 | return 0; |
1692 | else |
1693 | return subreg_size_offset_from_lsb (outer_bytes, inner_bytes, 0); |
1694 | } |
1695 | |
1696 | /* Return the SUBREG_BYTE for a highpart subreg whose outer mode has |
1697 | OUTER_BYTES bytes and whose inner mode has INNER_BYTES bytes. */ |
1698 | |
1699 | poly_uint64 |
1700 | subreg_size_highpart_offset (poly_uint64 outer_bytes, poly_uint64 inner_bytes) |
1701 | { |
1702 | gcc_assert (known_ge (inner_bytes, outer_bytes)); |
1703 | |
1704 | if (BYTES_BIG_ENDIAN && WORDS_BIG_ENDIAN) |
1705 | return 0; |
1706 | else if (!BYTES_BIG_ENDIAN && !WORDS_BIG_ENDIAN) |
1707 | return inner_bytes - outer_bytes; |
1708 | else |
1709 | return subreg_size_offset_from_lsb (outer_bytes, inner_bytes, |
1710 | (inner_bytes - outer_bytes) |
1711 | * BITS_PER_UNIT); |
1712 | } |
1713 | |
1714 | /* Return true iff X, assumed to be a SUBREG, |
1715 | refers to the least significant part of its containing reg. |
1716 | If X is not a SUBREG, always return true (it is its own low part!). */ |
1717 | |
1718 | bool |
1719 | subreg_lowpart_p (const_rtx x) |
1720 | { |
1721 | if (GET_CODE (x) != SUBREG) |
1722 | return true; |
1723 | else if (GET_MODE (SUBREG_REG (x)) == VOIDmode) |
1724 | return false; |
1725 | |
1726 | return known_eq (subreg_lowpart_offset (GET_MODE (x), |
1727 | GET_MODE (SUBREG_REG (x))), |
1728 | SUBREG_BYTE (x)); |
1729 | } |
1730 | |
1731 | /* Return subword OFFSET of operand OP. |
1732 | The word number, OFFSET, is interpreted as the word number starting |
1733 | at the low-order address. OFFSET 0 is the low-order word if not |
1734 | WORDS_BIG_ENDIAN, otherwise it is the high-order word. |
1735 | |
1736 | If we cannot extract the required word, we return zero. Otherwise, |
1737 | an rtx corresponding to the requested word will be returned. |
1738 | |
1739 | VALIDATE_ADDRESS is nonzero if the address should be validated. Before |
1740 | reload has completed, a valid address will always be returned. After |
1741 | reload, if a valid address cannot be returned, we return zero. |
1742 | |
1743 | If VALIDATE_ADDRESS is zero, we simply form the required address; validating |
1744 | it is the responsibility of the caller. |
1745 | |
1746 | MODE is the mode of OP in case it is a CONST_INT. |
1747 | |
1748 | ??? This is still rather broken for some cases. The problem for the |
1749 | moment is that all callers of this thing provide no 'goal mode' to |
1750 | tell us to work with. This exists because all callers were written |
   in a word-based SUBREG world.
   Nowadays, most uses of this function can be replaced by
   simplify_subreg.  */
1755 | |
1756 | rtx |
1757 | operand_subword (rtx op, poly_uint64 offset, int validate_address, |
1758 | machine_mode mode) |
1759 | { |
1760 | if (mode == VOIDmode) |
1761 | mode = GET_MODE (op); |
1762 | |
1763 | gcc_assert (mode != VOIDmode); |
1764 | |
1765 | /* If OP is narrower than a word, fail. */ |
1766 | if (mode != BLKmode |
      && maybe_lt (GET_MODE_SIZE (mode), UNITS_PER_WORD))
1768 | return 0; |
1769 | |
1770 | /* If we want a word outside OP, return zero. */ |
1771 | if (mode != BLKmode |
1772 | && maybe_gt ((offset + 1) * UNITS_PER_WORD, GET_MODE_SIZE (mode))) |
1773 | return const0_rtx; |
1774 | |
1775 | /* Form a new MEM at the requested address. */ |
1776 | if (MEM_P (op)) |
1777 | { |
1778 | rtx new_rtx = adjust_address_nv (op, word_mode, offset * UNITS_PER_WORD); |
1779 | |
1780 | if (! validate_address) |
1781 | return new_rtx; |
1782 | |
1783 | else if (reload_completed) |
1784 | { |
1785 | if (! strict_memory_address_addr_space_p (word_mode, |
1786 | XEXP (new_rtx, 0), |
1787 | MEM_ADDR_SPACE (op))) |
1788 | return 0; |
1789 | } |
1790 | else |
1791 | return replace_equiv_address (new_rtx, XEXP (new_rtx, 0)); |
1792 | } |
1793 | |
1794 | /* Rest can be handled by simplify_subreg. */ |
  return simplify_gen_subreg (word_mode, op, mode, (offset * UNITS_PER_WORD));
1796 | } |
1797 | |
1798 | /* Similar to `operand_subword', but never return 0. If we can't |
1799 | extract the required subword, put OP into a register and try again. |
1800 | The second attempt must succeed. We always validate the address in |
1801 | this case. |
1802 | |
1803 | MODE is the mode of OP, in case it is CONST_INT. */ |
1804 | |
1805 | rtx |
1806 | operand_subword_force (rtx op, poly_uint64 offset, machine_mode mode) |
1807 | { |
  rtx result = operand_subword (op, offset, 1, mode);
1809 | |
1810 | if (result) |
1811 | return result; |
1812 | |
1813 | if (mode != BLKmode && mode != VOIDmode) |
1814 | { |
1815 | /* If this is a register which cannot be accessed by words, copy it |
1816 | to a pseudo register. */ |
1817 | if (REG_P (op)) |
1818 | op = copy_to_reg (op); |
1819 | else |
1820 | op = force_reg (mode, op); |
1821 | } |
1822 | |
  result = operand_subword (op, offset, 1, mode);
1824 | gcc_assert (result); |
1825 | |
1826 | return result; |
1827 | } |
1828 | |
1829 | mem_attrs::mem_attrs () |
1830 | : expr (NULL_TREE), |
1831 | offset (0), |
1832 | size (0), |
1833 | alias (0), |
1834 | align (0), |
1835 | addrspace (ADDR_SPACE_GENERIC), |
1836 | offset_known_p (false), |
1837 | size_known_p (false) |
1838 | {} |
1839 | |
/* Return true if the MEM_EXPRs EXPR1 and EXPR2 can be considered equal,
   and false otherwise.  */
1842 | |
1843 | bool |
1844 | mem_expr_equal_p (const_tree expr1, const_tree expr2) |
1845 | { |
1846 | if (expr1 == expr2) |
1847 | return true; |
1848 | |
1849 | if (! expr1 || ! expr2) |
1850 | return false; |
1851 | |
1852 | if (TREE_CODE (expr1) != TREE_CODE (expr2)) |
1853 | return false; |
1854 | |
  return operand_equal_p (expr1, expr2, 0);
1856 | } |
1857 | |
1858 | /* Return OFFSET if XEXP (MEM, 0) - OFFSET is known to be ALIGN |
1859 | bits aligned for 0 <= OFFSET < ALIGN / BITS_PER_UNIT, or |
1860 | -1 if not known. */ |
1861 | |
1862 | int |
1863 | get_mem_align_offset (rtx mem, unsigned int align) |
1864 | { |
1865 | tree expr; |
1866 | poly_uint64 offset; |
1867 | |
1868 | /* This function can't use |
1869 | if (!MEM_EXPR (mem) || !MEM_OFFSET_KNOWN_P (mem) |
1870 | || (MAX (MEM_ALIGN (mem), |
1871 | MAX (align, get_object_alignment (MEM_EXPR (mem)))) |
1872 | < align)) |
1873 | return -1; |
1874 | else |
1875 | return (- MEM_OFFSET (mem)) & (align / BITS_PER_UNIT - 1); |
1876 | for two reasons: |
1877 | - COMPONENT_REFs in MEM_EXPR can have NULL first operand, |
1878 | for <variable>. get_inner_reference doesn't handle it and |
1879 | even if it did, the alignment in that case needs to be determined |
1880 | from DECL_FIELD_CONTEXT's TYPE_ALIGN. |
1881 | - it would do suboptimal job for COMPONENT_REFs, even if MEM_EXPR |
1882 | isn't sufficiently aligned, the object it is in might be. */ |
1883 | gcc_assert (MEM_P (mem)); |
1884 | expr = MEM_EXPR (mem); |
1885 | if (expr == NULL_TREE || !MEM_OFFSET_KNOWN_P (mem)) |
1886 | return -1; |
1887 | |
1888 | offset = MEM_OFFSET (mem); |
1889 | if (DECL_P (expr)) |
1890 | { |
1891 | if (DECL_ALIGN (expr) < align) |
1892 | return -1; |
1893 | } |
1894 | else if (INDIRECT_REF_P (expr)) |
1895 | { |
1896 | if (TYPE_ALIGN (TREE_TYPE (expr)) < (unsigned int) align) |
1897 | return -1; |
1898 | } |
1899 | else if (TREE_CODE (expr) == COMPONENT_REF) |
1900 | { |
1901 | while (1) |
1902 | { |
1903 | tree inner = TREE_OPERAND (expr, 0); |
1904 | tree field = TREE_OPERAND (expr, 1); |
1905 | tree byte_offset = component_ref_field_offset (expr); |
1906 | tree bit_offset = DECL_FIELD_BIT_OFFSET (field); |
1907 | |
1908 | poly_uint64 suboffset; |
1909 | if (!byte_offset |
	      || !poly_int_tree_p (byte_offset, &suboffset)
1911 | || !tree_fits_uhwi_p (bit_offset)) |
1912 | return -1; |
1913 | |
1914 | offset += suboffset; |
1915 | offset += tree_to_uhwi (bit_offset) / BITS_PER_UNIT; |
1916 | |
1917 | if (inner == NULL_TREE) |
1918 | { |
1919 | if (TYPE_ALIGN (DECL_FIELD_CONTEXT (field)) |
1920 | < (unsigned int) align) |
1921 | return -1; |
1922 | break; |
1923 | } |
1924 | else if (DECL_P (inner)) |
1925 | { |
1926 | if (DECL_ALIGN (inner) < align) |
1927 | return -1; |
1928 | break; |
1929 | } |
1930 | else if (TREE_CODE (inner) != COMPONENT_REF) |
1931 | return -1; |
1932 | expr = inner; |
1933 | } |
1934 | } |
1935 | else |
1936 | return -1; |
1937 | |
1938 | HOST_WIDE_INT misalign; |
  if (!known_misalignment (offset, align / BITS_PER_UNIT, &misalign))
1940 | return -1; |
1941 | return misalign; |
1942 | } |
1943 | |
1944 | /* Given REF (a MEM) and T, either the type of X or the expression |
1945 | corresponding to REF, set the memory attributes. OBJECTP is nonzero |
1946 | if we are making a new object of this type. BITPOS is nonzero if |
1947 | there is an offset outstanding on T that will be applied later. */ |
1948 | |
1949 | void |
1950 | set_mem_attributes_minus_bitpos (rtx ref, tree t, int objectp, |
1951 | poly_int64 bitpos) |
1952 | { |
1953 | poly_int64 apply_bitpos = 0; |
1954 | tree type; |
1955 | class mem_attrs attrs, *defattrs, *refattrs; |
1956 | addr_space_t as; |
1957 | |
1958 | /* It can happen that type_for_mode was given a mode for which there |
1959 | is no language-level type. In which case it returns NULL, which |
1960 | we can see here. */ |
1961 | if (t == NULL_TREE) |
1962 | return; |
1963 | |
1964 | type = TYPE_P (t) ? t : TREE_TYPE (t); |
1965 | if (type == error_mark_node) |
1966 | return; |
1967 | |
1968 | /* If we have already set DECL_RTL = ref, get_alias_set will get the |
1969 | wrong answer, as it assumes that DECL_RTL already has the right alias |
1970 | info. Callers should not set DECL_RTL until after the call to |
1971 | set_mem_attributes. */ |
1972 | gcc_assert (!DECL_P (t) || ref != DECL_RTL_IF_SET (t)); |
1973 | |
1974 | /* Get the alias set from the expression or type (perhaps using a |
1975 | front-end routine) and use it. */ |
1976 | attrs.alias = get_alias_set (t); |
1977 | |
1978 | MEM_VOLATILE_P (ref) |= TYPE_VOLATILE (type); |
1979 | MEM_POINTER (ref) = POINTER_TYPE_P (type); |
1980 | |
1981 | /* Default values from pre-existing memory attributes if present. */ |
1982 | refattrs = MEM_ATTRS (ref); |
1983 | if (refattrs) |
1984 | { |
1985 | /* ??? Can this ever happen? Calling this routine on a MEM that |
1986 | already carries memory attributes should probably be invalid. */ |
1987 | attrs.expr = refattrs->expr; |
1988 | attrs.offset_known_p = refattrs->offset_known_p; |
1989 | attrs.offset = refattrs->offset; |
1990 | attrs.size_known_p = refattrs->size_known_p; |
1991 | attrs.size = refattrs->size; |
1992 | attrs.align = refattrs->align; |
1993 | } |
1994 | |
1995 | /* Otherwise, default values from the mode of the MEM reference. */ |
1996 | else |
1997 | { |
1998 | defattrs = mode_mem_attrs[(int) GET_MODE (ref)]; |
1999 | gcc_assert (!defattrs->expr); |
2000 | gcc_assert (!defattrs->offset_known_p); |
2001 | |
2002 | /* Respect mode size. */ |
2003 | attrs.size_known_p = defattrs->size_known_p; |
2004 | attrs.size = defattrs->size; |
2005 | /* ??? Is this really necessary? We probably should always get |
2006 | the size from the type below. */ |
2007 | |
2008 | /* Respect mode alignment for STRICT_ALIGNMENT targets if T is a type; |
2009 | if T is an object, always compute the object alignment below. */ |
2010 | if (TYPE_P (t)) |
2011 | attrs.align = defattrs->align; |
2012 | else |
2013 | attrs.align = BITS_PER_UNIT; |
2014 | /* ??? If T is a type, respecting mode alignment may *also* be wrong |
2015 | e.g. if the type carries an alignment attribute. Should we be |
2016 | able to simply always use TYPE_ALIGN? */ |
2017 | } |
2018 | |
2019 | /* We can set the alignment from the type if we are making an object or if |
2020 | this is an INDIRECT_REF. */ |
2021 | if (objectp || TREE_CODE (t) == INDIRECT_REF) |
2022 | attrs.align = MAX (attrs.align, TYPE_ALIGN (type)); |
2023 | |
2024 | /* If the size is known, we can set that. */ |
2025 | tree new_size = TYPE_SIZE_UNIT (type); |
2026 | |
2027 | /* The address-space is that of the type. */ |
2028 | as = TYPE_ADDR_SPACE (type); |
2029 | |
2030 | /* If T is not a type, we may be able to deduce some more information about |
2031 | the expression. */ |
2032 | if (! TYPE_P (t)) |
2033 | { |
2034 | tree base; |
2035 | |
2036 | if (TREE_THIS_VOLATILE (t)) |
2037 | MEM_VOLATILE_P (ref) = 1; |
2038 | |
2039 | /* Now remove any conversions: they don't change what the underlying |
2040 | object is. Likewise for SAVE_EXPR. */ |
2041 | while (CONVERT_EXPR_P (t) |
2042 | || TREE_CODE (t) == VIEW_CONVERT_EXPR |
2043 | || TREE_CODE (t) == SAVE_EXPR) |
2044 | t = TREE_OPERAND (t, 0); |
2045 | |
2046 | /* Note whether this expression can trap. */ |
2047 | MEM_NOTRAP_P (ref) = !tree_could_trap_p (t); |
2048 | |
2049 | base = get_base_address (t); |
2050 | if (base) |
2051 | { |
2052 | if (DECL_P (base) |
2053 | && TREE_READONLY (base) |
2054 | && (TREE_STATIC (base) || DECL_EXTERNAL (base)) |
2055 | && !TREE_THIS_VOLATILE (base)) |
2056 | MEM_READONLY_P (ref) = 1; |
2057 | |
2058 | /* Mark static const strings readonly as well. */ |
2059 | if (TREE_CODE (base) == STRING_CST |
2060 | && TREE_READONLY (base) |
2061 | && TREE_STATIC (base)) |
2062 | MEM_READONLY_P (ref) = 1; |
2063 | |
2064 | /* Address-space information is on the base object. */ |
2065 | if (TREE_CODE (base) == MEM_REF |
2066 | || TREE_CODE (base) == TARGET_MEM_REF) |
2067 | as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (base, |
2068 | 0)))); |
2069 | else |
2070 | as = TYPE_ADDR_SPACE (TREE_TYPE (base)); |
2071 | } |
2072 | |
      /* If this expression uses its parent's alias set, mark it such
	 that we won't change it.  */
2075 | if (component_uses_parent_alias_set_from (t) != NULL_TREE) |
2076 | MEM_KEEP_ALIAS_SET_P (ref) = 1; |
2077 | |
2078 | /* If this is a decl, set the attributes of the MEM from it. */ |
2079 | if (DECL_P (t)) |
2080 | { |
2081 | attrs.expr = t; |
2082 | attrs.offset_known_p = true; |
2083 | attrs.offset = 0; |
2084 | apply_bitpos = bitpos; |
2085 | new_size = DECL_SIZE_UNIT (t); |
2086 | } |
2087 | |
2088 | /* ??? If we end up with a constant or a descriptor do not |
2089 | record a MEM_EXPR. */ |
2090 | else if (CONSTANT_CLASS_P (t) |
2091 | || TREE_CODE (t) == CONSTRUCTOR) |
2092 | ; |
2093 | |
2094 | /* If this is a field reference, record it. */ |
2095 | else if (TREE_CODE (t) == COMPONENT_REF) |
2096 | { |
2097 | attrs.expr = t; |
2098 | attrs.offset_known_p = true; |
2099 | attrs.offset = 0; |
2100 | apply_bitpos = bitpos; |
2101 | if (DECL_BIT_FIELD (TREE_OPERAND (t, 1))) |
2102 | new_size = DECL_SIZE_UNIT (TREE_OPERAND (t, 1)); |
2103 | } |
2104 | |
2105 | /* Else record it. */ |
2106 | else |
2107 | { |
2108 | gcc_assert (handled_component_p (t) |
2109 | || TREE_CODE (t) == MEM_REF |
2110 | || TREE_CODE (t) == TARGET_MEM_REF); |
2111 | attrs.expr = t; |
2112 | attrs.offset_known_p = true; |
2113 | attrs.offset = 0; |
2114 | apply_bitpos = bitpos; |
2115 | } |
2116 | |
2117 | /* If this is a reference based on a partitioned decl replace the |
2118 | base with a MEM_REF of the pointer representative we created |
2119 | during stack slot partitioning. */ |
2120 | if (attrs.expr |
2121 | && VAR_P (base) |
	  && ! is_global_var (base)
2123 | && cfun->gimple_df->decls_to_pointers != NULL) |
2124 | { |
	  tree *namep = cfun->gimple_df->decls_to_pointers->get (base);
2126 | if (namep) |
2127 | { |
2128 | attrs.expr = unshare_expr (attrs.expr); |
2129 | tree *orig_base = &attrs.expr; |
	      while (handled_component_p (*orig_base))
2131 | orig_base = &TREE_OPERAND (*orig_base, 0); |
2132 | if (TREE_CODE (*orig_base) == MEM_REF |
2133 | || TREE_CODE (*orig_base) == TARGET_MEM_REF) |
2134 | TREE_OPERAND (*orig_base, 0) = *namep; |
2135 | else |
2136 | { |
2137 | tree aptrt = reference_alias_ptr_type (*orig_base); |
2138 | *orig_base = build2 (MEM_REF, TREE_TYPE (*orig_base), |
2139 | *namep, build_int_cst (aptrt, 0)); |
2140 | } |
2141 | } |
2142 | } |
2143 | |
2144 | /* Compute the alignment. */ |
2145 | unsigned int obj_align; |
2146 | unsigned HOST_WIDE_INT obj_bitpos; |
2147 | get_object_alignment_1 (t, &obj_align, &obj_bitpos); |
      unsigned int diff_align = known_alignment (obj_bitpos - bitpos);
2149 | if (diff_align != 0) |
2150 | obj_align = MIN (obj_align, diff_align); |
2151 | attrs.align = MAX (attrs.align, obj_align); |
2152 | } |
2153 | |
2154 | poly_uint64 const_size; |
  if (poly_int_tree_p (new_size, &const_size))
2156 | { |
2157 | attrs.size_known_p = true; |
2158 | attrs.size = const_size; |
2159 | } |
2160 | |
2161 | /* If we modified OFFSET based on T, then subtract the outstanding |
2162 | bit position offset. Similarly, increase the size of the accessed |
2163 | object to contain the negative offset. */ |
  if (maybe_ne (apply_bitpos, 0))
2165 | { |
2166 | gcc_assert (attrs.offset_known_p); |
2167 | poly_int64 bytepos = bits_to_bytes_round_down (apply_bitpos); |
2168 | attrs.offset -= bytepos; |
2169 | if (attrs.size_known_p) |
2170 | attrs.size += bytepos; |
2171 | } |
2172 | |
2173 | /* Now set the attributes we computed above. */ |
2174 | attrs.addrspace = as; |
  set_mem_attrs (ref, &attrs);
2176 | } |
2177 | |
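/* Wrapper for set_mem_attributes_minus_bitpos with a zero bit position.  */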
2178 | void |
2179 | set_mem_attributes (rtx ref, tree t, int objectp) |
2180 | { |
  set_mem_attributes_minus_bitpos (ref, t, objectp, 0);
2182 | } |
2183 | |
2184 | /* Set the alias set of MEM to SET. */ |
2185 | |
2186 | void |
2187 | set_mem_alias_set (rtx mem, alias_set_type set) |
2188 | { |
2189 | /* If the new and old alias sets don't conflict, something is wrong. */ |
2190 | gcc_checking_assert (alias_sets_conflict_p (set, MEM_ALIAS_SET (mem))); |
  mem_attrs attrs (*get_mem_attrs (mem));
  attrs.alias = set;
  set_mem_attrs (mem, &attrs);
2194 | } |
2195 | |
2196 | /* Set the address space of MEM to ADDRSPACE (target-defined). */ |
2197 | |
2198 | void |
2199 | set_mem_addr_space (rtx mem, addr_space_t addrspace) |
2200 | { |
  mem_attrs attrs (*get_mem_attrs (mem));
  attrs.addrspace = addrspace;
  set_mem_attrs (mem, &attrs);
2204 | } |
2205 | |
2206 | /* Set the alignment of MEM to ALIGN bits. */ |
2207 | |
2208 | void |
2209 | set_mem_align (rtx mem, unsigned int align) |
2210 | { |
  mem_attrs attrs (*get_mem_attrs (mem));
  attrs.align = align;
  set_mem_attrs (mem, &attrs);
2214 | } |
2215 | |
2216 | /* Set the expr for MEM to EXPR. */ |
2217 | |
2218 | void |
2219 | set_mem_expr (rtx mem, tree expr) |
2220 | { |
  mem_attrs attrs (*get_mem_attrs (mem));
  attrs.expr = expr;
  set_mem_attrs (mem, &attrs);
2224 | } |
2225 | |
2226 | /* Set the offset of MEM to OFFSET. */ |
2227 | |
2228 | void |
2229 | set_mem_offset (rtx mem, poly_int64 offset) |
2230 | { |
  mem_attrs attrs (*get_mem_attrs (mem));
  attrs.offset_known_p = true;
  attrs.offset = offset;
  set_mem_attrs (mem, &attrs);
2235 | } |
2236 | |
2237 | /* Clear the offset of MEM. */ |
2238 | |
2239 | void |
2240 | clear_mem_offset (rtx mem) |
2241 | { |
  mem_attrs attrs (*get_mem_attrs (mem));
  attrs.offset_known_p = false;
  set_mem_attrs (mem, &attrs);
2245 | } |
2246 | |
2247 | /* Set the size of MEM to SIZE. */ |
2248 | |
2249 | void |
2250 | set_mem_size (rtx mem, poly_int64 size) |
2251 | { |
  mem_attrs attrs (*get_mem_attrs (mem));
  attrs.size_known_p = true;
  attrs.size = size;
  set_mem_attrs (mem, &attrs);
2256 | } |
2257 | |
2258 | /* Clear the size of MEM. */ |
2259 | |
2260 | void |
2261 | clear_mem_size (rtx mem) |
2262 | { |
  mem_attrs attrs (*get_mem_attrs (mem));
  attrs.size_known_p = false;
  set_mem_attrs (mem, &attrs);
2266 | } |
2267 | |
2268 | /* Return a memory reference like MEMREF, but with its mode changed to MODE |
2269 | and its address changed to ADDR. (VOIDmode means don't change the mode. |
2270 | NULL for ADDR means don't change the address.) VALIDATE is nonzero if the |
2271 | returned memory location is required to be valid. INPLACE is true if any |
2272 | changes can be made directly to MEMREF or false if MEMREF must be treated |
2273 | as immutable. |
2274 | |
2275 | The memory attributes are not changed. */ |
2276 | |
2277 | static rtx |
2278 | change_address_1 (rtx memref, machine_mode mode, rtx addr, int validate, |
2279 | bool inplace) |
2280 | { |
2281 | addr_space_t as; |
2282 | rtx new_rtx; |
2283 | |
2284 | gcc_assert (MEM_P (memref)); |
2285 | as = MEM_ADDR_SPACE (memref); |
2286 | if (mode == VOIDmode) |
2287 | mode = GET_MODE (memref); |
2288 | if (addr == 0) |
2289 | addr = XEXP (memref, 0); |
2290 | if (mode == GET_MODE (memref) && addr == XEXP (memref, 0) |
2291 | && (!validate || memory_address_addr_space_p (mode, addr, as))) |
2292 | return memref; |
2293 | |
  /* Don't validate the address for LRA.  LRA can make the address valid
     by itself in the most efficient way.  */
2296 | if (validate && !lra_in_progress) |
2297 | { |
2298 | if (reload_in_progress || reload_completed) |
2299 | gcc_assert (memory_address_addr_space_p (mode, addr, as)); |
2300 | else |
2301 | addr = memory_address_addr_space (mode, addr, as); |
2302 | } |
2303 | |
2304 | if (rtx_equal_p (addr, XEXP (memref, 0)) && mode == GET_MODE (memref)) |
2305 | return memref; |
2306 | |
2307 | if (inplace) |
2308 | { |
2309 | XEXP (memref, 0) = addr; |
2310 | return memref; |
2311 | } |
2312 | |
2313 | new_rtx = gen_rtx_MEM (mode, addr); |
2314 | MEM_COPY_ATTRIBUTES (new_rtx, memref); |
2315 | return new_rtx; |
2316 | } |
2317 | |
2318 | /* Like change_address_1 with VALIDATE nonzero, but we are not saying in what |
2319 | way we are changing MEMREF, so we only preserve the alias set. */ |
2320 | |
2321 | rtx |
2322 | change_address (rtx memref, machine_mode mode, rtx addr) |
2323 | { |
  rtx new_rtx = change_address_1 (memref, mode, addr, 1, false);
2325 | machine_mode mmode = GET_MODE (new_rtx); |
2326 | class mem_attrs *defattrs; |
2327 | |
  mem_attrs attrs (*get_mem_attrs (memref));
2329 | defattrs = mode_mem_attrs[(int) mmode]; |
2330 | attrs.expr = NULL_TREE; |
2331 | attrs.offset_known_p = false; |
2332 | attrs.size_known_p = defattrs->size_known_p; |
2333 | attrs.size = defattrs->size; |
2334 | attrs.align = defattrs->align; |
2335 | |
2336 | /* If there are no changes, just return the original memory reference. */ |
2337 | if (new_rtx == memref) |
2338 | { |
      if (mem_attrs_eq_p (get_mem_attrs (memref), &attrs))
2340 | return new_rtx; |
2341 | |
      new_rtx = gen_rtx_MEM (mmode, XEXP (memref, 0));
2343 | MEM_COPY_ATTRIBUTES (new_rtx, memref); |
2344 | } |
2345 | |
  set_mem_attrs (new_rtx, &attrs);
2347 | return new_rtx; |
2348 | } |
2349 | |
2350 | /* Return a memory reference like MEMREF, but with its mode changed |
2351 | to MODE and its address offset by OFFSET bytes. If VALIDATE is |
2352 | nonzero, the memory address is forced to be valid. |
2353 | If ADJUST_ADDRESS is zero, OFFSET is only used to update MEM_ATTRS |
2354 | and the caller is responsible for adjusting MEMREF base register. |
2355 | If ADJUST_OBJECT is zero, the underlying object associated with the |
2356 | memory reference is left unchanged and the caller is responsible for |
2357 | dealing with it. Otherwise, if the new memory reference is outside |
2358 | the underlying object, even partially, then the object is dropped. |
2359 | SIZE, if nonzero, is the size of an access in cases where MODE |
2360 | has no inherent size. */ |
2361 | |
2362 | rtx |
2363 | adjust_address_1 (rtx memref, machine_mode mode, poly_int64 offset, |
2364 | int validate, int adjust_address, int adjust_object, |
2365 | poly_int64 size) |
2366 | { |
2367 | rtx addr = XEXP (memref, 0); |
2368 | rtx new_rtx; |
2369 | scalar_int_mode address_mode; |
  class mem_attrs attrs (*get_mem_attrs (memref)), *defattrs;
2371 | unsigned HOST_WIDE_INT max_align; |
2372 | #ifdef POINTERS_EXTEND_UNSIGNED |
2373 | scalar_int_mode pointer_mode |
2374 | = targetm.addr_space.pointer_mode (attrs.addrspace); |
2375 | #endif |
2376 | |
2377 | /* VOIDmode means no mode change for change_address_1. */ |
2378 | if (mode == VOIDmode) |
2379 | mode = GET_MODE (memref); |
2380 | |
2381 | /* Take the size of non-BLKmode accesses from the mode. */ |
2382 | defattrs = mode_mem_attrs[(int) mode]; |
2383 | if (defattrs->size_known_p) |
2384 | size = defattrs->size; |
2385 | |
2386 | /* If there are no changes, just return the original memory reference. */ |
2387 | if (mode == GET_MODE (memref) |
2388 | && known_eq (offset, 0) |
2389 | && (known_eq (size, 0) |
2390 | || (attrs.size_known_p && known_eq (attrs.size, size))) |
2391 | && (!validate || memory_address_addr_space_p (mode, addr, |
2392 | attrs.addrspace))) |
2393 | return memref; |
2394 | |
2395 | /* ??? Prefer to create garbage instead of creating shared rtl. |
2396 | This may happen even if offset is nonzero -- consider |
2397 | (plus (plus reg reg) const_int) -- so do this always. */ |
2398 | addr = copy_rtx (addr); |
2399 | |
2400 | /* Convert a possibly large offset to a signed value within the |
2401 | range of the target address space. */ |
  address_mode = get_address_mode (memref);
2403 | offset = trunc_int_for_mode (offset, address_mode); |
2404 | |
2405 | if (adjust_address) |
2406 | { |
2407 | /* If MEMREF is a LO_SUM and the offset is within the alignment of the |
2408 | object, we can merge it into the LO_SUM. */ |
2409 | if (GET_MODE (memref) != BLKmode |
2410 | && GET_CODE (addr) == LO_SUM |
	  && known_in_range_p (offset,
			       0, (GET_MODE_ALIGNMENT (GET_MODE (memref))
				   / BITS_PER_UNIT)))
2414 | addr = gen_rtx_LO_SUM (address_mode, XEXP (addr, 0), |
2415 | plus_constant (address_mode, |
2416 | XEXP (addr, 1), offset)); |
2417 | #ifdef POINTERS_EXTEND_UNSIGNED |
2418 | /* If MEMREF is a ZERO_EXTEND from pointer_mode and the offset is valid |
2419 | in that mode, we merge it into the ZERO_EXTEND. We take advantage of |
2420 | the fact that pointers are not allowed to overflow. */ |
2421 | else if (POINTERS_EXTEND_UNSIGNED > 0 |
2422 | && GET_CODE (addr) == ZERO_EXTEND |
2423 | && GET_MODE (XEXP (addr, 0)) == pointer_mode |
2424 | && known_eq (trunc_int_for_mode (offset, pointer_mode), offset)) |
2425 | addr = gen_rtx_ZERO_EXTEND (address_mode, |
2426 | plus_constant (pointer_mode, |
2427 | XEXP (addr, 0), offset)); |
2428 | #endif |
2429 | else |
2430 | addr = plus_constant (address_mode, addr, offset); |
2431 | } |
2432 | |
  new_rtx = change_address_1 (memref, mode, addr, validate, false);
2434 | |
2435 | /* If the address is a REG, change_address_1 rightfully returns memref, |
2436 | but this would destroy memref's MEM_ATTRS. */ |
  if (new_rtx == memref && maybe_ne (offset, 0))
2438 | new_rtx = copy_rtx (new_rtx); |
2439 | |
2440 | /* Conservatively drop the object if we don't know where we start from. */ |
2441 | if (adjust_object && (!attrs.offset_known_p || !attrs.size_known_p)) |
2442 | { |
2443 | attrs.expr = NULL_TREE; |
2444 | attrs.alias = 0; |
2445 | } |
2446 | |
2447 | /* Compute the new values of the memory attributes due to this adjustment. |
2448 | We add the offsets and update the alignment. */ |
2449 | if (attrs.offset_known_p) |
2450 | { |
2451 | attrs.offset += offset; |
2452 | |
2453 | /* Drop the object if the new left end is not within its bounds. */ |
      if (adjust_object && maybe_lt (attrs.offset, 0))
2455 | { |
2456 | attrs.expr = NULL_TREE; |
2457 | attrs.alias = 0; |
2458 | } |
2459 | } |
2460 | |
  /* Compute the new alignment by taking the MIN of the alignment and the
     lowest-order set bit in OFFSET, but don't change the alignment if
     OFFSET is zero.  */
  if (maybe_ne (offset, 0))
2465 | { |
      max_align = known_alignment (offset) * BITS_PER_UNIT;
2467 | attrs.align = MIN (attrs.align, max_align); |
2468 | } |
2469 | |
  if (maybe_ne (size, 0))
2471 | { |
2472 | /* Drop the object if the new right end is not within its bounds. */ |
2473 | if (adjust_object && maybe_gt (offset + size, attrs.size)) |
2474 | { |
2475 | attrs.expr = NULL_TREE; |
2476 | attrs.alias = 0; |
2477 | } |
2478 | attrs.size_known_p = true; |
2479 | attrs.size = size; |
2480 | } |
2481 | else if (attrs.size_known_p) |
2482 | { |
2483 | gcc_assert (!adjust_object); |
2484 | attrs.size -= offset; |
2485 | /* ??? The store_by_pieces machinery generates negative sizes, |
2486 | so don't assert for that here. */ |
2487 | } |
2488 | |
  set_mem_attrs (new_rtx, &attrs);
2490 | |
2491 | return new_rtx; |
2492 | } |
2493 | |
2494 | /* Return a memory reference like MEMREF, but with its mode changed |
2495 | to MODE and its address changed to ADDR, which is assumed to be |
2496 | MEMREF offset by OFFSET bytes. If VALIDATE is |
2497 | nonzero, the memory address is forced to be valid. */ |
2498 | |
2499 | rtx |
2500 | adjust_automodify_address_1 (rtx memref, machine_mode mode, rtx addr, |
2501 | poly_int64 offset, int validate) |
2502 | { |
  memref = change_address_1 (memref, VOIDmode, addr, validate, false);
  return adjust_address_1 (memref, mode, offset, validate, 0, 0, 0);
2505 | } |
2506 | |
2507 | /* Return a memory reference like MEMREF, but whose address is changed by |
2508 | adding OFFSET, an RTX, to it. POW2 is the highest power of two factor |
2509 | known to be in OFFSET (possibly 1). */ |
2510 | |
2511 | rtx |
2512 | offset_address (rtx memref, rtx offset, unsigned HOST_WIDE_INT pow2) |
2513 | { |
2514 | rtx new_rtx, addr = XEXP (memref, 0); |
2515 | machine_mode address_mode; |
2516 | class mem_attrs *defattrs; |
2517 | |
  mem_attrs attrs (*get_mem_attrs (memref));
  address_mode = get_address_mode (memref);
  new_rtx = simplify_gen_binary (PLUS, address_mode, addr, offset);
2521 | |
2522 | /* At this point we don't know _why_ the address is invalid. It |
2523 | could have secondary memory references, multiplies or anything. |
2524 | |
2525 | However, if we did go and rearrange things, we can wind up not |
2526 | being able to recognize the magic around pic_offset_table_rtx. |
2527 | This stuff is fragile, and is yet another example of why it is |
2528 | bad to expose PIC machinery too early. */ |
2529 | if (! memory_address_addr_space_p (GET_MODE (memref), new_rtx, |
2530 | attrs.addrspace) |
2531 | && GET_CODE (addr) == PLUS |
2532 | && XEXP (addr, 0) == pic_offset_table_rtx) |
2533 | { |
2534 | addr = force_reg (GET_MODE (addr), addr); |
      new_rtx = simplify_gen_binary (PLUS, address_mode, addr, offset);
2536 | } |
2537 | |
2538 | update_temp_slot_address (XEXP (memref, 0), new_rtx); |
  new_rtx = change_address_1 (memref, VOIDmode, new_rtx, 1, false);
2540 | |
2541 | /* If there are no changes, just return the original memory reference. */ |
2542 | if (new_rtx == memref) |
2543 | return new_rtx; |
2544 | |
2545 | /* Update the alignment to reflect the offset. Reset the offset, which |
2546 | we don't know. */ |
2547 | defattrs = mode_mem_attrs[(int) GET_MODE (new_rtx)]; |
2548 | attrs.offset_known_p = false; |
2549 | attrs.size_known_p = defattrs->size_known_p; |
2550 | attrs.size = defattrs->size; |
2551 | attrs.align = MIN (attrs.align, pow2 * BITS_PER_UNIT); |
  set_mem_attrs (new_rtx, &attrs);
2553 | return new_rtx; |
2554 | } |
2555 | |
2556 | /* Return a memory reference like MEMREF, but with its address changed to |
2557 | ADDR. The caller is asserting that the actual piece of memory pointed |
2558 | to is the same, just the form of the address is being changed, such as |
2559 | by putting something into a register. INPLACE is true if any changes |
2560 | can be made directly to MEMREF or false if MEMREF must be treated as |
2561 | immutable. */ |
2562 | |
2563 | rtx |
2564 | replace_equiv_address (rtx memref, rtx addr, bool inplace) |
2565 | { |
2566 | /* change_address_1 copies the memory attribute structure without change |
2567 | and that's exactly what we want here. */ |
2568 | update_temp_slot_address (XEXP (memref, 0), addr); |
  return change_address_1 (memref, VOIDmode, addr, 1, inplace);
2570 | } |
2571 | |
2572 | /* Likewise, but the reference is not required to be valid. */ |
2573 | |
2574 | rtx |
2575 | replace_equiv_address_nv (rtx memref, rtx addr, bool inplace) |
2576 | { |
  return change_address_1 (memref, VOIDmode, addr, 0, inplace);
2578 | } |
2579 | |
2580 | |
2581 | /* Emit insns to reload VALUE into a new register. VALUE is an |
2582 | auto-increment or auto-decrement RTX whose operand is a register or |
2583 | memory location; so reloading involves incrementing that location. |
2584 | |
2585 | INC_AMOUNT is the number to increment or decrement by (always |
2586 | positive and ignored for POST_MODIFY/PRE_MODIFY). |
2587 | |
2588 | Return a pseudo containing the result. */ |
2589 | rtx |
2590 | address_reload_context::emit_autoinc (rtx value, poly_int64 inc_amount) |
2591 | { |
2592 | /* Since we're going to call recog, and might be called within recog, |
2593 | we need to ensure we save and restore recog_data. */ |
2594 | recog_data_saver recog_save; |
2595 | |
2596 | /* REG or MEM to be copied and incremented. */ |
2597 | rtx incloc = XEXP (value, 0); |
2598 | |
2599 | const rtx_code code = GET_CODE (value); |
2600 | const bool post_p |
2601 | = code == POST_DEC || code == POST_INC || code == POST_MODIFY; |
2602 | |
2603 | bool plus_p = true; |
2604 | rtx inc; |
2605 | if (code == PRE_MODIFY || code == POST_MODIFY) |
2606 | { |
2607 | gcc_assert (GET_CODE (XEXP (value, 1)) == PLUS |
2608 | || GET_CODE (XEXP (value, 1)) == MINUS); |
2609 | gcc_assert (rtx_equal_p (XEXP (XEXP (value, 1), 0), XEXP (value, 0))); |
2610 | plus_p = GET_CODE (XEXP (value, 1)) == PLUS; |
2611 | inc = XEXP (XEXP (value, 1), 1); |
2612 | } |
2613 | else |
2614 | { |
2615 | if (code == PRE_DEC || code == POST_DEC) |
2616 | inc_amount = -inc_amount; |
2617 | |
      inc = gen_int_mode (inc_amount, GET_MODE (value));
2619 | } |
2620 | |
2621 | rtx result; |
2622 | if (!post_p && REG_P (incloc)) |
2623 | result = incloc; |
2624 | else |
2625 | { |
2626 | result = get_reload_reg (); |
2627 | /* First copy the location to the result register. */ |
2628 | emit_insn (gen_move_insn (result, incloc)); |
2629 | } |
2630 | |
2631 | /* See if we can directly increment INCLOC. */ |
2632 | rtx_insn *last = get_last_insn (); |
2633 | rtx_insn *add_insn = emit_insn (plus_p |
2634 | ? gen_add2_insn (incloc, inc) |
2635 | : gen_sub2_insn (incloc, inc)); |
  const int icode = recog_memoized (add_insn);
2637 | if (icode >= 0) |
2638 | { |
2639 | if (!post_p && result != incloc) |
2640 | emit_insn (gen_move_insn (result, incloc)); |
2641 | return result; |
2642 | } |
2643 | delete_insns_since (last); |
2644 | |
  /* If we couldn't do the increment directly, we must do it in RESULT.
     The way we do this depends on whether this is pre- or
     post-increment.  For pre-increment, copy INCLOC to the reload
     register, increment it there, then save back.  */
2649 | if (!post_p) |
2650 | { |
2651 | if (incloc != result) |
2652 | emit_insn (gen_move_insn (result, incloc)); |
2653 | if (plus_p) |
2654 | emit_insn (gen_add2_insn (result, inc)); |
2655 | else |
2656 | emit_insn (gen_sub2_insn (result, inc)); |
2657 | if (incloc != result) |
2658 | emit_insn (gen_move_insn (incloc, result)); |
2659 | } |
2660 | else |
2661 | { |
2662 | /* Post-increment. |
2663 | |
2664 | Because this might be a jump insn or a compare, and because |
2665 | RESULT may not be available after the insn in an input |
2666 | reload, we must do the incrementing before the insn being |
2667 | reloaded for. |
2668 | |
2669 | We have already copied INCLOC to RESULT. Increment the copy in |
2670 | RESULT, save that back, then decrement RESULT so it has |
2671 | the original value. */ |
2672 | if (plus_p) |
2673 | emit_insn (gen_add2_insn (result, inc)); |
2674 | else |
2675 | emit_insn (gen_sub2_insn (result, inc)); |
2676 | emit_insn (gen_move_insn (incloc, result)); |
2677 | /* Restore non-modified value for the result. We prefer this |
2678 | way because it does not require an additional hard |
2679 | register. */ |
2680 | if (plus_p) |
2681 | { |
2682 | poly_int64 offset; |
	  if (poly_int_rtx_p (inc, &offset))
	    emit_insn (gen_add2_insn (result,
				      gen_int_mode (-offset,
						    GET_MODE (result))));
2687 | else |
2688 | emit_insn (gen_sub2_insn (result, inc)); |
2689 | } |
2690 | else |
2691 | emit_insn (gen_add2_insn (result, inc)); |
2692 | } |
2693 | return result; |
2694 | } |
2695 | |
2696 | /* Return a memory reference like MEM, but with the address reloaded into a |
2697 | pseudo register. */ |
2698 | |
2699 | rtx |
2700 | force_reload_address (rtx mem) |
2701 | { |
2702 | rtx addr = XEXP (mem, 0); |
2703 | if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC) |
2704 | { |
2705 | const auto size = GET_MODE_SIZE (GET_MODE (mem)); |
      addr = address_reload_context ().emit_autoinc (addr, size);
2707 | } |
2708 | else |
2709 | addr = force_reg (Pmode, addr); |
2710 | |
  return replace_equiv_address (mem, addr);
2712 | } |
2713 | |
2714 | /* Return a memory reference like MEMREF, but with its mode widened to |
2715 | MODE and offset by OFFSET. This would be used by targets that e.g. |
2716 | cannot issue QImode memory operations and have to use SImode memory |
2717 | operations plus masking logic. */ |
2718 | |
2719 | rtx |
2720 | widen_memory_access (rtx memref, machine_mode mode, poly_int64 offset) |
2721 | { |
  rtx new_rtx = adjust_address_1 (memref, mode, offset, 1, 1, 0, 0);
2723 | poly_uint64 size = GET_MODE_SIZE (mode); |
2724 | |
2725 | /* If there are no changes, just return the original memory reference. */ |
2726 | if (new_rtx == memref) |
2727 | return new_rtx; |
2728 | |
  mem_attrs attrs (*get_mem_attrs (new_rtx));
2730 | |
2731 | /* If we don't know what offset we were at within the expression, then |
2732 | we can't know if we've overstepped the bounds. */ |
2733 | if (! attrs.offset_known_p) |
2734 | attrs.expr = NULL_TREE; |
2735 | |
2736 | while (attrs.expr) |
2737 | { |
2738 | if (TREE_CODE (attrs.expr) == COMPONENT_REF) |
2739 | { |
2740 | tree field = TREE_OPERAND (attrs.expr, 1); |
2741 | tree offset = component_ref_field_offset (attrs.expr); |
2742 | |
2743 | if (! DECL_SIZE_UNIT (field)) |
2744 | { |
2745 | attrs.expr = NULL_TREE; |
2746 | break; |
2747 | } |
2748 | |
2749 | /* Is the field at least as large as the access? If so, ok, |
2750 | otherwise strip back to the containing structure. */ |
2751 | if (poly_int_tree_p (DECL_SIZE_UNIT (field)) |
2752 | && known_ge (wi::to_poly_offset (DECL_SIZE_UNIT (field)), size) |
2753 | && known_ge (attrs.offset, 0)) |
2754 | break; |
2755 | |
2756 | poly_uint64 suboffset; |
	  if (!poly_int_tree_p (offset, &suboffset))
2758 | { |
2759 | attrs.expr = NULL_TREE; |
2760 | break; |
2761 | } |
2762 | |
2763 | attrs.expr = TREE_OPERAND (attrs.expr, 0); |
2764 | attrs.offset += suboffset; |
2765 | attrs.offset += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)) |
2766 | / BITS_PER_UNIT); |
2767 | } |
2768 | /* Similarly for the decl. */ |
2769 | else if (DECL_P (attrs.expr) |
2770 | && DECL_SIZE_UNIT (attrs.expr) |
2771 | && poly_int_tree_p (DECL_SIZE_UNIT (attrs.expr)) |
2772 | && known_ge (wi::to_poly_offset (DECL_SIZE_UNIT (attrs.expr)), |
2773 | size) |
2774 | && known_ge (attrs.offset, 0)) |
2775 | break; |
2776 | else |
2777 | { |
2778 | /* The widened memory access overflows the expression, which means |
2779 | that it could alias another expression. Zap it. */ |
2780 | attrs.expr = NULL_TREE; |
2781 | break; |
2782 | } |
2783 | } |
2784 | |
2785 | if (! attrs.expr) |
2786 | attrs.offset_known_p = false; |
2787 | |
2788 | /* The widened memory may alias other stuff, so zap the alias set. */ |
2789 | /* ??? Maybe use get_alias_set on any remaining expression. */ |
2790 | attrs.alias = 0; |
2791 | attrs.size_known_p = true; |
2792 | attrs.size = size; |
  set_mem_attrs (new_rtx, &attrs);
2794 | return new_rtx; |
2795 | } |
2796 | |
2797 | /* A fake decl that is used as the MEM_EXPR of spill slots. */ |
2798 | static GTY(()) tree spill_slot_decl; |
2799 | |
2800 | tree |
2801 | get_spill_slot_decl (bool force_build_p) |
2802 | { |
2803 | tree d = spill_slot_decl; |
2804 | rtx rd; |
2805 | |
2806 | if (d || !force_build_p) |
2807 | return d; |
2808 | |
2809 | d = build_decl (DECL_SOURCE_LOCATION (current_function_decl), |
		  VAR_DECL, get_identifier ("%sfp"), void_type_node);
2811 | DECL_ARTIFICIAL (d) = 1; |
2812 | DECL_IGNORED_P (d) = 1; |
2813 | TREE_USED (d) = 1; |
2814 | spill_slot_decl = d; |
2815 | |
2816 | rd = gen_rtx_MEM (BLKmode, frame_pointer_rtx); |
2817 | MEM_NOTRAP_P (rd) = 1; |
2818 | mem_attrs attrs (*mode_mem_attrs[(int) BLKmode]); |
2819 | attrs.alias = new_alias_set (); |
2820 | attrs.expr = d; |
  set_mem_attrs (rd, &attrs);
2822 | SET_DECL_RTL (d, rd); |
2823 | |
2824 | return d; |
2825 | } |
2826 | |
2827 | /* Given MEM, a result from assign_stack_local, fill in the memory |
2828 | attributes as appropriate for a register allocator spill slot. |
2829 | These slots are not aliasable by other memory. We arrange for |
2830 | them all to use a single MEM_EXPR, so that the aliasing code can |
2831 | work properly in the case of shared spill slots. */ |
2832 | |
2833 | void |
2834 | set_mem_attrs_for_spill (rtx mem) |
2835 | { |
2836 | rtx addr; |
2837 | |
  mem_attrs attrs (*get_mem_attrs (mem));
  attrs.expr = get_spill_slot_decl (true);
2840 | attrs.alias = MEM_ALIAS_SET (DECL_RTL (attrs.expr)); |
2841 | attrs.addrspace = ADDR_SPACE_GENERIC; |
2842 | |
2843 | /* We expect the incoming memory to be of the form: |
2844 | (mem:MODE (plus (reg sfp) (const_int offset))) |
2845 | with perhaps the plus missing for offset = 0. */ |
2846 | addr = XEXP (mem, 0); |
2847 | attrs.offset_known_p = true; |
2848 | strip_offset (addr, &attrs.offset); |
2849 | |
  set_mem_attrs (mem, &attrs);
2851 | MEM_NOTRAP_P (mem) = 1; |
2852 | } |
2853 | |
2854 | /* Return a newly created CODE_LABEL rtx with a unique label number. */ |
2855 | |
2856 | rtx_code_label * |
2857 | gen_label_rtx (void) |
2858 | { |
2859 | return as_a <rtx_code_label *> ( |
2860 | gen_rtx_CODE_LABEL (VOIDmode, NULL_RTX, NULL_RTX, |
2861 | NULL, label_num++, NULL)); |
2862 | } |
2863 | |
2864 | /* For procedure integration. */ |
2865 | |
2866 | /* Install new pointers to the first and last insns in the chain. |
2867 | Also, set cur_insn_uid to one higher than the last in use. |
2868 | Used for an inline-procedure after copying the insn chain. */ |
2869 | |
2870 | void |
2871 | set_new_first_and_last_insn (rtx_insn *first, rtx_insn *last) |
2872 | { |
2873 | rtx_insn *insn; |
2874 | |
2875 | set_first_insn (first); |
2876 | set_last_insn (last); |
2877 | cur_insn_uid = 0; |
2878 | |
2879 | if (param_min_nondebug_insn_uid || MAY_HAVE_DEBUG_INSNS) |
2880 | { |
2881 | int debug_count = 0; |
2882 | |
2883 | cur_insn_uid = param_min_nondebug_insn_uid - 1; |
2884 | cur_debug_insn_uid = 0; |
2885 | |
2886 | for (insn = first; insn; insn = NEXT_INSN (insn)) |
2887 | if (INSN_UID (insn) < param_min_nondebug_insn_uid) |
2888 | cur_debug_insn_uid = MAX (cur_debug_insn_uid, INSN_UID (insn)); |
2889 | else |
2890 | { |
2891 | cur_insn_uid = MAX (cur_insn_uid, INSN_UID (insn)); |
2892 | if (DEBUG_INSN_P (insn)) |
2893 | debug_count++; |
2894 | } |
2895 | |
2896 | if (debug_count) |
2897 | cur_debug_insn_uid = param_min_nondebug_insn_uid + debug_count; |
2898 | else |
2899 | cur_debug_insn_uid++; |
2900 | } |
2901 | else |
2902 | for (insn = first; insn; insn = NEXT_INSN (insn)) |
2903 | cur_insn_uid = MAX (cur_insn_uid, INSN_UID (insn)); |
2904 | |
2905 | cur_insn_uid++; |
2906 | } |
2907 | |
2908 | /* Go through all the RTL insn bodies and copy any invalid shared |
2909 | structure. This routine should only be called once. */ |
2910 | |
2911 | static void |
2912 | unshare_all_rtl_1 (rtx_insn *insn) |
2913 | { |
2914 | /* Unshare just about everything else. */ |
2915 | unshare_all_rtl_in_chain (insn); |
2916 | |
2917 | /* Make sure the addresses of stack slots found outside the insn chain |
2918 | (such as, in DECL_RTL of a variable) are not shared |
2919 | with the insn chain. |
2920 | |
2921 | This special care is necessary when the stack slot MEM does not |
2922 | actually appear in the insn chain. If it does appear, its address |
2923 | is unshared from all else at that point. */ |
2924 | unsigned int i; |
2925 | rtx temp; |
2926 | FOR_EACH_VEC_SAFE_ELT (stack_slot_list, i, temp) |
2927 | (*stack_slot_list)[i] = copy_rtx_if_shared (temp); |
2928 | } |
2929 | |
2930 | /* Go through all the RTL insn bodies and copy any invalid shared |
2931 | structure, again. This is a fairly expensive thing to do so it |
2932 | should be done sparingly. */ |
2933 | |
2934 | void |
2935 | unshare_all_rtl_again (rtx_insn *insn) |
2936 | { |
2937 | rtx_insn *p; |
2938 | tree decl; |
2939 | |
  for (p = insn; p; p = NEXT_INSN (p))
2941 | if (INSN_P (p)) |
2942 | { |
	reset_used_flags (PATTERN (p));
2944 | reset_used_flags (REG_NOTES (p)); |
2945 | if (CALL_P (p)) |
2946 | reset_used_flags (CALL_INSN_FUNCTION_USAGE (p)); |
2947 | } |
2948 | |
2949 | /* Make sure that virtual stack slots are not shared. */ |
2950 | set_used_decls (DECL_INITIAL (cfun->decl)); |
2951 | |
2952 | /* Make sure that virtual parameters are not shared. */ |
2953 | for (decl = DECL_ARGUMENTS (cfun->decl); decl; decl = DECL_CHAIN (decl)) |
2954 | set_used_flags (DECL_RTL (decl)); |
2955 | |
2956 | rtx temp; |
2957 | unsigned int i; |
2958 | FOR_EACH_VEC_SAFE_ELT (stack_slot_list, i, temp) |
2959 | reset_used_flags (temp); |
2960 | |
2961 | unshare_all_rtl_1 (insn); |
2962 | } |
2963 | |
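/* Go through all the RTL insn bodies and copy any invalid shared
   structure, and likewise for the DECL_RTL and DECL_INCOMING_RTL of the
   function's arguments.  */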
2964 | void |
2965 | unshare_all_rtl (void) |
2966 | { |
  unshare_all_rtl_1 (get_insns ());
2968 | |
2969 | for (tree decl = DECL_ARGUMENTS (cfun->decl); decl; decl = DECL_CHAIN (decl)) |
2970 | { |
2971 | if (DECL_RTL_SET_P (decl)) |
2972 | SET_DECL_RTL (decl, copy_rtx_if_shared (DECL_RTL (decl))); |
2973 | DECL_INCOMING_RTL (decl) = copy_rtx_if_shared (DECL_INCOMING_RTL (decl)); |
2974 | } |
2975 | } |
2976 | |
2977 | |
/* Check that ORIG is not marked when it should not be and mark ORIG as in
   use.  Recursively does the same for subexpressions.  */
2980 | |
2981 | static void |
2982 | verify_rtx_sharing (rtx orig, rtx insn) |
2983 | { |
2984 | rtx x = orig; |
2985 | int i; |
2986 | enum rtx_code code; |
2987 | const char *format_ptr; |
2988 | |
2989 | if (x == 0) |
2990 | return; |
2991 | |
2992 | code = GET_CODE (x); |
2993 | |
2994 | /* These types may be freely shared. */ |
2995 | |
2996 | switch (code) |
2997 | { |
2998 | case REG: |
2999 | case DEBUG_EXPR: |
3000 | case VALUE: |
3001 | CASE_CONST_ANY: |
3002 | case SYMBOL_REF: |
3003 | case LABEL_REF: |
3004 | case CODE_LABEL: |
3005 | case PC: |
3006 | case RETURN: |
3007 | case SIMPLE_RETURN: |
3008 | case SCRATCH: |
      /* SCRATCH rtxes must be shared because they represent distinct
	 values.  */
3010 | return; |
3011 | case CLOBBER: |
3012 | /* Share clobbers of hard registers, but do not share pseudo reg |
3013 | clobbers or clobbers of hard registers that originated as pseudos. |
3014 | This is needed to allow safe register renaming. */ |
3015 | if (REG_P (XEXP (x, 0)) |
3016 | && HARD_REGISTER_NUM_P (REGNO (XEXP (x, 0))) |
3017 | && HARD_REGISTER_NUM_P (ORIGINAL_REGNO (XEXP (x, 0)))) |
3018 | return; |
3019 | break; |
3020 | |
3021 | case CONST: |
3022 | if (shared_const_p (orig)) |
3023 | return; |
3024 | break; |
3025 | |
3026 | case MEM: |
3027 | /* A MEM is allowed to be shared if its address is constant. */ |
3028 | if (CONSTANT_ADDRESS_P (XEXP (x, 0)) |
3029 | || reload_completed || reload_in_progress) |
3030 | return; |
3031 | |
3032 | break; |
3033 | |
3034 | default: |
3035 | break; |
3036 | } |
3037 | |
3038 | /* This rtx may not be shared. If it has already been seen, |
3039 | replace it with a copy of itself. */ |
3040 | if (flag_checking && RTX_FLAG (x, used)) |
3041 | { |
3042 | error ("invalid rtl sharing found in the insn" ); |
3043 | debug_rtx (insn); |
3044 | error ("shared rtx" ); |
3045 | debug_rtx (x); |
3046 | internal_error ("internal consistency failure" ); |
3047 | } |
3048 | gcc_assert (!RTX_FLAG (x, used)); |
3049 | |
3050 | RTX_FLAG (x, used) = 1; |
3051 | |
3052 | /* Now scan the subexpressions recursively. */ |
3053 | |
3054 | format_ptr = GET_RTX_FORMAT (code); |
3055 | |
3056 | for (i = 0; i < GET_RTX_LENGTH (code); i++) |
3057 | { |
3058 | switch (*format_ptr++) |
3059 | { |
3060 | case 'e': |
3061 | verify_rtx_sharing (XEXP (x, i), insn); |
3062 | break; |
3063 | |
3064 | case 'E': |
3065 | if (XVEC (x, i) != NULL) |
3066 | { |
3067 | int j; |
3068 | int len = XVECLEN (x, i); |
3069 | |
3070 | for (j = 0; j < len; j++) |
3071 | { |
		  /* We allow sharing of ASM_OPERANDS inside a single
		     instruction.  */
3074 | if (j && GET_CODE (XVECEXP (x, i, j)) == SET |
3075 | && (GET_CODE (SET_SRC (XVECEXP (x, i, j))) |
3076 | == ASM_OPERANDS)) |
3077 | verify_rtx_sharing (SET_DEST (XVECEXP (x, i, j)), insn); |
3078 | else |
3079 | verify_rtx_sharing (XVECEXP (x, i, j), insn); |
3080 | } |
3081 | } |
3082 | break; |
3083 | } |
3084 | } |
3085 | } |
3086 | |
3087 | /* Reset used-flags for INSN. */ |
3088 | |
3089 | static void |
3090 | reset_insn_used_flags (rtx insn) |
3091 | { |
3092 | gcc_assert (INSN_P (insn)); |
3093 | reset_used_flags (PATTERN (insn)); |
3094 | reset_used_flags (REG_NOTES (insn)); |
3095 | if (CALL_P (insn)) |
3096 | reset_used_flags (CALL_INSN_FUNCTION_USAGE (insn)); |
3097 | } |
3098 | |
3099 | /* Go through all the RTL insn bodies and clear all the USED bits. */ |
3100 | |
3101 | static void |
3102 | reset_all_used_flags (void) |
3103 | { |
3104 | rtx_insn *p; |
3105 | |
  for (p = get_insns (); p; p = NEXT_INSN (p))
3107 | if (INSN_P (p)) |
3108 | { |
	rtx pat = PATTERN (p);
3110 | if (GET_CODE (pat) != SEQUENCE) |
	  reset_insn_used_flags (p);
3112 | else |
3113 | { |
3114 | gcc_assert (REG_NOTES (p) == NULL); |
3115 | for (int i = 0; i < XVECLEN (pat, 0); i++) |
3116 | { |
3117 | rtx insn = XVECEXP (pat, 0, i); |
3118 | if (INSN_P (insn)) |
3119 | reset_insn_used_flags (insn); |
3120 | } |
3121 | } |
3122 | } |
3123 | } |
3124 | |
3125 | /* Verify sharing in INSN. */ |
3126 | |
3127 | static void |
3128 | verify_insn_sharing (rtx insn) |
3129 | { |
3130 | gcc_assert (INSN_P (insn)); |
  verify_rtx_sharing (PATTERN (insn), insn);
3132 | verify_rtx_sharing (REG_NOTES (insn), insn); |
3133 | if (CALL_P (insn)) |
3134 | verify_rtx_sharing (CALL_INSN_FUNCTION_USAGE (insn), insn); |
3135 | } |
3136 | |
3137 | /* Go through all the RTL insn bodies and check that there is no unexpected |
3138 | sharing in between the subexpressions. */ |
3139 | |
3140 | DEBUG_FUNCTION void |
3141 | verify_rtl_sharing (void) |
3142 | { |
3143 | rtx_insn *p; |
3144 | |
  timevar_push (TV_VERIFY_RTL_SHARING);
3146 | |
3147 | reset_all_used_flags (); |
3148 | |
  for (p = get_insns (); p; p = NEXT_INSN (p))
3150 | if (INSN_P (p)) |
3151 | { |
	rtx pat = PATTERN (p);
3153 | if (GET_CODE (pat) != SEQUENCE) |
	  verify_insn_sharing (p);
3155 | else |
3156 | for (int i = 0; i < XVECLEN (pat, 0); i++) |
3157 | { |
3158 | rtx insn = XVECEXP (pat, 0, i); |
3159 | if (INSN_P (insn)) |
3160 | verify_insn_sharing (insn); |
3161 | } |
3162 | } |
3163 | |
3164 | reset_all_used_flags (); |
3165 | |
  timevar_pop (TV_VERIFY_RTL_SHARING);
3167 | } |
3168 | |
3169 | /* Go through all the RTL insn bodies and copy any invalid shared structure. |
3170 | Assumes the mark bits are cleared at entry. */ |
3171 | |
3172 | void |
3173 | unshare_all_rtl_in_chain (rtx_insn *insn) |
3174 | { |
3175 | for (; insn; insn = NEXT_INSN (insn)) |
3176 | if (INSN_P (insn)) |
3177 | { |
3178 | PATTERN (insn) = copy_rtx_if_shared (PATTERN (insn)); |
3179 | REG_NOTES (insn) = copy_rtx_if_shared (REG_NOTES (insn)); |
3180 | if (CALL_P (insn)) |
3181 | CALL_INSN_FUNCTION_USAGE (insn) |
3182 | = copy_rtx_if_shared (CALL_INSN_FUNCTION_USAGE (insn)); |
3183 | } |
3184 | } |
3185 | |
/* Go through all virtual stack slots of a function and mark them as
   shared.  We never replace the DECL_RTLs themselves with a copy,
   but expressions mentioned in a DECL_RTL cannot be shared with
   expressions in the instruction stream.

   Note that reload may convert pseudo registers into memories in-place.
   Pseudo registers are always shared, but MEMs never are.  Thus if we
   reset the used flags on MEMs in the instruction stream, we must set
   them again on MEMs that appear in DECL_RTLs.  */

static void
set_used_decls (tree blk)
{
  tree t;

  /* Mark decls.  */
  for (t = BLOCK_VARS (blk); t; t = DECL_CHAIN (t))
    if (DECL_RTL_SET_P (t))
      set_used_flags (DECL_RTL (t));

  /* Now process sub-blocks.  */
  for (t = BLOCK_SUBBLOCKS (blk); t; t = BLOCK_CHAIN (t))
    set_used_decls (t);
}

/* Mark ORIG as in use, and return a copy of it if it was already in use.
   Recursively does the same for subexpressions.  Uses
   copy_rtx_if_shared_1 to reduce stack space.  */

rtx
copy_rtx_if_shared (rtx orig)
{
  copy_rtx_if_shared_1 (&orig);
  return orig;
}
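
/* Illustrative sketch (an assumption mirroring unshare_all_rtl_in_chain
   above): to unshare an expression X against itself, clear the marks and
   then copy whatever is seen a second time:

     reset_used_flags (x);
     x = copy_rtx_if_shared (x);

   The first walk clears the used bits; the second marks each subpart as
   it is visited and copies any subpart whose bit is already set.  */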

/* Mark *ORIG1 as in use, and set it to a copy of it if it was already in
   use.  Recursively does the same for subexpressions.  */

static void
copy_rtx_if_shared_1 (rtx *orig1)
{
  rtx x;
  int i;
  enum rtx_code code;
  rtx *last_ptr;
  const char *format_ptr;
  int copied = 0;
  int length;

  /* Repeat is used to turn tail-recursion into iteration.  */
 repeat:
  x = *orig1;

  if (x == 0)
    return;

  code = GET_CODE (x);

  /* These types may be freely shared.  */

  switch (code)
    {
    case REG:
    case DEBUG_EXPR:
    case VALUE:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case CODE_LABEL:
    case PC:
    case RETURN:
    case SIMPLE_RETURN:
    case SCRATCH:
      /* SCRATCHes must be shared because they represent distinct values.  */
      return;
    case CLOBBER:
      /* Share clobbers of hard registers, but do not share pseudo reg
         clobbers or clobbers of hard registers that originated as pseudos.
         This is needed to allow safe register renaming.  */
      if (REG_P (XEXP (x, 0))
          && HARD_REGISTER_NUM_P (REGNO (XEXP (x, 0)))
          && HARD_REGISTER_NUM_P (ORIGINAL_REGNO (XEXP (x, 0))))
        return;
      break;

    case CONST:
      if (shared_const_p (x))
        return;
      break;

    case DEBUG_INSN:
    case INSN:
    case JUMP_INSN:
    case CALL_INSN:
    case NOTE:
    case BARRIER:
      /* The chain of insns is not being copied.  */
      return;

    default:
      break;
    }

  /* This rtx may not be shared.  If it has already been seen,
     replace it with a copy of itself.  */

  if (RTX_FLAG (x, used))
    {
      x = shallow_copy_rtx (x);
      copied = 1;
    }
  RTX_FLAG (x, used) = 1;

  /* Now scan the subexpressions recursively.
     We can store any replaced subexpressions directly into X
     since we know X is not shared!  Any vectors in X
     must be copied if X was copied.  */

  format_ptr = GET_RTX_FORMAT (code);
  length = GET_RTX_LENGTH (code);
  last_ptr = NULL;

  for (i = 0; i < length; i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          if (last_ptr)
            copy_rtx_if_shared_1 (last_ptr);
          last_ptr = &XEXP (x, i);
          break;

        case 'E':
          if (XVEC (x, i) != NULL)
            {
              int j;
              int len = XVECLEN (x, i);

              /* Copy the vector iff I copied the rtx and the length
                 is nonzero.  */
              if (copied && len > 0)
                XVEC (x, i) = gen_rtvec_v (len, XVEC (x, i)->elem);

              /* Call recursively on all inside the vector.  */
              for (j = 0; j < len; j++)
                {
                  if (last_ptr)
                    copy_rtx_if_shared_1 (last_ptr);
                  last_ptr = &XVECEXP (x, i, j);
                }
            }
          break;
        }
    }
  *orig1 = x;
  if (last_ptr)
    {
      orig1 = last_ptr;
      goto repeat;
    }
}

/* Set the USED bit in X and its non-shareable subparts to FLAG.  */

static void
mark_used_flags (rtx x, int flag)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int length;

  /* Repeat is used to turn tail-recursion into iteration.  */
 repeat:
  if (x == 0)
    return;

  code = GET_CODE (x);

  /* These types may be freely shared so we needn't do any resetting
     for them.  */

  switch (code)
    {
    case REG:
    case DEBUG_EXPR:
    case VALUE:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case RETURN:
    case SIMPLE_RETURN:
      return;

    case DEBUG_INSN:
    case INSN:
    case JUMP_INSN:
    case CALL_INSN:
    case NOTE:
    case LABEL_REF:
    case BARRIER:
      /* The chain of insns is not being copied.  */
      return;

    default:
      break;
    }

  RTX_FLAG (x, used) = flag;

  format_ptr = GET_RTX_FORMAT (code);
  length = GET_RTX_LENGTH (code);

  for (i = 0; i < length; i++)
    {
      switch (*format_ptr++)
        {
        case 'e':
          if (i == length - 1)
            {
              x = XEXP (x, i);
              goto repeat;
            }
          mark_used_flags (XEXP (x, i), flag);
          break;

        case 'E':
          for (j = 0; j < XVECLEN (x, i); j++)
            mark_used_flags (XVECEXP (x, i, j), flag);
          break;
        }
    }
}

/* Clear all the USED bits in X to allow copy_rtx_if_shared to be used
   to look for shared sub-parts.  */

void
reset_used_flags (rtx x)
{
  mark_used_flags (x, 0);
}

/* Set all the USED bits in X to allow copy_rtx_if_shared to be used
   to look for shared sub-parts.  */

void
set_used_flags (rtx x)
{
  mark_used_flags (x, 1);
}

/* Copy X if necessary so that it won't be altered by changes in OTHER.
   Return X or the rtx for the pseudo reg the value of X was copied into.
   OTHER must be valid as a SET_DEST.  */

rtx
make_safe_from (rtx x, rtx other)
{
  while (1)
    switch (GET_CODE (other))
      {
      case SUBREG:
        other = SUBREG_REG (other);
        break;
      case STRICT_LOW_PART:
      case SIGN_EXTEND:
      case ZERO_EXTEND:
        other = XEXP (other, 0);
        break;
      default:
        goto done;
      }
 done:
  if ((MEM_P (other)
       && ! CONSTANT_P (x)
       && !REG_P (x)
       && GET_CODE (x) != SUBREG)
      || (REG_P (other)
          && (REGNO (other) < FIRST_PSEUDO_REGISTER
              || reg_mentioned_p (other, x))))
    {
      rtx temp = gen_reg_rtx (GET_MODE (x));
      emit_move_insn (temp, x);
      return temp;
    }
  return x;
}

/* Emission of insns (adding them to the doubly-linked list).  */

/* Return the last insn emitted, even if it is in a sequence now pushed.  */

rtx_insn *
get_last_insn_anywhere (void)
{
  struct sequence_stack *seq;
  for (seq = get_current_sequence (); seq; seq = seq->next)
    if (seq->last != 0)
      return seq->last;
  return 0;
}

/* Return the first nonnote insn emitted in current sequence or current
   function.  This routine looks inside SEQUENCEs.  */

rtx_insn *
get_first_nonnote_insn (void)
{
  rtx_insn *insn = get_insns ();

  if (insn)
    {
      if (NOTE_P (insn))
        for (insn = next_insn (insn);
             insn && NOTE_P (insn);
             insn = next_insn (insn))
          continue;
      else
        {
          if (NONJUMP_INSN_P (insn)
              && GET_CODE (PATTERN (insn)) == SEQUENCE)
            insn = as_a <rtx_sequence *> (PATTERN (insn))->insn (0);
        }
    }

  return insn;
}

/* Return the last nonnote insn emitted in current sequence or current
   function.  This routine looks inside SEQUENCEs.  */

rtx_insn *
get_last_nonnote_insn (void)
{
  rtx_insn *insn = get_last_insn ();

  if (insn)
    {
      if (NOTE_P (insn))
        for (insn = previous_insn (insn);
             insn && NOTE_P (insn);
             insn = previous_insn (insn))
          continue;
      else
        {
          if (NONJUMP_INSN_P (insn))
            if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
              insn = seq->insn (seq->len () - 1);
        }
    }

  return insn;
}

/* Return the number of actual (non-debug) insns emitted in this
   function.  */

int
get_max_insn_count (void)
{
  int n = cur_insn_uid;

  /* The table size must be stable across -g, to avoid codegen
     differences due to debug insns, and not be affected by
     --param min-nondebug-insn-uid, to avoid excessive table size
     and to simplify debugging of -fcompare-debug failures.  */
  if (cur_debug_insn_uid > param_min_nondebug_insn_uid)
    n -= cur_debug_insn_uid;
  else
    n -= param_min_nondebug_insn_uid;

  return n;
}
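
/* Worked example (illustrative numbers): with
   --param min-nondebug-insn-uid=20, DEBUG_INSNs draw their UIDs from
   [1, 20) and all other insns from [20, ...).  If cur_insn_uid is 120
   and only five debug UIDs were ever handed out (cur_debug_insn_uid
   <= 20), the result is 120 - 20 = 100 -- the same with or without -g,
   which is the stability property described above.  */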


/* Return the next insn.  If it is a SEQUENCE, return the first insn
   of the sequence.  */

rtx_insn *
next_insn (rtx_insn *insn)
{
  if (insn)
    {
      insn = NEXT_INSN (insn);
      if (insn && NONJUMP_INSN_P (insn)
          && GET_CODE (PATTERN (insn)) == SEQUENCE)
        insn = as_a <rtx_sequence *> (PATTERN (insn))->insn (0);
    }

  return insn;
}

/* Return the previous insn.  If it is a SEQUENCE, return the last insn
   of the sequence.  */

rtx_insn *
previous_insn (rtx_insn *insn)
{
  if (insn)
    {
      insn = PREV_INSN (insn);
      if (insn && NONJUMP_INSN_P (insn))
        if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
          insn = seq->insn (seq->len () - 1);
    }

  return insn;
}

/* Return the next insn after INSN that is not a NOTE.  This routine does not
   look inside SEQUENCEs.  */

rtx_insn *
next_nonnote_insn (rtx_insn *insn)
{
  while (insn)
    {
      insn = NEXT_INSN (insn);
      if (insn == 0 || !NOTE_P (insn))
        break;
    }

  return insn;
}

/* Return the next insn after INSN that is not a DEBUG_INSN.  This
   routine does not look inside SEQUENCEs.  */

rtx_insn *
next_nondebug_insn (rtx_insn *insn)
{
  while (insn)
    {
      insn = NEXT_INSN (insn);
      if (insn == 0 || !DEBUG_INSN_P (insn))
        break;
    }

  return insn;
}

/* Return the previous insn before INSN that is not a NOTE.  This routine does
   not look inside SEQUENCEs.  */

rtx_insn *
prev_nonnote_insn (rtx_insn *insn)
{
  while (insn)
    {
      insn = PREV_INSN (insn);
      if (insn == 0 || !NOTE_P (insn))
        break;
    }

  return insn;
}

/* Return the previous insn before INSN that is not a DEBUG_INSN.
   This routine does not look inside SEQUENCEs.  */

rtx_insn *
prev_nondebug_insn (rtx_insn *insn)
{
  while (insn)
    {
      insn = PREV_INSN (insn);
      if (insn == 0 || !DEBUG_INSN_P (insn))
        break;
    }

  return insn;
}

/* Return the next insn after INSN that is not a NOTE nor DEBUG_INSN.
   This routine does not look inside SEQUENCEs.  */

rtx_insn *
next_nonnote_nondebug_insn (rtx_insn *insn)
{
  while (insn)
    {
      insn = NEXT_INSN (insn);
      if (insn == 0 || (!NOTE_P (insn) && !DEBUG_INSN_P (insn)))
        break;
    }

  return insn;
}

/* Return the next insn after INSN that is not a NOTE nor DEBUG_INSN,
   but stop the search before we enter another basic block.  This
   routine does not look inside SEQUENCEs.  */

rtx_insn *
next_nonnote_nondebug_insn_bb (rtx_insn *insn)
{
  while (insn)
    {
      insn = NEXT_INSN (insn);
      if (insn == 0)
        break;
      if (DEBUG_INSN_P (insn))
        continue;
      if (!NOTE_P (insn))
        break;
      if (NOTE_INSN_BASIC_BLOCK_P (insn))
        return NULL;
    }

  return insn;
}

/* Return the previous insn before INSN that is not a NOTE nor DEBUG_INSN.
   This routine does not look inside SEQUENCEs.  */

rtx_insn *
prev_nonnote_nondebug_insn (rtx_insn *insn)
{
  while (insn)
    {
      insn = PREV_INSN (insn);
      if (insn == 0 || (!NOTE_P (insn) && !DEBUG_INSN_P (insn)))
        break;
    }

  return insn;
}

/* Return the previous insn before INSN that is not a NOTE nor
   DEBUG_INSN, but stop the search before we enter another basic
   block.  This routine does not look inside SEQUENCEs.  */

rtx_insn *
prev_nonnote_nondebug_insn_bb (rtx_insn *insn)
{
  while (insn)
    {
      insn = PREV_INSN (insn);
      if (insn == 0)
        break;
      if (DEBUG_INSN_P (insn))
        continue;
      if (!NOTE_P (insn))
        break;
      if (NOTE_INSN_BASIC_BLOCK_P (insn))
        return NULL;
    }

  return insn;
}

/* Return the next INSN, CALL_INSN, JUMP_INSN or DEBUG_INSN after INSN;
   or 0, if there is none.  This routine does not look inside
   SEQUENCEs.  */

rtx_insn *
next_real_insn (rtx_insn *insn)
{
  while (insn)
    {
      insn = NEXT_INSN (insn);
      if (insn == 0 || INSN_P (insn))
        break;
    }

  return insn;
}

/* Return the last INSN, CALL_INSN, JUMP_INSN or DEBUG_INSN before INSN;
   or 0, if there is none.  This routine does not look inside
   SEQUENCEs.  */

rtx_insn *
prev_real_insn (rtx_insn *insn)
{
  while (insn)
    {
      insn = PREV_INSN (insn);
      if (insn == 0 || INSN_P (insn))
        break;
    }

  return insn;
}

/* Return the next INSN, CALL_INSN or JUMP_INSN after INSN;
   or 0, if there is none.  This routine does not look inside
   SEQUENCEs.  */

rtx_insn *
next_real_nondebug_insn (rtx uncast_insn)
{
  rtx_insn *insn = safe_as_a <rtx_insn *> (uncast_insn);

  while (insn)
    {
      insn = NEXT_INSN (insn);
      if (insn == 0 || NONDEBUG_INSN_P (insn))
        break;
    }

  return insn;
}

/* Return the last INSN, CALL_INSN or JUMP_INSN before INSN;
   or 0, if there is none.  This routine does not look inside
   SEQUENCEs.  */

rtx_insn *
prev_real_nondebug_insn (rtx_insn *insn)
{
  while (insn)
    {
      insn = PREV_INSN (insn);
      if (insn == 0 || NONDEBUG_INSN_P (insn))
        break;
    }

  return insn;
}

/* Return the last CALL_INSN in the current list, or 0 if there is none.
   This routine does not look inside SEQUENCEs.  */

rtx_call_insn *
last_call_insn (void)
{
  rtx_insn *insn;

  for (insn = get_last_insn ();
       insn && !CALL_P (insn);
       insn = PREV_INSN (insn))
    ;

  return safe_as_a <rtx_call_insn *> (insn);
}

/* Return true if INSN really does something: a CALL_INSN or JUMP_INSN,
   JUMP_TABLE_DATA, or a NONJUMP_INSN that (once reload has completed)
   is not just a USE or CLOBBER.  */

bool
active_insn_p (const rtx_insn *insn)
{
  return (CALL_P (insn) || JUMP_P (insn)
          || JUMP_TABLE_DATA_P (insn) /* FIXME */
          || (NONJUMP_INSN_P (insn)
              && (! reload_completed
                  || (GET_CODE (PATTERN (insn)) != USE
                      && GET_CODE (PATTERN (insn)) != CLOBBER))));
}
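
/* For instance (illustrative): after reload a move such as
   (set (reg:SI 0) (reg:SI 1)) is active, while a standalone
   (use (reg:SI 0)) or (clobber (reg:SI 1)) pattern is not, so the
   search loops below step over the latter two.  */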

/* Find the next insn after INSN that really does something.  This routine
   does not look inside SEQUENCEs.  After reload this also skips over
   standalone USE and CLOBBER insns.  */

rtx_insn *
next_active_insn (rtx_insn *insn)
{
  while (insn)
    {
      insn = NEXT_INSN (insn);
      if (insn == 0 || active_insn_p (insn))
        break;
    }

  return insn;
}

/* Find the last insn before INSN that really does something.  This routine
   does not look inside SEQUENCEs.  After reload this also skips over
   standalone USE and CLOBBER insns.  */

rtx_insn *
prev_active_insn (rtx_insn *insn)
{
  while (insn)
    {
      insn = PREV_INSN (insn);
      if (insn == 0 || active_insn_p (insn))
        break;
    }

  return insn;
}

/* Return true if X contains an RTX_AUTOINC class rtx whose address
   register matches REG.  */

static bool
find_auto_inc (const_rtx x, const_rtx reg)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (GET_RTX_CLASS (GET_CODE (x)) == RTX_AUTOINC
          && rtx_equal_p (reg, XEXP (x, 0)))
        return true;
    }
  return false;
}

/* Increment the label uses for all labels present in rtx.  */

static void
mark_label_nuses (rtx x)
{
  enum rtx_code code;
  int i, j;
  const char *fmt;

  code = GET_CODE (x);
  if (code == LABEL_REF && LABEL_P (label_ref_label (x)))
    LABEL_NUSES (label_ref_label (x))++;

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        mark_label_nuses (XEXP (x, i));
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          mark_label_nuses (XVECEXP (x, i, j));
    }
}


/* Try splitting insns that can be split for better scheduling.
   PAT is the pattern which might split.
   TRIAL is the insn providing PAT.
   LAST is nonzero if we should return the last insn of the sequence produced.

   If this routine succeeds in splitting, it returns the first or last
   replacement insn depending on the value of LAST.  Otherwise, it
   returns TRIAL.  If the insn to be returned can be split, it will be.  */

rtx_insn *
try_split (rtx pat, rtx_insn *trial, int last)
{
  rtx_insn *before, *after;
  rtx note;
  rtx_insn *seq, *tem;
  profile_probability probability;
  rtx_insn *insn_last, *insn;
  int njumps = 0;
  rtx_insn *call_insn = NULL;

  if (any_condjump_p (trial)
      && (note = find_reg_note (trial, REG_BR_PROB, 0)))
    split_branch_probability
      = profile_probability::from_reg_br_prob_note (XINT (note, 0));
  else
    split_branch_probability = profile_probability::uninitialized ();

  probability = split_branch_probability;

  seq = split_insns (pat, trial);

  split_branch_probability = profile_probability::uninitialized ();

  if (!seq)
    return trial;

  int split_insn_count = 0;
  /* Avoid infinite loop if any insn of the result matches
     the original pattern.  */
  insn_last = seq;
  while (1)
    {
      if (INSN_P (insn_last)
          && rtx_equal_p (PATTERN (insn_last), pat))
        return trial;
      split_insn_count++;
      if (!NEXT_INSN (insn_last))
        break;
      insn_last = NEXT_INSN (insn_last);
    }

  /* We're not good at redistributing frame information if
     the split occurs before reload or if it results in more
     than one insn.  */
  if (RTX_FRAME_RELATED_P (trial))
    {
      if (!reload_completed || split_insn_count != 1)
        return trial;

      rtx_insn *new_insn = seq;
      rtx_insn *old_insn = trial;
      copy_frame_info_to_split_insn (old_insn, new_insn);
    }

  /* We will be adding the new sequence to the function.  The splitters
     may have introduced invalid RTL sharing, so unshare the sequence now.  */
  unshare_all_rtl_in_chain (seq);

  /* Mark labels and copy flags.  */
  for (insn = insn_last; insn; insn = PREV_INSN (insn))
    {
      if (JUMP_P (insn))
        {
          if (JUMP_P (trial))
            CROSSING_JUMP_P (insn) = CROSSING_JUMP_P (trial);
          mark_jump_label (PATTERN (insn), insn, 0);
          njumps++;
          if (probability.initialized_p ()
              && any_condjump_p (insn)
              && !find_reg_note (insn, REG_BR_PROB, 0))
            {
              /* We can preserve the REG_BR_PROB notes only if exactly
                 one jump is created, otherwise the machine description
                 is responsible for this step using
                 split_branch_probability variable.  */
              gcc_assert (njumps == 1);
              add_reg_br_prob_note (insn, probability);
            }
        }
    }

  /* If we are splitting a CALL_INSN, look for the CALL_INSN
     in SEQ and copy any additional information across.  */
  if (CALL_P (trial))
    {
      for (insn = insn_last; insn; insn = PREV_INSN (insn))
        if (CALL_P (insn))
          {
            gcc_assert (call_insn == NULL_RTX);
            call_insn = insn;

            /* Add the old CALL_INSN_FUNCTION_USAGE to whatever the
               target may have explicitly specified.  */
            rtx *p = &CALL_INSN_FUNCTION_USAGE (insn);
            while (*p)
              p = &XEXP (*p, 1);
            *p = CALL_INSN_FUNCTION_USAGE (trial);

            /* If the old call was a sibling call, the new one must
               be too.  */
            SIBLING_CALL_P (insn) = SIBLING_CALL_P (trial);
          }
    }

  /* Copy notes, particularly those related to the CFG.  */
  for (note = REG_NOTES (trial); note; note = XEXP (note, 1))
    {
      switch (REG_NOTE_KIND (note))
        {
        case REG_EH_REGION:
          copy_reg_eh_region_note_backward (note, insn_last, NULL);
          break;

        case REG_NORETURN:
        case REG_SETJMP:
        case REG_TM:
        case REG_CALL_NOCF_CHECK:
        case REG_CALL_ARG_LOCATION:
          for (insn = insn_last; insn != NULL_RTX; insn = PREV_INSN (insn))
            {
              if (CALL_P (insn))
                add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
            }
          break;

        case REG_NON_LOCAL_GOTO:
        case REG_LABEL_TARGET:
          for (insn = insn_last; insn != NULL_RTX; insn = PREV_INSN (insn))
            {
              if (JUMP_P (insn))
                add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
            }
          break;

        case REG_INC:
          if (!AUTO_INC_DEC)
            break;

          for (insn = insn_last; insn != NULL_RTX; insn = PREV_INSN (insn))
            {
              rtx reg = XEXP (note, 0);
              if (!FIND_REG_INC_NOTE (insn, reg)
                  && find_auto_inc (PATTERN (insn), reg))
                add_reg_note (insn, REG_INC, reg);
            }
          break;

        case REG_ARGS_SIZE:
          fixup_args_size_notes (NULL, insn_last, get_args_size (note));
          break;

        case REG_CALL_DECL:
        case REG_UNTYPED_CALL:
          gcc_assert (call_insn != NULL_RTX);
          add_reg_note (call_insn, REG_NOTE_KIND (note), XEXP (note, 0));
          break;

        default:
          break;
        }
    }

  /* If there are LABELS inside the split insns increment the
     usage count so we don't delete the label.  */
  if (INSN_P (trial))
    {
      insn = insn_last;
      while (insn != NULL_RTX)
        {
          /* JUMP_P insns have already been "marked" above.  */
          if (NONJUMP_INSN_P (insn))
            mark_label_nuses (PATTERN (insn));

          insn = PREV_INSN (insn);
        }
    }

  before = PREV_INSN (trial);
  after = NEXT_INSN (trial);

  emit_insn_after_setloc (seq, trial, INSN_LOCATION (trial));

  delete_insn (trial);

  /* Recursively call try_split for each new insn created; by the
     time control returns here that insn will be fully split, so
     set LAST and continue from the insn after the one returned.
     We can't use next_active_insn here since AFTER may be a note.
     Ignore deleted insns, which can occur if not optimizing.  */
  for (tem = NEXT_INSN (before); tem != after; tem = NEXT_INSN (tem))
    if (! tem->deleted () && INSN_P (tem))
      tem = try_split (PATTERN (tem), tem, 1);

  /* Return either the first or the last insn, depending on which was
     requested.  */
  return last
         ? (after ? PREV_INSN (after) : get_last_insn ())
         : NEXT_INSN (before);
}
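
/* A sketch of the usual entry point (assumed; compare split_insn in
   recog.cc): a caller hands try_split the insn's own pattern,

     rtx_insn *last = try_split (PATTERN (insn), insn, 1);

   and treats a return value equal to INSN as "no split happened",
   since try_split returns TRIAL itself in every failure path above.  */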

/* Make and return an INSN rtx, initializing all its slots.
   Store PATTERN in the pattern slots.  */

rtx_insn *
make_insn_raw (rtx pattern)
{
  rtx_insn *insn;

  insn = as_a <rtx_insn *> (rtx_alloc (INSN));

  INSN_UID (insn) = cur_insn_uid++;
  PATTERN (insn) = pattern;
  INSN_CODE (insn) = -1;
  REG_NOTES (insn) = NULL;
  INSN_LOCATION (insn) = curr_insn_location ();
  BLOCK_FOR_INSN (insn) = NULL;

#ifdef ENABLE_RTL_CHECKING
  if (insn
      && INSN_P (insn)
      && (returnjump_p (insn)
          || (GET_CODE (insn) == SET
              && SET_DEST (insn) == pc_rtx)))
    {
      warning (0, "ICE: %<emit_insn%> used where %<emit_jump_insn%> needed:");
      debug_rtx (insn);
    }
#endif

  return insn;
}

/* Like `make_insn_raw' but make a DEBUG_INSN instead of an insn.  */

static rtx_insn *
make_debug_insn_raw (rtx pattern)
{
  rtx_debug_insn *insn;

  insn = as_a <rtx_debug_insn *> (rtx_alloc (DEBUG_INSN));
  INSN_UID (insn) = cur_debug_insn_uid++;
  if (cur_debug_insn_uid > param_min_nondebug_insn_uid)
    INSN_UID (insn) = cur_insn_uid++;

  PATTERN (insn) = pattern;
  INSN_CODE (insn) = -1;
  REG_NOTES (insn) = NULL;
  INSN_LOCATION (insn) = curr_insn_location ();
  BLOCK_FOR_INSN (insn) = NULL;

  return insn;
}

/* Like `make_insn_raw' but make a JUMP_INSN instead of an insn.  */

static rtx_insn *
make_jump_insn_raw (rtx pattern)
{
  rtx_jump_insn *insn;

  insn = as_a <rtx_jump_insn *> (rtx_alloc (JUMP_INSN));
  INSN_UID (insn) = cur_insn_uid++;

  PATTERN (insn) = pattern;
  INSN_CODE (insn) = -1;
  REG_NOTES (insn) = NULL;
  JUMP_LABEL (insn) = NULL;
  INSN_LOCATION (insn) = curr_insn_location ();
  BLOCK_FOR_INSN (insn) = NULL;

  return insn;
}

/* Like `make_insn_raw' but make a CALL_INSN instead of an insn.  */

static rtx_insn *
make_call_insn_raw (rtx pattern)
{
  rtx_call_insn *insn;

  insn = as_a <rtx_call_insn *> (rtx_alloc (CALL_INSN));
  INSN_UID (insn) = cur_insn_uid++;

  PATTERN (insn) = pattern;
  INSN_CODE (insn) = -1;
  REG_NOTES (insn) = NULL;
  CALL_INSN_FUNCTION_USAGE (insn) = NULL;
  INSN_LOCATION (insn) = curr_insn_location ();
  BLOCK_FOR_INSN (insn) = NULL;

  return insn;
}

/* Like `make_insn_raw' but make a NOTE instead of an insn.  */

static rtx_note *
make_note_raw (enum insn_note subtype)
{
  /* Some notes are never created this way at all.  These notes are
     only created by patching out insns.  */
  gcc_assert (subtype != NOTE_INSN_DELETED_LABEL
              && subtype != NOTE_INSN_DELETED_DEBUG_LABEL);

  rtx_note *note = as_a <rtx_note *> (rtx_alloc (NOTE));
  INSN_UID (note) = cur_insn_uid++;
  NOTE_KIND (note) = subtype;
  BLOCK_FOR_INSN (note) = NULL;
  memset (&NOTE_DATA (note), 0, sizeof (NOTE_DATA (note)));
  return note;
}

/* Add INSN to the end of the doubly-linked list, between PREV and NEXT.
   INSN may be any object that can appear in the chain: INSN_P and NOTE_P
   objects, but also BARRIERs and JUMP_TABLE_DATAs.  PREV and NEXT may be
   NULL.  */

static inline void
link_insn_into_chain (rtx_insn *insn, rtx_insn *prev, rtx_insn *next)
{
  SET_PREV_INSN (insn) = prev;
  SET_NEXT_INSN (insn) = next;
  if (prev != NULL)
    {
      SET_NEXT_INSN (prev) = insn;
      if (NONJUMP_INSN_P (prev) && GET_CODE (PATTERN (prev)) == SEQUENCE)
        {
          rtx_sequence *sequence = as_a <rtx_sequence *> (PATTERN (prev));
          SET_NEXT_INSN (sequence->insn (sequence->len () - 1)) = insn;
        }
    }
  if (next != NULL)
    {
      SET_PREV_INSN (next) = insn;
      if (NONJUMP_INSN_P (next) && GET_CODE (PATTERN (next)) == SEQUENCE)
        {
          rtx_sequence *sequence = as_a <rtx_sequence *> (PATTERN (next));
          SET_PREV_INSN (sequence->insn (0)) = insn;
        }
    }

  if (NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      rtx_sequence *sequence = as_a <rtx_sequence *> (PATTERN (insn));
      SET_PREV_INSN (sequence->insn (0)) = prev;
      SET_NEXT_INSN (sequence->insn (sequence->len () - 1)) = next;
    }
}
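
/* Roughly, for PREV <-> INSN <-> NEXT the function above maintains
   (sketch):

     PREV: NEXT_INSN = INSN   (and the last insn inside a SEQUENCE
                               pattern of PREV gets the same NEXT)
     INSN: PREV_INSN = PREV, NEXT_INSN = NEXT
     NEXT: PREV_INSN = INSN   (and the first insn inside a SEQUENCE
                               pattern of NEXT gets the same PREV)

   so that chain walks which step into delay-slot SEQUENCEs remain
   consistent.  */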

/* Add INSN to the end of the doubly-linked list.
   INSN may be an INSN, JUMP_INSN, CALL_INSN, CODE_LABEL, BARRIER or NOTE.  */

void
add_insn (rtx_insn *insn)
{
  rtx_insn *prev = get_last_insn ();
  link_insn_into_chain (insn, prev, NULL);
  if (get_insns () == NULL)
    set_first_insn (insn);
  set_last_insn (insn);
}

/* Add INSN into the doubly-linked list after insn AFTER.  */

static void
add_insn_after_nobb (rtx_insn *insn, rtx_insn *after)
{
  rtx_insn *next = NEXT_INSN (after);

  gcc_assert (!optimize || !after->deleted ());

  link_insn_into_chain (insn, after, next);

  if (next == NULL)
    {
      struct sequence_stack *seq;

      for (seq = get_current_sequence (); seq; seq = seq->next)
        if (after == seq->last)
          {
            seq->last = insn;
            break;
          }
    }
}

/* Add INSN into the doubly-linked list before insn BEFORE.  */

static void
add_insn_before_nobb (rtx_insn *insn, rtx_insn *before)
{
  rtx_insn *prev = PREV_INSN (before);

  gcc_assert (!optimize || !before->deleted ());

  link_insn_into_chain (insn, prev, before);

  if (prev == NULL)
    {
      struct sequence_stack *seq;

      for (seq = get_current_sequence (); seq; seq = seq->next)
        if (before == seq->first)
          {
            seq->first = insn;
            break;
          }

      gcc_assert (seq);
    }
}

/* Like add_insn_after_nobb, but try to set BLOCK_FOR_INSN.
   If BB is NULL, an attempt is made to infer the bb from AFTER.

   This and the next function should be the only functions called
   to insert an insn once delay slots have been filled since only
   they know how to update a SEQUENCE.  */

void
add_insn_after (rtx_insn *insn, rtx_insn *after, basic_block bb)
{
  add_insn_after_nobb (insn, after);
  if (!BARRIER_P (after)
      && !BARRIER_P (insn)
      && (bb = BLOCK_FOR_INSN (after)))
    {
      set_block_for_insn (insn, bb);
      if (INSN_P (insn))
        df_insn_rescan (insn);
      /* Should not happen as first in the BB is always
         either NOTE or LABEL.  */
      if (BB_END (bb) == after
          /* Avoid clobbering of structure when creating new BB.  */
          && !BARRIER_P (insn)
          && !NOTE_INSN_BASIC_BLOCK_P (insn))
        BB_END (bb) = insn;
    }
}

/* Like add_insn_before_nobb, but try to set BLOCK_FOR_INSN.
   If BB is NULL, an attempt is made to infer the bb from BEFORE.

   This and the previous function should be the only functions called
   to insert an insn once delay slots have been filled since only
   they know how to update a SEQUENCE.  */

void
add_insn_before (rtx_insn *insn, rtx_insn *before, basic_block bb)
{
  add_insn_before_nobb (insn, before);

  if (!bb
      && !BARRIER_P (before)
      && !BARRIER_P (insn))
    bb = BLOCK_FOR_INSN (before);

  if (bb)
    {
      set_block_for_insn (insn, bb);
      if (INSN_P (insn))
        df_insn_rescan (insn);
      /* Should not happen as first in the BB is always either NOTE or
         LABEL.  */
      gcc_assert (BB_HEAD (bb) != insn
                  /* Avoid clobbering of structure when creating new BB.  */
                  || BARRIER_P (insn)
                  || NOTE_INSN_BASIC_BLOCK_P (insn));
    }
}

/* Replace INSN with a deleted instruction note.  */

void
set_insn_deleted (rtx_insn *insn)
{
  if (INSN_P (insn))
    df_insn_delete (insn);
  PUT_CODE (insn, NOTE);
  NOTE_KIND (insn) = NOTE_INSN_DELETED;
}


/* Unlink INSN from the insn chain.

   This function knows how to handle sequences.

   This function does not invalidate data flow information associated with
   INSN (i.e. it does not call df_insn_delete).  That makes this function
   usable for disconnecting an insn from the chain so that it can be
   re-emitted elsewhere later.

   To later insert INSN elsewhere in the insn chain via add_insn and
   similar functions, PREV_INSN and NEXT_INSN must be nullified by
   the caller.  Nullifying them here breaks many insn chain walks.

   To really delete an insn and related DF information, use delete_insn.  */

void
remove_insn (rtx_insn *insn)
{
  rtx_insn *next = NEXT_INSN (insn);
  rtx_insn *prev = PREV_INSN (insn);
  basic_block bb;

  if (prev)
    {
      SET_NEXT_INSN (prev) = next;
      if (NONJUMP_INSN_P (prev) && GET_CODE (PATTERN (prev)) == SEQUENCE)
        {
          rtx_sequence *sequence = as_a <rtx_sequence *> (PATTERN (prev));
          SET_NEXT_INSN (sequence->insn (sequence->len () - 1)) = next;
        }
    }
  else
    {
      struct sequence_stack *seq;

      for (seq = get_current_sequence (); seq; seq = seq->next)
        if (insn == seq->first)
          {
            seq->first = next;
            break;
          }

      gcc_assert (seq);
    }

  if (next)
    {
      SET_PREV_INSN (next) = prev;
      if (NONJUMP_INSN_P (next) && GET_CODE (PATTERN (next)) == SEQUENCE)
        {
          rtx_sequence *sequence = as_a <rtx_sequence *> (PATTERN (next));
          SET_PREV_INSN (sequence->insn (0)) = prev;
        }
    }
  else
    {
      struct sequence_stack *seq;

      for (seq = get_current_sequence (); seq; seq = seq->next)
        if (insn == seq->last)
          {
            seq->last = prev;
            break;
          }

      gcc_assert (seq);
    }

  /* Fix up basic block boundaries, if necessary.  */
  if (!BARRIER_P (insn)
      && (bb = BLOCK_FOR_INSN (insn)))
    {
      if (BB_HEAD (bb) == insn)
        {
          /* Never ever delete the basic block note without deleting whole
             basic block.  */
          gcc_assert (!NOTE_P (insn));
          BB_HEAD (bb) = next;
        }
      if (BB_END (bb) == insn)
        BB_END (bb) = prev;
    }
}

/* Append CALL_FUSAGE to the CALL_INSN_FUNCTION_USAGE for CALL_INSN.  */

void
add_function_usage_to (rtx call_insn, rtx call_fusage)
{
  gcc_assert (call_insn && CALL_P (call_insn));

  /* Put the register usage information on the CALL.  If there is already
     some usage information, put ours at the end.  */
  if (CALL_INSN_FUNCTION_USAGE (call_insn))
    {
      rtx link;

      for (link = CALL_INSN_FUNCTION_USAGE (call_insn); XEXP (link, 1) != 0;
           link = XEXP (link, 1))
        ;

      XEXP (link, 1) = call_fusage;
    }
  else
    CALL_INSN_FUNCTION_USAGE (call_insn) = call_fusage;
}
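
/* Shape of the list being appended to (illustrative): the usage chain
   is a nil-terminated series of EXPR_LISTs of USE and CLOBBER
   expressions, e.g.

     (expr_list (use (reg:SI 4))
        (expr_list (clobber (reg:SI 26))
           (nil)))

   which is why the loop above simply follows XEXP (link, 1) to the
   end before splicing CALL_FUSAGE in.  */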

/* Delete all insns made since FROM.
   FROM becomes the new last instruction.  */

void
delete_insns_since (rtx_insn *from)
{
  if (from == 0)
    set_first_insn (0);
  else
    SET_NEXT_INSN (from) = 0;
  set_last_insn (from);
}

/* This function is deprecated, please use sequences instead.

   Move a consecutive bunch of insns to a different place in the chain.
   The insns to be moved are those between FROM and TO.
   They are moved to a new position after the insn AFTER.
   AFTER must not be FROM or TO or any insn in between.

   This function does not know about SEQUENCEs and hence should not be
   called after delay-slot filling has been done.  */

void
reorder_insns_nobb (rtx_insn *from, rtx_insn *to, rtx_insn *after)
{
  if (flag_checking)
    {
      for (rtx_insn *x = from; x != to; x = NEXT_INSN (x))
        gcc_assert (after != x);
      gcc_assert (after != to);
    }

  /* Splice this bunch out of where it is now.  */
  if (PREV_INSN (from))
    SET_NEXT_INSN (PREV_INSN (from)) = NEXT_INSN (to);
  if (NEXT_INSN (to))
    SET_PREV_INSN (NEXT_INSN (to)) = PREV_INSN (from);
  if (get_last_insn () == to)
    set_last_insn (PREV_INSN (from));
  if (get_insns () == from)
    set_first_insn (NEXT_INSN (to));

  /* Make the new neighbors point to it and it to them.  */
  if (NEXT_INSN (after))
    SET_PREV_INSN (NEXT_INSN (after)) = to;

  SET_NEXT_INSN (to) = NEXT_INSN (after);
  SET_PREV_INSN (from) = after;
  SET_NEXT_INSN (after) = from;
  if (after == get_last_insn ())
    set_last_insn (to);
}

/* Same as function above, but take care to update BB boundaries.  */
void
reorder_insns (rtx_insn *from, rtx_insn *to, rtx_insn *after)
{
  rtx_insn *prev = PREV_INSN (from);
  basic_block bb, bb2;

  reorder_insns_nobb (from, to, after);

  if (!BARRIER_P (after)
      && (bb = BLOCK_FOR_INSN (after)))
    {
      rtx_insn *x;
      df_set_bb_dirty (bb);

      if (!BARRIER_P (from)
          && (bb2 = BLOCK_FOR_INSN (from)))
        {
          if (BB_END (bb2) == to)
            BB_END (bb2) = prev;
          df_set_bb_dirty (bb2);
        }

      if (BB_END (bb) == after)
        BB_END (bb) = to;

      for (x = from; x != NEXT_INSN (to); x = NEXT_INSN (x))
        if (!BARRIER_P (x))
          df_insn_change_bb (x, bb);
    }
}


/* Emit insn(s) of given code and pattern
   at a specified place within the doubly-linked list.

   All of the emit_foo global entry points accept an object
   X which is either an insn list or a PATTERN of a single
   instruction.

   There are thus a few canonical ways to generate code and
   emit it at a specific place in the instruction stream.  For
   example, consider the instruction named SPOT and the fact that
   we would like to emit some instructions before SPOT.  We might
   do it like this:

        start_sequence ();
        ... emit the new instructions ...
        insns_head = get_insns ();
        end_sequence ();

        emit_insn_before (insns_head, SPOT);

   It used to be common to generate SEQUENCE rtl instead, but that
   is a relic of the past which no longer occurs.  The reason is that
   SEQUENCE rtl results in much fragmented RTL memory since the SEQUENCE
   generated would almost certainly die right after it was created.  */

static rtx_insn *
emit_pattern_before_noloc (rtx x, rtx_insn *before, rtx_insn *last,
                           basic_block bb,
                           rtx_insn *(*make_raw) (rtx))
{
  rtx_insn *insn;

  gcc_assert (before);

  if (x == NULL_RTX)
    return last;

  switch (GET_CODE (x))
    {
    case DEBUG_INSN:
    case INSN:
    case JUMP_INSN:
    case CALL_INSN:
    case CODE_LABEL:
    case BARRIER:
    case NOTE:
      insn = as_a <rtx_insn *> (x);
      while (insn)
        {
          rtx_insn *next = NEXT_INSN (insn);
          add_insn_before (insn, before, bb);
          last = insn;
          insn = next;
        }
      break;

#ifdef ENABLE_RTL_CHECKING
    case SEQUENCE:
      gcc_unreachable ();
      break;
#endif

    default:
      last = (*make_raw) (x);
      add_insn_before (last, before, bb);
      break;
    }

  return last;
}

/* Make X be output before the instruction BEFORE.  */

rtx_insn *
emit_insn_before_noloc (rtx x, rtx_insn *before, basic_block bb)
{
  return emit_pattern_before_noloc (x, before, before, bb, make_insn_raw);
}

/* Make an instruction with body X and code JUMP_INSN
   and output it before the instruction BEFORE.  */

rtx_jump_insn *
emit_jump_insn_before_noloc (rtx x, rtx_insn *before)
{
  return as_a <rtx_jump_insn *> (
                emit_pattern_before_noloc (x, before, NULL, NULL,
                                           make_jump_insn_raw));
}

/* Make an instruction with body X and code CALL_INSN
   and output it before the instruction BEFORE.  */

rtx_insn *
emit_call_insn_before_noloc (rtx x, rtx_insn *before)
{
  return emit_pattern_before_noloc (x, before, NULL, NULL,
                                    make_call_insn_raw);
}

/* Make an instruction with body X and code DEBUG_INSN
   and output it before the instruction BEFORE.  */

rtx_insn *
emit_debug_insn_before_noloc (rtx x, rtx_insn *before)
{
  return emit_pattern_before_noloc (x, before, NULL, NULL,
                                    make_debug_insn_raw);
}

/* Make an insn of code BARRIER
   and output it before the insn BEFORE.  */

rtx_barrier *
emit_barrier_before (rtx_insn *before)
{
  rtx_barrier *insn = as_a <rtx_barrier *> (rtx_alloc (BARRIER));

  INSN_UID (insn) = cur_insn_uid++;

  add_insn_before (insn, before, NULL);
  return insn;
}

/* Emit the label LABEL before the insn BEFORE.  */

rtx_code_label *
emit_label_before (rtx_code_label *label, rtx_insn *before)
{
  gcc_checking_assert (INSN_UID (label) == 0);
  INSN_UID (label) = cur_insn_uid++;
  add_insn_before (label, before, NULL);
  return label;
}

/* Helper for emit_insn_after, handles lists of instructions
   efficiently.  */

static rtx_insn *
emit_insn_after_1 (rtx_insn *first, rtx_insn *after, basic_block bb)
{
  rtx_insn *last;
  rtx_insn *after_after;
  if (!bb && !BARRIER_P (after))
    bb = BLOCK_FOR_INSN (after);

  if (bb)
    {
      df_set_bb_dirty (bb);
      for (last = first; NEXT_INSN (last); last = NEXT_INSN (last))
        if (!BARRIER_P (last))
          {
            set_block_for_insn (last, bb);
            df_insn_rescan (last);
          }
      if (!BARRIER_P (last))
        {
          set_block_for_insn (last, bb);
          df_insn_rescan (last);
        }
      if (BB_END (bb) == after)
        BB_END (bb) = last;
    }
  else
    for (last = first; NEXT_INSN (last); last = NEXT_INSN (last))
      continue;

  after_after = NEXT_INSN (after);

  SET_NEXT_INSN (after) = first;
  SET_PREV_INSN (first) = after;
  SET_NEXT_INSN (last) = after_after;
  if (after_after)
    SET_PREV_INSN (after_after) = last;

  if (after == get_last_insn ())
    set_last_insn (last);

  return last;
}

static rtx_insn *
emit_pattern_after_noloc (rtx x, rtx_insn *after, basic_block bb,
                          rtx_insn *(*make_raw)(rtx))
{
  rtx_insn *last = after;

  gcc_assert (after);

  if (x == NULL_RTX)
    return last;

  switch (GET_CODE (x))
    {
    case DEBUG_INSN:
    case INSN:
    case JUMP_INSN:
    case CALL_INSN:
    case CODE_LABEL:
    case BARRIER:
    case NOTE:
      last = emit_insn_after_1 (as_a <rtx_insn *> (x), after, bb);
      break;

#ifdef ENABLE_RTL_CHECKING
    case SEQUENCE:
      gcc_unreachable ();
      break;
#endif

    default:
      last = (*make_raw) (x);
      add_insn_after (last, after, bb);
      break;
    }

  return last;
}

/* Make X be output after the insn AFTER and set the BB of insn.  If
   BB is NULL, an attempt is made to infer the BB from AFTER.  */

rtx_insn *
emit_insn_after_noloc (rtx x, rtx_insn *after, basic_block bb)
{
  return emit_pattern_after_noloc (x, after, bb, make_insn_raw);
}


/* Make an insn of code JUMP_INSN with body X
   and output it after the insn AFTER.  */

rtx_jump_insn *
emit_jump_insn_after_noloc (rtx x, rtx_insn *after)
{
  return as_a <rtx_jump_insn *> (
                emit_pattern_after_noloc (x, after, NULL, make_jump_insn_raw));
}

/* Make an instruction with body X and code CALL_INSN
   and output it after the instruction AFTER.  */

rtx_insn *
emit_call_insn_after_noloc (rtx x, rtx_insn *after)
{
  return emit_pattern_after_noloc (x, after, NULL, make_call_insn_raw);
}

/* Make an instruction with body X and code DEBUG_INSN
   and output it after the instruction AFTER.  */

rtx_insn *
emit_debug_insn_after_noloc (rtx x, rtx_insn *after)
{
  return emit_pattern_after_noloc (x, after, NULL, make_debug_insn_raw);
}

/* Make an insn of code BARRIER
   and output it after the insn AFTER.  */

rtx_barrier *
emit_barrier_after (rtx_insn *after)
{
  rtx_barrier *insn = as_a <rtx_barrier *> (rtx_alloc (BARRIER));

  INSN_UID (insn) = cur_insn_uid++;

  add_insn_after (insn, after, NULL);
  return insn;
}

/* Emit the label LABEL after the insn AFTER.  */

rtx_insn *
emit_label_after (rtx_insn *label, rtx_insn *after)
{
  gcc_checking_assert (INSN_UID (label) == 0);
  INSN_UID (label) = cur_insn_uid++;
  add_insn_after (label, after, NULL);
  return label;
}

/* Notes require a bit of special handling: Some notes need to have their
   BLOCK_FOR_INSN set, others should never have it set, and some should
   have it set or clear depending on the context.  */

/* Return true iff a note of kind SUBTYPE should be emitted with routines
   that never set BLOCK_FOR_INSN on NOTE.  ON_BB_BOUNDARY_P is true if the
   caller is asked to emit a note before BB_HEAD, or after BB_END.  */
4886 | |
4887 | static bool |
4888 | note_outside_basic_block_p (enum insn_note subtype, bool on_bb_boundary_p) |
4889 | { |
4890 | switch (subtype) |
4891 | { |
4892 | /* NOTE_INSN_SWITCH_TEXT_SECTIONS only appears between basic blocks. */ |
4893 | case NOTE_INSN_SWITCH_TEXT_SECTIONS: |
4894 | return true; |
4895 | |
4896 | /* Notes for var tracking and EH region markers can appear between or |
4897 | inside basic blocks. If the caller is emitting on the basic block |
4898 | boundary, do not set BLOCK_FOR_INSN on the new note. */ |
4899 | case NOTE_INSN_VAR_LOCATION: |
4900 | case NOTE_INSN_EH_REGION_BEG: |
4901 | case NOTE_INSN_EH_REGION_END: |
4902 | return on_bb_boundary_p; |
4903 | |
4904 | /* Otherwise, BLOCK_FOR_INSN must be set. */ |
4905 | default: |
4906 | return false; |
4907 | } |
4908 | } |
4909 | |
4910 | /* Emit a note of subtype SUBTYPE after the insn AFTER. */ |
4911 | |
4912 | rtx_note * |
4913 | emit_note_after (enum insn_note subtype, rtx_insn *after) |
4914 | { |
4915 | rtx_note *note = make_note_raw (subtype); |
4916 | basic_block bb = BARRIER_P (after) ? NULL : BLOCK_FOR_INSN (insn: after); |
4917 | bool on_bb_boundary_p = (bb != NULL && BB_END (bb) == after); |
4918 | |
4919 | if (note_outside_basic_block_p (subtype, on_bb_boundary_p)) |
4920 | add_insn_after_nobb (insn: note, after); |
4921 | else |
4922 | add_insn_after (insn: note, after, bb); |
4923 | return note; |
4924 | } |
4925 | |
4926 | /* Emit a note of subtype SUBTYPE before the insn BEFORE. */ |
4927 | |
4928 | rtx_note * |
4929 | emit_note_before (enum insn_note subtype, rtx_insn *before) |
4930 | { |
4931 | rtx_note *note = make_note_raw (subtype); |
4932 | basic_block bb = BARRIER_P (before) ? NULL : BLOCK_FOR_INSN (insn: before); |
4933 | bool on_bb_boundary_p = (bb != NULL && BB_HEAD (bb) == before); |
4934 | |
4935 | if (note_outside_basic_block_p (subtype, on_bb_boundary_p)) |
4936 | add_insn_before_nobb (insn: note, before); |
4937 | else |
4938 | add_insn_before (insn: note, before, bb); |
4939 | return note; |
4940 | } |
4941 | |
4942 | /* Insert PATTERN after AFTER, setting its INSN_LOCATION to LOC. |
4943 | MAKE_RAW indicates how to turn PATTERN into a real insn. */ |
4944 | |
4945 | static rtx_insn * |
4946 | emit_pattern_after_setloc (rtx pattern, rtx_insn *after, location_t loc, |
4947 | rtx_insn *(*make_raw) (rtx)) |
4948 | { |
4949 | rtx_insn *last = emit_pattern_after_noloc (x: pattern, after, NULL, make_raw); |
4950 | |
4951 | if (pattern == NULL_RTX || !loc) |
4952 | return last; |
4953 | |
4954 | after = NEXT_INSN (insn: after); |
4955 | while (1) |
4956 | { |
4957 | if (active_insn_p (insn: after) |
4958 | && !JUMP_TABLE_DATA_P (after) /* FIXME */ |
4959 | && !INSN_LOCATION (insn: after)) |
4960 | INSN_LOCATION (insn: after) = loc; |
4961 | if (after == last) |
4962 | break; |
4963 | after = NEXT_INSN (insn: after); |
4964 | } |
4965 | return last; |
4966 | } |
4967 | |
4968 | /* Insert PATTERN after AFTER. MAKE_RAW indicates how to turn PATTERN |
4969 | into a real insn. SKIP_DEBUG_INSNS indicates whether to insert after |
4970 | any DEBUG_INSNs. */ |
4971 | |
4972 | static rtx_insn * |
4973 | emit_pattern_after (rtx pattern, rtx_insn *after, bool skip_debug_insns, |
4974 | rtx_insn *(*make_raw) (rtx)) |
4975 | { |
4976 | rtx_insn *prev = after; |
4977 | |
4978 | if (skip_debug_insns) |
4979 | while (DEBUG_INSN_P (prev)) |
4980 | prev = PREV_INSN (insn: prev); |
4981 | |
4982 | if (INSN_P (prev)) |
4983 | return emit_pattern_after_setloc (pattern, after, loc: INSN_LOCATION (insn: prev), |
4984 | make_raw); |
4985 | else |
4986 | return emit_pattern_after_noloc (x: pattern, after, NULL, make_raw); |
4987 | } |
4988 | |
4989 | /* Like emit_insn_after_noloc, but set INSN_LOCATION according to LOC. */ |
4990 | rtx_insn * |
4991 | emit_insn_after_setloc (rtx pattern, rtx_insn *after, location_t loc) |
4992 | { |
4993 | return emit_pattern_after_setloc (pattern, after, loc, make_raw: make_insn_raw); |
4994 | } |
4995 | |
4996 | /* Like emit_insn_after_noloc, but set INSN_LOCATION according to AFTER. */ |
4997 | rtx_insn * |
4998 | emit_insn_after (rtx pattern, rtx_insn *after) |
4999 | { |
5000 | return emit_pattern_after (pattern, after, skip_debug_insns: true, make_raw: make_insn_raw); |
5001 | } |
5002 | |
5003 | /* Like emit_jump_insn_after_noloc, but set INSN_LOCATION according to LOC. */ |
5004 | rtx_jump_insn * |
5005 | emit_jump_insn_after_setloc (rtx pattern, rtx_insn *after, location_t loc) |
5006 | { |
5007 | return as_a <rtx_jump_insn *> ( |
5008 | p: emit_pattern_after_setloc (pattern, after, loc, make_raw: make_jump_insn_raw)); |
5009 | } |
5010 | |
5011 | /* Like emit_jump_insn_after_noloc, but set INSN_LOCATION according to AFTER. */ |
5012 | rtx_jump_insn * |
5013 | emit_jump_insn_after (rtx pattern, rtx_insn *after) |
5014 | { |
5015 | return as_a <rtx_jump_insn *> ( |
5016 | p: emit_pattern_after (pattern, after, skip_debug_insns: true, make_raw: make_jump_insn_raw)); |
5017 | } |
5018 | |
5019 | /* Like emit_call_insn_after_noloc, but set INSN_LOCATION according to LOC. */ |
5020 | rtx_insn * |
5021 | emit_call_insn_after_setloc (rtx pattern, rtx_insn *after, location_t loc) |
5022 | { |
  return emit_pattern_after_setloc (pattern, after, loc, make_call_insn_raw);
5024 | } |
5025 | |
5026 | /* Like emit_call_insn_after_noloc, but set INSN_LOCATION according to AFTER. */ |
5027 | rtx_insn * |
5028 | emit_call_insn_after (rtx pattern, rtx_insn *after) |
5029 | { |
  return emit_pattern_after (pattern, after, true, make_call_insn_raw);
5031 | } |
5032 | |
5033 | /* Like emit_debug_insn_after_noloc, but set INSN_LOCATION according to LOC. */ |
5034 | rtx_insn * |
5035 | emit_debug_insn_after_setloc (rtx pattern, rtx_insn *after, location_t loc) |
5036 | { |
  return emit_pattern_after_setloc (pattern, after, loc, make_debug_insn_raw);
5038 | } |
5039 | |
5040 | /* Like emit_debug_insn_after_noloc, but set INSN_LOCATION according to AFTER. */ |
5041 | rtx_insn * |
5042 | emit_debug_insn_after (rtx pattern, rtx_insn *after) |
5043 | { |
  return emit_pattern_after (pattern, after, false, make_debug_insn_raw);
5045 | } |
5046 | |
5047 | /* Insert PATTERN before BEFORE, setting its INSN_LOCATION to LOC. |
5048 | MAKE_RAW indicates how to turn PATTERN into a real insn. INSNP |
5049 | indicates if PATTERN is meant for an INSN as opposed to a JUMP_INSN, |
5050 | CALL_INSN, etc. */ |
5051 | |
5052 | static rtx_insn * |
5053 | emit_pattern_before_setloc (rtx pattern, rtx_insn *before, location_t loc, |
5054 | bool insnp, rtx_insn *(*make_raw) (rtx)) |
5055 | { |
  rtx_insn *first = PREV_INSN (before);
  rtx_insn *last = emit_pattern_before_noloc (pattern, before,
					      insnp ? before : NULL,
					      NULL, make_raw);
5060 | |
5061 | if (pattern == NULL_RTX || !loc) |
5062 | return last; |
5063 | |
5064 | if (!first) |
5065 | first = get_insns (); |
5066 | else |
    first = NEXT_INSN (first);
5068 | while (1) |
5069 | { |
      if (active_insn_p (first)
	  && !JUMP_TABLE_DATA_P (first) /* FIXME */
	  && !INSN_LOCATION (first))
	INSN_LOCATION (first) = loc;
5074 | if (first == last) |
5075 | break; |
      first = NEXT_INSN (first);
5077 | } |
5078 | return last; |
5079 | } |
5080 | |
5081 | /* Insert PATTERN before BEFORE. MAKE_RAW indicates how to turn PATTERN |
5082 | into a real insn. SKIP_DEBUG_INSNS indicates whether to insert |
5083 | before any DEBUG_INSNs. INSNP indicates if PATTERN is meant for an |
5084 | INSN as opposed to a JUMP_INSN, CALL_INSN, etc. */ |
5085 | |
5086 | static rtx_insn * |
5087 | emit_pattern_before (rtx pattern, rtx_insn *before, bool skip_debug_insns, |
5088 | bool insnp, rtx_insn *(*make_raw) (rtx)) |
5089 | { |
5090 | rtx_insn *next = before; |
5091 | |
5092 | if (skip_debug_insns) |
5093 | while (DEBUG_INSN_P (next)) |
      next = PREV_INSN (next);
5095 | |
5096 | if (INSN_P (next)) |
    return emit_pattern_before_setloc (pattern, before, INSN_LOCATION (next),
				       insnp, make_raw);
  else
    return emit_pattern_before_noloc (pattern, before,
				      insnp ? before : NULL,
				      NULL, make_raw);
5103 | } |
5104 | |
5105 | /* Like emit_insn_before_noloc, but set INSN_LOCATION according to LOC. */ |
5106 | rtx_insn * |
5107 | emit_insn_before_setloc (rtx pattern, rtx_insn *before, location_t loc) |
5108 | { |
  return emit_pattern_before_setloc (pattern, before, loc, true,
				     make_insn_raw);
5111 | } |
5112 | |
5113 | /* Like emit_insn_before_noloc, but set INSN_LOCATION according to BEFORE. */ |
5114 | rtx_insn * |
5115 | emit_insn_before (rtx pattern, rtx_insn *before) |
5116 | { |
  return emit_pattern_before (pattern, before, true, true, make_insn_raw);
5118 | } |
5119 | |
/* Like emit_jump_insn_before_noloc,
   but set INSN_LOCATION according to LOC.  */
5121 | rtx_jump_insn * |
5122 | emit_jump_insn_before_setloc (rtx pattern, rtx_insn *before, location_t loc) |
5123 | { |
5124 | return as_a <rtx_jump_insn *> ( |
	emit_pattern_before_setloc (pattern, before, loc, false,
				    make_jump_insn_raw));
5127 | } |
5128 | |
5129 | /* Like emit_jump_insn_before_noloc, but set INSN_LOCATION according to BEFORE. */ |
5130 | rtx_jump_insn * |
5131 | emit_jump_insn_before (rtx pattern, rtx_insn *before) |
5132 | { |
5133 | return as_a <rtx_jump_insn *> ( |
	emit_pattern_before (pattern, before, true, false,
			     make_jump_insn_raw));
5136 | } |
5137 | |
/* Like emit_call_insn_before_noloc,
   but set INSN_LOCATION according to LOC.  */
5139 | rtx_insn * |
5140 | emit_call_insn_before_setloc (rtx pattern, rtx_insn *before, location_t loc) |
5141 | { |
  return emit_pattern_before_setloc (pattern, before, loc, false,
				     make_call_insn_raw);
5144 | } |
5145 | |
5146 | /* Like emit_call_insn_before_noloc, |
5147 | but set insn_location according to BEFORE. */ |
5148 | rtx_insn * |
5149 | emit_call_insn_before (rtx pattern, rtx_insn *before) |
5150 | { |
  return emit_pattern_before (pattern, before, true, false,
			      make_call_insn_raw);
5153 | } |
5154 | |
/* Like emit_debug_insn_before_noloc,
   but set INSN_LOCATION according to LOC.  */
5156 | rtx_insn * |
5157 | emit_debug_insn_before_setloc (rtx pattern, rtx_insn *before, location_t loc) |
5158 | { |
  return emit_pattern_before_setloc (pattern, before, loc, false,
				     make_debug_insn_raw);
5161 | } |
5162 | |
5163 | /* Like emit_debug_insn_before_noloc, |
5164 | but set insn_location according to BEFORE. */ |
5165 | rtx_insn * |
5166 | emit_debug_insn_before (rtx pattern, rtx_insn *before) |
5167 | { |
  return emit_pattern_before (pattern, before, false, false,
			      make_debug_insn_raw);
5170 | } |
5171 | |
5172 | /* Take X and emit it at the end of the doubly-linked |
5173 | INSN list. |
5174 | |
5175 | Returns the last insn emitted. */ |
5176 | |
5177 | rtx_insn * |
5178 | emit_insn (rtx x) |
5179 | { |
5180 | rtx_insn *last = get_last_insn (); |
5181 | rtx_insn *insn; |
5182 | |
5183 | if (x == NULL_RTX) |
5184 | return last; |
5185 | |
5186 | switch (GET_CODE (x)) |
5187 | { |
5188 | case DEBUG_INSN: |
5189 | case INSN: |
5190 | case JUMP_INSN: |
5191 | case CALL_INSN: |
5192 | case CODE_LABEL: |
5193 | case BARRIER: |
5194 | case NOTE: |
      insn = as_a <rtx_insn *> (x);
5196 | while (insn) |
5197 | { |
5198 | rtx_insn *next = NEXT_INSN (insn); |
5199 | add_insn (insn); |
5200 | last = insn; |
5201 | insn = next; |
5202 | } |
5203 | break; |
5204 | |
5205 | #ifdef ENABLE_RTL_CHECKING |
5206 | case JUMP_TABLE_DATA: |
5207 | case SEQUENCE: |
5208 | gcc_unreachable (); |
5209 | break; |
5210 | #endif |
5211 | |
5212 | default: |
      last = make_insn_raw (x);
      add_insn (last);
5215 | break; |
5216 | } |
5217 | |
5218 | return last; |
5219 | } |
5220 | |
5221 | /* Make an insn of code DEBUG_INSN with pattern X |
5222 | and add it to the end of the doubly-linked list. */ |
5223 | |
5224 | rtx_insn * |
5225 | emit_debug_insn (rtx x) |
5226 | { |
5227 | rtx_insn *last = get_last_insn (); |
5228 | rtx_insn *insn; |
5229 | |
5230 | if (x == NULL_RTX) |
5231 | return last; |
5232 | |
5233 | switch (GET_CODE (x)) |
5234 | { |
5235 | case DEBUG_INSN: |
5236 | case INSN: |
5237 | case JUMP_INSN: |
5238 | case CALL_INSN: |
5239 | case CODE_LABEL: |
5240 | case BARRIER: |
5241 | case NOTE: |
      insn = as_a <rtx_insn *> (x);
5243 | while (insn) |
5244 | { |
5245 | rtx_insn *next = NEXT_INSN (insn); |
5246 | add_insn (insn); |
5247 | last = insn; |
5248 | insn = next; |
5249 | } |
5250 | break; |
5251 | |
5252 | #ifdef ENABLE_RTL_CHECKING |
5253 | case JUMP_TABLE_DATA: |
5254 | case SEQUENCE: |
5255 | gcc_unreachable (); |
5256 | break; |
5257 | #endif |
5258 | |
5259 | default: |
      last = make_debug_insn_raw (x);
      add_insn (last);
5262 | break; |
5263 | } |
5264 | |
5265 | return last; |
5266 | } |
5267 | |
5268 | /* Make an insn of code JUMP_INSN with pattern X |
5269 | and add it to the end of the doubly-linked list. */ |
5270 | |
5271 | rtx_insn * |
5272 | emit_jump_insn (rtx x) |
5273 | { |
5274 | rtx_insn *last = NULL; |
5275 | rtx_insn *insn; |
5276 | |
5277 | switch (GET_CODE (x)) |
5278 | { |
5279 | case DEBUG_INSN: |
5280 | case INSN: |
5281 | case JUMP_INSN: |
5282 | case CALL_INSN: |
5283 | case CODE_LABEL: |
5284 | case BARRIER: |
5285 | case NOTE: |
      insn = as_a <rtx_insn *> (x);
5287 | while (insn) |
5288 | { |
5289 | rtx_insn *next = NEXT_INSN (insn); |
5290 | add_insn (insn); |
5291 | last = insn; |
5292 | insn = next; |
5293 | } |
5294 | break; |
5295 | |
5296 | #ifdef ENABLE_RTL_CHECKING |
5297 | case JUMP_TABLE_DATA: |
5298 | case SEQUENCE: |
5299 | gcc_unreachable (); |
5300 | break; |
5301 | #endif |
5302 | |
5303 | default: |
      last = make_jump_insn_raw (x);
      add_insn (last);
5306 | break; |
5307 | } |
5308 | |
5309 | return last; |
5310 | } |
5311 | |
5312 | /* Make an insn of code JUMP_INSN with pattern X, |
5313 | add a REG_BR_PROB note that indicates very likely probability, |
5314 | and add it to the end of the doubly-linked list. */ |
5315 | |
5316 | rtx_insn * |
5317 | emit_likely_jump_insn (rtx x) |
5318 | { |
5319 | rtx_insn *jump = emit_jump_insn (x); |
5320 | add_reg_br_prob_note (jump, profile_probability::very_likely ()); |
5321 | return jump; |
5322 | } |
5323 | |
5324 | /* Make an insn of code JUMP_INSN with pattern X, |
5325 | add a REG_BR_PROB note that indicates very unlikely probability, |
5326 | and add it to the end of the doubly-linked list. */ |
5327 | |
5328 | rtx_insn * |
5329 | emit_unlikely_jump_insn (rtx x) |
5330 | { |
5331 | rtx_insn *jump = emit_jump_insn (x); |
5332 | add_reg_br_prob_note (jump, profile_probability::very_unlikely ()); |
5333 | return jump; |
5334 | } |
5335 | |
5336 | /* Make an insn of code CALL_INSN with pattern X |
5337 | and add it to the end of the doubly-linked list. */ |
5338 | |
5339 | rtx_insn * |
5340 | emit_call_insn (rtx x) |
5341 | { |
5342 | rtx_insn *insn; |
5343 | |
5344 | switch (GET_CODE (x)) |
5345 | { |
5346 | case DEBUG_INSN: |
5347 | case INSN: |
5348 | case JUMP_INSN: |
5349 | case CALL_INSN: |
5350 | case CODE_LABEL: |
5351 | case BARRIER: |
5352 | case NOTE: |
5353 | insn = emit_insn (x); |
5354 | break; |
5355 | |
5356 | #ifdef ENABLE_RTL_CHECKING |
5357 | case SEQUENCE: |
5358 | case JUMP_TABLE_DATA: |
5359 | gcc_unreachable (); |
5360 | break; |
5361 | #endif |
5362 | |
5363 | default: |
      insn = make_call_insn_raw (x);
5365 | add_insn (insn); |
5366 | break; |
5367 | } |
5368 | |
5369 | return insn; |
5370 | } |
5371 | |
5372 | /* Add the label LABEL to the end of the doubly-linked list. */ |
5373 | |
5374 | rtx_code_label * |
5375 | emit_label (rtx uncast_label) |
5376 | { |
  rtx_code_label *label = as_a <rtx_code_label *> (uncast_label);
5378 | |
5379 | gcc_checking_assert (INSN_UID (label) == 0); |
  INSN_UID (label) = cur_insn_uid++;
  add_insn (label);
5382 | return label; |
5383 | } |
5384 | |
5385 | /* Make an insn of code JUMP_TABLE_DATA |
5386 | and add it to the end of the doubly-linked list. */ |
5387 | |
5388 | rtx_jump_table_data * |
5389 | emit_jump_table_data (rtx table) |
5390 | { |
5391 | rtx_jump_table_data *jump_table_data = |
    as_a <rtx_jump_table_data *> (rtx_alloc (JUMP_TABLE_DATA));
  INSN_UID (jump_table_data) = cur_insn_uid++;
  PATTERN (jump_table_data) = table;
  BLOCK_FOR_INSN (jump_table_data) = NULL;
  add_insn (jump_table_data);
5397 | return jump_table_data; |
5398 | } |
5399 | |
5400 | /* Make an insn of code BARRIER |
5401 | and add it to the end of the doubly-linked list. */ |
5402 | |
5403 | rtx_barrier * |
5404 | emit_barrier (void) |
5405 | { |
  rtx_barrier *barrier = as_a <rtx_barrier *> (rtx_alloc (BARRIER));
  INSN_UID (barrier) = cur_insn_uid++;
  add_insn (barrier);
5409 | return barrier; |
5410 | } |
5411 | |
5412 | /* Emit a copy of note ORIG. */ |
5413 | |
5414 | rtx_note * |
5415 | emit_note_copy (rtx_note *orig) |
5416 | { |
5417 | enum insn_note kind = (enum insn_note) NOTE_KIND (orig); |
  rtx_note *note = make_note_raw (kind);
  NOTE_DATA (note) = NOTE_DATA (orig);
  add_insn (note);
5421 | return note; |
5422 | } |
5423 | |
/* Make an insn of code NOTE with kind KIND
   and add it to the end of the doubly-linked list.  */
5426 | |
5427 | rtx_note * |
5428 | emit_note (enum insn_note kind) |
5429 | { |
  rtx_note *note = make_note_raw (kind);
  add_insn (note);
5432 | return note; |
5433 | } |
5434 | |
5435 | /* Emit a clobber of lvalue X. */ |
5436 | |
5437 | rtx_insn * |
5438 | emit_clobber (rtx x) |
5439 | { |
5440 | /* CONCATs should not appear in the insn stream. */ |
5441 | if (GET_CODE (x) == CONCAT) |
5442 | { |
5443 | emit_clobber (XEXP (x, 0)); |
5444 | return emit_clobber (XEXP (x, 1)); |
5445 | } |
5446 | return emit_insn (gen_rtx_CLOBBER (VOIDmode, x)); |
5447 | } |
5448 | |
5449 | /* Return a sequence of insns to clobber lvalue X. */ |
5450 | |
5451 | rtx_insn * |
5452 | gen_clobber (rtx x) |
5453 | { |
5454 | rtx_insn *seq; |
5455 | |
5456 | start_sequence (); |
5457 | emit_clobber (x); |
5458 | seq = get_insns (); |
5459 | end_sequence (); |
5460 | return seq; |
5461 | } |
5462 | |
5463 | /* Emit a use of rvalue X. */ |
5464 | |
5465 | rtx_insn * |
5466 | emit_use (rtx x) |
5467 | { |
5468 | /* CONCATs should not appear in the insn stream. */ |
5469 | if (GET_CODE (x) == CONCAT) |
5470 | { |
5471 | emit_use (XEXP (x, 0)); |
5472 | return emit_use (XEXP (x, 1)); |
5473 | } |
5474 | return emit_insn (gen_rtx_USE (VOIDmode, x)); |
5475 | } |
5476 | |
5477 | /* Return a sequence of insns to use rvalue X. */ |
5478 | |
5479 | rtx_insn * |
5480 | gen_use (rtx x) |
5481 | { |
5482 | rtx_insn *seq; |
5483 | |
5484 | start_sequence (); |
5485 | emit_use (x); |
5486 | seq = get_insns (); |
5487 | end_sequence (); |
5488 | return seq; |
5489 | } |
5490 | |
5491 | /* Notes like REG_EQUAL and REG_EQUIV refer to a set in an instruction. |
5492 | Return the set in INSN that such notes describe, or NULL if the notes |
5493 | have no meaning for INSN. */ |
5494 | |
5495 | rtx |
5496 | set_for_reg_notes (rtx insn) |
5497 | { |
5498 | rtx pat, reg; |
5499 | |
5500 | if (!INSN_P (insn)) |
5501 | return NULL_RTX; |
5502 | |
5503 | pat = PATTERN (insn); |
5504 | if (GET_CODE (pat) == PARALLEL) |
5505 | { |
5506 | /* We do not use single_set because that ignores SETs of unused |
5507 | registers. REG_EQUAL and REG_EQUIV notes really do require the |
5508 | PARALLEL to have a single SET. */ |
5509 | if (multiple_sets (insn)) |
5510 | return NULL_RTX; |
5511 | pat = XVECEXP (pat, 0, 0); |
5512 | } |
5513 | |
5514 | if (GET_CODE (pat) != SET) |
5515 | return NULL_RTX; |
5516 | |
5517 | reg = SET_DEST (pat); |
5518 | |
5519 | /* Notes apply to the contents of a STRICT_LOW_PART. */ |
5520 | if (GET_CODE (reg) == STRICT_LOW_PART |
5521 | || GET_CODE (reg) == ZERO_EXTRACT) |
5522 | reg = XEXP (reg, 0); |
5523 | |
5524 | /* Check that we have a register. */ |
5525 | if (!(REG_P (reg) || GET_CODE (reg) == SUBREG)) |
5526 | return NULL_RTX; |
5527 | |
5528 | return pat; |
5529 | } |
5530 | |
5531 | /* Place a note of KIND on insn INSN with DATUM as the datum. If a |
5532 | note of this type already exists, remove it first. */ |
5533 | |
5534 | rtx |
5535 | set_unique_reg_note (rtx insn, enum reg_note kind, rtx datum) |
5536 | { |
5537 | rtx note = find_reg_note (insn, kind, NULL_RTX); |
5538 | |
5539 | switch (kind) |
5540 | { |
5541 | case REG_EQUAL: |
5542 | case REG_EQUIV: |
5543 | /* We need to support the REG_EQUAL on USE trick of find_reloads. */ |
5544 | if (!set_for_reg_notes (insn) && GET_CODE (PATTERN (insn)) != USE) |
5545 | return NULL_RTX; |
5546 | |
      /* Don't add ASM_OPERANDS REG_EQUAL/REG_EQUIV notes.
	 They serve no useful purpose and break eliminate_regs.  */
5549 | if (GET_CODE (datum) == ASM_OPERANDS) |
5550 | return NULL_RTX; |
5551 | |
5552 | /* Notes with side effects are dangerous. Even if the side-effect |
5553 | initially mirrors one in PATTERN (INSN), later optimizations |
5554 | might alter the way that the final register value is calculated |
5555 | and so move or alter the side-effect in some way. The note would |
5556 | then no longer be a valid substitution for SET_SRC. */ |
5557 | if (side_effects_p (datum)) |
5558 | return NULL_RTX; |
5559 | break; |
5560 | |
5561 | default: |
5562 | break; |
5563 | } |
5564 | |
5565 | if (note) |
5566 | XEXP (note, 0) = datum; |
5567 | else |
5568 | { |
5569 | add_reg_note (insn, kind, datum); |
5570 | note = REG_NOTES (insn); |
5571 | } |
5572 | |
5573 | switch (kind) |
5574 | { |
5575 | case REG_EQUAL: |
5576 | case REG_EQUIV: |
      df_notes_rescan (as_a <rtx_insn *> (insn));
5578 | break; |
5579 | default: |
5580 | break; |
5581 | } |
5582 | |
5583 | return note; |
5584 | } |
5585 | |
5586 | /* Like set_unique_reg_note, but don't do anything unless INSN sets DST. */ |
5587 | rtx |
5588 | set_dst_reg_note (rtx insn, enum reg_note kind, rtx datum, rtx dst) |
5589 | { |
5590 | rtx set = set_for_reg_notes (insn); |
5591 | |
5592 | if (set && SET_DEST (set) == dst) |
5593 | return set_unique_reg_note (insn, kind, datum); |
5594 | return NULL_RTX; |
5595 | } |
5596 | |
5597 | /* Emit the rtl pattern X as an appropriate kind of insn. Also emit a |
5598 | following barrier if the instruction needs one and if ALLOW_BARRIER_P |
5599 | is true. |
5600 | |
5601 | If X is a label, it is simply added into the insn chain. */ |
5602 | |
5603 | rtx_insn * |
5604 | emit (rtx x, bool allow_barrier_p) |
5605 | { |
5606 | enum rtx_code code = classify_insn (x); |
5607 | |
5608 | switch (code) |
5609 | { |
5610 | case CODE_LABEL: |
      return emit_label (x);
5612 | case INSN: |
5613 | return emit_insn (x); |
5614 | case JUMP_INSN: |
5615 | { |
5616 | rtx_insn *insn = emit_jump_insn (x); |
5617 | if (allow_barrier_p |
5618 | && (any_uncondjump_p (insn) || GET_CODE (x) == RETURN)) |
5619 | return emit_barrier (); |
5620 | return insn; |
5621 | } |
5622 | case CALL_INSN: |
5623 | return emit_call_insn (x); |
5624 | case DEBUG_INSN: |
5625 | return emit_debug_insn (x); |
5626 | default: |
5627 | gcc_unreachable (); |
5628 | } |
5629 | } |
5630 | |
5631 | /* Space for free sequence stack entries. */ |
5632 | static GTY ((deletable)) struct sequence_stack *free_sequence_stack; |
5633 | |
5634 | /* Begin emitting insns to a sequence. If this sequence will contain |
5635 | something that might cause the compiler to pop arguments to function |
5636 | calls (because those pops have previously been deferred; see |
5637 | INHIBIT_DEFER_POP for more details), use do_pending_stack_adjust |
5638 | before calling this function. That will ensure that the deferred |
5639 | pops are not accidentally emitted in the middle of this sequence. */ |
5640 | |
5641 | void |
5642 | start_sequence (void) |
5643 | { |
5644 | struct sequence_stack *tem; |
5645 | |
5646 | if (free_sequence_stack != NULL) |
5647 | { |
5648 | tem = free_sequence_stack; |
5649 | free_sequence_stack = tem->next; |
5650 | } |
5651 | else |
5652 | tem = ggc_alloc<sequence_stack> (); |
5653 | |
5654 | tem->next = get_current_sequence ()->next; |
5655 | tem->first = get_insns (); |
5656 | tem->last = get_last_insn (); |
5657 | get_current_sequence ()->next = tem; |
5658 | |
5659 | set_first_insn (0); |
5660 | set_last_insn (0); |
5661 | } |
5662 | |
5663 | /* Set up the insn chain starting with FIRST as the current sequence, |
5664 | saving the previously current one. See the documentation for |
5665 | start_sequence for more information about how to use this function. */ |
5666 | |
5667 | void |
5668 | push_to_sequence (rtx_insn *first) |
5669 | { |
5670 | rtx_insn *last; |
5671 | |
5672 | start_sequence (); |
5673 | |
  for (last = first; last && NEXT_INSN (last); last = NEXT_INSN (last))
5675 | ; |
5676 | |
5677 | set_first_insn (first); |
5678 | set_last_insn (last); |
5679 | } |
5680 | |
5681 | /* Like push_to_sequence, but take the last insn as an argument to avoid |
5682 | looping through the list. */ |
5683 | |
5684 | void |
5685 | push_to_sequence2 (rtx_insn *first, rtx_insn *last) |
5686 | { |
5687 | start_sequence (); |
5688 | |
5689 | set_first_insn (first); |
5690 | set_last_insn (last); |
5691 | } |
5692 | |
5693 | /* Set up the outer-level insn chain |
5694 | as the current sequence, saving the previously current one. */ |
5695 | |
5696 | void |
5697 | push_topmost_sequence (void) |
5698 | { |
5699 | struct sequence_stack *top; |
5700 | |
5701 | start_sequence (); |
5702 | |
5703 | top = get_topmost_sequence (); |
5704 | set_first_insn (top->first); |
5705 | set_last_insn (top->last); |
5706 | } |
5707 | |
5708 | /* After emitting to the outer-level insn chain, update the outer-level |
5709 | insn chain, and restore the previous saved state. */ |
5710 | |
5711 | void |
5712 | pop_topmost_sequence (void) |
5713 | { |
5714 | struct sequence_stack *top; |
5715 | |
5716 | top = get_topmost_sequence (); |
5717 | top->first = get_insns (); |
5718 | top->last = get_last_insn (); |
5719 | |
5720 | end_sequence (); |
5721 | } |
5722 | |
5723 | /* After emitting to a sequence, restore previous saved state. |
5724 | |
5725 | To get the contents of the sequence just made, you must call |
5726 | `get_insns' *before* calling here. |
5727 | |
5728 | If the compiler might have deferred popping arguments while |
5729 | generating this sequence, and this sequence will not be immediately |
5730 | inserted into the instruction stream, use do_pending_stack_adjust |
5731 | before calling get_insns. That will ensure that the deferred |
5732 | pops are inserted into this sequence, and not into some random |
5733 | location in the instruction stream. See INHIBIT_DEFER_POP for more |
5734 | information about deferred popping of arguments. */ |
5735 | |
5736 | void |
5737 | end_sequence (void) |
5738 | { |
5739 | struct sequence_stack *tem = get_current_sequence ()->next; |
5740 | |
5741 | set_first_insn (tem->first); |
5742 | set_last_insn (tem->last); |
5743 | get_current_sequence ()->next = tem->next; |
5744 | |
  memset (tem, 0, sizeof (*tem));
5746 | tem->next = free_sequence_stack; |
5747 | free_sequence_stack = tem; |
5748 | } |
5749 | |
5750 | /* Return true if currently emitting into a sequence. */ |
5751 | |
5752 | bool |
5753 | in_sequence_p (void) |
5754 | { |
5755 | return get_current_sequence ()->next != 0; |
5756 | } |
5757 | |
5758 | /* Put the various virtual registers into REGNO_REG_RTX. */ |
5759 | |
5760 | static void |
5761 | init_virtual_regs (void) |
5762 | { |
5763 | regno_reg_rtx[VIRTUAL_INCOMING_ARGS_REGNUM] = virtual_incoming_args_rtx; |
5764 | regno_reg_rtx[VIRTUAL_STACK_VARS_REGNUM] = virtual_stack_vars_rtx; |
5765 | regno_reg_rtx[VIRTUAL_STACK_DYNAMIC_REGNUM] = virtual_stack_dynamic_rtx; |
5766 | regno_reg_rtx[VIRTUAL_OUTGOING_ARGS_REGNUM] = virtual_outgoing_args_rtx; |
5767 | regno_reg_rtx[VIRTUAL_CFA_REGNUM] = virtual_cfa_rtx; |
5768 | regno_reg_rtx[VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM] |
5769 | = virtual_preferred_stack_boundary_rtx; |
5770 | } |
5771 | |
5772 | |
5773 | /* Used by copy_insn_1 to avoid copying SCRATCHes more than once. */ |
5774 | static rtx copy_insn_scratch_in[MAX_RECOG_OPERANDS]; |
5775 | static rtx copy_insn_scratch_out[MAX_RECOG_OPERANDS]; |
5776 | static int copy_insn_n_scratches; |
5777 | |
5778 | /* When an insn is being copied by copy_insn_1, this is nonzero if we have |
5779 | copied an ASM_OPERANDS. |
5780 | In that case, it is the original input-operand vector. */ |
5781 | static rtvec orig_asm_operands_vector; |
5782 | |
5783 | /* When an insn is being copied by copy_insn_1, this is nonzero if we have |
5784 | copied an ASM_OPERANDS. |
5785 | In that case, it is the copied input-operand vector. */ |
5786 | static rtvec copy_asm_operands_vector; |
5787 | |
5788 | /* Likewise for the constraints vector. */ |
5789 | static rtvec orig_asm_constraints_vector; |
5790 | static rtvec copy_asm_constraints_vector; |
5791 | |
5792 | /* Recursively create a new copy of an rtx for copy_insn. |
5793 | This function differs from copy_rtx in that it handles SCRATCHes and |
5794 | ASM_OPERANDs properly. |
5795 | Normally, this function is not used directly; use copy_insn as front end. |
5796 | However, you could first copy an insn pattern with copy_insn and then use |
5797 | this function afterwards to properly copy any REG_NOTEs containing |
5798 | SCRATCHes. */ |
5799 | |
5800 | rtx |
5801 | copy_insn_1 (rtx orig) |
5802 | { |
5803 | rtx copy; |
5804 | int i, j; |
5805 | RTX_CODE code; |
5806 | const char *format_ptr; |
5807 | |
5808 | if (orig == NULL) |
5809 | return NULL; |
5810 | |
5811 | code = GET_CODE (orig); |
5812 | |
5813 | switch (code) |
5814 | { |
5815 | case REG: |
5816 | case DEBUG_EXPR: |
5817 | CASE_CONST_ANY: |
5818 | case SYMBOL_REF: |
5819 | case CODE_LABEL: |
5820 | case PC: |
5821 | case RETURN: |
5822 | case SIMPLE_RETURN: |
5823 | return orig; |
5824 | case CLOBBER: |
5825 | /* Share clobbers of hard registers, but do not share pseudo reg |
5826 | clobbers or clobbers of hard registers that originated as pseudos. |
5827 | This is needed to allow safe register renaming. */ |
5828 | if (REG_P (XEXP (orig, 0)) |
5829 | && HARD_REGISTER_NUM_P (REGNO (XEXP (orig, 0))) |
5830 | && HARD_REGISTER_NUM_P (ORIGINAL_REGNO (XEXP (orig, 0)))) |
5831 | return orig; |
5832 | break; |
5833 | |
5834 | case SCRATCH: |
5835 | for (i = 0; i < copy_insn_n_scratches; i++) |
5836 | if (copy_insn_scratch_in[i] == orig) |
5837 | return copy_insn_scratch_out[i]; |
5838 | break; |
5839 | |
5840 | case CONST: |
5841 | if (shared_const_p (orig)) |
5842 | return orig; |
5843 | break; |
5844 | |
5845 | /* A MEM with a constant address is not sharable. The problem is that |
5846 | the constant address may need to be reloaded. If the mem is shared, |
5847 | then reloading one copy of this mem will cause all copies to appear |
5848 | to have been reloaded. */ |
5849 | |
5850 | default: |
5851 | break; |
5852 | } |
5853 | |
5854 | /* Copy the various flags, fields, and other information. We assume |
5855 | that all fields need copying, and then clear the fields that should |
5856 | not be copied. That is the sensible default behavior, and forces |
5857 | us to explicitly document why we are *not* copying a flag. */ |
5858 | copy = shallow_copy_rtx (orig); |
5859 | |
5860 | /* We do not copy JUMP, CALL, or FRAME_RELATED for INSNs. */ |
5861 | if (INSN_P (orig)) |
5862 | { |
5863 | RTX_FLAG (copy, jump) = 0; |
5864 | RTX_FLAG (copy, call) = 0; |
5865 | RTX_FLAG (copy, frame_related) = 0; |
5866 | } |
5867 | |
5868 | format_ptr = GET_RTX_FORMAT (GET_CODE (copy)); |
5869 | |
5870 | for (i = 0; i < GET_RTX_LENGTH (GET_CODE (copy)); i++) |
5871 | switch (*format_ptr++) |
5872 | { |
5873 | case 'e': |
5874 | if (XEXP (orig, i) != NULL) |
5875 | XEXP (copy, i) = copy_insn_1 (XEXP (orig, i)); |
5876 | break; |
5877 | |
5878 | case 'E': |
5879 | case 'V': |
5880 | if (XVEC (orig, i) == orig_asm_constraints_vector) |
5881 | XVEC (copy, i) = copy_asm_constraints_vector; |
5882 | else if (XVEC (orig, i) == orig_asm_operands_vector) |
5883 | XVEC (copy, i) = copy_asm_operands_vector; |
5884 | else if (XVEC (orig, i) != NULL) |
5885 | { |
5886 | XVEC (copy, i) = rtvec_alloc (XVECLEN (orig, i)); |
5887 | for (j = 0; j < XVECLEN (copy, i); j++) |
5888 | XVECEXP (copy, i, j) = copy_insn_1 (XVECEXP (orig, i, j)); |
5889 | } |
5890 | break; |
5891 | |
5892 | case 't': |
5893 | case 'w': |
5894 | case 'i': |
5895 | case 'p': |
5896 | case 's': |
5897 | case 'S': |
5898 | case 'u': |
5899 | case '0': |
5900 | /* These are left unchanged. */ |
5901 | break; |
5902 | |
5903 | default: |
5904 | gcc_unreachable (); |
5905 | } |
5906 | |
5907 | if (code == SCRATCH) |
5908 | { |
5909 | i = copy_insn_n_scratches++; |
5910 | gcc_assert (i < MAX_RECOG_OPERANDS); |
5911 | copy_insn_scratch_in[i] = orig; |
5912 | copy_insn_scratch_out[i] = copy; |
5913 | } |
5914 | else if (code == ASM_OPERANDS) |
5915 | { |
5916 | orig_asm_operands_vector = ASM_OPERANDS_INPUT_VEC (orig); |
5917 | copy_asm_operands_vector = ASM_OPERANDS_INPUT_VEC (copy); |
5918 | orig_asm_constraints_vector = ASM_OPERANDS_INPUT_CONSTRAINT_VEC (orig); |
5919 | copy_asm_constraints_vector = ASM_OPERANDS_INPUT_CONSTRAINT_VEC (copy); |
5920 | } |
5921 | |
5922 | return copy; |
5923 | } |
5924 | |
5925 | /* Create a new copy of an rtx. |
5926 | This function differs from copy_rtx in that it handles SCRATCHes and |
5927 | ASM_OPERANDs properly. |
5928 | INSN doesn't really have to be a full INSN; it could be just the |
5929 | pattern. */ |
5930 | rtx |
5931 | copy_insn (rtx insn) |
5932 | { |
5933 | copy_insn_n_scratches = 0; |
5934 | orig_asm_operands_vector = 0; |
5935 | orig_asm_constraints_vector = 0; |
5936 | copy_asm_operands_vector = 0; |
5937 | copy_asm_constraints_vector = 0; |
  return copy_insn_1 (insn);
5939 | } |
5940 | |
5941 | /* Return a copy of INSN that can be used in a SEQUENCE delay slot, |
   on the assumption that INSN itself remains in its original place.  */
5943 | |
5944 | rtx_insn * |
5945 | copy_delay_slot_insn (rtx_insn *insn) |
5946 | { |
5947 | /* Copy INSN with its rtx_code, all its notes, location etc. */ |
  insn = as_a <rtx_insn *> (copy_rtx (insn));
5949 | INSN_UID (insn) = cur_insn_uid++; |
5950 | return insn; |
5951 | } |
5952 | |
5953 | /* Initialize data structures and variables in this file |
5954 | before generating rtl for each function. */ |
5955 | |
5956 | void |
5957 | init_emit (void) |
5958 | { |
5959 | set_first_insn (NULL); |
5960 | set_last_insn (NULL); |
5961 | if (param_min_nondebug_insn_uid) |
5962 | cur_insn_uid = param_min_nondebug_insn_uid; |
5963 | else |
5964 | cur_insn_uid = 1; |
5965 | cur_debug_insn_uid = 1; |
5966 | reg_rtx_no = LAST_VIRTUAL_REGISTER + 1; |
5967 | first_label_num = label_num; |
5968 | get_current_sequence ()->next = NULL; |
5969 | |
5970 | /* Init the tables that describe all the pseudo regs. */ |
5971 | |
5972 | crtl->emit.regno_pointer_align_length = LAST_VIRTUAL_REGISTER + 101; |
5973 | |
5974 | crtl->emit.regno_pointer_align |
5975 | = XCNEWVEC (unsigned char, crtl->emit.regno_pointer_align_length); |
5976 | |
5977 | regno_reg_rtx |
5978 | = ggc_cleared_vec_alloc<rtx> (crtl->emit.regno_pointer_align_length); |
5979 | |
5980 | /* Put copies of all the hard registers into regno_reg_rtx. */ |
  memcpy (regno_reg_rtx,
5982 | initial_regno_reg_rtx, |
5983 | FIRST_PSEUDO_REGISTER * sizeof (rtx)); |
5984 | |
5985 | /* Put copies of all the virtual register rtx into regno_reg_rtx. */ |
5986 | init_virtual_regs (); |
5987 | |
5988 | /* Indicate that the virtual registers and stack locations are |
5989 | all pointers. */ |
5990 | REG_POINTER (stack_pointer_rtx) = 1; |
5991 | REG_POINTER (frame_pointer_rtx) = 1; |
5992 | REG_POINTER (hard_frame_pointer_rtx) = 1; |
5993 | REG_POINTER (arg_pointer_rtx) = 1; |
5994 | |
5995 | REG_POINTER (virtual_incoming_args_rtx) = 1; |
5996 | REG_POINTER (virtual_stack_vars_rtx) = 1; |
5997 | REG_POINTER (virtual_stack_dynamic_rtx) = 1; |
5998 | REG_POINTER (virtual_outgoing_args_rtx) = 1; |
5999 | REG_POINTER (virtual_cfa_rtx) = 1; |
6000 | |
6001 | #ifdef STACK_BOUNDARY |
6002 | REGNO_POINTER_ALIGN (STACK_POINTER_REGNUM) = STACK_BOUNDARY; |
6003 | REGNO_POINTER_ALIGN (FRAME_POINTER_REGNUM) = STACK_BOUNDARY; |
6004 | REGNO_POINTER_ALIGN (HARD_FRAME_POINTER_REGNUM) = STACK_BOUNDARY; |
6005 | REGNO_POINTER_ALIGN (ARG_POINTER_REGNUM) = STACK_BOUNDARY; |
6006 | |
6007 | REGNO_POINTER_ALIGN (VIRTUAL_INCOMING_ARGS_REGNUM) = STACK_BOUNDARY; |
6008 | REGNO_POINTER_ALIGN (VIRTUAL_STACK_VARS_REGNUM) = STACK_BOUNDARY; |
6009 | REGNO_POINTER_ALIGN (VIRTUAL_STACK_DYNAMIC_REGNUM) = STACK_BOUNDARY; |
6010 | REGNO_POINTER_ALIGN (VIRTUAL_OUTGOING_ARGS_REGNUM) = STACK_BOUNDARY; |
6011 | |
6012 | REGNO_POINTER_ALIGN (VIRTUAL_CFA_REGNUM) = BITS_PER_WORD; |
6013 | #endif |
6014 | |
6015 | #ifdef INIT_EXPANDERS |
6016 | INIT_EXPANDERS; |
6017 | #endif |
6018 | } |
6019 | |
6020 | /* Return the value of element I of CONST_VECTOR X as a wide_int. */ |
6021 | |
6022 | wide_int |
6023 | const_vector_int_elt (const_rtx x, unsigned int i) |
6024 | { |
6025 | /* First handle elements that are directly encoded. */ |
6026 | machine_mode elt_mode = GET_MODE_INNER (GET_MODE (x)); |
6027 | if (i < (unsigned int) XVECLEN (x, 0)) |
6028 | return rtx_mode_t (CONST_VECTOR_ENCODED_ELT (x, i), elt_mode); |
6029 | |
6030 | /* Identify the pattern that contains element I and work out the index of |
6031 | the last encoded element for that pattern. */ |
6032 | unsigned int encoded_nelts = const_vector_encoded_nelts (x); |
6033 | unsigned int npatterns = CONST_VECTOR_NPATTERNS (x); |
6034 | unsigned int count = i / npatterns; |
6035 | unsigned int pattern = i % npatterns; |
6036 | unsigned int final_i = encoded_nelts - npatterns + pattern; |
6037 | |
6038 | /* If there are no steps, the final encoded value is the right one. */ |
6039 | if (!CONST_VECTOR_STEPPED_P (x)) |
6040 | return rtx_mode_t (CONST_VECTOR_ENCODED_ELT (x, final_i), elt_mode); |
6041 | |
6042 | /* Otherwise work out the value from the last two encoded elements. */ |
6043 | rtx v1 = CONST_VECTOR_ENCODED_ELT (x, final_i - npatterns); |
6044 | rtx v2 = CONST_VECTOR_ENCODED_ELT (x, final_i); |
  wide_int diff = wi::sub (rtx_mode_t (v2, elt_mode),
			   rtx_mode_t (v1, elt_mode));
  return wi::add (rtx_mode_t (v2, elt_mode), (count - 2) * diff);
6048 | } |
6049 | |
6050 | /* Return the value of element I of CONST_VECTOR X. */ |
6051 | |
6052 | rtx |
6053 | const_vector_elt (const_rtx x, unsigned int i) |
6054 | { |
6055 | /* First handle elements that are directly encoded. */ |
6056 | if (i < (unsigned int) XVECLEN (x, 0)) |
6057 | return CONST_VECTOR_ENCODED_ELT (x, i); |
6058 | |
6059 | /* If there are no steps, the final encoded value is the right one. */ |
6060 | if (!CONST_VECTOR_STEPPED_P (x)) |
6061 | { |
6062 | /* Identify the pattern that contains element I and work out the index of |
6063 | the last encoded element for that pattern. */ |
6064 | unsigned int encoded_nelts = const_vector_encoded_nelts (x); |
6065 | unsigned int npatterns = CONST_VECTOR_NPATTERNS (x); |
6066 | unsigned int pattern = i % npatterns; |
6067 | unsigned int final_i = encoded_nelts - npatterns + pattern; |
6068 | return CONST_VECTOR_ENCODED_ELT (x, final_i); |
6069 | } |
6070 | |
6071 | /* Otherwise work out the value from the last two encoded elements. */ |
  return immed_wide_int_const (const_vector_int_elt (x, i),
6073 | GET_MODE_INNER (GET_MODE (x))); |
6074 | } |
6075 | |
6076 | /* Return true if X is a valid element for a CONST_VECTOR of the given |
6077 | mode. */ |
6078 | |
6079 | bool |
6080 | valid_for_const_vector_p (machine_mode, rtx x) |
6081 | { |
6082 | return (CONST_SCALAR_INT_P (x) |
6083 | || CONST_POLY_INT_P (x) |
6084 | || CONST_DOUBLE_AS_FLOAT_P (x) |
6085 | || CONST_FIXED_P (x)); |
6086 | } |
6087 | |
6088 | /* Generate a vector constant of mode MODE in which every element has |
6089 | value ELT. */ |
6090 | |
6091 | rtx |
6092 | gen_const_vec_duplicate (machine_mode mode, rtx elt) |
6093 | { |
6094 | rtx_vector_builder builder (mode, 1, 1); |
  builder.quick_push (elt);
6096 | return builder.build (); |
6097 | } |
6098 | |
6099 | /* Return a vector rtx of mode MODE in which every element has value X. |
6100 | The result will be a constant if X is constant. */ |
6101 | |
6102 | rtx |
6103 | gen_vec_duplicate (machine_mode mode, rtx x) |
6104 | { |
6105 | if (valid_for_const_vector_p (mode, x)) |
    return gen_const_vec_duplicate (mode, x);
6107 | return gen_rtx_VEC_DUPLICATE (mode, x); |
6108 | } |
6109 | |
6110 | /* A subroutine of const_vec_series_p that handles the case in which: |
6111 | |
6112 | (GET_CODE (X) == CONST_VECTOR |
6113 | && CONST_VECTOR_NPATTERNS (X) == 1 |
6114 | && !CONST_VECTOR_DUPLICATE_P (X)) |
6115 | |
6116 | is known to hold. */ |
6117 | |
6118 | bool |
6119 | const_vec_series_p_1 (const_rtx x, rtx *base_out, rtx *step_out) |
6120 | { |
6121 | /* Stepped sequences are only defined for integers, to avoid specifying |
6122 | rounding behavior. */ |
6123 | if (GET_MODE_CLASS (GET_MODE (x)) != MODE_VECTOR_INT) |
6124 | return false; |
6125 | |
6126 | /* A non-duplicated vector with two elements can always be seen as a |
6127 | series with a nonzero step. Longer vectors must have a stepped |
6128 | encoding. */ |
  if (maybe_ne (CONST_VECTOR_NUNITS (x), 2)
6130 | && !CONST_VECTOR_STEPPED_P (x)) |
6131 | return false; |
6132 | |
6133 | /* Calculate the step between the first and second elements. */ |
6134 | scalar_mode inner = GET_MODE_INNER (GET_MODE (x)); |
6135 | rtx base = CONST_VECTOR_ELT (x, 0); |
  rtx step = simplify_binary_operation (MINUS, inner,
					CONST_VECTOR_ENCODED_ELT (x, 1), base);
6138 | if (rtx_equal_p (step, CONST0_RTX (inner))) |
6139 | return false; |
6140 | |
6141 | /* If we have a stepped encoding, check that the step between the |
6142 | second and third elements is the same as STEP. */ |
6143 | if (CONST_VECTOR_STEPPED_P (x)) |
6144 | { |
      rtx diff = simplify_binary_operation (MINUS, inner,
6146 | CONST_VECTOR_ENCODED_ELT (x, 2), |
6147 | CONST_VECTOR_ENCODED_ELT (x, 1)); |
6148 | if (!rtx_equal_p (step, diff)) |
6149 | return false; |
6150 | } |
6151 | |
6152 | *base_out = base; |
6153 | *step_out = step; |
6154 | return true; |
6155 | } |
6156 | |
6157 | /* Generate a vector constant of mode MODE in which element I has |
6158 | the value BASE + I * STEP. */ |
6159 | |
6160 | rtx |
6161 | gen_const_vec_series (machine_mode mode, rtx base, rtx step) |
6162 | { |
6163 | gcc_assert (valid_for_const_vector_p (mode, base) |
6164 | && valid_for_const_vector_p (mode, step)); |
6165 | |
6166 | rtx_vector_builder builder (mode, 1, 3); |
  builder.quick_push (base);
  for (int i = 1; i < 3; ++i)
    builder.quick_push (simplify_gen_binary (PLUS, GET_MODE_INNER (mode),
					     builder[i - 1], step));
6171 | return builder.build (); |
6172 | } |
6173 | |
6174 | /* Generate a vector of mode MODE in which element I has the value |
6175 | BASE + I * STEP. The result will be a constant if BASE and STEP |
6176 | are both constants. */ |
6177 | |
6178 | rtx |
6179 | gen_vec_series (machine_mode mode, rtx base, rtx step) |
6180 | { |
6181 | if (step == const0_rtx) |
    return gen_vec_duplicate (mode, base);
  if (valid_for_const_vector_p (mode, base)
      && valid_for_const_vector_p (mode, step))
6185 | return gen_const_vec_series (mode, base, step); |
6186 | return gen_rtx_VEC_SERIES (mode, base, step); |
6187 | } |
6188 | |
6189 | /* Generate a new vector constant for mode MODE and constant value |
6190 | CONSTANT. */ |
6191 | |
6192 | static rtx |
6193 | gen_const_vector (machine_mode mode, int constant) |
6194 | { |
6195 | machine_mode inner = GET_MODE_INNER (mode); |
6196 | |
6197 | gcc_assert (!DECIMAL_FLOAT_MODE_P (inner)); |
6198 | |
6199 | rtx el = const_tiny_rtx[constant][(int) inner]; |
6200 | gcc_assert (el); |
6201 | |
  return gen_const_vec_duplicate (mode, el);
6203 | } |
6204 | |
6205 | /* Generate a vector like gen_rtx_raw_CONST_VEC, but use the zero vector when |
6206 | all elements are zero, and the one vector when all elements are one. */ |
6207 | rtx |
6208 | gen_rtx_CONST_VECTOR (machine_mode mode, rtvec v) |
6209 | { |
6210 | gcc_assert (known_eq (GET_MODE_NUNITS (mode), GET_NUM_ELEM (v))); |
6211 | |
6212 | /* If the values are all the same, check to see if we can use one of the |
6213 | standard constant vectors. */ |
6214 | if (rtvec_all_equal_p (v)) |
6215 | return gen_const_vec_duplicate (mode, RTVEC_ELT (v, 0)); |
6216 | |
6217 | unsigned int nunits = GET_NUM_ELEM (v); |
6218 | rtx_vector_builder builder (mode, nunits, 1); |
6219 | for (unsigned int i = 0; i < nunits; ++i) |
6220 | builder.quick_push (RTVEC_ELT (v, i)); |
6221 | return builder.build (v); |
6222 | } |
6223 | |
/* Initialize global register information required by all functions.  */
6225 | |
6226 | void |
6227 | init_emit_regs (void) |
6228 | { |
6229 | int i; |
6230 | machine_mode mode; |
6231 | mem_attrs *attrs; |
6232 | |
6233 | /* Reset register attributes */ |
6234 | reg_attrs_htab->empty (); |
6235 | |
6236 | /* We need reg_raw_mode, so initialize the modes now. */ |
6237 | init_reg_modes_target (); |
6238 | |
6239 | /* Assign register numbers to the globally defined register rtx. */ |
6240 | stack_pointer_rtx = gen_raw_REG (Pmode, STACK_POINTER_REGNUM); |
6241 | frame_pointer_rtx = gen_raw_REG (Pmode, FRAME_POINTER_REGNUM); |
6242 | hard_frame_pointer_rtx = gen_raw_REG (Pmode, HARD_FRAME_POINTER_REGNUM); |
6243 | arg_pointer_rtx = gen_raw_REG (Pmode, ARG_POINTER_REGNUM); |
6244 | virtual_incoming_args_rtx = |
6245 | gen_raw_REG (Pmode, VIRTUAL_INCOMING_ARGS_REGNUM); |
6246 | virtual_stack_vars_rtx = |
6247 | gen_raw_REG (Pmode, VIRTUAL_STACK_VARS_REGNUM); |
6248 | virtual_stack_dynamic_rtx = |
6249 | gen_raw_REG (Pmode, VIRTUAL_STACK_DYNAMIC_REGNUM); |
6250 | virtual_outgoing_args_rtx = |
6251 | gen_raw_REG (Pmode, VIRTUAL_OUTGOING_ARGS_REGNUM); |
6252 | virtual_cfa_rtx = gen_raw_REG (Pmode, VIRTUAL_CFA_REGNUM); |
6253 | virtual_preferred_stack_boundary_rtx = |
6254 | gen_raw_REG (Pmode, VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM); |
6255 | |
6256 | /* Initialize RTL for commonly used hard registers. These are |
6257 | copied into regno_reg_rtx as we begin to compile each function. */ |
6258 | for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) |
    initial_regno_reg_rtx[i] = gen_raw_REG (reg_raw_mode[i], i);
6260 | |
6261 | #ifdef RETURN_ADDRESS_POINTER_REGNUM |
6262 | return_address_pointer_rtx |
6263 | = gen_raw_REG (Pmode, RETURN_ADDRESS_POINTER_REGNUM); |
6264 | #endif |
6265 | |
6266 | pic_offset_table_rtx = NULL_RTX; |
6267 | if ((unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM) |
6268 | pic_offset_table_rtx = gen_raw_REG (Pmode, PIC_OFFSET_TABLE_REGNUM); |
6269 | |
6270 | /* Process stack-limiting command-line options. */ |
6271 | if (opt_fstack_limit_symbol_arg != NULL) |
6272 | stack_limit_rtx |
6273 | = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (opt_fstack_limit_symbol_arg)); |
6274 | if (opt_fstack_limit_register_no >= 0) |
    stack_limit_rtx = gen_rtx_REG (Pmode, opt_fstack_limit_register_no);
6276 | |
6277 | for (i = 0; i < (int) MAX_MACHINE_MODE; i++) |
6278 | { |
6279 | mode = (machine_mode) i; |
6280 | attrs = ggc_cleared_alloc<mem_attrs> (); |
6281 | attrs->align = BITS_PER_UNIT; |
6282 | attrs->addrspace = ADDR_SPACE_GENERIC; |
6283 | if (mode != BLKmode && mode != VOIDmode) |
6284 | { |
6285 | attrs->size_known_p = true; |
6286 | attrs->size = GET_MODE_SIZE (mode); |
6287 | if (STRICT_ALIGNMENT) |
6288 | attrs->align = GET_MODE_ALIGNMENT (mode); |
6289 | } |
6290 | mode_mem_attrs[i] = attrs; |
6291 | } |
6292 | |
6293 | split_branch_probability = profile_probability::uninitialized (); |
6294 | } |
6295 | |
6296 | /* Initialize global machine_mode variables. */ |
6297 | |
6298 | void |
6299 | init_derived_machine_modes (void) |
6300 | { |
6301 | opt_scalar_int_mode mode_iter, opt_byte_mode, opt_word_mode; |
6302 | FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT) |
6303 | { |
6304 | scalar_int_mode mode = mode_iter.require (); |
6305 | |
6306 | if (GET_MODE_BITSIZE (mode) == BITS_PER_UNIT |
6307 | && !opt_byte_mode.exists ()) |
6308 | opt_byte_mode = mode; |
6309 | |
6310 | if (GET_MODE_BITSIZE (mode) == BITS_PER_WORD |
6311 | && !opt_word_mode.exists ()) |
6312 | opt_word_mode = mode; |
6313 | } |
6314 | |
6315 | byte_mode = opt_byte_mode.require (); |
6316 | word_mode = opt_word_mode.require (); |
6317 | ptr_mode = as_a <scalar_int_mode> |
    (mode_for_size (POINTER_SIZE, GET_MODE_CLASS (Pmode), 0).require ());
6319 | } |
6320 | |
6321 | /* Create some permanent unique rtl objects shared between all functions. */ |
6322 | |
6323 | void |
6324 | init_emit_once (void) |
6325 | { |
6326 | int i; |
6327 | machine_mode mode; |
6328 | scalar_float_mode double_mode; |
6329 | opt_scalar_mode smode_iter; |
6330 | |
6331 | /* Initialize the CONST_INT, CONST_WIDE_INT, CONST_DOUBLE, |
6332 | CONST_FIXED, and memory attribute hash tables. */ |
  const_int_htab = hash_table<const_int_hasher>::create_ggc (37);
6334 | |
6335 | #if TARGET_SUPPORTS_WIDE_INT |
  const_wide_int_htab = hash_table<const_wide_int_hasher>::create_ggc (37);
6337 | #endif |
  const_double_htab = hash_table<const_double_hasher>::create_ggc (37);
6339 | |
6340 | if (NUM_POLY_INT_COEFFS > 1) |
    const_poly_int_htab = hash_table<const_poly_int_hasher>::create_ggc (37);
6342 | |
  const_fixed_htab = hash_table<const_fixed_hasher>::create_ggc (37);
6344 | |
  reg_attrs_htab = hash_table<reg_attr_hasher>::create_ggc (37);
6346 | |
6347 | #ifdef INIT_EXPANDERS |
6348 | /* This is to initialize {init|mark|free}_machine_status before the first |
6349 | call to push_function_context_to. This is needed by the Chill front |
6350 | end which calls push_function_context_to before the first call to |
6351 | init_function_start. */ |
6352 | INIT_EXPANDERS; |
6353 | #endif |
6354 | |
6355 | /* Create the unique rtx's for certain rtx codes and operand values. */ |
6356 | |
6357 | /* Don't use gen_rtx_CONST_INT here since gen_rtx_CONST_INT in this case |
6358 | tries to use these variables. */ |
6359 | for (i = - MAX_SAVED_CONST_INT; i <= MAX_SAVED_CONST_INT; i++) |
6360 | const_int_rtx[i + MAX_SAVED_CONST_INT] = |
6361 | gen_rtx_raw_CONST_INT (VOIDmode, (HOST_WIDE_INT) i); |
6362 | |
6363 | if (STORE_FLAG_VALUE >= - MAX_SAVED_CONST_INT |
6364 | && STORE_FLAG_VALUE <= MAX_SAVED_CONST_INT) |
6365 | const_true_rtx = const_int_rtx[STORE_FLAG_VALUE + MAX_SAVED_CONST_INT]; |
6366 | else |
6367 | const_true_rtx = gen_rtx_CONST_INT (VOIDmode, STORE_FLAG_VALUE); |
6368 | |
6369 | double_mode = float_mode_for_size (DOUBLE_TYPE_SIZE).require (); |
6370 | |
6371 | real_from_integer (&dconst0, double_mode, 0, SIGNED); |
6372 | real_from_integer (&dconst1, double_mode, 1, SIGNED); |
6373 | real_from_integer (&dconst2, double_mode, 2, SIGNED); |
6374 | |
6375 | dconstm0 = dconst0; |
6376 | dconstm0.sign = 1; |
6377 | |
6378 | dconstm1 = dconst1; |
6379 | dconstm1.sign = 1; |
6380 | |
6381 | dconsthalf = dconst1; |
6382 | SET_REAL_EXP (&dconsthalf, REAL_EXP (&dconsthalf) - 1); |
6383 | |
6384 | real_inf (&dconstinf); |
  real_inf (&dconstninf, true);
6386 | |
6387 | for (i = 0; i < 3; i++) |
6388 | { |
6389 | const REAL_VALUE_TYPE *const r = |
6390 | (i == 0 ? &dconst0 : i == 1 ? &dconst1 : &dconst2); |
6391 | |
6392 | FOR_EACH_MODE_IN_CLASS (mode, MODE_FLOAT) |
6393 | const_tiny_rtx[i][(int) mode] = |
	const_double_from_real_value (*r, mode);
6395 | |
6396 | FOR_EACH_MODE_IN_CLASS (mode, MODE_DECIMAL_FLOAT) |
6397 | const_tiny_rtx[i][(int) mode] = |
	const_double_from_real_value (*r, mode);
6399 | |
6400 | const_tiny_rtx[i][(int) VOIDmode] = GEN_INT (i); |
6401 | |
6402 | FOR_EACH_MODE_IN_CLASS (mode, MODE_INT) |
6403 | const_tiny_rtx[i][(int) mode] = GEN_INT (i); |
6404 | |
6405 | for (mode = MIN_MODE_PARTIAL_INT; |
6406 | mode <= MAX_MODE_PARTIAL_INT; |
6407 | mode = (machine_mode)((int)(mode) + 1)) |
6408 | const_tiny_rtx[i][(int) mode] = GEN_INT (i); |
6409 | } |
6410 | |
6411 | const_tiny_rtx[3][(int) VOIDmode] = constm1_rtx; |
6412 | |
6413 | FOR_EACH_MODE_IN_CLASS (mode, MODE_INT) |
6414 | const_tiny_rtx[3][(int) mode] = constm1_rtx; |
6415 | |
6416 | /* For BImode, 1 and -1 are unsigned and signed interpretations |
6417 | of the same value. */ |
6418 | for (mode = MIN_MODE_BOOL; |
6419 | mode <= MAX_MODE_BOOL; |
6420 | mode = (machine_mode)((int)(mode) + 1)) |
6421 | { |
6422 | const_tiny_rtx[0][(int) mode] = const0_rtx; |
6423 | if (mode == BImode) |
6424 | { |
6425 | const_tiny_rtx[1][(int) mode] = const_true_rtx; |
6426 | const_tiny_rtx[3][(int) mode] = const_true_rtx; |
6427 | } |
6428 | else |
6429 | { |
6430 | const_tiny_rtx[1][(int) mode] = const1_rtx; |
6431 | const_tiny_rtx[3][(int) mode] = constm1_rtx; |
6432 | } |
6433 | } |
6434 | |
6435 | for (mode = MIN_MODE_PARTIAL_INT; |
6436 | mode <= MAX_MODE_PARTIAL_INT; |
6437 | mode = (machine_mode)((int)(mode) + 1)) |
6438 | const_tiny_rtx[3][(int) mode] = constm1_rtx; |
6439 | |
6440 | FOR_EACH_MODE_IN_CLASS (mode, MODE_COMPLEX_INT) |
6441 | { |
6442 | rtx inner = const_tiny_rtx[0][(int)GET_MODE_INNER (mode)]; |
6443 | const_tiny_rtx[0][(int) mode] = gen_rtx_CONCAT (mode, inner, inner); |
6444 | } |
6445 | |
6446 | FOR_EACH_MODE_IN_CLASS (mode, MODE_COMPLEX_FLOAT) |
6447 | { |
6448 | rtx inner = const_tiny_rtx[0][(int)GET_MODE_INNER (mode)]; |
6449 | const_tiny_rtx[0][(int) mode] = gen_rtx_CONCAT (mode, inner, inner); |
6450 | } |
6451 | |
6452 | FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_BOOL) |
6453 | { |
      const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
      const_tiny_rtx[3][(int) mode] = gen_const_vector (mode, 3);
6456 | if (GET_MODE_INNER (mode) == BImode) |
6457 | /* As for BImode, "all 1" and "all -1" are unsigned and signed |
6458 | interpretations of the same value. */ |
6459 | const_tiny_rtx[1][(int) mode] = const_tiny_rtx[3][(int) mode]; |
6460 | else |
	const_tiny_rtx[1][(int) mode] = gen_const_vector (mode, 1);
6462 | } |
6463 | |
6464 | FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_INT) |
6465 | { |
      const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
      const_tiny_rtx[1][(int) mode] = gen_const_vector (mode, 1);
      const_tiny_rtx[3][(int) mode] = gen_const_vector (mode, 3);
6469 | } |
6470 | |
6471 | FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_FLOAT) |
6472 | { |
      const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
      const_tiny_rtx[1][(int) mode] = gen_const_vector (mode, 1);
6475 | } |
6476 | |
6477 | FOR_EACH_MODE_IN_CLASS (smode_iter, MODE_FRACT) |
6478 | { |
6479 | scalar_mode smode = smode_iter.require (); |
6480 | FCONST0 (smode).data.high = 0; |
6481 | FCONST0 (smode).data.low = 0; |
6482 | FCONST0 (smode).mode = smode; |
6483 | const_tiny_rtx[0][(int) smode] |
6484 | = CONST_FIXED_FROM_FIXED_VALUE (FCONST0 (smode), smode); |
6485 | } |
6486 | |
6487 | FOR_EACH_MODE_IN_CLASS (smode_iter, MODE_UFRACT) |
6488 | { |
6489 | scalar_mode smode = smode_iter.require (); |
6490 | FCONST0 (smode).data.high = 0; |
6491 | FCONST0 (smode).data.low = 0; |
6492 | FCONST0 (smode).mode = smode; |
6493 | const_tiny_rtx[0][(int) smode] |
6494 | = CONST_FIXED_FROM_FIXED_VALUE (FCONST0 (smode), smode); |
6495 | } |
6496 | |
6497 | FOR_EACH_MODE_IN_CLASS (smode_iter, MODE_ACCUM) |
6498 | { |
6499 | scalar_mode smode = smode_iter.require (); |
6500 | FCONST0 (smode).data.high = 0; |
6501 | FCONST0 (smode).data.low = 0; |
6502 | FCONST0 (smode).mode = smode; |
6503 | const_tiny_rtx[0][(int) smode] |
6504 | = CONST_FIXED_FROM_FIXED_VALUE (FCONST0 (smode), smode); |
6505 | |
6506 | /* We store the value 1. */ |
6507 | FCONST1 (smode).data.high = 0; |
6508 | FCONST1 (smode).data.low = 0; |
6509 | FCONST1 (smode).mode = smode; |
6510 | FCONST1 (smode).data |
6511 | = double_int_one.lshift (GET_MODE_FBIT (smode), |
6512 | HOST_BITS_PER_DOUBLE_INT, |
6513 | SIGNED_FIXED_POINT_MODE_P (smode)); |
6514 | const_tiny_rtx[1][(int) smode] |
6515 | = CONST_FIXED_FROM_FIXED_VALUE (FCONST1 (smode), smode); |
6516 | } |
6517 | |
6518 | FOR_EACH_MODE_IN_CLASS (smode_iter, MODE_UACCUM) |
6519 | { |
6520 | scalar_mode smode = smode_iter.require (); |
6521 | FCONST0 (smode).data.high = 0; |
6522 | FCONST0 (smode).data.low = 0; |
6523 | FCONST0 (smode).mode = smode; |
6524 | const_tiny_rtx[0][(int) smode] |
6525 | = CONST_FIXED_FROM_FIXED_VALUE (FCONST0 (smode), smode); |
6526 | |
6527 | /* We store the value 1. */ |
6528 | FCONST1 (smode).data.high = 0; |
6529 | FCONST1 (smode).data.low = 0; |
6530 | FCONST1 (smode).mode = smode; |
6531 | FCONST1 (smode).data |
6532 | = double_int_one.lshift (GET_MODE_FBIT (smode), |
6533 | HOST_BITS_PER_DOUBLE_INT, |
6534 | SIGNED_FIXED_POINT_MODE_P (smode)); |
6535 | const_tiny_rtx[1][(int) smode] |
6536 | = CONST_FIXED_FROM_FIXED_VALUE (FCONST1 (smode), smode); |
6537 | } |
6538 | |
6539 | FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_FRACT) |
6540 | { |
      const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
6542 | } |
6543 | |
6544 | FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_UFRACT) |
6545 | { |
      const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
6547 | } |
6548 | |
6549 | FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_ACCUM) |
6550 | { |
      const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
      const_tiny_rtx[1][(int) mode] = gen_const_vector (mode, 1);
6553 | } |
6554 | |
6555 | FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_UACCUM) |
6556 | { |
      const_tiny_rtx[0][(int) mode] = gen_const_vector (mode, 0);
      const_tiny_rtx[1][(int) mode] = gen_const_vector (mode, 1);
6559 | } |
6560 | |
6561 | for (i = (int) CCmode; i < (int) MAX_MACHINE_MODE; ++i) |
6562 | if (GET_MODE_CLASS ((machine_mode) i) == MODE_CC) |
6563 | const_tiny_rtx[0][i] = const0_rtx; |
6564 | |
6565 | pc_rtx = gen_rtx_fmt_ (PC, VOIDmode); |
6566 | ret_rtx = gen_rtx_fmt_ (RETURN, VOIDmode); |
6567 | simple_return_rtx = gen_rtx_fmt_ (SIMPLE_RETURN, VOIDmode); |
6568 | invalid_insn_rtx = gen_rtx_INSN (VOIDmode, |
6569 | /*prev_insn=*/NULL, |
6570 | /*next_insn=*/NULL, |
6571 | /*bb=*/NULL, |
6572 | /*pattern=*/NULL_RTX, |
6573 | /*location=*/-1, |
				   CODE_FOR_nothing,
6575 | /*reg_notes=*/NULL_RTX); |
6576 | } |
6577 | |
6578 | /* Produce exact duplicate of insn INSN after AFTER. |
   Take care to update libcall regions if present.  */
6580 | |
6581 | rtx_insn * |
6582 | emit_copy_of_insn_after (rtx_insn *insn, rtx_insn *after) |
6583 | { |
6584 | rtx_insn *new_rtx; |
6585 | rtx link; |
6586 | |
6587 | switch (GET_CODE (insn)) |
6588 | { |
6589 | case INSN: |
      new_rtx = emit_insn_after (copy_insn (PATTERN (insn)), after);
6591 | break; |
6592 | |
6593 | case JUMP_INSN: |
      new_rtx = emit_jump_insn_after (copy_insn (PATTERN (insn)), after);
6595 | CROSSING_JUMP_P (new_rtx) = CROSSING_JUMP_P (insn); |
6596 | break; |
6597 | |
6598 | case DEBUG_INSN: |
      new_rtx = emit_debug_insn_after (copy_insn (PATTERN (insn)), after);
6600 | break; |
6601 | |
6602 | case CALL_INSN: |
      new_rtx = emit_call_insn_after (copy_insn (PATTERN (insn)), after);
6604 | if (CALL_INSN_FUNCTION_USAGE (insn)) |
6605 | CALL_INSN_FUNCTION_USAGE (new_rtx) |
6606 | = copy_insn (CALL_INSN_FUNCTION_USAGE (insn)); |
6607 | SIBLING_CALL_P (new_rtx) = SIBLING_CALL_P (insn); |
6608 | RTL_CONST_CALL_P (new_rtx) = RTL_CONST_CALL_P (insn); |
6609 | RTL_PURE_CALL_P (new_rtx) = RTL_PURE_CALL_P (insn); |
6610 | RTL_LOOPING_CONST_OR_PURE_CALL_P (new_rtx) |
6611 | = RTL_LOOPING_CONST_OR_PURE_CALL_P (insn); |
6612 | break; |
6613 | |
6614 | default: |
6615 | gcc_unreachable (); |
6616 | } |
6617 | |
6618 | /* Update LABEL_NUSES. */ |
6619 | if (NONDEBUG_INSN_P (insn)) |
    mark_jump_label (PATTERN (new_rtx), new_rtx, 0);
6621 | |
  INSN_LOCATION (new_rtx) = INSN_LOCATION (insn);
6623 | |
6624 | /* If the old insn is frame related, then so is the new one. This is |
6625 | primarily needed for IA-64 unwind info which marks epilogue insns, |
6626 | which may be duplicated by the basic block reordering code. */ |
6627 | RTX_FRAME_RELATED_P (new_rtx) = RTX_FRAME_RELATED_P (insn); |
6628 | |
6629 | /* Locate the end of existing REG_NOTES in NEW_RTX. */ |
6630 | rtx *ptail = ®_NOTES (new_rtx); |
6631 | while (*ptail != NULL_RTX) |
6632 | ptail = &XEXP (*ptail, 1); |
6633 | |
6634 | /* Copy all REG_NOTES except REG_LABEL_OPERAND since mark_jump_label |
6635 | will make them. REG_LABEL_TARGETs are created there too, but are |
6636 | supposed to be sticky, so we copy them. */ |
6637 | for (link = REG_NOTES (insn); link; link = XEXP (link, 1)) |
6638 | if (REG_NOTE_KIND (link) != REG_LABEL_OPERAND) |
6639 | { |
6640 | *ptail = duplicate_reg_note (link); |
6641 | ptail = &XEXP (*ptail, 1); |
6642 | } |
6643 | |
6644 | INSN_CODE (new_rtx) = INSN_CODE (insn); |
6645 | return new_rtx; |
6646 | } |
6647 | |
/* Cached CLOBBER expressions for hard registers, indexed by machine
   mode and hard register number.  */
static GTY((deletable)) rtx
hard_reg_clobbers [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Return a CLOBBER of hard register REGNO in mode MODE, reusing a
   cached rtx where possible.  */
rtx
6650 | gen_hard_reg_clobber (machine_mode mode, unsigned int regno) |
6651 | { |
6652 | if (hard_reg_clobbers[mode][regno]) |
6653 | return hard_reg_clobbers[mode][regno]; |
6654 | else |
6655 | return (hard_reg_clobbers[mode][regno] = |
6656 | gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (mode, regno))); |
6657 | } |
6658 | |
/* Locations assigned to prologue and epilogue insns.  */
location_t prologue_location;
location_t epilogue_location;
6661 | |
/* Hold the current location information and the last location
   information, so that the data structures are built lazily, only
   when instructions at a given location are actually needed.  */
6665 | static location_t curr_location; |
6666 | |
/* Allocate the insn location data structure.  */
6668 | void |
6669 | insn_locations_init (void) |
6670 | { |
6671 | prologue_location = epilogue_location = 0; |
6672 | curr_location = UNKNOWN_LOCATION; |
6673 | } |
6674 | |
/* At the end of the emit stage, remember the current location as the
   epilogue location and clear it.  */
6676 | void |
6677 | insn_locations_finalize (void) |
6678 | { |
6679 | epilogue_location = curr_location; |
6680 | curr_location = UNKNOWN_LOCATION; |
6681 | } |
6682 | |
6683 | /* Set current location. */ |
6684 | void |
6685 | set_curr_insn_location (location_t location) |
6686 | { |
6687 | curr_location = location; |
6688 | } |
6689 | |
6690 | /* Get current location. */ |
6691 | location_t |
6692 | curr_insn_location (void) |
6693 | { |
6694 | return curr_location; |
6695 | } |
6696 | |
6697 | /* Set the location of the insn chain starting at INSN to LOC. */ |
6698 | void |
6699 | set_insn_locations (rtx_insn *insn, location_t loc) |
6700 | { |
6701 | while (insn) |
6702 | { |
6703 | if (INSN_P (insn)) |
6704 | INSN_LOCATION (insn) = loc; |
6705 | insn = NEXT_INSN (insn); |
6706 | } |
6707 | } |
6708 | |
/* Return the lexical scope block INSN belongs to.  */
6710 | tree |
6711 | insn_scope (const rtx_insn *insn) |
6712 | { |
6713 | return LOCATION_BLOCK (INSN_LOCATION (insn)); |
6714 | } |
6715 | |
6716 | /* Return line number of the statement that produced this insn. */ |
6717 | int |
6718 | insn_line (const rtx_insn *insn) |
6719 | { |
6720 | return LOCATION_LINE (INSN_LOCATION (insn)); |
6721 | } |
6722 | |
6723 | /* Return source file of the statement that produced this insn. */ |
6724 | const char * |
6725 | insn_file (const rtx_insn *insn) |
6726 | { |
6727 | return LOCATION_FILE (INSN_LOCATION (insn)); |
6728 | } |
6729 | |
6730 | /* Return expanded location of the statement that produced this insn. */ |
6731 | expanded_location |
6732 | insn_location (const rtx_insn *insn) |
6733 | { |
6734 | return expand_location (INSN_LOCATION (insn)); |
6735 | } |
6736 | |
/* Return true if memory model MODEL requires a pre-operation (release-style)
   barrier or a post-operation (acquire-style) barrier.  While not universal,
   this function matches the behavior of several targets.  */
6740 | |
6741 | bool |
6742 | need_atomic_barrier_p (enum memmodel model, bool pre) |
6743 | { |
6744 | switch (model & MEMMODEL_BASE_MASK) |
6745 | { |
6746 | case MEMMODEL_RELAXED: |
6747 | case MEMMODEL_CONSUME: |
6748 | return false; |
6749 | case MEMMODEL_RELEASE: |
6750 | return pre; |
6751 | case MEMMODEL_ACQUIRE: |
6752 | return !pre; |
6753 | case MEMMODEL_ACQ_REL: |
6754 | case MEMMODEL_SEQ_CST: |
6755 | return true; |
6756 | default: |
6757 | gcc_unreachable (); |
6758 | } |
6759 | } |
6760 | |
6761 | /* Return a constant shift amount for shifting a value of mode MODE |
6762 | by VALUE bits. */ |
6763 | |
6764 | rtx |
6765 | gen_int_shift_amount (machine_mode, poly_int64 value) |
6766 | { |
6767 | /* Use a 64-bit mode, to avoid any truncation. |
6768 | |
6769 | ??? Perhaps this should be automatically derived from the .md files |
6770 | instead, or perhaps have a target hook. */ |
  scalar_int_mode shift_mode = (BITS_PER_UNIT == 8
				? DImode
				: int_mode_for_size (64, 0).require ());
  return gen_int_mode (value, shift_mode);
6775 | } |
6776 | |
6777 | /* Initialize fields of rtl_data related to stack alignment. */ |
6778 | |
6779 | void |
6780 | rtl_data::init_stack_alignment () |
6781 | { |
6782 | stack_alignment_needed = STACK_BOUNDARY; |
6783 | max_used_stack_slot_alignment = STACK_BOUNDARY; |
6784 | stack_alignment_estimated = 0; |
6785 | preferred_stack_boundary = STACK_BOUNDARY; |
6786 | } |
6787 | |
6788 | |
6789 | #include "gt-emit-rtl.h" |
6790 | |